// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "gpu/config/gpu_test_expectations_parser.h"
#include <stddef.h>
#include <stdint.h>
#include "base/check_op.h"
#include "base/files/file_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
namespace gpu {
namespace {
enum LineParserStage {
kLineParserBegin = 0,
kLineParserBugID,
kLineParserConfigs,
kLineParserColon,
kLineParserTestName,
kLineParserEqual,
kLineParserExpectations,
};
enum Token {
// os
kConfigWinXP = 0,
kConfigWinVista,
kConfigWin7,
kConfigWin8,
kConfigWin10,
kConfigWin,
kConfigMacLeopard,
kConfigMacSnowLeopard,
kConfigMacLion,
kConfigMacMountainLion,
kConfigMacMavericks,
kConfigMacYosemite,
kConfigMacElCapitan,
kConfigMacSierra,
kConfigMacHighSierra,
kConfigMacMojave,
kConfigMacCatalina,
kConfigMacBigSur,
kConfigMacMonterey,
kConfigMac,
kConfigLinux,
kConfigChromeOS,
kConfigAndroid,
// gpu vendor
kConfigNVidia,
kConfigAMD,
kConfigIntel,
kConfigVMWare,
// build type
kConfigRelease,
kConfigDebug,
// ANGLE renderer
kConfigD3D9,
kConfigD3D11,
kConfigGLDesktop,
kConfigGLES,
// command decoder
kConfigPassthrough,
kConfigValidating,
// expectation
kExpectationPass,
kExpectationFail,
kExpectationFlaky,
kExpectationTimeout,
kExpectationSkip,
// separator
kSeparatorColon,
kSeparatorEqual,
kNumberOfExactMatchTokens,
// others
kConfigGPUDeviceID,
kTokenComment,
kTokenWord,
};
struct TokenInfo {
const char* name;
int32_t flag;
};
const TokenInfo kTokenData[] = {
{"xp", GPUTestConfig::kOsWinXP},
{"vista", GPUTestConfig::kOsWinVista},
{"win7", GPUTestConfig::kOsWin7},
{"win8", GPUTestConfig::kOsWin8},
{"win10", GPUTestConfig::kOsWin10},
{"win", GPUTestConfig::kOsWin},
{"leopard", GPUTestConfig::kOsMacLeopard},
{"snowleopard", GPUTestConfig::kOsMacSnowLeopard},
{"lion", GPUTestConfig::kOsMacLion},
{"mountainlion", GPUTestConfig::kOsMacMountainLion},
{"mavericks", GPUTestConfig::kOsMacMavericks},
{"yosemite", GPUTestConfig::kOsMacYosemite},
{"elcapitan", GPUTestConfig::kOsMacElCapitan},
{"sierra", GPUTestConfig::kOsMacSierra},
{"highsierra", GPUTestConfig::kOsMacHighSierra},
{"mojave", GPUTestConfig::kOsMacMojave},
{"catalina", GPUTestConfig::kOsMacCatalina},
{"bigsur", GPUTestConfig::kOsMacBigSur},
{"monterey", GPUTestConfig::kOsMacMonterey},
{"mac", GPUTestConfig::kOsMac},
{"linux", GPUTestConfig::kOsLinux},
{"chromeos", GPUTestConfig::kOsChromeOS},
{"android", GPUTestConfig::kOsAndroid},
{"nvidia", 0x10DE},
{"amd", 0x1002},
{"intel", 0x8086},
{"vmware", 0x15ad},
{"release", GPUTestConfig::kBuildTypeRelease},
{"debug", GPUTestConfig::kBuildTypeDebug},
{"d3d9", GPUTestConfig::kAPID3D9},
{"d3d11", GPUTestConfig::kAPID3D11},
{"opengl", GPUTestConfig::kAPIGLDesktop},
{"gles", GPUTestConfig::kAPIGLES},
{"passthrough", GPUTestConfig::kCommandDecoderPassthrough},
{"validating", GPUTestConfig::kCommandDecoderValidating},
{"pass", GPUTestExpectationsParser::kGpuTestPass},
{"fail", GPUTestExpectationsParser::kGpuTestFail},
{"flaky", GPUTestExpectationsParser::kGpuTestFlaky},
{"timeout", GPUTestExpectationsParser::kGpuTestTimeout},
{"skip", GPUTestExpectationsParser::kGpuTestSkip},
{":", 0},
{"=", 0},
};
enum ErrorType {
kErrorFileIO = 0,
kErrorIllegalEntry,
kErrorInvalidEntry,
kErrorEntryWithOsConflicts,
kErrorEntryWithGpuVendorConflicts,
kErrorEntryWithBuildTypeConflicts,
kErrorEntryWithAPIConflicts,
kErrorEntryWithCommandDecoderConflicts,
kErrorEntryWithGpuDeviceIdConflicts,
kErrorEntryWithExpectationConflicts,
kErrorEntriesOverlap,
kNumberOfErrors,
};
const char* kErrorMessage[] = {
"file IO failed",
"entry with wrong format",
"entry invalid, likely wrong modifiers combination",
"entry with OS modifier conflicts",
"entry with GPU vendor modifier conflicts",
"entry with GPU build type conflicts",
"entry with GPU API conflicts",
"entry with GPU process command decoder conflicts",
"entry with GPU device id conflicts or malformat",
"entry with expectation modifier conflicts",
"two entries' configs overlap",
};
Token ParseToken(const std::string& word) {
if (base::StartsWith(word, "//", base::CompareCase::INSENSITIVE_ASCII))
return kTokenComment;
if (base::StartsWith(word, "0x", base::CompareCase::INSENSITIVE_ASCII))
return kConfigGPUDeviceID;
for (int32_t i = 0; i < kNumberOfExactMatchTokens; ++i) {
if (base::LowerCaseEqualsASCII(word, kTokenData[i].name))
return static_cast<Token>(i);
}
return kTokenWord;
}
// reference name can have the last character as *.
bool NamesMatching(const std::string& ref, const std::string& test_name) {
size_t len = ref.length();
if (len == 0)
return false;
if (ref[len - 1] == '*') {
if (test_name.length() > len - 1 &&
ref.compare(0, len - 1, test_name, 0, len - 1) == 0)
return true;
return false;
}
return (ref == test_name);
}
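// Illustrative examples (not from the original source, names are made up):
// NamesMatching("WebglConformance_*", "WebglConformance_attribs") is true
// (prefix match); NamesMatching("Foo", "FooBar") is false (exact match).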
}  // namespace
GPUTestExpectationsParser::GPUTestExpectationsParser() {
// Some sanity check.
DCHECK_EQ(static_cast<unsigned int>(kNumberOfExactMatchTokens),
sizeof(kTokenData) / sizeof(kTokenData[0]));
DCHECK_EQ(static_cast<unsigned int>(kNumberOfErrors),
sizeof(kErrorMessage) / sizeof(kErrorMessage[0]));
}
GPUTestExpectationsParser::~GPUTestExpectationsParser() = default;
bool GPUTestExpectationsParser::LoadTestExpectations(const std::string& data) {
entries_.clear();
error_messages_.clear();
std::vector<std::string> lines = base::SplitString(
data, "\n", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
bool rt = true;
for (size_t i = 0; i < lines.size(); ++i) {
if (!ParseLine(lines[i], i + 1))
rt = false;
}
if (DetectConflictsBetweenEntries()) {
entries_.clear();
rt = false;
}
return rt;
}
bool GPUTestExpectationsParser::LoadTestExpectations(
const base::FilePath& path) {
entries_.clear();
error_messages_.clear();
std::string data;
if (!base::ReadFileToString(path, &data)) {
error_messages_.push_back(kErrorMessage[kErrorFileIO]);
return false;
}
return LoadTestExpectations(data);
}
int32_t GPUTestExpectationsParser::GetTestExpectation(
const std::string& test_name,
const GPUTestBotConfig& bot_config) const {
for (size_t i = 0; i < entries_.size(); ++i) {
if (NamesMatching(entries_[i].test_name, test_name) &&
bot_config.Matches(entries_[i].test_config))
return entries_[i].test_expectation;
}
return kGpuTestPass;
}
const std::vector<std::string>&
GPUTestExpectationsParser::GetErrorMessages() const {
return error_messages_;
}
bool GPUTestExpectationsParser::ParseConfig(
const std::string& config_data, GPUTestConfig* config) {
DCHECK(config);
std::vector<std::string> tokens = base::SplitString(
config_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
base::SPLIT_WANT_NONEMPTY);
for (size_t i = 0; i < tokens.size(); ++i) {
Token token = ParseToken(tokens[i]);
switch (token) {
case kConfigWinXP:
case kConfigWinVista:
case kConfigWin7:
case kConfigWin8:
case kConfigWin10:
case kConfigWin:
case kConfigMacLeopard:
case kConfigMacSnowLeopard:
case kConfigMacLion:
case kConfigMacMountainLion:
case kConfigMacMavericks:
case kConfigMacYosemite:
case kConfigMacElCapitan:
case kConfigMacSierra:
case kConfigMacHighSierra:
case kConfigMacMojave:
case kConfigMacCatalina:
case kConfigMacBigSur:
case kConfigMacMonterey:
case kConfigMac:
case kConfigLinux:
case kConfigChromeOS:
case kConfigAndroid:
case kConfigNVidia:
case kConfigAMD:
case kConfigIntel:
case kConfigVMWare:
case kConfigRelease:
case kConfigDebug:
case kConfigD3D9:
case kConfigD3D11:
case kConfigGLDesktop:
case kConfigGLES:
case kConfigPassthrough:
case kConfigValidating:
case kConfigGPUDeviceID:
if (token == kConfigGPUDeviceID) {
if (!UpdateTestConfig(config, tokens[i], 0))
return false;
} else {
if (!UpdateTestConfig(config, token, 0))
return false;
}
break;
default:
return false;
}
}
return true;
}
bool GPUTestExpectationsParser::ParseLine(
const std::string& line_data, size_t line_number) {
std::vector<std::string> tokens = base::SplitString(
line_data, base::kWhitespaceASCII, base::KEEP_WHITESPACE,
base::SPLIT_WANT_NONEMPTY);
int32_t stage = kLineParserBegin;
GPUTestExpectationEntry entry;
entry.line_number = line_number;
GPUTestConfig& config = entry.test_config;
bool comments_encountered = false;
for (size_t i = 0; i < tokens.size() && !comments_encountered; ++i) {
Token token = ParseToken(tokens[i]);
switch (token) {
case kTokenComment:
comments_encountered = true;
break;
case kConfigWinXP:
case kConfigWinVista:
case kConfigWin7:
case kConfigWin8:
case kConfigWin10:
case kConfigWin:
case kConfigMacLeopard:
case kConfigMacSnowLeopard:
case kConfigMacLion:
case kConfigMacMountainLion:
case kConfigMacMavericks:
case kConfigMacYosemite:
case kConfigMacElCapitan:
case kConfigMacSierra:
case kConfigMacHighSierra:
case kConfigMacMojave:
case kConfigMacCatalina:
case kConfigMacBigSur:
case kConfigMacMonterey:
case kConfigMac:
case kConfigLinux:
case kConfigChromeOS:
case kConfigAndroid:
case kConfigNVidia:
case kConfigAMD:
case kConfigIntel:
case kConfigVMWare:
case kConfigRelease:
case kConfigDebug:
case kConfigD3D9:
case kConfigD3D11:
case kConfigGLDesktop:
case kConfigGLES:
case kConfigPassthrough:
case kConfigValidating:
case kConfigGPUDeviceID:
// MODIFIERS, could be in any order, need at least one.
if (stage != kLineParserConfigs && stage != kLineParserBugID) {
PushErrorMessage(kErrorMessage[kErrorIllegalEntry],
line_number);
return false;
}
if (token == kConfigGPUDeviceID) {
if (!UpdateTestConfig(&config, tokens[i], line_number))
return false;
} else {
if (!UpdateTestConfig(&config, token, line_number))
return false;
}
if (stage == kLineParserBugID)
stage++;
break;
case kSeparatorColon:
// :
if (stage != kLineParserConfigs) {
PushErrorMessage(kErrorMessage[kErrorIllegalEntry],
line_number);
return false;
}
stage++;
break;
case kSeparatorEqual:
// =
if (stage != kLineParserTestName) {
PushErrorMessage(kErrorMessage[kErrorIllegalEntry],
line_number);
return false;
}
stage++;
break;
case kTokenWord:
// BUG_ID or TEST_NAME
if (stage == kLineParserBegin) {
// Bug ID is not used for anything; ignore it.
} else if (stage == kLineParserColon) {
entry.test_name = tokens[i];
} else {
PushErrorMessage(kErrorMessage[kErrorIllegalEntry],
line_number);
return false;
}
stage++;
break;
case kExpectationPass:
case kExpectationFail:
case kExpectationFlaky:
case kExpectationTimeout:
case kExpectationSkip:
// TEST_EXPECTATIONS
if (stage != kLineParserEqual && stage != kLineParserExpectations) {
PushErrorMessage(kErrorMessage[kErrorIllegalEntry],
line_number);
return false;
}
if ((kTokenData[token].flag & entry.test_expectation) != 0) {
PushErrorMessage(kErrorMessage[kErrorEntryWithExpectationConflicts],
line_number);
return false;
}
entry.test_expectation =
(kTokenData[token].flag | entry.test_expectation);
if (stage == kLineParserEqual)
stage++;
break;
default:
DCHECK(false);
break;
}
}
if (stage == kLineParserBegin) {
// The whole line is empty or all comments
return true;
}
if (stage == kLineParserExpectations) {
if (!config.IsValid()) {
PushErrorMessage(kErrorMessage[kErrorInvalidEntry], line_number);
return false;
}
entries_.push_back(entry);
return true;
}
PushErrorMessage(kErrorMessage[kErrorIllegalEntry], line_number);
return false;
}
bool GPUTestExpectationsParser::UpdateTestConfig(GPUTestConfig* config,
int32_t token,
size_t line_number) {
DCHECK(config);
switch (token) {
case kConfigWinXP:
case kConfigWinVista:
case kConfigWin7:
case kConfigWin8:
case kConfigWin10:
case kConfigWin:
case kConfigMacLeopard:
case kConfigMacSnowLeopard:
case kConfigMacLion:
case kConfigMacMountainLion:
case kConfigMacMavericks:
case kConfigMacYosemite:
case kConfigMacElCapitan:
case kConfigMacSierra:
case kConfigMacHighSierra:
case kConfigMacMojave:
case kConfigMacCatalina:
case kConfigMacBigSur:
case kConfigMacMonterey:
case kConfigMac:
case kConfigLinux:
case kConfigChromeOS:
case kConfigAndroid:
if ((config->os() & kTokenData[token].flag) != 0) {
PushErrorMessage(kErrorMessage[kErrorEntryWithOsConflicts],
line_number);
return false;
}
config->set_os(config->os() | kTokenData[token].flag);
break;
case kConfigNVidia:
case kConfigAMD:
case kConfigIntel:
case kConfigVMWare:
{
uint32_t gpu_vendor = static_cast<uint32_t>(kTokenData[token].flag);
for (size_t i = 0; i < config->gpu_vendor().size(); ++i) {
if (config->gpu_vendor()[i] == gpu_vendor) {
PushErrorMessage(
kErrorMessage[kErrorEntryWithGpuVendorConflicts],
line_number);
return false;
}
}
config->AddGPUVendor(gpu_vendor);
}
break;
case kConfigRelease:
case kConfigDebug:
if ((config->build_type() & kTokenData[token].flag) != 0) {
PushErrorMessage(
kErrorMessage[kErrorEntryWithBuildTypeConflicts],
line_number);
return false;
}
config->set_build_type(
config->build_type() | kTokenData[token].flag);
break;
case kConfigD3D9:
case kConfigD3D11:
case kConfigGLDesktop:
case kConfigGLES:
if ((config->api() & kTokenData[token].flag) != 0) {
PushErrorMessage(kErrorMessage[kErrorEntryWithAPIConflicts],
line_number);
return false;
}
config->set_api(config->api() | kTokenData[token].flag);
break;
case kConfigPassthrough:
case kConfigValidating:
if ((config->command_decoder() & kTokenData[token].flag) != 0) {
PushErrorMessage(kErrorMessage[kErrorEntryWithCommandDecoderConflicts],
line_number);
return false;
}
config->set_command_decoder(config->command_decoder() |
kTokenData[token].flag);
break;
default:
DCHECK(false);
break;
}
return true;
}
bool GPUTestExpectationsParser::UpdateTestConfig(
GPUTestConfig* config,
const std::string& gpu_device_id,
size_t line_number) {
DCHECK(config);
uint32_t device_id = 0;
if (config->gpu_device_id() != 0 ||
!base::HexStringToUInt(gpu_device_id, &device_id) ||
device_id == 0) {
PushErrorMessage(kErrorMessage[kErrorEntryWithGpuDeviceIdConflicts],
line_number);
return false;
}
config->set_gpu_device_id(device_id);
return true;
}
bool GPUTestExpectationsParser::DetectConflictsBetweenEntries() {
bool rt = false;
for (size_t i = 0; i < entries_.size(); ++i) {
for (size_t j = i + 1; j < entries_.size(); ++j) {
if (entries_[i].test_name == entries_[j].test_name &&
entries_[i].test_config.OverlapsWith(entries_[j].test_config)) {
PushErrorMessage(kErrorMessage[kErrorEntriesOverlap],
entries_[i].line_number,
entries_[j].line_number);
rt = true;
}
}
}
return rt;
}
void GPUTestExpectationsParser::PushErrorMessage(
const std::string& message, size_t line_number) {
error_messages_.push_back(
base::StringPrintf("Line %d : %s",
static_cast<int>(line_number), message.c_str()));
}
void GPUTestExpectationsParser::PushErrorMessage(
const std::string& message,
size_t entry1_line_number,
size_t entry2_line_number) {
error_messages_.push_back(
base::StringPrintf("Line %d and %d : %s",
static_cast<int>(entry1_line_number),
static_cast<int>(entry2_line_number),
message.c_str()));
}
GPUTestExpectationsParser::GPUTestExpectationEntry::GPUTestExpectationEntry()
: test_expectation(0),
line_number(0) {
}
} // namespace gpu
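// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file). It shows the
// line grammar the parser accepts -- BUG_ID MODIFIERS : TEST_NAME = EXPECTATION
// -- using only the public API defined above; how GPUTestBotConfig is
// populated depends on the harness and is elided here.
//
// void ExpectationsExample(const gpu::GPUTestBotConfig& bot_config) {
//   gpu::GPUTestExpectationsParser parser;
//   // "100" is an arbitrary bug id (the parser ignores it); the entry says
//   // the test is expected to fail on Windows 10 bots with an NVIDIA GPU.
//   bool ok = parser.LoadTestExpectations(
//       std::string("100 win10 nvidia : WebglConformance_attribs = fail\n"));
//   DCHECK(ok);
//   int32_t expectation =
//       parser.GetTestExpectation("WebglConformance_attribs", bot_config);
//   // kGpuTestFail if bot_config matches win10 + nvidia, else kGpuTestPass.
// }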
|
#include "util.h"
#include "ModuleInfo.h"
#include <vector>
#include <sstream>
#include <iomanip>
#include "log.h"
#include "UtilException.h"
//#include "Thread.h"
#include "IniSettings.h"
#include <thread>
#include <stdio.h>
#include <unistd.h>
#include "UUCProperties.h"
#include <sys/stat.h>
static const char *szCompiledFile = __FILE__;
std::string globalLogDir;
std::string globalLogName;
bool FunctionLog=false;
bool globalLogParam=false;
bool firstGlobal=false;
const char *logGlobalVersion;
unsigned int GlobalDepth = 0;
bool mainInit=false;
bool mainEnable=false;
unsigned int GlobalCount;
enum logMode {
LM_Single, // a single file
LM_Module, // one file per module
LM_Thread, // one file per thread
LM_Module_Thread // one file per module and per thread
} LogMode = LM_Module;
void initLog(const char *moduleName, const char *iniFile,const char *version)
{
if (mainInit)
return;
mainInit=true;
logGlobalVersion=version;
OutputDebugString("File INI:");
OutputDebugString(iniFile);
OutputDebugString("\n");
UUCProperties settings;
settings.load(iniFile);
LogMode = (logMode)(settings.getIntProperty("LogMode", (int)LM_Single));//, "Log mode. Possible values:\n"
// "0 ;LM_Single, // a single file\n"
// "1 ;LM_Module, // one file per module\n"
// "2 ;LM_Thread, // one file per thread\n"
// "3 ;LM_Module_Thread // one file per module and per thread\n")).GetValue((char*)iniFile);
if (LogMode==-1) {
LogMode=LM_Single;
}
mainEnable = settings.getIntProperty("LogEnable",1);//,"Enable logging globally")).GetValue((char*)iniFile);
FunctionLog = settings.getIntProperty("FunctionLog", 1);//, "Enable logging of function calls")).GetValue((char*)iniFile);
GlobalDepth = settings.getIntProperty("FunctionDepth", 10);//, "Maximum logging depth for nested function calls\n")).GetValue((char*)iniFile);
globalLogParam = settings.getIntProperty("ParamLog", 1);//, "Enable logging of function input parameters")).GetValue((char*)iniFile);
globalLogName = moduleName;
char* home = getenv("HOME");
std::string path(home);
path.append("/.CIEPKI/");
struct stat st = {0};
if (stat(path.c_str(), &st) == -1) {
int r = mkdir(path.c_str(), 0700);
// printf("mkdir: %d", r);
}
globalLogDir = settings.getProperty("LogDir", path.c_str()); // "Directory where the log file is saved (with trailing /)"
// char SectionName[30];
// int numMod=1;
// while (true) {
// sprintf(SectionName,"%s%i","LogModule",numMod);
// std::string modName;
//
// (IniSettingsString(SectionName, "Name", "", "Name of the log section")).GetValue((char*)iniFile, modName);
//
// if (modName[0]==0)
// break;
//
// CLog emptyLog;
// logInit.push_back(emptyLog);
// CLog &log=logInit[logInit.size()-1];
// log.logName=modName;
//
// log.Enabled = (IniSettingsBool(SectionName, "LogEnable", MainEnable, "Enable logging for this section")).GetValue((char*)iniFile);
//
// (IniSettingsString(SectionName, "LogDir", logDirGlobal.c_str(), "Directory where this section's log file is saved (with trailing \\). Default: the global log directory")).GetValue((char*)iniFile, log.logDir);
//
// (IniSettingsString(SectionName, "LogFile", log.logName.c_str(), "Name of the file for this section's log (with trailing \\). Default: the name of the log section")).GetValue((char*)iniFile, log.logFileName);
//
// log.FunctionLog = (IniSettingsBool(SectionName, "FunctionLog", FunctionLog, "Enable logging of function calls for this section")).GetValue((char*)iniFile);
//
// log.LogParam = (IniSettingsBool(SectionName, "ParamLog", GlobalParam, "Enable logging of function input parameters for this section")).GetValue((char*)iniFile);
//
// log.Initialized=true;
// numMod++;
// }
// if (logInit.size()==0) {
// OutputDebugString("No LogModule defined. Define the sections [LogModule1]...[LogModuleN] with the values:\n");
// OutputDebugString("Name,LogEnable,LogDir,LogFile,FunctionLog,ParamLog\n");
// }
}
CLog::CLog() {
init();
}
CLog::~CLog() {
Enabled=false;
FirstLog=false;
}
void CLog::init() {
Enabled=mainEnable;
LogParam=globalLogParam;
LogCount=0;
logName = globalLogName;
logFileName = globalLogName;
std::stringstream th;
th << std::setfill('0');
time_t T= time(NULL);
struct tm tm = *localtime(&T);
// tm_year counts from 1900 and tm_mon from 0; adjust them to get the real date
switch (LogMode) {
case (LM_Single): {
th << logFileName << "_" << std::setw(4) << (tm.tm_year + 1900) << "-" << std::setw(2) << (tm.tm_mon + 1) << "-" << std::setw(2) << tm.tm_mday << ".log";
break;
}
case (LM_Module): {
th << std::setw(4) << (tm.tm_year + 1900) << "-" << std::setw(2) << (tm.tm_mon + 1) << "-" << std::setw(2) << tm.tm_mday << "_" << logFileName << ".log";
// per-module log: the file name is yyyy-mm-dd_name.log, with no path assigned
break;
}
case (LM_Thread): {
th << std::setw(4) << (tm.tm_year + 1900) << "-" << std::setw(2) << (tm.tm_mon + 1) << "-" << std::setw(2) << tm.tm_mday << "_00000000.log";
// per-thread log: the file name is yyyy-mm-dd_tttttttt.log, with no path assigned
break;
}
case (LM_Module_Thread): {
th << std::setw(4) << (tm.tm_year + 1900) << "-" << std::setw(2) << (tm.tm_mon + 1) << "-" << std::setw(2) << tm.tm_mday << "_" << logFileName << "_00000000.log";
// per-module, per-thread log: the file name is yyyy-mm-dd_name_tttttttt.log, with no path assigned
break;
}
}
logPath = th.str();
if ((LogMode==LM_Module || LogMode==LM_Module_Thread) && logDir.length()!=0) {
// if a section-specific directory is set, put the file there
// (concatenate instead of append(): append() would mutate logDir on every init)
logPath = logDir + logPath;
}
else if (!globalLogDir.empty()) {
// otherwise fall back to the global log directory
logPath = globalLogDir + logPath;
}
// iterator to the "00000000.log" placeholder at the end of the file name
threadPos = logPath.begin() + logPath.length() - 12;
Initialized=true;
if (LogMode!=LM_Module && LogMode!=LM_Module_Thread && Enabled) writePure("Module %02i: %s",ModuleNum,logName.c_str());
}
DWORD CLog::write(const char *format,...) {
va_list params;
va_start (params, format);
#ifdef _DEBUG
// vfprintf() below consumes 'params'; keep a copy for the debug output
va_list debugParams;
va_copy(debugParams, params);
#endif
char pbtDate[0x800];
unsigned int dummy = 0;
unsigned int *Num = &dummy;
if (Enabled && Initialized && mainEnable) {
if (!firstGlobal && LogMode==LM_Single) {
firstGlobal =true;
write("Session start - version: %s",logGlobalVersion);
writeModuleInfo();
}
if (!FirstLog && (LogMode==LM_Module || LogMode==LM_Module_Thread)) {
FirstLog=true;
write("%s - Session start - file version: %s",logName.c_str(), logVersion.c_str());
writeModuleInfo();
}
//DWORD thNum;
switch(LogMode) {
case (LM_Module) : Num=&LogCount; break;
case (LM_Module_Thread) :
//case (LM_Thread) : thNum=dwThreadCount;dwNum=&thNum; break;
case (LM_Single) : Num=&GlobalCount; break;
}
#ifdef WIN32
SYSTEMTIME stTime;
GetLocalTime(&stTime);
sprintf_s(pbtDate,sizeof(pbtDate),"%05u:[%02d:%02d:%02d.%03d]", *Num, stTime.wHour, stTime.wMinute, stTime.wSecond, stTime.wMilliseconds);
#else
time_t t = time(NULL);
tm tm = *localtime(&t);
sprintf(pbtDate,"%05u:[%02d:%02d:%02d]", *Num, tm.tm_hour, tm.tm_min, tm.tm_sec);
#endif
// in per-thread modes the thread id must be written into the file name
std::hash<std::thread::id> hasher;
auto dwThreadID = hasher(std::this_thread::get_id());
if (LogMode == LM_Thread || LogMode == LM_Module_Thread) {
std::stringstream th;
th << std::hex << std::uppercase << std::setfill('0') << std::setw(8);
// mask to 32 bits so the replacement is always exactly 8 hex digits
th << (dwThreadID & 0xFFFFFFFFu) << ".log";
logPath.replace(threadPos, logPath.end(), th.str());
}
}
FILE *lf=nullptr;
#ifdef WIN32
fopen_s(&lf,logPath.c_str(), "a+t");
if (lf) {
switch(LogMode) {
case (LM_Single) : fprintf(lf,"%s|%04i|%04x|%02i|", pbtDate, (int)GetCurrentProcessId(), (unsigned)dwThreadID, ModuleNum); break;
case (LM_Module) : fprintf(lf,"%s|%04i|%04x|", pbtDate, (int)GetCurrentProcessId(), (unsigned)dwThreadID); break;
case (LM_Thread) : fprintf(lf,"%s|%04i|%02i|", pbtDate, (int)GetCurrentProcessId(), ModuleNum); break;
case (LM_Module_Thread) : fprintf(lf,"%s|", pbtDate); break;
}
vfprintf(lf, format, params);
fprintf(lf, "\n");
fclose(lf);
}
#else
lf = fopen(logPath.c_str(), "a+t");
if (lf) {
switch(LogMode) {
case (LM_Single) : fprintf(lf,"%s|%04i|%04x|%02i|", pbtDate, getpid(), (unsigned)dwThreadID, ModuleNum); break;
case (LM_Module) : fprintf(lf,"%s|%04i|%04x|", pbtDate, getpid(), (unsigned)dwThreadID); break;
case (LM_Thread) : fprintf(lf,"%s|%04i|%02i|", pbtDate, getpid(), ModuleNum); break;
case (LM_Module_Thread) : fprintf(lf,"%s|", pbtDate); break;
}
vfprintf(lf, format, params);
fprintf(lf, "\n");
fclose(lf);
}
// printf(format, params);
// printf("\n", NULL);
#endif
}
#ifdef _DEBUG
#ifdef WIN32
vsprintf_s(pbtDate, format, debugParams);
int dtLen = (int)strnlen(pbtDate, sizeof(pbtDate));
sprintf_s(pbtDate + dtLen, sizeof(pbtDate) - dtLen, "|thread:%08x|%s|", (unsigned)GetCurrentThreadId(), logName.c_str());
dtLen = (int)strnlen(pbtDate, sizeof(pbtDate));
sprintf_s(pbtDate + dtLen, sizeof(pbtDate) - dtLen, "\n");
OutputDebugString(pbtDate);
#else
vsnprintf(pbtDate, sizeof(pbtDate), format, debugParams);
puts(pbtDate);
#endif
va_end(debugParams);
#endif
va_end(params);
switch(LogMode) {
case (LM_Module) : LogCount++; break;
case (LM_Module_Thread) :
//case (LM_Thread) : dwThreadCount=thNum+1; break;
case (LM_Single) : GlobalCount++; break;
}
return(*Num);
}
void CLog::writePure(const char *format,...) {
va_list params;
va_start (params, format);
#ifdef _DEBUG
// vfprintf() below consumes 'params'; keep a copy for the debug output
va_list debugParams;
va_copy(debugParams, params);
#endif
char pbtDate[0x800]={0};
if (Enabled && Initialized && mainEnable) {
if (!firstGlobal && LogMode==LM_Single) {
firstGlobal =true;
write("Session start - version: %s",logGlobalVersion);
writeModuleInfo();
}
if (!FirstLog && (LogMode==LM_Module || LogMode==LM_Module_Thread)) {
FirstLog=true;
write("%s - Session start - file version: %s",logName.c_str(), logVersion.c_str());
writeModuleInfo();
}
// in per-thread modes the thread id must be written into the file name
std::hash<std::thread::id> hasher;
auto dwThreadID = hasher(std::this_thread::get_id());
if (LogMode == LM_Thread || LogMode == LM_Module_Thread) {
std::stringstream th;
th << std::hex << std::uppercase << std::setfill('0') << std::setw(8);
th << (dwThreadID & 0xFFFFFFFFu) << ".log";
logPath.replace(threadPos, logPath.end(), th.str());
}
FILE *lf = nullptr;
#ifdef WIN32
fopen_s(&lf,logPath.c_str(), "a+t");
#else
lf = fopen(logPath.c_str(), "a+t");
#endif
if (lf) {
vfprintf(lf, format, params);
fprintf(lf, "\n");
fclose(lf);
}
// printf(format, params);
// printf("\n", NULL);
}
#ifdef _DEBUG
#ifdef WIN32
int dtLen = (int)strnlen(pbtDate, sizeof(pbtDate));
vsprintf_s(pbtDate + dtLen, sizeof(pbtDate) - dtLen, format, debugParams);
dtLen = (int)strnlen(pbtDate, sizeof(pbtDate));
sprintf_s(pbtDate + dtLen, sizeof(pbtDate) - dtLen, "\n");
OutputDebugString(pbtDate);
#else
vsnprintf(pbtDate, sizeof(pbtDate), format, debugParams);
puts(pbtDate);
#endif
va_end(debugParams);
#endif
va_end(params);
}
void CLog::writeBinData(BYTE *data, size_t datalen) {
if (!Enabled || !Initialized || !mainEnable) return;
if (!firstGlobal && LogMode==LM_Single) {
firstGlobal =true;
write("Session start - version: %s",logGlobalVersion);
writeModuleInfo();
}
if (!FirstLog && (LogMode==LM_Module || LogMode==LM_Module_Thread)) {
FirstLog=true;
write("%s - Session start - file version: %s",logName.c_str(), logVersion.c_str());
writeModuleInfo();
}
// in per-thread modes the thread id must be written into the file name
std::hash<std::thread::id> hasher;
auto dwThreadID = hasher(std::this_thread::get_id());
if (LogMode == LM_Thread || LogMode == LM_Module_Thread) {
std::stringstream th;
th << std::hex << std::uppercase << std::setfill('0') << std::setw(8);
th << (dwThreadID & 0xFFFFFFFFu) << ".log";
logPath.replace(threadPos, logPath.end(), th.str());
}
FILE *lf = nullptr;
#ifdef WIN32
fopen_s(&lf,logPath.c_str(), "a+t");
#else
lf = fopen(logPath.c_str(), "a+t");
#endif
if (lf) {
if (datalen>100) datalen=100;
for (size_t i=0;i<datalen;i++)
fprintf(lf, "%02x ", data[i]);
fprintf(lf, "\n");
fclose(lf);
}
}
void CLog::writeModuleInfo() {
if (!Enabled) return;
CModuleInfo module;
HANDLE mainModule = module.getApplicationModule();
module.init(mainModule);
write("Applicazione chiamante: %s",module.szModuleName.c_str());
}
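// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file). The INI key
// names come from initLog() above; the CLog member functions are the ones
// defined in this file, and BYTE is assumed to be unsigned char. Paths and
// values are placeholders.
//
// int main() {
//   initLog("MyModule", "/etc/myapp.ini", "1.0.0");
//   CLog log;  // the constructor runs init(), which reads the globals above
//   log.write("session started, pid=%d", getpid());
//   unsigned char blob[4] = {0xDE, 0xAD, 0xBE, 0xEF};
//   log.writeBinData(blob, sizeof(blob));  // hex dump, capped at 100 bytes
//   return 0;
// }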
|
#pragma once // Source encoding: utf-8 ∩
// #include <p/expressive/use_weakly_all.hpp>
// Copyright © 2017 Alf P. Steinbach, distributed under Boost license 1.0.
#include <p/expressive/all.hpp>
$use_weakly_all_from( progrock::expressive );
|
// Copyright (c) 2016, the Newspeak project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/lookup_cache.h"
namespace psoup {
void LookupCache::InsertOrdinary(intptr_t cid,
String selector,
Method target) {
intptr_t hash = cid
^ (static_cast<intptr_t>(selector) >> kObjectAlignmentLog2);
intptr_t probe1 = hash & kMask;
entries_[probe1].ordinary_cid = cid;
entries_[probe1].ordinary_selector = selector;
entries_[probe1].ordinary_target = target;
intptr_t probe2 = (hash >> 3) & kMask;
entries_[probe2].ordinary_cid = cid;
entries_[probe2].ordinary_selector = selector;
entries_[probe2].ordinary_target = target;
}
void LookupCache::InsertNS(intptr_t cid,
String selector,
Method caller,
intptr_t rule,
Object absent_receiver,
Method target) {
intptr_t hash = cid
^ (static_cast<intptr_t>(selector) >> kObjectAlignmentLog2)
^ (static_cast<intptr_t>(caller) >> kObjectAlignmentLog2);
intptr_t cid_and_rule = (cid << 16) | rule;
intptr_t probe1 = hash & kMask;
entries_[probe1].ns_cid_and_rule = cid_and_rule;
entries_[probe1].ns_selector = selector;
entries_[probe1].ns_caller = caller;
entries_[probe1].ns_target = target;
entries_[probe1].ns_absent_receiver = absent_receiver;
intptr_t probe2 = (hash >> 3) & kMask;
entries_[probe2].ns_cid_and_rule = cid_and_rule;
entries_[probe2].ns_selector = selector;
entries_[probe2].ns_caller = caller;
entries_[probe2].ns_target = target;
entries_[probe2].ns_absent_receiver = absent_receiver;
}
void LookupCache::Clear() {
for (intptr_t i = 0; i < kSize; i++) {
entries_[i].ordinary_cid = kIllegalCid;
entries_[i].ns_cid_and_rule = kIllegalCid << 16;
}
}
} // namespace psoup
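// Illustrative sketch (not part of the original file): a read path that
// matches the two-probe insertion scheme above. LookupOrdinary is
// hypothetical here -- the real lookup is declared in vm/lookup_cache.h and
// may differ -- but any lookup must check both slots, since an insert writes
// the same entry at (hash & kMask) and ((hash >> 3) & kMask).
//
// bool LookupCache::LookupOrdinary(intptr_t cid, String selector,
//                                  Method* target) {
//   intptr_t hash =
//       cid ^ (static_cast<intptr_t>(selector) >> kObjectAlignmentLog2);
//   intptr_t probe1 = hash & kMask;
//   if (entries_[probe1].ordinary_cid == cid &&
//       entries_[probe1].ordinary_selector == selector) {
//     *target = entries_[probe1].ordinary_target;
//     return true;
//   }
//   intptr_t probe2 = (hash >> 3) & kMask;
//   if (entries_[probe2].ordinary_cid == cid &&
//       entries_[probe2].ordinary_selector == selector) {
//     *target = entries_[probe2].ordinary_target;
//     return true;
//   }
//   return false;
// }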
|
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
#pragma once
#include <Layer.hpp>
namespace armnn
{
/// A layer user-provided data can be bound to (e.g. inputs, outputs).
class InputLayer : public BindableLayer
{
public:
/// Makes a workload for the Input type.
/// @param [in] factory The workload factory which will create the workload.
/// @return A pointer to the created workload, or nullptr if not created.
virtual std::unique_ptr<IWorkload> CreateWorkload(const IWorkloadFactory& factory) const override;
/// Creates a dynamically-allocated copy of this layer.
/// @param [in] graph The graph into which this layer is being cloned.
InputLayer* Clone(Graph& graph) const override;
/// Checks if the input tensor shape(s) will lead to a valid configuration
/// of @ref InputLayer.
void ValidateTensorShapesFromInputs() override;
void Accept(ILayerVisitor& visitor) const override;
protected:
/// Constructor to create an InputLayer.
/// @param id The layer binding id number.
/// @param name Optional name for the layer.
InputLayer(LayerBindingId id, const char* name);
/// Default destructor
~InputLayer() = default;
};
} // namespace
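// Illustrative usage sketch (not part of the original header). InputLayer's
// constructor is protected, so instances are normally created through the
// public INetwork interface; this assumes the standard ArmNN API.
//
// #include <armnn/INetwork.hpp>
//
// void Example()
// {
//     armnn::INetworkPtr network = armnn::INetwork::Create();
//     // 0 is the LayerBindingId used later to bind the actual input tensor.
//     armnn::IConnectableLayer* input = network->AddInputLayer(0, "input");
// }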
|
/*=========================================================================
Program: Visualization Toolkit
Module: vtkCellSizeFilter.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkCellSizeFilter.h"
#include "vtkCellData.h"
#include "vtkCellType.h"
#include "vtkCompositeDataSet.h"
#include "vtkCompositeDataIterator.h"
#include "vtkDataSet.h"
#include "vtkDoubleArray.h"
#include "vtkGenericCell.h"
#include "vtkIdList.h"
#include "vtkImageData.h"
#include "vtkInformation.h"
#include "vtkInformationVector.h"
#include "vtkMath.h"
#include "vtkMeshQuality.h"
#include "vtkNew.h"
#include "vtkObjectFactory.h"
#include "vtkPointSet.h"
#include "vtkPolygon.h"
#include "vtkTetra.h"
#include "vtkTriangle.h"
#include "vtkUnsignedCharArray.h"
vtkStandardNewMacro(vtkCellSizeFilter);
//-----------------------------------------------------------------------------
vtkCellSizeFilter::vtkCellSizeFilter() :
ComputeVertexCount(true), ComputeLength(true), ComputeArea(true), ComputeVolume(true),
ComputeSum(false), VertexCountArrayName(nullptr), LengthArrayName(nullptr),
AreaArrayName(nullptr), VolumeArrayName(nullptr)
{
this->SetVertexCountArrayName("VertexCount");
this->SetLengthArrayName("Length");
this->SetAreaArrayName("Area");
this->SetVolumeArrayName("Volume");
}
//-----------------------------------------------------------------------------
vtkCellSizeFilter::~vtkCellSizeFilter()
{
this->SetVertexCountArrayName(nullptr);
this->SetLengthArrayName(nullptr);
this->SetAreaArrayName(nullptr);
this->SetVolumeArrayName(nullptr);
}
//----------------------------------------------------------------------------
void vtkCellSizeFilter::ExecuteBlock(vtkDataSet* input, vtkDataSet* output, double sum[4])
{
vtkSmartPointer<vtkIdList> cellPtIds = vtkSmartPointer<vtkIdList>::New();
vtkIdType numCells = input->GetNumberOfCells();
vtkSmartPointer<vtkPoints> cellPoints = vtkSmartPointer<vtkPoints>::New();
int cellType;
vtkDoubleArray* arrays[4] = {nullptr, nullptr, nullptr, nullptr};
if (this->ComputeVertexCount)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->VertexCountArrayName);
array->SetNumberOfTuples(numCells);
array->Fill(0);
output->GetCellData()->AddArray(array);
arrays[0] = array;
}
if (this->ComputeLength)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->LengthArrayName);
array->SetNumberOfTuples(numCells);
array->Fill(0);
output->GetCellData()->AddArray(array);
arrays[1] = array;
}
if (this->ComputeArea)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->AreaArrayName);
array->SetNumberOfTuples(numCells);
array->Fill(0);
output->GetCellData()->AddArray(array);
arrays[2] = array;
}
if (this->ComputeVolume)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->VolumeArrayName);
array->SetNumberOfTuples(numCells);
array->Fill(0);
output->GetCellData()->AddArray(array);
arrays[3] = array;
}
vtkNew<vtkGenericCell> cell;
vtkPointSet* inputPS = vtkPointSet::SafeDownCast(input);
vtkUnsignedCharArray* ghostArray = nullptr;
if (sum)
{
ghostArray = input->GetCellGhostArray();
}
for (vtkIdType cellId = 0; cellId < numCells; ++cellId)
{
double value = 0;
int cellDimension = -1;
cellType = input->GetCellType(cellId);
value = -1;
switch (cellType)
{
case VTK_EMPTY_CELL:
value = 0;
break;
case VTK_VERTEX:
if ( this->ComputeVertexCount )
{
value = 1;
cellDimension = 0;
}
else
{
value = 0;
}
break;
case VTK_POLY_VERTEX:
if ( this->ComputeVertexCount )
{
input->GetCellPoints(cellId, cellPtIds);
value = static_cast<double>(cellPtIds->GetNumberOfIds());
cellDimension = 0;
}
else
{
value = 0;
}
break;
case VTK_POLY_LINE:
case VTK_LINE:
{
if ( this->ComputeLength )
{
input->GetCellPoints(cellId, cellPtIds);
value = this->IntegratePolyLine(input, cellPtIds);
cellDimension = 1;
}
else
{
value = 0;
}
}
break;
case VTK_TRIANGLE:
{
if ( this->ComputeArea )
{
input->GetCell(cellId, cell);
value = vtkMeshQuality::TriangleArea(cell);
cellDimension = 2;
}
else
{
value = 0;
}
}
break;
case VTK_TRIANGLE_STRIP:
{
if ( this->ComputeArea )
{
input->GetCellPoints(cellId, cellPtIds);
value = this->IntegrateTriangleStrip(inputPS, cellPtIds);
cellDimension = 2;
}
else
{
value = 0;
}
}
break;
case VTK_POLYGON:
{
if ( this->ComputeArea )
{
input->GetCellPoints(cellId, cellPtIds);
value = this->IntegratePolygon(inputPS, cellPtIds);
cellDimension = 2;
}
else
{
value = 0;
}
}
break;
case VTK_PIXEL:
{
if ( this->ComputeArea )
{
input->GetCellPoints(cellId, cellPtIds);
value = this->IntegratePixel(input, cellPtIds);
cellDimension = 2;
}
else
{
value = 0;
}
}
break;
case VTK_QUAD:
{
if ( this->ComputeArea )
{
input->GetCell(cellId, cell);
value = vtkMeshQuality::QuadArea(cell);
cellDimension = 2;
}
else
{
value = 0;
}
}
break;
case VTK_VOXEL:
{
if ( this->ComputeVolume )
{
input->GetCellPoints(cellId, cellPtIds);
value = this->IntegrateVoxel(input, cellPtIds);
cellDimension = 3;
}
else
{
value = 0;
}
}
break;
case VTK_TETRA:
{
if ( this->ComputeVolume )
{
input->GetCell(cellId, cell);
value = vtkMeshQuality::TetVolume(cell);
cellDimension = 3;
}
else
{
value = 0;
}
}
break;
default:
{
// We need to explicitly get the cell
input->GetCell(cellId, cell);
cellDimension = cell->GetCellDimension();
switch (cellDimension)
{
case 0:
if ( this->ComputeVertexCount )
{
input->GetCellPoints(cellId, cellPtIds);
value = static_cast<double>(cellPtIds->GetNumberOfIds());
}
else
{
value = 0;
cellDimension = -1;
}
break;
case 1:
if ( this->ComputeLength )
{
cell->Triangulate(1, cellPtIds, cellPoints);
value = this->IntegrateGeneral1DCell(input, cellPtIds);
}
else
{
value = 0;
cellDimension = -1;
}
break;
case 2:
if ( this->ComputeArea )
{
cell->Triangulate(1, cellPtIds, cellPoints);
value = this->IntegrateGeneral2DCell(inputPS, cellPtIds);
}
else
{
value = 0;
cellDimension = -1;
}
break;
case 3:
if ( this->ComputeVolume )
{
cell->Triangulate(1, cellPtIds, cellPoints);
value = this->IntegrateGeneral3DCell(inputPS, cellPtIds);
}
else
{
value = 0;
cellDimension = -1;
}
break;
default:
vtkWarningMacro("Unsupported Cell Dimension = " << cellDimension);
cellDimension = -1;
}
}
} // end switch (cellType)
if (cellDimension != -1)
{ // a valid cell that we want to compute the size of
arrays[cellDimension]->SetValue(cellId, value);
if ( sum && (!ghostArray || !ghostArray->GetValue(cellId) ) )
{
sum[cellDimension] += value;
}
}
} // end cell iteration
}
//-----------------------------------------------------------------------------
int vtkCellSizeFilter::RequestData(
vtkInformation*, vtkInformationVector** inputVector, vtkInformationVector* outputVector)
{
vtkInformation* info = outputVector->GetInformationObject(0);
vtkInformation* inInfo = inputVector[0]->GetInformationObject(0);
bool retVal = true;
if (vtkDataSet* inputDataSet = vtkDataSet::SafeDownCast(inInfo->Get(vtkDataObject::DATA_OBJECT())))
{
vtkDataSet* output = vtkDataSet::SafeDownCast(info->Get(vtkDataObject::DATA_OBJECT()));
double sum[4] = {0, 0, 0, 0};
retVal = this->ComputeDataSet(inputDataSet, output, sum);
if (this->ComputeSum)
{
this->ComputeGlobalSum(sum);
this->AddSumFieldData(output, sum);
}
}
else if (vtkCompositeDataSet* input =
vtkCompositeDataSet::SafeDownCast(inInfo->Get(vtkDataObject::DATA_OBJECT())))
{
vtkCompositeDataSet* output =
vtkCompositeDataSet::SafeDownCast(info->Get(vtkDataObject::DATA_OBJECT()));
output->CopyStructure(input);
vtkCompositeDataIterator* iter = input->NewIterator();
iter->SkipEmptyNodesOff();
double sumComposite[4] = {0, 0, 0, 0};
for (iter->InitTraversal(); !iter->IsDoneWithTraversal(); iter->GoToNextItem())
{
double sum[4] = {0, 0, 0, 0};
if (vtkDataSet* inputDS = vtkDataSet::SafeDownCast(iter->GetCurrentDataObject()))
{
vtkDataSet* outputDS = inputDS->NewInstance();
output->SetDataSet(iter, outputDS);
outputDS->Delete();
retVal = retVal && this->ComputeDataSet(inputDS, outputDS, sum);
if (this->ComputeSum)
{
this->ComputeGlobalSum(sum);
}
}
if (this->ComputeSum)
{
for (int i=0;i<4;i++)
{
sumComposite[i] += sum[i];
}
}
}
iter->Delete();
if (this->ComputeSum)
{
this->AddSumFieldData(output, sumComposite);
}
}
else
{
retVal = false;
vtkWarningMacro("Cannot handle input of type " <<
inInfo->Get(vtkDataObject::DATA_OBJECT())->GetClassName());
}
return retVal;
}
//-----------------------------------------------------------------------------
bool vtkCellSizeFilter::ComputeDataSet(
vtkDataSet* input, vtkDataSet* output, double sum[4])
{
output->ShallowCopy(input);
// fast path for image data since all the cells have the same volume
if (vtkImageData* imageData = vtkImageData::SafeDownCast(input))
{
this->IntegrateImageData(imageData, vtkImageData::SafeDownCast(output), sum);
}
else
{
this->ExecuteBlock(input, output, sum);
}
if (this->ComputeSum)
{
this->AddSumFieldData(output, sum);
}
return true;
}
//-----------------------------------------------------------------------------
void vtkCellSizeFilter::IntegrateImageData(
vtkImageData* input, vtkImageData* output, double sum[4])
{
int extent[6];
input->GetExtent(extent);
double spacing[3];
input->GetSpacing(spacing);
double val = 1;
int dimension = 0;
for (int i=0;i<3;i++)
{
if (extent[2*i+1] > extent[2*i])
{
val *= spacing[i];
dimension++;
}
}
if (this->ComputeVertexCount)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->VertexCountArrayName);
array->SetNumberOfTuples(output->GetNumberOfCells());
if (dimension == 0)
{
array->SetValue(0, 1);
}
else
{
array->Fill(0);
}
output->GetCellData()->AddArray(array);
}
if (this->ComputeLength)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->LengthArrayName);
array->SetNumberOfTuples(output->GetNumberOfCells());
if (dimension == 1)
{
array->Fill(val);
}
else
{
array->Fill(0);
}
output->GetCellData()->AddArray(array);
}
if (this->ComputeArea)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->AreaArrayName);
array->SetNumberOfTuples(output->GetNumberOfCells());
if (dimension == 2)
{
array->Fill(val);
}
else
{
array->Fill(0);
}
output->GetCellData()->AddArray(array);
}
if (this->ComputeVolume)
{
vtkNew<vtkDoubleArray> array;
array->SetName(this->VolumeArrayName);
array->SetNumberOfTuples(output->GetNumberOfCells());
if (dimension == 3)
{
array->Fill(val);
}
else
{
array->Fill(0);
}
output->GetCellData()->AddArray(array);
}
if (this->ComputeSum)
{
if (vtkUnsignedCharArray* ghosts = input->GetCellGhostArray())
{
for (vtkIdType i=0;i<output->GetNumberOfCells();i++)
{
if (!ghosts->GetValue(i))
{
sum[dimension] += val;
}
}
}
else
{
sum[dimension] = input->GetNumberOfCells()*val;
}
}
}
//-----------------------------------------------------------------------------
double vtkCellSizeFilter::IntegratePolyLine(vtkDataSet* input, vtkIdList* ptIds)
{
double sum = 0;
double pt1[3], pt2[3];
vtkIdType numLines = ptIds->GetNumberOfIds() - 1;
for (vtkIdType lineIdx = 0; lineIdx < numLines; ++lineIdx)
{
vtkIdType pt1Id = ptIds->GetId(lineIdx);
vtkIdType pt2Id = ptIds->GetId(lineIdx + 1);
input->GetPoint(pt1Id, pt1);
input->GetPoint(pt2Id, pt2);
// Compute the length of the line.
double length = sqrt(vtkMath::Distance2BetweenPoints(pt1, pt2));
sum += length;
}
return sum;
}
//-----------------------------------------------------------------------------
double vtkCellSizeFilter::IntegrateGeneral1DCell(
vtkDataSet* input, vtkIdList* ptIds)
{
// Determine the number of lines
vtkIdType nPnts = ptIds->GetNumberOfIds();
// There should be an even number of points from the triangulation
if (nPnts % 2)
{
vtkWarningMacro("Odd number of points(" << nPnts << ") encountered - skipping ");
return 0;
}
double pt1[3], pt2[3];
vtkIdType pid = 0;
vtkIdType pt1Id, pt2Id;
double sum = 0;
while (pid < nPnts)
{
pt1Id = ptIds->GetId(pid++);
pt2Id = ptIds->GetId(pid++);
input->GetPoint(pt1Id, pt1);
input->GetPoint(pt2Id, pt2);
// Compute the length of the line.
double length = sqrt(vtkMath::Distance2BetweenPoints(pt1, pt2));
sum += length;
}
return sum;
}
//-----------------------------------------------------------------------------
double vtkCellSizeFilter::IntegrateTriangleStrip(
vtkPointSet* input, vtkIdList* ptIds)
{
vtkIdType trianglePtIds[3];
vtkIdType numTris = ptIds->GetNumberOfIds() - 2;
double sum = 0;
for (vtkIdType triIdx = 0; triIdx < numTris; ++triIdx)
{
trianglePtIds[0] = ptIds->GetId(triIdx);
trianglePtIds[1] = ptIds->GetId(triIdx + 1);
trianglePtIds[2] = ptIds->GetId(triIdx + 2);
vtkNew<vtkTriangle> triangle;
triangle->Initialize(3, trianglePtIds, input->GetPoints());
sum += triangle->ComputeArea();
}
return sum;
}
//-----------------------------------------------------------------------------
// Only correct for convex polygons: the polygon is decomposed into a
// triangle fan, which is wrong for non-convex polygons.
double vtkCellSizeFilter::IntegratePolygon(vtkPointSet* input, vtkIdList* ptIds)
{
vtkIdType numTris = ptIds->GetNumberOfIds() - 2;
vtkIdType trianglePtIds[3] = {ptIds->GetId(0), 0, 0};
double sum = 0;
for (vtkIdType triIdx = 0; triIdx < numTris; ++triIdx)
{
trianglePtIds[1] = ptIds->GetId(triIdx + 1);
trianglePtIds[2] = ptIds->GetId(triIdx + 2);
vtkNew<vtkTriangle> triangle;
triangle->Initialize(3, trianglePtIds, input->GetPoints());
sum += triangle->ComputeArea();
}
return sum;
}
//-----------------------------------------------------------------------------
// For axis aligned rectangular cells
double vtkCellSizeFilter::IntegratePixel(vtkDataSet* input, vtkIdList* cellPtIds)
{
vtkIdType pt1Id, pt2Id, pt3Id, pt4Id;
double pts[4][3];
pt1Id = cellPtIds->GetId(0);
pt2Id = cellPtIds->GetId(1);
pt3Id = cellPtIds->GetId(2);
pt4Id = cellPtIds->GetId(3);
input->GetPoint(pt1Id, pts[0]);
input->GetPoint(pt2Id, pts[1]);
input->GetPoint(pt3Id, pts[2]);
input->GetPoint(pt4Id, pts[3]);
// get the lengths of its 2 orthogonal sides. Since only 1 coordinate
// can be different we can add the differences in all 3 directions
double l = (pts[0][0] - pts[1][0]) + (pts[0][1] - pts[1][1]) + (pts[0][2] - pts[1][2]);
double w = (pts[0][0] - pts[2][0]) + (pts[0][1] - pts[2][1]) + (pts[0][2] - pts[2][2]);
return fabs(l * w);
}
//-----------------------------------------------------------------------------
double vtkCellSizeFilter::IntegrateGeneral2DCell(
vtkPointSet* input, vtkIdList* ptIds)
{
vtkIdType nPnts = ptIds->GetNumberOfIds();
// There should be a number of points that is a multiple of 3
// from the triangulation
if (nPnts % 3)
{
vtkWarningMacro("Number of points (" << nPnts << ") is not divisible by 3 - skipping ");
return 0;
}
vtkIdType triIdx = 0;
vtkIdType trianglePtIds[3];
double sum = 0;
while (triIdx < nPnts)
{
trianglePtIds[0] = ptIds->GetId(triIdx++);
trianglePtIds[1] = ptIds->GetId(triIdx++);
trianglePtIds[2] = ptIds->GetId(triIdx++);
vtkNew<vtkTriangle> triangle;
triangle->Initialize(3, trianglePtIds, input->GetPoints());
sum += triangle->ComputeArea();
}
return sum;
}
//-----------------------------------------------------------------------------
// For axis aligned hexahedral cells
double vtkCellSizeFilter::IntegrateVoxel(
vtkDataSet* input, vtkIdList* cellPtIds)
{
vtkIdType pt1Id, pt2Id, pt3Id, pt4Id, pt5Id;
double pts[5][3];
pt1Id = cellPtIds->GetId(0);
pt2Id = cellPtIds->GetId(1);
pt3Id = cellPtIds->GetId(2);
pt4Id = cellPtIds->GetId(3);
pt5Id = cellPtIds->GetId(4);
input->GetPoint(pt1Id, pts[0]);
input->GetPoint(pt2Id, pts[1]);
input->GetPoint(pt3Id, pts[2]);
input->GetPoint(pt4Id, pts[3]);
input->GetPoint(pt5Id, pts[4]);
// Calculate the volume of the voxel
double l = pts[1][0] - pts[0][0];
double w = pts[2][1] - pts[0][1];
double h = pts[4][2] - pts[0][2];
return fabs(l * w * h);
}
//-----------------------------------------------------------------------------
double vtkCellSizeFilter::IntegrateGeneral3DCell(
vtkPointSet* input, vtkIdList* ptIds)
{
vtkIdType nPnts = ptIds->GetNumberOfIds();
// There should be a number of points that is a multiple of 4
// from the triangulation
if (nPnts % 4)
{
vtkWarningMacro("Number of points (" << nPnts << ") is not divisible by 4 - skipping ");
return 0;
}
vtkIdType tetIdx = 0;
vtkIdType tetPtIds[4];
double sum = 0;
while (tetIdx < nPnts)
{
tetPtIds[0] = ptIds->GetId(tetIdx++);
tetPtIds[1] = ptIds->GetId(tetIdx++);
tetPtIds[2] = ptIds->GetId(tetIdx++);
tetPtIds[3] = ptIds->GetId(tetIdx++);
vtkNew<vtkTetra> tet;
tet->Initialize(4, tetPtIds, input->GetPoints());
sum += vtkMeshQuality::TetVolume(tet);
}
return sum;
}
//-----------------------------------------------------------------------------
void vtkCellSizeFilter::AddSumFieldData(vtkDataObject* output, double sum[4])
{
if (this->ComputeVertexCount)
{
vtkNew<vtkDoubleArray> array;
array->SetNumberOfTuples(1);
array->SetValue(0, sum[0]);
array->SetName(this->VertexCountArrayName);
output->GetFieldData()->AddArray(array);
}
if (this->ComputeLength)
{
vtkNew<vtkDoubleArray> array;
array->SetNumberOfTuples(1);
array->SetValue(0, sum[1]);
array->SetName(this->LengthArrayName);
output->GetFieldData()->AddArray(array);
}
if (this->ComputeArea)
{
vtkNew<vtkDoubleArray> array;
array->SetNumberOfTuples(1);
array->SetValue(0, sum[2]);
array->SetName(this->AreaArrayName);
output->GetFieldData()->AddArray(array);
}
if (this->ComputeVolume)
{
vtkNew<vtkDoubleArray> array;
array->SetNumberOfTuples(1);
array->SetValue(0, sum[3]);
array->SetName(this->VolumeArrayName);
output->GetFieldData()->AddArray(array);
}
}
//-----------------------------------------------------------------------------
void vtkCellSizeFilter::PrintSelf(ostream& os, vtkIndent indent)
{
this->Superclass::PrintSelf(os, indent);
os << indent << "ComputeVertexCount: " << this->ComputeVertexCount << endl;
os << indent << "ComputeLength: " << this->ComputeLength << endl;
os << indent << "ComputeArea: " << this->ComputeArea << endl;
os << indent << "ComputeVolume: " << this->ComputeVolume << endl;
if (this->VertexCountArrayName)
{
os << indent << "VertexCountArrayName:" << this->VertexCountArrayName << endl;
}
else
{
os << indent << "VertexCountArrayName: (null)\n";
}
if (this->LengthArrayName)
{
os << indent << "LengthArrayName:" << this->LengthArrayName << endl;
}
else
{
os << indent << "LengthArrayName: (null)\n";
}
if (this->AreaArrayName)
{
os << indent << "AreaArrayName:" << this->AreaArrayName << endl;
}
else
{
os << indent << "AreaArrayName: (null)\n";
}
if (this->VolumeArrayName)
{
os << indent << "VolumeArrayName:" << this->VolumeArrayName << endl;
}
else
{
os << indent << "VolumeArrayName: (null)\n";
}
os << indent << "ComputeSum: " << this->ComputeSum << endl;
}
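//-----------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original file): run the filter
// on a triangulated sphere and read back the per-cell "Area" array named in
// the constructor. ComputeSumOn() assumes the usual VTK boolean set-macros
// declared in vtkCellSizeFilter.h.
//
// #include <vtkCellData.h>
// #include <vtkCellSizeFilter.h>
// #include <vtkDoubleArray.h>
// #include <vtkNew.h>
// #include <vtkSphereSource.h>
//
// void Example()
// {
//   vtkNew<vtkSphereSource> sphere;
//   vtkNew<vtkCellSizeFilter> sizeFilter;
//   sizeFilter->SetInputConnection(sphere->GetOutputPort());
//   sizeFilter->ComputeSumOn();  // also emit field-data totals
//   sizeFilter->Update();
//   vtkDataSet* output = vtkDataSet::SafeDownCast(sizeFilter->GetOutput());
//   vtkDoubleArray* areas = vtkDoubleArray::SafeDownCast(
//     output->GetCellData()->GetArray("Area"));
//   // areas->GetValue(i) is the area of cell i; field data holds the sums.
// }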
|
//
// FILE: BitArray.cpp
// AUTHOR: Rob Tillaart
// VERSION: 0.2.4
// PURPOSE: BitArray library for Arduino
// URL: https://github.com/RobTillaart/BitArray
// http://forum.arduino.cc/index.php?topic=361167
//
// HISTORY
// 0.1.00 initial version
// 0.1.01 added clear() + fixed set bug
// 0.1.02 first stable version (at last)
// 0.1.03 refactoring
// 0.1.04 improve performance
// 0.1.05 added upper limits
// 0.1.06 refactored
// 0.1.07 private calls inline -> performance & footprint
// 0.1.8 added toggle
// 0.1.9 fix constructor bug
// 0.2.0 2020-03-28 #pragma once, readme, fix Fibonacci demo
// 0.2.1 2020-06-05 fix library.json
// 0.2.2 2020-12-14 add Arduino-CI + unit test
// 0.2.3 2021-10-19 update Arduino-CI + setAll(value)
// 0.2.4 2021-12-14 update library.json, license, minor edits
#include "BitArray.h"
BitArray::BitArray()
{
for (uint8_t i = 0; i < BA_MAX_SEGMENTS; i++)
{
_ar[i] = NULL;
}
}
BitArray::~BitArray()
{
for (uint8_t i = 0; i < BA_MAX_SEGMENTS; i++)
{
if (_ar[i]) free(_ar[i]);
}
}
uint8_t BitArray::begin(const uint8_t bits, const uint16_t size)
{
if (bits == 0 || bits > 32)
{
_error = BA_ELEMENT_SIZE_ERR;
return _error;
}
if ((1UL * bits * size)/8 > (1UL * BA_MAX_SEGMENTS * BA_SEGMENT_SIZE))
{
_error = BA_SIZE_ERR;
return _error;
}
for (uint8_t i = 0; i < BA_MAX_SEGMENTS; i++)
{
if (_ar[i]) free(_ar[i]);
}
_segments = 0;
_bits = bits;
_bytes = (_bits * size + 7) / 8;
uint16_t b = _bytes;
while (b > 0)
{
_ar[_segments] = (uint8_t*) malloc(min(b, (uint16_t) BA_SEGMENT_SIZE));
if (_ar[_segments] == NULL)
{
_error = BA_NO_MEMORY_ERR;
return _error;
}
b = b - min(b, (uint16_t) BA_SEGMENT_SIZE);
_segments++;
}
_error = BA_OK;
return _error;
}
uint32_t BitArray::get(const uint16_t index)
{
// if (_error != BA_OK) return BA_ERR;
// if (index >= _size) return BA_IDX_RANGE;
uint32_t v = 0;
uint16_t pos = index * _bits;
for (uint8_t i = _bits; i-- > 0;)
{
v <<= 1;
v += _bitget(pos + i);
}
return v;
}
uint32_t BitArray::set(const uint16_t index, uint32_t value)
{
// if (_error != BA_OK) return BA_ERR;
// if (index >= _size) return BA_IDX_RANGE;
uint16_t pos = index * _bits;
uint32_t mask = 1UL;
for (uint8_t i = 0; i < _bits; i++)
{
uint8_t v = (value & mask) > 0 ? 1 : 0;
_bitset(pos + i, v);
mask <<= 1;
}
return value;
}
uint32_t BitArray::toggle(const uint16_t index)
{
// if (_error != BA_OK) return BA_ERR;
// if (index >= _size) return BA_IDX_RANGE;
uint32_t v = 0;
uint16_t pos = index * _bits;
for (uint8_t i = _bits; i-- > 0;)
{
v <<= 1;
v += _bittoggle(pos + i);
}
return v;
}
void BitArray::clear()
{
uint16_t b = _bytes;
for (uint8_t s = 0; s < _segments; s++)
{
uint8_t *p = _ar[s];
if (p)
{
uint8_t t = min(b, (uint16_t) BA_SEGMENT_SIZE);
b -= t;
while(t--)
{
*p++ = 0;
}
}
if (b == 0) break;
}
}
void BitArray::setAll(uint32_t value)
{
for (uint16_t i = 0; i < capacity(); i++)
{
set(i, value);
}
}
// 16 bit address usage is faster
// TODO verify correctness
//
// void BitArray::clear()
// {
// uint16_t b = _bytes;
// for (uint8_t s = 0; s < _segments; s++)
// {
// uint8_t *q = _ar[s];
// uint16_t *p = (uint16_t*)q;
// if (p)
// {
// for (uint8_t t = 0; t < BA_SEGMENT_SIZE/2; t++)
// {
// *p++ = 0; // might be bug @ edge..
// }
// }
// if (b == 0) break;
// }
// }
// PRIVATE
inline uint8_t BitArray::_bitget(uint16_t pos)
{
uint8_t se = 0;
uint16_t re = pos;
while (re >= (BA_SEGMENT_SIZE * 8)) // 8 == #bits in uint8_t
{
se++;
re -= (BA_SEGMENT_SIZE * 8);
}
uint8_t by = re / 8;
uint8_t bi = re & 7;
uint8_t * p = _ar[se];
return (p[by] >> bi) & 0x01; // bitRead(p[by], bi);
}
inline void BitArray::_bitset(uint16_t pos, uint8_t value)
{
uint8_t se = 0;
uint16_t re = pos;
while (re >= (BA_SEGMENT_SIZE * 8)) // 8 == #bits in uint8_t
{
se++;
re -= (BA_SEGMENT_SIZE * 8);
}
uint8_t by = re / 8;
uint8_t bi = re & 7;
uint8_t * p = _ar[se];
if (value == 0) p[by] &= ~(1 << bi); // bitClear(p[by], bi);
else p[by] |= (1 << bi); // bitSet(p[by], bi);
}
inline uint8_t BitArray::_bittoggle(const uint16_t pos)
{
uint8_t se = 0;
uint16_t re = pos;
while (re >= (BA_SEGMENT_SIZE * 8)) // 8 == #bits in uint8_t
{
se++;
re -= (BA_SEGMENT_SIZE * 8);
}
uint8_t by = re / 8;
uint8_t bi = re & 7;
uint8_t * p = _ar[se];
uint8_t mask = 1 << bi;
p[by] ^= mask;
return (p[by] & mask) > 0;  // new value of the bit ((mask > 0) was always 1)
}
// -- END OF FILE --
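//
// Illustrative usage sketch (not part of the original file): a BitArray of
// 1000 elements of 10 bits each, using only the public API defined above.
//
// #include "BitArray.h"
//
// BitArray ba;
//
// void setup()
// {
//   Serial.begin(115200);
//   if (ba.begin(10, 1000) != BA_OK)  // 10 bits per element, 1000 elements
//   {
//     Serial.println("BitArray allocation failed");
//     return;
//   }
//   ba.set(42, 777);                  // 777 fits in 10 bits (max 1023)
//   Serial.println(ba.get(42));      // prints 777
// }
//
// void loop()
// {
// }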
|
#include "kalman_filter.h"
//TODO Remove
#include "iostream"
using Eigen::MatrixXd;
using Eigen::VectorXd;
/*
* Please note that the Eigen library does not initialize
* VectorXd or MatrixXd objects with zeros upon creation.
*/
KalmanFilter::~KalmanFilter()
{}
void KalmanFilter::Init(int num_states)
{
x_ = VectorXd(num_states);
x_.setZero();
P_ = MatrixXd(num_states, num_states);
P_.setIdentity();
P_ *= 1000; // large initial uncertainty on all states
}
void KalmanFilter::Predict(const MatrixXd &F, const MatrixXd &Q)
{
x_ = F * x_;
MatrixXd Ft = F.transpose();
P_ = F * P_ * Ft + Q;
}
void KalmanFilter::UpdateEKF(const Eigen::VectorXd &z, const Eigen::MatrixXd &H, const Eigen::MatrixXd &R)
{
VectorXd z_pred = VectorXd(3);
z_pred[0] = sqrt(x_[0] * x_[0] + x_[1] * x_[1]);
z_pred[1] = atan2(x_[1], x_[0]);
// guard against division by zero when the target is at the origin
if (z_pred[0] < 0.0001)
{
z_pred[2] = (x_[0] * x_[2] + x_[1] * x_[3]) / 0.0001;
}
else
{
z_pred[2] = ((x_[0] * x_[2] + x_[1] * x_[3])) / z_pred[0];
}
VectorXd y = z - z_pred;
// normalize the bearing residual to [-pi, pi]
while (y[1] > M_PI)
{
y[1] -= 2.0 * M_PI;
}
while (y[1] < -M_PI)
{
y[1] += 2.0 * M_PI;
}
UpdateCommon(y, H, R);
}
void KalmanFilter::UpdateCommon(const Eigen::VectorXd &y, const Eigen::MatrixXd &H, const Eigen::MatrixXd &R)
{
MatrixXd Ht = H.transpose();
MatrixXd S = H * P_ * Ht + R;
MatrixXd Si = S.inverse();
MatrixXd K = P_ * Ht * Si;
x_ = x_ + (K * y);
MatrixXd I = MatrixXd(K.rows(), H.cols());
I.setIdentity();
P_ = (I - K * H) * P_;
}
void KalmanFilter::Update(const Eigen::VectorXd &z, const Eigen::MatrixXd &H, const Eigen::MatrixXd &R)
{
VectorXd z_pred = H * x_;
VectorXd y = z - z_pred;
UpdateCommon(y, H, R);
}
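// Illustrative usage sketch (not part of the original file): a constant-
// velocity model with state [px, py, vx, vy] and a position-only
// measurement, using only the API defined above. The noise values are
// placeholders, not tuned parameters.
//
// void ExampleStep(KalmanFilter& kf, const Eigen::VectorXd& z, double dt)
// {
//   Eigen::MatrixXd F(4, 4);
//   F << 1, 0, dt, 0,
//        0, 1, 0, dt,
//        0, 0, 1, 0,
//        0, 0, 0, 1;
//   Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(4, 4) * 0.1;
//   Eigen::MatrixXd H(2, 4);
//   H << 1, 0, 0, 0,
//        0, 1, 0, 0;
//   Eigen::MatrixXd R = Eigen::MatrixXd::Identity(2, 2) * 0.0225;
//   kf.Predict(F, Q);   // propagate state and covariance
//   kf.Update(z, H, R); // fuse the linear measurement
// }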
|
//
// COMP 371 Assignment Framework
//
// Created by Nicolas Bergeron on 8/7/14.
// Updated by Gary Chang on 14/1/15
//
// Copyright (c) 2014-2015 Concordia University. All rights reserved.
//
#include "EventManager.h"
#include "Renderer.h"
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <time.h>
using namespace std;
// Time
double EventManager::sLastFrameTime = 0.0; // set in Initialize(), once GLFW is ready
float EventManager::sFrameTime = 0.0f;
// Mouse
double EventManager::sLastMousePositionX = 0.0f;
float EventManager::sMouseDeltaX = 0.0f;
double EventManager::sLastMousePositionY = 0.0f;
float EventManager::sMouseDeltaY = 0.0f;
// Window
GLFWwindow* EventManager::spWindow = nullptr;
void EventManager::Initialize()
{
// Initialise GLFW
if( !glfwInit() )
{
fprintf( stderr, "Failed to initialize GLFW\n" );
exit(-1);
}
glfwWindowHint(GLFW_SAMPLES, 4);
#if defined(PLATFORM_OSX)
// OS X would use legacy opengl by default, and wouldn't be able to load shaders
// This is the proper way to setup GLFW to use modern OpenGL
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_DEPTH_BITS, 32);
#else
// Allow older laptops to run the framework, even though, our framework
// is compatible with OpenGL 3.3 (which removed many deprecated features)
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
glfwWindowHint(GLFW_DEPTH_BITS, 32);
#endif
// Open a window and create its OpenGL context
glfwWindowHint(GLFW_RESIZABLE, 0);
spWindow = glfwCreateWindow(1024, 768, "Space Shooter 5000", nullptr, nullptr);
if (spWindow == nullptr)
{
fprintf( stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n" );
glfwTerminate();
exit(-1);
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(spWindow, GLFW_STICKY_KEYS, GL_TRUE);
// Initial mouse position
glfwPollEvents();
double x, y;
glfwGetCursorPos(spWindow, &x, &y);
sLastMousePositionX = x;
sLastMousePositionY = y;
// Initial time
sLastFrameTime = glfwGetTime();
srand(time(nullptr));
}
void EventManager::Shutdown()
{
// Close OpenGL window and terminate GLFW
glfwTerminate();
spWindow = nullptr;
}
void EventManager::Update()
{
// Update inputs / events
glfwPollEvents();
// Update mouse position
double x, y;
glfwGetCursorPos(spWindow, &x, &y);
sMouseDeltaX = static_cast<float>( x - sLastMousePositionX );
sMouseDeltaY = static_cast<float>( y - sLastMousePositionY );
sLastMousePositionX = x;
sLastMousePositionY = y;
// Update frame time
double currentTime = glfwGetTime();
sFrameTime = static_cast<float>(currentTime - sLastFrameTime);
sLastFrameTime = currentTime;
}
float EventManager::GetFrameTime()
{
return sFrameTime;
}
bool EventManager::ExitRequested()
{
return glfwGetKey(spWindow, GLFW_KEY_ESCAPE ) == GLFW_PRESS || glfwWindowShouldClose(spWindow);
}
GLFWwindow* EventManager::GetWindow()
{
return spWindow;
}
float EventManager::GetMouseMotionX()
{
return sMouseDeltaX;
}
float EventManager::GetMouseMotionY()
{
return sMouseDeltaY;
}
void EventManager::EnableMouseCursor()
{
glfwSetInputMode(spWindow, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
}
void EventManager::DisableMouseCursor()
{
glfwSetInputMode(spWindow, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
}
float EventManager::GetRandomFloat(float min, float max)
{
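	// Map rand() onto [0, 1], then scale and shift into [min, max].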
float value = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
return min + value*(max - min);
}
|
/*
* Copyright (c) Contributors to the Open 3D Engine Project.
* For complete copyright and license terms please see the LICENSE at the root of this distribution.
*
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
*/
#include "EditorDefs.h"
// CryCommon
#include <CryCommon/Maestro/Types/AnimParamType.h> // for AnimParamType
// Editor
#include "TrackViewKeyPropertiesDlg.h" // for CTrackViewKeyUIControls
//////////////////////////////////////////////////////////////////////////
class CSoundKeyUIControls
: public CTrackViewKeyUIControls
{
public:
CSmartVariableArray mv_table;
CSmartVariableArray mv_options;
CSmartVariable<QString> mv_startTrigger;
CSmartVariable<QString> mv_stopTrigger;
CSmartVariable<float> mv_duration;
CSmartVariable<Vec3> mv_customColor;
virtual void OnCreateVars()
{
AddVariable(mv_table, "Key Properties");
AddVariable(mv_table, mv_startTrigger, "StartTrigger", IVariable::DT_AUDIO_TRIGGER);
AddVariable(mv_table, mv_stopTrigger, "StopTrigger", IVariable::DT_AUDIO_TRIGGER);
AddVariable(mv_table, mv_duration, "Duration");
AddVariable(mv_options, "Options");
AddVariable(mv_options, mv_customColor, "Custom Color", IVariable::DT_COLOR);
}
bool SupportTrackType(const CAnimParamType& paramType, [[maybe_unused]] EAnimCurveType trackType, [[maybe_unused]] AnimValueType valueType) const
{
return paramType == AnimParamType::Sound;
}
virtual bool OnKeySelectionChange(CTrackViewKeyBundle& selectedKeys);
virtual void OnUIChange(IVariable* pVar, CTrackViewKeyBundle& selectedKeys);
virtual unsigned int GetPriority() const { return 1; }
static const GUID& GetClassID()
{
// {AB2226E5-D593-49d2-B7CB-989412CAAEDE}
static const GUID guid =
{
0xab2226e5, 0xd593, 0x49d2, { 0xb7, 0xcb, 0x98, 0x94, 0x12, 0xca, 0xae, 0xde }
};
return guid;
}
};
//////////////////////////////////////////////////////////////////////////
bool CSoundKeyUIControls::OnKeySelectionChange(CTrackViewKeyBundle& selectedKeys)
{
if (!selectedKeys.AreAllKeysOfSameType())
{
return false;
}
bool bAssigned = false;
if (selectedKeys.GetKeyCount() == 1)
{
const CTrackViewKeyHandle& keyHandle = selectedKeys.GetKey(0);
CAnimParamType paramType = keyHandle.GetTrack()->GetParameterType();
if (paramType == AnimParamType::Sound)
{
ISoundKey soundKey;
keyHandle.GetKey(&soundKey);
mv_startTrigger = soundKey.sStartTrigger.c_str();
mv_stopTrigger = soundKey.sStopTrigger.c_str();
mv_duration = soundKey.fDuration;
mv_customColor = soundKey.customColor;
bAssigned = true;
}
}
return bAssigned;
}
// Called when UI variable changes.
void CSoundKeyUIControls::OnUIChange(IVariable* pVar, CTrackViewKeyBundle& selectedKeys)
{
CTrackViewSequence* pSequence = GetIEditor()->GetAnimation()->GetSequence();
if (!pSequence || !selectedKeys.AreAllKeysOfSameType())
{
return;
}
for (unsigned int keyIndex = 0; keyIndex < selectedKeys.GetKeyCount(); ++keyIndex)
{
CTrackViewKeyHandle keyHandle = selectedKeys.GetKey(keyIndex);
CAnimParamType paramType = keyHandle.GetTrack()->GetParameterType();
if (paramType == AnimParamType::Sound)
{
ISoundKey soundKey;
keyHandle.GetKey(&soundKey);
bool bChangedSoundFile = false;
if (pVar == mv_startTrigger.GetVar())
{
QString sFilename = mv_startTrigger;
bChangedSoundFile = sFilename != soundKey.sStartTrigger.c_str();
soundKey.sStartTrigger = sFilename.toUtf8().data();
}
else if (pVar == mv_stopTrigger.GetVar())
{
QString sFilename = mv_stopTrigger;
bChangedSoundFile = sFilename != soundKey.sStopTrigger.c_str();
soundKey.sStopTrigger = sFilename.toUtf8().data();
}
SyncValue(mv_duration, soundKey.fDuration, false, pVar);
SyncValue(mv_customColor, soundKey.customColor, false, pVar);
keyHandle.SetKey(&soundKey);
}
}
}
REGISTER_QT_CLASS_DESC(CSoundKeyUIControls, "TrackView.KeyUI.Sound", "TrackViewKeyUI");
|
/* Copyright 2015 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/framework/rendezvous.h"
#include <gtest/gtest.h>
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/port.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/env.h"
#include "tensorflow/core/public/tensor.h"
#include "tensorflow/core/public/tensor_shape.h"
namespace tensorflow {
TEST(RendezvousTest, Key) {
const string key = Rendezvous::CreateKey(
"/job:mnist/replica:1/task:2/CPU:0", 7890,
"/job:mnist/replica:1/task:2/GPU:0", "var0", FrameAndIter(0, 0));
EXPECT_EQ(key,
"/job:mnist/replica:1/task:2/CPU:0;"
"0000000000001ed2;" // 7890 = 0x1ed2
"/job:mnist/replica:1/task:2/GPU:0;"
"var0;"
"0:0");
Rendezvous::ParsedKey parsed;
EXPECT_OK(Rendezvous::ParseKey(key, &parsed));
EXPECT_EQ(parsed.src_device, "/job:mnist/replica:1/task:2/CPU:0");
EXPECT_EQ(parsed.src_incarnation, 7890);
EXPECT_EQ(parsed.src.type, "CPU");
EXPECT_EQ(parsed.dst_device, "/job:mnist/replica:1/task:2/GPU:0");
EXPECT_EQ(parsed.dst.type, "GPU");
EXPECT_FALSE(Rendezvous::ParseKey("foo;bar;baz", &parsed).ok());
EXPECT_FALSE(Rendezvous::ParseKey("/job:mnist/replica:1/task:2/CPU:0;"
"/job:mnist/replica:1/task:2/GPU:0;",
&parsed)
.ok());
EXPECT_FALSE(
Rendezvous::ParseKey(strings::StrCat(key, ";", key), &parsed).ok());
}
class LocalRendezvousTest : public ::testing::Test {
public:
LocalRendezvousTest()
: threads_(new thread::ThreadPool(Env::Default(), "test", 16)) {
rendez_ = NewLocalRendezvous();
}
~LocalRendezvousTest() override {
rendez_->Unref();
delete threads_;
}
void SchedClosure(std::function<void()> fn) { threads_->Schedule(fn); }
Rendezvous* rendez_;
private:
thread::ThreadPool* threads_;
};
// string -> Tensor<string>
Tensor V(const string& content) {
Tensor tensor(DT_STRING, TensorShape({}));
tensor.scalar<string>()() = content;
return tensor;
}
// Tensor<string> -> string
string V(const Tensor& tensor) {
CHECK_EQ(tensor.dtype(), DT_STRING);
CHECK(TensorShapeUtils::IsScalar(tensor.shape()));
return tensor.scalar<string>()();
}
TEST_F(LocalRendezvousTest, SendRecv) {
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
EXPECT_TRUE(errors::IsAborted(rendez_->Send("foo", args, V("hello"), false)));
Tensor val(DT_STRING);
bool is_dead = false;
ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
TEST_F(LocalRendezvousTest, RecvSend) {
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(10000);
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
});
Tensor val(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &val, &is_dead));
EXPECT_EQ("hello", V(val));
}
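// A Recv that arrives while another Recv is already waiting on the same key is
// rejected with an Aborted status; only the first waiter is served.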
TEST_F(LocalRendezvousTest, DuplicateWaiterRecv) {
SchedClosure([this]() {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
EXPECT_TRUE(errors::IsAborted(rendez_->Recv("foo", args, &val, &val_dead)));
ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
}
TEST_F(LocalRendezvousTest, DuplicateSerialRecv) {
SchedClosure([this]() {
Tensor t(DT_STRING);
bool is_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv("foo", args, &t, &is_dead));
ASSERT_OK(rendez_->Send("bar", args, t, is_dead));
});
Env::Default()->SleepForMicroseconds(1000000);
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Send("foo", args, V("secret msg"), val_dead));
ASSERT_OK(rendez_->Recv("bar", args, &val, &val_dead));
EXPECT_EQ("secret msg", V(val));
EXPECT_TRUE(errors::IsAborted(rendez_->Recv("foo", args, &val, &val_dead)));
}
// A simple structure that behaves a bit like a blocking counter. The
// user that decrements counter to 0 does done.Notify(), and the main
// thread waits for done to be notified.
struct BlockingState {
mutex lock;
int counter;
Notification done;
};
TEST_F(LocalRendezvousTest, RandomSendRecv) {
static const int N = 1000;
BlockingState state;
state.counter = N;
for (int i = 0; i < N; ++i) {
SchedClosure([this, i]() {
random::PhiloxRandom philox(testing::RandomSeed() + i, 17);
random::SimplePhilox rnd(&philox);
Env::Default()->SleepForMicroseconds(1000 + rnd.Uniform(10000));
Rendezvous::Args args;
ASSERT_OK(rendez_->Send(strings::StrCat(i), args, V(strings::StrCat(i)),
false));
});
SchedClosure([this, &state, i]() {
random::PhiloxRandom philox(testing::RandomSeed() + N + i, 17);
random::SimplePhilox rnd(&philox);
Env::Default()->SleepForMicroseconds(1000 + rnd.Uniform(10000));
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
ASSERT_OK(rendez_->Recv(strings::StrCat(i), args, &val, &val_dead));
EXPECT_EQ(strings::StrCat(i), V(val));
bool done = false;
{
mutex_lock l(state.lock);
state.counter--;
if (state.counter == 0) {
done = true;
}
}
if (done) {
state.done.Notify();
}
});
}
state.done.WaitForNotification();
}
TEST_F(LocalRendezvousTest, RecvAbort) {
rendez_->Ref();
SchedClosure([this]() {
rendez_->StartAbort(errors::Aborted("")); // abort
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv("foo", args, &val, &val_dead);
EXPECT_TRUE(errors::IsAborted(status));
}
// Similar to RecvAbort. But this test case ensures the main thread
// Recv() call happens after StartAbort().
TEST_F(LocalRendezvousTest, RecvSleepAbort) {
rendez_->Ref();
SchedClosure([this]() {
Env::Default()->SleepForMicroseconds(1000000);
rendez_->StartAbort(errors::Aborted("")); // abort
rendez_->Unref();
});
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
Status status = rendez_->Recv("foo", args, &val, &val_dead);
EXPECT_TRUE(errors::IsAborted(status));
}
TEST_F(LocalRendezvousTest, AbortThenRecvOrSend) {
rendez_->StartAbort(errors::Aborted(""));
Tensor val(DT_STRING);
bool val_dead = false;
Rendezvous::Args args;
EXPECT_TRUE(errors::IsAborted(rendez_->Send("foo", args, val, val_dead)));
EXPECT_TRUE(errors::IsAborted(rendez_->Recv("foo", args, &val, &val_dead)));
}
class DummyDeviceContext : public DeviceContext {
public:
explicit DummyDeviceContext(int stream_id) : stream_id_(stream_id) {}
~DummyDeviceContext() override {}
int stream_id() const { return stream_id_; }
private:
const int stream_id_;
};
TEST_F(LocalRendezvousTest, TransferDummyDeviceContext) {
Rendezvous::Args args;
args.device_context = new DummyDeviceContext(123);
ASSERT_OK(rendez_->Send("foo", args, V("hello"), false));
Notification n;
Rendezvous::Args args1;
args1.device_context = new DummyDeviceContext(1);
rendez_->RecvAsync("foo", args1, [&n](const Status& s,
const Rendezvous::Args& send_args,
const Rendezvous::Args& recv_args,
const Tensor& val, bool is_dead) {
CHECK_EQ(123,
dynamic_cast<const DummyDeviceContext*>(send_args.device_context)
->stream_id());
n.Notify();
});
n.WaitForNotification();
args.device_context->Unref();
args1.device_context->Unref();
}
static void BM_SendRecv(int iters) {
Rendezvous* rendez = NewLocalRendezvous();
Tensor orig = V("val");
Tensor val(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
Status s;
if (iters > 0) {
while (iters--) {
s = rendez->Send("foo", args, orig, is_dead);
s = rendez->Recv("foo", args, &val, &is_dead);
}
CHECK_EQ(V(val), V(orig));
}
rendez->Unref();
}
BENCHMARK(BM_SendRecv);
static void BM_RecvSend(int iters) {
thread::ThreadPool* pool = new thread::ThreadPool(Env::Default(), "test", 1);
// The main thread sends "foo" for iters/2 times and receives "bar"
// for iters/2 times. The other thread sends "bar" for iters/2
// times and receives "foo" for iters/2 times.
Rendezvous* rendez = NewLocalRendezvous();
pool->Schedule([rendez, iters]() {
Tensor bar = V("bar");
Tensor foo(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
Status s;
for (int i = 0; i < iters / 2; ++i) {
s = rendez->Recv("foo", args, &foo, &is_dead);
s = rendez->Send("bar", args, bar, is_dead);
}
CHECK_EQ("foo", V(foo));
});
Tensor foo = V("foo");
Tensor bar(DT_STRING, TensorShape({}));
bool is_dead = false;
Rendezvous::Args args;
Status s;
for (int i = 0; i < iters / 2; ++i) {
s = rendez->Send("foo", args, foo, is_dead);
s = rendez->Recv("bar", args, &bar, &is_dead);
}
CHECK_EQ("bar", V(bar));
delete pool;
}
BENCHMARK(BM_RecvSend);
} // namespace tensorflow
|
/******************************************************************************
* Copyright 2020 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
#include "modules/canbus/vehicle/devkit/protocol/vcu_report_505.h"
#include "gtest/gtest.h"
namespace apollo {
namespace canbus {
namespace devkit {
class Vcureport505Test : public ::testing::Test {
public:
virtual void SetUp() {}
};
TEST_F(Vcureport505Test, General) {
uint8_t data[8] = {0x07, 0x01, 0x01, 0x02, 0x8A, 0x03, 0x04, 0x05};
int32_t length = 8;
ChassisDetail cd;
Vcureport505 vcureport;
vcureport.Parse(data, length, &cd);
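  // These checks restate the raw input bytes in binary form to document the
  // bit layout the parser consumes.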
EXPECT_EQ(data[0], 0b00000111);
EXPECT_EQ(data[1], 0b00000001);
EXPECT_EQ(data[2], 0b00000001);
EXPECT_EQ(data[3], 0b00000010);
EXPECT_EQ(data[4], 0b10001010);
EXPECT_EQ(data[5], 0b00000011);
EXPECT_EQ(data[6], 0b00000100);
EXPECT_EQ(data[7], 0b00000101);
EXPECT_EQ(cd.devkit().vcu_report_505().battary_soc(), 3);
EXPECT_EQ(cd.devkit().vcu_report_505().vehicle_mode_state(), 1);
EXPECT_EQ(cd.devkit().vcu_report_505().frontcrash_state(), 1);
EXPECT_EQ(cd.devkit().vcu_report_505().backcrash_state(), 0);
EXPECT_EQ(cd.devkit().vcu_report_505().aeb_state(), 0);
EXPECT_EQ(cd.devkit().vcu_report_505().acc(), 1.12);
EXPECT_EQ(cd.devkit().vcu_report_505().speed(), 0.258);
}
} // namespace devkit
} // namespace canbus
} // namespace apollo
|
/*
 * Copyright (C) 2019 GreenWaves Technologies
* All rights reserved.
*/
#ifndef TRACE_DUMPER_PROFILING_HPP
#define TRACE_DUMPER_PROFILING_HPP
#include <stdint.h> // uint32_t
// Used as array indices, so do not change the = 0 value.
// stall_strings must correspond entry by entry, so do not reorder either list.
enum stall_e {
JMP_STALL = 0,
I_MISS,
MISALIGNED,
LD_STALL,
N_STALL_REASONS
};
const char* const stall_strings[] = {
"pcer_jmp_stall",
"pcer_imiss",
"misaligned",
"pcer_ld_stall"
};
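// Suggested guard (not in the original header): fail the build if the string
// table and the stall_e enum ever get out of sync.
static_assert(sizeof(stall_strings) / sizeof(stall_strings[0]) == N_STALL_REASONS,
              "stall_strings must have one entry per stall_e value");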
// Depends on the number of capture groups (parentheses) in the stall regex.
#define STALL_NMATCH (N_STALL_REASONS + 6)
const char* const dma_c_regex = "^.*/dma/channel_[0-9]+$";
const char* const stall_cluster_regex = "^.*/cluster/pe([0-9]+)/";
const char* const stall_fc_regex = "^.*/chip/soc/fc/";
typedef struct {
stall_e type;
int core_id;
} stall_trace_t;
typedef struct {
stall_trace_t trace;
uint32_t pc;
int cycle_penalty;
} stall_event_t;
#endif // TRACE_DUMPER_PROFILING_HPP
|
// Copyright (c) 2020 The Orbit Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "SessionSetup/ServiceDeployManager.h"
#include <absl/flags/declare.h>
#include <absl/flags/flag.h>
#include <absl/strings/str_format.h>
#include <absl/strings/str_split.h>
#include <QApplication>
#include <QEventLoop>
#include <QMetaObject>
#include <Qt>
#include <chrono>
#include <filesystem>
#include <system_error>
#include <thread>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "OrbitBase/Future.h"
#include "OrbitBase/Logging.h"
#include "OrbitBase/Promise.h"
#include "OrbitSsh/AddrAndPort.h"
#include "OrbitSshQt/ScopedConnection.h"
#include "OrbitSshQt/SftpChannel.h"
#include "OrbitSshQt/SftpCopyToLocalOperation.h"
#include "OrbitSshQt/SftpCopyToRemoteOperation.h"
#include "OrbitSshQt/Task.h"
#include "QtUtils/EventLoop.h"
#include "SessionSetup/Error.h"
ABSL_DECLARE_FLAG(bool, devmode);
static const std::string kLocalhost = "127.0.0.1";
static const std::string kDebDestinationPath = "/tmp/orbitprofiler.deb";
static const std::string kSigDestinationPath = "/tmp/orbitprofiler.deb.asc";
static const std::string_view kSshWatchdogPassphrase = "start_watchdog";
static const std::chrono::milliseconds kSshWatchdogInterval(1000);
namespace orbit_session_setup {
namespace {
template <typename Func>
[[nodiscard]] orbit_ssh_qt::ScopedConnection ConnectQuitHandler(
orbit_qt_utils::EventLoop* loop,
const typename QtPrivate::FunctionPointer<Func>::Object* sender, Func signal) {
return orbit_ssh_qt::ScopedConnection{
QObject::connect(sender, signal, loop, &orbit_qt_utils::EventLoop::quit)};
}
template <typename Func>
[[nodiscard]] orbit_ssh_qt::ScopedConnection ConnectErrorHandler(
orbit_qt_utils::EventLoop* loop,
const typename QtPrivate::FunctionPointer<Func>::Object* sender, Func signal) {
return orbit_ssh_qt::ScopedConnection{
QObject::connect(sender, signal, loop, &orbit_qt_utils::EventLoop::error)};
}
[[nodiscard]] orbit_ssh_qt::ScopedConnection ConnectCancelHandler(orbit_qt_utils::EventLoop* loop,
ServiceDeployManager* sdm) {
return orbit_ssh_qt::ScopedConnection{QObject::connect(
sdm, &ServiceDeployManager::cancelRequested, loop,
[loop]() { loop->error(make_error_code(Error::kUserCanceledServiceDeployment)); })};
}
void PrintAsOrbitService(const std::string& buffer) {
std::vector<std::string_view> lines = absl::StrSplit(buffer, '\n');
for (const auto& line : lines) {
if (!line.empty()) {
PLATFORM_LOG(absl::StrFormat("[ OrbitService] %s\n", line).c_str());
}
}
}
// This function makes it easy to execute a function object on a different thread in a synchronous
// way.
//
// While waiting for the function to finish executing on a different thread a Qt event loop
// processes other (UI-) events. The thread is determined by the associated thread of the QObject
// context.
template <typename Func>
void DeferToBackgroundThreadAndWait(QObject* context, Func&& func) {
QEventLoop waiting_loop; // This event loop processes main thread events while we wait for the
// background thread to finish executing func();
QMetaObject::invokeMethod(
context, [func = std::forward<Func>(func), waiting_loop = QPointer{&waiting_loop}]() mutable {
func();
if (waiting_loop) QMetaObject::invokeMethod(waiting_loop, &QEventLoop::quit);
});
waiting_loop.exec();
}
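// A minimal usage sketch (illustrative only, not part of the original file):
// `worker` is a hypothetical QObject whose thread affinity is a background
// thread, and Compute() a hypothetical function to run there. The lambda
// executes on the worker's thread while the caller's events keep flowing.
//
//   int result = 0;
//   DeferToBackgroundThreadAndWait(worker, [&result]() { result = Compute(); });
//   // `result` is valid here; UI events were processed during the wait.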
} // namespace
template <typename T>
static outcome::result<T> MapError(outcome::result<T> result, Error new_error) {
if (result) {
return result;
} else {
const auto new_error_code = make_error_code(new_error);
ERROR("%s: %s", new_error_code.message().c_str(), result.error().message().c_str());
return outcome::failure(new_error_code);
}
}
ServiceDeployManager::ServiceDeployManager(const DeploymentConfiguration* deployment_configuration,
const orbit_ssh::Context* context,
orbit_ssh::Credentials credentials,
const ServiceDeployManager::GrpcPort& grpc_port,
QObject* parent)
: QObject(parent),
deployment_configuration_(deployment_configuration),
context_(context),
credentials_(std::move(credentials)),
grpc_port_(grpc_port),
ssh_watchdog_timer_(this) {
CHECK(deployment_configuration != nullptr);
CHECK(context != nullptr);
background_thread_.start();
moveToThread(&background_thread_);
}
ServiceDeployManager::~ServiceDeployManager() noexcept {
// ssh_watchdog_timer is registered in background_thread_, so it has to be stopped there to
// not trigger a race condition.
QMetaObject::invokeMethod(
this, [this]() { ssh_watchdog_timer_.stop(); }, Qt::BlockingQueuedConnection);
background_thread_.quit();
background_thread_.wait();
}
void ServiceDeployManager::Cancel() {
// By transforming this function call into a signal we leverage Qt's automatic thread
// synchronization and don't have to bother from what thread Cancel was called.
emit cancelRequested();
}
outcome::result<bool> ServiceDeployManager::CheckIfInstalled() {
CHECK(QThread::currentThread() == thread());
emit statusMessage(QString("Checking if OrbitService is already installed in version %1 on the "
"remote instance.")
.arg(QApplication::applicationVersion()));
auto version = QApplication::applicationVersion().toStdString();
if (!version.empty() && version.front() == 'v') {
// The old git tags have a 'v' in front which is not supported by debian
// packages. So we have to remove it.
version = version.substr(1);
}
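  // dpkg-query prints the installed version of the orbitprofiler package (if
  // any); `grep -xF` exits with 0 only on an exact version match, so the
  // task's exit code tells us whether the expected version is installed.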
const auto command = absl::StrFormat(
"/usr/bin/dpkg-query -W -f '${Version}' orbitprofiler 2>/dev/null | grep -xF '%s'", version);
orbit_ssh_qt::Task check_if_installed_task{&session_.value(), command};
orbit_qt_utils::EventLoop loop{};
QObject::connect(&check_if_installed_task, &orbit_ssh_qt::Task::finished, &loop,
&orbit_qt_utils::EventLoop::exit);
auto error_handler =
ConnectErrorHandler(&loop, &check_if_installed_task, &orbit_ssh_qt::Task::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
check_if_installed_task.Start();
OUTCOME_TRY(auto&& result, loop.exec());
if (result == 0) {
// Already installed
emit statusMessage("The correct version of OrbitService is already installed.");
return outcome::success(true);
} else {
emit statusMessage("The correct version of OrbitService is not yet installed.");
return outcome::success(false);
}
}
outcome::result<uint16_t> ServiceDeployManager::StartTunnel(
std::optional<orbit_ssh_qt::Tunnel>* tunnel, uint16_t port) {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Setting up port forwarding...");
LOG("Setting up tunnel on port %d", port);
tunnel->emplace(&session_.value(), kLocalhost, port, this);
orbit_qt_utils::EventLoop loop{};
auto error_handler =
ConnectErrorHandler(&loop, &tunnel->value(), &orbit_ssh_qt::Tunnel::errorOccurred);
auto quit_handler = ConnectQuitHandler(&loop, &tunnel->value(), &orbit_ssh_qt::Tunnel::started);
auto cancel_handler = ConnectCancelHandler(&loop, this);
tunnel->value().Start();
OUTCOME_TRY(MapError(loop.exec(), Error::kCouldNotStartTunnel));
QObject::connect(&tunnel->value(), &orbit_ssh_qt::Tunnel::errorOccurred, this,
&ServiceDeployManager::handleSocketError);
return outcome::success(tunnel->value().GetListenPort());
}
outcome::result<std::unique_ptr<orbit_ssh_qt::SftpChannel>>
ServiceDeployManager::StartSftpChannel() {
CHECK(QThread::currentThread() == thread());
auto sftp_channel = std::make_unique<orbit_ssh_qt::SftpChannel>(&session_.value());
orbit_qt_utils::EventLoop loop{};
auto quit_handler =
ConnectQuitHandler(&loop, sftp_channel.get(), &orbit_ssh_qt::SftpChannel::started);
auto error_handler =
ConnectErrorHandler(&loop, sftp_channel.get(), &orbit_ssh_qt::SftpChannel::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
sftp_channel->Start();
OUTCOME_TRY(loop.exec());
return sftp_channel;
}
outcome::result<void> ServiceDeployManager::CopyFileToRemote(
const std::string& source, const std::string& dest,
orbit_ssh_qt::SftpCopyToRemoteOperation::FileMode dest_mode) {
CHECK(QThread::currentThread() == thread());
orbit_ssh_qt::SftpCopyToRemoteOperation operation{&session_.value(), sftp_channel_.get()};
orbit_qt_utils::EventLoop loop{};
auto quit_handler =
ConnectQuitHandler(&loop, &operation, &orbit_ssh_qt::SftpCopyToRemoteOperation::stopped);
auto error_handler = ConnectErrorHandler(&loop, &operation,
&orbit_ssh_qt::SftpCopyToRemoteOperation::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
LOG("About to start copying from %s to %s...", source, dest);
operation.CopyFileToRemote(source, dest, dest_mode);
OUTCOME_TRY(loop.exec());
return outcome::success();
}
outcome::result<void> ServiceDeployManager::StopSftpChannel(
orbit_ssh_qt::SftpChannel* sftp_channel) {
CHECK(QThread::currentThread() == thread());
orbit_qt_utils::EventLoop loop{};
auto quit_handler = ConnectQuitHandler(&loop, sftp_channel, &orbit_ssh_qt::SftpChannel::stopped);
auto error_handler =
ConnectErrorHandler(&loop, sftp_channel, &orbit_ssh_qt::SftpChannel::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
sftp_channel->Stop();
OUTCOME_TRY(loop.exec());
return outcome::success();
}
void ServiceDeployManager::StopSftpChannel() { (void)StopSftpChannel(sftp_channel_.get()); }
outcome::result<void> ServiceDeployManager::CopyOrbitServicePackage() {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Copying OrbitService package to the remote instance...");
auto& config = std::get<SignedDebianPackageDeployment>(*deployment_configuration_);
using FileMode = orbit_ssh_qt::SftpCopyToRemoteOperation::FileMode;
OUTCOME_TRY(MapError(CopyFileToRemote(config.path_to_package.string(), kDebDestinationPath,
FileMode::kUserWritable),
Error::kCouldNotUploadPackage));
OUTCOME_TRY(MapError(CopyFileToRemote(config.path_to_signature.string(), kSigDestinationPath,
FileMode::kUserWritable),
Error::kCouldNotUploadSignature));
emit statusMessage("Finished copying the OrbitService package to the remote instance.");
return outcome::success();
}
ErrorMessageOr<void> ServiceDeployManager::CopyFileToLocal(std::string source,
std::string destination) {
orbit_base::Promise<ErrorMessageOr<void>> promise;
auto future = promise.GetFuture();
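  // Run the blocking copy on this object's background thread and hand the
  // result back through a promise/future pair while the calling thread keeps
  // processing events.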
DeferToBackgroundThreadAndWait(
this, [this, source = std::move(source), destination = std::move(destination),
promise = std::move(promise)]() mutable {
promise.SetResult(CopyFileToLocalImpl(source, destination));
});
if (!future.IsFinished()) return ErrorMessage{"Copy operation was aborted."};
return future.Get();
}
ErrorMessageOr<void> ServiceDeployManager::CopyFileToLocalImpl(std::string_view source,
std::string_view destination) {
CHECK(QThread::currentThread() == thread());
LOG("Copying remote \"%s\" to local \"%s\"", source, destination);
auto sftp_channel = StartSftpChannel();
if (!sftp_channel) {
return ErrorMessage(
absl::StrFormat(R"(Unable to start sftp channel to copy the remote "%s" to "%s": %s)",
source, destination, sftp_channel.error().message()));
}
orbit_ssh_qt::SftpCopyToLocalOperation operation{&session_.value(), sftp_channel.value().get()};
orbit_qt_utils::EventLoop loop{};
auto quit_handler =
ConnectQuitHandler(&loop, &operation, &orbit_ssh_qt::SftpCopyToLocalOperation::stopped);
auto error_handler = ConnectErrorHandler(&loop, &operation,
&orbit_ssh_qt::SftpCopyToLocalOperation::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
operation.CopyFileToLocal(source, destination);
auto result = loop.exec();
if (!result) {
return ErrorMessage(absl::StrFormat(R"(Error copying remote "%s" to "%s": %s)", source,
destination, result.error().message()));
}
auto sftp_channel_stop_result = StopSftpChannel(sftp_channel.value().get());
if (!sftp_channel_stop_result) {
std::string sftp_error_message =
absl::StrFormat(R"(Error closing sftp channel (after copied remote "%s" to "%s": %s))",
source, destination, sftp_channel_stop_result.error().message());
ERROR("%s", sftp_error_message);
return ErrorMessage(
absl::StrFormat("Download of file %s failed: %s", source, sftp_error_message));
}
return outcome::success();
}
outcome::result<void> ServiceDeployManager::CopyOrbitServiceExecutable(
const BareExecutableAndRootPasswordDeployment& config) {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Copying OrbitService executable to the remote instance...");
const std::string exe_destination_path = "/tmp/OrbitService";
OUTCOME_TRY(CopyFileToRemote(
config.path_to_executable.string(), exe_destination_path,
orbit_ssh_qt::SftpCopyToRemoteOperation::FileMode::kUserWritableAllExecutable));
emit statusMessage("Finished copying the OrbitService executable to the remote instance.");
return outcome::success();
}
outcome::result<void> ServiceDeployManager::CopyOrbitApiLibrary(
const BareExecutableAndRootPasswordDeployment& config) {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Copying liborbit.so to the remote instance...");
const std::string library_destination_path = "/tmp/liborbit.so";
const auto library_source_path = config.path_to_executable.parent_path() / "../lib/liborbit.so";
OUTCOME_TRY(CopyFileToRemote(
library_source_path.string(), library_destination_path,
orbit_ssh_qt::SftpCopyToRemoteOperation::FileMode::kUserWritableAllExecutable));
emit statusMessage("Finished copying liborbit.so to the remote instance.");
return outcome::success();
}
outcome::result<void> ServiceDeployManager::CopyOrbitUserSpaceInstrumentationLibrary(
const BareExecutableAndRootPasswordDeployment& config) {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Copying liborbituserspaceinstrumentation.so to the remote instance...");
const std::string library_destination_path = "/tmp/liborbituserspaceinstrumentation.so";
const auto library_source_path =
config.path_to_executable.parent_path() / "../lib/liborbituserspaceinstrumentation.so";
OUTCOME_TRY(CopyFileToRemote(
library_source_path.string(), library_destination_path,
orbit_ssh_qt::SftpCopyToRemoteOperation::FileMode::kUserWritableAllExecutable));
emit statusMessage(
"Finished copying liborbituserspaceinstrumentation.so to the remote instance.");
return outcome::success();
}
outcome::result<void> ServiceDeployManager::StartOrbitService() {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Starting OrbitService on the remote instance...");
std::string task_string = "/opt/developer/tools/OrbitService";
if (absl::GetFlag(FLAGS_devmode)) {
task_string += " --devmode";
}
orbit_service_task_.emplace(&session_.value(), task_string);
orbit_qt_utils::EventLoop loop{};
auto quit_handler =
ConnectQuitHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::started);
auto error_handler =
ConnectErrorHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::readyReadStdOut, this,
[this]() { PrintAsOrbitService(orbit_service_task_->ReadStdOut()); });
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::readyReadStdErr, this,
[this]() { PrintAsOrbitService(orbit_service_task_->ReadStdErr()); });
orbit_service_task_->Start();
OUTCOME_TRY(loop.exec());
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::errorOccurred, this,
&ServiceDeployManager::handleSocketError);
return outcome::success();
}
outcome::result<void> ServiceDeployManager::StartOrbitServicePrivileged(
const BareExecutableAndRootPasswordDeployment& config) {
CHECK(QThread::currentThread() == thread());
// TODO(antonrohr) Check whether the password was incorrect.
  // There are multiple ways of doing this. The best way is probably to have a
  // second task running before OrbitService that sets the SUID bit. It might be
  // necessary to close stdin by sending EOF, since sudo would otherwise ask to
  // enter the password again. Another option is to use stderr as soon as it's
  // implemented in OrbitSshQt::Task.
emit statusMessage("Starting OrbitService on the remote instance...");
std::string task_string = "sudo --stdin /tmp/OrbitService";
if (absl::GetFlag(FLAGS_devmode)) {
task_string += " --devmode";
}
orbit_service_task_.emplace(&session_.value(), task_string);
orbit_service_task_->Write(absl::StrFormat("%s\n", config.root_password));
orbit_qt_utils::EventLoop loop{};
auto error_handler =
ConnectErrorHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::errorOccurred);
auto quit_handler =
ConnectQuitHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::started);
auto cancel_handler = ConnectCancelHandler(&loop, this);
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::readyReadStdOut, this,
[this]() { PrintAsOrbitService(orbit_service_task_->ReadStdOut()); });
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::readyReadStdErr, this,
[this]() { PrintAsOrbitService(orbit_service_task_->ReadStdErr()); });
orbit_service_task_->Start();
OUTCOME_TRY(loop.exec());
QObject::connect(&orbit_service_task_.value(), &orbit_ssh_qt::Task::errorOccurred, this,
&ServiceDeployManager::handleSocketError);
return outcome::success();
}
outcome::result<void> ServiceDeployManager::InstallOrbitServicePackage() {
CHECK(QThread::currentThread() == thread());
emit statusMessage("Installing the OrbitService package on the remote instance...");
const auto command = absl::StrFormat(
"sudo /usr/local/cloudcast/sbin/install_signed_package.sh %s", kDebDestinationPath);
orbit_ssh_qt::Task install_service_task{&session_.value(), command};
orbit_qt_utils::EventLoop loop{};
QObject::connect(&install_service_task, &orbit_ssh_qt::Task::finished, this, [&](int exit_code) {
if (exit_code == 0) {
loop.quit();
} else {
      // TODO(antonrohr) Use the stderr message once it's implemented in
      // OrbitSshQt::Task.
      ERROR("Unable to install the OrbitService package, exit code: %d", exit_code);
loop.error(make_error_code(Error::kCouldNotInstallPackage));
}
});
auto error_handler =
ConnectErrorHandler(&loop, &install_service_task, &orbit_ssh_qt::Task::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
install_service_task.Start();
OUTCOME_TRY(loop.exec());
return outcome::success();
}
outcome::result<void> ServiceDeployManager::ConnectToServer() {
CHECK(QThread::currentThread() == thread());
emit statusMessage(QString("Connecting to %1:%2...")
.arg(QString::fromStdString(credentials_.addr_and_port.addr))
.arg(credentials_.addr_and_port.port));
session_.emplace(context_, this);
using orbit_ssh_qt::Session;
orbit_qt_utils::EventLoop loop{};
auto quit_handler = ConnectQuitHandler(&loop, &session_.value(), &Session::started);
auto error_handler = ConnectErrorHandler(&loop, &session_.value(), &Session::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
session_->ConnectToServer(credentials_);
OUTCOME_TRY(MapError(loop.exec(), Error::kCouldNotConnectToServer));
emit statusMessage(QString("Successfully connected to %1:%2.")
.arg(QString::fromStdString(credentials_.addr_and_port.addr))
.arg(credentials_.addr_and_port.port));
QObject::connect(&session_.value(), &Session::errorOccurred, this,
&ServiceDeployManager::handleSocketError);
return outcome::success();
}
void ServiceDeployManager::StartWatchdog() {
CHECK(QThread::currentThread() == thread());
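  // Arm the OrbitService watchdog, then feed it a heartbeat character on every
  // timer tick (presumably so the service can detect a vanished client).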
orbit_service_task_->Write(kSshWatchdogPassphrase);
QObject::connect(&ssh_watchdog_timer_, &QTimer::timeout, [this]() {
if (orbit_service_task_) {
orbit_service_task_->Write(".");
} else {
ssh_watchdog_timer_.stop();
}
});
ssh_watchdog_timer_.start(kSshWatchdogInterval);
}
outcome::result<ServiceDeployManager::GrpcPort> ServiceDeployManager::Exec() {
outcome::result<GrpcPort> result = outcome::success(GrpcPort{0});
DeferToBackgroundThreadAndWait(this, [&]() { result = ExecImpl(); });
return result;
}
outcome::result<ServiceDeployManager::GrpcPort> ServiceDeployManager::ExecImpl() {
CHECK(QThread::currentThread() == thread());
OUTCOME_TRY(ConnectToServer());
OUTCOME_TRY(auto&& sftp_channel, StartSftpChannel());
sftp_channel_ = std::move(sftp_channel);
// Release mode: Deploying a signed debian package. No password required.
if (std::holds_alternative<SignedDebianPackageDeployment>(*deployment_configuration_)) {
OUTCOME_TRY(auto&& service_already_installed, CheckIfInstalled());
if (!service_already_installed) {
OUTCOME_TRY(CopyOrbitServicePackage());
OUTCOME_TRY(InstallOrbitServicePackage());
}
OUTCOME_TRY(StartOrbitService());
// TODO(hebecker): Replace this timeout by waiting for a
// stdout-greeting-message.
std::this_thread::sleep_for(std::chrono::milliseconds{100});
StartWatchdog();
// Developer mode: Deploying a bare executable and start it via sudo.
} else if (std::holds_alternative<BareExecutableAndRootPasswordDeployment>(
*deployment_configuration_)) {
const auto& config =
std::get<BareExecutableAndRootPasswordDeployment>(*deployment_configuration_);
OUTCOME_TRY(CopyOrbitServiceExecutable(config));
OUTCOME_TRY(CopyOrbitApiLibrary(config));
OUTCOME_TRY(CopyOrbitUserSpaceInstrumentationLibrary(config));
OUTCOME_TRY(StartOrbitServicePrivileged(config));
// TODO(hebecker): Replace this timeout by waiting for a
// stdout-greeting-message.
std::this_thread::sleep_for(std::chrono::milliseconds{200});
StartWatchdog();
// Manual Developer mode: No deployment, no starting. Just the tunnels.
} else if (std::holds_alternative<NoDeployment>(*deployment_configuration_)) {
// Nothing to deploy
emit statusMessage(
"Skipping deployment step. Expecting that OrbitService is already "
"running...");
}
outcome::result<uint16_t> local_grpc_port_result =
StartTunnel(&grpc_tunnel_, grpc_port_.grpc_port);
int retry = 3;
while (retry > 0 && local_grpc_port_result.has_error()) {
ERROR("Failed to establish tunnel. Trying again in 500ms");
std::this_thread::sleep_for(std::chrono::milliseconds{500});
local_grpc_port_result = StartTunnel(&grpc_tunnel_, grpc_port_.grpc_port);
retry--;
}
OUTCOME_TRY(auto&& local_grpc_port, local_grpc_port_result);
emit statusMessage("Successfully set up port forwarding!");
LOG("Local port for gRPC is %d", local_grpc_port);
return outcome::success(GrpcPort{local_grpc_port});
}
void ServiceDeployManager::handleSocketError(std::error_code e) {
LOG("Socket error: %s", e.message());
emit socketErrorOccurred(e);
}
void ServiceDeployManager::ShutdownTunnel(std::optional<orbit_ssh_qt::Tunnel>* tunnel) {
if (!tunnel || !*tunnel) {
return;
}
orbit_qt_utils::EventLoop loop{};
auto quit_handler = ConnectQuitHandler(&loop, &tunnel->value(), &orbit_ssh_qt::Tunnel::started);
auto error_handler =
ConnectQuitHandler(&loop, &tunnel->value(), &orbit_ssh_qt::Tunnel::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
tunnel->value().Stop();
(void)loop.exec();
*tunnel = std::nullopt;
}
void ServiceDeployManager::ShutdownOrbitService() {
if (!orbit_service_task_) {
return;
}
orbit_qt_utils::EventLoop loop{};
auto quit_handler =
ConnectQuitHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::finished);
auto error_handler =
ConnectQuitHandler(&loop, &orbit_service_task_.value(), &orbit_ssh_qt::Task::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
orbit_service_task_->Stop();
(void)loop.exec();
orbit_service_task_ = std::nullopt;
}
void ServiceDeployManager::ShutdownSession() {
if (!session_) {
return;
}
orbit_qt_utils::EventLoop loop{};
auto quit_handler = ConnectQuitHandler(&loop, &session_.value(), &orbit_ssh_qt::Session::stopped);
auto error_handler =
ConnectQuitHandler(&loop, &session_.value(), &orbit_ssh_qt::Session::errorOccurred);
auto cancel_handler = ConnectCancelHandler(&loop, this);
session_->Disconnect();
(void)loop.exec();
session_ = std::nullopt;
}
void ServiceDeployManager::Shutdown() {
DeferToBackgroundThreadAndWait(this, [this]() {
StopSftpChannel();
ShutdownTunnel(&grpc_tunnel_);
ShutdownOrbitService();
ShutdownSession();
});
}
} // namespace orbit_session_setup
|
#include <stdlib.h>
#include "Application.h"
#include "Globals.h"
#include "Libraries/SDL/include/SDL.h"
#pragma comment( lib, "SDL2.lib" )
#pragma comment( lib, "SDL2main.lib" )
#include "DebugLeaks.h"
#include "Brofiler.h"
enum main_states
{
MAIN_CREATION,
MAIN_START,
MAIN_UPDATE,
MAIN_FINISH,
MAIN_EXIT
};
Application* App = NULL;
void DumpLeaks(void)
{
_CrtDumpMemoryLeaks(); // show leaks with file and line where allocation was made
}
int main(int argc, char ** argv)
{
atexit(DumpLeaks);
int main_return = EXIT_FAILURE;
main_states state = MAIN_CREATION;
while (state != MAIN_EXIT)
{
BROFILER_FRAME("YourThreadName");
switch (state)
{
case MAIN_CREATION:
LOG("Application Creation --------------");
App = new Application();
state = MAIN_START;
break;
case MAIN_START:
LOG("Application Init --------------");
if (App->Init() == false)
{
LOG("Application Init exits with error -----");
state = MAIN_EXIT;
}
else
{
state = MAIN_UPDATE;
LOG("Application Update --------------");
}
break;
case MAIN_UPDATE:
{
int update_return = App->Update();
if (update_return == UPDATE_ERROR)
{
LOG("Application Update exits with error -----");
state = MAIN_EXIT;
}
if (update_return == UPDATE_STOP)
state = MAIN_FINISH;
}
break;
case MAIN_FINISH:
LOG("Application CleanUp --------------");
if (App->CleanUp() == false)
{
LOG("Application CleanUp exits with error -----");
}
else
main_return = EXIT_SUCCESS;
state = MAIN_EXIT;
break;
}
}
delete App;
//LOG("Bye :)\n");
return main_return;
}
|
/* This file is part of Jellyfish.
Jellyfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Jellyfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Jellyfish. If not, see <http://www.gnu.org/licenses/>.
*/
#include <signal.h>
#include <string.h> // memset
#include <assert.h> // assert
#include <unistd.h> // getpid, _exit
#include <iostream>
#include <fstream>
#include <vector>
#include <chrono>
#include <jellyfish/err.hpp>
#include <jellyfish/mer_overlap_sequence_parser.hpp>
#include <jellyfish/mer_iterator.hpp>
#include <jellyfish/stream_manager.hpp>
#include <jellyfish/generator_manager.hpp>
#include <jellyfish/mer_dna_bloom_counter.hpp>
#include <jellyfish/thread_exec.hpp>
#include <jellyfish/file_header.hpp>
#include <sub_commands/bc_main_cmdline.hpp>
using std::chrono::system_clock;
using std::chrono::duration;
using std::chrono::duration_cast;
template<typename DtnType>
inline double as_seconds(DtnType dtn) { return duration_cast<duration<double>>(dtn).count(); }
static bc_main_cmdline args; // Command line switches and arguments
typedef std::vector<const char*> file_vector;
using jellyfish::mer_dna;
using jellyfish::mer_dna_bloom_counter;
typedef jellyfish::mer_overlap_sequence_parser<jellyfish::stream_manager<file_vector::const_iterator> > sequence_parser;
typedef jellyfish::mer_iterator<sequence_parser, jellyfish::mer_dna> mer_iterator;
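// Counts k-mers from all input streams into the shared Bloom counter; via
// jellyfish::thread_exec, start(thid) runs once per worker thread.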
template<typename PathIterator>
class mer_bloom_counter : public jellyfish::thread_exec {
int nb_threads_;
mer_dna_bloom_counter& filter_;
jellyfish::stream_manager<PathIterator> streams_;
sequence_parser parser_;
public:
mer_bloom_counter(int nb_threads, mer_dna_bloom_counter& filter,
PathIterator file_begin, PathIterator file_end,
PathIterator pipe_begin, PathIterator pipe_end,
uint32_t concurent_files) :
filter_(filter),
streams_(file_begin, file_end, pipe_begin, pipe_end, concurent_files),
parser_(jellyfish::mer_dna::k(), streams_.nb_streams(), 3 * nb_threads, 4096, streams_)
{ }
virtual void start(int thid) {
for(mer_iterator mers(parser_, args.canonical_flag) ; mers; ++mers) {
filter_.insert(*mers);
}
}
};
// If we get a termination signal, kill the manager and then kill ourselves.
static pid_t manager_pid = 0;
static void signal_handler(int sig) {
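  // Forward the signal to the generator manager, restore the default handler,
  // and re-raise so the default disposition terminates this process too.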
if(manager_pid)
kill(manager_pid, SIGTERM);
signal(sig, SIG_DFL);
kill(getpid(), sig);
_exit(EXIT_FAILURE); // Should not be reached
}
int bc_main(int argc, char *argv[])
{
auto start_time = system_clock::now();
jellyfish::file_header header;
header.fill_standard();
header.set_cmdline(argc, argv);
args.parse(argc, argv);
mer_dna::k(args.mer_len_arg);
std::unique_ptr<jellyfish::generator_manager> generator_manager;
if(args.generator_given) {
auto gm =
new jellyfish::generator_manager(args.generator_arg, args.Generators_arg,
args.shell_given ? args.shell_arg : (const char*)0);
generator_manager.reset(gm);
generator_manager->start();
manager_pid = generator_manager->pid();
struct sigaction act;
memset(&act, '\0', sizeof(act));
act.sa_handler = signal_handler;
assert(sigaction(SIGTERM, &act, 0) == 0);
}
header.canonical(args.canonical_flag);
std::ofstream output(args.output_arg);
if(!output.good())
die << "Can't open output file '" << args.output_arg << "'";
header.format("bloomcounter");
header.key_len(args.mer_len_arg * 2);
jellyfish::hash_pair<mer_dna> hash_fns;
header.matrix(hash_fns.m1, 1);
header.matrix(hash_fns.m2, 2);
mer_dna_bloom_counter filter(args.fpr_arg, args.size_arg, hash_fns);
header.size(filter.m());
header.nb_hashes(filter.k());
header.write(output);
auto after_init_time = system_clock::now();
// Iterators to the multi pipe paths. If no generator manager,
// generate an empty range.
  auto pipes_begin = generator_manager ? generator_manager->pipes().begin() : args.file_arg.end();
  auto pipes_end = generator_manager ? generator_manager->pipes().end() : args.file_arg.end();
mer_bloom_counter<file_vector::const_iterator> counter(args.threads_arg, filter,
args.file_arg.begin(), args.file_arg.end(),
pipes_begin, pipes_end, args.Files_arg);
counter.exec_join(args.threads_arg);
// If we have a manager, wait for it
if(generator_manager) {
signal(SIGTERM, SIG_DFL);
manager_pid = 0;
if(!generator_manager->wait())
die << "Some generator commands failed";
generator_manager.reset();
}
auto after_count_time = system_clock::now();
filter.write_bits(output);
output.close();
auto after_dump_time = system_clock::now();
if(args.timing_given) {
std::ofstream timing_file(args.timing_arg);
timing_file << "Init " << as_seconds(after_init_time - start_time) << "\n"
<< "Counting " << as_seconds(after_count_time - after_init_time) << "\n"
<< "Writing " << as_seconds(after_dump_time - after_count_time) << "\n";
}
return 0;
}
|
// Copyright (c) 2011-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "askpassphrasedialog.h"
#include "ui_askpassphrasedialog.h"
#include "guiconstants.h"
#include "walletmodel.h"
#include <QMessageBox>
#include <QPushButton>
#include <QKeyEvent>
AskPassphraseDialog::AskPassphraseDialog(Mode mode, QWidget *parent) :
QDialog(parent),
ui(new Ui::AskPassphraseDialog),
mode(mode),
model(0),
fCapsLock(false)
{
ui->setupUi(this);
ui->passEdit1->setMaxLength(MAX_PASSPHRASE_SIZE);
ui->passEdit2->setMaxLength(MAX_PASSPHRASE_SIZE);
ui->passEdit3->setMaxLength(MAX_PASSPHRASE_SIZE);
// Setup Caps Lock detection.
ui->passEdit1->installEventFilter(this);
ui->passEdit2->installEventFilter(this);
ui->passEdit3->installEventFilter(this);
switch(mode)
{
case Encrypt: // Ask passphrase x2
ui->passLabel1->hide();
ui->passEdit1->hide();
ui->warningLabel->setText(tr("Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>."));
setWindowTitle(tr("Encrypt wallet"));
break;
case Unlock: // Ask passphrase
ui->warningLabel->setText(tr("This operation needs your wallet passphrase to unlock the wallet."));
ui->passLabel2->hide();
ui->passEdit2->hide();
ui->passLabel3->hide();
ui->passEdit3->hide();
setWindowTitle(tr("Unlock wallet"));
break;
case Decrypt: // Ask passphrase
ui->warningLabel->setText(tr("This operation needs your wallet passphrase to decrypt the wallet."));
ui->passLabel2->hide();
ui->passEdit2->hide();
ui->passLabel3->hide();
ui->passEdit3->hide();
setWindowTitle(tr("Decrypt wallet"));
break;
case ChangePass: // Ask old passphrase + new passphrase x2
setWindowTitle(tr("Change passphrase"));
ui->warningLabel->setText(tr("Enter the old and new passphrase to the wallet."));
break;
}
textChanged();
connect(ui->passEdit1, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
connect(ui->passEdit2, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
connect(ui->passEdit3, SIGNAL(textChanged(QString)), this, SLOT(textChanged()));
}
AskPassphraseDialog::~AskPassphraseDialog()
{
    // Attempt to overwrite the text fields so their contents do not linger in memory
ui->passEdit1->setText(QString(" ").repeated(ui->passEdit1->text().size()));
ui->passEdit2->setText(QString(" ").repeated(ui->passEdit2->text().size()));
ui->passEdit3->setText(QString(" ").repeated(ui->passEdit3->text().size()));
delete ui;
}
void AskPassphraseDialog::setModel(WalletModel *model)
{
this->model = model;
}
void AskPassphraseDialog::accept()
{
SecureString oldpass, newpass1, newpass2;
if(!model)
return;
oldpass.reserve(MAX_PASSPHRASE_SIZE);
newpass1.reserve(MAX_PASSPHRASE_SIZE);
newpass2.reserve(MAX_PASSPHRASE_SIZE);
// TODO: get rid of this .c_str() by implementing SecureString::operator=(std::string)
// Alternately, find a way to make this input mlock()'d to begin with.
oldpass.assign(ui->passEdit1->text().toStdString().c_str());
newpass1.assign(ui->passEdit2->text().toStdString().c_str());
newpass2.assign(ui->passEdit3->text().toStdString().c_str());
switch(mode)
{
case Encrypt: {
if(newpass1.empty() || newpass2.empty())
{
// Cannot encrypt with empty passphrase
break;
}
QMessageBox::StandardButton retval = QMessageBox::question(this, tr("Confirm wallet encryption"),
tr("Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR JERMABUXS</b>!") + "<br><br>" + tr("Are you sure you wish to encrypt your wallet?"),
QMessageBox::Yes|QMessageBox::Cancel,
QMessageBox::Cancel);
if(retval == QMessageBox::Yes)
{
if(newpass1 == newpass2)
{
if(model->setWalletEncrypted(true, newpass1))
{
QMessageBox::warning(this, tr("Wallet encrypted"),
"<qt>" +
tr("Jermabux will close now to finish the encryption process. "
"Remember that encrypting your wallet cannot fully protect "
"your jermabuxs from being stolen by malware infecting your computer.") +
"<br><br><b>" +
tr("IMPORTANT: Any previous backups you have made of your wallet file "
"should be replaced with the newly generated, encrypted wallet file. "
"For security reasons, previous backups of the unencrypted wallet file "
"will become useless as soon as you start using the new, encrypted wallet.") +
"</b></qt>");
QApplication::quit();
}
else
{
QMessageBox::critical(this, tr("Wallet encryption failed"),
tr("Wallet encryption failed due to an internal error. Your wallet was not encrypted."));
}
QDialog::accept(); // Success
}
else
{
QMessageBox::critical(this, tr("Wallet encryption failed"),
tr("The supplied passphrases do not match."));
}
}
else
{
QDialog::reject(); // Cancelled
}
} break;
case Unlock:
if(!model->setWalletLocked(false, oldpass))
{
QMessageBox::critical(this, tr("Wallet unlock failed"),
tr("The passphrase entered for the wallet decryption was incorrect."));
}
else
{
QDialog::accept(); // Success
}
break;
case Decrypt:
if(!model->setWalletEncrypted(false, oldpass))
{
QMessageBox::critical(this, tr("Wallet decryption failed"),
tr("The passphrase entered for the wallet decryption was incorrect."));
}
else
{
QDialog::accept(); // Success
}
break;
case ChangePass:
if(newpass1 == newpass2)
{
if(model->changePassphrase(oldpass, newpass1))
{
QMessageBox::information(this, tr("Wallet encrypted"),
tr("Wallet passphrase was successfully changed."));
QDialog::accept(); // Success
}
else
{
QMessageBox::critical(this, tr("Wallet encryption failed"),
tr("The passphrase entered for the wallet decryption was incorrect."));
}
}
else
{
QMessageBox::critical(this, tr("Wallet encryption failed"),
tr("The supplied passphrases do not match."));
}
break;
}
}
void AskPassphraseDialog::textChanged()
{
// Validate input, set Ok button to enabled when acceptable
bool acceptable = false;
switch(mode)
{
case Encrypt: // New passphrase x2
acceptable = !ui->passEdit2->text().isEmpty() && !ui->passEdit3->text().isEmpty();
break;
case Unlock: // Old passphrase x1
case Decrypt:
acceptable = !ui->passEdit1->text().isEmpty();
break;
case ChangePass: // Old passphrase x1, new passphrase x2
acceptable = !ui->passEdit1->text().isEmpty() && !ui->passEdit2->text().isEmpty() && !ui->passEdit3->text().isEmpty();
break;
}
ui->buttonBox->button(QDialogButtonBox::Ok)->setEnabled(acceptable);
}
bool AskPassphraseDialog::event(QEvent *event)
{
// Detect Caps Lock key press.
if (event->type() == QEvent::KeyPress) {
QKeyEvent *ke = static_cast<QKeyEvent *>(event);
if (ke->key() == Qt::Key_CapsLock) {
fCapsLock = !fCapsLock;
}
if (fCapsLock) {
ui->capsLabel->setText(tr("Warning: The Caps Lock key is on!"));
} else {
ui->capsLabel->clear();
}
}
return QWidget::event(event);
}
bool AskPassphraseDialog::eventFilter(QObject *object, QEvent *event)
{
/* Detect Caps Lock.
* There is no good OS-independent way to check a key state in Qt, but we
* can detect Caps Lock by checking for the following condition:
* Shift key is down and the result is a lower case character, or
* Shift key is not down and the result is an upper case character.
*/
if (event->type() == QEvent::KeyPress) {
QKeyEvent *ke = static_cast<QKeyEvent *>(event);
QString str = ke->text();
if (str.length() != 0) {
const QChar *psz = str.unicode();
bool fShift = (ke->modifiers() & Qt::ShiftModifier) != 0;
if ((fShift && *psz >= 'a' && *psz <= 'z') || (!fShift && *psz >= 'A' && *psz <= 'Z')) {
fCapsLock = true;
ui->capsLabel->setText(tr("Warning: The Caps Lock key is on!"));
} else if (psz->isLetter()) {
fCapsLock = false;
ui->capsLabel->clear();
}
}
}
return QDialog::eventFilter(object, event);
}
|
#ifndef BOOST_MPL_BACK_HPP_INCLUDED
#define BOOST_MPL_BACK_HPP_INCLUDED
// Copyright Aleksey Gurtovoy 2000-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.
// $Id: back.hpp 49267 2008-10-11 06:19:02Z agurtovoy $
// $Date: 2008-10-10 23:19:02 -0700 (Fri, 10 Oct 2008) $
// $Revision: 49267 $
#include <boost/mpl/back_fwd.hpp>
#include <boost/mpl/aux_/back_impl.hpp>
#include <boost/mpl/sequence_tag.hpp>
#include <boost/mpl/aux_/na_spec.hpp>
#include <boost/mpl/aux_/lambda_support.hpp>
namespace boost { namespace mpl {
template<
typename BOOST_MPL_AUX_NA_PARAM(Sequence)
>
struct back
: back_impl< typename sequence_tag<Sequence>::type >
::template apply< Sequence >
{
BOOST_MPL_AUX_LAMBDA_SUPPORT(1,back,(Sequence))
};
BOOST_MPL_AUX_NA_SPEC(1, back)
}}
#endif // BOOST_MPL_BACK_HPP_INCLUDED
|
//===----------------------------------------------------------------------===//
//
// compass
// parse.cpp
//
// Identification: src/parse/parse.cpp
//
// Last Modified : 2022.1.15 Jiawei Wang
//
// Copyright (c) 2022 Angold-4
//
//===----------------------------------------------------------------------===//
#include "./parse.hpp"
#include "./seqparse.cpp"
std::map<int, VS> thrdatafields; // sorted map of per-thread results, keyed by input index (TODO: multithreading cleanup)
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; // guards thrdatafields
// VVS datafields; // final write-to-csv data (Test only)
// libcurl write callback: must match size_t(char*, size_t, size_t, void*),
// and returns the number of bytes actually consumed.
size_t curlWriter(char* data, size_t size, size_t nmemb, std::string* buffer) {
// write to the buffer
size_t result = 0;
if (buffer != NULL) {
buffer->append(data, size * nmemb);
result = size * nmemb;
}
return result;
}
/**
* thrimp()
* per-thread worker for the multithreaded fetch:
* a wrapper around what used to be the for loop in main()
*/
void *thrimp(void* indexurl) {
VS datafield = {}; // store this data (S/O/B)
// 1. Parse this "url", get index, url and type
std::string *sp = static_cast<std::string*>(indexurl);
std::string iturl = *sp;
delete sp; // allocated with new in main(); release it once copied
std::string sindex = "";
// 1.1 Get index
for (auto it = iturl.begin(); it < iturl.end(); it++) {
if (*it == ',') {
iturl.erase(iturl.begin(), it+1);
break;
}
sindex.push_back(*it);
}
int index = std::stoi(sindex);
// 1.2 Get type
std::string type = "";
for (auto it = iturl.begin(); it < iturl.end(); it++) {
if (*it == ',') {
iturl.erase(iturl.begin(), it+1);
break;
}
type.push_back(*it);
}
std::string url = iturl;
std::cout << "index: " << index << " " << "type: " << type << " " << "url: " << url << std::endl;
// 2. Get html data
// CurlObj* co = new CurlObj(url);
// std::string html = co->getData();
CURL* curl = curl_easy_init();
std::string curlBuffer = "";
if (!curl) throw std::runtime_error("Curl did not initialize.");
curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curlWriter);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &curlBuffer);
CURLcode res = curl_easy_perform(curl);
if (res != CURLE_OK)
std::cerr << "curl_easy_perform failed: " << curl_easy_strerror(res) << std::endl;
curl_easy_cleanup(curl); // release the handle; the original leaked it
// std::cout << curlBuffer.size() << std::endl;
// 3. Create parser and parse data
Parser parser(curlBuffer); // stack-allocated so each thread's parser is freed on return
if (type == "Stock") {
datafield = parser.stock();
} else if (type == "Option") {
datafield = parser.option();
// option rows occupy the last columns: pad the stock/bond columns with blanks
VS stockbondfield = {" ", " ", " ", " ", " ", " ", " ", " ", " ",
" ", " ", " ", " ", " ", " ", " "};
for (std::string data : datafield) {
stockbondfield.push_back(data);
}
datafield = stockbondfield;
} else if (type == "Bond") {
datafield = parser.bond();
// bond rows follow the stock columns: pad the stock columns with blanks
VS stockfield = {" ", " ", " ", " ", " ", " "};
for (std::string data : datafield) {
stockfield.push_back(data);
}
datafield = stockfield;
} else {
// invalid type
std::cout << "Type Error" << std::endl;
datafield = {"invalid"};
}
// 4. Push valid data into datafields (the map is shared: guard it with the mutex)
pthread_mutex_lock(&mutex);
thrdatafields[index] = datafield;
pthread_mutex_unlock(&mutex);
pthread_exit(NULL);
}
// Finds 'key' in bhtml and returns [start, end) offsets of the quoted value
// that follows it, or {-1, -1} on failure.
std::pair<int, int> Parser::bparse(std::string bhtml, std::string key) {
size_t pos = bhtml.find(key);
if (pos == std::string::npos) { // check before adding key.size(): find() returns npos on failure
std::cerr << "Searching key error, on bparse bond" << std::endl;
return std::make_pair(-1, -1);
}
size_t start = pos + key.size();
size_t end = bhtml.find("\"", start);
if (end == std::string::npos) {
std::cerr << "Searching key error, on bparse bond" << std::endl;
return std::make_pair(-1, -1);
}
return std::make_pair(start, end);
}
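/* Worked example (hypothetical fragment): given
*   bhtml = ... "bondIssuer":"ACME Corp" ...
* and key = "bondIssuer\":\"", bparse returns offsets spanning exactly
* ACME Corp, so substr(start, end - start) extracts the value up to the
* closing quote.
*/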
// currency, name, Bid, Ask, Coupon, Maturity, Rating, YTM, Seniority, Type
VS Parser::bond() {
if (this->phtml.size() < 1000) { // invalid html
VS invalidret = {"invalid"};
return invalidret;
}
std::string Currency, IssuerName, Bid, Ask, Coupon, Maturity, Rating, YTM, Seniority, Type;
// Currency : bondCurrencyCode
// IssuerName : bondIssuer
// Bid : endBidPrice
// Ask : endAskPrice
// Coupon : bondName
// Maturity : bondName
// Rating : bondCreditRate
// YTM : yrsToMaturityDisplay
// Seniority : seniority (2nd)
// Type : bondType (2nd)
// speed up
int bblkend = this->phtml.find(":[[");
std::string bhtml = this->phtml.substr(0, bblkend);
// Parse each field separately
// 1. Currency, Coupon, Maturity
std::string namekey = "bondName\":\"";
std::pair<int, int> np = this->bparse(bhtml, namekey);
if (np.first == -1) return {}; // must check before substr: substr(-1, ...) is out of range
std::string Name = bhtml.substr(np.first, np.second - np.first);
// std::cout << "Name: " << Name << std::endl;
// 1.1 Coupon
int coupstart = Name.find(' ') + 1;
int coupend = Name.find('%', coupstart) + 1;
Coupon = Name.substr(coupstart, coupend - coupstart);
// std::cout << "Coupon: " << Coupon << std::endl;
// 1.2 Maturity
int maturstart = Name.find(' ', coupend) + 1;
int maturend = Name.find(' ', maturstart);
Maturity = Name.substr(maturstart, maturend - maturstart);
// std::cout << "Maturity: " << Maturity << std::endl;
// 1.3 Currency
int currstart = Name.find('(', maturend) + 1;
int currend = Name.find(')', currstart);
Currency = Name.substr(currstart, currend - currstart);
// std::cout << "Currency: " << Currency << std::endl;
// 2. IssuerName
std::string issuerkey = "bondIssuer\":\"";
std::pair<int, int> ip = this->bparse(bhtml, issuerkey);
if (ip.first == -1) return {};
IssuerName = bhtml.substr(ip.first, ip.second - ip.first);
// std::cout << "IssuerName: " << IssuerName << std::endl;
// 3. Bid
std::string bidkey = "endBidPrice\":";
std::pair<int, int> bp = this->bparse(bhtml, bidkey);
if (bp.first == -1) return {};
Bid = bhtml.substr(bp.first, bp.second - 1 - bp.first);
// std::cout << "Bid: " << Bid << std::endl;
// 4. Ask
std::string askkey = "endAskPrice\":";
std::pair<int, int> ap = this->bparse(bhtml, askkey);
if (ap.first == -1) return {};
Ask = bhtml.substr(ap.first, ap.second - 1 - ap.first);
// std::cout << "Ask: " << Ask << std::endl;
// 5. Rating
std::string ratkey = "bondCreditRate\":\"";
std::pair<int, int> rp = this->bparse(bhtml, ratkey);
if (rp.first == -1) return {};
Rating = bhtml.substr(rp.first, rp.second - rp.first);
// std::cout << "Rating: " << Rating << std::endl;
// 6. YTM
std::string ytmkey = "yrsToMaturityDisplay\":\"";
std::pair<int, int> yp = this->bparse(bhtml, ytmkey);
if (yp.first == -1) return {};
YTM = bhtml.substr(yp.first, yp.second - yp.first);
// std::cout << "YTM: " << YTM << std::endl;
// 7. Seniority 2nd
std::string senkey = "seniority\":\"";
size_t sfirst = bhtml.find(senkey); // the value lives at the second occurrence
if (sfirst == std::string::npos) return {};
size_t ssecond = bhtml.find(senkey, sfirst + senkey.size());
if (ssecond == std::string::npos) return {};
ssecond += senkey.size();
size_t send = bhtml.find("\"", ssecond);
Seniority = bhtml.substr(ssecond, send - ssecond);
// std::cout << "Seniority: " << Seniority << std::endl;
// 8. Type 2nd
std::string typekey = "bondType\":\"";
size_t tfirst = bhtml.find(typekey); // the value lives at the second occurrence
if (tfirst == std::string::npos) return {};
size_t tsecond = bhtml.find(typekey, tfirst + typekey.size());
if (tsecond == std::string::npos) return {};
tsecond += typekey.size();
size_t tend = bhtml.find("\"", tsecond);
Type = bhtml.substr(tsecond, tend - tsecond);
// std::cout << "Type: " << Type << std::endl;
// currency, name, Bid, Ask, Coupon, Maturity, Rating, YTM, Seniority, Type
VS ret = {Currency, IssuerName, Bid, Ask, Coupon, Maturity, Rating, YTM, Seniority, Type};
// strip commas so the values cannot break the CSV output
// (erase-remove instead of erasing inside the loop, which invalidates the iterator)
for (std::string& s : ret) {
s.erase(std::remove(s.begin(), s.end(), ','), s.end());
}
return ret;
}
// Finds 'key' in sblk and returns [start, end) offsets of the text inside the
// tag that follows it (between '>' and '<'), or {-1, -1} on failure.
std::pair<int, int> Parser::gparse(std::string sblk, std::string key) {
size_t pos = sblk.find(key);
size_t start = (pos == std::string::npos) ? std::string::npos : sblk.find('>', pos);
if (start == std::string::npos) {
std::cerr << "Searching key error, on gparse stock" << std::endl;
return std::make_pair(-1, -1);
}
start += 1;
size_t end = sblk.find('<', start);
if (end == std::string::npos) {
std::cerr << "Searching key error, on gparse stock" << std::endl;
return std::make_pair(-1, -1);
}
return std::make_pair(start, end);
}
// currency, O, H, L, C, Volume
VS Parser::stock() {
if (this->phtml.size() < 100) { // invalid html
VS invalidret = {"invalid"};
return invalidret;
}
std::string Currency, O, H, L, C, Volume;
// 0. Get Currency
std::string currencykey = "Currency in ";
size_t currencystart = this->phtml.find(currencykey);
size_t currencyend = (currencystart == std::string::npos)
? std::string::npos : this->phtml.find("</s", currencystart);
if (currencystart == std::string::npos || currencyend == std::string::npos) {
std::cerr << "Searching key error, on parse stock" << std::endl;
return {"invalid"};
}
currencystart += currencykey.size(); // skip past "Currency in "
Currency = this->phtml.substr(currencystart, currencyend-currencystart);
// std::cout << Currency << std::endl;
// 1. Get block
std::string blk = this->getYahooBlk(this->phtml);
// 2. Get each data
// 2.1 Open price
std::string openkey = "OPEN-value";
std::pair<int, int> op = this->gparse(blk, openkey);
if (op.first == -1) return {};
O = blk.substr(op.first, op.second-op.first);
// Just for debugging
// std::cout << "Open: " << O << std::endl;
// 2.2 High and Low price
std::string hlkey = "DAYS_RANGE-value";
std::pair<int, int> hlp = this->gparse(blk, hlkey);
if (hlp.first == -1) return {};
std::string HL = blk.substr(hlp.first, hlp.second - hlp.first);
// Just for debugging
// std::cout << "High and Low block: " << HL << std::endl;
size_t Hend = HL.find('-');
if (Hend == std::string::npos) return {};
L = HL.substr(0, Hend-1); // "low - high": the low value comes first
Hend += 2;
H = HL.substr(Hend, HL.size()-Hend);
// Just for debugging
// std::cout << "High: " << H << std::endl;
// std::cout << "Low: " << L << std::endl;
// 2.3 Close price
std::string closekey = "PREV_CLOSE-value";
std::pair<int, int> cp = this->gparse(blk, closekey);
if (cp.first == -1) return {};
C = blk.substr(cp.first, cp.second - cp.first);
// Just for debugging
// std::cout << "Close: " << C << std::endl;
// 2.4 Volume
std::string volumekey = "TD_VOLUME-value";
// Volume changes on every page load, so locate it in a more generic way
size_t volumestart = blk.find(volumekey);
volumestart = (volumestart == std::string::npos)
? std::string::npos : blk.find("value=", volumestart);
if (volumestart == std::string::npos) return {};
volumestart += 7; // skip past value="
size_t volumeend = blk.find("\"", volumestart);
Volume = blk.substr(volumestart, volumeend - volumestart);
Volume.erase(std::remove(Volume.begin(), Volume.end(), ','), Volume.end()); // strip thousands separators
// std::cout << "Volume: " << Volume << std::endl;
VS ret = {Currency, O, H, L, C, Volume};
// filter ret values: strip embedded commas (e.g. "1,234,567") so they
// cannot break the CSV output
for (std::string& s : ret) {
s.erase(std::remove(s.begin(), s.end(), ','), s.end());
}
return ret;
}
// currency, O, H, L, C, Strike, Ex Date, Put/Call, Open interest
VS Parser::option() {
if (this->phtml.size() < 100) {
VS invalidret = {"invalid"};
return invalidret;
}
std::string Currency, O, H, L, C, Strike, ExDate, PutCall, Interest;
// 0.1 Get Currency
std::string currencykey = "Currency in ";
size_t currencystart = this->phtml.find(currencykey);
size_t currencyend = (currencystart == std::string::npos)
? std::string::npos : this->phtml.find("</s", currencystart);
if (currencystart == std::string::npos || currencyend == std::string::npos) {
std::cerr << "Searching key error, on parse option" << std::endl;
return {"invalid"};
}
currencystart += currencykey.size(); // skip past "Currency in "
Currency = this->phtml.substr(currencystart, currencyend-currencystart);
// std::cout << "Currency: " << Currency << std::endl;
// 0.2 Get Put / Call
std::string pckey = "</h1></div>";
size_t pcend = this->phtml.find(pckey);
if (pcend == std::string::npos) {
std::cerr << "Searching key error, on parse option" << std::endl;
return {"invalid"};
}
// the word immediately before the closing tag is "put" or "call"
size_t pcstart = this->phtml.rfind(' ', pcend) + 1; // an rfind miss gives npos + 1 == 0, i.e. start of string
PutCall = this->phtml.substr(pcstart, pcend-pcstart);
if (PutCall != "put" && PutCall != "call") {
std::cout << PutCall << std::endl;
std::cerr << "Searching key error, on parse put/call" << std::endl;
return {"invalid"};
}
// std::cout << "Put or Call: " << PutCall << std::endl;
// 1. Get block
std::string blk = this->getYahooBlk(this->phtml);
// 2. Get each data
// 2.1 Open price
std::string openkey = "OPEN-value";
std::pair<int, int> op = this->gparse(blk, openkey);
if (op.first == -1) return {};
O = blk.substr(op.first, op.second-op.first);
// Just for debugging
// std::cout << "Open: " << O << std::endl;
// 2.2 High and Low price
std::string hlkey = "DAYS_RANGE-value";
std::pair<int, int> hlp = this->gparse(blk, hlkey);
if (hlp.first == -1) return {};
std::string HL = blk.substr(hlp.first, hlp.second - hlp.first);
// Just for debugging
// std::cout << "High and Low block: " << HL << std::endl;
size_t Hend = HL.find('-');
if (Hend == std::string::npos) return {};
L = HL.substr(0, Hend-1); // "low - high": the low value comes first
Hend += 2;
H = HL.substr(Hend, HL.size()-Hend);
// Just for debugging
// std::cout << "High: " << H << std::endl;
// std::cout << "Low: " << L << std::endl;
// 2.3 Close price
std::string closekey = "PREV_CLOSE-value";
std::pair<int, int> cp = this->gparse(blk, closekey);
if (cp.first == -1) return {};
C = blk.substr(cp.first, cp.second - cp.first);
// Just for debugging
// std::cout << "Close: " << C << std::endl;
// 2.4 Strike
std::string strikekey = "STRIKE-value";
std::pair<int, int> strikep = this->gparse(blk, strikekey);
if (strikep.first == -1) return {};
Strike = blk.substr(strikep.first, strikep.second - strikep.first);
// std::cout << "Strike: " << Strike << std::endl;
// 2.5 Expire date
std::string exkey = "EXPIRE_DATE-value";
std::pair<int, int> ExDatep = this->gparse(blk, exkey);
if (ExDatep.first == -1) return {};
ExDate = blk.substr(ExDatep.first, ExDatep.second-ExDatep.first);
// std::cout << "ExDate: " << ExDate << std::endl;
// 2.6 Open Interest
std::string interestkey = "OPEN_INTEREST-value";
std::pair<int, int> Interestp = this->gparse(blk, interestkey);
if (Interestp.first == -1) return {};
Interest = blk.substr(Interestp.first, Interestp.second-Interestp.first);
// std::cout << "Open Interest: " << Interest << std::endl;
VS ret = {Currency, O, H, L, C, Strike, ExDate, PutCall, Interest};
// strip embedded commas so the values cannot break the CSV output
for (std::string& s : ret) {
s.erase(std::remove(s.begin(), s.end(), ','), s.end());
}
return ret;
}
// Extracts the Yahoo summary block between "Previous Close" and the closing tags
std::string Parser::getYahooBlk(std::string shtml) {
std::string begblk = "Previous Close</span>";
std::string endblk = "</tbody></table></div></div></div></div></div>";
size_t begidx = shtml.find(begblk);
size_t endidx = shtml.find(endblk);
if (begidx == std::string::npos || endidx == std::string::npos || endidx < begidx) {
std::cerr << "Searching key error, on function getYahooBlk" << std::endl;
return "";
}
return shtml.substr(begidx, endidx-begidx); // roughly 3000-5000 chars
}
// Reads "code,type" rows from the csv produced by download.js.
// Returns 1 on success, 0 if the file could not be opened.
int readcsv(std::string cpath, VPSS& codetype) {
std::string line;
std::ifstream csvfile(cpath);
if (!csvfile.is_open()) {
// ifstream does not throw by default, so test the stream instead of catching
return 0;
}
while (std::getline(csvfile, line)) {
std::string code, type;
std::string temp = "";
for (char c : line) {
if (c == ',') {
code = temp;
temp = "";
continue;
}
temp += c;
}
type = temp;
codetype.push_back(std::make_pair(code, type));
}
csvfile.close();
return 1;
}
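/* Worked example: the row "HSBC,Stock" yields code = "HSBC" and
* type = "Stock". With more than one comma, code is overwritten at each
* comma and only the last two fields survive, so rows are expected to be
* exactly "code,type".
*/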
/**
* main()
* Entry of the entire parse program
*/
int main() {
// 1. Read csv file from download.js (sheet)
VPSS codetype; // code with its type
readcsv("sheet.csv", codetype);
/*// for debugging
for (auto p : codetype) {
std::cout << p.first << " " << p.second << std::endl;
}
*/
pthread_mutex_init(&mutex, NULL); // default attributes
// 2. Concatenate urls
// {{1, url}, {2, url}} for multithreading
VS indexurl = {};
for (size_t i = 0; i < codetype.size(); i++) {
std::pair<std::string, std::string> ct = codetype[i];
std::string code = ct.first;
std::string type = ct.second;
std::string url = "";
if (type == "Stock" || type == "Option") url = YAHOO + code;
else if (type == "Bond") url = BOND + code;
else {
// do not alert
url = " ";
}
indexurl.push_back(std::to_string(i)+ ',' + type + ',' + url); // need to be parsed
}
// 3. Threads Execute
int nthr = indexurl.size();
std::vector<pthread_t> threadpool(nthr); // a vector instead of a variable-length array (not standard C++)
for (int i = 0; i < nthr; i++) {
std::string iturl = indexurl[i]; // index type url
void* thrurl = static_cast<void*>(new std::string(iturl));
int result = pthread_create(&threadpool[i], NULL, thrimp, thrurl);
if (result != 0) {
std::cerr << "Error on creating thread " << i << std::endl;
delete static_cast<std::string*>(thrurl); // the worker will never run, so free its argument
continue;
}
}
// wait all threads to finish
for (int fj = 0; fj < nthr; fj++) {
pthread_join(threadpool[fj], NULL);
}
/*
// 3.1 Get the number of execution group
int remain = nthr % THRLIMIT;
int numgroup = nthr / THRLIMIT; // do not care about the remain < 20 numofthreads
std::cout << "Split them into: " << numgroup << " groups" << std::endl;
int currindex = 0;
// 3.2 Create all no-remain group and then execute them in parallel
// Then wait for all finish
for (int i = 0; i < numgroup; i++) {
pthread_t threadpool[THRLIMIT]; // Create a new threadpool
for (int j = 0; j < THRLIMIT; j++) {
std::string iturl = indexurl[currindex]; // index type url
void* thrurl = static_cast<void*>(new std::string(iturl));
int result = pthread_create(&threadpool[i], NULL, thrimp, thrurl);
if (result != 0) {
std::cerr << "Error on creating thread " << i << std::endl;
continue;
}
currindex++;
}
// wait all threads to finish
for (int fj = 0; fj < THRLIMIT; fj++) {
pthread_join(threadpool[fj], NULL);
}
// std::this_thread::sleep_for(std::chrono::milliseconds(4000)); // sleep
}
std::cout << "All: " << numgroup << " finish!" << std::endl;
// 3.3 Deal with the remaining threads (if any)
if (remain) {
int remainstart = numgroup * THRLIMIT;
pthread_t remainthreadpool[remain];
for (int i = 0; i < remain; i++) {
std::string iturl = indexurl[remainstart]; // index type url
void* thrurl = static_cast<void*>(new std::string(iturl));
int result = pthread_create(&remainthreadpool[i], NULL, thrimp, thrurl);
if (result != 0) {
std::cerr << "Error on creating thread " << i << std::endl;
continue;
}
remainstart++;
}
// wait all threads to finish
for (int fj = 0; fj < remain; fj++) {
pthread_join(remainthreadpool[fj], NULL);
}
}
*/
/*
// 3.2 Create threads
for (int i = 0; i < nthr; i++) {
std::string iturl = indexurl[i]; // index type url
void* thrurl = static_cast<void*>(new std::string(iturl));
int result = pthread_create(&threadpool[i], NULL, thrimp, thrurl);
if (result != 0) {
std::cerr << "Error on creating thread " << i << std::endl;
continue;
}
}
// 3.3 Execute threads (at most 30 threads one moment)
int i = 0;
while (i < nthr) {
std::cout << "In thr loop..." << std::endl;
int j = i;
i += THRLIMIT;
if (i > nthr) i = nthr;
for (; j < i; j++) {
pthread_join(threadpool[j], NULL);
}
}
for (int i = 0; i < nthr; i++) {
pthread_join(threadpool[i], NULL);
}
*/
std::cout << "Now write them into files..." << std::endl;
// 4. Write them into files
std::ofstream testcsv;
testcsv.open("toutput.csv"); // test current dir
for (const std::pair<const int, VS>& thrdatafield : thrdatafields) { // rows come out sorted by index
VS datafield = thrdatafield.second;
for (std::string s : datafield) {
std::cout << s << ',';
testcsv << s << ',';
}
std::cout << std::endl;
testcsv << '\n';
}
testcsv.close();
/*
// 3. For each url, Create its CurlObj and Parser
// TODO: add multithreading
for (auto pis : indexurl) {
// for each url, in this step, we do not care whether it is valid or not
// (actually, we cannot do that now) just check when push back to datafields
VS datafield = {}; // store this data (S/O/B)
// 1. Parse this "url", get url and type
std::string url = pis.second;
std::cout << url << std::endl;
std::string type = "";
for (auto it = url.begin(); it < url.end(); it++) {
if (*it == ',') {
url.erase(url.begin(), it+1);
break;
}
type.push_back(*it);
}
// 2. Get html data
CurlObj* co = new CurlObj(url);
std::string html = co->getData();
if (html == "") { // invalid
datafield.push_back("Invalid");
datafields.push_back(datafield);
continue;
}
// 3. Create parser and parse data
Parser* parser = new Parser(html); // Create its own parser
if (type == "Stock") {
datafield = parser->stock();
} else if (type == "Option") {
datafield = parser->option();
// last
VS stockbondfield = {" ", " ", " ", " ", " ", " ", " ", " ", " ",
" ", " ", " ", " ", " ", " ", " "};
for (std::string data : datafield) {
stockbondfield.push_back(data);
}
datafield = stockbondfield;
} else if (type == "Bond") {
datafield = parser->bond();
VS stockfield = {" ", " ", " ", " ", " ", " "};
for (std::string data : datafield) {
stockfield.push_back(data);
}
datafield = stockfield;
} else {
// cannot reach here
std::cerr << "Unexpected error, on creating parser with type" << std::endl;
}
// 4. Push valid data into datafields
datafields.push_back(datafield);
// 5. Write to file (testonly)
std::ofstream testcsv;
testcsv.open("coutput.csv"); // test current dir
for (VS datafield : datafields) {
for (std::string s : datafield) {
testcsv << s << ',';
}
testcsv << '\n';
}
}
*/
/*// for debugging
for (auto p : indexurl) {
std::cout << p.first << " " << p.second << std::endl;
}
*/
/*
std::string testcode = "HSBC";
std::string testtype = "Stock";
std::string url = YAHOO + testcode;
// 3. Fetch html
CurlObj* co = new CurlObj(url);
std::string html = co->getData();
// for debugging
std::ofstream out("../test.html");
out << html;
out.close();
// 4. Parse html
Parser* parser = new Parser(html);
VS datafield = parser->stock();
// std::string subhtml = parser->testwrapper();
// 5. Write to csv file
std::ofstream testcsv;
testcsv.open("../coutput.csv");
for (VS datafield : datafields) {
for (std::string s : datafield) {
testcsv << s << ',';
}
}
*/
/* // for debugging
std::ofstream out("test.html");
out << subhtml;
out.close();
*/
}
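// Build & run sketch (hypothetical flags; assumes libcurl and pthreads are
// installed, with paths adjusted as needed):
//   g++ -std=c++11 parse.cpp -lcurl -lpthread -o parse
//   ./parse   # reads sheet.csv, writes toutput.csv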
|
////////////////////////////////////////////////////////////////////////////////
///
/// A buffer class for temporarily storing sound samples; operates as a
/// first-in-first-out pipe.
///
/// Samples are added to the end of the sample buffer with the 'putSamples'
/// function, and are received from the beginning of the buffer by calling
/// the 'receiveSamples' function. The class automatically removes the
/// outputted samples from the buffer, as well as grows the buffer size
/// whenever necessary.
///
/// Author : Copyright (c) Olli Parviainen
/// Author e-mail : oparviai 'at' iki.fi
/// SoundTouch WWW: http://www.surina.net/soundtouch
///
////////////////////////////////////////////////////////////////////////////////
//
// Last changed : $Date: 2012-11-08 20:53:01 +0200 (Thu, 08 Nov 2012) $
// File revision : $Revision: 4 $
//
// $Id: FIFOSampleBuffer.cpp 160 2012-11-08 18:53:01Z oparviai $
//
////////////////////////////////////////////////////////////////////////////////
//
// License :
//
// SoundTouch audio processing library
// Copyright (c) Olli Parviainen
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
////////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <memory.h>
#include <string.h>
#include <assert.h>
#include "../soundtouch/include/FIFOSampleBuffer.h"
using namespace soundtouch;
// Constructor
FIFOSampleBuffer::FIFOSampleBuffer(int numChannels)
{
assert(numChannels > 0);
sizeInBytes = 0; // reasonable initial value
buffer = NULL;
bufferUnaligned = NULL;
samplesInBuffer = 0;
bufferPos = 0;
channels = (uint)numChannels;
ensureCapacity(32); // allocate initial capacity
}
// destructor
FIFOSampleBuffer::~FIFOSampleBuffer()
{
delete[] bufferUnaligned;
bufferUnaligned = NULL;
buffer = NULL;
}
// Sets number of channels, 1 = mono, 2 = stereo
void FIFOSampleBuffer::setChannels(int numChannels)
{
uint usedBytes;
assert(numChannels > 0);
usedBytes = channels * samplesInBuffer;
channels = (uint)numChannels;
samplesInBuffer = usedBytes / channels;
}
// if output location pointer 'bufferPos' isn't zero, 'rewinds' the buffer and
// zeroes this pointer by copying samples from the 'bufferPos' pointer
// location on to the beginning of the buffer.
void FIFOSampleBuffer::rewind()
{
if (buffer && bufferPos)
{
memmove(buffer, ptrBegin(), sizeof(SAMPLETYPE) * channels * samplesInBuffer);
bufferPos = 0;
}
}
// Adds 'numSamples' pcs of samples from the 'samples' memory position to
// the sample buffer.
void FIFOSampleBuffer::putSamples(const SAMPLETYPE *samples, uint nSamples)
{
memcpy(ptrEnd(nSamples), samples, sizeof(SAMPLETYPE) * nSamples * channels);
samplesInBuffer += nSamples;
}
// Increases the number of samples in the buffer without copying any actual
// samples.
//
// This function is used to update the number of samples in the sample buffer
// when accessing the buffer directly with 'ptrEnd' function. Please be
// careful though!
void FIFOSampleBuffer::putSamples(uint nSamples)
{
uint req;
req = samplesInBuffer + nSamples;
ensureCapacity(req);
samplesInBuffer += nSamples;
}
// Returns a pointer to the end of the used part of the sample buffer (i.e.
// where the new samples are to be inserted). This function may be used for
// inserting new samples into the sample buffer directly. Please be careful!
//
// Parameter 'slackCapacity' tells the function how much free capacity (in
// terms of samples) there should be _at least_, so that the caller can
// successfully insert all the required samples into the buffer. When
// necessary, the function grows the buffer size to comply with this
// requirement.
//
// When using this function as means for inserting new samples, also remember
// to increase the sample count afterwards, by calling the
// 'putSamples(numSamples)' function.
SAMPLETYPE *FIFOSampleBuffer::ptrEnd(uint slackCapacity)
{
ensureCapacity(samplesInBuffer + slackCapacity);
return buffer + samplesInBuffer * channels;
}
// Returns a pointer to the beginning of the currently non-outputted samples.
// This function is provided for accessing the output samples directly.
// Please be careful!
//
// When using this function to output samples, also remember to 'remove' the
// outputted samples from the buffer by calling the
// 'receiveSamples(numSamples)' function
SAMPLETYPE *FIFOSampleBuffer::ptrBegin()
{
assert(buffer);
return buffer + bufferPos * channels;
}
// Ensures that the buffer has enough capacity, i.e. space for _at least_
// 'capacityRequirement' number of samples. The buffer is grown in steps of
// 4 kilobytes to eliminate the need for frequent reallocations, as well as
// to round the buffer size up to the virtual memory page size.
void FIFOSampleBuffer::ensureCapacity(uint capacityRequirement)
{
SAMPLETYPE *tempUnaligned, *temp;
if (capacityRequirement > getCapacity())
{
// enlarge the buffer in 4kbyte steps (round up to next 4k boundary)
sizeInBytes = (capacityRequirement * channels * sizeof(SAMPLETYPE) + 4095) & (uint)-4096;
assert(sizeInBytes % 2 == 0);
tempUnaligned = new SAMPLETYPE[sizeInBytes / sizeof(SAMPLETYPE) + 16 / sizeof(SAMPLETYPE)];
if (tempUnaligned == NULL)
{
ST_THROW_RT_ERROR("Couldn't allocate memory!\n");
}
// Align the buffer to begin at 16byte cache line boundary for optimal performance
temp = (SAMPLETYPE *)SOUNDTOUCH_ALIGN_POINTER_16(tempUnaligned);
if (samplesInBuffer)
{
memcpy(temp, ptrBegin(), samplesInBuffer * channels * sizeof(SAMPLETYPE));
}
delete[] bufferUnaligned;
buffer = temp;
bufferUnaligned = tempUnaligned;
bufferPos = 0;
}
else
{
// simply rewind the buffer (if necessary)
rewind();
}
}
// Returns the current buffer capacity in terms of samples
uint FIFOSampleBuffer::getCapacity() const
{
return sizeInBytes / (channels * sizeof(SAMPLETYPE));
}
// Returns the number of samples currently in the buffer
uint FIFOSampleBuffer::numSamples() const
{
return samplesInBuffer;
}
// Output samples from the beginning of the sample buffer. Copies the demanded
// number of samples to output and removes them from the sample buffer. If there
// are fewer than 'maxSamples' samples in the buffer, returns all available.
//
// Returns number of samples copied.
uint FIFOSampleBuffer::receiveSamples(SAMPLETYPE *output, uint maxSamples)
{
uint num;
num = (maxSamples > samplesInBuffer) ? samplesInBuffer : maxSamples;
memcpy(output, ptrBegin(), channels * sizeof(SAMPLETYPE) * num);
return receiveSamples(num);
}
// Removes samples from the beginning of the sample buffer without copying them
// anywhere. Used to reduce the number of samples in the buffer, when accessing
// the sample buffer with the 'ptrBegin' function.
uint FIFOSampleBuffer::receiveSamples(uint maxSamples)
{
if (maxSamples >= samplesInBuffer)
{
uint temp;
temp = samplesInBuffer;
samplesInBuffer = 0;
return temp;
}
samplesInBuffer -= maxSamples;
bufferPos += maxSamples;
return maxSamples;
}
// Returns nonzero if the sample buffer is empty
int FIFOSampleBuffer::isEmpty() const
{
return (samplesInBuffer == 0) ? 1 : 0;
}
// Clears the sample buffer
void FIFOSampleBuffer::clear()
{
samplesInBuffer = 0;
bufferPos = 0;
}
/// allow trimming (downwards) amount of samples in pipeline.
/// Returns adjusted amount of samples
uint FIFOSampleBuffer::adjustAmountOfSamples(uint numSamples)
{
if (numSamples < samplesInBuffer)
{
samplesInBuffer = numSamples;
}
return samplesInBuffer;
}
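// A minimal usage sketch of the FIFO contract implemented above (assumes
// SAMPLETYPE is float, the SoundTouch default; not part of the library):
//
//   FIFOSampleBuffer fifo(2);                // stereo: 2 samples per frame
//   float in[2 * 64] = {};                   // 64 stereo frames
//   fifo.putSamples(in, 64);                 // append at the end
//   float out[2 * 16];
//   uint got = fifo.receiveSamples(out, 16); // consume from the beginning
//   // got == 16 and fifo.numSamples() == 48 at this point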
|
#ifndef MESSAGE_BOX_HPP
#define MESSAGE_BOX_HPP
//MB_ABORTRETRYIGNORE The message box contains three push buttons: Abort, Retry, and Ignore.
//MB_OK The message box contains one push button: OK. This is the default.
//MB_OKCANCEL The message box contains two push buttons: OK and Cancel.
//MB_RETRYCANCEL The message box contains two push buttons: Retry and Cancel.
//MB_YESNO The message box contains two push buttons: Yes and No.
//MB_YESNOCANCEL The message box contains three push buttons: Yes, No, and Cancel.
int DUIMessageBox(HWND hWnd, LPCTSTR lpText, LPCTSTR lpCaption, UINT uType = MB_OK);
class WindowImplBase;
class CMessageBox : public WindowImplBase
{
friend int DUIMessageBox(HWND hWnd, LPCTSTR lpText, LPCTSTR lpCaption, UINT uType);
protected:
CMessageBox(HWND hParent, const tString& caption, const tString& message, UINT uType);
~CMessageBox();
protected:
LPCTSTR GetWindowClassName() const;
virtual void OnFinalMessage(HWND hWnd);
virtual void Init();
virtual LRESULT ResponseDefaultKeyEvent(WPARAM wParam);
virtual tString GetSkinFile();
virtual tString GetSkinFolder();
virtual LONG GetStyle();
virtual CControlUI* CreateControl(LPCTSTR pstrClass);
virtual LRESULT HandleMessage(UINT uMsg, WPARAM wParam, LPARAM lParam);
virtual LRESULT OnSysCommand(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
virtual LRESULT HandleCustomMessage(UINT uMsg, WPARAM wParam, LPARAM lParam, BOOL& bHandled);
protected:
void Notify(TNotifyUI& msg);
void OnPrepare(TNotifyUI& msg);
private:
HWND parent_;
tString caption_;
tString message_;
UINT type_;
};
#endif // MESSAGE_BOX_HPP
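// A minimal usage sketch (hWnd is any owner window handle; uType takes the
// MB_* flags listed above; the return value is assumed to mirror the
// ::MessageBox ID* convention, e.g. IDYES/IDNO/IDCANCEL):
//
//   int choice = DUIMessageBox(hWnd, _T("Save changes?"), _T("Editor"),
//                              MB_YESNOCANCEL);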
|
// ArduinoJson - arduinojson.org
// Copyright Benoit Blanchon 2014-2019
// MIT License
#pragma once
#include "ConstRamStringAdapter.hpp"
namespace ARDUINOJSON_NAMESPACE {
class String {
public:
String() : _data(0), _isStatic(true) {}
String(const char* data, bool isStaticData = true)
: _data(data), _isStatic(isStaticData) {}
const char* c_str() const {
return _data;
}
bool isNull() const {
return !_data;
}
bool isStatic() const {
return _isStatic;
}
friend bool operator==(String lhs, String rhs) {
if (lhs._data == rhs._data) return true;
if (!lhs._data) return false;
if (!rhs._data) return false;
return strcmp(lhs._data, rhs._data) == 0;
}
private:
const char* _data;
bool _isStatic;
};
class StringAdapter : public RamStringAdapter {
public:
StringAdapter(const String& str)
: RamStringAdapter(str.c_str()), _isStatic(str.isStatic()) {}
bool isStatic() const {
return _isStatic;
}
/* const char* save(MemoryPool* pool) const {
if (_isStatic) return c_str();
return RamStringAdapter::save(pool);
}*/
private:
bool _isStatic;
};
template <>
struct IsString<String> : true_type {};
inline StringAdapter adaptString(const String& str) {
return StringAdapter(str);
}
} // namespace ARDUINOJSON_NAMESPACE
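// A minimal usage sketch (internal API, shown only to illustrate the adapter;
// ARDUINOJSON_NAMESPACE resolves to the library's versioned namespace):
//
//   ARDUINOJSON_NAMESPACE::String s("hello");  // wraps a static string
//   ARDUINOJSON_NAMESPACE::StringAdapter a = adaptString(s);
//   bool stat = a.isStatic();  // true: the pool would not need to copy it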
|
/**
* Definition for a binary tree node.
* struct TreeNode {
* int val;
* TreeNode *left;
* TreeNode *right;
* TreeNode() : val(0), left(nullptr), right(nullptr) {}
* TreeNode(int x) : val(x), left(nullptr), right(nullptr) {}
* TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {}
* };
*/
class Solution {
public:
TreeNode* buildTree(vector<int>& inorder, vector<int>& postorder) {
// map each value to its index in the inorder traversal for O(1) lookups
unordered_map<int, int> inordermap = populateMap(inorder);
int postIndex = postorder.size() - 1; // the last postorder element is the root
return constructTree(postorder, inordermap, 0, inorder.size() - 1, postIndex);
}
unordered_map<int, int> populateMap(vector<int>& inorder)
{
unordered_map<int, int> mp;
for (int i = 0; i < (int)inorder.size(); i++)
{
mp[inorder[i]] = i;
}
return mp;
}
TreeNode* constructTree(vector<int>& postorder, unordered_map<int, int>& inordermap,
int start, int end, int& postIndex)
{
if (start > end) return NULL;
// consume postorder from the back: root first, then right subtree, then left
TreeNode* root = new TreeNode(postorder[postIndex]);
int currIndex = inordermap[postorder[postIndex]];
postIndex -= 1;
// build right before left, mirroring the reversed postorder order
root->right = constructTree(postorder, inordermap, currIndex + 1, end, postIndex);
root->left = constructTree(postorder, inordermap, start, currIndex - 1, postIndex);
return root;
}
};
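/* Worked example: inorder = [9,3,15,20,7], postorder = [9,15,7,20,3].
* postIndex starts at 4, so the root is 3 (inorder index 1). The right
* subtree is built first from inorder[2..4]: 20 becomes the right child
* with 15 and 7 as its children; the left subtree from inorder[0..0] then
* consumes 9. Result: [3,9,20,null,null,15,7].
*/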
|
// -*- C++ -*-
//
// michael a.g. aïvázis
// orthologue
// (c) 1998-2019 all rights reserved
//
// access the low level interface to create a file that can fit a grid of a specified size
//
// N.B.: this test leaves behind a file named "grid.dat" that is used by the other tests; it
// must be cleaned up after the tests are run
// portability
#include <portinfo>
// externals
#include <unistd.h>
// support
#include <pyre/memory.h>
// entry point
int main() {
// the cell type
typedef double cell_t;
// the system page size (the file below holds two pages)
size_t page = ::getpagesize();
// the name of the file
pyre::memory::uri_t name {"grid.dat"};
// turn on the debug channel
pyre::journal::debug_t("pyre.geometry.direct").activate();
// create a file that can fit the payload
pyre::memory::direct_t<cell_t>::create(name, 2*page);
// all done
return 0;
}
// end of file
|
//===--- TypeLowering.cpp - Type information for SILGen -------------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "libsil"
#include "swift/AST/AnyFunctionRef.h"
#include "swift/AST/ASTContext.h"
#include "swift/AST/CanTypeVisitor.h"
#include "swift/SIL/SILInstruction.h"
#include "swift/AST/Decl.h"
#include "swift/AST/DiagnosticEngine.h"
#include "swift/AST/DiagnosticsSIL.h"
#include "swift/AST/Expr.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/AST/LazyResolver.h"
#include "swift/AST/Module.h"
#include "swift/AST/NameLookup.h"
#include "swift/AST/ParameterList.h"
#include "swift/AST/Pattern.h"
#include "swift/AST/PrettyStackTrace.h"
#include "swift/AST/PropertyWrappers.h"
#include "swift/AST/TypeDifferenceVisitor.h"
#include "swift/AST/Types.h"
#include "swift/ClangImporter/ClangModule.h"
#include "swift/SIL/PrettyStackTrace.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SIL/SILModule.h"
#include "swift/SIL/TypeLowering.h"
#include "clang/AST/Type.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
using namespace swift;
using namespace Lowering;
namespace {
/// A CRTP type visitor for deciding whether the metatype for a type
/// is a singleton type, i.e. whether there can only ever be one
/// such value.
struct HasSingletonMetatype : CanTypeVisitor<HasSingletonMetatype, bool> {
/// Class metatypes have non-trivial representation due to the
/// possibility of subclassing.
bool visitClassType(CanClassType type) {
return false;
}
bool visitBoundGenericClassType(CanBoundGenericClassType type) {
return false;
}
bool visitDynamicSelfType(CanDynamicSelfType type) {
return false;
}
/// Dependent types have non-trivial representation in case they
/// instantiate to a class metatype.
bool visitGenericTypeParamType(CanGenericTypeParamType type) {
return false;
}
bool visitDependentMemberType(CanDependentMemberType type) {
return false;
}
/// Archetype metatypes have non-trivial representation in case
/// they instantiate to a class metatype.
bool visitArchetypeType(CanArchetypeType type) {
return false;
}
/// All levels of class metatypes support subtyping.
bool visitMetatypeType(CanMetatypeType type) {
return visit(type.getInstanceType());
}
/// Everything else is trivial. Note that ordinary metatypes of
/// existential types are still singleton.
bool visitType(CanType type) {
return true;
}
};
} // end anonymous namespace
/// Does the metatype for the given type have a known-singleton
/// representation?
static bool hasSingletonMetatype(CanType instanceType) {
return HasSingletonMetatype().visit(instanceType);
}
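// For example, the metatype of a struct (e.g. Int.Type) is a singleton and
// needs no runtime representation, while a class metatype is not: a value of
// type Base.Type may dynamically be Derived.self.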
CaptureKind TypeConverter::getDeclCaptureKind(CapturedValue capture,
TypeExpansionContext expansion) {
auto decl = capture.getDecl();
auto *var = cast<VarDecl>(decl);
assert(var->hasStorage() &&
"should not have attempted to directly capture this variable");
// If this is a non-address-only stored 'let' constant, we can capture it
// by value. If it is address-only, then we can't load it, so capture it
// by its address (like a var) instead.
if (!var->supportsMutation() &&
!getTypeLowering(var->getType(),
TypeExpansionContext::noOpaqueTypeArchetypesSubstitution(
expansion.getResilienceExpansion()))
.isAddressOnly())
return CaptureKind::Constant;
// In-out parameters are captured by address.
if (auto *param = dyn_cast<ParamDecl>(var)) {
if (param->isInOut())
return CaptureKind::StorageAddress;
}
// Reference storage types can appear in a capture list, which means
// we might allocate boxes to store the captures. However, those boxes
// have the same lifetime as the closure itself, so we must capture
// the box itself and not the payload, even if the closure is noescape,
// otherwise they will be destroyed when the closure is formed.
if (var->getType()->is<ReferenceStorageType>()) {
return CaptureKind::Box;
}
// For 'let' constants
if (!var->supportsMutation()) {
assert(getTypeLowering(
var->getType(),
TypeExpansionContext::noOpaqueTypeArchetypesSubstitution(
expansion.getResilienceExpansion()))
.isAddressOnly());
return CaptureKind::Immutable;
}
// If we're capturing into a non-escaping closure, we can generally just
// capture the address of the value as no-escape.
return (capture.isNoEscape()
? CaptureKind::StorageAddress
: CaptureKind::Box);
}
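// Illustration of the cases above (in Swift source terms): a loadable 'let'
// is captured as Constant; an 'inout' parameter as StorageAddress; a
// reference-storage (weak/unowned) capture as Box; an address-only 'let' as
// Immutable; and a mutable 'var' as StorageAddress when the closure is
// noescape, otherwise as Box.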
using RecursiveProperties = TypeLowering::RecursiveProperties;
static RecursiveProperties
classifyType(AbstractionPattern origType, CanType type,
TypeConverter &TC, TypeExpansionContext expansion);
namespace {
/// A CRTP helper class for doing things that depends on type
/// classification.
template <class Impl, class RetTy>
class TypeClassifierBase
: public CanTypeVisitor<Impl, RetTy, AbstractionPattern, IsTypeExpansionSensitive_t>
{
Impl &asImpl() { return *static_cast<Impl*>(this); }
protected:
TypeConverter &TC;
TypeExpansionContext Expansion;
TypeClassifierBase(TypeConverter &TC, TypeExpansionContext Expansion)
: TC(TC), Expansion(Expansion) {}
public:
// The subclass should implement:
// // Trivial, fixed-layout, and non-address-only.
// RetTy handleTrivial(CanType);
// RetTy handleTrivial(CanType, RecursiveProperties properties);
// // A reference type.
// RetTy handleReference(CanType);
// RetTy handleReference(CanType, RecursiveProperties properties);
// // Non-trivial and address-only.
// RetTy handleAddressOnly(CanType, RecursiveProperties properties);
// and, if it doesn't override handleTupleType,
// // An aggregate type that's non-trivial.
// RetTy handleNonTrivialAggregate(CanType, RecursiveProperties properties);
//
// Alternatively, it can just implement:
// RetTy handle(CanType, RecursiveProperties properties);
/// Handle a trivial, fixed-size, loadable type.
RetTy handleTrivial(CanType type, RecursiveProperties properties) {
return asImpl().handle(type, properties);
}
RetTy handleAddressOnly(CanType type, RecursiveProperties properties) {
return asImpl().handle(type, properties);
}
RetTy handleNonTrivialAggregate(CanType type,
RecursiveProperties properties) {
return asImpl().handle(type, properties);
}
RetTy handleTrivial(CanType type) {
return asImpl().handleTrivial(type, RecursiveProperties::forTrivial());
}
RetTy handleReference(CanType type) {
return handleReference(type, RecursiveProperties::forReference());
}
RetTy handleReference(CanType type, RecursiveProperties properties) {
return asImpl().handle(type, properties);
}
RecursiveProperties
mergeIsTypeExpansionSensitive(IsTypeExpansionSensitive_t isSensitive,
RecursiveProperties props) {
if (isSensitive == IsTypeExpansionSensitive)
props.setTypeExpansionSensitive(isSensitive);
return props;
}
RecursiveProperties
getTrivialRecursiveProperties(IsTypeExpansionSensitive_t isSensitive) {
return mergeIsTypeExpansionSensitive(isSensitive,
RecursiveProperties::forTrivial());
}
RecursiveProperties
getReferenceRecursiveProperties(IsTypeExpansionSensitive_t isSensitive) {
return mergeIsTypeExpansionSensitive(isSensitive,
RecursiveProperties::forReference());
}
RecursiveProperties
getOpaqueRecursiveProperties(IsTypeExpansionSensitive_t isSensitive) {
return mergeIsTypeExpansionSensitive(isSensitive,
RecursiveProperties::forOpaque());
}
#define IMPL(TYPE, LOWERING) \
RetTy visit##TYPE##Type(Can##TYPE##Type type, AbstractionPattern orig, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handle##LOWERING(type, \
get##LOWERING##RecursiveProperties(isSensitive)); \
}
IMPL(BuiltinInteger, Trivial)
IMPL(BuiltinIntegerLiteral, Trivial)
IMPL(BuiltinFloat, Trivial)
IMPL(BuiltinRawPointer, Trivial)
IMPL(BuiltinRawUnsafeContinuation, Trivial)
IMPL(BuiltinJob, Trivial)
IMPL(BuiltinNativeObject, Reference)
IMPL(BuiltinBridgeObject, Reference)
IMPL(BuiltinVector, Trivial)
IMPL(SILToken, Trivial)
IMPL(Class, Reference)
IMPL(BoundGenericClass, Reference)
IMPL(AnyMetatype, Trivial)
IMPL(Module, Trivial)
#undef IMPL
RetTy visitBuiltinUnsafeValueBufferType(
CanBuiltinUnsafeValueBufferType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().handleAddressOnly(type, {IsNotTrivial, IsFixedABI,
IsAddressOnly, IsNotResilient,
isSensitive});
}
RetTy visitAnyFunctionType(CanAnyFunctionType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
switch (type->getRepresentation()) {
case AnyFunctionType::Representation::Swift:
case AnyFunctionType::Representation::Block:
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
case AnyFunctionType::Representation::CFunctionPointer:
case AnyFunctionType::Representation::Thin:
return asImpl().handleTrivial(
type, getTrivialRecursiveProperties(isSensitive));
}
llvm_unreachable("bad function representation");
}
RetTy visitSILFunctionType(CanSILFunctionType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
// Handle `@differentiable` and `@differentiable(linear)` functions.
switch (type->getDifferentiabilityKind()) {
case DifferentiabilityKind::Normal:
return asImpl().visitNormalDifferentiableSILFunctionType(
type, mergeIsTypeExpansionSensitive(
isSensitive,
getNormalDifferentiableSILFunctionTypeRecursiveProperties(
type, origType)));
case DifferentiabilityKind::Linear:
return asImpl().visitLinearDifferentiableSILFunctionType(
type, mergeIsTypeExpansionSensitive(
isSensitive,
getLinearDifferentiableSILFunctionTypeRecursiveProperties(
type, origType)));
case DifferentiabilityKind::NonDifferentiable:
break;
}
// Only escaping closures are references.
bool isSwiftEscaping = type->getExtInfo().isNoEscape() &&
type->getExtInfo().getRepresentation() ==
SILFunctionType::Representation::Thick;
if (type->getExtInfo().hasContext() && !isSwiftEscaping)
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
// Noescape closures are trivial types.
return asImpl().handleTrivial(type,
getTrivialRecursiveProperties(isSensitive));
}
RecursiveProperties
getNormalDifferentiableSILFunctionTypeRecursiveProperties(
CanSILFunctionType type, AbstractionPattern origType) {
auto &M = TC.M;
auto origTy = type->getWithoutDifferentiability();
// Pass the `AbstractionPattern` generic signature to
// `SILFunctionType:getAutoDiffDerivativeFunctionType` for correct type
// lowering.
auto jvpTy = origTy->getAutoDiffDerivativeFunctionType(
type->getDifferentiabilityParameterIndices(),
type->getDifferentiabilityResultIndices(),
AutoDiffDerivativeFunctionKind::JVP, TC,
LookUpConformanceInModule(&M), CanGenericSignature());
auto vjpTy = origTy->getAutoDiffDerivativeFunctionType(
type->getDifferentiabilityParameterIndices(),
type->getDifferentiabilityResultIndices(),
AutoDiffDerivativeFunctionKind::VJP, TC,
LookUpConformanceInModule(&M), CanGenericSignature());
RecursiveProperties props;
props.addSubobject(classifyType(origType, origTy, TC, Expansion));
props.addSubobject(classifyType(origType, jvpTy, TC, Expansion));
props.addSubobject(classifyType(origType, vjpTy, TC, Expansion));
return props;
}
RecursiveProperties
getLinearDifferentiableSILFunctionTypeRecursiveProperties(
CanSILFunctionType type, AbstractionPattern origType) {
auto &M = TC.M;
auto origTy = type->getWithoutDifferentiability();
auto transposeTy = origTy->getAutoDiffTransposeFunctionType(
type->getDifferentiabilityParameterIndices(), TC,
LookUpConformanceInModule(&M), origType.getGenericSignatureOrNull());
RecursiveProperties props;
props.addSubobject(classifyType(origType, origTy, TC, Expansion));
props.addSubobject(classifyType(origType, transposeTy, TC, Expansion));
return props;
}
RetTy visitNormalDifferentiableSILFunctionType(
CanSILFunctionType type, RecursiveProperties props) {
return handleAggregateByProperties(type, props);
}
RetTy visitLinearDifferentiableSILFunctionType(
CanSILFunctionType type, RecursiveProperties props) {
return handleAggregateByProperties(type, props);
}
RetTy visitLValueType(CanLValueType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t) {
llvm_unreachable("shouldn't get an l-value type here");
}
RetTy visitInOutType(CanInOutType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t) {
llvm_unreachable("shouldn't get an inout type here");
}
RetTy visitErrorType(CanErrorType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().handleTrivial(type,
getTrivialRecursiveProperties(isSensitive));
}
// Dependent types can be lowered according to their corresponding
// abstraction pattern.
RetTy visitAbstractTypeParamType(CanType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
if (origType.isTypeParameterOrOpaqueArchetype() ||
origType.isOpaqueFunctionOrOpaqueDerivativeFunction()) {
if (origType.requiresClass()) {
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
} else {
return asImpl().handleAddressOnly(
type, getOpaqueRecursiveProperties(isSensitive));
}
} else {
// If the abstraction pattern provides a concrete type, lower as that
// type. This can occur if the abstraction pattern provides a more
// constrained generic signature with more same-type constraints than
// the original declaration whose type we're lowering.
return asImpl().visit(origType.getType(), origType, isSensitive);
}
}
RetTy visitGenericTypeParamType(CanGenericTypeParamType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return visitAbstractTypeParamType(type, origType, isSensitive);
}
RetTy visitDependentMemberType(CanDependentMemberType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return visitAbstractTypeParamType(type, origType, isSensitive);
}
Type getConcreteReferenceStorageReferent(Type type) {
if (type->isTypeParameter()) {
return TC.Context.getAnyObjectType();
}
return type;
}
#define NEVER_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
RetTy visit##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handleAddressOnly(type, {IsNotTrivial, \
IsFixedABI, \
IsAddressOnly, \
IsNotResilient, \
isSensitive}); \
}
#define ALWAYS_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
RetTy visit##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handleReference(type, \
getReferenceRecursiveProperties(isSensitive)); \
}
#define SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
RetTy visitLoadable##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handleReference(type, \
getReferenceRecursiveProperties(isSensitive)); \
} \
RetTy visitAddressOnly##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handleAddressOnly(type, {IsNotTrivial, \
IsFixedABI, \
IsAddressOnly, \
IsNotResilient, \
isSensitive}); \
} \
RetTy visit##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
auto referentType = \
type->getReferentType()->lookThroughSingleOptionalType(); \
auto concreteType = getConcreteReferenceStorageReferent(referentType); \
if (Name##StorageType::get(concreteType, TC.Context) \
->isLoadable(Expansion.getResilienceExpansion())) { \
return asImpl().visitLoadable##Name##StorageType(type, origType, \
isSensitive); \
} else { \
return asImpl().visitAddressOnly##Name##StorageType(type, origType, \
isSensitive); \
} \
}
#define UNCHECKED_REF_STORAGE(Name, ...) \
RetTy visit##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return asImpl().handleTrivial(type, \
getTrivialRecursiveProperties(isSensitive)); \
}
#include "swift/AST/ReferenceStorage.def"
RetTy visitOpaqueTypeArchetypeType(CanOpaqueTypeArchetypeType ty,
AbstractionPattern origType,
IsTypeExpansionSensitive_t) {
auto replacedTy = substOpaqueTypesWithUnderlyingTypes(ty, Expansion);
if (replacedTy == ty)
return visitArchetypeType(ty, origType, IsTypeExpansionSensitive);
return this->visit(replacedTy, origType, IsTypeExpansionSensitive);
}
RetTy visitArchetypeType(CanArchetypeType ty, AbstractionPattern origType) {
return visitArchetypeType(ty, origType, IsNotTypeExpansionSensitive);
}
RetTy
visitArchetypeType(CanArchetypeType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
auto LayoutInfo = type->getLayoutConstraint();
if (LayoutInfo) {
if (LayoutInfo->isFixedSizeTrivial()) {
return asImpl().handleTrivial(
type, getTrivialRecursiveProperties(isSensitive));
}
if (LayoutInfo->isAddressOnlyTrivial()) {
auto properties = getTrivialRecursiveProperties(isSensitive);
properties.setAddressOnly();
return asImpl().handleAddressOnly(type, properties);
}
if (LayoutInfo->isRefCounted()) {
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
}
}
return asImpl().handleAddressOnly(
type, getOpaqueRecursiveProperties(isSensitive));
}
RetTy visitExistentialType(CanType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
switch (SILType::getPrimitiveObjectType(type)
.getPreferredExistentialRepresentation()) {
case ExistentialRepresentation::None:
llvm_unreachable("not an existential type?!");
// Opaque existentials are address-only.
case ExistentialRepresentation::Opaque:
return asImpl().handleAddressOnly(type, {IsNotTrivial,
IsFixedABI,
IsAddressOnly,
IsNotResilient,
isSensitive});
// Class-constrained and boxed existentials are refcounted.
case ExistentialRepresentation::Class:
case ExistentialRepresentation::Boxed:
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
// Existential metatypes are trivial.
case ExistentialRepresentation::Metatype:
return asImpl().handleTrivial(
type, getTrivialRecursiveProperties(isSensitive));
}
llvm_unreachable("Unhandled ExistentialRepresentation in switch.");
}
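// Concretely: an opaque existential such as 'any P' lowers as address-only;
// class-bound ('AnyObject') and boxed ('Error') existentials lower as
// refcounted references; and existential metatypes ('any P.Type') are trivial.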
RetTy visitProtocolType(CanProtocolType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return visitExistentialType(type, origType, isSensitive);
}
RetTy visitProtocolCompositionType(CanProtocolCompositionType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return visitExistentialType(type, origType, isSensitive);
}
// Enums depend on their enumerators.
RetTy visitEnumType(CanEnumType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().visitAnyEnumType(type, origType, type->getDecl(),
isSensitive);
}
RetTy visitBoundGenericEnumType(CanBoundGenericEnumType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().visitAnyEnumType(type, origType, type->getDecl(),
isSensitive);
}
// Structs depend on their physical fields.
RetTy visitStructType(CanStructType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().visitAnyStructType(type, origType, type->getDecl(),
isSensitive);
}
RetTy visitBoundGenericStructType(CanBoundGenericStructType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return asImpl().visitAnyStructType(type, origType, type->getDecl(),
isSensitive);
}
// Tuples depend on their elements.
RetTy visitTupleType(CanTupleType type, AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
RecursiveProperties props;
for (unsigned i = 0, e = type->getNumElements(); i < e; ++i) {
props.addSubobject(classifyType(origType.getTupleElementType(i),
type.getElementType(i),
TC, Expansion));
}
props = mergeIsTypeExpansionSensitive(isSensitive, props);
return asImpl().handleAggregateByProperties(type, props);
}
RetTy visitDynamicSelfType(CanDynamicSelfType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
return this->visit(type.getSelfType(), origType, isSensitive);
}
RetTy visitSILBlockStorageType(CanSILBlockStorageType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
// Should not be loaded.
return asImpl().handleAddressOnly(type, {IsNotTrivial,
IsFixedABI,
IsAddressOnly,
IsNotResilient,
isSensitive});
}
RetTy visitSILBoxType(CanSILBoxType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
// Boxes are reference-counted; the box payload itself is not loaded here.
return asImpl().handleReference(
type, getReferenceRecursiveProperties(isSensitive));
}
RetTy handleAggregateByProperties(CanType type, RecursiveProperties props) {
if (props.isAddressOnly()) {
return asImpl().handleAddressOnly(type, props);
}
assert(props.isFixedABI() && "unsupported combination for now");
if (props.isTrivial()) {
return asImpl().handleTrivial(type, props);
}
return asImpl().handleNonTrivialAggregate(type, props);
}
};
class TypeClassifier :
public TypeClassifierBase<TypeClassifier, RecursiveProperties> {
public:
TypeClassifier(TypeConverter &TC,
TypeExpansionContext Expansion)
: TypeClassifierBase(TC, Expansion) {}
RecursiveProperties handle(CanType type, RecursiveProperties properties) {
return properties;
}
RecursiveProperties visitAnyEnumType(CanType type,
AbstractionPattern origType,
EnumDecl *D,
IsTypeExpansionSensitive_t isSensitive) {
// We have to look through optionals here without grabbing the
// type lowering because the way that optionals are reabstracted
// can trip recursion checks if we try to build a lowered type.
if (D->isOptionalDecl()) {
return visit(type.getOptionalObjectType(),
origType.getOptionalObjectType(),
isSensitive);
}
// Consult the type lowering.
auto &lowering = TC.getTypeLowering(origType, type, Expansion);
return handleClassificationFromLowering(type, lowering, isSensitive);
}
RecursiveProperties visitAnyStructType(CanType type,
AbstractionPattern origType,
StructDecl *D,
IsTypeExpansionSensitive_t isSensitive) {
// Consult the type lowering.
auto &lowering = TC.getTypeLowering(origType, type, Expansion);
return handleClassificationFromLowering(type, lowering, isSensitive);
}
private:
RecursiveProperties
handleClassificationFromLowering(CanType type, const TypeLowering &lowering,
IsTypeExpansionSensitive_t isSensitive) {
return handle(type, mergeIsTypeExpansionSensitive(
isSensitive, lowering.getRecursiveProperties()));
}
};
} // end anonymous namespace
static RecursiveProperties classifyType(AbstractionPattern origType,
CanType type,
TypeConverter &tc,
TypeExpansionContext expansion) {
return TypeClassifier(tc, expansion)
.visit(type, origType, IsNotTypeExpansionSensitive);
}
/// True if the type, or the referenced type of an address
/// type, is address-only. For example, it could be a resilient struct or
/// something of unknown size.
bool SILType::isAddressOnly(CanType type,
TypeConverter &tc,
CanGenericSignature sig,
TypeExpansionContext expansion) {
return classifyType(AbstractionPattern(sig, type),
type, tc, expansion).isAddressOnly();
}
namespace {
/// A class for types that can be loaded and stored in SIL.
/// This always includes loadable types, but can include address-only types if
/// opaque values are passed by value.
class LoadableTypeLowering : public TypeLowering {
protected:
LoadableTypeLowering(SILType type, RecursiveProperties properties,
IsReferenceCounted_t isRefCounted,
TypeExpansionContext forExpansion)
: TypeLowering(type, properties, isRefCounted, forExpansion) {}
public:
void emitDestroyAddress(SILBuilder &B, SILLocation loc,
SILValue addr) const override {
SILValue value = emitLoad(B, loc, addr, LoadOwnershipQualifier::Take);
emitDestroyValue(B, loc, value);
}
void emitDestroyRValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
emitDestroyValue(B, loc, value);
}
void emitCopyInto(SILBuilder &B, SILLocation loc,
SILValue src, SILValue dest, IsTake_t isTake,
IsInitialization_t isInit) const override {
SILValue value = emitLoadOfCopy(B, loc, src, isTake);
emitStoreOfCopy(B, loc, value, dest, isInit);
}
};
/// A class for trivial, fixed-layout, loadable types.
class TrivialTypeLowering final : public LoadableTypeLowering {
public:
TrivialTypeLowering(SILType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: LoadableTypeLowering(type, properties, IsNotReferenceCounted,
forExpansion) {
assert(properties.isFixedABI());
assert(properties.isTrivial());
assert(!properties.isAddressOnly());
}
SILValue emitLoadOfCopy(SILBuilder &B, SILLocation loc, SILValue addr,
IsTake_t isTake) const override {
return emitLoad(B, loc, addr, LoadOwnershipQualifier::Trivial);
}
void emitStoreOfCopy(SILBuilder &B, SILLocation loc,
SILValue value, SILValue addr,
IsInitialization_t isInit) const override {
emitStore(B, loc, value, addr, StoreOwnershipQualifier::Trivial);
}
void emitStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual) const override {
if (B.getFunction().hasOwnership()) {
B.createStore(loc, value, addr, StoreOwnershipQualifier::Trivial);
return;
}
B.createStore(loc, value, addr, StoreOwnershipQualifier::Unqualified);
}
SILValue emitLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual) const override {
if (B.getFunction().hasOwnership())
return B.createLoad(loc, addr, LoadOwnershipQualifier::Trivial);
return B.createLoad(loc, addr, LoadOwnershipQualifier::Unqualified);
}
SILValue emitLoweredLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual,
TypeExpansionKind) const override {
if (B.getFunction().hasOwnership())
return B.createLoad(loc, addr, LoadOwnershipQualifier::Trivial);
return B.createLoad(loc, addr, LoadOwnershipQualifier::Unqualified);
}
void emitLoweredStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual,
Lowering::TypeLowering::TypeExpansionKind
expansionKind) const override {
auto storeQual = [&]() -> StoreOwnershipQualifier {
if (B.getFunction().hasOwnership())
return StoreOwnershipQualifier::Trivial;
return StoreOwnershipQualifier::Unqualified;
}();
B.createStore(loc, value, addr, storeQual);
}
void emitDestroyAddress(SILBuilder &B, SILLocation loc,
SILValue addr) const override {
// Trivial
}
void
emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value,
TypeExpansionKind loweringStyle) const override {
// Trivial
}
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue value,
TypeExpansionKind style) const override {
// Trivial
return value;
}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
// Trivial
return value;
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
// Trivial
}
};
class NonTrivialLoadableTypeLowering : public LoadableTypeLowering {
public:
NonTrivialLoadableTypeLowering(SILType type,
RecursiveProperties properties,
IsReferenceCounted_t isRefCounted,
TypeExpansionContext forExpansion)
: LoadableTypeLowering(type, properties, isRefCounted, forExpansion) {
assert(!properties.isTrivial());
}
SILValue emitLoadOfCopy(SILBuilder &B, SILLocation loc,
SILValue addr, IsTake_t isTake) const override {
auto qual =
isTake ? LoadOwnershipQualifier::Take : LoadOwnershipQualifier::Copy;
return emitLoad(B, loc, addr, qual);
}
void emitStoreOfCopy(SILBuilder &B, SILLocation loc,
SILValue newValue, SILValue addr,
IsInitialization_t isInit) const override {
auto qual = isInit ? StoreOwnershipQualifier::Init
: StoreOwnershipQualifier::Assign;
emitStore(B, loc, newValue, addr, qual);
}
void emitStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual) const override {
if (B.getFunction().hasOwnership()) {
B.createStore(loc, value, addr, qual);
return;
}
if (qual != StoreOwnershipQualifier::Assign) {
B.createStore(loc, value, addr, StoreOwnershipQualifier::Unqualified);
return;
}
// If the ownership qualifier is [assign], then we need to eliminate the
// old value.
//
// 1. Load old value.
// 2. Store new value.
// 3. Release old value.
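    //
    // Sketch of the emitted non-OSSA SIL (value names are illustrative):
    //   %old = load %addr
    //   store %value to %addr
    //   strong_release %old   ; or release_value, depending on the type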
SILValue old =
B.createLoad(loc, addr, LoadOwnershipQualifier::Unqualified);
B.createStore(loc, value, addr, StoreOwnershipQualifier::Unqualified);
B.emitDestroyValueOperation(loc, old);
}
SILValue emitLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual) const override {
if (B.getFunction().hasOwnership())
return B.createLoad(loc, addr, qual);
SILValue loadValue =
B.createLoad(loc, addr, LoadOwnershipQualifier::Unqualified);
    // If we are not performing a copy, just return the loaded value...
if (qual != LoadOwnershipQualifier::Copy)
return loadValue;
// Otherwise, emit the copy value operation and return our original
// value. This is a small non-ownership optimization to not destabilize
// the optimizer pipeline.
//
// TODO: Once the pass pipeline is fixed, we should evaluate if we can do
// this again.
B.emitCopyValueOperation(loc, loadValue);
return loadValue;
}
SILValue emitLoweredLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual,
TypeExpansionKind expansionKind) const override {
if (B.getFunction().hasOwnership())
return B.createLoad(loc, addr, qual);
SILValue loadValue =
B.createLoad(loc, addr, LoadOwnershipQualifier::Unqualified);
    // If we are not performing a copy, just return the loaded value...
    if (qual != LoadOwnershipQualifier::Copy)
      return loadValue;
    // Otherwise, emit the lowered copy value operation and return our
    // original value. This is a small non-ownership optimization to not
    // destabilize the optimizer pipeline.
    //
    // TODO: Once the pass pipeline is fixed, we should evaluate if we can do
    // this again.
    B.emitLoweredCopyValueOperation(loc, loadValue, expansionKind);
    return loadValue;
}
void emitLoweredStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual,
Lowering::TypeLowering::TypeExpansionKind
expansionKind) const override {
if (B.getFunction().hasOwnership()) {
B.createStore(loc, value, addr, qual);
return;
}
if (qual == StoreOwnershipQualifier::Assign) {
SILValue oldValue = B.emitLoadValueOperation(
loc, addr, LoadOwnershipQualifier::Unqualified);
B.emitLoweredDestroyValueOperation(loc, oldValue, expansionKind);
}
B.createStore(loc, value, addr, StoreOwnershipQualifier::Unqualified);
}
};
/// A CRTP helper class for loadable but non-trivial aggregate types.
template <class Impl, class IndexType>
class LoadableAggTypeLowering : public NonTrivialLoadableTypeLowering {
public:
/// A child of this aggregate type.
class Child {
/// The index of this child, used to project it out.
IndexType Index;
/// The aggregate's type lowering.
const TypeLowering *Lowering;
public:
Child(IndexType index, const TypeLowering &lowering)
: Index(index), Lowering(&lowering) {}
const TypeLowering &getLowering() const { return *Lowering; }
IndexType getIndex() const { return Index; }
bool isTrivial() const { return Lowering->isTrivial(); }
};
private:
const Impl &asImpl() const { return static_cast<const Impl&>(*this); }
Impl &asImpl() { return static_cast<Impl&>(*this); }
// A reference to the lazily-allocated children vector.
mutable ArrayRef<Child> Children = {};
protected:
virtual void lowerChildren(TypeConverter &TC, SmallVectorImpl<Child> &children)
const = 0;
public:
LoadableAggTypeLowering(CanType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: NonTrivialLoadableTypeLowering(SILType::getPrimitiveObjectType(type),
properties, IsNotReferenceCounted,
forExpansion) {
}
/// CRTP Default implementation of destructuring an aggregate value.
///
  /// Uses getChildren() and emitRValueProject() to create projections for
  /// each child. Subclasses should override this to customize how
  /// destructuring is done.
///
/// NOTE: Due to the CRTP, this must always be called as
/// asImpl().destructureAggregate() to ensure that one gets the proper
/// implementation!
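  ///
  /// Example call (sketch):
  ///   asImpl().destructureAggregate(B, loc, agg, /*skipTrivial=*/true,
  ///       [&](unsigned idx, SILValue child, const TypeLowering &childTL) {
  ///         ...
  ///       });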
void destructureAggregate(
SILBuilder &B, SILLocation loc, SILValue aggValue, bool skipTrivial,
function_ref<void(unsigned, SILValue, const TypeLowering &)> visitor)
const {
for (auto pair : llvm::enumerate(getChildren(B.getModule().Types))) {
auto &child = pair.value();
auto &childLowering = child.getLowering();
// Skip trivial children.
if (skipTrivial && childLowering.isTrivial())
continue;
auto childIndex = child.getIndex();
auto childValue = asImpl().emitRValueProject(B, loc, aggValue,
childIndex, childLowering);
visitor(pair.index(), childValue, childLowering);
}
}
virtual SILValue rebuildAggregate(SILBuilder &B, SILLocation loc,
ArrayRef<SILValue> values) const = 0;
ArrayRef<Child> getChildren(TypeConverter &TC) const {
if (Children.data() == nullptr) {
SmallVector<Child, 4> children;
lowerChildren(TC, children);
auto buf = operator new(sizeof(Child) * children.size(), TC);
memcpy(buf, children.data(), sizeof(Child) * children.size());
Children = {reinterpret_cast<Child*>(buf), children.size()};
}
return Children;
}
template <class T>
void forEachNonTrivialChild(SILBuilder &B, SILLocation loc,
SILValue aggValue,
const T &operation) const {
asImpl().destructureAggregate(B, loc, aggValue, true /*skipTrivial*/,
[&](unsigned, SILValue childValue,
const TypeLowering &childLowering) {
operation(B, loc, childValue,
childLowering);
});
}
using SimpleOperationTy = void (TypeLowering::*)(SILBuilder &B,
SILLocation loc,
SILValue value) const;
void forEachNonTrivialChild(SILBuilder &B, SILLocation loc,
SILValue aggValue,
SimpleOperationTy operation) const {
forEachNonTrivialChild(B, loc, aggValue,
[operation](SILBuilder &B, SILLocation loc,
SILValue childValue,
const TypeLowering &childLowering) {
(childLowering.*operation)(B, loc, childValue);
});
}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (B.getFunction().hasOwnership())
return B.createCopyValue(loc, value);
B.createRetainValue(loc, value, B.getDefaultAtomicity());
return value;
}
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue aggValue,
TypeExpansionKind style) const override {
if (style == TypeExpansionKind::None) {
return emitCopyValue(B, loc, aggValue);
}
SmallVector<SILValue, 8> loweredChildValues;
asImpl().destructureAggregate(
B, loc, aggValue, false /*skipTrivial*/,
[&](unsigned childIndex, SILValue childValue,
const TypeLowering &childLowering) {
if (!childLowering.isTrivial())
childValue = childLowering.emitLoweredCopyChildValue(
B, loc, childValue, style);
loweredChildValues.push_back(childValue);
});
// Without ownership, return our original value. This is a small
// non-ownership optimization to not destabilize the optimizer pipeline.
//
// TODO: Once the pass pipeline is fixed, we should evaluate if we can do
// this again.
if (!B.hasOwnership())
return aggValue;
return rebuildAggregate(B, loc, loweredChildValues);
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue aggValue) const override {
if (B.getFunction().hasOwnership()) {
B.createDestroyValue(loc, aggValue);
return;
}
B.createReleaseValue(loc, aggValue, B.getDefaultAtomicity());
}
void
emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue aggValue,
TypeExpansionKind loweringStyle) const override {
SimpleOperationTy Fn;
switch(loweringStyle) {
case TypeExpansionKind::None:
return emitDestroyValue(B, loc, aggValue);
case TypeExpansionKind::DirectChildren:
Fn = &TypeLowering::emitDestroyValue;
break;
case TypeExpansionKind::MostDerivedDescendents:
Fn = &TypeLowering::emitLoweredDestroyValueMostDerivedDescendents;
break;
}
forEachNonTrivialChild(B, loc, aggValue, Fn);
}
};
/// A lowering for loadable but non-trivial tuple types.
class LoadableTupleTypeLowering final
: public LoadableAggTypeLowering<LoadableTupleTypeLowering, unsigned> {
using Super = LoadableAggTypeLowering<LoadableTupleTypeLowering, unsigned>;
public:
LoadableTupleTypeLowering(CanType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: LoadableAggTypeLowering(type, properties, forExpansion) {}
SILValue emitRValueProject(SILBuilder &B, SILLocation loc,
SILValue tupleValue, unsigned index,
const TypeLowering &eltLowering) const {
assert(!B.hasOwnership() &&
"Shouldn't call this when ownership is enabled?! Destructure "
"non-trivial tuples instead");
return B.createTupleExtract(loc, tupleValue, index,
eltLowering.getLoweredType());
}
void destructureAggregate(
SILBuilder &B, SILLocation loc, SILValue aggValue, bool skipTrivial,
function_ref<void(unsigned childIndex, SILValue childValue,
const TypeLowering &childLowering)>
visitor) const {
// Without ownership, use our parent.
if (!B.hasOwnership())
return Super::destructureAggregate(B, loc, aggValue, skipTrivial,
visitor);
// Otherwise, emit a destructure tuple and do the loop.
auto *dti = B.createDestructureTuple(loc, aggValue);
for (auto pair : llvm::enumerate(dti->getResults())) {
SILValue childValue = pair.value();
auto &childLowering =
B.getFunction().getTypeLowering(childValue->getType());
if (skipTrivial && childLowering.isTrivial())
continue;
visitor(pair.index(), childValue, childLowering);
}
}
SILValue rebuildAggregate(SILBuilder &B, SILLocation loc,
ArrayRef<SILValue> values) const override {
return B.createTuple(loc, getLoweredType(), values);
}
private:
void lowerChildren(TypeConverter &TC, SmallVectorImpl<Child> &children)
const override {
// The children are just the elements of the lowered tuple.
auto silTy = getLoweredType();
auto tupleTy = silTy.castTo<TupleType>();
children.reserve(tupleTy->getNumElements());
unsigned index = 0;
for (auto elt : tupleTy.getElementTypes()) {
auto silElt = SILType::getPrimitiveType(elt, silTy.getCategory());
auto &eltTL = TC.getTypeLowering(silElt, getExpansionContext());
children.push_back(Child{index, eltTL});
++index;
}
}
};
/// A lowering for loadable but non-trivial struct types.
class LoadableStructTypeLowering final
: public LoadableAggTypeLowering<LoadableStructTypeLowering, VarDecl *> {
using Super =
LoadableAggTypeLowering<LoadableStructTypeLowering, VarDecl *>;
public:
LoadableStructTypeLowering(CanType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: LoadableAggTypeLowering(type, properties, forExpansion) {}
SILValue emitRValueProject(SILBuilder &B, SILLocation loc,
SILValue structValue, VarDecl *field,
const TypeLowering &fieldLowering) const {
return B.createStructExtract(loc, structValue, field,
fieldLowering.getLoweredType());
}
void destructureAggregate(
SILBuilder &B, SILLocation loc, SILValue aggValue, bool skipTrivial,
function_ref<void(unsigned childIndex, SILValue childValue,
const TypeLowering &childLowering)>
visitor) const {
if (!B.hasOwnership())
return Super::destructureAggregate(B, loc, aggValue, skipTrivial,
visitor);
auto *dsi = B.createDestructureStruct(loc, aggValue);
for (auto pair : llvm::enumerate(dsi->getResults())) {
SILValue childValue = pair.value();
auto &childLowering =
B.getFunction().getTypeLowering(childValue->getType());
if (skipTrivial && childLowering.isTrivial())
continue;
visitor(pair.index(), childValue, childLowering);
}
}
SILValue rebuildAggregate(SILBuilder &B, SILLocation loc,
ArrayRef<SILValue> values) const override {
return B.createStruct(loc, getLoweredType(), values);
}
private:
void lowerChildren(TypeConverter &TC, SmallVectorImpl<Child> &children)
const override {
auto silTy = getLoweredType();
auto structDecl = silTy.getStructOrBoundGenericStruct();
assert(structDecl);
for (auto prop : structDecl->getStoredProperties()) {
SILType propTy = silTy.getFieldType(prop, TC, getExpansionContext());
auto &propTL = TC.getTypeLowering(propTy, getExpansionContext());
children.push_back(Child{prop, propTL});
}
}
};
/// A lowering for loadable but non-trivial enum types.
class LoadableEnumTypeLowering final : public NonTrivialLoadableTypeLowering {
public:
LoadableEnumTypeLowering(CanType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: NonTrivialLoadableTypeLowering(SILType::getPrimitiveObjectType(type),
properties,
IsNotReferenceCounted,
forExpansion) {}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (B.getFunction().hasOwnership())
return B.createCopyValue(loc, value);
B.createRetainValue(loc, value, B.getDefaultAtomicity());
return value;
}
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue value,
TypeExpansionKind style) const override {
if (B.getFunction().hasOwnership())
return B.createCopyValue(loc, value);
B.createRetainValue(loc, value, B.getDefaultAtomicity());
return value;
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (B.getFunction().hasOwnership()) {
B.createDestroyValue(loc, value);
return;
}
B.createReleaseValue(loc, value, B.getDefaultAtomicity());
}
void emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value,
TypeExpansionKind style) const override {
    // We never want to expand enums.
return emitDestroyValue(B, loc, value);
}
};
/// A type lowering for `@differentiable` function types.
class NormalDifferentiableSILFunctionTypeLowering final
: public LoadableAggTypeLowering<
NormalDifferentiableSILFunctionTypeLowering,
NormalDifferentiableFunctionTypeComponent> {
public:
using LoadableAggTypeLowering::LoadableAggTypeLowering;
SILValue emitRValueProject(
SILBuilder &B, SILLocation loc, SILValue tupleValue,
NormalDifferentiableFunctionTypeComponent extractee,
const TypeLowering &eltLowering) const {
return B.createDifferentiableFunctionExtract(
loc, extractee, tupleValue);
}
SILValue rebuildAggregate(SILBuilder &B, SILLocation loc,
ArrayRef<SILValue> values) const override {
assert(values.size() == 3);
auto fnTy = getLoweredType().castTo<SILFunctionType>();
auto *parameterIndices = fnTy->getDifferentiabilityParameterIndices();
auto *resultIndices = fnTy->getDifferentiabilityResultIndices();
return B.createDifferentiableFunction(
loc, parameterIndices, resultIndices, values[0],
std::make_pair(values[1], values[2]));
}
void lowerChildren(TypeConverter &TC,
SmallVectorImpl<Child> &children) const override {
auto fnTy = getLoweredType().castTo<SILFunctionType>();
auto numDerivativeFns = 2;
children.reserve(numDerivativeFns + 1);
auto origFnTy = fnTy->getWithoutDifferentiability();
auto *paramIndices = fnTy->getDifferentiabilityParameterIndices();
auto *resultIndices = fnTy->getDifferentiabilityResultIndices();
children.push_back(Child{
NormalDifferentiableFunctionTypeComponent::Original,
TC.getTypeLowering(origFnTy, getExpansionContext())
});
for (AutoDiffDerivativeFunctionKind kind :
{AutoDiffDerivativeFunctionKind::JVP,
AutoDiffDerivativeFunctionKind::VJP}) {
auto derivativeFnTy = origFnTy->getAutoDiffDerivativeFunctionType(
paramIndices, resultIndices, kind, TC,
LookUpConformanceInModule(&TC.M));
auto silTy = SILType::getPrimitiveObjectType(derivativeFnTy);
NormalDifferentiableFunctionTypeComponent extractee(kind);
      // Assert that we have the right extractee. A terrible bug in the past
      // was caused by implicit conversions from `unsigned` to
      // `NormalDifferentiableFunctionTypeComponent` which resulted in a
      // wrong extractee.
assert(extractee.getAsDerivativeFunctionKind() == kind);
children.push_back(Child{
extractee, TC.getTypeLowering(silTy, getExpansionContext())});
}
assert(children.size() == 3);
}
};
/// A type lowering for `@differentiable(linear)` function types.
class LinearDifferentiableSILFunctionTypeLowering final
: public LoadableAggTypeLowering<
LinearDifferentiableSILFunctionTypeLowering,
LinearDifferentiableFunctionTypeComponent> {
public:
using LoadableAggTypeLowering::LoadableAggTypeLowering;
SILValue emitRValueProject(
SILBuilder &B, SILLocation loc, SILValue tupleValue,
LinearDifferentiableFunctionTypeComponent component,
const TypeLowering &eltLowering) const {
return B.createLinearFunctionExtract(loc, component, tupleValue);
}
SILValue rebuildAggregate(SILBuilder &B, SILLocation loc,
ArrayRef<SILValue> values) const override {
assert(values.size() == 2);
auto fnTy = getLoweredType().castTo<SILFunctionType>();
auto paramIndices = fnTy->getDifferentiabilityParameterIndices();
return B.createLinearFunction(loc, paramIndices, values[0], values[1]);
}
void lowerChildren(TypeConverter &TC,
SmallVectorImpl<Child> &children) const override {
auto fnTy = getLoweredType().castTo<SILFunctionType>();
children.reserve(2);
auto origFnTy = fnTy->getWithoutDifferentiability();
auto paramIndices = fnTy->getDifferentiabilityParameterIndices();
children.push_back(Child{
LinearDifferentiableFunctionTypeComponent::Original,
TC.getTypeLowering(origFnTy, getExpansionContext())
});
auto transposeFnTy = origFnTy->getAutoDiffTransposeFunctionType(
paramIndices, TC, LookUpConformanceInModule(&TC.M));
auto transposeSILFnTy = SILType::getPrimitiveObjectType(transposeFnTy);
children.push_back(Child{
LinearDifferentiableFunctionTypeComponent::Transpose,
TC.getTypeLowering(transposeSILFnTy, getExpansionContext())
});
assert(children.size() == 2);
}
};
class LeafLoadableTypeLowering : public NonTrivialLoadableTypeLowering {
public:
LeafLoadableTypeLowering(SILType type, RecursiveProperties properties,
IsReferenceCounted_t isRefCounted,
TypeExpansionContext forExpansion)
: NonTrivialLoadableTypeLowering(type, properties, isRefCounted,
forExpansion) {}
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue value,
TypeExpansionKind style) const override {
return emitCopyValue(B, loc, value);
}
void emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value,
TypeExpansionKind style) const override {
emitDestroyValue(B, loc, value);
}
};
/// A class for reference types, which are all non-trivial but still
/// loadable.
class ReferenceTypeLowering : public LeafLoadableTypeLowering {
public:
ReferenceTypeLowering(SILType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: LeafLoadableTypeLowering(type, properties, IsReferenceCounted,
forExpansion) {}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (isa<FunctionRefInst>(value) || isa<DynamicFunctionRefInst>(value) ||
isa<PreviousDynamicFunctionRefInst>(value))
return value;
if (B.getFunction().hasOwnership())
return B.createCopyValue(loc, value);
B.createStrongRetain(loc, value, B.getDefaultAtomicity());
return value;
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (B.getFunction().hasOwnership()) {
B.createDestroyValue(loc, value);
return;
}
B.createStrongRelease(loc, value, B.getDefaultAtomicity());
}
};
/// A type lowering for loadable checked reference-storage types,
/// e.g. @unowned.
#define ALWAYS_OR_SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
class Loadable##Name##TypeLowering final : public LeafLoadableTypeLowering { \
public: \
Loadable##Name##TypeLowering(SILType type, \
TypeExpansionContext forExpansion, \
RecursiveProperties props) \
: LeafLoadableTypeLowering(type, props, \
IsReferenceCounted, \
forExpansion) {} \
SILValue emitCopyValue(SILBuilder &B, SILLocation loc, \
SILValue value) const override { \
if (B.getFunction().hasOwnership()) \
return B.createCopyValue(loc, value); \
B.create##Name##Retain(loc, value, B.getDefaultAtomicity()); \
return value; \
} \
void emitDestroyValue(SILBuilder &B, SILLocation loc, \
SILValue value) const override { \
if (B.getFunction().hasOwnership()) { \
B.createDestroyValue(loc, value); \
return; \
} \
B.create##Name##Release(loc, value, B.getDefaultAtomicity()); \
} \
};
#include "swift/AST/ReferenceStorage.def"
/// A class for non-trivial, address-only types.
class AddressOnlyTypeLowering : public TypeLowering {
public:
AddressOnlyTypeLowering(SILType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: TypeLowering(type, properties, IsNotReferenceCounted,
forExpansion) {
assert(properties.isAddressOnly());
}
void emitCopyInto(SILBuilder &B, SILLocation loc,
SILValue src, SILValue dest, IsTake_t isTake,
IsInitialization_t isInit) const override {
B.createCopyAddr(loc, src, dest, isTake, isInit);
}
SILValue emitLoadOfCopy(SILBuilder &B, SILLocation loc,
SILValue addr, IsTake_t isTake) const override {
llvm_unreachable("calling emitLoadOfCopy on non-loadable type");
}
void emitStoreOfCopy(SILBuilder &B, SILLocation loc,
SILValue newValue, SILValue addr,
IsInitialization_t isInit) const override {
llvm_unreachable("calling emitStoreOfCopy on non-loadable type");
}
void emitStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual) const override {
llvm_unreachable("calling emitStore on non-loadable type");
}
SILValue emitLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual) const override {
llvm_unreachable("calling emitLoad on non-loadable type");
}
SILValue emitLoweredLoad(SILBuilder &B, SILLocation loc, SILValue addr,
LoadOwnershipQualifier qual,
Lowering::TypeLowering::TypeExpansionKind
expansionKind) const override {
llvm_unreachable("calling emitLoweredLoad on non-loadable type?!");
}
void emitLoweredStore(SILBuilder &B, SILLocation loc, SILValue value,
SILValue addr, StoreOwnershipQualifier qual,
Lowering::TypeLowering::TypeExpansionKind
expansionKind) const override {
llvm_unreachable("calling emitLoweredStore on non-loadable type?!");
}
void emitDestroyAddress(SILBuilder &B, SILLocation loc,
SILValue addr) const override {
if (!isTrivial())
B.createDestroyAddr(loc, addr);
}
void emitDestroyRValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
if (!isTrivial())
B.createDestroyAddr(loc, value);
}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
llvm_unreachable("type is not loadable!");
}
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue value,
TypeExpansionKind style) const override {
llvm_unreachable("type is not loadable!");
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
llvm_unreachable("type is not loadable!");
}
void emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value,
TypeExpansionKind style) const override {
llvm_unreachable("type is not loadable!");
}
};
/// A class for Builtin.UnsafeValueBuffer. The only purpose here is
/// to catch obviously broken attempts to copy or destroy the buffer.
class UnsafeValueBufferTypeLowering : public AddressOnlyTypeLowering {
public:
UnsafeValueBufferTypeLowering(SILType type,
TypeExpansionContext forExpansion,
IsTypeExpansionSensitive_t isSensitive)
: AddressOnlyTypeLowering(type,
{IsNotTrivial, IsFixedABI,
IsAddressOnly, IsNotResilient, isSensitive},
forExpansion) {}
void emitCopyInto(SILBuilder &B, SILLocation loc,
SILValue src, SILValue dest, IsTake_t isTake,
IsInitialization_t isInit) const override {
llvm_unreachable("cannot copy an UnsafeValueBuffer!");
}
void emitDestroyAddress(SILBuilder &B, SILLocation loc,
SILValue addr) const override {
llvm_unreachable("cannot destroy an UnsafeValueBuffer!");
}
void emitDestroyRValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
llvm_unreachable("cannot destroy an UnsafeValueBuffer!");
}
};
/// Lower address-only types as opaque values.
///
/// Opaque values behave like loadable leaf types in SIL.
///
/// FIXME: When removing one of the llvm_unreachable stubs below, just delete
/// the whole method override.
class OpaqueValueTypeLowering : public LeafLoadableTypeLowering {
public:
OpaqueValueTypeLowering(SILType type, RecursiveProperties properties,
TypeExpansionContext forExpansion)
: LeafLoadableTypeLowering(type, properties, IsNotReferenceCounted,
forExpansion) {}
void emitCopyInto(SILBuilder &B, SILLocation loc,
SILValue src, SILValue dest, IsTake_t isTake,
IsInitialization_t isInit) const override {
llvm_unreachable("copy into");
}
  // Unlike LeafLoadableTypeLowering, lowered copies and destroys of opaque
  // values are not supported.
SILValue emitLoweredCopyValue(SILBuilder &B, SILLocation loc,
SILValue value,
TypeExpansionKind style) const override {
llvm_unreachable("lowered copy");
}
void emitLoweredDestroyValue(SILBuilder &B, SILLocation loc, SILValue value,
TypeExpansionKind style) const override {
llvm_unreachable("destroy value");
}
SILValue emitCopyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
return B.createCopyValue(loc, value);
}
void emitDestroyValue(SILBuilder &B, SILLocation loc,
SILValue value) const override {
B.createDestroyValue(loc, value);
}
};
/// Build the appropriate TypeLowering subclass for the given type,
/// which is assumed to already have been lowered.
class LowerType
: public TypeClassifierBase<LowerType, TypeLowering *>
{
public:
LowerType(TypeConverter &TC, TypeExpansionContext Expansion)
: TypeClassifierBase(TC, Expansion) {}
TypeLowering *handleTrivial(CanType type) {
return handleTrivial(type, RecursiveProperties::forTrivial());
}
TypeLowering *handleTrivial(CanType type,
RecursiveProperties properties) {
auto silType = SILType::getPrimitiveObjectType(type);
return new (TC) TrivialTypeLowering(silType, properties, Expansion);
}
TypeLowering *handleReference(CanType type,
RecursiveProperties properties) {
auto silType = SILType::getPrimitiveObjectType(type);
return new (TC) ReferenceTypeLowering(silType, properties, Expansion);
}
TypeLowering *handleReference(CanType type) {
auto silType = SILType::getPrimitiveObjectType(type);
return new (TC) ReferenceTypeLowering(
silType, RecursiveProperties::forReference(), Expansion);
}
TypeLowering *handleAddressOnly(CanType type,
RecursiveProperties properties) {
if (!TC.Context.LangOpts.EnableSILOpaqueValues) {
auto silType = SILType::getPrimitiveAddressType(type);
return new (TC) AddressOnlyTypeLowering(silType, properties,
Expansion);
}
auto silType = SILType::getPrimitiveObjectType(type);
return new (TC) OpaqueValueTypeLowering(silType, properties, Expansion);
}
#define ALWAYS_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
TypeLowering * \
visit##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return new (TC) Loadable##Name##TypeLowering( \
SILType::getPrimitiveObjectType(type), \
Expansion, \
getReferenceRecursiveProperties(isSensitive)); \
}
#define SOMETIMES_LOADABLE_CHECKED_REF_STORAGE(Name, ...) \
TypeLowering * \
visitLoadable##Name##StorageType(Can##Name##StorageType type, \
AbstractionPattern origType, \
IsTypeExpansionSensitive_t isSensitive) { \
return new (TC) Loadable##Name##TypeLowering( \
SILType::getPrimitiveObjectType(type), \
Expansion, \
getReferenceRecursiveProperties(isSensitive)); \
}
#include "swift/AST/ReferenceStorage.def"
TypeLowering *
visitBuiltinUnsafeValueBufferType(CanBuiltinUnsafeValueBufferType type,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
auto silType = SILType::getPrimitiveAddressType(type);
return new (TC)
UnsafeValueBufferTypeLowering(silType, Expansion, isSensitive);
}
TypeLowering *visitTupleType(CanTupleType tupleType,
AbstractionPattern origType,
IsTypeExpansionSensitive_t isSensitive) {
RecursiveProperties properties;
for (unsigned i = 0, e = tupleType->getNumElements(); i < e; ++i) {
auto eltType = tupleType.getElementType(i);
auto origEltType = origType.getTupleElementType(i);
auto &lowering = TC.getTypeLowering(origEltType, eltType, Expansion);
properties.addSubobject(lowering.getRecursiveProperties());
}
properties = mergeIsTypeExpansionSensitive(isSensitive, properties);
return handleAggregateByProperties<LoadableTupleTypeLowering>(tupleType,
properties);
}
bool handleResilience(CanType type, NominalTypeDecl *D,
RecursiveProperties &properties) {
if (D->isResilient()) {
// If the type is resilient and defined in our module, make a note of
// that, since our lowering now depends on the resilience expansion.
bool sameModule = (D->getModuleContext() == &TC.M);
if (sameModule)
properties.addSubobject(RecursiveProperties::forResilient());
// If the type is in a different module, or if we're using a minimal
// expansion, the type is address only and completely opaque to us.
//
      // Note: if the type is in a different module, the lowering does
      // not depend on the resilience expansion, so we do not need to set
      // the isResilient() flag above.
if (!sameModule || Expansion.getResilienceExpansion() ==
ResilienceExpansion::Minimal) {
properties.addSubobject(RecursiveProperties::forOpaque());
return true;
}
}
return false;
}
TypeLowering *visitAnyStructType(CanType structType,
AbstractionPattern origType,
StructDecl *D,
IsTypeExpansionSensitive_t isSensitive) {
RecursiveProperties properties;
properties = mergeIsTypeExpansionSensitive(isSensitive, properties);
if (handleResilience(structType, D, properties))
return handleAddressOnly(structType, properties);
if (D->isCxxNonTrivial()) {
properties.setAddressOnly();
properties.setNonTrivial();
}
auto subMap = structType->getContextSubstitutionMap(&TC.M, D);
// Classify the type according to its stored properties.
for (auto field : D->getStoredProperties()) {
auto substFieldType =
field->getInterfaceType().subst(subMap)
->getCanonicalType();
// We are determining the recursive properties of the struct here,
// not the lowered types of the fields, so instead of lowering the
// field type against the declaration's interface type as we normally
// would, we use the substituted field type in order to accurately
// preserve the properties of the aggregate.
auto origFieldType = origType.unsafeGetSubstFieldType(field);
properties.addSubobject(classifyType(origFieldType, substFieldType,
TC, Expansion));
}
return handleAggregateByProperties<LoadableStructTypeLowering>(structType,
properties);
}
TypeLowering *visitAnyEnumType(CanType enumType,
AbstractionPattern origType,
EnumDecl *D,
IsTypeExpansionSensitive_t isSensitive) {
RecursiveProperties properties;
properties = mergeIsTypeExpansionSensitive(isSensitive, properties);
if (handleResilience(enumType, D, properties))
return handleAddressOnly(enumType, properties);
// If the whole enum is indirect, we lower it as if all payload
// cases were indirect. This means a fixed-layout indirect enum
// is always loadable and nontrivial. A resilient indirect enum
// is still address only, because we don't know how many bits
// are used for the discriminator, and new non-indirect cases
// may be added resiliently later.
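    //
    // For example (sketch):
    //   indirect enum Tree { case leaf(Int), node(Tree, Tree) }
    // is loadable and non-trivial: every payload is boxed, so the enum
    // itself is just a fixed-size value.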
if (D->isIndirect()) {
properties.setNonTrivial();
return new (TC) LoadableEnumTypeLowering(enumType, properties,
Expansion);
}
auto subMap = enumType->getContextSubstitutionMap(&TC.M, D);
// Accumulate the properties of all direct payloads.
for (auto elt : D->getAllElements()) {
// No-payload elements do not affect any recursive properties.
if (!elt->hasAssociatedValues())
continue;
// Indirect elements only make the type nontrivial.
if (elt->isIndirect()) {
properties.setNonTrivial();
continue;
}
auto substEltType =
elt->getArgumentInterfaceType().subst(subMap)
->getCanonicalType();
auto origEltType = origType.unsafeGetSubstFieldType(elt,
elt->getArgumentInterfaceType()
->getCanonicalType(D->getGenericSignature()));
properties.addSubobject(classifyType(origEltType, substEltType,
TC, Expansion));
}
return handleAggregateByProperties<LoadableEnumTypeLowering>(enumType,
properties);
}
TypeLowering *
visitNormalDifferentiableSILFunctionType(CanSILFunctionType type,
RecursiveProperties props) {
return handleAggregateByProperties
<NormalDifferentiableSILFunctionTypeLowering>(type, props);
}
TypeLowering *
visitLinearDifferentiableSILFunctionType(CanSILFunctionType type,
RecursiveProperties props) {
return handleAggregateByProperties
<LinearDifferentiableSILFunctionTypeLowering>(type, props);
}
template <class LoadableLoweringClass>
TypeLowering *handleAggregateByProperties(CanType type,
RecursiveProperties props) {
if (props.isAddressOnly()) {
return handleAddressOnly(type, props);
}
assert(props.isFixedABI());
if (props.isTrivial()) {
return handleTrivial(type, props);
}
return new (TC) LoadableLoweringClass(type, props, Expansion);
}
};
} // end anonymous namespace
TypeConverter::TypeConverter(ModuleDecl &m)
: M(m), Context(m.getASTContext()) {
}
TypeConverter::~TypeConverter() {
// The bump pointer allocator destructor will deallocate but not destroy all
// our independent TypeLowerings.
for (auto &ti : LoweredTypes) {
// Destroy only the unique entries.
CanType srcType = ti.first.OrigType;
if (!srcType) continue;
CanType mappedType = ti.second->getLoweredType().getASTType();
if (srcType == mappedType)
ti.second->~TypeLowering();
}
}
void *TypeLowering::operator new(size_t size, TypeConverter &tc) {
return tc.TypeLoweringBPA.Allocate(size, alignof(TypeLowering&));
}
const TypeLowering *TypeConverter::find(const TypeKey &k) {
if (!k.isCacheable()) return nullptr;
auto ck = k.getCachingKey();
auto found = LoweredTypes.find(ck);
if (found == LoweredTypes.end())
return nullptr;
assert((found->second || k.expansionContext.isMinimal()) &&
"type recursion not caught in Sema");
return found->second;
}
#ifndef NDEBUG
void TypeConverter::removeNullEntry(const TypeKey &k) {
if (!k.isCacheable())
return;
auto ck = k.getCachingKey();
auto found = LoweredTypes.find(ck);
if (found == LoweredTypes.end() || found->second != nullptr)
return;
LoweredTypes.erase(ck);
}
#endif
void TypeConverter::insert(const TypeKey &k, const TypeLowering *tl) {
if (!k.isCacheable()) return;
LoweredTypes[k.getCachingKey()] = tl;
}
/// Lower each of the elements of the substituted type according to
/// the abstraction pattern of the given original type.
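///
/// For example (sketch): `(x: Int, f: @escaping () -> Int)` lowers to
/// `(x: Int, f: @callee_guaranteed () -> Int)`; the element's function type
/// becomes a SIL function type and its @escaping flag is dropped.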
static CanTupleType computeLoweredTupleType(TypeConverter &tc,
TypeExpansionContext context,
AbstractionPattern origType,
CanTupleType substType) {
assert(origType.matchesTuple(substType));
// Does the lowered tuple type differ from the substituted type in
// any interesting way?
bool changed = false;
SmallVector<TupleTypeElt, 4> loweredElts;
loweredElts.reserve(substType->getNumElements());
for (auto i : indices(substType->getElementTypes())) {
auto origEltType = origType.getTupleElementType(i);
auto substEltType = substType.getElementType(i);
auto &substElt = substType->getElement(i);
// Make sure we don't have something non-materializable.
auto Flags = substElt.getParameterFlags();
assert(Flags.getValueOwnership() == ValueOwnership::Default);
assert(!Flags.isVariadic());
CanType loweredSubstEltType =
tc.getLoweredRValueType(context, origEltType, substEltType);
changed = (changed || substEltType != loweredSubstEltType ||
!Flags.isNone());
// Note: we drop @escaping and @autoclosure which can still appear on
// materializable tuple types.
//
// FIXME: Replace this with an assertion that the original tuple element
// did not have any flags.
loweredElts.emplace_back(loweredSubstEltType,
substElt.getName(),
ParameterTypeFlags());
}
if (!changed) return substType;
// The cast should succeed, because if we end up with a one-element
// tuple type here, it must have a label.
return cast<TupleType>(CanType(TupleType::get(loweredElts, tc.Context)));
}
static CanType computeLoweredOptionalType(TypeConverter &tc,
TypeExpansionContext context,
AbstractionPattern origType,
CanType substType,
CanType substObjectType) {
assert(substType.getOptionalObjectType() == substObjectType);
CanType loweredObjectType = tc.getLoweredRValueType(
context, origType.getOptionalObjectType(), substObjectType);
// If the object type didn't change, we don't have to rebuild anything.
if (loweredObjectType == substObjectType) {
return substType;
}
auto optDecl = tc.Context.getOptionalDecl();
return CanType(BoundGenericEnumType::get(optDecl, Type(), loweredObjectType));
}
static CanType
computeLoweredReferenceStorageType(TypeConverter &tc,
TypeExpansionContext context,
AbstractionPattern origType,
CanReferenceStorageType substType) {
CanType loweredReferentType = tc.getLoweredRValueType(
context, origType.getReferenceStorageReferentType(),
substType.getReferentType());
if (loweredReferentType == substType.getReferentType())
return substType;
return CanReferenceStorageType::get(loweredReferentType,
substType->getOwnership());
}
CanSILFunctionType
TypeConverter::getSILFunctionType(TypeExpansionContext context,
AbstractionPattern origType,
CanFunctionType substType) {
return cast<SILFunctionType>(
getLoweredRValueType(context, origType, substType));
}
const TypeLowering &
TypeConverter::getTypeLowering(AbstractionPattern origType,
Type origSubstType,
TypeExpansionContext forExpansion) {
CanType substType = origSubstType->getCanonicalType();
bool origHasOpaqueArchetype = substType->hasOpaqueArchetype();
  // A type is type expansion sensitive if its lowering could depend on the
  // type expansion context:
  // - if the type has an opaque archetype, because depending on the type
  //   expansion context we might get a different SIL type
  //   (Foo<some P> vs. Foo<Int>);
  // - or if during type lowering we discover an opaque archetype that
  //   influences type lowering by type expansion context.
  //   E.g. a struct containing a field that is an opaque archetype will be
  //   loadable or not depending on the type expansion context. In a more
  //   permissive type expansion context we will look through the opaque
  //   archetype and could discover a loadable type, making the whole
  //   aggregate loadable.
auto isTypeExpansionSensitive = origHasOpaqueArchetype
? IsTypeExpansionSensitive
: IsNotTypeExpansionSensitive;
auto key = getTypeKey(origType, substType, forExpansion);
assert(!substType->is<InOutType>());
auto *candidateLowering = find(key.getKeyForMinimalExpansion());
auto *lowering = getTypeLoweringForExpansion(
key, forExpansion, candidateLowering, IsNotTypeExpansionSensitive);
if (lowering != nullptr)
return *lowering;
#ifndef NDEBUG
// Catch reentrancy bugs.
if (candidateLowering == nullptr)
insert(key.getKeyForMinimalExpansion(), nullptr);
#endif
// Lower the type.
auto loweredSubstType =
computeLoweredRValueType(forExpansion, origType, substType);
  // If that didn't change the type and the key is cacheable, there's no
  // point in re-checking the table, so just construct a type lowering
  // and cache it.
if (loweredSubstType == substType && key.isCacheable()) {
lowering =
LowerType(*this, forExpansion)
.visit(key.SubstType, key.OrigType, isTypeExpansionSensitive);
// Otherwise, check the table at a key that would be used by the
// SILType-based lookup path for the type we just lowered to, then cache
// that same result at this key if possible.
} else {
lowering = &getTypeLoweringForLoweredType(
origType, loweredSubstType, forExpansion, isTypeExpansionSensitive);
}
if (!lowering->isResilient() && !lowering->isTypeExpansionSensitive()) {
insert(key.getKeyForMinimalExpansion(), lowering);
} else {
insert(key, lowering);
#ifndef NDEBUG
removeNullEntry(key.getKeyForMinimalExpansion());
#endif
}
return *lowering;
}
CanType
TypeConverter::computeLoweredRValueType(TypeExpansionContext forExpansion,
AbstractionPattern origType,
CanType substType) {
class LoweredRValueTypeVisitor
: public CanTypeVisitor<LoweredRValueTypeVisitor, CanType> {
TypeConverter &TC;
TypeExpansionContext forExpansion;
AbstractionPattern origType;
public:
LoweredRValueTypeVisitor(TypeConverter &TC,
TypeExpansionContext forExpansion,
AbstractionPattern origType)
: TC(TC), forExpansion(forExpansion), origType(origType) {}
// AST function types are turned into SIL function types:
// - the type is uncurried as desired
// - types are turned into their unbridged equivalents, depending
// on the abstract CC
// - ownership conventions are deduced
// - a minimal substituted generic signature is extracted to represent
// possible ABI-compatible substitutions
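    //
    // For example (sketch, using the default native conventions):
    //   (Int, String) -> String
    // becomes
    //   @callee_guaranteed (Int, @guaranteed String) -> @owned String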
CanType visitAnyFunctionType(CanAnyFunctionType substFnType) {
// If the formal type uses a C convention, it is not formally
// abstractable, and it may be subject to implicit bridging.
auto extInfo = substFnType->getExtInfo();
auto rep = extInfo.getRepresentation();
SILFunctionTypeRepresentation silRep = convertRepresentation(rep);
if (getSILFunctionLanguage(silRep) == SILFunctionLanguage::C) {
// The importer only applies fully-reversible bridging to the
// component types of C function pointers.
auto bridging = Bridgeability::Full;
if (silRep == SILFunctionTypeRepresentation::CFunctionPointer)
bridging = Bridgeability::None;
// Bridge the parameters and result of the function type.
auto bridgedFnType =
TC.getBridgedFunctionType(origType, substFnType, bridging, silRep);
substFnType = bridgedFnType;
// Also rewrite the type of the abstraction pattern.
auto signature = origType.getGenericSignatureOrNull();
if (origType.isTypeParameter()) {
origType = AbstractionPattern(signature, bridgedFnType);
} else {
origType.rewriteType(signature, bridgedFnType);
}
}
AnyFunctionType::ExtInfo baseExtInfo;
if (auto origFnType = origType.getAs<AnyFunctionType>()) {
baseExtInfo = origFnType->getExtInfo();
} else {
baseExtInfo = substFnType->getExtInfo();
}
const clang::Type *clangType = baseExtInfo.getClangTypeInfo().getType();
if (shouldStoreClangType(rep) && !clangType) {
clangType = TC.Context.getClangFunctionType(
substFnType->getParams(), substFnType->getResult(), rep);
}
auto silExtInfo =
SILExtInfoBuilder(
baseExtInfo.intoBuilder().withClangFunctionType(clangType), false)
.build();
return ::getNativeSILFunctionType(TC, forExpansion, origType, substFnType,
silExtInfo);
}
// Ignore dynamic self types.
CanType visitDynamicSelfType(CanDynamicSelfType selfType) {
return TC.getLoweredRValueType(forExpansion, origType,
selfType.getSelfType());
}
    // Static metatypes are unitary and can be optimized to a "thin" empty
    // representation if the type also appears as a static metatype in the
    // original abstraction pattern.
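    //
    // For example (sketch): `Int.Type` lowered at a concrete abstraction
    // pattern can be @thin, but lowered against a type parameter
    // (orig type `T.Type`) it must be @thick.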
CanType visitMetatypeType(CanMetatypeType substMeta) {
// If the metatype has already been lowered, it will already carry its
// representation.
if (substMeta->hasRepresentation()) {
assert(substMeta->isLegalSILType());
return substOpaqueTypesWithUnderlyingTypes(substMeta, forExpansion);
}
MetatypeRepresentation repr;
auto origMeta = origType.getAs<MetatypeType>();
if (!origMeta) {
// If the metatype matches a dependent type, it must be thick.
assert(origType.isTypeParameterOrOpaqueArchetype());
repr = MetatypeRepresentation::Thick;
} else {
// Otherwise, we're thin if the metatype is thinnable both
// substituted and in the abstraction pattern.
if (hasSingletonMetatype(substMeta.getInstanceType()) &&
hasSingletonMetatype(origMeta.getInstanceType()))
repr = MetatypeRepresentation::Thin;
else
repr = MetatypeRepresentation::Thick;
}
CanType instanceType = substOpaqueTypesWithUnderlyingTypes(
substMeta.getInstanceType(), forExpansion);
// Regardless of thinness, metatypes are always trivial.
return CanMetatypeType::get(instanceType, repr);
}
// Give existential metatypes @thick representation by default.
CanType
visitExistentialMetatypeType(CanExistentialMetatypeType existMetatype) {
if (existMetatype->hasRepresentation()) {
assert(existMetatype->isLegalSILType());
return existMetatype;
}
return CanExistentialMetatypeType::get(existMetatype.getInstanceType(),
MetatypeRepresentation::Thick);
}
// Lower tuple element types.
CanType visitTupleType(CanTupleType substTupleType) {
return computeLoweredTupleType(TC, forExpansion, origType,
substTupleType);
}
// Lower the referent type of reference storage types.
CanType visitReferenceStorageType(CanReferenceStorageType substRefType) {
return computeLoweredReferenceStorageType(TC, forExpansion, origType,
substRefType);
}
CanType visitSILFunctionType(CanSILFunctionType silFnTy) {
if (!silFnTy->hasOpaqueArchetype() ||
!forExpansion.shouldLookThroughOpaqueTypeArchetypes())
return silFnTy;
return silFnTy->substituteOpaqueArchetypes(TC, forExpansion);
}
CanType visitType(CanType substType) {
// Lower the object type of optional types.
if (auto substObjectType = substType.getOptionalObjectType()) {
return computeLoweredOptionalType(TC, forExpansion, origType, substType,
substObjectType);
}
// The Swift type directly corresponds to the lowered type.
auto underlyingTy =
substOpaqueTypesWithUnderlyingTypes(substType, forExpansion,
/*allowLoweredTypes*/ true);
if (underlyingTy != substType) {
underlyingTy =
TC.computeLoweredRValueType(forExpansion, origType, underlyingTy);
}
return underlyingTy;
}
};
LoweredRValueTypeVisitor visitor(*this, forExpansion, origType);
return visitor.visit(substType);
}
const TypeLowering &
TypeConverter::getTypeLowering(SILType type,
TypeExpansionContext forExpansion,
CanGenericSignature sig) {
// The type lowering for a type parameter relies on its context.
assert(sig || !type.getASTType()->hasTypeParameter());
auto loweredType = type.getASTType();
auto isTypeExpansionSensitive = loweredType->hasOpaqueArchetype()
? IsTypeExpansionSensitive
: IsNotTypeExpansionSensitive;
return getTypeLoweringForLoweredType(AbstractionPattern(sig, loweredType),
loweredType, forExpansion,
isTypeExpansionSensitive);
}
const TypeLowering &
TypeConverter::getTypeLowering(SILType t, SILFunction &F) {
return getTypeLowering(t, TypeExpansionContext(F),
F.getLoweredFunctionType()->getSubstGenericSignature());
}
const TypeLowering &TypeConverter::getTypeLoweringForLoweredType(
AbstractionPattern origType, CanType loweredType,
TypeExpansionContext forExpansion,
IsTypeExpansionSensitive_t isTypeExpansionSensitive) {
assert(loweredType->isLegalSILType() && "type is not lowered!");
(void)loweredType;
// Cache the lowered type record for a contextualized type independent of the
// abstraction pattern. Lowered type parameters can't be cached or looked up
// without context. (TODO: We could if they match the out-of-context
// abstraction pattern.)
AbstractionPattern origTypeForCaching = loweredType->hasTypeParameter()
? AbstractionPattern::getInvalid()
: AbstractionPattern(loweredType);
auto key = getTypeKey(origTypeForCaching, loweredType, forExpansion);
auto *candidateLowering = find(key.getKeyForMinimalExpansion());
auto *lowering = getTypeLoweringForExpansion(
key, forExpansion, candidateLowering, isTypeExpansionSensitive);
if (lowering != nullptr)
return *lowering;
#ifndef NDEBUG
// Catch reentrancy bugs.
if (candidateLowering == nullptr)
insert(key.getKeyForMinimalExpansion(), nullptr);
#endif
if (forExpansion.shouldLookThroughOpaqueTypeArchetypes() &&
loweredType->hasOpaqueArchetype()) {
loweredType = computeLoweredRValueType(
forExpansion, origType, loweredType);
}
lowering =
LowerType(*this, forExpansion)
.visit(loweredType, origType, isTypeExpansionSensitive);
if (!lowering->isResilient() && !lowering->isTypeExpansionSensitive())
insert(key.getKeyForMinimalExpansion(), lowering);
else {
insert(key, lowering);
#ifndef NDEBUG
removeNullEntry(key.getKeyForMinimalExpansion());
#endif
}
return *lowering;
}
/// When we've found a type lowering for one resilience expansion,
/// check if it's the one we want; if not, look up the key for the exact
/// expansion, returning nullptr if the caller needs to go ahead and
/// lower the type with the correct expansion.
const TypeLowering *TypeConverter::getTypeLoweringForExpansion(
TypeKey key, TypeExpansionContext forExpansion,
const TypeLowering *minimalExpansionLowering,
IsTypeExpansionSensitive_t isOrigTypeSensitive) {
if (minimalExpansionLowering == nullptr)
return nullptr;
if (!minimalExpansionLowering->isResilient() &&
!minimalExpansionLowering->isTypeExpansionSensitive() &&
!isOrigTypeSensitive) {
    // Don't try to refine the lowering for other resilience expansions if
    // we don't expect to get a different lowering anyway. Similarly, don't
    // refine if the original type did not have opaque type archetypes.
//
// See LowerType::handleResilience() for the gory details; we only
// set this flag if the type is resilient *and* inside our module.
return minimalExpansionLowering;
}
auto *exactLowering = find(key);
if (exactLowering)
return exactLowering;
// We have to create a new one.
return nullptr;
}
static GenericSignature
getEffectiveGenericSignature(DeclContext *dc,
CaptureInfo captureInfo) {
if (dc->getParent()->isLocalContext() &&
!captureInfo.hasGenericParamCaptures())
return nullptr;
return dc->getGenericSignatureOfContext();
}
static GenericSignature
getEffectiveGenericSignature(AnyFunctionRef fn,
CaptureInfo captureInfo) {
return getEffectiveGenericSignature(fn.getAsDeclContext(), captureInfo);
}
static CanGenericSignature
getCanonicalSignatureOrNull(GenericSignature sig) {
if (!sig || sig->areAllParamsConcrete())
return nullptr;
return sig.getCanonicalSignature();
}
/// Get the type of a global variable accessor function, () -> RawPointer.
static CanAnyFunctionType getGlobalAccessorType(CanType varType) {
ASTContext &C = varType->getASTContext();
return CanFunctionType::get({}, C.TheRawPointerType);
}
/// Removes @noescape from the given type if it's a function type. Otherwise,
/// returns the original type.
static CanType removeNoEscape(CanType resultType) {
if (auto funTy = resultType->getAs<AnyFunctionType>()) {
auto newExtInfo = funTy->getExtInfo().withNoEscape(false);
return adjustFunctionType(cast<AnyFunctionType>(resultType), newExtInfo);
}
return resultType;
}
/// Get the type of a default argument generator, () -> T.
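/// For example (sketch): given `func f(x: Int = 0)`, the generator for the
/// default value of `x` has the interface type `() -> Int`.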
static CanAnyFunctionType getDefaultArgGeneratorInterfaceType(
SILDeclRef c) {
auto *vd = c.getDecl();
auto resultTy = getParameterAt(vd,
c.defaultArgIndex)->getInterfaceType();
assert(resultTy && "Didn't find default argument?");
// The result type might be written in terms of type parameters
// that have been made fully concrete.
CanType canResultTy = resultTy->getCanonicalType(
vd->getInnermostDeclContext()
->getGenericSignatureOfContext());
// Remove @noescape from function return types. A @noescape
// function return type is a contradiction.
canResultTy = removeNoEscape(canResultTy);
// Get the generic signature from the surrounding context.
auto sig = vd->getInnermostDeclContext()->getGenericSignatureOfContext();
if (auto *afd = dyn_cast<AbstractFunctionDecl>(vd)) {
auto *param = getParameterAt(afd, c.defaultArgIndex);
if (param->hasDefaultExpr()) {
auto captureInfo = param->getDefaultArgumentCaptureInfo();
sig = getEffectiveGenericSignature(afd, captureInfo);
}
}
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig),
{}, canResultTy);
}
/// Get the type of a stored property initializer, () -> T.
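/// For example (sketch): for a stored property `var x: Int = 0`, the
/// initializer has the interface type `() -> Int`.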
static CanAnyFunctionType getStoredPropertyInitializerInterfaceType(
VarDecl *VD) {
auto *DC = VD->getDeclContext();
CanType resultTy =
VD->getParentPattern()->getType()->mapTypeOutOfContext()
->getCanonicalType();
// If this is the backing storage for a property with an attached
// wrapper that was initialized with '=', the stored property initializer
// will be in terms of the original property's type.
if (auto originalProperty = VD->getOriginalWrappedProperty()) {
if (originalProperty->isPropertyMemberwiseInitializedWithWrappedType()) {
resultTy = originalProperty->getPropertyWrapperInitValueInterfaceType()
->getCanonicalType();
// Stored property initializers can't return @noescape functions
resultTy = removeNoEscape(resultTy);
}
}
auto sig = DC->getGenericSignatureOfContext();
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig),
{}, resultTy);
}
/// Get the type of a property wrapper backing initializer,
/// (property-type) -> backing-type.
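/// For example (sketch, with a hypothetical wrapper type `Lazy`): for
/// `@Lazy var x: Int`, the backing initializer has the interface type
/// `(Int) -> Lazy<Int>`.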
static CanAnyFunctionType getPropertyWrapperBackingInitializerInterfaceType(
TypeConverter &TC,
VarDecl *VD) {
CanType resultType =
VD->getPropertyWrapperBackingPropertyType()->getCanonicalType();
auto *DC = VD->getInnermostDeclContext();
CanType inputType =
VD->getPropertyWrapperInitValueInterfaceType()->getCanonicalType();
auto sig = DC->getGenericSignatureOfContext();
AnyFunctionType::Param param(
inputType, Identifier(),
ParameterTypeFlags().withValueOwnership(ValueOwnership::Owned));
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig), {param},
resultType);
}
/// Get the type of a destructor function.
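/// For a class `C` (sketch), the deallocating destructor is lowered as a
/// method of type `(C) -> ()`, while a destroying destructor returns
/// `Builtin.NativeObject` instead.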
static CanAnyFunctionType getDestructorInterfaceType(DestructorDecl *dd,
bool isDeallocating,
bool isForeign) {
auto classType = dd->getDeclContext()->getDeclaredInterfaceType()
->getCanonicalType(dd->getGenericSignatureOfContext());
assert((!isForeign || isDeallocating)
&& "There are no foreign destroying destructors");
auto extInfoBuilder =
AnyFunctionType::ExtInfoBuilder(FunctionType::Representation::Thin,
/*throws*/ false);
if (isForeign)
extInfoBuilder = extInfoBuilder.withSILRepresentation(
SILFunctionTypeRepresentation::ObjCMethod);
else
extInfoBuilder = extInfoBuilder.withSILRepresentation(
SILFunctionTypeRepresentation::Method);
auto extInfo = extInfoBuilder.build();
auto &C = dd->getASTContext();
CanType resultTy = (isDeallocating
? TupleType::getEmpty(C)
: C.TheNativeObjectType);
CanType methodTy = CanFunctionType::get({}, resultTy);
auto sig = dd->getGenericSignatureOfContext();
FunctionType::Param args[] = {FunctionType::Param(classType)};
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig),
llvm::makeArrayRef(args),
methodTy, extInfo);
}
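// Illustrative example (not from the original source): for 'class C<T>',
// the deallocating destructor has interface type '<T> (C<T>) -> ()',
// while the native destroying destructor instead returns the object as
// Builtin.NativeObject so the deallocating entry point can free it.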
/// Retrieve the type of the ivar initializer or destroyer method for
/// a class.
static CanAnyFunctionType getIVarInitDestroyerInterfaceType(ClassDecl *cd,
bool isObjC,
bool isDestroyer) {
auto classType = cd->getDeclaredInterfaceType()
->getCanonicalType(cd->getGenericSignatureOfContext());
auto resultType = (isDestroyer
? TupleType::getEmpty(cd->getASTContext())
: classType);
auto extInfoBuilder =
AnyFunctionType::ExtInfoBuilder(FunctionType::Representation::Thin,
/*throws*/ false);
auto extInfo = extInfoBuilder
.withSILRepresentation(
isObjC ? SILFunctionTypeRepresentation::ObjCMethod
: SILFunctionTypeRepresentation::Method)
.build();
resultType = CanFunctionType::get({}, resultType, extInfo);
auto sig = cd->getGenericSignature();
FunctionType::Param args[] = {FunctionType::Param(classType)};
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig),
llvm::makeArrayRef(args),
resultType, extInfo);
}
static CanAnyFunctionType
getFunctionInterfaceTypeWithCaptures(TypeConverter &TC,
CanAnyFunctionType funcType,
SILDeclRef constant) {
// Get transitive closure of value captured by this function, and any
// captured functions.
auto captureInfo = TC.getLoweredLocalCaptures(constant);
// Capture generic parameters from the enclosing context if necessary.
auto closure = *constant.getAnyFunctionRef();
auto genericSig = getEffectiveGenericSignature(closure, captureInfo);
auto innerExtInfo =
AnyFunctionType::ExtInfoBuilder(FunctionType::Representation::Thin,
funcType->isThrowing())
.withAsync(funcType->isAsync())
.build();
return CanAnyFunctionType::get(
getCanonicalSignatureOrNull(genericSig),
funcType.getParams(), funcType.getResult(),
innerExtInfo);
}
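// Illustrative example (not from the original source): in
// 'func outer<T>(_ t: T) { let c = { print(t) } }', the closure captures
// 't', so its lowered interface type carries outer's generic signature,
// '<T> () -> ()', even though the closure declares no generic parameters
// of its own.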
CanAnyFunctionType TypeConverter::makeConstantInterfaceType(SILDeclRef c) {
if (auto *derivativeId = c.getDerivativeFunctionIdentifier()) {
auto originalFnTy =
makeConstantInterfaceType(c.asAutoDiffOriginalFunction());
auto *derivativeFnTy = originalFnTy->getAutoDiffDerivativeFunctionType(
derivativeId->getParameterIndices(), derivativeId->getKind(),
LookUpConformanceInModule(&M));
return cast<AnyFunctionType>(derivativeFnTy->getCanonicalType());
}
auto *vd = c.loc.dyn_cast<ValueDecl *>();
switch (c.kind) {
case SILDeclRef::Kind::Func: {
CanAnyFunctionType funcTy;
if (auto *ACE = c.loc.dyn_cast<AbstractClosureExpr *>()) {
// FIXME: Closures could have an interface type computed by Sema.
funcTy = cast<AnyFunctionType>(
ACE->getType()->mapTypeOutOfContext()->getCanonicalType());
} else {
funcTy = cast<AnyFunctionType>(
vd->getInterfaceType()->getCanonicalType());
}
return getFunctionInterfaceTypeWithCaptures(*this, funcTy, c);
}
case SILDeclRef::Kind::EnumElement: {
auto funcTy = cast<AnyFunctionType>(
vd->getInterfaceType()->getCanonicalType());
auto sig = vd->getDeclContext()->getGenericSignatureOfContext();
return CanAnyFunctionType::get(getCanonicalSignatureOrNull(sig),
funcTy->getParams(),
funcTy.getResult(),
funcTy->getExtInfo());
}
case SILDeclRef::Kind::Allocator: {
auto *cd = cast<ConstructorDecl>(vd);
auto funcTy = cast<AnyFunctionType>(
cd->getInterfaceType()->getCanonicalType());
return getFunctionInterfaceTypeWithCaptures(*this, funcTy, c);
}
case SILDeclRef::Kind::Initializer: {
auto *cd = cast<ConstructorDecl>(vd);
auto funcTy = cast<AnyFunctionType>(
cd->getInitializerInterfaceType()->getCanonicalType());
return getFunctionInterfaceTypeWithCaptures(*this, funcTy, c);
}
case SILDeclRef::Kind::Destroyer:
case SILDeclRef::Kind::Deallocator:
return getDestructorInterfaceType(cast<DestructorDecl>(vd),
c.kind == SILDeclRef::Kind::Deallocator,
c.isForeign);
case SILDeclRef::Kind::GlobalAccessor: {
VarDecl *var = cast<VarDecl>(vd);
assert(var->hasStorage() &&
"constant ref to computed global var");
return getGlobalAccessorType(var->getInterfaceType()->getCanonicalType());
}
case SILDeclRef::Kind::DefaultArgGenerator:
return getDefaultArgGeneratorInterfaceType(c);
case SILDeclRef::Kind::StoredPropertyInitializer:
return getStoredPropertyInitializerInterfaceType(cast<VarDecl>(vd));
case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
return getPropertyWrapperBackingInitializerInterfaceType(*this,
cast<VarDecl>(vd));
case SILDeclRef::Kind::IVarInitializer:
return getIVarInitDestroyerInterfaceType(cast<ClassDecl>(vd),
c.isForeign, false);
case SILDeclRef::Kind::IVarDestroyer:
return getIVarInitDestroyerInterfaceType(cast<ClassDecl>(vd),
c.isForeign, true);
}
llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
GenericSignature
TypeConverter::getConstantGenericSignature(SILDeclRef c) {
auto *vd = c.loc.dyn_cast<ValueDecl *>();
/// Get the function generic params, including outer params.
switch (c.kind) {
case SILDeclRef::Kind::Func:
case SILDeclRef::Kind::Allocator:
case SILDeclRef::Kind::Initializer:
case SILDeclRef::Kind::Destroyer:
case SILDeclRef::Kind::Deallocator: {
auto captureInfo = getLoweredLocalCaptures(c);
return getEffectiveGenericSignature(
*c.getAnyFunctionRef(), captureInfo);
}
case SILDeclRef::Kind::IVarInitializer:
case SILDeclRef::Kind::IVarDestroyer:
return cast<ClassDecl>(vd)->getGenericSignature();
case SILDeclRef::Kind::DefaultArgGenerator: {
// Use the generic environment of the original function.
auto captureInfo = getLoweredLocalCaptures(c);
return getEffectiveGenericSignature(
vd->getInnermostDeclContext(), captureInfo);
}
case SILDeclRef::Kind::EnumElement:
case SILDeclRef::Kind::GlobalAccessor:
case SILDeclRef::Kind::StoredPropertyInitializer:
case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
return vd->getDeclContext()->getGenericSignatureOfContext();
}
llvm_unreachable("Unhandled SILDeclRefKind in switch.");
}
GenericEnvironment *
TypeConverter::getConstantGenericEnvironment(SILDeclRef c) {
if (auto sig = getConstantGenericSignature(c))
return sig->getGenericEnvironment();
return nullptr;
}
SILType TypeConverter::getSubstitutedStorageType(TypeExpansionContext context,
AbstractStorageDecl *value,
Type lvalueType) {
// The l-value type is the result of applying substitutions to
// the type-of-reference. Essentially, we want to apply those
// same substitutions to value->getType().
// Canonicalize and lower the l-value's object type.
AbstractionPattern origType = getAbstractionPattern(value);
CanType substType = lvalueType->getCanonicalType();
assert(!isa<LValueType>(substType));
// Look through reference storage on the original type.
auto origRefType = origType.getAs<ReferenceStorageType>();
if (origRefType) {
origType = origType.getReferenceStorageReferentType();
substType = substType.getReferenceStorageReferent();
}
CanType substLoweredType = getLoweredRValueType(context, origType, substType);
// Type substitution preserves the structure of a type, and the
// type-of-reference differs only in the outermost structural types.
// So we just need to undo the changes made by getTypeOfReference and
// then reapply them to the substituted type.
// The only really significant manipulation there is with @weak and
// @unowned.
if (origRefType) {
substLoweredType = CanReferenceStorageType::get(substType,
origRefType->getOwnership());
}
return SILType::getPrimitiveAddressType(substLoweredType);
}
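// Illustrative example (not from the original source): substituting
// 'T := AnyObject' into storage of original type '@sil_weak Optional<T>'
// lowers the referent 'Optional<AnyObject>' first and then re-wraps it
// in the same @sil_weak reference storage before forming the address
// type.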
ProtocolDispatchStrategy
TypeConverter::getProtocolDispatchStrategy(ProtocolDecl *P) {
// ObjC protocols use ObjC method dispatch, and Swift protocols
// use witness tables.
if (P->isObjC())
return ProtocolDispatchStrategy::ObjC;
return ProtocolDispatchStrategy::Swift;
}
/// If a capture references a local function, return a reference to that
/// function.
static Optional<AnyFunctionRef>
getAnyFunctionRefFromCapture(CapturedValue capture) {
if (auto *afd = dyn_cast<AbstractFunctionDecl>(capture.getDecl()))
return AnyFunctionRef(afd);
return None;
}
bool
TypeConverter::hasLoweredLocalCaptures(SILDeclRef fn) {
return !getLoweredLocalCaptures(fn).getCaptures().empty();
}
CaptureInfo
TypeConverter::getLoweredLocalCaptures(SILDeclRef fn) {
PrettyStackTraceSILLocation stack("getting lowered local captures",
fn.getAsRegularLocation(), Context);
// If we're guaranteed to never have local captures, bail out now.
switch (fn.kind) {
case SILDeclRef::Kind::StoredPropertyInitializer:
case SILDeclRef::Kind::PropertyWrapperBackingInitializer:
return CaptureInfo::empty();
default:
if (fn.hasDecl()) {
if (!fn.getDecl()->isLocalCapture())
return CaptureInfo::empty();
}
break;
}
fn.isForeign = 0;
// See if we've cached the lowered capture list for this function.
auto found = LoweredCaptures.find(fn);
if (found != LoweredCaptures.end())
return found->second;
// Recursively collect transitive captures from captured local functions.
llvm::DenseSet<AnyFunctionRef> visitedFunctions;
llvm::MapVector<ValueDecl*,CapturedValue> captures;
// If there is a capture of 'self' with dynamic 'Self' type, it goes last so
// that IRGen can pass dynamic 'Self' metadata.
Optional<CapturedValue> selfCapture;
bool capturesGenericParams = false;
DynamicSelfType *capturesDynamicSelf = nullptr;
OpaqueValueExpr *capturesOpaqueValue = nullptr;
std::function<void (CaptureInfo captureInfo, DeclContext *dc)> collectCaptures;
std::function<void (AnyFunctionRef)> collectFunctionCaptures;
std::function<void (SILDeclRef)> collectConstantCaptures;
collectCaptures = [&](CaptureInfo captureInfo, DeclContext *dc) {
assert(captureInfo.hasBeenComputed());
if (captureInfo.hasGenericParamCaptures())
capturesGenericParams = true;
if (captureInfo.hasDynamicSelfCapture())
capturesDynamicSelf = captureInfo.getDynamicSelfType();
if (captureInfo.hasOpaqueValueCapture())
capturesOpaqueValue = captureInfo.getOpaqueValue();
SmallVector<CapturedValue, 4> localCaptures;
captureInfo.getLocalCaptures(localCaptures);
for (auto capture : localCaptures) {
// If the capture is of another local function, grab its transitive
// captures instead.
if (auto capturedFn = getAnyFunctionRefFromCapture(capture)) {
collectFunctionCaptures(*capturedFn);
continue;
}
// If the capture is of a computed property, grab the transitive captures
// of its accessors.
if (auto capturedVar = dyn_cast<VarDecl>(capture.getDecl())) {
auto collectAccessorCaptures = [&](AccessorKind kind) {
if (auto *accessor = capturedVar->getParsedAccessor(kind))
collectFunctionCaptures(accessor);
};
if (!capture.isDirect()) {
auto impl = capturedVar->getImplInfo();
switch (impl.getReadImpl()) {
case ReadImplKind::Stored:
// Will capture storage later.
break;
case ReadImplKind::Address:
collectAccessorCaptures(AccessorKind::Address);
break;
case ReadImplKind::Get:
collectAccessorCaptures(AccessorKind::Get);
break;
case ReadImplKind::Read:
collectAccessorCaptures(AccessorKind::Read);
break;
case ReadImplKind::Inherited:
llvm_unreachable("inherited local variable?");
}
switch (impl.getWriteImpl()) {
case WriteImplKind::Immutable:
case WriteImplKind::Stored:
break;
case WriteImplKind::StoredWithObservers:
collectAccessorCaptures(AccessorKind::WillSet);
collectAccessorCaptures(AccessorKind::DidSet);
break;
case WriteImplKind::Set:
collectAccessorCaptures(AccessorKind::Set);
break;
case WriteImplKind::MutableAddress:
collectAccessorCaptures(AccessorKind::MutableAddress);
break;
case WriteImplKind::Modify:
collectAccessorCaptures(AccessorKind::Modify);
break;
case WriteImplKind::InheritedWithObservers:
llvm_unreachable("inherited local variable");
}
switch (impl.getReadWriteImpl()) {
case ReadWriteImplKind::Immutable:
case ReadWriteImplKind::Stored:
break;
case ReadWriteImplKind::MaterializeToTemporary:
// We've already processed the read and write operations.
break;
case ReadWriteImplKind::MutableAddress:
collectAccessorCaptures(AccessorKind::MutableAddress);
break;
case ReadWriteImplKind::Modify:
collectAccessorCaptures(AccessorKind::Modify);
break;
case ReadWriteImplKind::StoredWithDidSet:
// We've already processed the didSet operation.
break;
case ReadWriteImplKind::InheritedWithDidSet:
llvm_unreachable("inherited local variable");
}
}
if (!capturedVar->hasStorage())
continue;
// We can always capture the storage in these cases.
Type captureType = capturedVar->getType()->getMetatypeInstanceType();
if (auto *selfType = captureType->getAs<DynamicSelfType>()) {
captureType = selfType->getSelfType();
// We're capturing a 'self' value with dynamic 'Self' type;
// handle it specially.
//
// However, only do this if it's a 'let'; if the capture is
// mutable, we're going to be capturing a box or an address.
if (captureType->getClassOrBoundGenericClass() &&
capturedVar->isLet()) {
// If we've already captured the same value, just merge
// flags.
if (selfCapture && selfCapture->getDecl() == capture.getDecl()) {
selfCapture = selfCapture->mergeFlags(capture);
continue;
// Otherwise, record the canonical self capture. It will appear
// at the end of the capture list.
} else if (!selfCapture) {
selfCapture = capture;
continue;
}
// If we end up here, we have multiple different captured values
// with a dynamic 'Self' type. Handle this and any subsequent
// captures via the normal code path below.
}
}
// Fall through to capture the storage.
}
// Collect non-function captures.
ValueDecl *value = capture.getDecl();
auto existing = captures.find(value);
if (existing != captures.end()) {
existing->second = existing->second.mergeFlags(capture);
} else {
captures.insert(std::pair<ValueDecl *, CapturedValue>(value, capture));
}
}
};
collectFunctionCaptures = [&](AnyFunctionRef curFn) {
if (!curFn.getBody())
return;
if (!visitedFunctions.insert(curFn).second)
return;
PrettyStackTraceAnyFunctionRef trace("lowering local captures", curFn);
auto dc = curFn.getAsDeclContext();
collectCaptures(curFn.getCaptureInfo(), dc);
// A function's captures also include its default arguments, because
// when we reference a function we don't track which of its default
// arguments are actually referenced.
//
// FIXME: This should be more fine-grained -- we should only need the
// captures for default arguments that are actually referenced.
if (auto *AFD = curFn.getAbstractFunctionDecl()) {
for (auto *P : *AFD->getParameters()) {
if (P->hasDefaultExpr())
collectCaptures(P->getDefaultArgumentCaptureInfo(), dc);
}
}
};
collectConstantCaptures = [&](SILDeclRef curFn) {
if (curFn.isDefaultArgGenerator()) {
PrettyStackTraceSILLocation stack("lowering local captures",
fn.getAsRegularLocation(), Context);
if (auto *afd = dyn_cast<AbstractFunctionDecl>(curFn.getDecl())) {
auto *param = getParameterAt(afd, curFn.defaultArgIndex);
if (param->hasDefaultExpr()) {
auto dc = afd->getInnermostDeclContext();
collectCaptures(param->getDefaultArgumentCaptureInfo(), dc);
}
return;
}
if (curFn.getDecl()->getInnermostDeclContext()
->getGenericSignatureOfContext())
capturesGenericParams = true;
return;
}
collectFunctionCaptures(*curFn.getAnyFunctionRef());
};
collectConstantCaptures(fn);
SmallVector<CapturedValue, 4> resultingCaptures;
for (auto capturePair : captures) {
resultingCaptures.push_back(capturePair.second);
}
// If we captured an opaque value, add it.
if (capturesOpaqueValue) {
resultingCaptures.push_back(CapturedValue(capturesOpaqueValue, 0));
}
// If we captured the dynamic 'Self' type and we have a 'self' value also,
// add it as the final capture. Otherwise, add a fake hidden capture for
// the dynamic 'Self' metatype.
if (selfCapture.hasValue()) {
resultingCaptures.push_back(*selfCapture);
} else if (capturesDynamicSelf) {
selfCapture = CapturedValue::getDynamicSelfMetadata();
resultingCaptures.push_back(*selfCapture);
}
// Cache the uniqued set of transitive captures.
CaptureInfo info{Context, resultingCaptures, capturesDynamicSelf,
capturesOpaqueValue, capturesGenericParams};
auto inserted = LoweredCaptures.insert({fn, info});
assert(inserted.second && "already in map?!");
(void)inserted;
return info;
}
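// Illustrative example (not from the original source): in
// 'func f() { var x = 0; func g() { x += 1 }; func h() { g() } }',
// the lowered captures of 'h' include 'x', because the capture of 'g'
// is replaced above by g's own transitive captures.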
/// Given that type1 is known to be a subtype of type2, check if the two
/// types have the same calling convention representation.
TypeConverter::ABIDifference
TypeConverter::checkForABIDifferences(SILModule &M,
SILType type1, SILType type2,
bool thunkOptionals) {
// Unwrap optionals, but remember that we did.
bool type1WasOptional = false;
bool type2WasOptional = false;
if (auto object = type1.getOptionalObjectType()) {
type1WasOptional = true;
type1 = object;
}
if (auto object = type2.getOptionalObjectType()) {
type2WasOptional = true;
type2 = object;
}
bool optionalityChange;
if (thunkOptionals) {
// Forcing IUOs always requires a thunk.
if (type1WasOptional && !type2WasOptional)
return ABIDifference::NeedsThunk;
// Except for the above case, we should not be making a value less optional.
// If we're introducing a level of optionality, only certain types are
// ABI-compatible -- check below.
optionalityChange = (!type1WasOptional && type2WasOptional);
} else {
// We haven't implemented codegen for optional thunking at all levels
// (particularly objc_blocks at depth). Just accept ABI compatibility
// in either direction in these cases.
optionalityChange = type1WasOptional != type2WasOptional;
}
// If the types are identical and there was no optionality change,
// we're done.
if (type1 == type2 && !optionalityChange)
return ABIDifference::CompatibleRepresentation;
// Classes, class-constrained archetypes, and pure-ObjC existential types
// all have single retainable pointer representation; optionality change
// is allowed.
if (type1.getASTType()->satisfiesClassConstraint() &&
type2.getASTType()->satisfiesClassConstraint())
return ABIDifference::CompatibleRepresentation;
// Function parameters are ABI compatible if their differences are
// trivial.
if (auto fnTy1 = type1.getAs<SILFunctionType>()) {
if (auto fnTy2 = type2.getAs<SILFunctionType>()) {
// Async/synchronous conversions always need a thunk.
if (fnTy1->isAsync() != fnTy2->isAsync())
return ABIDifference::NeedsThunk;
// @convention(block) is a single retainable pointer so optionality
// change is allowed.
if (optionalityChange)
if (fnTy1->getRepresentation() != fnTy2->getRepresentation() ||
fnTy1->getRepresentation() != SILFunctionTypeRepresentation::Block)
return ABIDifference::NeedsThunk;
return checkFunctionForABIDifferences(M, fnTy1, fnTy2);
}
}
// Metatypes are ABI-compatible if they have the same representation.
if (auto meta1 = type1.getAs<MetatypeType>()) {
if (auto meta2 = type2.getAs<MetatypeType>()) {
if (meta1->getRepresentation() == meta2->getRepresentation() &&
(!optionalityChange ||
meta1->getRepresentation() == MetatypeRepresentation::Thick))
return ABIDifference::CompatibleRepresentation;
}
}
// Existential metatypes which are not identical are only ABI-compatible
// in @objc representation.
//
// Optionality change is allowed since @objc existential metatypes have a
// single retainable pointer representation.
if (auto meta1 = type1.getAs<ExistentialMetatypeType>()) {
if (auto meta2 = type2.getAs<ExistentialMetatypeType>()) {
if (meta1->getRepresentation() == meta2->getRepresentation() &&
meta1->getRepresentation() == MetatypeRepresentation::ObjC)
return ABIDifference::CompatibleRepresentation;
}
}
// Tuple types are ABI-compatible if their elements are.
if (!optionalityChange) {
if (auto tuple1 = type1.getAs<TupleType>()) {
if (auto tuple2 = type2.getAs<TupleType>()) {
if (tuple1->getNumElements() != tuple2->getNumElements())
return ABIDifference::NeedsThunk;
for (unsigned i = 0, e = tuple1->getNumElements(); i < e; i++) {
if (checkForABIDifferences(M,
type1.getTupleElementType(i),
type2.getTupleElementType(i))
!= ABIDifference::CompatibleRepresentation)
return ABIDifference::NeedsThunk;
}
// Tuple lengths and elements match
return ABIDifference::CompatibleRepresentation;
}
}
}
// The types are different, or there was an optionality change resulting
// in a change in representation.
return ABIDifference::NeedsThunk;
}
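// Illustrative example (not from the original source): for a class 'C',
// 'C' and 'C?' share a single retainable-pointer representation, so
// '(Int) -> C' can stand in for '(Int) -> C?' without a thunk, whereas
// 'Int' and 'Int?' differ in layout and always need one.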
namespace {
class HaveDifferentAbstractStructure
: public CanTypeDifferenceVisitor<HaveDifferentAbstractStructure> {
public:
// Treat any sort of abstract type as equivalent.
static bool isAbstract(CanType type) {
return (isa<SubstitutableType>(type) || isa<DependentMemberType>(type));
}
// We can fast-path some of these checks by providing these two overrides:
bool visitSubstitutableType(CanSubstitutableType type1,
CanSubstitutableType type2) {
return false;
}
bool visitDependentMemberType(CanDependentMemberType type1,
CanDependentMemberType type2) {
return false;
}
// We also need to handle the general case where we have different
// kinds of substitutable types.
bool visitDifferentComponentTypes(CanType type1, CanType type2) {
// This is a difference only if both types aren't abstract.
return !(isAbstract(type1) && isAbstract(type2));
}
// Change the rules used for SIL function types to only consider
// the basic structure, not any substitutions.
bool visitSILFunctionType(CanSILFunctionType type1,
CanSILFunctionType type2) {
return visitSILFunctionTypeStructure(type1, type2)
|| visitSILFunctionTypeComponents(type1, type2);
}
};
}
static bool haveDifferentAbstractStructure(CanType type1, CanType type2) {
return HaveDifferentAbstractStructure().visit(type1, type2);
}
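// Illustrative example (not from the original source): '(T) -> Int' and
// '(U.Element) -> Int' have the same abstract structure, since 'T' and
// 'U.Element' are both abstract, but '(T) -> Int' and '(Int) -> Int' do
// not, since 'Int' is concrete.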
static TypeConverter::ABIDifference
checkForABIDifferencesInYield(TypeConverter &TC, SILModule &M,
SILFunctionType *fnTy1, SILYieldInfo yield1,
SILFunctionType *fnTy2, SILYieldInfo yield2) {
// Require the interface types to have the same basic abstract
// structure, ignoring any substitutions from the function type.
// This structure is what determines the signature of the continuation
// function.
if (haveDifferentAbstractStructure(yield1.getInterfaceType(),
yield2.getInterfaceType()))
return TypeConverter::ABIDifference::NeedsThunk;
// Also make sure that the actual yield types match in ABI.
return TC.checkForABIDifferences(
M, yield1.getSILStorageType(M, fnTy1, TypeExpansionContext::minimal()),
yield2.getSILStorageType(M, fnTy2, TypeExpansionContext::minimal()));
}
TypeConverter::ABIDifference
TypeConverter::checkFunctionForABIDifferences(SILModule &M,
SILFunctionType *fnTy1,
SILFunctionType *fnTy2) {
// For now, only differentiate representation from calling convention when
// staging in substituted function types.
//
// We might still want to conditionalize this behavior even after we commit
// substituted function types, to avoid bloating
// IR for platforms that don't differentiate function type representations.
bool DifferentFunctionTypesHaveDifferentRepresentation
= Context.LangOpts.EnableSubstSILFunctionTypesForFunctionValues;
// TODO: For C language types we should consider the attached Clang types.
if (fnTy1->getLanguage() == SILFunctionLanguage::C)
DifferentFunctionTypesHaveDifferentRepresentation = false;
// Fast path -- if both functions were unwrapped from a CanSILFunctionType,
// we might have pointer equality here.
if (fnTy1 == fnTy2)
return ABIDifference::CompatibleRepresentation;
if (fnTy1->getParameters().size() != fnTy2->getParameters().size())
return ABIDifference::NeedsThunk;
if (fnTy1->getNumResults() != fnTy2->getNumResults())
return ABIDifference::NeedsThunk;
if (fnTy1->getNumYields() != fnTy2->getNumYields())
return ABIDifference::NeedsThunk;
// If we don't have a context but the other type does, we'll return
// one of the '...ThinToThick' results below.
if (fnTy1->getExtInfo().hasContext() &&
fnTy1->getCalleeConvention() != fnTy2->getCalleeConvention())
return ABIDifference::NeedsThunk;
for (unsigned i : indices(fnTy1->getResults())) {
auto result1 = fnTy1->getResults()[i];
auto result2 = fnTy2->getResults()[i];
if (result1.getConvention() != result2.getConvention())
return ABIDifference::NeedsThunk;
if (checkForABIDifferences(M,
result1.getSILStorageType(
M, fnTy1, TypeExpansionContext::minimal()),
result2.getSILStorageType(
M, fnTy2, TypeExpansionContext::minimal()),
/*thunk IUOs*/ fnTy1->getLanguage() ==
SILFunctionLanguage::Swift) !=
ABIDifference::CompatibleRepresentation)
return ABIDifference::NeedsThunk;
}
for (unsigned i : indices(fnTy1->getYields())) {
auto yield1 = fnTy1->getYields()[i];
auto yield2 = fnTy2->getYields()[i];
if (yield1.getConvention() != yield2.getConvention())
return ABIDifference::NeedsThunk;
if (checkForABIDifferencesInYield(*this, M, fnTy1, yield1, fnTy2, yield2)
!= ABIDifference::CompatibleRepresentation)
return ABIDifference::NeedsThunk;
}
// If one type does not have an error result, we can still trivially cast
// (casting away an error result is only safe if the function never throws,
// of course).
if (fnTy1->hasErrorResult() && fnTy2->hasErrorResult()) {
auto error1 = fnTy1->getErrorResult(), error2 = fnTy2->getErrorResult();
if (error1.getConvention() != error2.getConvention())
return ABIDifference::NeedsThunk;
if (checkForABIDifferences(
M,
error1.getSILStorageType(M, fnTy1, TypeExpansionContext::minimal()),
error2.getSILStorageType(M, fnTy2, TypeExpansionContext::minimal()),
/*thunk IUOs*/ fnTy1->getLanguage() ==
SILFunctionLanguage::Swift) !=
ABIDifference::CompatibleRepresentation)
return ABIDifference::NeedsThunk;
}
for (unsigned i = 0, e = fnTy1->getParameters().size(); i < e; ++i) {
auto param1 = fnTy1->getParameters()[i], param2 = fnTy2->getParameters()[i];
if (param1.getConvention() != param2.getConvention())
return ABIDifference::NeedsThunk;
// Parameters are contravariant and our relation is not symmetric, so
// make sure to flip the relation around.
if (checkForABIDifferences(
M,
param2.getSILStorageType(M, fnTy2, TypeExpansionContext::minimal()),
param1.getSILStorageType(M, fnTy1, TypeExpansionContext::minimal()),
/*thunk IUOs*/ fnTy1->getLanguage() ==
SILFunctionLanguage::Swift) !=
ABIDifference::CompatibleRepresentation)
return ABIDifference::NeedsThunk;
}
auto rep1 = fnTy1->getRepresentation(), rep2 = fnTy2->getRepresentation();
if (rep1 != rep2) {
if (rep1 == SILFunctionTypeRepresentation::Thin &&
rep2 == SILFunctionTypeRepresentation::Thick) {
if (DifferentFunctionTypesHaveDifferentRepresentation) {
// FIXME: check whether the representations are compatible modulo
// context
return ABIDifference::CompatibleCallingConvention_ThinToThick;
} else {
return ABIDifference::CompatibleRepresentation_ThinToThick;
}
}
return ABIDifference::NeedsThunk;
}
if (DifferentFunctionTypesHaveDifferentRepresentation)
return ABIDifference::CompatibleCallingConvention;
else
return ABIDifference::CompatibleRepresentation;
}
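// Illustrative note (not from the original source): thin-to-thick is the
// one representation mismatch tolerated above, because a thick function
// is a thin function pointer plus a context that may be left empty; the
// reverse direction always needs a thunk.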
CanSILBoxType
TypeConverter::getInterfaceBoxTypeForCapture(ValueDecl *captured,
CanType loweredInterfaceType,
bool isMutable) {
auto &C = M.getASTContext();
auto signature = getCanonicalSignatureOrNull(
captured->getDeclContext()->getGenericSignatureOfContext());
// If the type is not dependent at all, we can form a concrete box layout.
// We don't need to capture the generic environment.
if (!loweredInterfaceType->hasTypeParameter()) {
auto layout = SILLayout::get(C, nullptr,
SILField(loweredInterfaceType, isMutable));
return SILBoxType::get(C, layout, {});
}
// Otherwise, the layout needs to capture the generic environment of its
// originating scope.
// TODO: We could conceivably minimize the captured generic environment to
// only the parts used by the captured variable.
auto layout = SILLayout::get(C, signature,
SILField(loweredInterfaceType, isMutable));
// Instantiate the layout with identity substitutions.
auto subMap = SubstitutionMap::get(
signature,
[&](SubstitutableType *type) -> Type {
return signature->getCanonicalTypeInContext(type);
},
MakeAbstractConformanceForGenericType());
auto boxTy = SILBoxType::get(C, layout, subMap);
#ifndef NDEBUG
auto loweredContextType = loweredInterfaceType;
auto contextBoxTy = boxTy;
if (signature) {
auto env = signature->getGenericEnvironment();
loweredContextType = env->mapTypeIntoContext(loweredContextType)
->getCanonicalType();
contextBoxTy = cast<SILBoxType>(
env->mapTypeIntoContext(contextBoxTy)
->getCanonicalType());
}
assert(contextBoxTy->getLayout()->getFields().size() == 1 &&
getSILBoxFieldType(TypeExpansionContext::minimal(), contextBoxTy,
*this, 0)
.getASTType() == loweredContextType &&
"box field type doesn't match capture!");
#endif
return boxTy;
}
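// Illustrative example (not from the original source): capturing a
// mutable 'var x: T' from a context generic over 'T' yields the box type
// '<τ_0_0> { var τ_0_0 } <T>', while a fully concrete capture such as
// 'Int' gets the plain layout '{ var Int }' with no signature.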
CanSILBoxType
TypeConverter::getContextBoxTypeForCapture(ValueDecl *captured,
CanType loweredContextType,
GenericEnvironment *env,
bool isMutable) {
CanType loweredInterfaceType = loweredContextType;
if (env) {
auto homeSig = captured->getDeclContext()
->getGenericSignatureOfContext();
loweredInterfaceType =
loweredInterfaceType->mapTypeOutOfContext()
->getCanonicalType(homeSig);
}
auto boxType = getInterfaceBoxTypeForCapture(captured,
loweredInterfaceType,
isMutable);
if (env)
boxType = cast<SILBoxType>(
env->mapTypeIntoContext(boxType)
->getCanonicalType());
return boxType;
}
CanSILBoxType TypeConverter::getBoxTypeForEnumElement(
TypeExpansionContext context, SILType enumType, EnumElementDecl *elt) {
auto *enumDecl = enumType.getEnumOrBoundGenericEnum();
assert(elt->getDeclContext() == enumDecl);
assert(elt->isIndirect() || elt->getParentEnum()->isIndirect());
auto &C = M.getASTContext();
auto boxSignature = getCanonicalSignatureOrNull(
enumDecl->getGenericSignature());
if (boxSignature == CanGenericSignature()) {
auto eltIntfTy = elt->getArgumentInterfaceType();
auto boxVarTy = getLoweredRValueType(context, eltIntfTy);
auto layout = SILLayout::get(C, nullptr, SILField(boxVarTy, true));
return SILBoxType::get(C, layout, {});
}
// Use the enum's signature for the box type.
auto boundEnum = enumType.getASTType();
// Lower the enum element's argument in the box's context.
auto eltIntfTy = elt->getArgumentInterfaceType();
auto boxVarTy = getLoweredRValueType(context,
getAbstractionPattern(elt), eltIntfTy);
auto layout = SILLayout::get(C, boxSignature, SILField(boxVarTy, true));
// Instantiate the layout with enum's substitution list.
auto subMap = boundEnum->getContextSubstitutionMap(
&M, enumDecl, enumDecl->getGenericEnvironment());
auto boxTy = SILBoxType::get(C, layout, subMap);
return boxTy;
}
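// Illustrative example (not from the original source): for
// 'indirect enum List<T> { case empty; case cons(T, List<T>) }', the box
// for 'cons' has layout '<τ_0_0> { var (τ_0_0, List<τ_0_0>) } <T>',
// instantiated with the enum's own substitutions as above.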
static void countNumberOfInnerFields(unsigned &fieldsCount, TypeConverter &TC,
SILType Ty,
TypeExpansionContext expansion) {
if (auto *structDecl = Ty.getStructOrBoundGenericStruct()) {
assert(
!structDecl->isResilient(&TC.M, expansion.getResilienceExpansion()) &&
" FSO should not be trying to explode resilient (ie address-only) "
"types at all");
for (auto *prop : structDecl->getStoredProperties()) {
SILType propTy = Ty.getFieldType(prop, TC, expansion);
unsigned fieldsCountBefore = fieldsCount;
countNumberOfInnerFields(fieldsCount, TC, propTy, expansion);
if (fieldsCount == fieldsCountBefore) {
// The field contributed no inner fields of its own (it is a leaf),
// so count the field itself; this keeps the count of a struct that
// wraps BigStructType equal to that of BigStructType, not +1.
++fieldsCount;
}
}
return;
}
if (auto tupleTy = Ty.getAs<TupleType>()) {
for (auto elt : tupleTy.getElementTypes()) {
auto silElt = SILType::getPrimitiveObjectType(elt);
countNumberOfInnerFields(fieldsCount, TC, silElt, expansion);
}
return;
}
if (auto *enumDecl = Ty.getEnumOrBoundGenericEnum()) {
if (enumDecl->isIndirect()) {
return;
}
assert(!enumDecl->isResilient(&TC.M, expansion.getResilienceExpansion()) &&
" FSO should not be trying to explode resilient (ie address-only) "
"types at all");
unsigned fieldsCountBefore = fieldsCount;
unsigned maxEnumCount = 0;
for (auto elt : enumDecl->getAllElements()) {
if (!elt->hasAssociatedValues())
continue;
if (elt->isIndirect())
continue;
// One might assume an enum always has a fields count of 1, which
// holds for the current uses of this code (we shouldn't expand
// enums). Counting more than one field "future-proofs" the
// heuristic in case a pass ever tries to explode enums.
auto payloadTy = Ty.getEnumElementType(elt, TC, expansion);
fieldsCount = 0;
countNumberOfInnerFields(fieldsCount, TC, payloadTy, expansion);
if (fieldsCount > maxEnumCount) {
maxEnumCount = fieldsCount;
}
}
fieldsCount = fieldsCountBefore + maxEnumCount;
return;
}
}
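// Illustrative example (not from the original source): for
// 'struct S { var a: Int; var b: (Float, Float) }' the count is 3 -- one
// leaf for 'a' and two for the elements of 'b' -- while an enum counts
// the maximum over its non-indirect payloads.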
unsigned TypeConverter::countNumberOfFields(SILType Ty,
TypeExpansionContext expansion) {
auto key = std::make_pair(Ty, unsigned(expansion.getResilienceExpansion()));
auto Iter = TypeFields.find(key);
if (Iter != TypeFields.end()) {
return std::max(Iter->second, 1U);
}
unsigned fieldsCount = 0;
countNumberOfInnerFields(fieldsCount, *this, Ty, expansion);
TypeFields[key] = fieldsCount;
return std::max(fieldsCount, 1U);
}
void TypeLowering::print(llvm::raw_ostream &os) const {
auto BOOL = [&](bool b) -> StringRef {
if (b)
return "true";
return "false";
};
os << "Type Lowering for lowered type: " << LoweredType << ".\n"
<< "Expansion: " << getResilienceExpansion() << "\n"
<< "isTrivial: " << BOOL(Properties.isTrivial()) << ".\n"
<< "isFixedABI: " << BOOL(Properties.isFixedABI()) << ".\n"
<< "isAddressOnly: " << BOOL(Properties.isAddressOnly()) << ".\n"
<< "isResilient: " << BOOL(Properties.isResilient()) << ".\n"
<< "\n";
}
void TypeLowering::dump() const {
print(llvm::dbgs());
}
// DynaMix
// Copyright (c) 2013-2018 Borislav Stanimirov, Zahary Karadjov
//
// Distributed under the MIT Software License
// See accompanying file LICENSE.txt or copy at
// https://opensource.org/licenses/MIT
//
// this file is automatically generated by a script
//
#define I_DYNAMIX_MESSAGE0_DECL(export, message_name, method_name, return_type, constness, message_mechanism ) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type > \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE0_UNI(export, message_name, method_name, return_type, constness ) \
I_DYNAMIX_MESSAGE0_DECL(export, message_name, method_name, return_type, constness, unicast ) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj ) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj ); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj ) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj ); \
}
#define I_DYNAMIX_MESSAGE0_MULTI(export, message_name, method_name, return_type, constness ) \
I_DYNAMIX_MESSAGE0_DECL(export, message_name, method_name, return_type, constness, multicast ) \
/* step 4: define the message functions -> the ones that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator ); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj ) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator ); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj ) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj ); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj ) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj ); \
}
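/* Illustrative usage (not part of this generated header): user code does
   not invoke these internal macros directly. Assuming the library's
   documented public wrappers, a unicast message is declared and called
   roughly like this (the message and mixin names are hypothetical):

     DYNAMIX_MESSAGE_1(void, set_health, int, hp);  // header: expands to a *_UNI macro
     DYNAMIX_DEFINE_MESSAGE(set_health);            // exactly one .cpp

     dynamix::object obj;
     // ... mutate obj with a mixin whose class implements set_health ...
     set_health(obj, 42);  // dispatches to that mixin's method
*/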
#define I_DYNAMIX_MESSAGE1_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE1_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0) \
I_DYNAMIX_MESSAGE1_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0)); \
}
#define I_DYNAMIX_MESSAGE1_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0) \
I_DYNAMIX_MESSAGE1_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0) \
/* step 4: define the message functions -> the ones that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0); \
}
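/* Illustrative usage (not part of this generated header): a multicast
   message calls every mixin in the object that implements it. Assuming
   the documented public wrappers (the message name is hypothetical):

     DYNAMIX_MULTICAST_MESSAGE_1(void, trace, std::ostream&, out);
     trace(obj, std::cout);  // "function C" above: every implementer runs in turn

   The combinator overloads ("function A" and "function B" above) fold the
   individual results instead, e.g. and-ing bools or summing ints.
*/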
#define I_DYNAMIX_MESSAGE2_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0, arg1_type, a1) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type, arg1_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE2_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1) \
I_DYNAMIX_MESSAGE2_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0, arg1_type, a1) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1)); \
}
#define I_DYNAMIX_MESSAGE2_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1) \
I_DYNAMIX_MESSAGE2_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0, arg1_type, a1) \
/* step 4: define the message functions -> the ones that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0, a1); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0, a1); \
}
#define I_DYNAMIX_MESSAGE3_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0, arg1_type, a1, arg2_type, a2) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type, arg1_type, arg2_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE3_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2) \
I_DYNAMIX_MESSAGE3_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0, arg1_type, a1, arg2_type, a2) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2)); \
}
#define I_DYNAMIX_MESSAGE3_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2) \
I_DYNAMIX_MESSAGE3_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0, arg1_type, a1, arg2_type, a2) \
/* step 4: define the message functions -> the ones that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0, a1, a2); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0, a1, a2); \
}
#define I_DYNAMIX_MESSAGE4_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type, arg1_type, arg2_type, arg3_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE4_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3) \
I_DYNAMIX_MESSAGE4_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3)); \
}
#define I_DYNAMIX_MESSAGE4_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3) \
I_DYNAMIX_MESSAGE4_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3) \
/* step 4: define the message functions -> the ones that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0, a1, a2, a3); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0, a1, a2, a3); \
}
#define I_DYNAMIX_MESSAGE5_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type, arg1_type, arg2_type, arg3_type, arg4_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag, that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE5_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4) \
I_DYNAMIX_MESSAGE5_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3), std::forward<arg4_type>(a4)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3), std::forward<arg4_type>(a4)); \
}
#define I_DYNAMIX_MESSAGE5_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4) \
I_DYNAMIX_MESSAGE5_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4) \
/* step 4: define the message functions -> the one that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3, a4); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3, a4); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0, a1, a2, a3, a4); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0, a1, a2, a3, a4); \
}
#define I_DYNAMIX_MESSAGE6_DECL(export, message_name, method_name, return_type, constness, message_mechanism , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4, arg5_type, a5) \
/* mechanism shows whether it's a multicast or unicast */ \
\
/* step 1: define the message struct */ \
struct export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) : public ::dynamix::internal::I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism) \
<I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name), constness ::dynamix::object, return_type , arg0_type, arg1_type, arg2_type, arg3_type, arg4_type, arg5_type> \
{ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)() \
: I_DYNAMIX_MESSAGE_CALLER_STRUCT(message_mechanism)(I_DYNAMIX_PP_STRINGIZE(message_name)) \
{} \
/* MethodOwner is the actual owner of the message (either the mixin or one of its parents) */ \
template <typename Mixin, typename MethodOwner> \
::dynamix::internal::func_ptr get_caller_for() const \
{ \
/* prevent the linker from optimizing away the caller function */ \
static caller_func the_caller = I_DYNAMIX_CALLER_NAME(constness)<Mixin, MethodOwner, &MethodOwner::method_name>; \
/* cast the caller to a void (*)() - safe according to the standard */ \
return reinterpret_cast< ::dynamix::internal::func_ptr>(the_caller); \
} \
}; \
/* step 2: define a message tag, that will be used to identify the message in feature lists */ \
/* it would have been nice if we could set this global variable to the unique global instance of the feature*/ \
/* but unfortunately we cannot trust dynamic libraries to keep it straight for us */ \
/* hence we rely on a getter like the mixin one */ \
extern export I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name) * I_DYNAMIX_MESSAGE_TAG(message_name); \
/* step 3: declare the feature getter and manual registrator for the message */ \
extern export ::dynamix::feature& _dynamix_get_mixin_feature_safe(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export const ::dynamix::feature& _dynamix_get_mixin_feature_fast(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*); \
extern export void _dynamix_register_mixin_feature(const I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)*);
#define I_DYNAMIX_MESSAGE6_UNI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4, arg5_type, a5) \
I_DYNAMIX_MESSAGE6_DECL(export, message_name, method_name, return_type, constness, unicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4, arg5_type, a5) \
/* step 4: define the message function -> the one that will be called for the objects */ \
inline return_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3), std::forward<arg4_type>(a4), std::forward<arg5_type>(a5)); \
}\
/* also define a pointer function */ \
inline return_type method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5) \
{\
return I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , std::forward<arg0_type>(a0), std::forward<arg1_type>(a1), std::forward<arg2_type>(a2), std::forward<arg3_type>(a3), std::forward<arg4_type>(a4), std::forward<arg5_type>(a5)); \
}
#define I_DYNAMIX_MESSAGE6_MULTI(export, message_name, method_name, return_type, constness , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4, arg5_type, a5) \
I_DYNAMIX_MESSAGE6_DECL(export, message_name, method_name, return_type, constness, multicast , arg0_type, a0, arg1_type, a1, arg2_type, a2, arg3_type, a3, arg4_type, a4, arg5_type, a5) \
/* step 4: define the message functions -> the one that will be called for the objects */ \
/* function A: concrete combinator */ \
template <typename Combinator> \
void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5, Combinator& _d_combinator) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3, a4, a5); \
} \
/* function B: template combinator -> can be called on a single line */ \
template <template <typename> class Combinator> \
typename Combinator<return_type>::result_type method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5) \
{ \
Combinator<return_type> _d_combinator; \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_combinator_call(_d_obj, _d_combinator , a0, a1, a2, a3, a4, a5); \
return _d_combinator.result(); \
} \
/* function C: no combinator */ \
inline void method_name(constness ::dynamix::object& _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5) \
{ \
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(_d_obj , a0, a1, a2, a3, a4, a5); \
} \
/* also define a pointer function with no combinator */ \
inline void method_name(constness ::dynamix::object* _d_obj , arg0_type a0, arg1_type a1, arg2_type a2, arg3_type a3, arg4_type a4, arg5_type a5) \
{\
/* not forwarded arguments. We DO want an error if some of them are rvalue references */ \
I_DYNAMIX_MESSAGE_STRUCT_NAME(message_name)::make_call(*_d_obj , a0, a1, a2, a3, a4, a5); \
}
#include "arity_message_macros.hpp"
|
#include <iostream>
int triangle(int number)
{
if (number == 1)
return 1;
return number + triangle(number - 1);
}
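// Note: triangle(n) is simply n * (n + 1) / 2 in closed form; the recursion
// above costs O(n) per call and main() below re-evaluates it on every loop
// iteration, which contributes to the ~189 s runtime noted at the end.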
int countDivisors(int number)
{
int divisors = 2;
for (int i = number / 2; i > 1; i--)
{
if (number % i == 0)
divisors++;
}
return divisors;
}
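// A faster alternative, sketched here but not wired into main() below:
// enumerate candidate divisors only up to sqrt(number), pairing each hit i
// with number / i, turning the O(n) scan above into O(sqrt(n)).
int countDivisorsFast(int number)
{
int divisors = 0;
for (long long i = 1; i * i <= number; i++)
{
if (number % i == 0)
{
divisors += 2; // counts both i and number / i
if (i * i == number)
divisors--; // perfect square: i == number / i, count it once
}
}
return divisors;
}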
int main(int argc, char const *argv[])
{
int n = 3;
int i = 1;
int limit = 500;
while (countDivisors(triangle(n)) <= limit)
// while (n < 20)
{
// std::cout << n << " " << i << " " << triangle(n) << std::endl;
if (i % 2 == 0)
n += 3;
else
n++;
i++;
}
std::cout << triangle(n) << std::endl;
return 0;
}
// time 189.01 s
|
#include <co/mutex.hpp>
#include <co/timed_mutex.hpp>
#include <catch2/catch.hpp>
#include "helper.hpp"
template <typename MutexT>
void testAgainstMutexConcept()
{
REQUIRE_FALSE(std::is_copy_constructible<MutexT>::value);
REQUIRE_FALSE(std::is_copy_assignable<MutexT>::value);
REQUIRE_FALSE(std::is_move_constructible<MutexT>::value);
REQUIRE_FALSE(std::is_move_assignable<MutexT>::value);
GIVEN("an unlocked mutex")
{
MutexT mutex;
WHEN("the mutex is locked")
{
mutex.lock();
THEN("another try to lock should fail") { REQUIRE_FALSE(mutex.try_lock()); }
AND_WHEN("the mutex is unlocked")
{
mutex.unlock();
THEN("locking sould be possible again")
{
mutex.lock();
AND_THEN("another try to lock should fail") { REQUIRE_FALSE(mutex.try_lock()); }
}
}
mutex.unlock();
}
WHEN("the mutex is tried to be locked")
{
auto acquiredLock = mutex.try_lock();
THEN("this attempt should succeed")
{
REQUIRE(acquiredLock);
AND_THEN("another try to lock should fail") { REQUIRE_FALSE(mutex.try_lock()); }
}
AND_WHEN("the mutex is unlocked")
{
mutex.unlock();
THEN("locking sould be possible again")
{
auto acquiredLock2 = mutex.try_lock();
REQUIRE(acquiredLock2);
AND_THEN("another try to lock should fail") { REQUIRE_FALSE(mutex.try_lock()); }
}
}
mutex.unlock();
}
WHEN("the mutex is locked in a coroutine")
{
auto startThread = std::this_thread::get_id();
co::Routine([&]() {
mutex.lock();
THEN("this should not change the exection thread of the coroutine")
{
REQUIRE(startThread == std::this_thread::get_id());
}
mutex.unlock();
}).join();
}
WHEN("the mutex is destructed without use, this should not fail") {}
}
GIVEN("a already locked mutex")
{
MutexT mutex;
mutex.lock();
WHEN("the mutex is locked in a coroutine")
{
auto startThread = std::this_thread::get_id();
co::Routine coro([&]() {
mutex.lock();
THEN("the coroutine should be resumed inline / in the same thread")
{
REQUIRE(startThread == std::this_thread::get_id());
}
mutex.unlock();
});
REQUIRE(coro);
AND_WHEN("the mutex is unlocked outside of a coroutine")
{
mutex.unlock();
coro.join();
}
}
WHEN("the mutex is locked in a coroutine")
{
co::IoContextThreads threads{1};
auto startThread = std::this_thread::get_id();
co::Routine coro([&]() {
mutex.lock();
THEN("the coroutine should not be resumed inline (in the same thread)")
{
REQUIRE_FALSE(startThread == std::this_thread::get_id());
}
mutex.unlock();
});
REQUIRE(coro);
AND_WHEN("the mutex is unlocked in an other coroutine")
{
co::Routine([&]() { mutex.unlock(); }).join();
coro.join();
}
}
}
}
SCENARIO("co::Mutex should fulfill the Mutex concept") { testAgainstMutexConcept<co::Mutex>(); }
SCENARIO("co::experimental::TimedMutex should fulfill the Mutex concept")
{
testAgainstMutexConcept<co::experimental::TimedMutex>();
}
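// The scenarios above drive lock()/try_lock()/unlock() by hand. Application
// code would normally hold the mutex via RAII instead; a minimal sketch,
// assuming co::Mutex models Lockable (which the scenarios above verify):
//
// co::Routine([&]() {
//     std::unique_lock<co::Mutex> lock(mutex); // suspends the coroutine if contended
//     /* ...critical section... */
// }).join(); // released when `lock` leaves scope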
template <typename Mutex>
void runMutexStressTest()
{
static constexpr size_t TotalCnt = 2000000;
static constexpr size_t CoroCnt = 5;
static constexpr size_t LoopCnt = TotalCnt / CoroCnt;
Mutex mutex;
co::IoContextThreads threads{4};
co_tests::Bench bench;
size_t callCnt = 0;
size_t threadSwitches = 0;
auto lastExecThread = std::this_thread::get_id();
auto coroFunc = [&]() {
// Force execution in thread pool
co::await(co::defaultIoContext());
for (size_t i = 0; i < LoopCnt; i++)
{
std::unique_lock<Mutex> lock(mutex);
callCnt++;
bench.update();
if (lastExecThread != std::this_thread::get_id())
{
threadSwitches++;
lastExecThread = std::this_thread::get_id();
}
}
};
std::vector<co::Routine> coros;
coros.reserve(CoroCnt);
for (size_t i = 0; i < CoroCnt; i++)
coros.emplace_back(coroFunc);
for (auto& coro : coros)
co::await(coro);
REQUIRE(callCnt == LoopCnt * CoroCnt);
REQUIRE(threadSwitches <= LoopCnt * CoroCnt);
REQUIRE(threadSwitches >= 4);
std::cout << "Thread switches: " << threadSwitches << std::endl;
}
TEST_CASE("co::Mutex stress tests", "[.StressTest]")
{
SECTION("co::Mutex (default - FairPolicy)")
{
using Mutex = co::Mutex;
runMutexStressTest<Mutex>();
}
SECTION("co::Mutex (unfair - HighThroughputPolicy)")
{
using Mutex = co::BaseMutex<co::impl::HighThroughputPolicy>;
runMutexStressTest<Mutex>();
}
}
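// Note: the leading '.' in the "[.StressTest]" tag marks these tests as
// hidden in Catch2; a default run skips them, and they must be requested
// explicitly, e.g. by passing "[StressTest]" to the test binary.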
|
/***************************************************************
*
* Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
* University of Wisconsin-Madison, WI.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************/
#include "condor_common.h"
#include "proc.h" // for job statuses
#include "condor_accountant.h" // for PriorityDelta
#include "condor_config.h"
#include "analysis.h"
#include "list.h"
#include "simplelist.h"
#include "extArray.h"
#include "condor_classad.h"
#include <iostream>
#include <sstream>
using namespace std;
using namespace classad_analysis;
using namespace classad_analysis::job;
ClassAdAnalyzer::
ClassAdAnalyzer( bool ras ) :
result_as_struct(ras), m_result(NULL), jobReq(NULL) {
stringstream std_rank;
stringstream preempt_rank;
stringstream preempt_prio;
std_rank << "MY." << ATTR_RANK << " > MY." << ATTR_CURRENT_RANK;
preempt_rank << "MY." << ATTR_RANK << " >= MY." << ATTR_CURRENT_RANK;
preempt_prio << "MY." << ATTR_REMOTE_USER_PRIO << " > TARGET." << ATTR_SUBMITTOR_PRIO << " + " << PriorityDelta;
ParseClassAdRvalExpr(std_rank.str().c_str(), std_rank_condition);
ParseClassAdRvalExpr(preempt_rank.str().c_str(), preempt_rank_condition);
ParseClassAdRvalExpr(preempt_prio.str().c_str(), preempt_prio_condition);
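// e.g. std_rank parses to MY.Rank > MY.CurrentRank (the candidate job
// outranks the machine's current occupant), while preempt_prio requires the
// running user's priority value to exceed the candidate submittor's by more
// than PriorityDelta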
char *preq;
if( NULL == ( preq = param( "PREEMPTION_REQUIREMENTS" ) ) ) {
// No PREEMPTION_REQUIREMENTS; defaulting to FALSE
ParseClassAdRvalExpr( "FALSE", preemption_req );
} else {
if( ParseClassAdRvalExpr( preq , preemption_req ) ) {
// Failed to parse PREEMPTION_REQUIREMENTS; defaulting to FALSE
ParseClassAdRvalExpr( "FALSE", preemption_req );
}
#if !defined(WANT_OLD_CLASSADS)
ExprTree *tmp_expr = AddTargetRefs( preemption_req, TargetJobAttrs );
delete preemption_req;
preemption_req = tmp_expr;
#endif
free( preq );
}
}
ClassAdAnalyzer::
~ClassAdAnalyzer( )
{
delete std_rank_condition;
delete preempt_rank_condition;
delete preempt_prio_condition;
delete preemption_req;
if( jobReq ) {
delete jobReq;
}
if( m_result ) {
delete m_result;
m_result = NULL;
}
}
// Returns "true" if the job is not matched and IDLE, false otherwise
bool ClassAdAnalyzer::
NeedsBasicAnalysis( ClassAd *request ) {
int status;
int matched = false;
request->LookupInteger( ATTR_JOB_STATUS, status );
request->LookupInteger( ATTR_JOB_MATCHED, matched );
// XXX: are there cases where we need "basic" analysis even though
// we're matched?
if (matched) {
return false;
}
switch(status) {
// XXX: should we have "is_running/completed/removed" in m_result?
// XXX: should we add "is_held" to m_result/include hold_reason?
case RUNNING:
case HELD:
case REMOVED:
case COMPLETED:
case TRANSFERRING_OUTPUT:
return false;
default:
return true;
}
}
void ClassAdAnalyzer::
BasicAnalyze( ClassAd *request, ClassAd *offer ) {
// XXX: this code could/should be refactored out (from here and
// condor_q.V6/queue.cpp) and into an analysis library
// NB: for now, we only use this "basic analysis" if we're
// generating a result struct; so return otherwise. Be sure to
// eliminate this check if this code is used elsewhere!
if (!result_as_struct) { return; }
char remote_user[128];
classad::Value eval_result;
bool val;
bool satisfied_std_rank = EvalExprTree(std_rank_condition, offer, request, eval_result) && eval_result.IsBooleanValue(val) && val;
bool satisfied_preempt_prio = EvalExprTree( preempt_prio_condition, offer, request, eval_result ) && eval_result.IsBooleanValue(val) && val;
bool satisfied_preempt_rank = EvalExprTree( preempt_rank_condition, offer, request, eval_result ) && eval_result.IsBooleanValue(val) && val;
bool satisfied_preempt_req = EvalExprTree( preemption_req, offer, request, eval_result ) && eval_result.IsBooleanValue(val) && val;
if (!IsAHalfMatch(request, offer)) {
result_add_explanation(classad_analysis::MACHINES_REJECTED_BY_JOB_REQS, offer);
return;
}
if (!IsAHalfMatch(offer, request)) {
result_add_explanation(classad_analysis::MACHINES_REJECTING_JOB, offer);
return;
}
if (! offer->LookupString( ATTR_REMOTE_USER, remote_user, sizeof(remote_user) )) {
if ( satisfied_std_rank ) {
// Machine satisfies job requirements, job satisfies machine
// constraints, no remote user
result_add_explanation(classad_analysis::MACHINES_AVAILABLE, offer);
return;
} else {
// Standard rank condition failed
result_add_explanation(classad_analysis::MACHINES_REJECTING_UNKNOWN, offer);
return;
}
}
if ( satisfied_preempt_prio ) {
if ( satisfied_std_rank ) {
// Satisfies preemption priority condition and standard rank
// condition; thus available
result_add_explanation(classad_analysis::MACHINES_AVAILABLE, offer);
return;
} else {
if ( satisfied_preempt_rank ) {
// Satisfies preemption priority and rank conditions, and ...
if (satisfied_preempt_req) {
// ... also satisfies PREEMPTION_REQUIREMENTS: available
result_add_explanation(classad_analysis::MACHINES_AVAILABLE, offer);
return;
} else {
// ... doesn't satisfy PREEMPTION_REQUIREMENTS: not available
result_add_explanation(classad_analysis::PREEMPTION_REQUIREMENTS_FAILED, offer);
return;
}
} else {
// The comments on the equivalent code path in condor_q
// indicate that this case usually implies "some unknown problem"
result_add_explanation(classad_analysis::PREEMPTION_FAILED_UNKNOWN, offer);
return;
}
}
} else {
// Failed preemption priority condition
result_add_explanation(classad_analysis::PREEMPTION_PRIORITY_FAILED, offer);
return;
}
}
void ClassAdAnalyzer::
ensure_result_initialized(classad::ClassAd *request) {
// Set up result object, only if necessary
// Other parts of this code are written to assume that a
// ClassAdAnalyzer can be used to analyze multiple jobs; we make
// the same assumption here. The overall interface of this code
// should be marked with a big FIXME.
if (!result_as_struct) return;
if (m_result != NULL && !(m_result->job_ad()).SameAs(request)) {
delete m_result;
m_result = NULL;
}
if (m_result == NULL) {
m_result = new classad_analysis::job::result(*request);
}
}
void ClassAdAnalyzer::
result_add_suggestion(suggestion s) {
if (!result_as_struct) return;
ASSERT(m_result);
m_result->add_suggestion(s);
}
void ClassAdAnalyzer::
result_add_explanation(matchmaking_failure_kind mfk, classad::ClassAd resource) {
if (!result_as_struct) return;
ASSERT(m_result);
m_result->add_explanation(mfk, resource);
}
void ClassAdAnalyzer::
result_add_explanation(matchmaking_failure_kind mfk, ClassAd *resource) {
if (!result_as_struct) return;
ASSERT(m_result);
m_result->add_explanation(mfk, resource);
}
void ClassAdAnalyzer::
result_add_machine(classad::ClassAd resource) {
if (!result_as_struct) return;
ASSERT(m_result);
m_result->add_machine(resource);
}
bool ClassAdAnalyzer::
AnalyzeJobReqToBuffer( ClassAd *request, ClassAdList &offers, string &buffer )
{
ResourceGroup rg;
classad::ClassAd *explicit_classad;
bool success;
// create a ResourceGroup object for offer ClassAds
if( !MakeResourceGroup( offers, rg ) ) {
buffer += "Unable to process machine ClassAds";
buffer += "\n";
return true;
}
explicit_classad = AddExplicitTargets( request );
ensure_result_initialized(explicit_classad);
bool do_basic_analysis = NeedsBasicAnalysis(request);
offers.Rewind();
ClassAd *ad;
while((ad = offers.Next())) {
result_add_machine(*ad);
if (do_basic_analysis) {
BasicAnalyze(request, ad);
}
}
success = AnalyzeJobReqToBuffer( explicit_classad, rg, buffer );
delete explicit_classad;
return success;
}
#if defined( COLLECTIONS )
bool ClassAdAnalyzer::
AnalyzeJobReqToBuffer( classad::ClassAd *request,
classad::ClassAdCollectionServer &offers,
string &buffer )
{
ResourceGroup rg;
// create a ResourceGroup object for offer ClassAds
if( !MakeResourceGroup( offers, rg ) ) {
buffer += "Unable to process machine ClassAds";
buffer += "\n";
return true;
}
return AnalyzeJobReqToBuffer( request, rg, buffer );
}
#endif
bool ClassAdAnalyzer::
AnalyzeJobAttrsToBuffer( ClassAd *request, ClassAdList &offers,
string &buffer )
{
ResourceGroup rg;
classad::ClassAd *explicit_classad;
bool success;
// create a ResourceGroup object for offer ClassAds
if( !MakeResourceGroup( offers, rg ) ) {
buffer += "Unable to process machine ClassAds";
buffer += "\n";
return true;
}
explicit_classad = AddExplicitTargets( request );
ensure_result_initialized(explicit_classad);
success = AnalyzeJobAttrsToBuffer( explicit_classad, rg, buffer );
delete explicit_classad;
return success;
}
bool ClassAdAnalyzer::
AnalyzeJobReqToBuffer( classad::ClassAd *request, ResourceGroup &offers, string &buffer)
{
if( !request ) {
// request is NULL;
return false;
}
classad::PrettyPrint pp;
classad::ExprTree *reqExpr, *flatReqExpr, *prunedReqExpr;
reqExpr = flatReqExpr = prunedReqExpr = NULL;
classad::Value val;
if (jobReq != NULL) {
delete jobReq;
}
jobReq = new MultiProfile( );
Profile *profile = NULL;
Condition *condition = NULL;
// Look up Requirements expression in request ClassAd
if( !( reqExpr = request->Lookup( ATTR_REQUIREMENTS ) ) ) {
buffer += "Job ClassAd is missing ";
buffer += ATTR_REQUIREMENTS;
buffer += " expression.";
buffer += "\n";
return true;
}
// Print out requirements expression to buffer
// Format req expression for 80 column screen.
string temp_buffer;
pp.Unparse( temp_buffer, reqExpr );
string::iterator t, lastAnd, lineStart;
t = lastAnd = lineStart = temp_buffer.begin( );
while( t != temp_buffer.end( ) ) {
if( ( *t == '&' ) && ( t + 1 != temp_buffer.end( ) ) && ( *( t+1 ) == '&' ) ) {
lastAnd = t + 2;
}
if( distance( lineStart, t ) >= 80 ) {
if( lastAnd != lineStart ) {
temp_buffer.replace( lastAnd, lastAnd + 1, 1, '\n' );
lineStart = lastAnd + 1;
lastAnd = lineStart;
}
}
t++;
}
// Print formatted req expression
buffer += "\n";
buffer += "The ";
buffer += ATTR_REQUIREMENTS;
buffer += " expression for your job is:";
buffer += "\n";
buffer += "\n";
buffer += temp_buffer;
buffer += "\n";
buffer += "\n";
// Try to flatten Requirements expression
mad.ReplaceLeftAd( request );
bool flattenOk = request->FlattenAndInline( reqExpr, val, flatReqExpr );
mad.RemoveLeftAd( );
if( !flattenOk ) {
return true;
}
// Check if Requirements expression flattened to a literal value
if( !flatReqExpr ) {
buffer += "Job ClassAd ";
buffer += ATTR_REQUIREMENTS;
buffer += " expression evaluates to ";
pp.Unparse( buffer, val );
buffer += "\n";
buffer += "\n";
return true;
}
// Get rid of dangling boolean literals created by Flatten
if( !( PruneDisjunction( flatReqExpr, prunedReqExpr ) ) ) {
return true;
}
// Convert Requirements expression to a MultiProfile object
if( !( BoolExpr::ExprToMultiProfile( prunedReqExpr, jobReq ) ) ) {
return true;
}
// Determine which Conditions should be removed from the Requirements
// expression
if( !( SuggestCondition( jobReq, offers ) ) ) {
return true;
}
// Determine which Conditions conflict with one another
if( !( FindConflicts( jobReq, offers ) ) ) {
return true;
}
// req->explain.match and req->explain.numberOfMatches contain
// information about the offers that satisfy the request's requirements
// The following should probably make use of ClassAdPrintMask
// Get information from data structures and print to the buffer
string cond_s, value_s;
char formatted[2048];
char cond[1024];
char info[64];
char suggest[64];
char value[64];
char tempBuff[64];
int p = 1;
jobReq->Rewind( );
while( jobReq->NextProfile( profile ) ) {
// If we have more than one profile we need to number them.
int numProfs;
jobReq->GetNumberOfProfiles( numProfs );
if( numProfs > 1 ) {
buffer += "Profile ";
sprintf( tempBuff, "%i", p );
buffer += tempBuff;
if( profile->explain.match ) {
buffer += " matched ";
sprintf( tempBuff, "%i", profile->explain.numberOfMatches );
buffer += tempBuff;
} else {
buffer += " rejected all";
}
if( profile->explain.numberOfMatches == 1 ) {
buffer += " machine";
} else {
buffer += " machines";
}
buffer += "\n";
}
// Sort list of conditions by number of matches
List< Condition > sortedCondList;
profile->Rewind( );
Condition *sortedCond;
SimpleList< int > mapList;
int index = 0;
int junk;
while( profile->NextCondition( condition ) ) {
if( sortedCondList.IsEmpty( ) ) {
sortedCondList.Append( condition );
mapList.Append( index );
} else {
sortedCondList.Rewind( );
mapList.Rewind( );
while( sortedCondList.Next( sortedCond ) ) {
mapList.Next( junk );
if( condition->explain.numberOfMatches <
sortedCond->explain.numberOfMatches ) {
sortedCondList.Insert( condition );
mapList.Prepend( index );
break;
}
if( sortedCondList.AtEnd( ) ) {
sortedCondList.Append( condition );
mapList.Append( index );
}
}
}
index++;
}
sortedCondList.Rewind( );
mapList.Rewind( );
// create map from original Condition order to sorted order
int numConds = 0;
profile->GetNumberOfConditions( numConds );
ExtArray<int> condMap ( numConds );
int i = 0;
while( mapList.Next( index ) ) {
condMap[index] = i;
i++;
}
// print header for condition list
sprintf( formatted, " %-34s%-20s%s\n", "Condition",
"Machines Matched", "Suggestion" );
buffer += formatted;
sprintf( formatted, " %-34s%-20s%s\n", "---------",
"----------------", "----------" );
buffer += formatted;
i = 1;
// print each condition, number of matches, and suggestion
while( sortedCondList.Next( condition ) ) {
cond_s = "";
value_s = "";
condition->ToString( cond_s );
strncpy( cond, cond_s.c_str( ), 1023 );
cond[1023] = 0;
sprintf( info, "%i", condition->explain.numberOfMatches );
switch( condition->explain.suggestion ) {
case ConditionExplain::REMOVE: {
sprintf( suggest, "REMOVE" );
result_add_suggestion(suggestion(suggestion::REMOVE_CONDITION, cond_s));
break;
}
case ConditionExplain::MODIFY: {
pp.Unparse( value_s, condition->explain.newValue );
result_add_suggestion(suggestion(suggestion::MODIFY_CONDITION, cond_s, value_s));
strncpy( value, value_s.c_str( ), 63 );
value[63] = 0;
snprintf( suggest, sizeof(suggest), "MODIFY TO %s", value );
break;
}
default: {
sprintf( suggest, " " );
}
}
if( strlen( cond ) < 46 ) {
sprintf( formatted, "%-4i%-34s%-20s%s\n", i, cond, info,
suggest );
} else {
sprintf( formatted, "%-4i%s\n%38s%-20s%s\n", i, cond, "", info,
suggest );
}
buffer += formatted;
i++;
}
// print out conflicts
IndexSet sortedIS;
IndexSet *rawIS;
profile->explain.conflicts->Rewind( );
if( !profile->explain.conflicts->IsEmpty( ) ) {
buffer += "\n";
buffer += "Conflicts:\n";
buffer += "\n";
while( profile->explain.conflicts->Next( rawIS ) ) {
sortedIS.Init( numConds );
IndexSet::Translate( *rawIS, condMap.getarray (),
numConds, numConds, sortedIS );
buffer += " conditions: ";
bool firstNum = true;
for( i = 0; i < numConds; i++ ) {
if( sortedIS.HasIndex( i ) ) {
if( !firstNum ) {
buffer += ", ";
}
else {
firstNum = false;
}
sprintf( tempBuff, "%i", i+1 );
buffer += tempBuff;
}
}
buffer += "\n";
}
}
p++;
}
return true;
}
bool ClassAdAnalyzer::
AnalyzeJobAttrsToBuffer( classad::ClassAd *request, ResourceGroup &offers,
string &buffer )
{
if( !request ) {
buffer += "request ClassAd is NULL\n";
// request is NULL;
return false;
}
classad::PrettyPrint pp;
ClassAdExplain adExplain;
char formatted[2048];
char attr[64];
char suggest[64];
if( !( AnalyzeAttributes( request, offers, adExplain ) ) ) {
cerr << "error in AnalyzeAttributes" << endl << endl;
}
// get information from ClassAdExplain
// print list of undefined job attributes
if( !adExplain.undefAttrs.IsEmpty( ) ) {
buffer += "\n";
buffer += "The following attributes are missing from the job ClassAd:";
buffer += "\n";
buffer += "\n";
string undefAttr = "";
adExplain.undefAttrs.Rewind( );
while( adExplain.undefAttrs.Next( undefAttr ) ) {
result_add_suggestion(suggestion(suggestion::DEFINE_ATTRIBUTE, undefAttr));
buffer += undefAttr;
buffer += "\n";
}
}
// print ideal ranges for attributes
if( !adExplain.attrExplains.IsEmpty( ) ) {
string value_s = "";
string suggest_s = "";
string tempBuff = "";
int numModAttrs = 0;
tempBuff += "\nThe following attributes should be added or modified:";
tempBuff += "\n";
tempBuff += "\n";
// print header for attribute list
sprintf( formatted, "%-24s%s\n", "Attribute", "Suggestion" );
tempBuff += formatted;
sprintf( formatted, "%-24s%s\n", "---------", "----------" );
tempBuff += formatted;
// print each attribute and suggestion
AttributeExplain *attrExplain = NULL;
adExplain.attrExplains.Rewind( );
while( adExplain.attrExplains.Next( attrExplain ) ) {
switch( attrExplain->suggestion ) {
case AttributeExplain::MODIFY: {
numModAttrs++;
strncpy( attr, attrExplain->attribute.c_str( ), 63 );
attr[63] = 0;
if( attrExplain->isInterval ) {
double lower = 0;
double upper = 0;
GetLowDoubleValue( attrExplain->intervalValue, lower );
GetHighDoubleValue( attrExplain->intervalValue, upper );
suggest_s = "use a value ";
if( lower > -( FLT_MAX ) ) { // lower bound exists
if( attrExplain->intervalValue->openLower ) {
suggest_s += "> ";
}
else {
suggest_s += ">= ";
}
pp.Unparse( value_s,attrExplain->intervalValue->lower);
suggest_s += value_s;
value_s = "";
if( upper < FLT_MAX ) {
suggest_s += " and ";
}
}
if( upper < FLT_MAX ) { // upper bound exists
if( attrExplain->intervalValue->openUpper ) {
suggest_s += "< ";
}
else {
suggest_s += "<= ";
}
pp.Unparse( value_s,attrExplain->intervalValue->upper);
suggest_s += value_s;
value_s = "";
}
}
else { // attrExplain has a discrete value
suggest_s = "change to ";
pp.Unparse( value_s, attrExplain->discreteValue );
suggest_s += value_s;
value_s = "";
}
strncpy( suggest, suggest_s.c_str( ), 63 );
suggest[63] = 0;
sprintf( formatted, "%-24s%s\n", attr, suggest );
result_add_suggestion(suggestion(suggestion::MODIFY_ATTRIBUTE, attr, suggest_s));
tempBuff += formatted;
break;
}
default: { }
}
}
if( numModAttrs > 0 ) {
buffer += tempBuff;
}
}
return true;
}
bool ClassAdAnalyzer::
AnalyzeExprToBuffer( classad::ClassAd *mainAd, classad::ClassAd *contextAd, string &attr,
string &buffer )
{
classad::PrettyPrint pp;
classad::Value val;
string tempBuff_s = "";
ResourceGroup rg;
List<classad::ClassAd> contextList;
MultiProfile *mp = new MultiProfile;
Profile *profile = NULL;
Condition *condition = NULL;
classad::ExprTree *expr = NULL;
classad::ExprTree *flatExpr = NULL;
classad::ExprTree *prunedExpr = NULL;
char tempBuff[64];
char formatted[2048];
string cond_s = "";
char cond[1024];
string value_s = "";
char value[64];
classad::ClassAd *copyContextAd = (classad::ClassAd *) contextAd->Copy();
contextList.Append( copyContextAd );
if( !( rg.Init( contextList ) ) ) {
cerr << "problem adding job ad to ResourceGroup\n";
}
if( !( expr = mainAd->Lookup( attr ) ) ) {
cerr << "error looking up " << attr << " expression\n";
delete mp;
return false;
}
if( !( mainAd->FlattenAndInline( expr, val, flatExpr ) ) ) {
cerr << "error flattening machine ad\n";
delete mp;
return false;
}
if( !flatExpr ) {
buffer += attr;
buffer += " expresion flattens to ";
pp.Unparse( buffer, val );
buffer += "\n";
delete mp;
return true;
}
if( !PruneDisjunction( flatExpr, prunedExpr ) ) {
cerr << "error pruning expression:\n";
pp.Unparse( tempBuff_s, flatExpr );
cerr << tempBuff_s << "\n";
delete mp;
return false;
}
if( !( BoolExpr::ExprToMultiProfile( prunedExpr, mp ) ) ) {
cerr << "error in ExprToMultiProfile\n";
delete mp;
return false;
}
// Do analysis
if( !SuggestCondition( mp, rg ) ) {
cerr << "error in SuggestCondition\n";
}
// Print results
buffer += "\n";
buffer += "=====================\n";
buffer += "RESULTS OF ANALYSIS :\n";
buffer += "=====================\n";
buffer += "\n";
buffer += attr;
buffer += " expression ";
if( mp->explain.match ) {
buffer += "is true\n";
} else {
buffer += "is not true\n";
}
int p = 1;
int numProfiles;
mp->Rewind( );
while( mp->NextProfile( profile ) ) {
mp->GetNumberOfProfiles( numProfiles );
if( numProfiles > 1 ) {
buffer += " Profile ";
sprintf( tempBuff, "%i", p );
buffer += tempBuff;
if( profile->explain.match ) {
buffer += " is true\n";
} else {
buffer += " is false\n";
}
}
profile->Rewind( );
while( profile->NextCondition( condition ) ) {
condition->ToString( cond_s );
strncpy( cond, cond_s.c_str( ), 1023 );
cond[1023] = 0;
cond_s = "";
if( condition->explain.match ) {
value_s = "is true";
} else {
value_s = "is false";
}
strncpy( value, value_s.c_str( ), 63 );
value[63] = 0;
value_s = "";
sprintf( formatted, " %-25s%s\n", cond, value );
buffer += formatted;
}
p++;
}
buffer += "=====================\n";
buffer += "\n";
delete mp;
return true;
}
// private methods
bool ClassAdAnalyzer::
BuildBoolTable( MultiProfile *mp, ResourceGroup &rg, BoolTable &result )
{
BoolValue bval;
Profile *profile;
classad::ClassAd *ad;
List<classad::ClassAd> contexts;
int numProfs = 0;
int numContexts = 0;
if( !mp->GetNumberOfProfiles( numProfs ) ) {
cerr << "BuildBoolTable: error calling GetNumberOfProfiles" << endl;
}
if( !rg.GetNumberOfClassAds( numContexts ) ) {
cerr << "BuildBoolTable: error calling GetNumberOfClassAds" << endl;
}
if( !rg.GetClassAds( contexts ) ) {
cerr << "BuildBoolTable: error calling GetClassAds" << endl;
}
if( !result.Init( numContexts, numProfs ) ) {
cerr << "BuildBoolTable: error calling BoolTable::Init" << endl;
}
contexts.Rewind( );
int col = 0;
while( contexts.Next( ad ) ) {
int row = 0;
mp->Rewind( );
while( mp->NextProfile( profile ) ) {
profile->EvalInContext( mad, ad, bval );
result.SetValue( col, row, bval );
row++;
}
col++;
}
return true;
}
bool ClassAdAnalyzer::
BuildBoolTable( Profile *p, ResourceGroup &rg, BoolTable &result ) {
BoolValue bval;
Condition *condition;
classad::ClassAd *ad;
int numConds = 0;
int numContexts = 0;
p->GetNumberOfConditions( numConds );
rg.GetNumberOfClassAds( numContexts );
List<classad::ClassAd> contexts;
rg.GetClassAds( contexts );
result.Init( numContexts, numConds );
contexts.Rewind( );
int col = 0;
while( contexts.Next( ad ) ) {
int row = 0;
p->Rewind( );
while( p->NextCondition( condition ) ) {
condition->EvalInContext( mad, ad, bval );
result.SetValue( col, row, bval );
row++;
}
col++;
}
return true;
}
bool ClassAdAnalyzer::
MakeResourceGroup( ClassAdList &caList, ResourceGroup &rg )
{
List<classad::ClassAd> newList;
ClassAd *ad;
caList.Rewind( );
ad = caList.Next( );
while( ad ) {
classad::ClassAd *explicit_classad;
explicit_classad = AddExplicitTargets(ad);
newList.Append(explicit_classad);
ad = caList.Next( );
}
if( !rg.Init( newList ) ) {
return false;
}
return true;
}
#if defined( COLLECTIONS )
bool ClassAdAnalyzer::
MakeResourceGroup( ClassAdCollectionServer &server, ResourceGroup &rg )
{
List<classad::ClassAd> newList;
string q_key;
classad::ClassAd *tmp;
LocalCollectionQuery a;
a.Bind(&server);
a.Query("root",NULL);
a.ToFirst();
bool ret=a.Current(q_key);
if (ret==true){
do{
tmp=server.GetClassAd(q_key);
newList.Append( (classad::ClassAd*) tmp->Copy( ) );
}while(a.Next(q_key)==true);
}
if( !rg.Init( newList ) ) {
return false;
}
return true;
}
#endif
bool ClassAdAnalyzer::
SuggestCondition( MultiProfile *mp, ResourceGroup &rg )
{
if( mp == NULL ) {
cerr << "SuggestCondition: tried to pass null MultiProfile"
<< endl;
return false;
}
BoolTable bt;
if( !BuildBoolTable( mp, rg, bt ) ) {
return false;
}
// Get info from BoolTable for MultiProfileExplain
int mpMatches = 0;
int numCols = 0;
bt.GetNumColumns( numCols );
int currentColTotalTrue;
IndexSet matchedClassAds;
matchedClassAds.Init( numCols );
for( int i = 0; i < numCols; i++ ) {
bt.ColumnTotalTrue( i, currentColTotalTrue );
if( currentColTotalTrue > 0 ) {
mpMatches++;
matchedClassAds.AddIndex( i );
}
}
if( mpMatches > 0 ) {
if( !( mp->explain.Init( true, mpMatches, matchedClassAds,
numCols ) ) ) {
return false;
}
}
else {
if( !( mp->explain.Init( false, 0, matchedClassAds, numCols ) ) ) {
return false;
}
}
// call SuggestConditionRemove on all profiles
Profile *currentProfile;
mp->Rewind( );
while( mp->NextProfile( currentProfile ) ) {
if( !SuggestConditionModify( currentProfile, rg ) ) {
cerr << "error in SuggestConditionModify" << endl;
return false;
}
// if( !SuggestConditionRemove( currentProfile, rg ) ) {
// return false;
// }
}
return true;
}
bool ClassAdAnalyzer::
SuggestConditionRemove( Profile *p, ResourceGroup &rg )
{
List<AnnotatedBoolVector> abvList;
BoolTable bt;
Condition *condition;
BoolValue bval;
int profileMatches = 0;
int numRows = 0;
int numColumns = 0;
int currentColTotalTrue = 0;
int numberOfMatches = 0;
int row = 0;
bool match = false;
AnnotatedBoolVector *bestABV = NULL;
AnnotatedBoolVector *abv = NULL;
string buffer;
if( !BuildBoolTable( p, rg, bt ) ) {
return false;
}
if( !bt.GenerateMaxTrueABVList( abvList ) ) {
return false;
}
// Get info from BoolTable for Profile
bt.GetNumRows( numRows );
bt.GetNumColumns( numColumns );
for( int i = 0; i < numColumns; i++ ) {
bt.ColumnTotalTrue( i, currentColTotalTrue );
if( currentColTotalTrue == numRows ) {
profileMatches++;
}
}
if( profileMatches > 0 ) {
if( !( p->explain.Init( true, profileMatches ) ) ) {
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return false;
}
}
else {
if( !( p->explain.Init( false, 0 ) ) ) {
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return false;
}
}
// Get info from BoolTable for ConditionExplains
p->Rewind( );
row = 0;
while( p->NextCondition( condition ) ) {
bt.RowTotalTrue( row, numberOfMatches );
if( numberOfMatches == 0 ) {
match = false;
}
else {
match = true;
}
if( !( condition->explain.Init( match, numberOfMatches ) ) ) {
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return false;
}
row++;
}
// find first ABV with max frequency & max total true
// set up ConditionExplains using ABV
if( !AnnotatedBoolVector::MostFreqABV( abvList, bestABV ) ) {
cerr << "Analysis::SuggestConditionRemove(): error - bad ABV" << endl;
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return false;
}
else {
int i = 0;
p->Rewind( );
while( p->NextCondition( condition ) ) {
bestABV->GetValue( i, bval );
if( bval == TRUE_VALUE ) {
condition->explain.suggestion = ConditionExplain::KEEP;
}
else{
condition->explain.suggestion = ConditionExplain::REMOVE;
}
i++;
}
}
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return true;
}
bool ClassAdAnalyzer::
SuggestConditionModify( Profile *p, ResourceGroup &rg )
{
List<AnnotatedBoolVector> abvList;
BoolTable bt;
ValueTable vt;
Condition *condition;
int profileMatches = 0;
int numConds = 0;
int numContexts = 0;
int currentColTotalTrue = 0;
int numberOfMatches = 0;
bool match = false;
classad::MatchClassAd mcad;
if( !BuildBoolTable( p, rg, bt ) ) {
return false;
}
// Get info from BoolTable for Profile
bt.GetNumRows( numConds );
bt.GetNumColumns( numContexts );
for( int i = 0; i < numContexts; i++ ) {
bt.ColumnTotalTrue( i, currentColTotalTrue );
if( currentColTotalTrue == numConds ) {
profileMatches++;
}
}
if( profileMatches > 0 ) {
if( !( p->explain.Init( true, profileMatches ) ) ) {
return false;
}
}
else {
if( !( p->explain.Init( false, 0 ) ) ) {
return false;
}
}
// Get info from BoolTable for ConditionExplains
// set up array of operators and get list of attrs;
ExtArray< string > attrs;
ExtArray< ValueRange * > vrs;
string attr = "";
ExtArray<int> vr4Cond ( numConds );
int attrNum = 0;
int condNum = 0;
int vrNum = 0;
ExtArray<classad::Operation::OpKind> ops ( numConds );
ExtArray<Condition*> conds ( numConds );
// ExtArray<bool> tooComplex ( numConds );
std::vector<bool> tooComplex( numConds, false);
// classad::Operation::OpKind op1, op2;
classad::Value val;
p->Rewind( );
while( p->NextCondition( condition ) ) {
conds[condNum] = condition;
if( !condition->HasMultipleAttrs( ) ) {
tooComplex[condNum] = false;
// Attribute
condition->GetAttr( attr );
string currAttr;
bool seenAttr = false;
vrNum = 0;
for( int i = 0; i < attrs.getsize( ); i++ ) {
currAttr = attrs[i];
if( EqualsIgnoreCase( attr, currAttr ) ) {
seenAttr = true;
vrNum = i;
break;
}
}
if( !seenAttr ) {
vrNum = attrNum;
attrs.resize( attrNum + 1 );
attrs[attrNum] = attr;
vrs.resize( attrNum + 1 );
vrs[vrNum] = NULL;
attrNum++;
}
condition->GetOp( ops[condNum] );
if( vrs[vrNum] == NULL ) {
vrs[vrNum] = new ValueRange( );
}
AddConstraint( vrs[vrNum], condition );
vr4Cond[condNum] = vrNum;
}
else {
// we treat a complex condition as ( attr == true )
tooComplex[condNum] = true;
ops[condNum] = classad::Operation::__NO_OP__;
vrNum = attrNum;
attrs.resize( attrNum + 1 );
attrs[attrNum] = "";
vrs.resize( attrNum + 1 );
vrs[vrNum] = new ValueRange( );
attrNum++;
AddDefaultConstraint( vrs[vrNum] );
vr4Cond[condNum] = vrNum;
}
// get info from BoolTable
bt.RowTotalTrue( condNum, numberOfMatches );
if( numberOfMatches == 0 ) {
match = false;
}
else {
match = true;
}
if( !( condition->explain.Init( match, numberOfMatches ) ) ) {
for( int i = 0; i < vrs.getsize( ); i++ ){
delete vrs[i];
}
return false;
}
condNum++;
}
int numVRs = attrs.getsize( );
ExtArray<classad::Value*> nearestValues ( numVRs );
for( int i = 0; i < numVRs; i++ ) {
nearestValues[i] = NULL;
}
List<classad::ClassAd> contexts;
rg.GetClassAds( contexts );
classad::ClassAd *context = NULL;
// build ValueTable
vt.Init( numContexts, numVRs );
for( int row = 0; row < numVRs; row++ ) {
attr = attrs[row];
p->Rewind( );
string currAttr;
while( p->NextCondition( condition ) ) {
if( !condition->HasMultipleAttrs( ) ) {
condition->GetAttr( currAttr );
if( EqualsIgnoreCase( currAttr, attr ) ) {
classad::Operation::OpKind op;
classad::Value c_val;
condition->GetOp( op );
condition->GetVal( c_val );
vt.SetOp( row, op );
if( condition->IsComplex( ) ) {
condition->GetOp2( op );
condition->GetVal2( c_val );
vt.SetOp( row, op );
}
}
}
}
contexts.Rewind( );
for( int col = 0; col < numContexts; col++ ) {
contexts.Next( context );
classad::Value c_val;
if( tooComplex[row] ){
BoolValue result;
conds[row]->EvalInContext( mcad, context, result );
switch( result ) {
case TRUE_VALUE: { c_val.SetBooleanValue( true ); break; }
case FALSE_VALUE: { c_val.SetBooleanValue( false ); break; }
case UNDEFINED_VALUE: { c_val.SetUndefinedValue( ); break; }
default: c_val.SetErrorValue( );
}
}
else {
context->EvaluateAttr( attr, c_val );
}
vt.SetValue( col, row, c_val );
}
}
// Calculate distances
classad::Value::ValueType type;
classad::Value currPt, currUpper, currLower;
BoolValue currBVal, tempBVal;
val.SetUndefinedValue( );
ExtArray<classad::Value*> tempVals ( numVRs );
for( int i = 0; i < numVRs; i++ ) {
tempVals[i] = NULL;
}
int closestCtx = -1;
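// per-attribute distances are normalized to at most 1 (0 for a satisfied
// condition, 1 for an undefined or categorical mismatch), so numVRs + 1
// exceeds any reachable sum and serves as the initial "infinity"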
double currDist = 0, sumDist = 0, minSumDist = (double)numVRs + 1;
for( int col = 0; col < numContexts; col++ ) {
for( int row = 0; row < numVRs; row++ ) {
currBVal = TRUE_VALUE;
for( int i = 0; i < numConds; i++ ) {
if( vr4Cond[i] == row ) {
bt.GetValue( col, i, tempBVal );
And( currBVal, tempBVal, currBVal );
}
}
if( currBVal == TRUE_VALUE ) {
currDist = 0;
}
else if( currBVal == UNDEFINED_VALUE ) {
currDist = 1;
}
else {
vt.GetValue( col, row, currPt );
type = currPt.GetType( );
if( type == classad::Value::BOOLEAN_VALUE ||
type == classad::Value::STRING_VALUE ) {
currDist = 1;
tempVals[row] = new classad::Value( );
tempVals[row]->CopyFrom( currPt );
}
else {
vt.GetUpperBound( row, currUpper );
vt.GetLowerBound( row, currLower );
vrs[row]->GetDistance( currPt, currLower, currUpper,
currDist, val );
tempVals[row] = new classad::Value( );
tempVals[row]->CopyFrom( val );
}
}
sumDist += currDist;
}
if( sumDist < minSumDist ) {
minSumDist = sumDist;
closestCtx = col;
for( int i = 0; i < numVRs; i++ ) {
if( tempVals[i] ) {
nearestValues[i] = tempVals[i];
}
}
}
sumDist = 0;
}
classad::Value condVal;
condNum = 0;
p->Rewind( );
while( p->NextCondition( condition ) ) {
bt.GetValue( closestCtx, condNum, currBVal );
if( currBVal == TRUE_VALUE ) {
condition->explain.suggestion = ConditionExplain::KEEP;
}
else if( currBVal == UNDEFINED_VALUE ) {
condition->explain.suggestion = ConditionExplain::REMOVE;
}
else if( condition->HasMultipleAttrs( ) ) {
condition->explain.suggestion = ConditionExplain::REMOVE;
}
else {
attrNum = vr4Cond[condNum];
vt.GetValue( closestCtx, attrNum, val );
if( nearestValues[attrNum] ) {
condition->GetVal( condVal );
condition->explain.suggestion = ConditionExplain::MODIFY;
if( EqualValue( condVal, *nearestValues[attrNum] ) ) {
if( ops[condNum] == classad::Operation::LESS_THAN_OP ) {
IncrementValue( val );
}
else if ( ops[condNum] == classad::Operation::GREATER_THAN_OP ) {
DecrementValue( val );
}
}
condition->explain.newValue.CopyFrom( val );
}
else {
condition->explain.suggestion = ConditionExplain::REMOVE;
}
}
condNum++;
}
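// NB: only the allocations still referenced by tempVals are freed below;
// Values replaced in tempVals on earlier column iterations leak. Entries of
// nearestValues may alias the pointers freed here, but nearestValues is not
// dereferenced after this point.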
for( int i = 0; i < numVRs; i++ ) {
if (tempVals[i] != NULL) {
delete tempVals[i];
}
}
return true;
}
bool ClassAdAnalyzer::
FindConflicts( MultiProfile *mp, ResourceGroup &rg )
{
Profile *currentProfile = NULL;
mp->Rewind( );
while( mp->NextProfile( currentProfile ) ) {
if( !FindConflicts( currentProfile, rg ) ) {
return false;
}
}
return true;
}
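// Each recorded "conflict" is a set of two or more Conditions that no offer
// in the ResourceGroup satisfies simultaneously, derived below from the
// minimal-false rows of the profile's BoolTable.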
bool ClassAdAnalyzer::
FindConflicts( Profile *p, ResourceGroup &rg )
{
BoolTable bt;
List< BoolVector > bvList;
BoolVector *currBV = NULL;
IndexSet *currIS = NULL;
BoolValue bval;
int numConds = 0;
if( !p->GetNumberOfConditions( numConds ) ) {
return false;
}
if( !BuildBoolTable( p, rg, bt ) ) {
return false;
}
if( !bt.GenerateMinimalFalseBVList( bvList ) ) {
return false;
}
int card;
bvList.Rewind( );
while( bvList.Next( currBV ) ) {
if( currBV == NULL ) {
delete currIS;
return false;
}
currIS = new IndexSet( );
currIS->Init( numConds );
for( int i = 0; i < numConds; i++ ) {
currBV->GetValue( i, bval );
if( bval == TRUE_VALUE ) {
currIS->AddIndex( i );
}
}
currIS->GetCardinality( card );
if( card > 1 ) {
p->explain.conflicts->Append( currIS );
} else {
delete currIS;
currIS = NULL;
}
}
return true;
}
bool ClassAdAnalyzer::
AnalyzeAttributes( classad::ClassAd *ad, ResourceGroup &rg, ClassAdExplain &caExplain )
{
classad::ExprTree *reqExpr = NULL;
classad::ExprTree *flatReqExpr = NULL;
classad::ExprTree *prunedReqExpr = NULL;
classad::Value val;
string buffer;
classad::PrettyPrint pp;
List<classad::ClassAd> offerList;
List<MultiProfile> reqList;
classad::ClassAd *offer = NULL;
MultiProfile *currReq = NULL;
List<AnnotatedBoolVector> abvList;
AnnotatedBoolVector *abv;
BoolTable bt;
ValueRangeTable vrt;
/////////////////////////////////////
// STEP 1 - CREATE LIST OF OFFER //
// REQUIREMENTS FROM RESOURCEGROUP //
/////////////////////////////////////
// get list of ClassAds from ResourceGroup
if( !( rg.GetClassAds( offerList ) ) ) {
cerr << "CA::AA: error with GetClassAds" << endl << endl;
return false;
}
int numOffers = offerList.Number( );
// Only do this if SuggestCondition has been run successfully
if( jobReq ) {
if( jobReq->explain.numberOfClassAds == numOffers ) {
offerList.Rewind( );
for( int i = 0; i < numOffers; i++ ) {
offerList.Next( );
if( !jobReq->explain.matchedClassAds.HasIndex( i ) ) {
offerList.DeleteCurrent( );
}
}
if( offerList.Number( ) == 0 ) {
return true;
}
}
}
// create MultiProfile object for each offer Requirements expression;
offerList.Rewind( );
while( offerList.Next( offer ) ) {
currReq = new MultiProfile( );
if( !( reqExpr = offer->Lookup( ATTR_REQUIREMENTS ) ) ) {
cerr << "error looking up requirements" << endl << endl;
delete currReq;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
return false;
}
if( !( offer->FlattenAndInline( reqExpr, val, flatReqExpr ) ) ) {
cerr << "error flattening request" << endl << endl;
delete currReq;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
return false;
}
if( flatReqExpr ) {
// we have a non-literal boolean expression
if( !( PruneDisjunction( flatReqExpr, prunedReqExpr ) ) ) {
cerr << "error pruning expression:" << endl;
pp.Unparse( buffer, flatReqExpr );
cerr << buffer << endl << endl;
buffer = "";
delete flatReqExpr;
delete currReq;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
return false;
}
if( !( BoolExpr::ExprToMultiProfile( prunedReqExpr, currReq ) ) ) {
cerr << "error in ExprToMultiProfile" << endl << endl;
delete flatReqExpr;
delete prunedReqExpr;
delete currReq;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
return false;
}
// Add MultiProfile to the list
reqList.Append( currReq );
delete flatReqExpr;
delete prunedReqExpr;
}
else {
// we have a literal boolean expression
if( !( BoolExpr::ValToMultiProfile( val, currReq ) ) ) {
cerr << "error in ValToMultiProfile" << endl << endl;
delete currReq;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
return false;
}
reqList.Append( currReq );
}
}
////////////////////////////////////
// STEP 2 - BUILD DATA STRUCTURES //
////////////////////////////////////
int numReqs = reqList.Number( );
ExtArray<int> firstContext ( numReqs );
List< string > jobAttrs, undefAttrs, refdAttrs;
classad::ClassAd::iterator itr;
// build list of attributes
for( itr = ad->begin( ); itr != ad->end( ); itr++ ) {
jobAttrs.Append( new string( itr->first ) );
}
int numRefdAttrs = 0;
int arrayCount = 0;
int reqNo = 0;
List< ExtArray< BoolValue > > boolValueArrayList;
List< ExtArray< ValueRange * > > intervalArrayList;
ExtArray< BoolValue > literalValues;
ExtArray< BoolValue > *tempBools = NULL;
ExtArray< ValueRange * > *tempIntervals = NULL;
Profile *currProfile;
Condition *currCondition;
// iterate through machine requirements statements
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
firstContext[reqNo] = arrayCount;
literalValues.resize( arrayCount + 1);
literalValues[arrayCount] = TRUE_VALUE;
// None of the attribute values contribute to this expression
if( currReq->IsLiteral( ) ) {
BoolValue bVal = FALSE_VALUE;
currReq->GetLiteralValue( bVal );
literalValues[arrayCount] = bVal;
if( numRefdAttrs == 0 ) {
tempBools = NULL;
tempIntervals = NULL;
}
else {
tempBools = new ExtArray< BoolValue>( numRefdAttrs );
tempIntervals = new ExtArray< ValueRange * >(numRefdAttrs);
for( int i = 0; i < numRefdAttrs; i++ ) {
( *tempBools )[i] = bVal;
( *tempIntervals )[i] = NULL;
}
}
boolValueArrayList.Append( tempBools );
intervalArrayList.Append( tempIntervals );
arrayCount++;
reqNo++;
continue;
}
// iterate through Profiles in a machine requirements statement
currReq->Rewind( );
while( currReq->NextProfile( currProfile ) ) {
if( numRefdAttrs == 0 ) {
tempBools = NULL;
tempIntervals = NULL;
}
else {
tempBools = new ExtArray< BoolValue>( numRefdAttrs );
tempIntervals = new ExtArray< ValueRange * >(numRefdAttrs);
for( int i = 0; i < numRefdAttrs; i++ ) {
( *tempBools )[i] = TRUE_VALUE;
( *tempIntervals )[i] = NULL;
}
}
// iterate through Conditions in a profile
currProfile->Rewind( );
while( currProfile->NextCondition( currCondition ) ) {
if( currCondition->IsComplex( ) &&
currCondition->HasMultipleAttrs( ) ) {
// We need a better way to deal with complex Conditions
// LOOK INTO THIS
continue;
}
string currAttr;
if( !currCondition->GetAttr( currAttr ) ) {
cerr << "AA error: couldn't get attribute" << endl;
exit(1);
}
// Try to find attribute in job
bool attrInJob = false;
string jobAttr;
jobAttrs.Rewind( );
while( jobAttrs.Next( jobAttr ) ) {
if( EqualsIgnoreCase( currAttr, jobAttr ) ) {
attrInJob = true;
break;
}
}
// See if attribute has been previously referenced
bool attrInRefdAttrs = false;
string refdAttr;
int attrNum = 0;
refdAttrs.Rewind( );
while( refdAttrs.Next( refdAttr ) ) {
if(EqualsIgnoreCase( currAttr, refdAttr ) ) {
attrInRefdAttrs = true;
break;
}
attrNum++;
}
BoolValue conditionValue;
if( !currCondition->EvalInContext( mad, ad, conditionValue) ) {
cerr << "AA error: BoolExpr::EvalInContext failed" << endl;
exit(1);
}
// attribute has been previously encountered.
if( attrInRefdAttrs && tempBools) {
BoolValue newValue;
BoolValue oldValue = ( *tempBools )[attrNum];
And( oldValue, conditionValue, newValue );
( *tempBools )[attrNum] = newValue;
if( ( ( *tempIntervals )[attrNum] == NULL ) ) {
( *tempIntervals )[attrNum] = new ValueRange( );
}
AddConstraint( ( *tempIntervals )[attrNum],
currCondition );
}
// attribute is undefined and has thus far not been in any
// machine requirements.
else {
refdAttrs.Append( new string( currAttr ) );
numRefdAttrs++;
if( tempBools == NULL ) {
tempBools = new ExtArray< BoolValue>( numRefdAttrs );
tempIntervals = new ExtArray< ValueRange * >( numRefdAttrs );
for( int i = 0; i < numRefdAttrs; i++ ) {
( *tempBools )[i] = TRUE_VALUE;
( *tempIntervals )[i] = NULL;
}
}
else {
tempBools->resize( numRefdAttrs );
tempIntervals->resize( numRefdAttrs );
(*tempIntervals)[numRefdAttrs - 1] = NULL;
}
( *tempBools )[numRefdAttrs - 1] = conditionValue;
if( ( ( *tempIntervals )[attrNum] == NULL ) ) {
( *tempIntervals )[attrNum] = new ValueRange( );
}
AddConstraint( ( *tempIntervals )[attrNum],
currCondition );
}
// attribute is not defined in job
if( !attrInJob ) {
if( undefAttrs.IsEmpty( ) ) {
undefAttrs.Append( new string( currAttr ) );
}
else {
string undefAttr = "";
bool foundAttr = false;
undefAttrs.Rewind( );
while( undefAttrs.Next( undefAttr ) ) {
if( EqualsIgnoreCase( currAttr, undefAttr ) ) {
foundAttr = true;
break;
}
}
if( !foundAttr ) {
undefAttrs.Append( new string( currAttr ) );
}
}
}
}
boolValueArrayList.Append( tempBools );
intervalArrayList.Append( tempIntervals );
arrayCount++;
}
reqNo++;
}
string* attr;
jobAttrs.Rewind( );
while( jobAttrs.Next( attr ) ) {
delete attr;
}
	// Map each context (one per profile) back to the machine requirement
	// it came from.
int numContexts = boolValueArrayList.Number();
ExtArray<int> machineForContext ( numContexts );
int m = 0;
for( int i = 0; i < numContexts; i++ ) {
if( ( m+1 ) < numReqs) {
if( i >= firstContext[m+1] ) {
m++;
}
}
machineForContext[i] = m;
}
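	// Illustrative example (hypothetical numbers): with numReqs = 3 and
	// firstContext = { 0, 3, 5 }, contexts 0-2 map to machine 0, contexts
	// 3-4 map to machine 1, and contexts 5 and up map to machine 2.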
// Create BoolTable and ValueRangeTable
bt.Init( numContexts, numRefdAttrs );
vrt.Init( numContexts, numRefdAttrs );
ExtArray< BoolValue > *currBVArray;
ExtArray< ValueRange * > *currVRLArray;
boolValueArrayList.Rewind( );
intervalArrayList.Rewind( );
for( int ctxtNum = 0; ctxtNum < numContexts; ctxtNum++ ) {
boolValueArrayList.Next( currBVArray );
intervalArrayList.Next( currVRLArray );
int arraySize = 0;
if( currBVArray != NULL ) {
arraySize = currBVArray->getsize( );
}
for( int attrNum = 0; attrNum < numRefdAttrs; attrNum++ ) {
if( attrNum >= arraySize ) {
bt.SetValue( ctxtNum, attrNum, literalValues[ctxtNum] );
vrt.SetValueRange( ctxtNum, attrNum, NULL );
}
else {
bt.SetValue( ctxtNum, attrNum, (*currBVArray)[ attrNum ] );
vrt.SetValueRange( ctxtNum, attrNum,
( *currVRLArray )[ attrNum ] );
}
}
}
if( !bt.GenerateMaxTrueABVList( abvList ) ) {
cout << "CA::AA: error in GenerateMaxTrueABVList" << endl;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
if( currReq ) {
delete currReq;
}
}
boolValueArrayList.Rewind( );
while( boolValueArrayList.Next( tempBools ) ) {
if( tempBools ) {
delete tempBools;
}
}
intervalArrayList.Rewind( );
while( intervalArrayList.Next( tempIntervals ) ) {
if( tempIntervals ) {
for( int i = 0; i < tempIntervals->getsize( ); i++ ) {
if( ( *tempIntervals )[i] ) {
delete ( *tempIntervals )[i];
}
}
delete tempIntervals;
}
}
refdAttrs.Rewind( );
while( refdAttrs.Next( attr ) ) {
delete attr;
}
undefAttrs.Rewind( );
while( undefAttrs.Next( attr ) ) {
delete attr;
}
return false;
}
///////////////////////////////////////
// STEP 3 - GENERATE HYPERRECTANGLES //
///////////////////////////////////////
// find values corresponding to each ABV
AnnotatedBoolVector *currentABV = NULL;
List< ExtArray< HyperRect * > > allHyperRectangles;
abvList.Rewind( );
while( abvList.Next( currentABV ) ) {
ExtArray< ValueRange * > vrForAttr;
vrForAttr.resize( numRefdAttrs );
for( int i = 0; i < numRefdAttrs; i++ ) {
vrForAttr[i] = NULL;
}
// go through each attribute/boolean value (only look at non-true)
for( int i = 0; i < numRefdAttrs; i++ ) {
ValueRange *mergedVR = new ValueRange( );
BoolValue bval;
currentABV->GetValue( i, bval );
if( bval != TRUE_VALUE ) {
ValueRange *vr = NULL;
int context;
bool firstContextBool = true;
// - go through all contexts corresponding to current ABV
// - merge the ValueRanges into one multiply indexed
// ValueRange
for( int j = 0; j < numContexts; j++ ) {
bool hasContext = false;
currentABV->HasContext( j, hasContext );
if( hasContext ) {
vrt.GetValueRange( j, i, vr );
context = j;
if( firstContextBool ) {
if( vr != NULL ) {
mergedVR->Init( vr, context, numContexts );
firstContextBool = false;
}
}
else if( vr != NULL ) {
mergedVR->Union( vr, context );
}
}
}
vrForAttr[i] = mergedVR;
}
else {
delete mergedVR;
vrForAttr[i] = NULL;
}
}
		// stick all HyperRectangles together;
		// we may want to keep them partitioned by BoolVector instead
ValueRange::BuildHyperRects( vrForAttr, numRefdAttrs,
numContexts, allHyperRectangles );
for( int i = 0; i < vrForAttr.getsize( ); i++ ) {
if( vrForAttr[i] ) {
delete vrForAttr[i];
}
}
}
	ExtArray< HyperRect * > *hrs = NULL;
	HyperRect *currHR = NULL;
	// find HyperRect with most contexts (machines)
	int maxNumContexts = -1;
	int currNumContexts = 0;
	currentABV = NULL;
HyperRect *bestHR = NULL;
IndexSet hasContext;
IndexSet hasMachine;
hasContext.Init( numContexts );
hasMachine.Init( numReqs );
abvList.Rewind( );
allHyperRectangles.Rewind( );
while( allHyperRectangles.Next( hrs ) ) {
abvList.Next( currentABV );
for( int i = 0; i < hrs->getsize( ); i++ ) {
currHR = ( *hrs )[i];
currHR->GetIndexSet( hasContext );
IndexSet::Translate( hasContext, machineForContext.getarray (),
numContexts, numReqs, hasMachine );
hasMachine.GetCardinality( currNumContexts );
if( currNumContexts > maxNumContexts ) {
maxNumContexts = currNumContexts;
bestHR = currHR;
}
}
}
///////////////////////////////////////
// STEP 4 - SET UP ATTRIBUTE EXPLAIN //
///////////////////////////////////////
List< AttributeExplain> attrExplains;
AttributeExplain *currAttrExplain;
string refdAttr = "";
Interval *ival = NULL;
refdAttrs.Rewind( );
int i = 0;
while( bestHR && refdAttrs.Next( refdAttr ) ) {
bestHR->GetInterval( i, ival );
currAttrExplain = new AttributeExplain( );
if( ival == NULL ) {
currAttrExplain->Init( refdAttr );
}
else {
switch( GetValueType( ival ) ) {
case classad::Value::BOOLEAN_VALUE:
case classad::Value::STRING_VALUE: {
currAttrExplain->Init( refdAttr, ival->lower );
break;
}
case classad::Value::INTEGER_VALUE:
case classad::Value::REAL_VALUE:
case classad::Value::RELATIVE_TIME_VALUE:
case classad::Value::ABSOLUTE_TIME_VALUE: {
double low = 0;
double high = 0;
GetLowDoubleValue( ival, low );
GetHighDoubleValue( ival, high );
if( low == high && !ival->openLower && !ival->openUpper ) {
currAttrExplain->Init( refdAttr, ival->lower );
}
else {
currAttrExplain->Init( refdAttr, ival );
}
break;
}
default: {
currAttrExplain->Init( refdAttr, ival->lower );
break;
}
}
}
attrExplains.Append( currAttrExplain );
i++;
}
if( !caExplain.Init( undefAttrs, attrExplains ) ) {
cout << "error with ClassAdExplain::Init" << endl;
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
refdAttrs.Rewind( );
while( refdAttrs.Next( attr ) ) {
delete attr;
}
undefAttrs.Rewind( );
while( undefAttrs.Next( attr ) ) {
delete attr;
}
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return false;
}
reqList.Rewind( );
while( reqList.Next( currReq ) ) {
delete currReq;
}
refdAttrs.Rewind( );
while( refdAttrs.Next( attr ) ) {
delete attr;
}
undefAttrs.Rewind( );
while( undefAttrs.Next( attr ) ) {
delete attr;
}
abvList.Rewind( );
while( abvList.Next( abv ) ) {
delete abv;
}
return true;
}
bool ClassAdAnalyzer::
EqualsIgnoreCase( const string &s1, const string &s2 )
{
return( strcasecmp( s1.c_str( ), s2.c_str( ) ) == 0 );
}
bool ClassAdAnalyzer::
DefinedLiteralValue( classad::Value &val )
{
return ( val.IsStringValue( ) ||
val.IsBooleanValue( ) ||
val.IsNumber( ) ||
val.IsAbsoluteTimeValue( ) ||
val.IsRelativeTimeValue( ) );
}
bool ClassAdAnalyzer::
AddConstraint( ValueRange *&vr, Condition *condition )
{
classad::PrettyPrint pp;
string buffer;
if( condition == NULL ) {
cerr << "Error: passed NULL Condition pointer to AddConstraint"
<< endl;
return false;
}
if( vr == NULL ) {
cerr << "Error: passed NULL ValueRange pointer to AddConstraint"
<< endl;
return false;
}
if( condition->IsComplex( ) && condition->HasMultipleAttrs( ) ) {
cerr << "AddConstraint: can't process complex Condition:" << endl;
condition->ToString( buffer );
cerr << buffer << endl;
return false;
}
classad::Operation::OpKind op = (classad::Operation::OpKind)0;
classad::Value val;
bool undef = false;
bool twoVals = false;
classad::Value val1, val2;
if( condition->IsComplex( ) && !condition->HasMultipleAttrs( ) ) {
classad::Operation::OpKind op1, op2;
condition->GetOp( op1 );
condition->GetOp2( op2 );
condition->GetVal( val1 );
condition->GetVal2( val2 );
undef = false;
if( val1.IsUndefinedValue( ) &&
DefinedLiteralValue( val2 ) ) {
undef = true;
val.CopyFrom( val2 );
op = op2;
}
else if( val2.IsUndefinedValue( ) &&
DefinedLiteralValue( val1 ) ) {
undef = true;
val.CopyFrom( val1 );
op = op1;
}
else {
classad::Value::ValueType type1 = val1.GetType( );
classad::Value::ValueType type2 = val2.GetType( );
// if( !SameType( type1, type2 ) ||
// !DefinedLiteralValue( val1 ) ||
// val1.IsStringValue ||
// val1.IsBooleanValue ) {
if( DefinedLiteralValue( val1 ) &&
DefinedLiteralValue( val2 ) &&
op1 == classad::Operation::EQUAL_OP &&
op2 == classad::Operation::EQUAL_OP &&
SameType( type1, type2 ) ) {
twoVals = true;
}
else {
cerr << "AddConstraint: can't process complex Condition"<<endl;
pp.Unparse( buffer, val1 );
cerr << "val1 is " << buffer << endl;
buffer = "";
pp.Unparse( buffer, val2 );
cerr << "val2 is " << buffer << endl;
buffer = "";
condition->ToString( buffer );
cerr << buffer << endl;
return false;
}
// }
}
}
if( !condition->IsComplex( ) ) {
condition->GetOp( op );
condition->GetVal( val );
}
classad::Value::ValueType type = val.GetType( );
	if( twoVals ) {
			// build a point interval for each of the two values and fold
			// them into the range; operator new throws rather than
			// returning NULL, so no NULL checks are needed here
		Interval *i1 = new Interval( );
		Interval *i2 = new Interval( );
		i1->lower.CopyFrom( val1 );
		i1->upper.CopyFrom( val1 );
		i1->openLower = false;
		i1->openUpper = false;
		i2->lower.CopyFrom( val2 );
		i2->upper.CopyFrom( val2 );
		i2->openLower = false;
		i2->openUpper = false;
		if( vr->IsInitialized( ) ) {
			vr->Intersect( i1, i2, false );
		}
		else {
			vr->Init( i1, i2, false );
		}
		delete i1;
		delete i2;
	}
else if( op == classad::Operation::NOT_EQUAL_OP ||
op == classad::Operation::ISNT_OP ) {
// we need multiple intervals
switch( type ) {
case classad::Value::UNDEFINED_VALUE: {
if( op != classad::Operation::ISNT_OP ) {
vr->EmptyOut( );
return true;
}
if( vr->IsInitialized( ) ) {
vr->IntersectUndef( false );
}
else {
vr->InitUndef( false );
}
return true;
}
case classad::Value::BOOLEAN_VALUE: {
bool b;
if( val.IsBooleanValue( b ) ) {
Interval *i = new Interval( );
i->lower.SetBooleanValue( !b );
if( vr->IsInitialized( ) ) {
if( op == classad::Operation::ISNT_OP ) {
vr->Intersect( i, true );
}
else {
vr->Intersect( i, undef );
}
}
else {
if( op == classad::Operation::ISNT_OP ) {
vr->Init( i, true );
}
else {
vr->Init( i, undef );
}
}
delete i;
return true;
}
else {
cerr << "AddConstraint: error: boolean value expected"
<< endl;
return false;
}
}
case classad::Value::STRING_VALUE: {
Interval *i = new Interval( );
i->lower.CopyFrom( val );
if( vr->IsInitialized( ) ) {
if( op == classad::Operation::ISNT_OP ) {
vr->Intersect( i, true, true );
}
else {
vr->Intersect( i, undef, true );
}
}
else {
if( op == classad::Operation::ISNT_OP ) {
vr->Init( i, true, true );
}
else {
vr->Init( i, undef, true );
}
}
delete i;
return true;
}
case classad::Value::INTEGER_VALUE:
case classad::Value::REAL_VALUE:
case classad::Value::RELATIVE_TIME_VALUE:
case classad::Value::ABSOLUTE_TIME_VALUE: {
Interval *i1 = new Interval( );
Interval *i2 = new Interval( );
i1->lower.SetRealValue( -( FLT_MAX ) );
i1->upper.CopyFrom( val );
i1->openLower = false;
i1->openUpper = false;
i2->lower.CopyFrom( val );
i2->upper.SetRealValue( FLT_MAX );
i2->openLower = false;
i2->openUpper = false;
if( vr->IsInitialized( ) ) {
if( op == classad::Operation::ISNT_OP ) {
vr->Intersect( i1, i2, true );
}
else {
vr->Intersect( i1, i2, undef );
}
}
else {
if( op == classad::Operation::ISNT_OP ) {
vr->Init( i1, i2, true );
}
else {
vr->Init( i1, i2, undef );
}
}
delete i1;
delete i2;
}
return true;
default: {
string expr;
condition->ToString(expr);
cerr << "AddConstraint: Condition value not literal: '" << val << "' in '" << expr << "'" << endl;
return false;
}
}
}
else {
// op is not '!=' or 'isnt' so we only need one interval
Interval *i = new Interval( );
switch( type ) {
case classad::Value::UNDEFINED_VALUE:
if( op != classad::Operation::IS_OP ) {
vr->EmptyOut( );
delete i;
return true;
}
if( vr->IsInitialized( ) ) {
vr->IntersectUndef( );
}
else {
vr->InitUndef( );
}
delete i;
return true;
case classad::Value::BOOLEAN_VALUE:
case classad::Value::STRING_VALUE: {
if( op != classad::Operation::EQUAL_OP && op != classad::Operation::IS_OP ) {
// should be type error
vr->EmptyOut( );
delete i;
return true;
}
i->lower.CopyFrom( val );
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
case classad::Value::INTEGER_VALUE:
case classad::Value::REAL_VALUE:
case classad::Value::RELATIVE_TIME_VALUE:
case classad::Value::ABSOLUTE_TIME_VALUE: {
switch( op ) {
case classad::Operation::LESS_THAN_OP:
{
i->lower.SetRealValue( -( FLT_MAX ) );
i->upper.CopyFrom( val );
i->openLower = true;
i->openUpper = true;
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
case classad::Operation::LESS_OR_EQUAL_OP:
{
i->lower.SetRealValue( -( FLT_MAX ) );
i->upper.CopyFrom( val );
i->openLower = true;
i->openUpper = false;
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
case classad::Operation::EQUAL_OP:
case classad::Operation::IS_OP:
{
i->lower.CopyFrom( val );
i->upper.CopyFrom( val );
i->openLower = false;
i->openUpper = false;
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
case classad::Operation::GREATER_OR_EQUAL_OP:
{
i->lower.CopyFrom( val );
i->upper.SetRealValue( FLT_MAX );
i->openLower = false;
i->openUpper = true;
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
case classad::Operation::GREATER_THAN_OP:
{
i->lower.CopyFrom( val );
i->upper.SetRealValue( FLT_MAX );
i->openLower = true;
i->openUpper = true;
if( vr->IsInitialized( ) ) {
vr->Intersect( i, undef );
}
else {
vr->Init( i, undef );
}
delete i;
return true;
}
default:
{
// should be type error
if( vr->IsInitialized( ) ) {
vr->EmptyOut( );
}
delete i;
return true;
}
}
}
default:
{
// should be type error
if( vr->IsInitialized( ) ) {
vr->EmptyOut( );
}
delete i;
return true;
}
}
}
return true;
}
bool ClassAdAnalyzer::
AddDefaultConstraint( ValueRange *&vr )
{
Interval *i = new Interval( );
i->lower.SetBooleanValue( true );
if( vr->IsInitialized( ) ) {
vr->Intersect( i );
}
else {
vr->Init( i );
}
delete i;
return true;
}
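// Illustrative note: AddDefaultConstraint handles the degenerate case of a
// bare boolean attribute used with no comparison operator (e.g. a
// requirement of just "SomeBoolAttr", a hypothetical name); the attribute
// is constrained to the single point value true.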
bool ClassAdAnalyzer::
PruneDisjunction( classad::ExprTree *expr, classad::ExprTree *&result )
{
if( !expr ) {
cerr << "PD error: null expr" << endl;
return false;
}
classad::ExprTree::NodeKind kind;
classad::Operation::OpKind op;
classad::ExprTree *left, *right, *junk;
classad::ExprTree *newLeft = NULL;
classad::ExprTree *newRight = NULL;
classad::Value val;
bool boolValue;
classad::ExprTree *currentTree = expr;
kind = currentTree->GetKind( );
if( kind != classad::ExprTree::OP_NODE ) {
return PruneAtom( currentTree, result );
}
( ( classad::Operation * )currentTree )->GetComponents( op, left, right, junk );
if( op == classad::Operation::PARENTHESES_OP ) {
if( !PruneDisjunction( left, result ) ) {
return false;
}
else if( !( result=classad::Operation::MakeOperation( classad::Operation::PARENTHESES_OP,
result, NULL, NULL ) ) ) {
cerr << "PD error: can't make Operation" << endl;
return false;
}
else {
return true;
}
}
if( op != classad::Operation::LOGICAL_OR_OP ) {
return PruneConjunction( currentTree, result );
}
kind = left->GetKind( );
if( kind == classad::ExprTree::LITERAL_NODE ) {
( ( classad::Literal * )left )->GetValue( val );
if( val.IsBooleanValue( boolValue ) && boolValue == false ) {
return PruneDisjunction( right, result );
}
}
if( !PruneDisjunction( left, newLeft ) ||
!PruneConjunction( right, newRight ) ||
!newLeft || !newRight ||
!( result = classad::Operation::MakeOperation( classad::Operation::LOGICAL_OR_OP,
newLeft, newRight, NULL ) ) ) {
cerr << "PD error: can't make Operation" << endl;
return false;
}
return true;
}
bool ClassAdAnalyzer::
PruneConjunction( classad::ExprTree *expr, classad::ExprTree *&result ) {
if( !expr ) {
cerr << "PC error: null expr" << endl;
return false;
}
classad::ExprTree::NodeKind kind;
classad::Operation::OpKind op;
classad::ExprTree *left, *right, *junk;
classad::ExprTree *newRight = NULL;
classad::ExprTree *newLeft = NULL;
classad::Value val;
bool boolValue;
classad::ExprTree *currentTree = expr;
kind = currentTree->GetKind( );
if( kind != classad::ExprTree::OP_NODE ) {
return PruneAtom( currentTree, result );
}
( ( classad::Operation * )currentTree )->GetComponents( op, left, right, junk );
if( op == classad::Operation::PARENTHESES_OP ) {
if( !PruneConjunction( left, result ) ) {
return false;
}
else if( !( result=classad::Operation::MakeOperation( classad::Operation::PARENTHESES_OP,
result, NULL, NULL ) ) ) {
cerr << "PC error: can't make Operation" << endl;
return false;
}
else {
return true;
}
}
if( op != classad::Operation::LOGICAL_AND_OP && op != classad::Operation::LOGICAL_OR_OP ) {
return PruneAtom( currentTree, result );
}
if( op == classad::Operation::LOGICAL_OR_OP ) {
return PruneDisjunction( currentTree, result );
}
kind = left->GetKind( );
if( kind == classad::ExprTree::LITERAL_NODE ) {
( ( classad::Literal * )left )->GetValue( val );
if( val.IsBooleanValue( boolValue ) && boolValue == true ) {
return PruneConjunction( right, result );
}
}
if( !PruneConjunction( left, newLeft ) ||
!PruneDisjunction( right, newRight ) ||
!newLeft || !newRight ||
!( result = classad::Operation::MakeOperation( classad::Operation::LOGICAL_AND_OP,
newLeft, newRight, NULL ) ) ) {
cerr << "PC error: can't Make Operation" << endl;
return false;
}
return true;
}
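// Illustrative note on the pruning above (a reading of the literal checks
// in PruneDisjunction/PruneConjunction): a leading literal that is the
// identity of its operator is dropped, e.g.
//   false || expr  -->  expr   (PruneDisjunction)
//   true  && expr  -->  expr   (PruneConjunction)
// All other subtrees are rebuilt unchanged.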
bool ClassAdAnalyzer::
PruneAtom( classad::ExprTree *expr, classad::ExprTree *&result )
{
if( !expr ) {
cerr << "PA error: null expr" << endl;
return false;
}
classad::ExprTree::NodeKind kind;
classad::Operation::OpKind op;
classad::ExprTree *left, *right, *junk;
classad::Value val;
bool boolValue;
string attr;
kind = expr->GetKind( );
if( kind != classad::ExprTree::OP_NODE ) {
result = expr->Copy( );
return true;
}
( ( classad::Operation * )expr )->GetComponents( op, left, right, junk );
if( op == classad::Operation::PARENTHESES_OP ) {
if( !PruneAtom( left, result ) ) {
cerr << "PA error: problem with expression in parens" << endl;
return false;
}
else if( !( result=classad::Operation::MakeOperation(classad::Operation::PARENTHESES_OP,
result, NULL, NULL ) ) ) {
cerr << "PA error: can't make Operation" << endl;
return false;
}
else {
return true;
}
}
	if( left == NULL || right == NULL ) {
			// check operands before dereferencing left below
		cerr << "PA error: NULL ptr in expr" << endl;
		return false;
	}
	if( op == classad::Operation::LOGICAL_OR_OP &&
		left->GetKind( ) == classad::ExprTree::LITERAL_NODE ) {
		( ( classad::Literal *)left )->GetValue( val );
		if( val.IsBooleanValue( boolValue ) && !boolValue ) {
			return PruneAtom( right, result );
		}
	}
if( !( result = classad::Operation::MakeOperation( op, left->Copy( ), right->Copy(),
NULL ) ) ) {
cerr << "PA error: can't make Operation" << endl;
return false;
}
return true;
}
/*
bool ClassAdAnalyzer::
InDNF( classad::ExprTree * tree )
{
if( tree == NULL ) {
cerr << "InDNF: tried to pass null pointer" << endl;
return false;
}
if( tree->GetKind( ) != classad::ExprTree::OP_NODE ) {
return false;
}
classad::Operation::OpKind op;
classad::ExprTree *arg1;
classad::ExprTree *arg2;
classad::ExprTree *junk;
( ( classad::Operation *)tree )->GetComponents( op, arg1, arg2, junk );
// all comparisons are atomic formulas
if( op >= classad::Operation::__COMPARISON_START__ &&
op <= classad::Operation::__COMPARISON_END__ ) {
return true;
}
if( op == classad::Operation::PARENTHESES_OP ) {
return InDNF( arg1 );
}
// operators must be comparison or logical
if( op < classad::Operation::__LOGIC_START__ ||
op > classad::Operation::__LOGIC_END__ ) {
return false;
}
// FINISH THIS CODE
return true;
}
bool ClassAdAnalyzer::
IsAtomicBooleanFormula( classad::Operation *tree )
{
if( tree == NULL ) {
cerr << "IsAtomicBooleanFormula: tried to pass null pointer" << endl;
return false;
}
// FINISH THIS CODE
return true;
}
bool ClassAdAnalyzer::
PropagateNegation( classad::ExprTree *tree, classad::ExprTree *&result )
{
if( tree == NULL ) {
cerr << "PropagateNegation: tried to pass null pointer" << endl;
return false;
}
// FINISH THIS CODE
return true;
}
bool ClassAdAnalyzer::
ToDNF( classad::ExprTree *tree, classad::ExprTree *&result )
{
if( tree == NULL ) {
cerr << "ToDNF: tried to pass null pointer" << endl;
return false;
}
// FINISH THIS CODE
return true;
}
*/
|
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "content/shell/browser/layout_test/layout_test_url_request_context_getter.h"
#include <utility>
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "content/public/browser/browser_thread.h"
#include "content/shell/browser/shell_network_delegate.h"
#include "net/proxy/proxy_service.h"
namespace content {
LayoutTestURLRequestContextGetter::LayoutTestURLRequestContextGetter(
bool ignore_certificate_errors,
const base::FilePath& base_path,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> file_task_runner,
ProtocolHandlerMap* protocol_handlers,
URLRequestInterceptorScopedVector request_interceptors,
net::NetLog* net_log)
: ShellURLRequestContextGetter(ignore_certificate_errors,
base_path,
std::move(io_task_runner),
std::move(file_task_runner),
protocol_handlers,
std::move(request_interceptors),
net_log) {
// Must first be created on the UI thread.
DCHECK_CURRENTLY_ON(BrowserThread::UI);
}
LayoutTestURLRequestContextGetter::~LayoutTestURLRequestContextGetter() {
}
std::unique_ptr<net::NetworkDelegate>
LayoutTestURLRequestContextGetter::CreateNetworkDelegate() {
ShellNetworkDelegate::SetBlockThirdPartyCookies(true);
return base::WrapUnique(new ShellNetworkDelegate);
}
std::unique_ptr<net::ProxyConfigService>
LayoutTestURLRequestContextGetter::GetProxyConfigService() {
return nullptr;
}
std::unique_ptr<net::ProxyService>
LayoutTestURLRequestContextGetter::GetProxyService() {
return net::ProxyService::CreateDirect();
}
} // namespace content
|
/* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdarg.h>
#include "TestEnclave.h"
#include "TestEnclave_t.h" /* print_string */
#include <openssl/bn.h>
#include <openssl/ec.h>
#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/rand.h>
#include <openssl/rsa.h>
#include "testCrypto.h"
#include "crypto.h"
#include "tSgxSSL_api.h"
#include "c11_support.h" /* vsnprintf_s */
/*
* printf:
* Invokes OCALL to display the enclave buffer to the terminal.
*/
void printf(const char* fmt, ...)
{
char buf[BUFSIZ] = {'\0'};
va_list ap;
va_start(ap, fmt);
vsnprintf_s(buf, BUFSIZ, fmt, ap);
va_end(ap);
ocall_print_string(buf);
}
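/*
 * Usage note (illustrative): printf("value: %d\n", 42) formats entirely
 * inside the enclave; only the finished string crosses the enclave boundary
 * through the ocall_print_string OCALL, so format arguments are never
 * exposed to untrusted code mid-formatting.
 */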
// Test ECALL
int test()
{
return tcf::crypto::testCrypto();
}
|
/*
* scales.h
*
* Created on: Aug 18, 2018
* Author: willmitchell
*/
#ifndef INC_SCALES_HPP_
#define INC_SCALES_HPP_
#include <stdint.h>
/// Data structure for a note in a scale as represented by a precalculated ratio of integers. Generated with (link).
typedef struct {
/// Top 32 bits of a 64 bit precalculated ratio.
const uint32_t integerPart;
/// Bottom 32 bits of a 64 bit precalculated ratio.
const uint32_t fractionalPart;
/// GCD of the numerator and denominator.
const uint32_t fundamentalDivision;
} ScaleNote;
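/// A minimal sketch (helper not used elsewhere in this header) showing how
/// the two 32-bit halves recombine into the original 64-bit fixed-point
/// ratio: integer part in the high word, fraction in the low word.
static inline uint64_t ScaleNoteToFixedPoint(const ScaleNote * note) {
	return ((uint64_t) note->integerPart << 32) | note->fractionalPart;
}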
/// Data structure for a grid of scale notes. Generated with (link).
typedef struct {
/// An array of scales (an array of pointers to notes).
const ScaleNote* const** grid;
	/// 12 - log2(number of scales), used to scale a control to a lookup index.
const uint32_t t2Bitshift;
/// Use 1v_oct control parsing if 1.
const uint32_t oneVoltOct;
} Scale;
#endif /* INC_SCALES_HPP_ */
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include <algorithm>
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/plugin/trt_plugin_factory.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_resource_manager.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_resources.h"
#include "tensorflow/core/framework/node_def.pb.h" // NOLINT
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.pb.h" // NOLINT
#include "tensorflow/core/framework/tensor_shape.pb.h" // NOLINT
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_constructor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/tensor_coding.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA
#if GOOGLE_TENSORRT
#include "tensorrt/include/NvInfer.h"
// Check if the types are equal. Cast to int first so that the failure log
// message can print the values.
#define TFTRT_CHECK_EQ_TYPE(val1, val2) CHECK_EQ((int)val1, (int)val2)
#define TFTRT_INTERNAL_ERROR_AT_NODE(node) \
do { \
return tensorflow::errors::Internal( \
"TFTRT::", __FUNCTION__, " failed to add TRT layer, at: ", node); \
} while (0)
#define TFTRT_RETURN_ERROR_IF_FALSE(status, node) \
do { \
if (status == false) { \
TFTRT_INTERNAL_ERROR_AT_NODE(node); \
} \
} while (0)
#define TFTRT_RETURN_ERROR_IF_NULLPTR(ptr, node) \
do { \
if (ptr == nullptr) { \
TFTRT_INTERNAL_ERROR_AT_NODE(node); \
} \
} while (0)
namespace tensorflow {
namespace tensorrt {
// TODO(aaroey): put these constants into some class.
const char* const kInputPHName = "TensorRTInputPH_";
const char* const kOutputPHName = "TensorRTOutputPH_";
namespace convert {
using absl::StrAppend;
using absl::StrCat;
using ::tensorflow::str_util::Split;
inline tensorflow::Status ConvertDType(tensorflow::DataType tf_dtype,
nvinfer1::DataType* trt_dtype) {
switch (tf_dtype) {
case tensorflow::DataType::DT_FLOAT:
*trt_dtype = nvinfer1::DataType::kFLOAT;
break;
// TODO(aaroey): this should be DT_QINT8 which is not a well supported type.
case tensorflow::DataType::DT_INT8:
*trt_dtype = nvinfer1::DataType::kINT8;
break;
case tensorflow::DataType::DT_HALF:
*trt_dtype = nvinfer1::DataType::kHALF;
break;
case tensorflow::DataType::DT_INT32:
*trt_dtype = nvinfer1::DataType::kINT32;
break;
default:
return tensorflow::errors::InvalidArgument(
"Unsupported data type ", tensorflow::DataTypeString(tf_dtype));
}
return tensorflow::Status::OK();
}
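// Example usage (illustrative):
//   nvinfer1::DataType trt_type;
//   TF_CHECK_OK(ConvertDType(tensorflow::DT_FLOAT, &trt_type));
//   // trt_type is now nvinfer1::DataType::kFLOAT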
template <typename TensorShapeType>
inline nvinfer1::Dims TensorShapeToTrtDims(const TensorShapeType& shape,
bool ignore_first_dim) {
nvinfer1::Dims trt_dims;
const int offset = (ignore_first_dim ? 1 : 0);
for (int i = offset; i < shape.dims(); i++) {
trt_dims.d[i - offset] = shape.dim_size(i);
}
trt_dims.nbDims = shape.dims() - offset;
return trt_dims;
}
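// Example (illustrative): a TF shape [8, 224, 224, 3] converted with
// ignore_first_dim=true yields trt_dims.nbDims == 3 and d == {224, 224, 3};
// the leading 8 is dropped because TRT treats it as the implicit batch size.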
Status TensorShapeArrayToTrtDims(const std::vector<int>& shape,
nvinfer1::Dims* out,
bool ignore_first_dim = false) {
PartialTensorShape tensor_shape;
TF_RETURN_IF_ERROR(TensorShapeUtils::MakeShape(shape, &tensor_shape));
*out = TensorShapeToTrtDims(tensor_shape, ignore_first_dim);
return tensorflow::Status::OK();
}
void GetOutputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int out_port,
PartialTensorShape* shape,
tensorflow::DataType* dtype) {
if (graph_properties.HasOutputProperties(node->name())) {
auto output_params = graph_properties.GetOutputProperties(node->name());
auto out_shape = output_params.at(out_port);
*dtype = out_shape.dtype();
*shape = out_shape.shape();
} else {
LOG(INFO) << "Unknown output shape" << node->name();
*dtype = node->output_type(out_port);
}
}
void GetInputProperties(const grappler::GraphProperties& graph_properties,
const Node* node, const int in_port,
PartialTensorShape* shape,
tensorflow::DataType* dtype) {
if (graph_properties.HasInputProperties(node->name())) {
auto input_params = graph_properties.GetInputProperties(node->name());
auto in_shape = input_params.at(in_port);
*dtype = in_shape.dtype();
*shape = in_shape.shape();
} else {
*dtype = node->input_type(in_port);
}
}
Status ValidateTensorProperties(const string& producer_node_type,
const tensorflow::DataType dtype,
const PartialTensorShape& shape,
bool validation_only,
nvinfer1::DataType* trt_dtype,
nvinfer1::Dims* trt_dims, int* batch_size) {
// Convert data type.
TF_RETURN_IF_ERROR(ConvertDType(dtype, trt_dtype));
// Convert shape.
if (shape.dims() < 0) {
return errors::InvalidArgument("Input tensor rank is unknown.");
}
if (shape.dims() > nvinfer1::Dims::MAX_DIMS + 1) { // +1 for batch dim
return errors::OutOfRange("Input tensor rank is greater than ",
nvinfer1::Dims::MAX_DIMS + 1);
}
if (producer_node_type != "Const" && shape.dims() < 2) {
return errors::InvalidArgument(
"Input tensor with rank<2 is not supported since the first dimension "
"is treated as batch dimension by TRT");
}
*trt_dims = TensorShapeToTrtDims(shape, /*ignore_first_dim=*/true);
*batch_size = shape.dim_size(0);
if (validation_only) return Status::OK();
// Following are validations at runtime.
for (int d = 1; d < shape.dims(); ++d) {
if (shape.dim_size(d) < 0) {
return errors::InvalidArgument(
"Input tensor with shape ", shape.DebugString(),
" has an unknown non-batch dimension at dim ", d);
}
}
return Status::OK();
}
string DebugString(const nvinfer1::DimensionType type) {
switch (type) {
case nvinfer1::DimensionType::kSPATIAL:
return "kSPATIAL";
case nvinfer1::DimensionType::kCHANNEL:
return "kCHANNEL";
case nvinfer1::DimensionType::kINDEX:
return "kINDEX";
case nvinfer1::DimensionType::kSEQUENCE:
return "kSEQUENCE";
default:
return StrCat(static_cast<int>(type), "=unknown");
}
}
string DebugString(const nvinfer1::DataType trt_dtype) {
switch (trt_dtype) {
case nvinfer1::DataType::kFLOAT:
return "kFLOAT";
case nvinfer1::DataType::kHALF:
return "kHALF";
case nvinfer1::DataType::kINT8:
return "kINT8";
case nvinfer1::DataType::kINT32:
return "kINT32";
default:
return "Invalid TRT data type";
}
}
string DebugString(const nvinfer1::Dims& dims) {
string out = StrCat("nvinfer1::Dims(nbDims=", dims.nbDims, ", d=");
for (int i = 0; i < dims.nbDims; ++i) {
StrAppend(&out, dims.d[i], "[", DebugString(dims.type[i]), "],");
}
StrAppend(&out, ")");
return out;
}
string DebugString(const nvinfer1::Permutation& permutation, int len) {
string out = "nvinfer1::Permutation(";
for (int i = 0; i < len; ++i) {
StrAppend(&out, permutation.order[i], ",");
}
StrAppend(&out, ")");
return out;
}
string DebugString(const nvinfer1::ITensor& tensor) {
return StrCat("nvinfer1::ITensor(@", reinterpret_cast<uintptr_t>(&tensor),
", name=", tensor.getName(),
", dtype=", DebugString(tensor.getType()),
", dims=", DebugString(tensor.getDimensions()), ")");
}
Status Converter::GetTrtBroadcastShape(
const TRT_TensorOrWeights& operand_l, const TRT_TensorOrWeights& operand_r,
nvinfer1::Dims* operand_l_new_dims,
nvinfer1::Dims* operand_r_new_dims) const {
// ***************************************************************************
  // The TensorRT elementwise op supports broadcast but requires both tensors
  // to be of identical rank.
  //
  // We consider the cases of:
  //  1. operand_l is a Tensor & operand_r is a Const;
  //  2. operand_l is a Tensor & operand_r is a Tensor;
  // note: const op const (constant folding) should fall back to TensorFlow
//
// broadcast scheme:
// T: 1 3 5 (tensor would not have batch dimension)
// W: 1 1 3 1 (weight would have all explicit dimensions)
// i. fill in explicit dimensions
// -> T: -1 1 3 5 (we put a -1 for batch dimension)
// -> W: 1 1 3 1
// ii. compare broadcast feasibility
//
// We cannot support the following since TensorRT does not allow manipulation
// on batch dimension, we cannot generate output with proper shape
// T: 3 5 1
// W: 1 1 1 1 3 5 1
// -> T: 1 1 1 -1 3 5 1
// -> W: 1 1 1 1 3 5 1
// ***************************************************************************
if (!operand_l.is_tensor() && !operand_r.is_tensor()) {
return errors::InvalidArgument(
"Broadcasting requires at least one of the operands be tensors");
}
const int max_nb_dims = nvinfer1::Dims::MAX_DIMS + 1;
auto compute_output_dims =
[max_nb_dims](const TRT_TensorOrWeights& input, int broadcast_num_dims,
int* output_dims_array, nvinfer1::Dims* output_dims) {
const nvinfer1::Dims input_dims = input.GetTrtDims();
std::fill(output_dims_array, output_dims_array + max_nb_dims, 1);
std::copy(input_dims.d, input_dims.d + input_dims.nbDims,
output_dims_array + broadcast_num_dims - input_dims.nbDims);
if (input.is_tensor()) {
const int true_input_dims = input_dims.nbDims + 1;
if (true_input_dims < broadcast_num_dims) {
return errors::InvalidArgument(
"Broadcasting beyond batch dimension is not supported ",
"(tensor #dims ", true_input_dims, " vs broadcast #dims ",
broadcast_num_dims, ")");
}
// Set the batch dimension to -1, since batch size is not supposed to
// be broadcasted.
output_dims_array[0] = -1;
}
// Copy to output dimensions (stripping the batch dimension).
output_dims->nbDims = broadcast_num_dims - 1;
std::copy(output_dims_array + 1, output_dims_array + broadcast_num_dims,
output_dims->d);
return Status::OK();
};
// Compute the output dimensions.
const int broadcast_num_dims =
std::max(operand_l.GetTrtDims().nbDims + (operand_l.is_tensor() ? 1 : 0),
operand_r.GetTrtDims().nbDims + (operand_r.is_tensor() ? 1 : 0));
int output_l[max_nb_dims], output_r[max_nb_dims];
TF_RETURN_IF_ERROR(compute_output_dims(operand_l, broadcast_num_dims,
output_l, operand_l_new_dims));
TF_RETURN_IF_ERROR(compute_output_dims(operand_r, broadcast_num_dims,
output_r, operand_r_new_dims));
// Compare broadcast feasibility
for (int i = 0; i < broadcast_num_dims; ++i) {
if ((output_l[i] != output_r[i]) && (output_l[i] != 1) &&
(output_r[i] != 1)) {
return errors::InvalidArgument(
"Infeasible broadcast scheme (", "batch_dim: ", output_l[0], ", ",
DebugString(*operand_l_new_dims), " vs ", "batch_dim: ", output_r[0],
", ", DebugString(*operand_r_new_dims), ")");
}
}
return Status::OK();
}
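// Worked example (illustrative) of the scheme documented above: for
// operand_l a tensor with GetTrtDims() == [3, 5] and operand_r weights with
// dims [1, 1, 3, 1], broadcast_num_dims = max(2 + 1, 4 + 0) = 4. The
// tensor's padded dims become [-1, 1, 3, 5] and the weights' stay
// [1, 1, 3, 1]; after stripping the batch dimension the new dims are
// [1, 3, 5] and [1, 3, 1], which pass the elementwise feasibility check.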
nvinfer1::ITensor* Converter::CreateConstantLayer(
const TRT_ShapedWeights& weights, const nvinfer1::Dims& dims) {
nvinfer1::Weights trt_weights = weights.GetTrtWeights();
nvinfer1::IConstantLayer* layer = network()->addConstant(dims, trt_weights);
if (!layer) return nullptr;
const nvinfer1::DataType trt_dtype = trt_weights.type;
nvinfer1::ITensor* trt_tensor = layer->getOutput(0);
  // TODO(laigd): there is a bug in the TensorRT 5.0 library: if we don't set
  // the data type below, it will always be kFLOAT regardless of what the data
  // type of the weights is. Once NVIDIA fixes this bug, we should remove the
  // data type setting logic below and the tests should still pass.
trt_tensor->setType(trt_dtype);
return trt_tensor;
}
inline bool DimsEqual(const nvinfer1::Dims& dim_l,
const nvinfer1::Dims& dim_r) {
if (dim_l.nbDims != dim_r.nbDims) {
return false;
}
for (int i = 0; i < dim_l.nbDims; i++) {
if (dim_l.d[i] != dim_r.d[i]) {
return false;
}
}
return true;
}
inline nvinfer1::Dims GetTrtDimsForTensor(const tensorflow::Tensor& tensor) {
nvinfer1::Dims dims;
dims.nbDims = tensor.dims();
for (int i = 0; i < dims.nbDims; i++) {
dims.d[i] = tensor.dim_size(i);
}
return dims;
}
inline bool HasStaticShape(const nvinfer1::Dims& dims) {
if (dims.nbDims < 0) return false;
for (int d = 0; d < dims.nbDims; ++d) {
if (dims.d[d] < 0) return false;
}
return true;
}
// Returns total number of elements in dims. Returning 0 means either some dim
// is 0 or the number of dims is 0.
// Note that for TF scalar constant, we always convert to dims [1].
int64_t TrtDimsNumElements(const nvinfer1::Dims& dims) {
if (dims.nbDims == 0) return 0;
int64_t count = 1;
for (int d = 0; d < dims.nbDims; ++d) {
count *= dims.d[d];
}
return count;
}
static std::vector<std::pair<int, int>> CreateSamePadding(
const nvinfer1::DimsHW& stride, const nvinfer1::DimsHW& kernel,
const std::vector<int64_t>& input_dims) {
std::vector<std::pair<int, int>> padding(input_dims.size());
CHECK_EQ(stride.nbDims, input_dims.size()); // TODO(jie): N+C? NC+?
for (size_t i = 0; i < input_dims.size(); ++i) {
// Formula to calculate the padding
int p = ((input_dims[i] - 1) / stride.d[i]) * stride.d[i] + kernel.d[i] -
input_dims[i];
p = (p > 0) ? p : 0;
// Right precedence padding, like in TensorFlow
int left = p / 2;
int right = p - left;
VLOG(2) << "PADDING_" << i << " pre: " << left << ", post: " << right
<< "paras: " << input_dims[i] << ", " << stride.d[i] << ", "
<< "kernel: " << kernel.d[i];
padding[i] = {left, right};
}
return padding;
}
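// Worked example (illustrative): for input_dims[i] = 7, stride 2 and
// kernel 3, p = ((7 - 1) / 2) * 2 + 3 - 7 = 2, giving padding
// {left = 1, right = 1}. When p is odd the extra element goes to the right,
// matching TensorFlow's SAME padding.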
string GetCommonNameScope(const string& op_name_a, const string& op_name_b) {
size_t last_scope_separator = 0;
const size_t min_size = std::min(op_name_a.size(), op_name_b.size());
for (size_t i = 0; i < min_size; ++i) {
if (op_name_a[i] != op_name_b[i]) break;
if (op_name_a[i] == '/') last_scope_separator = i + 1;
}
return op_name_a.substr(0, last_scope_separator);
}
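// Example (illustrative): GetCommonNameScope("conv1/weights", "conv1/biases")
// returns "conv1/", while GetCommonNameScope("a/b", "c/d") returns "".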
TRT_ShapedWeights::TRT_ShapedWeights(DataType type) : type_(type) {
shape_.nbDims = 0;
}
TRT_ShapedWeights::TRT_ShapedWeights(DataType type, nvinfer1::Dims dims,
Tensor tensor)
: shape_(dims), type_(type), tensor_(tensor) {}
TRT_ShapedWeights::TRT_ShapedWeights(const TRT_ShapedWeights& rhs)
: shape_(rhs.shape_), type_(rhs.type_), tensor_(rhs.tensor_) {}
int64_t TRT_ShapedWeights::count() const { return TrtDimsNumElements(shape_); }
nvinfer1::Weights TRT_ShapedWeights::GetTrtWeights() const {
nvinfer1::DataType trt_type(nvinfer1::DataType::kFLOAT);
TF_CHECK_OK(ConvertDType(type_, &trt_type));
return nvinfer1::Weights{trt_type, GetValues(), count()};
}
size_t TRT_ShapedWeights::size_bytes() const {
return this->count() * tensorflow::DataTypeSize(this->type_);
}
string TRT_ShapedWeights::DebugString() const {
return StrCat("TRT_ShapedWeights(shape=", convert::DebugString(shape_),
", type=", DataTypeString(type_),
", values=", reinterpret_cast<uintptr_t>(GetValues()), ")");
}
// A fake ITensor implementation used to check whether the TF-TRT converter
// can handle a specific node. We only need shape and type information, and
// the converter won't (and shouldn't) use this to build the TRT network.
class TRT_TensorOrWeights::SimpleITensor : public nvinfer1::ITensor {
public:
SimpleITensor(nvinfer1::DataType trt_dtype, const nvinfer1::Dims& trt_dims)
: trt_dtype_(trt_dtype), trt_dims_(trt_dims) {}
void setName(const char* name) override {}
const char* getName() const override { return ""; }
void setDimensions(nvinfer1::Dims dimensions) override {
trt_dims_ = dimensions;
}
nvinfer1::Dims getDimensions() const override { return trt_dims_; }
void setType(nvinfer1::DataType trt_dtype) override {
trt_dtype_ = trt_dtype;
}
nvinfer1::DataType getType() const override { return trt_dtype_; }
bool isNetworkInput() const override { return false; }
bool isNetworkOutput() const override { return false; }
void setBroadcastAcrossBatch(bool broadcastAcrossBatch) override {}
bool getBroadcastAcrossBatch() const override { return false; }
nvinfer1::TensorLocation getLocation() const override {
// This is arbitrary, since we don't use it.
return nvinfer1::TensorLocation::kDEVICE;
}
void setLocation(nvinfer1::TensorLocation location) override {}
#if NV_TENSORRT_MAJOR >= 5
bool setDynamicRange(float min, float max) override { return true; }
float getDynamicRange() const override { return 0; }
#endif
private:
nvinfer1::DataType trt_dtype_;
nvinfer1::Dims trt_dims_;
};
TRT_TensorOrWeights::TRT_TensorOrWeights(nvinfer1::ITensor* tensor,
int batch_size)
: tensor_(tensor),
batch_size_(batch_size),
initialized_(true),
is_tensor_(true) {}
TRT_TensorOrWeights::TRT_TensorOrWeights(nvinfer1::DataType trt_dtype,
const nvinfer1::Dims& trt_dims,
int batch_size)
: simple_itensor_(new SimpleITensor(trt_dtype, trt_dims)),
batch_size_(batch_size),
initialized_(true),
is_tensor_(true) {}
TRT_TensorOrWeights::TRT_TensorOrWeights(const TRT_ShapedWeights& weights)
: weights_(weights), initialized_(true), is_tensor_(false) {}
TRT_TensorOrWeights::TRT_TensorOrWeights(const TRT_TensorOrWeights& rhs)
: tensor_(rhs.tensor_),
simple_itensor_(rhs.simple_itensor_),
batch_size_(rhs.batch_size_),
weights_(rhs.weights_),
initialized_(rhs.initialized_),
is_tensor_(rhs.is_tensor_) {}
void TRT_TensorOrWeights::operator=(const TRT_TensorOrWeights& rhs) {
tensor_ = rhs.tensor_;
simple_itensor_ = rhs.simple_itensor_;
batch_size_ = rhs.batch_size_;
weights_ = rhs.weights_;
initialized_ = rhs.initialized_;
is_tensor_ = rhs.is_tensor_;
}
nvinfer1::ITensor* TRT_TensorOrWeights::tensor() {
CHECK(is_tensor());
return tensor_ == nullptr ? simple_itensor_.get() : tensor_;
}
const nvinfer1::ITensor* TRT_TensorOrWeights::tensor() const {
CHECK(is_tensor());
return tensor_ == nullptr ? simple_itensor_.get() : tensor_;
}
nvinfer1::Dims TRT_TensorOrWeights::GetTrtDims() const {
if (is_tensor()) {
return tensor()->getDimensions();
} else {
return weights().shape_;
}
}
string TRT_TensorOrWeights::DebugString() const {
string output = "TRT_TensorOrWeights(type=";
if (is_tensor()) {
StrAppend(&output, "tensor=", convert::DebugString(*tensor()),
", batch_size=", batch_size_);
} else {
StrAppend(&output, "weights=", weights_.DebugString());
}
StrAppend(&output, ")");
return output;
}
class TFAttrs {
public:
explicit TFAttrs(const tensorflow::NodeDef& tf_node) {
for (const auto& attr : tf_node.attr()) {
attrs_.insert({attr.first, &attr.second});
}
}
bool count(const string& key) const { return attrs_.count(key); }
tensorflow::AttrValue const* at(const string& key) const {
if (!attrs_.count(key)) {
LOG(FATAL) << "Attribute not found: " << key;
}
return attrs_.at(key);
}
template <typename T>
T get(const string& key) const;
template <typename T>
T get(const string& key, const T& default_value) const {
return attrs_.count(key) ? this->get<T>(key) : default_value;
}
std::vector<string> GetAllAttrKeys() const {
std::vector<string> attr_list;
for (const auto& attr_item : attrs_) {
attr_list.emplace_back(attr_item.first);
}
return attr_list;
}
private:
typedef std::map<string, tensorflow::AttrValue const*> AttrMap;
AttrMap attrs_;
};
template <>
string TFAttrs::get<string>(const string& key) const {
return this->at(key)->s();
}
template <>
std::vector<int> TFAttrs::get<std::vector<int>>(const string& key) const {
auto attr = this->at(key)->list().i();
return std::vector<int>(attr.begin(), attr.end());
}
template <>
std::vector<float> TFAttrs::get<std::vector<float>>(const string& key) const {
auto attr = this->at(key)->list().f();
return std::vector<float>(attr.begin(), attr.end());
}
template <>
nvinfer1::DataType TFAttrs::get<nvinfer1::DataType>(const string& key) const {
nvinfer1::DataType trt_dtype(nvinfer1::DataType::kFLOAT);
TF_CHECK_OK(ConvertDType(this->at(key)->type(), &trt_dtype));
return trt_dtype;
}
template <>
tensorflow::DataType TFAttrs::get<tensorflow::DataType>(
const string& key) const {
return this->at(key)->type();
}
template <>
float TFAttrs::get<float>(const string& key) const {
return this->at(key)->f();
}
template <>
bool TFAttrs::get<bool>(const string& key) const {
return this->at(key)->b();
}
template <>
int TFAttrs::get<int>(const string& key) const {
return this->at(key)->i();
}
// TODO(jie): reorder4 & reorder2 should be merged?
// TODO(aaroey): fix the order of parameters.
template <typename T>
void Reorder4(const nvinfer1::DimsNCHW& shape, const T* idata,
const nvinfer1::DimsNCHW& istrides, T* odata,
const nvinfer1::DimsNCHW& ostrides) {
for (int n = 0; n < shape.n(); ++n) {
for (int c = 0; c < shape.c(); ++c) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[n * ostrides.n() + c * ostrides.c() + h * ostrides.h() +
w * ostrides.w()] = idata[n * istrides.n() + c * istrides.c() +
h * istrides.h() + w * istrides.w()];
}
}
}
}
}
template <typename T>
void Reorder2(const nvinfer1::DimsHW& shape, const T* idata,
const nvinfer1::DimsHW& istrides, T* odata,
const nvinfer1::DimsHW& ostrides) {
for (int h = 0; h < shape.h(); ++h) {
for (int w = 0; w < shape.w(); ++w) {
odata[h * ostrides.h() + w * ostrides.w()] =
idata[h * istrides.h() + w * istrides.w()];
}
}
}
// TODO(jie): fallback to tensorflow!!
void ReorderCKtoKC(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights) {
const int c = iweights.shape_.d[0];
const int k = iweights.shape_.d[1];
oweights->shape_.d[0] = k;
oweights->shape_.d[1] = c;
const nvinfer1::DimsHW istrides = {1, k};
const nvinfer1::DimsHW ostrides = {c, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder2({k, c}, static_cast<float const*>(iweights.GetValues()),
istrides,
// TODO(aaroey): get rid of all the const_cast like this.
static_cast<float*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
}
case tensorflow::DataType::DT_HALF: {
Reorder2(
{k, c}, static_cast<Eigen::half const*>(iweights.GetValues()),
istrides,
static_cast<Eigen::half*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type in reorder expected fp32 or fp16 but got "
<< DataTypeString(iweights.type_);
}
}
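// Worked example (illustrative): with c = 2 and k = 3, the CK-major input
// stores element (ci, ki) at index ci * k + ki, and Reorder2 moves it to
// index ki * c + ci in the KC-major output, i.e. a plain 2-D transpose of
// the weight matrix.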
void ReorderRSCKToKCRS(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights, const int num_groups) {
CHECK_EQ(iweights.type_, oweights->type_);
CHECK_EQ(iweights.size_bytes(), oweights->size_bytes());
// K indexes over output channels, C over input channels, and R and S over the
// height and width of the convolution
const int r = iweights.shape_.d[0];
const int s = iweights.shape_.d[1];
// TRT requires GKcRS, while TF depthwise has RSCK where c=1, C=G
const int c = iweights.shape_.d[2] / num_groups;
const int k = iweights.shape_.d[3] * num_groups;
VLOG(2) << "num_groups: " << num_groups << "c" << iweights.shape_.d[2]
<< " then " << c << "k" << iweights.shape_.d[3] << " then " << k
<< "r" << iweights.shape_.d[0] << " then " << r << "s"
<< iweights.shape_.d[1] << " then " << s;
oweights->shape_.d[0] = k / num_groups;
oweights->shape_.d[1] = c * num_groups;
oweights->shape_.d[2] = r;
oweights->shape_.d[3] = s;
const nvinfer1::DimsNCHW istrides = {1, k, s * k * c, c * k};
const nvinfer1::DimsNCHW ostrides = {c * r * s, r * s, s, 1};
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
Reorder4({k, c, r, s}, static_cast<float const*>(iweights.GetValues()),
istrides,
static_cast<float*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
}
case tensorflow::DataType::DT_HALF: {
Reorder4(
{k, c, r, s}, static_cast<Eigen::half const*>(iweights.GetValues()),
istrides,
static_cast<Eigen::half*>(const_cast<void*>(oweights->GetValues())),
ostrides);
break;
}
default:
LOG(FATAL) << "Unsupported type, expected fp32 or fp16 but got "
<< DataTypeString(iweights.type_);
}
}
TRT_ShapedWeights TrtWeightStore::GetTempWeights(tensorflow::DataType type,
const nvinfer1::Dims& dims) {
TensorShape shape;
// TODO(laigd): make it return a status.
TF_CHECK_OK(TensorShapeUtils::MakeShape(dims.d, dims.nbDims, &shape));
// TODO(jie): check weights size_bytes. 0 means type error
Tensor tensor(type, shape);
TRT_ShapedWeights weights(type, dims, tensor);
store_.emplace_back(std::move(tensor));
return weights;
}
TrtNodeValidator::TrtNodeValidator() { RegisterOpValidators(); }
Status TrtNodeValidator::ConvertToTensorOrWeights(
const NodeDef& node_def, int output_port,
const grappler::GraphProperties& graph_properties,
TRT_TensorOrWeights* tensor_or_weights) {
if (node_def.op() == "Const") {
if (output_port != 0) {
return errors::InvalidArgument("Const node should only have one output.");
}
// The output of the conversion will be used as input to other nodes to
// determine whether TRT supports those nodes. If it cannot convert the
// Const, it's very likely we cannot treat it as a tensor and make it an
// input to the TRT network, since TRT removes the first dimension and
// treats it as batch size. Also, it's not likely that the converter can
    // support the op, and performance may suffer even if it can, so we
    // simply return an error if the conversion fails.
std::vector<TRT_TensorOrWeights> inputs;
return ConvertConstToWeights(node_def, inputs, tensor_or_weights);
}
if (!graph_properties.HasOutputProperties(node_def.name())) {
return errors::InvalidArgument("Shape and data type are unknown");
}
// Validate and convert shape and dtype.
const auto& output_params =
graph_properties.GetOutputProperties(node_def.name());
const auto& tensor_properties = output_params.at(output_port);
const DataType dtype = tensor_properties.dtype();
const PartialTensorShape shape = tensor_properties.shape();
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
TF_RETURN_IF_ERROR(ValidateTensorProperties(
      node_def.op(), dtype, shape, /*validation_only=*/true, &trt_dtype,
&trt_dims, &batch_size));
// Adds a fake ITensor. This is fine since op converter operates in
// validation-only mode and it won't (and shouldn't) use the tensor to do
// any TRT network operations.
*tensor_or_weights = TRT_TensorOrWeights(trt_dtype, trt_dims, batch_size);
return Status::OK();
}
Status TrtNodeValidator::ValidateNode(
const tensorflow::NodeDef& node_def,
const std::vector<std::pair<const NodeDef*, int>>& input_node_and_ports,
const grappler::GraphProperties& graph_properties) {
// Convert input NodeDef and corresponding output ports to
// TRT_TensorOrWeights.
std::vector<TRT_TensorOrWeights> inputs;
for (int i = 0; i < input_node_and_ports.size(); ++i) {
const auto& pair = input_node_and_ports[i];
TRT_TensorOrWeights tensor_or_weights;
Status status = ConvertToTensorOrWeights(
*pair.first, pair.second, graph_properties, &tensor_or_weights);
if (!status.ok()) {
return errors::Internal(
"Failed to convert input with index ", i,
" to a TRT_TensorOrWeights: ", status.error_message());
}
inputs.push_back(tensor_or_weights);
}
// Validate the node.
const auto iter = op_validators_.find(node_def.op());
if (iter == op_validators_.end()) {
// If validator is not registered, it means no validation is needed.
return Status::OK();
}
OpConverter validator = iter->second;
OpConverterParams params(
/*arg_converter=*/nullptr, node_def, inputs, /*arg_outputs=*/nullptr,
/*arg_validation_only=*/true, &weight_store_);
return validator(¶ms);
}
Status TrtNodeValidator::ConvertConstToWeights(
const NodeDef& const_node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
TRT_TensorOrWeights* output) {
std::vector<TRT_TensorOrWeights> outputs;
OpConverterParams params(
/*arg_converter=*/nullptr, const_node_def, inputs, &outputs,
/*arg_validation_only=*/true, &weight_store_);
Status status = op_validators_["Const"](¶ms);
if (status.ok() && output) *output = outputs[0];
return status;
}
Converter::Converter(nvinfer1::INetworkDefinition* trt_network,
int precision_mode, bool use_calibration)
: trt_network_(trt_network),
precision_mode_(precision_mode),
use_calibration_(use_calibration) {
this->RegisterOpConverters();
}
Status Converter::ConvertNode(const NodeDef& node_def) {
std::vector<TRT_TensorOrWeights> inputs, outputs;
TF_RETURN_IF_ERROR(this->GetInputs(node_def, &inputs));
OpConverterParams params(this, node_def, inputs, &outputs,
/*arg_validation_only=*/false, &weight_store_);
const string& op = node_def.op();
if (PluginFactoryTensorRT::GetInstance()->IsPlugin(op)) {
TF_RETURN_IF_ERROR(plugin_converter_(¶ms));
} else {
if (!op_registry_.count(op)) {
return errors::Unimplemented("No converter registered for op: " + op);
}
OpConverter op_converter = op_registry_.at(op);
TF_RETURN_IF_ERROR(op_converter(¶ms));
}
for (size_t i = 0; i < outputs.size(); ++i) {
TRT_TensorOrWeights& output = outputs[i];
string output_name = node_def.name();
if (i != 0) output_name = StrCat(output_name, ":", i);
    // We need to check the name before setting it. If the input is one of
    // the engine inputs, setting the name here would overwrite the engine
    // input bindings, which would cause a runtime error.
// TODO(tmorris): Remove this work-around once we use TRT's IIdentityLayer
// in ConvertIdentity.
if (output.is_tensor()) {
const char* tensor_name = output.tensor()->getName();
if (!tensorflow::str_util::StartsWith(tensor_name, kInputPHName)) {
// TRT initializes tensor names as "(Unnamed ITensor* N)". We rename
// them to match their corresponding TensorFlow name.
// Note: ITensors that we create internally within TF-TRT which are
// not inputs or outputs of a node will not be renamed. This is a
// potential cause of confusion if an error message or warning
// mentions the unnamed tensor.
output.tensor()->setName(output_name.c_str());
}
}
VLOG(2) << "Adding out tensor " << output_name << ": "
<< output.DebugString();
Status status = AddTensorOrWeights(output_name, output);
if (!status.ok()) {
return Status(status.code(),
StrCat("Failed to add output for node ", node_def.name(),
": ", status.error_message()));
}
}
return Status::OK();
}
Status Converter::AddInputTensor(const string& name, nvinfer1::DataType dtype,
const nvinfer1::Dims& dims, int batch_size) {
  // We verify the batch size only for the input nodes, and rely on the
  // individual op converters to ensure the batch size of the outputs is not
  // changed.
  // TODO(laigd): we need to test these properties.
Status status = MaybeUpdateBatchSize(batch_size);
if (!status.ok()) {
return Status(status.code(), StrCat("Batch size doesn't match for tensor ",
name, ": ", status.error_message()));
}
nvinfer1::ITensor* tensor = network()->addInput(name.c_str(), dtype, dims);
if (tensor == nullptr) {
return errors::InvalidArgument("Failed to create Input layer tensor ", name,
" rank=", dims.nbDims);
}
status = AddTensorOrWeights(name, TRT_TensorOrWeights(tensor));
if (!status.ok()) {
return Status(status.code(), StrCat("Failed to add input tensor ", name,
": ", status.error_message()));
}
return Status::OK();
}
Status Converter::RenameAndMarkOutputTensors(
const std::vector<Converter::EngineOutputInfo>& output_tensors) {
for (const auto& output : output_tensors) {
TRT_TensorOrWeights tensor_or_weights;
TF_RETURN_IF_ERROR(
GetTensorOrWeights(output.source_tensor_name, &tensor_or_weights));
if (!tensor_or_weights.is_tensor()) {
return errors::InvalidArgument("Output ", output.source_tensor_name,
" is weights not tensor");
}
nvinfer1::ITensor* tensor = tensor_or_weights.tensor();
if (tensor == nullptr) {
return errors::NotFound("Output tensor not found: ",
output.source_tensor_name);
}
// Check if this tensor has already been marked as an output.
// ConvertIdentity can cause the same tensor to be repeated in
// output_tensors, which can cause us to overwrite the name of the output
// tensor binding. For example, if we rename OutputPH_0 to OutputPH_1 then
// we won't be able to locate OutputPH_0 during runtime. To fix this,
// duplicate the tensor using no-op shuffle.
// TODO(tmorris): Remove this work-around once we use TRT's IIdentityLayer
// in ConvertIdentity.
if (tensorflow::str_util::StartsWith(tensor->getName(), kOutputPHName)) {
// Using shuffle layer for identity by not setting reshape or transpose.
nvinfer1::IShuffleLayer* layer = network()->addShuffle(*tensor);
TFTRT_RETURN_ERROR_IF_NULLPTR(
layer, StrCat("Output Copy for ", tensor->getName()));
MarkQuantizationRangesAsInferrable(tensor, layer->getOutput(0));
tensor = layer->getOutput(0);
}
tensor->setName(output.dest_node_name.c_str());
network()->markOutput(*tensor);
// Set type after marking as output. TRT only supports setType for engine
// outputs and inputs (type is inferred otherwise).
tensor->setType(output.trt_dtype);
VLOG(1) << "Marking output TRT tensor " << output.source_tensor_name
<< ", which feeds TF node " << output.dest_node_name;
}
return Status::OK();
}
Status Converter::MaybeUpdateBatchSize(int batch_size) {
  // OK iff either is unknown or they are equal to each other.
if (this->batch_size_ < 0 || batch_size < 0 ||
this->batch_size_ == batch_size) {
if (this->batch_size_ < 0 && batch_size >= 0) {
this->batch_size_ = batch_size;
}
return Status::OK();
}
return errors::InvalidArgument(
"Provided batch size does not match converter batch size: ", batch_size,
" vs ", batch_size_);
}
Status Converter::AddTensorOrWeights(const string& name,
TRT_TensorOrWeights input) {
// Set the batch size of the tensor, using batch size collected from the
// input tensors to the TRT subgraph at the beginning of the conversion.
// We rely on the individual op converters to understand the semantics of
// the TF node, and to make sure they don't change the batch size or
// introduce intra-element dependencies inside the batch.
if (input.is_tensor()) input.set_batch_size(batch_size_);
if (trt_tensors_.insert({name, std::move(input)}).second) return Status::OK();
  return errors::AlreadyExists("tensor/weights ", name, " already exists.");
}
Status Converter::GetTensorOrWeights(const string& name,
TRT_TensorOrWeights* output) {
if (!trt_tensors_.count(name)) {
return errors::NotFound("Tensor or weights with name ", name,
" could not be found.");
}
*output = trt_tensors_.at(name);
return Status::OK();
}
Status Converter::TransposeTensor(nvinfer1::ITensor* input_tensor,
const std::vector<int>& order_with_batch_dim,
const nvinfer1::ITensor** output_tensor) {
const auto dims = input_tensor->getDimensions();
if (order_with_batch_dim.size() - 1 != size_t(dims.nbDims)) {
return tensorflow::errors::InvalidArgument(
"Rank of perm for transpose does not match with that of the input.");
}
if (order_with_batch_dim[0] != 0) {
return tensorflow::errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
nvinfer1::IShuffleLayer* layer = this->network()->addShuffle(*input_tensor);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Transpose");
MarkQuantizationRangesAsInferrable(input_tensor, layer->getOutput(0));
nvinfer1::Permutation permutation;
for (int32_t i = 0; i < dims.nbDims; ++i) {
permutation.order[i] = order_with_batch_dim[i + 1] - 1;
}
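// Illustrative example: an NHWC-to-NCHW transpose passes
// order_with_batch_dim = {0, 3, 1, 2}; after dropping the batch dim this
// becomes permutation.order = {2, 0, 1}.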
VLOG(1) << "TransposeTensor permutation: "
<< DebugString(permutation, dims.nbDims);
layer->setFirstTranspose(permutation);
nvinfer1::Dims reshape_dims;
reshape_dims.nbDims = dims.nbDims;
for (int32_t i = 0; i < reshape_dims.nbDims; ++i) {
reshape_dims.d[i] = 0;
// TODO(aaroey): why not transpose the types as well?
reshape_dims.type[i] = dims.type[i];
}
layer->setReshapeDimensions(reshape_dims);
*output_tensor = layer->getOutput(0);
return tensorflow::Status::OK();
}
Status Converter::GetWeightRange(const TRT_ShapedWeights& weights,
float* out_min, float* out_max) const {
switch (weights.type_) {
case DataType::DT_FLOAT: {
auto inp = static_cast<float const*>(weights.GetValues());
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = *result.first;
*out_max = *result.second;
break;
}
case DataType::DT_HALF: {
auto inp = static_cast<Eigen::half const*>(weights.GetValues());
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = Eigen::half_impl::half_to_float(*result.first);
*out_max = Eigen::half_impl::half_to_float(*result.second);
break;
}
case DataType::DT_INT32: {
auto inp = static_cast<int const*>(weights.GetValues());
auto result = std::minmax_element(inp, inp + weights.count());
*out_min = static_cast<float>(*result.first);
*out_max = static_cast<float>(*result.second);
break;
}
default:
return errors::Unimplemented(
"Data type not supported for GetWeightRange: ",
DataTypeString(weights.type_));
}
return Status::OK();
}
Status Converter::PrepareTensorForShape(const TRT_TensorOrWeights& input,
const nvinfer1::Dims& dims,
const nvinfer1::ITensor** tensor) {
// If -1 is not used for one of the dims, we can check if the shapes are
// compatible.
bool can_check_shapes = true;
for (int i = 0; i < dims.nbDims; i++) {
if (dims.d[i] == -1) {
can_check_shapes = false;
break;
}
}
if (can_check_shapes &&
TrtDimsNumElements(input.GetTrtDims()) != TrtDimsNumElements(dims)) {
return errors::InvalidArgument("Reshape shapes are not compatible (",
DebugString(input.GetTrtDims()), " vs ",
DebugString(dims), ")");
}
if (input.is_tensor()) {
if (DimsEqual(input.GetTrtDims(), dims)) {
*tensor = input.tensor();
} else {
nvinfer1::IShuffleLayer* layer = this->network()->addShuffle(
*const_cast<nvinfer1::ITensor*>(input.tensor()));
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, "TF-TRT Internal Reshape");
layer->setReshapeDimensions(dims);
MarkQuantizationRangesAsInferrable(
const_cast<nvinfer1::ITensor*>(input.tensor()), layer->getOutput(0));
*tensor = layer->getOutput(0);
}
} else {
*tensor = CreateConstantLayer(input.weights(), dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(*tensor, "TF-TRT Internal Reshape");
if (precision_mode() == INT8MODE && !use_calibration()) {
// If we are in int8 mode and not calibrating, we need to explicitly set a
// quantization range for the output tensor of the IConstantLayer. Here we
// set the range to [min(weights), max(weights)].
float min_range = 0.0f;
float max_range = 0.0f;
TF_RETURN_IF_ERROR(
GetWeightRange(input.weights(), &min_range, &max_range));
// Avoid setting range to 0 because TRT will throw an error. If the
// weights are zero then the range doesn't matter: using 127.0f should
// ensure the quantized weight will be exactly zero.
if (min_range == 0.0f && max_range == 0.0f) {
min_range = -127.0f;
max_range = 127.0f;
}
ProvideQuantizationRange(const_cast<nvinfer1::ITensor*>(*tensor),
min_range, max_range);
}
}
return tensorflow::Status::OK();
}
void Converter::MarkQuantizationRangesAsInferrable(nvinfer1::ITensor* input,
nvinfer1::ITensor* output) {
quantization_infer_.push_back({input, output});
quantization_infer_.push_back({output, input});
}
void Converter::ProvideQuantizationRange(nvinfer1::ITensor* tensor,
float min_range, float max_range) {
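// TRT dynamic ranges are symmetric, so only max(|min|, |max|) is kept.
// Illustrative example: min_range = -3.0f, max_range = 5.0f yields
// symmetric_range = 5.0f, i.e. the tensor range becomes [-5.0f, 5.0f].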
float symmetric_range = std::max(std::abs(min_range), std::abs(max_range));
quantization_ranges_[tensor] = symmetric_range;
}
void Converter::MaybeApplyQuantizationRanges() {
if (precision_mode() != INT8MODE) return;
// Infer ranges across marked ops.
PropagateQuantizationRanges();
// Apply ranges.
#if NV_TENSORRT_MAJOR >= 5
for (auto pair : quantization_ranges_) {
nvinfer1::ITensor* tensor = pair.first;
const float range = pair.second;
VLOG(1) << "Setting range for: " << tensor->getName() << ": " << range;
// TODO(laigd): if 'tensor' already has a range set which doesn't match
// 'range', it should report an error.
tensor->setDynamicRange(-range, range);
}
#endif
// Warn user about tensors that are missing ranges. If TRT fuses some layers
// then these tensors may not actually be required, which is why this is
// just a warning. If we are still missing ranges even after fusion,
// Builder::buildCudaEngine() will return nullptr and we will catch the
// error at that point.
if (!use_calibration()) {
// Get all tensors from network
std::set<nvinfer1::ITensor*> all_tensors;
for (int i = 0; i < this->network()->getNbLayers(); i++) {
nvinfer1::ILayer* layer = this->network()->getLayer(i);
for (int j = 0; j < layer->getNbInputs(); j++) {
all_tensors.insert(layer->getInput(j));
}
for (int j = 0; j < layer->getNbOutputs(); j++) {
all_tensors.insert(layer->getOutput(j));
}
}
// Find tensors with no ranges
for (auto tensor : all_tensors) {
if (!quantization_ranges_.count(tensor)) {
// Note: there may be some warnings for "(Unnamed ITensor* N)". These
// are tensors which are created internally by TF-TRT. The ranges for
// these unnamed ITensors are always inferred from user-provided ranges,
// thus there will also be a warning for the range(s) the user missed.
LOG(WARNING) << "Quantization range was not found for "
<< tensor->getName() << ". "
<< "This is okay if TensorRT does not need the range "
<< "(e.g. due to node fusion).";
}
}
}
}
void Converter::PropagateQuantizationRanges() {
// Propagate ranges across edges in quantization_infer_ until no new
// information is added.
// Note: this function modifies quantization_infer_; it might be better to
// modify a copy instead if for some reason we need quantization_infer_
// later.
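// Illustrative example: if only tensor A has a range and the edges are
// A <-> B and B <-> C, A's range flows to B and then from B to C, after
// which all of those edges are erased.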
bool information_added = true;
while (information_added) {
information_added = false;
for (auto it = quantization_infer_.begin();
it != quantization_infer_.end();) {
auto input_tensor_range = quantization_ranges_.find(it->first);
auto output_tensor_range = quantization_ranges_.find(it->second);
if (input_tensor_range != quantization_ranges_.end() &&
output_tensor_range == quantization_ranges_.end()) {
// Input has range but output doesn't: copy range
// TODO(laigd): consider reporting an error if a different range is
// already set.
quantization_ranges_[it->second] = input_tensor_range->second;
information_added = true;
VLOG(1) << "Copy quantization range: " << it->first->getName() << " -> "
<< it->second->getName();
}
// We can remove edges when the output range is known
if (quantization_ranges_.find(it->second) != quantization_ranges_.end()) {
it = quantization_infer_.erase(it);
} else {
++it;
}
}
}
}
Status Converter::GetInputs(const tensorflow::NodeDef& node_def,
std::vector<TRT_TensorOrWeights>* inputs) const {
for (auto const& input_name : node_def.input()) {
/*************************************************************************
* TODO(jie): handle case 1) here.
* Normalizes the inputs and extracts associated metadata:
* 1) Inputs can contain a colon followed by a suffix of characters.
*    That suffix may be a single number (e.g. inputName:1) or several
*    word characters separated from a number by a colon
*    (e.g. inputName:foo:1). The latter case is used to denote inputs
*    and outputs of functions.
* 2) Control dependency inputs contain a caret at the beginning; we
*    remove it and annotate the edge as a control dependency.
************************************************************************/
// skip control nodes
if (input_name[0] == '^') continue;
string name = input_name;
auto last = name.find_last_of(':');
// TODO(aaroey): use TensorId
if (last != string::npos && last + 2 == name.size() &&
name[last + 1] == '0') {
name.erase(last);
}
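// Illustrative example: "conv1:0" is normalized to "conv1", while
// "conv1:1" is kept as-is since only a trailing ":0" suffix is stripped.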
if (trt_tensors_.count(name)) {
TRT_TensorOrWeights input = trt_tensors_.at(name);
inputs->push_back(input);
VLOG(2) << "Retrieved input " << name << ": " << input.DebugString();
} else {
// TODO(aaroey): this should not happen, make it a CHECK.
// TODO(aaroey): use StrCat for pattern like this.
string msg("Node ");
StrAppend(&msg, node_def.name(), " should have an input named '", name,
"' but it is not available");
LOG(ERROR) << msg;
return tensorflow::errors::InvalidArgument(msg);
}
}
return tensorflow::Status::OK();
}
TRT_ShapedWeights ConvertFP32ToFP16(TrtWeightStore* store,
const TRT_ShapedWeights& weights_src) {
auto dtype_new = tensorflow::DataType::DT_HALF;
TRT_ShapedWeights weights =
store->GetTempWeights(dtype_new, weights_src.shape_);
const float* src = static_cast<const float*>(weights_src.GetValues());
Eigen::half* dst = const_cast<Eigen::half*>(
static_cast<Eigen::half const*>(weights.GetValues()));
for (int64_t i = 0; i < weights_src.count(); i++) {
dst[i] = Eigen::half_impl::float_to_half_rtne(src[i]);
}
return weights;
}
// ****************************************************************************
// Constant folding functions for weights.
// TODO(laigd): we should probably use eigen directly.
// ****************************************************************************
struct LambdaFactory {
enum class OP_CATEGORY : int { RSQRT = 0, NEG, RECIP };
OP_CATEGORY op;
template <typename T>
std::function<T(T)> unary() {
switch (op) {
case OP_CATEGORY::RSQRT: {
VLOG(2) << "RSQRT GETS DONE";
return [](T t) -> T { return 1.0 / sqrt(t); };
}
case OP_CATEGORY::NEG:
return [](T t) -> T { return -t; };
case OP_CATEGORY::RECIP:
return [](T t) -> T { return 1.0 / t; };
default:
LOG(ERROR) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
}
}
};
template <>
std::function<Eigen::half(Eigen::half)> LambdaFactory::unary<Eigen::half>() {
switch (op) {
case OP_CATEGORY::RSQRT: {
VLOG(2) << "RSQRT GETS DONE";
return [](Eigen::half t) {
return Eigen::half(1.0 / sqrt(static_cast<float>(t)));
};
}
case OP_CATEGORY::NEG:
return [](Eigen::half t) { return -t; };
case OP_CATEGORY::RECIP:
return [](Eigen::half t) {
return Eigen::half(1.0 / static_cast<float>(t));
};
default:
LOG(ERROR) << "Not supported op for unary: " << static_cast<int>(op);
return nullptr;
}
}
tensorflow::Status UnaryCompute(const TRT_ShapedWeights& iweights,
TRT_ShapedWeights* oweights,
LambdaFactory unary_op) {
CHECK_EQ(iweights.type_, oweights->type_);
switch (iweights.type_) {
case tensorflow::DataType::DT_FLOAT: {
auto inp = static_cast<float const*>(iweights.GetValues());
auto oup = static_cast<float*>(const_cast<void*>(oweights->GetValues()));
std::transform(inp, inp + iweights.count(), oup, unary_op.unary<float>());
break;
}
case tensorflow::DataType::DT_HALF: {
auto inp = static_cast<Eigen::half const*>(iweights.GetValues());
auto oup =
static_cast<Eigen::half*>(const_cast<void*>(oweights->GetValues()));
std::transform(inp, inp + iweights.count(), oup,
unary_op.unary<Eigen::half>());
break;
}
default:
return tensorflow::errors::Unimplemented(
"Data type not supported: " +
tensorflow::DataTypeString(iweights.type_));
}
return tensorflow::Status::OK();
}
// If swapped_inputs is false, 'tensor' is the left operand and 'weights' is the
// right operand. If swapped_inputs is true, those two are swapped.
//
// TODO(jie): broadcasting is needed but not yet implemented.
// Only channel-wise support is implemented for the time being.
Status BinaryTensorOpWeight(OpConverterParams* params,
const nvinfer1::ITensor* tensor,
TRT_ShapedWeights weights, bool swapped_inputs) {
static const std::unordered_set<string> supported_ops = {"Sub", "Add", "Mul",
"Div", "RealDiv"};
const auto& node_def = params->node_def;
if (!supported_ops.count(node_def.op())) {
return errors::Unimplemented(node_def.op(), " is not supported, at ",
node_def.name());
}
// Check type consistency.
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(ConvertDType(weights.type_, &trt_dtype));
// Check scale mode.
auto dims_w = weights.shape_;
const auto dims_t = tensor->getDimensions();
// TODO(jie): addScale checks for input tensor dimension
if (dims_t.nbDims != 3) {
return errors::InvalidArgument("addScale requires tensor with rank 3, at ",
node_def.name());
}
// Default to element-wise
auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
// TODO(jie): maybe use a permutation instead to support more cases.
bool need_to_permute = false;
if (weights.count() == 1) {
scale_mode = nvinfer1::ScaleMode::kUNIFORM;
} else {
VLOG(2) << "weights dims: " << DebugString(dims_w)
<< "; tensor dims: " << DebugString(dims_t);
// Make sure no broadcasting on batch dimension.
if (dims_w.nbDims == dims_t.nbDims + 1) {
if (dims_w.d[0] == 1) {
for (int i = 1; i < dims_w.nbDims; i++) {
dims_w.d[i - 1] = dims_w.d[i];
}
dims_w.nbDims--;
} else {
return errors::InvalidArgument("Binary op cannot operate on batch, at ",
node_def.name());
}
}
if (dims_w.nbDims == dims_t.nbDims && dims_w.d[0] == dims_t.d[0]) {
scale_mode = nvinfer1::ScaleMode::kELEMENTWISE;
// Default is element-wise
for (int i = 1; i < dims_w.nbDims; i++) {
if (dims_w.d[i] != dims_t.d[i]) {
// If dimension does not match, switch back to per-channel
scale_mode = nvinfer1::ScaleMode::kCHANNEL;
break;
}
}
// If the mode is per-channel, since the channel dimension is assumed to
// be the third-to-last dimension, we need to make sure all other
// dimensions have size 1.
if (scale_mode == nvinfer1::ScaleMode::kCHANNEL) {
for (int i = 1; i < dims_w.nbDims; i++) {
if (dims_w.d[i] != 1)
return errors::InvalidArgument(
"Weight dims not compatible for channel-wise broadcast at ",
node_def.name());
}
}
} else if (dims_w.nbDims == 1 &&
dims_w.d[0] == dims_t.d[dims_t.nbDims - 1]) {
// Channel-wise with broadcast required. We compare against the last
// dimension of the tensor shape because of TensorFlow's default
// broadcasting rules.
need_to_permute = true;
scale_mode = nvinfer1::ScaleMode::kCHANNEL;
} else {
return errors::InvalidArgument("Weight dims not compatible at ",
node_def.name());
}
}
// TODO(laigd): we should add validation_only support in TransposeTensor() and
// PrepareTensorForShape().
if (params->validation_only) return Status::OK();
// Transpose last dimension.
std::vector<int> permutation(dims_t.nbDims + 1);
if (need_to_permute) {
// We swap the last dimension into the channel dimension for TRT, because
// of TensorFlow's default broadcasting rules.
for (int i = 0; i < static_cast<int>(permutation.size()); i++) {
permutation[i] = i;
}
permutation[1] = dims_t.nbDims;
permutation[dims_t.nbDims] = 1;
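// Illustrative example: for a rank-3 tensor (plus batch), the identity
// order {0, 1, 2, 3} becomes {0, 3, 2, 1}, swapping the channel dim (1)
// with the last dim while leaving the batch dim in place.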
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(tensor), permutation, &tensor));
}
if (params->converter->precision_mode() == FP16MODE) {
weights = ConvertFP32ToFP16(params->weight_store, weights);
}
// Prepare weights
TRT_ShapedWeights shift_weights(weights.type_);
TRT_ShapedWeights scale_weights(weights.type_);
TRT_ShapedWeights power_weights(weights.type_);
if (node_def.op() == "Sub") {
if (swapped_inputs) {
shift_weights = weights;
nvinfer1::IUnaryLayer* layer = params->converter->network()->addUnary(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::UnaryOperation::kNEG);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
// Since quantization ranges are symmetric, the same range as the input
// will work for the negation of the input.
params->converter->MarkQuantizationRangesAsInferrable(
const_cast<nvinfer1::ITensor*>(tensor), layer->getOutput(0));
tensor = layer->getOutput(0);
} else {
TRT_ShapedWeights neg_weights =
params->weight_store->GetTempWeights(weights);
LambdaFactory unary_op;
unary_op.op = LambdaFactory::OP_CATEGORY::NEG;
TF_RETURN_IF_ERROR(UnaryCompute(weights, &neg_weights, unary_op));
shift_weights = neg_weights;
}
} else if (node_def.op() == "Div" || node_def.op() == "RealDiv") {
if (swapped_inputs) {
// We need to infer the quantization range for this intermediate tensor.
//
// x -> [Recip] -> 1/x -> [Scale] -> s/x
// ^
// need range for this
//
// We have the quantization scales for x and s/x - can we divide the scale
// for s/x by s? Only if it is a scalar.
//
// Because of this issue, fall back to BinaryTensorOpTensor if we are
// doing INT8 with no calibration. There is most likely no performance
// penalty by falling back here.
if (params->converter->precision_mode() == INT8MODE &&
!params->converter->use_calibration()) {
return errors::Unimplemented(
"Intermediate quantization range cannot be determined without"
" calibration. Falling back to BinaryTensorOpTensor for ",
node_def.op(), ", at ", node_def.name());
}
scale_weights = weights;
nvinfer1::IUnaryLayer* layer = params->converter->network()->addUnary(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::UnaryOperation::kRECIP);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
tensor = layer->getOutput(0);
} else {
TRT_ShapedWeights recip_weights =
params->weight_store->GetTempWeights(weights);
LambdaFactory unary_op;
unary_op.op = LambdaFactory::OP_CATEGORY::RECIP;
TF_RETURN_IF_ERROR(UnaryCompute(weights, &recip_weights, unary_op));
scale_weights = recip_weights;
}
} else if (node_def.op() == "Mul") {
scale_weights = weights;
} else if (node_def.op() == "Add") {
shift_weights = weights;
} else {
// This should not happen.
return errors::Unimplemented("Binary op not supported at ", node_def.op());
}
nvinfer1::IScaleLayer* layer = params->converter->network()->addScale(
*const_cast<nvinfer1::ITensor*>(tensor), scale_mode,
shift_weights.GetTrtWeights(), scale_weights.GetTrtWeights(),
power_weights.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
const nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Transpose back dimension
if (need_to_permute) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), permutation,
&output_tensor));
}
// Pass the output
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
enum class ConvolutionType { DEFAULT, DEPTHWISE_CONV };
tensorflow::Status ConvertConv2DHelper(OpConverterParams* params, int group) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2) {
return tensorflow::errors::InvalidArgument("Two inputs are expected for ",
node_def.op(), ", at ",
node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
node_def.op(), " is only implemented for tensors, not weights, at ",
node_def.name());
}
if (inputs.at(1).is_tensor()) {
return tensorflow::errors::Unimplemented("Kernel for ", node_def.op(),
" must be constant weights, at ",
node_def.name());
}
TRT_ShapedWeights weights_rsck = inputs.at(1).weights();
if (weights_rsck.shape_.nbDims != 4) {
return tensorflow::errors::InvalidArgument(
"Conv2D expects kernel of dimension 4, at " + node_def.name());
}
TFAttrs attrs(node_def);
auto data_format = attrs.get<string>("data_format");
int c_index = (data_format == "NHWC") ? 3 : 1;
int h_index = (data_format == "NHWC") ? 1 : 2;
int w_index = (data_format == "NHWC") ? 2 : 3;
auto tf_dilations = attrs.get<std::vector<int>>("dilations");
if (tf_dilations.size() != 4) {
return tensorflow::errors::InvalidArgument(
"Convolution dilations field must specify 4 dimensions, at ",
node_def.name());
}
if (tf_dilations[0] != 1 || tf_dilations[c_index] != 1) {
return tensorflow::errors::Unimplemented(
"Dilation rate must be 1 for batch and channel dimensions, at ",
node_def.name());
}
const nvinfer1::DimsHW dilation(tf_dilations[h_index], tf_dilations[w_index]);
const auto tf_stride = attrs.get<std::vector<int>>("strides");
if (tf_stride.size() != 4) {
return tensorflow::errors::InvalidArgument(
"Convolution strides field must specify 4 dimensions, at ",
node_def.name());
}
if (tf_stride[0] != 1 || tf_stride[c_index] != 1) {
return tensorflow::errors::Unimplemented(
"Stride must be 1 for batch and channel dimensions, at ",
node_def.name());
}
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
if (params->validation_only) return tensorflow::Status::OK();
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
// Transpose to NCHW (NCHW is required for IConvLayer).
const bool need_transpose = (data_format == "NHWC");
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(tensor), {0, 3, 1, 2}, &tensor));
}
// Dimensions of transposed tensor.
const auto tensor_dim = tensor->getDimensions();
// For depthwise convolution, group will be 0, so set num_groups to the
// size of the input's channel dim. For a non-depthwise conv, num_groups
// will be 1.
const int num_groups = (group == 0) ? tensor_dim.d[0] : group;
if (params->converter->precision_mode() == FP16MODE) {
weights_rsck =
ConvertFP32ToFP16(params->weight_store, inputs.at(1).weights());
}
TRT_ShapedWeights weights =
params->weight_store->GetTempWeights(weights_rsck);
ReorderRSCKToKCRS(weights_rsck, &weights, num_groups);
TRT_ShapedWeights biases(weights.type_);
const int noutput = weights.shape_.d[0] * num_groups;
nvinfer1::DimsHW kernel_size;
kernel_size.h() = weights.shape_.d[2];
kernel_size.w() = weights.shape_.d[3];
// Add padding.
std::vector<std::pair<int, int>> padding;
if (attrs.get<string>("padding") == "SAME") {
nvinfer1::DimsHW effective_kernel_size = kernel_size;
effective_kernel_size.h() += (kernel_size.h() - 1) * (dilation.h() - 1);
effective_kernel_size.w() += (kernel_size.w() - 1) * (dilation.w() - 1);
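// Illustrative example (values assumed): a 3x3 kernel with dilation 2 has
// effective size 5x5; with stride 1 on a 7x7 input, SAME padding needs 4
// extra pixels per spatial dim, i.e. padding = {{2, 2}, {2, 2}}.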
padding = CreateSamePadding(
stride, effective_kernel_size,
{static_cast<int>(tensor_dim.d[1]), static_cast<int>(tensor_dim.d[2])});
} else {
padding = {{0, 0}, {0, 0}};
}
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second) {
// Handle asymmetric padding.
auto pad_layer = params->converter->network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
params->converter->MarkQuantizationRangesAsInferrable(
const_cast<nvinfer1::ITensor*>(tensor), pad_layer->getOutput(0));
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
}
// Add convolution.
nvinfer1::IConvolutionLayer* layer =
params->converter->network()->addConvolution(
*const_cast<nvinfer1::ITensor*>(tensor), noutput, kernel_size,
weights.GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
layer->setName(node_def.name().c_str());
layer->setNbGroups(num_groups);
layer->setDilation(dilation);
const nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Restore transpose.
if (need_transpose) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), {0, 2, 3, 1},
&output_tensor));
}
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertConv2DHelper(OpConverterParams* params,
ConvolutionType type) {
switch (type) {
case ConvolutionType::DEFAULT:
return ConvertConv2DHelper(params, 1);
case ConvolutionType::DEPTHWISE_CONV:
return ConvertConv2DHelper(params, 0);
}
return tensorflow::errors::Unimplemented("Unsupported convolution type, at ",
params->node_def.name());
}
Status BinaryTensorOpTensor(OpConverterParams* params,
const TRT_TensorOrWeights& operand_l,
const TRT_TensorOrWeights& operand_r) {
const auto& node_def = params->node_def;
static const std::unordered_map<string, nvinfer1::ElementWiseOperation> ops{
{"Add", nvinfer1::ElementWiseOperation::kSUM},
{"Mul", nvinfer1::ElementWiseOperation::kPROD},
{"Sub", nvinfer1::ElementWiseOperation::kSUB},
{"Div", nvinfer1::ElementWiseOperation::kDIV},
{"RealDiv", nvinfer1::ElementWiseOperation::kDIV},
{"Minimum", nvinfer1::ElementWiseOperation::kMIN},
{"Maximum", nvinfer1::ElementWiseOperation::kMAX},
};
auto op_pair = ops.find(node_def.op());
if (op_pair == ops.end()) {
return errors::Unimplemented("Binary op ", node_def.op(),
" not supported at: ", node_def.name());
}
nvinfer1::Dims broadcasted_dims_l, broadcasted_dims_r;
Status status = params->converter->GetTrtBroadcastShape(
operand_l, operand_r, &broadcasted_dims_l, &broadcasted_dims_r);
if (!status.ok()) {
return errors::InvalidArgument(
"Unsupported binary op broadcast scheme for op ", node_def.name(), ": ",
status.error_message());
}
TFAttrs attrs(node_def);
nvinfer1::DataType dtype = attrs.get<nvinfer1::DataType>("T");
if (dtype == nvinfer1::DataType::kINT32) {
return errors::Unimplemented("Binary op ", node_def.op(),
" does not support INT32, at ",
node_def.name());
}
if (params->validation_only) return Status::OK();
const nvinfer1::ITensor* tensor_l = nullptr;
const nvinfer1::ITensor* tensor_r = nullptr;
status = params->converter->PrepareTensorForShape(
operand_l, broadcasted_dims_l, &tensor_l);
if (status.ok()) {
status = params->converter->PrepareTensorForShape(
operand_r, broadcasted_dims_r, &tensor_r);
}
if (!status.ok()) {
return errors::Internal("Failed to convert binary op ", node_def.name(),
": ", status.error_message());
}
// Check type consistency.
TFTRT_CHECK_EQ_TYPE(tensor_l->getType(), dtype)
<< DebugString(tensor_l->getType()) << " vs " << DebugString(dtype);
TFTRT_CHECK_EQ_TYPE(tensor_r->getType(), dtype)
<< DebugString(tensor_r->getType()) << " vs " << DebugString(dtype);
// Add ElementWise layer.
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*const_cast<nvinfer1::ITensor*>(tensor_l),
*const_cast<nvinfer1::ITensor*>(tensor_r), op_pair->second);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Pass the output
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertPlugin(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
// prepare input
std::vector<nvinfer1::ITensor*> all_inputs;
all_inputs.reserve(inputs.size());
for (auto input : inputs) {
all_inputs.emplace_back(const_cast<nvinfer1::ITensor*>(input.tensor()));
}
// plugin is owned by PluginFactory
// TODO(jie): destroy plugins later (resource management)
PluginTensorRT* plugin =
PluginFactoryTensorRT::GetInstance()->CreatePlugin(node_def.op());
// passing attributes
// TODO(jie): support more general attribute
TFAttrs attrs(node_def);
auto attr_key_vector = attrs.GetAllAttrKeys();
for (auto attr_key : attr_key_vector) {
// TODO(jie): support only list of float for toy example here.
auto data = attrs.get<std::vector<float>>(attr_key);
size_t size_data = data.size() * sizeof(float);
if (!plugin->SetAttribute(attr_key, static_cast<void*>(data.data()),
size_data)) {
return tensorflow::errors::InvalidArgument("plugin SetAttribute failed");
}
}
nvinfer1::IPluginLayer* layer = params->converter->network()->addPlugin(
&all_inputs[0], static_cast<int>(inputs.size()), *plugin);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
for (int i = 0; i < layer->getNbOutputs(); i++) {
nvinfer1::ITensor* output_tensor = layer->getOutput(i);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
}
return tensorflow::Status::OK();
}
tensorflow::Status ConvertTranspose(OpConverterParams* params) {
const auto& inputs = params->inputs;
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
!inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
"Input expects tensor and weights, at ", params->node_def.name());
}
// Get the permutation from weights.
TRT_ShapedWeights weights = inputs.at(1).weights();
const int* weights_ptr =
static_cast<int*>(const_cast<void*>(weights.GetValues()));
std::vector<int> perm(weights_ptr, weights_ptr + weights.count());
// Verify the permutation.
nvinfer1::ITensor* input_tensor =
const_cast<nvinfer1::ITensor*>(inputs.at(0).tensor());
if (perm.size() - 1 != size_t(input_tensor->getDimensions().nbDims)) {
return errors::InvalidArgument(
"Rank of perm for transpose does not match that of the input.");
}
if (perm[0] != 0) {
return errors::Unimplemented(
"Transpose at batch dimension is not supported.");
}
if (params->validation_only) return Status::OK();
// Start conversion.
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(
params->converter->TransposeTensor(input_tensor, perm, &output_tensor));
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertReshape(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2 || !inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
"Input expects weights for shape, at ", node_def.name());
}
TRT_TensorOrWeights input_tensor = inputs.at(0);
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.count() == 0) {
return tensorflow::errors::Unimplemented(
"Reshape to shape=[] is not supported, at ", node_def.name());
}
const int* weights_ptr =
static_cast<int*>(const_cast<void*>(weights.GetValues()));
// Check that it doesn't change the batch dimension. This check is
// conservative: for example, when the first dim of the shape is -1 and the
// input tensor shape is not fixed, it is still possible that the reshape
// doesn't change the batch dim, but as long as there is a possibility that
// it could change the batch dim, we reject the conversion. The parameters
// are:
//
// * reshape_batch_dim: the value of the first dim of the input shape constant
// * reshape_dims: all other dims of the input shape constant
// * input_batch_dim: the value of the first dim of the input tensor to
// reshape
// * input_dims: all other dims of the input tensor to reshape
//
// The validation logic is:
//
// if input_batch_dim is fixed:
// if reshape_batch_dim == input_batch_dim:
// ok
// elif reshape_batch_dim == -1 (meaning reshape_dims are fixed) and
// input_dims are fixed and
// prod(input_dims) == prod(reshape_dims):
// ok
// else:
// not ok
// elif input_dims are fixed:
// if reshape_dims are fixed and
// prod(input_dims) == prod(reshape_dims):
// ok
// else:
// not ok
// else:
// not ok
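// Illustrative example: for an input tensor with batch dim 8 and other
// dims [2, 3], shape constants [8, 6] and [-1, 6] are accepted, while
// [4, 12] could change the batch dim and is rejected.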
const int input_batch_dim = input_tensor.batch_size();
const int reshape_batch_dim = weights_ptr[0];
const nvinfer1::Dims input_dims = input_tensor.GetTrtDims();
nvinfer1::Dims reshape_dims;
reshape_dims.nbDims = weights.count() - 1;
for (int i = 1; i < weights.count(); i++) {
reshape_dims.d[i - 1] = weights_ptr[i];
}
// Check that it doesn't change the batch dimension according to the logic
// mentioned above.
bool reshape_may_change_batch_dim = false;
if (input_batch_dim > 0) { // Batch size is fixed.
if (reshape_batch_dim == -1) { // Other dims of the shape must be fixed.
if (!HasStaticShape(input_dims) ||
TrtDimsNumElements(reshape_dims) != TrtDimsNumElements(input_dims)) {
reshape_may_change_batch_dim = true;
}
} else if (reshape_batch_dim != input_batch_dim) {
reshape_may_change_batch_dim = true;
}
} else if (HasStaticShape(input_dims)) {
if (!HasStaticShape(reshape_dims) ||
TrtDimsNumElements(reshape_dims) != TrtDimsNumElements(input_dims)) {
reshape_may_change_batch_dim = true;
}
} else {
reshape_may_change_batch_dim = true;
}
VLOG(1) << "input_batch_dim=" << input_batch_dim
<< ", input_dims=" << DebugString(input_dims)
<< "\nreshape_batch_dim=" << reshape_batch_dim
<< ", reshape_dims=" << DebugString(reshape_dims);
if (reshape_may_change_batch_dim) {
const string msg = StrCat(
"Reshape on batch dimension is not supported, at ", node_def.name());
return errors::Unimplemented(msg);
}
if (params->validation_only) return Status::OK();
// Start conversion.
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
input_tensor, reshape_dims, &output_tensor));
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertExpandDims(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2) {
return tensorflow::errors::InvalidArgument(
"Two inputs expected for ExpandDims, at ", node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
"ExpandDims expects tensor for input, at ", node_def.name());
}
if (!inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
"ExpandDims expects weights for axis, at ", node_def.name());
}
// Get input shape as vector.
TRT_TensorOrWeights input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
// Add batch dim back.
input_dims.insert(input_dims.begin(), -1);
const int input_rank = input_dims.size();
// Get axis to expand on.
TRT_ShapedWeights weights = inputs.at(1).weights();
if (weights.count() != 1) {
return tensorflow::errors::InvalidArgument(
"ExpandDims axis must be a scalar, at ", node_def.name());
}
const int* weights_ptr =
static_cast<int*>(const_cast<void*>(weights.GetValues()));
int axis = weights_ptr[0];
// Make sure axis is valid.
if ((axis < (-input_rank - 1)) || (axis > input_rank)) {
return tensorflow::errors::InvalidArgument(
"Axis for ExpandDims is invalid, must be in the range "
"[-rank(input) - 1, rank(input)], at ",
node_def.name());
}
// Convert negative axis to corresponding positive axis.
if (axis < 0) axis += input_rank + 1;
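// Illustrative example: with input_rank 3 (batch dim included), axis = -1
// becomes 3, appending the new trailing dim; axis = 0 is rejected below
// since it would modify the batch dimension.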
if (axis == 0) {
return tensorflow::errors::Unimplemented(
"Modifying batch dimension is not supported for ExpandDims, at ",
node_def.name());
}
if (params->validation_only) return Status::OK();
// ExpandDims: Insert new dim of size 1.
input_dims.insert(input_dims.begin() + axis, 1);
// Reshape tensor.
nvinfer1::Dims new_dims;
TF_RETURN_IF_ERROR(TensorShapeArrayToTrtDims(input_dims, &new_dims,
/*ignore_first_dim=*/true));
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
input_tensor, new_dims, &output_tensor));
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertSqueeze(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 1) {
return tensorflow::errors::InvalidArgument(
"One input expected for Squeeze, at ", node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
"Squeeze expects tensor for input, at ", node_def.name());
}
// Get input shape.
TRT_TensorOrWeights input_tensor = inputs.at(0);
const nvinfer1::Dims dims = input_tensor.GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
// Add batch dim back.
input_dims.insert(input_dims.begin(), -1);
const int input_rank = input_dims.size();
// Mark axes to remove by setting them to 0.
TFAttrs attrs(node_def);
auto squeeze_dims = attrs.get<std::vector<int>>("squeeze_dims");
if (squeeze_dims.size() == 0) {
return tensorflow::errors::Unimplemented(
"Squeeze is only implemented for explicit dims, at ", node_def.name());
}
for (int axis : squeeze_dims) {
// Make sure axis is valid.
if ((axis < -input_rank) || (axis >= input_rank)) {
return tensorflow::errors::InvalidArgument(
"Axis for Squeeze is invalid, must be in the range "
"[-rank(input), rank(input)), at ",
node_def.name());
}
// Convert negative axis to corresponding positive axis.
if (axis < 0) axis += input_rank;
// Don't squeeze batch dim.
if (axis == 0) {
return tensorflow::errors::Unimplemented(
"Cannot squeeze batch dimension, at ", node_def.name());
}
// Make sure target dimension is size 1.
if (input_dims[axis] != 1) {
return tensorflow::errors::InvalidArgument(
"Cannot squeeze a dimension which isn't size 1, at ",
node_def.name());
}
// Mark dim for removal by setting to 0.
input_dims[axis] = 0;
}
if (params->validation_only) return Status::OK();
// Remove all dims which are equal to 0.
input_dims.erase(std::remove(input_dims.begin(), input_dims.end(), 0),
input_dims.end());
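// Illustrative example: input_dims {-1, 1, 5} with squeeze_dims {1}
// becomes {-1, 5} after the marked dim is removed.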
// Reshape tensor.
nvinfer1::Dims new_dims;
TF_RETURN_IF_ERROR(TensorShapeArrayToTrtDims(input_dims, &new_dims,
/*ignore_first_dim=*/true));
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
input_tensor, new_dims, &output_tensor));
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
// Gets the bounds (start or end) from the weights of a StridedSlice op.
tensorflow::Status GetStridedSliceBound(const std::vector<int>& input_dims,
const TRT_ShapedWeights& bound_weights,
int mask, bool begin, string node_name,
std::vector<int>* output_bound) {
const string bound_name = (begin) ? "begin" : "end";
const int* weights_ptr =
static_cast<const int*>(bound_weights.GetValues());
*output_bound =
std::vector<int>(weights_ptr, weights_ptr + bound_weights.count());
if (output_bound->size() != input_dims.size()) {
return tensorflow::errors::InvalidArgument(
"StridedSlice \"", bound_name, "\" specified ",
std::to_string(output_bound->size()), " dimensions, but input rank is ",
std::to_string(input_dims.size()), ", at ", node_name);
}
for (int i = 0; i < output_bound->size(); i++) {
if ((1 << i) & mask) {
// Apply mask.
(*output_bound)[i] = (begin) ? 0 : input_dims[i];
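// Illustrative example: with input_dims = {8, 3, 5} and mask bit 2 set,
// bound index 2 is forced to 0 for "begin" or to 5 for "end", regardless
// of the weight value.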
// Masked bound will always result in a valid, non-negative bound, so we
// don't need the following checks. For the common case of using masks on
// an undefined batch dim (-1), we specifically don't want to do the
// following checks because they will erroneously detect an out of range
// bound or try to correct the negative value.
continue;
}
// Make sure bound is valid.
if (((*output_bound)[i] < -input_dims[i]) ||
((*output_bound)[i] > input_dims[i])) {
return tensorflow::errors::InvalidArgument(
bound_name, " value of ", std::to_string((*output_bound)[i]),
" for StridedSlice is invalid, must be in the range "
"[-dim_size(i), dim_size(i)], at ",
node_name);
}
// Convert negative values to their positive equivalent.
if ((*output_bound)[i] < 0) {
(*output_bound)[i] += input_dims[i];
}
}
return tensorflow::Status::OK();
}
tensorflow::Status ConvertStridedSlice(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 4) {
return tensorflow::errors::InvalidArgument(
"StridedSlice expects 4 inputs, at ", node_def.name());
}
if (!inputs.at(1).is_weights() || !inputs.at(2).is_weights() ||
!inputs.at(3).is_weights()) {
return tensorflow::errors::InvalidArgument(
"StridedSlice expects weights for begin, end, and strides, at ",
node_def.name());
}
if (!inputs.at(0).is_tensor()) {
return tensorflow::errors::Unimplemented(
"StridedSlice is only implemented for tensors, at ", node_def.name());
}
// Get input dims.
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
std::vector<int> input_dims(dims.d, dims.d + dims.nbDims);
if (inputs.at(0).is_tensor()) {
// Temporarily add batch dimension so that indexes line up properly.
input_dims.insert(input_dims.begin(), inputs.at(0).batch_size());
}
if (input_dims.size() > 4) {
return tensorflow::errors::Unimplemented(
"StridedSlice is not implemented for tensors with rank > 4, at ",
node_def.name());
}
TFAttrs attrs(node_def);
// Get begin and end bounds per axis.
std::vector<int> begin, end;
TF_RETURN_IF_ERROR(GetStridedSliceBound(input_dims, inputs.at(1).weights(),
attrs.get<int>("begin_mask"), true,
node_def.name(), &begin));
TF_RETURN_IF_ERROR(GetStridedSliceBound(input_dims, inputs.at(2).weights(),
attrs.get<int>("end_mask"), false,
node_def.name(), &end));
// Get strides per axis (must all be 1).
TRT_ShapedWeights stride_weights = inputs.at(3).weights();
const int* stride_weights_ptr =
static_cast<const int*>(stride_weights.GetValues());
std::vector<int> strides(stride_weights_ptr,
stride_weights_ptr + stride_weights.count());
for (int x : strides) {
if (x != 1) {
return tensorflow::errors::Unimplemented(
"StridedSlice is only implemented for stride of 1, at ",
node_def.name());
}
}
// Unsupported mask options.
for (const string& attr :
{"ellipsis_mask", "new_axis_mask", "shrink_axis_mask"}) {
int attr_val = attrs.get<int>(attr);
if (attr_val != 0) {
return tensorflow::errors::Unimplemented(
attr, " is not supported for StridedSlice, at ", node_def.name());
}
}
nvinfer1::ITensor* tensor =
const_cast<nvinfer1::ITensor*>(inputs.at(0).tensor());
// Reshape if necessary to 4-D, since IPaddingLayer requires a 4-D input.
const bool need_reshape = (input_dims.size() != 4);
int reshape_dims_added = 0;
nvinfer1::Dims reshape_dims;
if (need_reshape) {
// Add new dims after batch dim until tensor is 4D.
while (input_dims.size() < 4) {
input_dims.insert(input_dims.begin() + 1, 1);
begin.insert(begin.begin() + 1, 0);
end.insert(end.begin() + 1, 1);
reshape_dims_added++;
}
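// Illustrative example: input_dims {8, 5} (batch included) becomes
// {8, 1, 1, 5}, with matching entries (0 and 1) inserted into begin and
// end for the new size-1 dims.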
TF_RETURN_IF_ERROR(TensorShapeArrayToTrtDims(input_dims, &reshape_dims,
/*ignore_first_dim=*/true));
}
// Find dimensions which need to be sliced.
std::vector<int> pad_dims;
for (int i = 0; i < input_dims.size(); i++) {
if ((begin[i] != 0) || (end[i] != input_dims[i])) {
if (i == 0) {
return tensorflow::errors::Unimplemented(
"StridedSlice can't modify batch dim, at ", node_def.name());
} else if ((end[i] - begin[i]) < 0) {
return tensorflow::errors::InvalidArgument(
"New size of sliced dimension is negative, at ", node_def.name());
}
pad_dims.push_back(i);
}
}
if (pad_dims.size() == 0) {
// No dimensions are changed. We could create a padding layer anyway with
// values of 0.
if (params->validation_only) return Status::OK();
params->outputs->push_back(inputs.at(0));
return tensorflow::Status::OK();
} else if (pad_dims.size() == 1) {
// Only one dim is modified, but we have to have two; mark a second dim
// which will have padding of 0. The dim we add is chosen to avoid an
// unnecessary transpose.
if (pad_dims[0] != 2) {
pad_dims.push_back(2);
} else {
pad_dims.push_back(3);
}
} else if (pad_dims.size() > 2) {
return tensorflow::errors::Unimplemented(
"StridedSlice can only modify 2 dimensions, at ", node_def.name());
}
std::sort(pad_dims.begin(), pad_dims.end());
// Convert to pre/post padding values. Since TRT does not have a StridedSlice
// or Slice layer, we instead create an IPaddingLayer with negative padding.
nvinfer1::DimsHW pre_padding, post_padding;
for (int i = 0; i < pad_dims.size(); i++) {
const int axis = pad_dims[i];
pre_padding.d[i] = -begin[axis];
post_padding.d[i] = end[axis] - input_dims[axis];
}
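// Illustrative example: slicing a dim of size 5 with begin = 1 and
// end = 4 gives pre_padding = -1 and post_padding = -1, trimming one
// element from each side.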
// IPaddingLayer will always apply the padding to dims 2,3 (input format is
// NCHW).
const bool need_transpose = !(pad_dims[0] == 2 && pad_dims[1] == 3);
std::vector<int> transpose_order(input_dims.size());
std::vector<int> inv_transpose_order(input_dims.size());
if (need_transpose) {
if (pad_dims[0] == 1 && pad_dims[1] == 3) {
transpose_order = {0, 2, 1, 3};
inv_transpose_order = {0, 2, 1, 3};
} else if (pad_dims[0] == 1 && pad_dims[1] == 2) {
transpose_order = {0, 3, 1, 2};
inv_transpose_order = {0, 2, 3, 1};
}
}
if (params->validation_only) return Status::OK();
// Start conversion.
if (need_reshape) {
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
inputs.at(0), reshape_dims, &output_tensor));
tensor = const_cast<nvinfer1::ITensor*>(output_tensor);
}
if (need_transpose) {
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, transpose_order, &output_tensor));
tensor = const_cast<nvinfer1::ITensor*>(output_tensor);
}
// Add padding layer
nvinfer1::IPaddingLayer* layer = params->converter->network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor), pre_padding, post_padding);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->converter->MarkQuantizationRangesAsInferrable(tensor,
layer->getOutput(0));
tensor = layer->getOutput(0);
// Restore transpose
if (need_transpose) {
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
tensor, inv_transpose_order, &output_tensor));
tensor = const_cast<nvinfer1::ITensor*>(output_tensor);
}
// Restore reshape
if (need_reshape) {
// Calculate output dimensions
for (int i = 0; i < pad_dims.size(); i++) {
const int axis = pad_dims[i];
input_dims[axis] = end[axis] - begin[axis];
}
// Remove added 1 dimensions
for (int i = 0; i < reshape_dims_added; i++) {
int value = input_dims[1];
if (value != 1) {
return tensorflow::errors::Internal(
"StridedSlice error when reshaping, at ", node_def.name());
}
input_dims.erase(input_dims.begin() + 1);
}
nvinfer1::Dims new_dims;
TF_RETURN_IF_ERROR(TensorShapeArrayToTrtDims(input_dims, &new_dims,
/*ignore_first_dim=*/true));
const nvinfer1::ITensor* output_tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
TRT_TensorOrWeights(tensor), new_dims, &output_tensor));
tensor = const_cast<nvinfer1::ITensor*>(output_tensor);
}
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertConv2D(OpConverterParams* params) {
return ConvertConv2DHelper(params, ConvolutionType::DEFAULT);
}
tensorflow::Status ConvertConv2DDepthwise(OpConverterParams* params) {
return ConvertConv2DHelper(params, ConvolutionType::DEPTHWISE_CONV);
}
tensorflow::Status ConvertPool(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
node_def.op(), " is only implemented for tensors, not weights, at ",
node_def.name());
}
nvinfer1::PoolingType type;
if (node_def.op() == "MaxPool") {
type = nvinfer1::PoolingType::kMAX;
} else if (node_def.op() == "AvgPool") {
type = nvinfer1::PoolingType::kAVERAGE;
} else {
return tensorflow::errors::Unimplemented(
"Unsupported pooling type: ", node_def.op(), ", at ", node_def.name());
}
TFAttrs attrs(node_def);
const string padding_type = attrs.get<string>("padding");
if ((padding_type != "SAME") && (padding_type != "VALID")) {
return tensorflow::errors::Unimplemented(
"Unsupported padding type: ", padding_type, ", at ", node_def.name());
}
if (params->validation_only) return Status::OK();
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
int h_index = 2;
int w_index = 3;
const auto data_format = attrs.get<string>("data_format");
if (data_format == "NHWC") {
h_index = 1;
w_index = 2;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(tensor), {0, 3, 1, 2}, &tensor));
}
const auto tf_stride = attrs.get<std::vector<int>>("strides");
const nvinfer1::DimsHW stride(tf_stride[h_index], tf_stride[w_index]);
const auto tf_kernel = attrs.get<std::vector<int>>("ksize");
const nvinfer1::DimsHW ksize(tf_kernel[h_index], tf_kernel[w_index]);
auto tensor_dim = tensor->getDimensions();
std::vector<std::pair<int, int>> padding;
if (padding_type == "SAME") {
// This is an NCHW tensor with no batch dimension.
// 1 -> h
// 2 -> w
padding = CreateSamePadding(
stride, ksize,
{static_cast<int>(tensor_dim.d[1]), static_cast<int>(tensor_dim.d[2])});
} else if (padding_type == "VALID") {
padding = {{0, 0}, {0, 0}};
}
if (padding[0].first != padding[0].second ||
padding[1].first != padding[1].second) {
VLOG(2) << "Padding!!!: " << padding[0].first << padding[0].second
<< padding[1].first << padding[1].second;
auto pad_layer = params->converter->network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::DimsHW(padding[0].first, padding[1].first),
nvinfer1::DimsHW(padding[0].second, padding[1].second));
TFTRT_RETURN_ERROR_IF_NULLPTR(pad_layer, node_def.name());
params->converter->MarkQuantizationRangesAsInferrable(
const_cast<nvinfer1::ITensor*>(tensor), pad_layer->getOutput(0));
padding = {{0, 0}, {0, 0}};
tensor = pad_layer->getOutput(0);
}
nvinfer1::IPoolingLayer* layer = params->converter->network()->addPooling(
*const_cast<nvinfer1::ITensor*>(tensor), type, ksize);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
// TODO(tmorris): Average pooling may not be entirely safe to infer
// quantization range through (at least forwards - backwards should be fine).
// Max pooling is okay.
params->converter->MarkQuantizationRangesAsInferrable(
const_cast<nvinfer1::ITensor*>(tensor), layer->getOutput(0));
layer->setStride(stride);
layer->setPadding({padding[0].first, padding[1].first});
layer->setName(node_def.name().c_str());
const nvinfer1::ITensor* output_tensor = layer->getOutput(0);
if (data_format == "NHWC") {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), {0, 2, 3, 1},
&output_tensor));
}
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertActivation(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 1) {
return tensorflow::errors::InvalidArgument(
node_def.op(), " expects one input, at ", node_def.name());
}
if (!inputs.at(0).is_tensor()) {
return tensorflow::errors::Unimplemented(
node_def.op(), " is only implemented for tensors, at ",
node_def.name());
}
static const std::unordered_map<string, nvinfer1::ActivationType> ops{
{"Relu", nvinfer1::ActivationType::kRELU},
{"Sigmoid", nvinfer1::ActivationType::kSIGMOID},
{"Tanh", nvinfer1::ActivationType::kTANH},
};
auto op_pair = ops.find(node_def.op());
if (op_pair == ops.end()) {
return tensorflow::errors::Unimplemented(
"Activation op: ", node_def.op(),
" not supported at: ", node_def.name());
}
if (params->validation_only) return tensorflow::Status::OK();
// Start conversion.
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
nvinfer1::IActivationLayer* layer =
params->converter->network()->addActivation(
*const_cast<nvinfer1::ITensor*>(tensor), op_pair->second);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Set quantization range for output of Sigmoid, Tanh.
if (node_def.op() == "Sigmoid") {
params->converter->ProvideQuantizationRange(output_tensor, 0.0f, 1.0f);
} else if (node_def.op() == "Tanh") {
params->converter->ProvideQuantizationRange(output_tensor, -1.0f, 1.0f);
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
Status ConvertQuantize(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if ((inputs.size() == 0) ||
(node_def.op() == "FakeQuantWithMinMaxArgs" && inputs.size() != 1) ||
(node_def.op() == "FakeQuantWithMinMaxVars" && inputs.size() != 3) ||
(node_def.op() == "QuantizeAndDequantizeV2" && inputs.size() != 3) ||
(node_def.op() == "QuantizeAndDequantizeV3" && inputs.size() != 4)) {
return errors::InvalidArgument("Invalid number of inputs for ",
node_def.op(), ", at ", node_def.name());
}
if (inputs.at(0).is_weights()) {
// TensorRT will automatically quantize weights, so we will ignore ranges
// for weights.
params->outputs->push_back(inputs.at(0));
return Status::OK();
}
float min_range = 0.0f;
float max_range = 0.0f;
if (node_def.op() == "FakeQuantWithMinMaxArgs") {
// Get ranges via node attributes.
TFAttrs attrs(node_def);
if (attrs.count("min") == 0 || attrs.count("max") == 0) {
return errors::InvalidArgument("Min or max attribute not found for ",
node_def.op(), " at ", node_def.name());
}
min_range = attrs.get<float>("min");
max_range = attrs.get<float>("max");
} else if (node_def.op() == "FakeQuantWithMinMaxVars" ||
node_def.op() == "QuantizeAndDequantizeV2" ||
node_def.op() == "QuantizeAndDequantizeV3") {
// Get ranges via inputs.
if (!inputs.at(1).is_weights() || !inputs.at(2).is_weights()) {
return errors::InvalidArgument("Min and max inputs for ", node_def.op(),
" must be weights not tensors, at ",
node_def.name());
}
auto get_weights_value = [&inputs](int index) {
auto raw_weights = static_cast<float*>(
const_cast<void*>(inputs.at(index).weights().GetValues()));
return raw_weights[0];
};
min_range = get_weights_value(1);
max_range = get_weights_value(2);
} else {
return errors::InvalidArgument("Unknown quantization op ", node_def.op(),
", at ", node_def.name());
}
if (params->validation_only) return Status::OK();
// Store ranges for tensor
params->converter->ProvideQuantizationRange(
const_cast<nvinfer1::ITensor*>(inputs.at(0).tensor()), min_range,
max_range);
// Sometimes, TRT may not quantize a tensor, either because it chooses to
// execute a higher precision kernel or because of op fusion. In these cases,
// accuracy will suffer if the model was trained to expect quantization at
// that tensor. We should consider adding a clip(tensor, min_range, max_range)
// operation here to ensure that any arbitrarily placed quantize node will
// execute as expected. However, this will negatively affect performance. If
// users train their models in a way which models inference as close as
// possible (i.e. not quantizing in place where fusion will occur), then there
// is no problem with the current implementation.
params->outputs->push_back(inputs.at(0));
return Status::OK();
}
// TODO(pdavoodi): we should update relu6 implementation once TensorRT supports
// Relu6 natively.
tensorflow::Status ConvertRelu6(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 1) {
return tensorflow::errors::InvalidArgument(
"Invalid number of inputs for Relu6, at ", node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
"Relu6 is only implemented for tensors, not weights, at ",
node_def.name());
}
if (params->validation_only) return Status::OK();
// ***************************************************************************
// TensorRT does not implement Relu6 natively. This function converts Relu6 op
// to available TensorRT ops: Relu6(x) = min(Relu(x), 6)
// ***************************************************************************
// Input Tensor
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
// Relu operation i.e. Relu(x) = max(0, x)
nvinfer1::IActivationLayer* relu_layer =
params->converter->network()->addActivation(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::ActivationType::kRELU);
TFTRT_RETURN_ERROR_IF_NULLPTR(relu_layer, node_def.name());
// Large range of relu is problematic during quantization in INT8 precision
// mode. Setting dynamic range of relu = [0.f, 6.0f] helps with quantization.
// TRT only uses dynamic ranges in INT8 precision mode,
// and this does not affect the FP32 path.
params->converter->ProvideQuantizationRange(relu_layer->getOutput(0), 0.0f,
6.0f);
  // Create a constant layer to store the floating point weight, i.e. 6.0f.
  // This tensor will be broadcast uniformly during the elementwise `min`
  // operation. The constant has to have the same rank as the input for TRT
  // to broadcast it.
nvinfer1::Dims dims;
dims.nbDims = relu_layer->getOutput(0)->getDimensions().nbDims;
for (int i = 0; i < dims.nbDims; i++) {
dims.d[i] = 1;
}
TRT_ShapedWeights weights = params->weight_store->GetTempWeights(
tensorflow::DataType::DT_FLOAT, dims);
auto weights_ptr =
static_cast<float*>(const_cast<void*>(weights.GetValues()));
weights_ptr[0] = 6.0f;
nvinfer1::ITensor* const6_tensor =
params->converter->CreateConstantLayer(weights, dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(const6_tensor, node_def.name());
params->converter->ProvideQuantizationRange(const6_tensor, 0.0f, 6.0f);
// ElementWise Min Operation
// Min op is a nop for INT8 execution path, as the input tensor
// to this layer will only have values in range [0.f, 6.0f].
nvinfer1::IElementWiseLayer* relu6_layer =
params->converter->network()->addElementWise(
*const_cast<nvinfer1::ITensor*>(relu_layer->getOutput(0)),
*const6_tensor, nvinfer1::ElementWiseOperation::kMIN);
TFTRT_RETURN_ERROR_IF_NULLPTR(relu6_layer, node_def.name());
nvinfer1::ITensor* output_tensor = relu6_layer->getOutput(0);
params->converter->ProvideQuantizationRange(output_tensor, 0.0f, 6.0f);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return Status::OK();
}
tensorflow::Status ConvertBiasAdd(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
!inputs.at(1).is_weights()) {
return errors::InvalidArgument("Input expects tensor and weights, at ",
node_def.name());
}
TFAttrs attrs(node_def);
tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
if (tf_dtype != DataType::DT_FLOAT && tf_dtype != DataType::DT_HALF) {
return errors::Unimplemented("Data type is not supported, for node ",
node_def.name(), " got ",
DataTypeString(tf_dtype));
}
if (params->validation_only) return Status::OK();
nvinfer1::ITensor* tensor =
const_cast<nvinfer1::ITensor*>(inputs.at(0).tensor());
const nvinfer1::Dims original_dims = tensor->getDimensions();
const string data_format = attrs.get<string>("data_format");
const int channel_index =
(data_format == "NHWC" ? original_dims.nbDims - 1 : 0);
nvinfer1::Permutation permutation;
if (channel_index != 0) {
// Permute the dimensions so that the channel dimension is the first
// dimension.
for (int i = 0; i < original_dims.nbDims; ++i) {
permutation.order[i] = i;
}
permutation.order[0] = channel_index;
permutation.order[channel_index] = 0;
VLOG(1) << "ConvertBiasAdd permutation: "
<< DebugString(permutation, original_dims.nbDims);
}
  // TensorRT addScale requires the input to be of rank 3, so we may need to
  // apply a transpose as well as a reshape.
// TODO(laigd): this doesn't match what the TRT doc says, fix the doc?
if (channel_index != 0 || original_dims.nbDims != 3) {
nvinfer1::IShuffleLayer* shuffle_layer =
params->converter->network()->addShuffle(*tensor);
TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
params->converter->MarkQuantizationRangesAsInferrable(
tensor, shuffle_layer->getOutput(0));
// NOTE(laigd): for some reason we need to apply the reshape
// unconditionally. The default shape has nbDims==-1 and it seems the
// behavior is undefined in some cases.
nvinfer1::Dims reshape_dims;
reshape_dims.nbDims = 3;
// 0 means copying from input; -1 means inferring from the rest.
reshape_dims.d[0] = 0;
reshape_dims.d[1] = original_dims.nbDims >= 2 ? 0 : 1;
reshape_dims.d[2] = original_dims.nbDims >= 3 ? -1 : 1;
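    // For example, a rank-1 input [C] becomes [C, 1, 1], while a rank-4
    // input [a, b, c, d] becomes [a, b, c * d].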
shuffle_layer->setReshapeDimensions(reshape_dims);
if (channel_index != 0) {
shuffle_layer->setFirstTranspose(permutation);
}
tensor = shuffle_layer->getOutput(0);
}
TRT_ShapedWeights weights = inputs.at(1).weights();
if (params->converter->precision_mode() == FP16MODE) {
weights = ConvertFP32ToFP16(params->weight_store, weights);
}
nvinfer1::ScaleMode mode = nvinfer1::ScaleMode::kCHANNEL;
if (weights.shape_.d[0] == 1) {
mode = nvinfer1::ScaleMode::kUNIFORM;
}
TRT_ShapedWeights empty_weights(weights.type_);
nvinfer1::IScaleLayer* layer = params->converter->network()->addScale(
*tensor, mode, weights.GetTrtWeights(), empty_weights.GetTrtWeights(),
empty_weights.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Restore transpose & reshape.
if (channel_index != 0 || original_dims.nbDims != 3) {
nvinfer1::IShuffleLayer* shuffle_layer =
params->converter->network()->addShuffle(*output_tensor);
TFTRT_RETURN_ERROR_IF_NULLPTR(shuffle_layer, node_def.name());
// NOTE: for same reason as mentioned above we need to apply the reshape
// unconditionally.
nvinfer1::Dims reshape_dims = original_dims;
if (channel_index != 0) {
      // NOTE: according to NVIDIA, dimension types are deprecated, so we
      // don't need to copy them back.
reshape_dims.d[channel_index] = original_dims.d[0];
reshape_dims.d[0] = original_dims.d[channel_index];
}
shuffle_layer->setReshapeDimensions(reshape_dims);
if (channel_index != 0) {
shuffle_layer->setSecondTranspose(permutation);
}
params->converter->MarkQuantizationRangesAsInferrable(
output_tensor, shuffle_layer->getOutput(0));
output_tensor = shuffle_layer->getOutput(0);
}
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return Status::OK();
}
void GetTensorDimsWithProtoShape(const Tensor& tensor, nvinfer1::Dims* dims) {
if (tensor.dims() > 0) {
*dims = GetTrtDimsForTensor(tensor);
} else {
dims->nbDims = 1;
// No dimension provided. Flatten it.
dims->d[0] = tensor.NumElements();
dims->type[0] = nvinfer1::DimensionType::kSPATIAL;
for (int i = 1; i < nvinfer1::Dims::MAX_DIMS; ++i) {
dims->d[i] = 0;
}
}
}
Status TfTensorToTrtWeights(const Tensor& tensor, TrtWeightStore* weight_store,
TRT_ShapedWeights* weights) {
const DataType dtype = tensor.dtype();
// We always convert the integer constants to INT32, since TRT INT8 is for
// quantized inference.
//
// TODO(aaroey): FP16 will remain in half format and is not converted to
// FP32, but the converter currently uses all float weights as FP32. Fix
// this.
const DataType converted_dtype =
(dtype == DT_INT16 || dtype == DT_INT8 || dtype == DT_UINT8 ? DT_INT32
: dtype);
// Verify that the dtype is supported by TensorRT. Otherwise, return an error.
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(ConvertDType(converted_dtype, &trt_dtype));
if (tensor.NumElements() == 0) {
// Return empty weights having converted dtype.
*weights = TRT_ShapedWeights(converted_dtype);
return Status::OK();
}
nvinfer1::Dims weight_dims;
GetTensorDimsWithProtoShape(tensor, &weight_dims);
*weights = weight_store->GetTempWeights(converted_dtype, weight_dims);
// Copy the tensor directly if the tensor does not require cast to the
// supported type.
if (converted_dtype == dtype) {
char* dst = static_cast<char*>(const_cast<void*>(weights->GetValues()));
memcpy(dst, tensor.tensor_data().data(), tensor.TotalBytes());
return Status::OK();
}
// Copy tensor elements after casting them to the converted DataType.
int32* dst = static_cast<int32*>(const_cast<void*>(weights->GetValues()));
if (dtype == DT_INT16) {
const int16* src = tensor.flat<int16>().data();
std::copy(src, src + tensor.NumElements(), dst);
} else if (dtype == DT_INT8) {
const int8* src = tensor.flat<int8>().data();
std::copy(src, src + tensor.NumElements(), dst);
} else {
// dtype can only be DT_UINT8 at this point.
TFTRT_CHECK_EQ_TYPE(dtype, DT_UINT8);
const uint8* src = tensor.flat<uint8>().data();
std::copy(src, src + tensor.NumElements(), dst);
}
return Status::OK();
}
// Convert a Const NodeDef to TRT_ShapedWeights. This is a special converter:
// it always ignores the params->validation_only parameter and adds the
// converted weights to params->outputs. We do this because TrtNodeValidator
// needs the weights as inputs to other nodes, and uses them to determine
// whether those nodes are supported by TRT.
tensorflow::Status ConvertConst(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (!inputs.empty()) {
return errors::InvalidArgument(
"Constant node is expected to have empty input list: ",
node_def.name());
}
// Create shaped weights as output
const auto& tensor_proto = node_def.attr().at("value").tensor();
tensorflow::Tensor tensor;
if (!tensor.FromProto(tensor_proto)) {
return tensorflow::errors::Internal("Cannot parse weight tensor proto: ",
node_def.name());
}
TFAttrs attrs(node_def);
const DataType dtype = attrs.get<tensorflow::DataType>("dtype");
if (dtype != tensor.dtype()) {
return errors::InvalidArgument("DataType mismatch between attr (",
DataTypeString(dtype), ") and tensor (",
DataTypeString(tensor.dtype()), ")");
}
TRT_ShapedWeights weights;
TF_RETURN_IF_ERROR(
TfTensorToTrtWeights(tensor, params->weight_store, &weights));
if (params->outputs != nullptr) {
params->outputs->push_back(TRT_TensorOrWeights(weights));
}
return Status::OK();
}
tensorflow::Status ConvertIdentity(OpConverterParams* params) {
// TODO(tmorris): TRT's Identity layer does not get optimized away as of TRT
// 5.0, however once we know that it does it would be nice to use that
// instead.
params->outputs->push_back(params->inputs.at(0));
return tensorflow::Status::OK();
}
Status ConvertBinary(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2) {
return errors::InvalidArgument("Binary ops require two inputs, at ",
node_def.name());
}
// Constant folding should have been done by TensorFlow
if (inputs.at(0).is_weights() && inputs.at(1).is_weights()) {
return errors::Unimplemented(
"Constant folding is falled back to TensorFlow, binary op received "
"both input as constant at: ",
node_def.name());
}
// TODO(tmorris): TRT plans to deprecate IScaleLayer and will replace it with
// IElementwiseLayer. At that point, we can remove BinaryTensorOpWeight. For
// now, the performance will be slightly better with IScaleLayer because it
// can be fused in more situations. However, most of the benefits of
// IScaleLayer are when the layer performs both a shift and a scale, which we
// don't do except for convolutions.
//
// Try to convert into Scale layer first (for better performance).
// Since scale layer supports restricted broadcast policy and op types, we
// allow failure and try to handle it through Elementwise op
// (BinaryTensorOpTensor).
Status status = Status::OK();
if (inputs.at(0).is_tensor() && inputs.at(1).is_weights()) {
status = BinaryTensorOpWeight(params, inputs.at(0).tensor(),
inputs.at(1).weights(), false);
} else if (inputs.at(0).is_weights() && inputs.at(1).is_tensor()) {
status = BinaryTensorOpWeight(params, inputs.at(1).tensor(),
inputs.at(0).weights(), true);
}
  // If both inputs are tensors, or one of them is weights but the conversion
  // above failed, try the conversion using BinaryTensorOpTensor.
if ((inputs.at(0).is_tensor() && inputs.at(1).is_tensor()) || !status.ok()) {
if (!status.ok()) VLOG(1) << status;
status = BinaryTensorOpTensor(params, inputs.at(0), inputs.at(1));
}
return status;
}
tensorflow::Status ConvertUnary(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
static const std::unordered_map<string, nvinfer1::UnaryOperation> ops{
{"Neg", nvinfer1::UnaryOperation::kNEG},
{"Exp", nvinfer1::UnaryOperation::kEXP},
{"Log", nvinfer1::UnaryOperation::kLOG},
{"Sqrt", nvinfer1::UnaryOperation::kSQRT},
{"Abs", nvinfer1::UnaryOperation::kABS},
{"Reciprocal", nvinfer1::UnaryOperation::kRECIP},
};
if (inputs.size() != 1) {
return tensorflow::errors::FailedPrecondition(
"Unary ops require single tensor input, at ", node_def.name());
}
// TODO(jie): check type
const nvinfer1::ITensor* tensor = nullptr;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
inputs.at(0), inputs.at(0).GetTrtDims(), &tensor));
nvinfer1::IUnaryLayer* layer;
if (node_def.op() == "Rsqrt") {
// We will need a quantization range for intermediate tensor if not using
// calibration.
//
// x -> [Sqrt] -> sqrt(x) -> [Recip] -> 1/sqrt(x)
// ^
// need range here
if (params->converter->precision_mode() == INT8MODE &&
!params->converter->use_calibration()) {
return errors::Unimplemented(
"Intermediate quantization range cannot be determined without"
" calibration for Rsqrt, consider replacing with "
"Sqrt -> FakeQuant -> Reciprocal ops, at ",
node_def.name());
}
layer = params->converter->network()->addUnary(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::UnaryOperation::kSQRT);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
tensor = layer->getOutput(0);
layer = params->converter->network()->addUnary(
*const_cast<nvinfer1::ITensor*>(tensor),
nvinfer1::UnaryOperation::kRECIP);
} else if (ops.count(node_def.op()) != 0) {
layer = params->converter->network()->addUnary(
*const_cast<nvinfer1::ITensor*>(tensor), ops.at(node_def.op()));
} else {
return tensorflow::errors::InvalidArgument(
"Binary op: ", node_def.op(), " not supported, at ", node_def.name());
}
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertSquare(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 1) {
return tensorflow::errors::InvalidArgument("Square expects one input, at ",
node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
"Square is only implemented for tensors, at ", node_def.name());
}
if (params->validation_only) return Status::OK();
// Constant 2 with same rank as input
nvinfer1::Dims dims = inputs.at(0).GetTrtDims();
for (int i = 0; i < dims.nbDims; i++) {
dims.d[i] = 1;
}
TRT_ShapedWeights weights = params->weight_store->GetTempWeights(
tensorflow::DataType::DT_FLOAT, dims);
auto weights_ptr =
static_cast<float*>(const_cast<void*>(weights.GetValues()));
weights_ptr[0] = 2.f;
nvinfer1::ITensor* const2_tensor =
params->converter->CreateConstantLayer(weights, dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(const2_tensor, node_def.name());
// ElementWise Pow Operation
nvinfer1::IElementWiseLayer* layer =
params->converter->network()->addElementWise(
*const_cast<nvinfer1::ITensor*>(inputs.at(0).tensor()),
*const2_tensor, nvinfer1::ElementWiseOperation::kPOW);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertReduce(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
!inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
"Input expects tensor and weights, at", node_def.name());
}
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
TRT_ShapedWeights index_list = inputs.at(1).weights();
TFAttrs attrs(node_def);
auto index_type = attrs.get<tensorflow::DataType>("Tidx");
// Only expect to handle INT32 as attributes for now
if (index_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32");
}
int axes = 0;
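  // TRT reduce axes form a bitmask over non-batch dimensions, so TF axis k
  // maps to bit (k - 1); e.g. a TF "Mean" over axes {1, 2} of an NCHW tensor
  // becomes the mask (1 << 0) | (1 << 1).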
if (index_list.count() == 0) {
return tensorflow::errors::InvalidArgument(
"TRT cannot support reduce on all (batch) dimensions, at",
node_def.name());
} else {
auto index_list_data =
static_cast<int*>(const_cast<void*>(index_list.GetValues()));
for (int i = 0; i < index_list.count(); i++) {
int axis = index_list_data[i];
if (axis < 0) axis += tensor->getDimensions().nbDims + 1;
if (axis == 0) {
return tensorflow::errors::InvalidArgument(
"TRT cannot reduce at batch dimension, at", node_def.name());
}
axes |= (1 << (axis - 1));
}
}
nvinfer1::ReduceOperation reduce_operation;
if (node_def.op() == "Sum") {
reduce_operation = nvinfer1::ReduceOperation::kSUM;
} else if (node_def.op() == "Prod") {
reduce_operation = nvinfer1::ReduceOperation::kPROD;
} else if (node_def.op() == "Max") {
reduce_operation = nvinfer1::ReduceOperation::kMAX;
} else if (node_def.op() == "Min") {
reduce_operation = nvinfer1::ReduceOperation::kMIN;
} else if (node_def.op() == "Mean") {
reduce_operation = nvinfer1::ReduceOperation::kAVG;
} else {
return tensorflow::errors::Unimplemented("Op not supported ", node_def.op(),
" , at ", node_def.name());
}
const auto keep_dims = attrs.get<bool>("keep_dims");
nvinfer1::ILayer* layer = params->converter->network()->addReduce(
*const_cast<nvinfer1::ITensor*>(tensor), reduce_operation, axes,
keep_dims);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
params->outputs->push_back(TRT_TensorOrWeights(layer->getOutput(0)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertPad(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
// TODO(aaroey): make a routine for this check and reuse it.
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
!inputs.at(1).is_weights()) {
return tensorflow::errors::InvalidArgument(
"Input expects tensor and weights, at", node_def.name());
}
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
const auto dims = tensor->getDimensions();
// Restore implicit batch dimension
const int nb_dims = dims.nbDims + 1;
TRT_ShapedWeights pads = inputs.at(1).weights();
TFAttrs attrs(node_def);
// Padding type here is done through TF type
// so I can leverage their EnumToDataType for my cast
auto padding_type = attrs.get<tensorflow::DataType>("Tpaddings");
// TODO(jie): handle data type conversion for TRT?
if (pads.shape_.d[0] != nb_dims || pads.shape_.d[1] != 2) {
return tensorflow::errors::InvalidArgument(
"Pad only supports explicit padding on 4 dimensional tensor, at ",
node_def.name());
}
// Only expect to handle INT32 as attributes for now
if (padding_type != tensorflow::DataType::DT_INT32) {
return tensorflow::errors::Unimplemented(
"Tpaddings supports only DT_INT32");
}
auto pad_data = static_cast<int*>(const_cast<void*>(pads.GetValues()));
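  // pads is an [nb_dims, 2] tensor: pad_data[2 * i] and pad_data[2 * i + 1]
  // hold the pre- and post-padding for (batch-inclusive) dimension i.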
std::vector<int32_t> pad_index;
for (int i = 0; i < nb_dims; i++) {
if (pad_data[2 * i] != 0 || pad_data[2 * i + 1] != 0) {
pad_index.push_back(i);
}
}
// No padding at all, we should exit
if (pad_index.size() == 0) {
params->outputs->push_back(inputs.at(0));
return tensorflow::Status::OK();
}
  // Only supports padding on at most 2 axes (GIE-2579).
  if (pad_index.size() > 2) {
    return tensorflow::errors::InvalidArgument(
        "Padding layer does not support padding on more than 2 dimensions");
}
// Padding on batch dimension is not supported
if (pad_index[0] == 0) {
return tensorflow::errors::InvalidArgument(
"Padding layer does not support padding on batch dimension");
}
  // Not doing the legit thing here: ignoring simultaneous padding on
  // dimensions 1 and 3.
  // TODO(jie): implement pad as uff parser
  if (pad_index.size() == 2 && pad_index[0] == 1 && pad_index[1] == 3) {
return tensorflow::errors::Unimplemented(
"Padding layer does not support padding on dimension 1 and 3 yet");
}
if (params->validation_only) return Status::OK();
bool legit_pad = true;
nvinfer1::DimsHW pre_padding(0, 0);
nvinfer1::DimsHW post_padding(0, 0);
std::vector<int32_t> permuted_pad_index(pad_index);
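  // TRT's IPaddingLayer only pads the two innermost (H, W) dimensions, so if
  // padding is requested on dimension 1 we transpose it onto dimension 3
  // first and undo the transpose afterwards.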
if (pad_index[0] == 1) {
legit_pad = false;
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(tensor), {0, 3, 2, 1}, &tensor));
permuted_pad_index[0] = 3;
}
for (size_t i = 0; i < pad_index.size(); i++) {
int index = pad_index[i];
if (permuted_pad_index[i] == 2) {
pre_padding.h() = pad_data[index * 2];
post_padding.h() = pad_data[index * 2 + 1];
} else if (permuted_pad_index[i] == 3) {
pre_padding.w() = pad_data[index * 2];
post_padding.w() = pad_data[index * 2 + 1];
}
}
nvinfer1::IPaddingLayer* layer = params->converter->network()->addPadding(
*const_cast<nvinfer1::ITensor*>(tensor), pre_padding, post_padding);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
const nvinfer1::ITensor* output_tensor = layer->getOutput(0);
if (!legit_pad) {
TF_RETURN_IF_ERROR(params->converter->TransposeTensor(
const_cast<nvinfer1::ITensor*>(output_tensor), {0, 3, 2, 1},
&output_tensor));
}
params->outputs->push_back(
TRT_TensorOrWeights(const_cast<nvinfer1::ITensor*>(output_tensor)));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertConcat(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
// not including the last input (axis) here
int input_size = static_cast<int>(inputs.size()) - 1;
if (!inputs.at(0).is_tensor()) {
return tensorflow::errors::InvalidArgument(
"Concat in TRT support only Tensor input, at ", node_def.name());
}
// We are retrieving the axis
TRT_ShapedWeights axis = inputs.at(input_size).weights();
TFAttrs attrs(node_def);
auto index_type = attrs.get<tensorflow::DataType>("Tidx");
// TODO(jie): handle data type
// Only expect to handle INT32 as index attributes for now
if (index_type != tensorflow::DataType::DT_INT32)
return tensorflow::errors::Unimplemented("Tidx supports only DT_INT32, at ",
node_def.name());
int index = *(static_cast<int*>(const_cast<void*>(axis.GetValues())));
// TODO(jie): early termination with no-op (attr_size==1)
auto dim = inputs.at(0).tensor()->getDimensions();
// dimension check
if (index > dim.nbDims + 1) {
return tensorflow::errors::InvalidArgument(
"Concatenate on axis out of dimension range, at ", node_def.name());
}
if (index == 0) {
return tensorflow::errors::InvalidArgument(
"Concatenate on batch dimension not supported, at ", node_def.name());
}
if (index < 0) {
index = dim.nbDims + index + 1;
}
std::vector<nvinfer1::ITensor const*> inputs_vec;
  // Shape check: all input tensors should have the same shape.
  // Start from 0 since we may also be doing a transpose here.
for (int i = 0; i < input_size; i++) {
auto tensor_i = inputs.at(i).tensor();
auto dim_i = tensor_i->getDimensions();
if (dim_i.nbDims != dim.nbDims) {
return tensorflow::errors::InvalidArgument(
"Concatenate receives inputs with inconsistent dimensions, at ",
node_def.name());
}
for (int j = 0; j < dim.nbDims; j++) {
// check dimension consistency on non-concatenate axis
if (j != index - 1 && dim_i.d[j] != dim.d[j]) {
return tensorflow::errors::InvalidArgument(
"Concatenate receives inputs with inconsistent shape, at",
node_def.name());
}
}
inputs_vec.push_back(tensor_i);
}
if (params->validation_only) return tensorflow::Status::OK();
nvinfer1::IConcatenationLayer* layer =
params->converter->network()->addConcatenation(
const_cast<nvinfer1::ITensor* const*>(inputs_vec.data()),
inputs_vec.size());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
layer->setAxis(index - 1);
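  // TF concat axes include the batch dimension while TRT axes do not, hence
  // the "- 1" above; e.g. TF axis 1 (channels in NCHW) becomes TRT axis 0.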
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertFusedBatchNorm(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFAttrs attrs(node_def);
float epsilon = attrs.get<float>("epsilon");
auto data_format = attrs.get<string>("data_format");
if (data_format != "NCHW") {
return tensorflow::errors::Unimplemented(
node_def.op(), " only supports data_format=NCHW, at ", node_def.name());
}
bool is_training = attrs.get<bool>("is_training");
if (is_training) {
// Trying to use batchnorm in training mode is a very common problem.
// Because the error message will only be printed in VLOG(1) by the
// segmenter, we issue a special warning so that users will actually see it.
LOG(WARNING) << node_def.op() << " only supports is_training=false. If you "
<< "are using Keras, please call "
<< "keras.backend.set_learning_phase(0) before constructing "
<< "your model. At " << node_def.name();
return tensorflow::errors::Unimplemented(
node_def.op(), " only supports is_training=false, at ",
node_def.name());
}
if (inputs.at(0).is_weights()) {
return tensorflow::errors::Unimplemented(
node_def.op(),
" is only implemented for tensor inputs, not weights, at ",
node_def.name());
}
for (int i = 1; i < 5; i++) {
if (inputs.at(i).is_tensor()) {
return tensorflow::errors::Unimplemented(
node_def.op(),
" must have constant inputs for scale, offset, mean and variance, "
"at ",
node_def.name());
}
}
nvinfer1::ITensor const* tensor = inputs.at(0).tensor();
// Check parameter types
auto parameter_type = inputs.at(1).weights().type_;
if ((parameter_type != tensorflow::DataType::DT_FLOAT) &&
(parameter_type != tensorflow::DataType::DT_HALF)) {
return tensorflow::errors::Unimplemented(
"only float32 or float16 weight data type is supported, for node " +
node_def.name() + " got " + tensorflow::DataTypeString(parameter_type));
}
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().type_ != parameter_type) {
return tensorflow::errors::Unimplemented(
"Inconsistent parameter type for batchnorm is not supported, at: " +
node_def.name());
}
}
TRT_ShapedWeights dummy_power_weights(parameter_type);
size_t nweight = 0;
for (int i = 1; i < 5; i++) {
nweight = std::max(nweight, (size_t)inputs.at(i).weights().count());
}
TRT_ShapedWeights* ptr_shape_weights = nullptr;
for (int i = 1; i < 5; i++) {
if (inputs.at(i).weights().count() == nweight) {
ptr_shape_weights =
const_cast<TRT_ShapedWeights*>(&(inputs.at(i).weights()));
} else if (inputs.at(i).weights().count() != 1) {
return tensorflow::errors::InvalidArgument(
"Inconsistent batchnorm parameter count, at: " + node_def.name());
}
}
if (params->validation_only) return Status::OK();
  // We could technically have two weights with different shapes; that would
  // require two addScale ops, which is arguably less performant.
TRT_ShapedWeights combined_scale_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
TRT_ShapedWeights combined_offset_weights =
params->weight_store->GetTempWeights(*ptr_shape_weights);
const Eigen::half* cast_vals_array[4];
const float* vals_array[4];
for (int j = 0; j < 4; j++) {
cast_vals_array[j] =
static_cast<Eigen::half const*>(inputs.at(j + 1).weights().GetValues());
vals_array[j] =
static_cast<float const*>(inputs.at(j + 1).weights().GetValues());
}
Eigen::half* cast_combined_scale_vals = const_cast<Eigen::half*>(
static_cast<Eigen::half const*>(combined_scale_weights.GetValues()));
Eigen::half* cast_combined_offset_vals = const_cast<Eigen::half*>(
static_cast<Eigen::half const*>(combined_offset_weights.GetValues()));
float* combined_scale_vals = const_cast<float*>(
static_cast<float const*>(combined_scale_weights.GetValues()));
float* combined_offset_vals = const_cast<float*>(
static_cast<float const*>(combined_offset_weights.GetValues()));
for (size_t i = 0; i < nweight; ++i) {
float batchnorm_data[4];
for (int j = 0; j < 4; j++) {
if (inputs.at(j + 1).weights().count() != 1) {
if (parameter_type == tensorflow::DT_FLOAT) {
batchnorm_data[j] = vals_array[j][i];
} else if (parameter_type == tensorflow::DT_HALF) {
batchnorm_data[j] =
Eigen::half_impl::half_to_float(cast_vals_array[j][i]);
}
} else {
if (parameter_type == tensorflow::DT_FLOAT) {
batchnorm_data[j] = vals_array[j][0];
} else if (parameter_type == tensorflow::DT_HALF) {
batchnorm_data[j] =
Eigen::half_impl::half_to_float(cast_vals_array[j][0]);
}
}
}
float scale = batchnorm_data[0];
float offset = batchnorm_data[1];
float mean = batchnorm_data[2];
float variance = batchnorm_data[3];
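    // Fold the four batchnorm parameters into a single scale op:
    //   y = scale * (x - mean) / sqrt(variance + epsilon) + offset
    //     = combined_scale * x + combined_offset
    // where combined_scale = scale / sqrt(variance + epsilon) and
    //       combined_offset = offset - mean * combined_scale.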
float combined_scale_val = scale / sqrtf(variance + epsilon);
float combined_offset_val = offset - mean * combined_scale_val;
if (parameter_type == tensorflow::DT_FLOAT) {
combined_scale_vals[i] = combined_scale_val;
combined_offset_vals[i] = combined_offset_val;
} else if (parameter_type == tensorflow::DT_HALF) {
cast_combined_scale_vals[i] = Eigen::half(combined_scale_val);
cast_combined_offset_vals[i] = Eigen::half(combined_offset_val);
}
}
nvinfer1::ScaleMode mode = nweight == 1 ? nvinfer1::ScaleMode::kUNIFORM
: nvinfer1::ScaleMode::kCHANNEL;
nvinfer1::IScaleLayer* layer = params->converter->network()->addScale(
*const_cast<nvinfer1::ITensor*>(tensor), mode,
combined_offset_weights.GetTrtWeights(),
combined_scale_weights.GetTrtWeights(),
dummy_power_weights.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertMatMulHelper(OpConverterParams* params,
TRT_TensorOrWeights tensor_input,
TRT_ShapedWeights weights_raw,
bool transpose_weight,
string node_name) {
nvinfer1::ITensor* output_tensor;
if (!tensor_input.is_tensor()) {
return tensorflow::errors::InvalidArgument("Input 0 expects tensor");
}
const nvinfer1::ITensor* tensor = tensor_input.tensor();
TRT_ShapedWeights weights(weights_raw.type_);
if (transpose_weight) {
weights = weights_raw;
} else {
weights = params->weight_store->GetTempWeights(weights_raw);
ReorderCKtoKC(weights_raw, &weights);
}
TRT_ShapedWeights biases(weights.type_);
int noutput = weights.shape_.d[0];
auto input_dim = tensor->getDimensions();
while (input_dim.nbDims != 3) {
input_dim.d[input_dim.nbDims++] = 1;
}
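  // The loop above pads the input to rank 3 (e.g. [K] -> [K, 1, 1]) because
  // IFullyConnectedLayer expects a CHW-shaped input.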
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
tensor_input, input_dim, &tensor));
nvinfer1::IFullyConnectedLayer* layer =
params->converter->network()->addFullyConnected(
*const_cast<nvinfer1::ITensor*>(tensor), noutput,
weights.GetTrtWeights(), biases.GetTrtWeights());
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_name);
output_tensor = layer->getOutput(0);
const nvinfer1::ITensor* temp_tensor = nullptr;
auto output_dim = output_tensor->getDimensions();
output_dim.nbDims = 1;
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
TRT_TensorOrWeights(output_tensor), output_dim, &temp_tensor));
output_tensor = const_cast<nvinfer1::ITensor*>(temp_tensor);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
// Inputs are both two-dimensional (tensorflow::ops::MatMul).
tensorflow::Status ConvertMatMul(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
if (inputs.size() != 2 || !inputs.at(0).is_tensor() ||
!inputs.at(1).is_weights()) {
return errors::InvalidArgument("Input expects tensor and weights, at ",
node_def.name());
}
TFAttrs attrs(node_def);
tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
if (tf_dtype != DataType::DT_FLOAT && tf_dtype != DataType::DT_HALF) {
return errors::Unimplemented("Data type is not supported, for node ",
node_def.name(), " got ",
DataTypeString(tf_dtype));
}
bool transpose_a = attrs.get<bool>("transpose_a");
bool transpose_b = attrs.get<bool>("transpose_b");
// FullyConnected:
if (transpose_a) {
return errors::InvalidArgument(
"transpose_a is not supported for TensorRT FullyConnected (op: ",
node_def.op(), "), at: ", node_def.name());
}
if (params->validation_only) return Status::OK();
return ConvertMatMulHelper(params, inputs.at(0), inputs.at(1).weights(),
transpose_b, node_def.name());
}
tensorflow::Status ConvertBatchMatMul(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
TFAttrs attrs(node_def);
tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
if (tf_dtype != tensorflow::DataType::DT_FLOAT &&
tf_dtype != tensorflow::DataType::DT_HALF) {
return tensorflow::errors::Unimplemented(
"data type is not supported, for node " + node_def.name() + " got " +
tensorflow::DataTypeString(tf_dtype));
}
bool transpose_a = attrs.get<bool>("adj_x");
bool transpose_b = attrs.get<bool>("adj_y");
auto dims = inputs.at(0).GetTrtDims();
if (dims.nbDims == 1) { // NC * CK is only supported through fully connected
if (transpose_a == false && inputs.at(0).is_tensor() &&
inputs.at(1).is_weights()) {
return ConvertMatMulHelper(params, inputs.at(0), inputs.at(1).weights(),
transpose_b, node_def.name());
} else {
return tensorflow::errors::InvalidArgument(
"Invalid configuration for MatMul, at: " + node_def.name());
}
}
const nvinfer1::ITensor* tensor_l;
const nvinfer1::ITensor* tensor_r;
auto dims_l = inputs.at(0).GetTrtDims();
auto dims_r = inputs.at(1).GetTrtDims();
if (inputs.at(0).is_weights()) {
if (inputs.at(0).GetTrtDims().d[0] != 1) {
return tensorflow::errors::InvalidArgument(
"Input 0 as weight assumes broadcast across batch for MatMul, at: " +
node_def.name());
} else {
for (int i = 0; i < dims_l.nbDims - 1; i++) {
dims_l.d[i] = dims_l.d[i + 1];
}
dims_l.nbDims--;
}
}
if (inputs.at(1).is_weights()) {
if (inputs.at(1).GetTrtDims().d[0] != 1) {
return tensorflow::errors::InvalidArgument(
"Input 1 as weight assumes broadcast across batch for MatMul, at: " +
node_def.name());
} else {
for (int i = 0; i < dims_r.nbDims - 1; i++) {
dims_r.d[i] = dims_r.d[i + 1];
}
dims_r.nbDims--;
}
}
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
inputs.at(0), dims_l, &tensor_l));
TF_RETURN_IF_ERROR(params->converter->PrepareTensorForShape(
inputs.at(1), dims_r, &tensor_r));
nvinfer1::IMatrixMultiplyLayer* layer =
params->converter->network()->addMatrixMultiply(
*const_cast<nvinfer1::ITensor*>(tensor_l), transpose_a,
*const_cast<nvinfer1::ITensor*>(tensor_r), transpose_b);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertSoftmax(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
int nbDims = tensor->getDimensions().nbDims;
if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
"TensorRT Softmax cannot apply on batch dimension, at" +
node_def.name());
}
nvinfer1::ISoftMaxLayer* layer = params->converter->network()->addSoftMax(
*const_cast<nvinfer1::ITensor*>(tensor));
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
  // TensorFlow's Softmax is applied along the last dimension.
layer->setAxes(1 << (nbDims - 1));
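  // setAxes takes a bitmask over non-batch dimensions; e.g. for a rank-3 CHW
  // tensor, 1 << 2 selects the innermost (W) axis.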
nvinfer1::ITensor* output_tensor = layer->getOutput(0);
// Quantization range for SoftMax is always (0, 1)
params->converter->ProvideQuantizationRange(output_tensor, 0.0f, 1.0f);
params->outputs->push_back(TRT_TensorOrWeights(output_tensor));
return tensorflow::Status::OK();
}
tensorflow::Status ConvertTopK(OpConverterParams* params) {
const auto& inputs = params->inputs;
const auto& node_def = params->node_def;
const nvinfer1::ITensor* tensor = inputs.at(0).tensor();
int nbDims = tensor->getDimensions().nbDims;
if (nbDims == 0) {
return tensorflow::errors::InvalidArgument(
"TensorRT TopK cannot apply on batch dimension, at" + node_def.name());
}
TRT_ShapedWeights k_w = inputs.at(1).weights();
int k = *(static_cast<int*>(const_cast<void*>(k_w.GetValues())));
nvinfer1::TopKOperation op;
uint32_t reducedAxes = 0;
if (node_def.op() == "TopKV2") {
op = nvinfer1::TopKOperation::kMAX;
reducedAxes |= 1 << (nbDims - 1);
} else {
return tensorflow::errors::Unimplemented(
"Operation: ", node_def.op(),
" not implemented, at: ", node_def.name());
}
nvinfer1::ITopKLayer* layer = params->converter->network()->addTopK(
*const_cast<nvinfer1::ITensor*>(tensor), op, k, reducedAxes);
TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name());
nvinfer1::ITensor* output_value_tensor = layer->getOutput(0);
nvinfer1::ITensor* output_indices_tensor = layer->getOutput(1);
params->outputs->push_back(TRT_TensorOrWeights(output_value_tensor));
params->outputs->push_back(TRT_TensorOrWeights(output_indices_tensor));
return tensorflow::Status::OK();
}
static void RegisterValidatableOpConverters(
std::unordered_map<string, OpConverter>* registration) {
// TODO(laigd): support all op types.
(*registration)["BiasAdd"] = ConvertBiasAdd;
(*registration)["ConcatV2"] = ConvertConcat;
(*registration)["Const"] = ConvertConst;
(*registration)["Conv2D"] = ConvertConv2D;
(*registration)["DepthwiseConv2dNative"] = ConvertConv2DDepthwise;
(*registration)["ExpandDims"] = ConvertExpandDims;
(*registration)["MatMul"] = ConvertMatMul;
(*registration)["Pad"] = ConvertPad;
(*registration)["Relu6"] = ConvertRelu6;
(*registration)["Reshape"] = ConvertReshape;
(*registration)["Square"] = ConvertSquare;
(*registration)["Squeeze"] = ConvertSqueeze;
(*registration)["StridedSlice"] = ConvertStridedSlice;
(*registration)["Transpose"] = ConvertTranspose;
for (auto quantization_op_type :
{"QuantizeAndDequantizeV2", "QuantizeAndDequantizeV3",
"FakeQuantWithMinMaxVars", "FakeQuantWithMinMaxArgs"}) {
(*registration)[quantization_op_type] = ConvertQuantize;
}
for (auto binary_op_type :
{"Add", "Mul", "Sub", "Div", "RealDiv", "Maximum", "Minimum"}) {
(*registration)[binary_op_type] = ConvertBinary;
}
for (auto activation_op_type : {"Relu", "Sigmoid", "Tanh"}) {
(*registration)[activation_op_type] = ConvertActivation;
}
for (auto pool_op_type : {"AvgPool", "MaxPool"}) {
(*registration)[pool_op_type] = ConvertPool;
}
for (auto normalization_op_type : {"FusedBatchNorm", "FusedBatchNormV2"}) {
(*registration)[normalization_op_type] = ConvertFusedBatchNorm;
}
}
void TrtNodeValidator::RegisterOpValidators() {
RegisterValidatableOpConverters(&op_validators_);
}
void Converter::RegisterOpConverters() {
RegisterValidatableOpConverters(&op_registry_);
// TODO(ben,jie): this is a temp hack.
op_registry_["Identity"] = ConvertIdentity; // Identity should be removed
op_registry_["Snapshot"] = ConvertIdentity; // Snapshot should be removed
op_registry_["Rsqrt"] = ConvertUnary;
op_registry_["Reciprocal"] = ConvertUnary;
op_registry_["Exp"] = ConvertUnary;
op_registry_["Log"] = ConvertUnary;
op_registry_["Sqrt"] = ConvertUnary;
op_registry_["Abs"] = ConvertUnary;
op_registry_["Neg"] = ConvertUnary;
op_registry_["Sum"] = ConvertReduce;
op_registry_["Prod"] = ConvertReduce;
op_registry_["Max"] = ConvertReduce;
op_registry_["Min"] = ConvertReduce;
op_registry_["Mean"] = ConvertReduce;
op_registry_["Softmax"] = ConvertSoftmax;
op_registry_["BatchMatMul"] = ConvertBatchMatMul;
op_registry_["TopKV2"] = ConvertTopK;
plugin_converter_ = ConvertPlugin;
}
tensorflow::Status ConvertGraphDefToEngine(
const tensorflow::GraphDef& gdef, int precision_mode, int max_batch_size,
size_t max_workspace_size_bytes,
const std::vector<tensorflow::PartialTensorShape>& input_shapes,
Logger* logger, nvinfer1::IGpuAllocator* allocator,
TRTInt8Calibrator* calibrator,
TrtUniquePtrType<nvinfer1::ICudaEngine>* engine, bool use_calibration,
bool* convert_successfully) {
engine->reset();
if (convert_successfully) *convert_successfully = false;
// Create the builder.
TrtUniquePtrType<nvinfer1::IBuilder> builder(
nvinfer1::createInferBuilder(*logger));
builder->setMaxBatchSize(max_batch_size);
builder->setMaxWorkspaceSize(max_workspace_size_bytes);
builder->setGpuAllocator(allocator);
if (precision_mode == FP16MODE) {
builder->setHalf2Mode(true);
} else if (precision_mode == INT8MODE) {
builder->setInt8Mode(true);
if (use_calibration) {
builder->setInt8Calibrator(calibrator);
} else {
builder->setInt8Calibrator(nullptr);
}
}
// Create the network.
auto trt_network =
TrtUniquePtrType<nvinfer1::INetworkDefinition>(builder->createNetwork());
if (!trt_network) {
return tensorflow::errors::Internal(
"Failed to create TensorRT network object");
}
// Build the network
VLOG(1) << "Starting engine conversion ";
Converter converter(trt_network.get(), precision_mode, use_calibration);
std::vector<Converter::EngineOutputInfo> output_tensors;
// Graph nodes are already topologically sorted during construction
for (const auto& node_def : gdef.node()) {
string node_name = node_def.name();
VLOG(2) << "Converting op name=" << node_name << ", op=" << node_def.op();
if (tensorflow::str_util::StartsWith(node_name, kInputPHName) &&
(node_def.op() == "Placeholder")) {
int32 slot_number = -1;
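      // Engine input placeholders are named "<kInputPHName><slot>", so the
      // numeric suffix identifies the engine input slot.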
if (!tensorflow::strings::safe_strto32( // non-absl ok
node_name.c_str() + strlen(kInputPHName), &slot_number)) {
return tensorflow::errors::InvalidArgument(
"Failed to parse slot number from ", node_name);
}
nvinfer1::DataType trt_dtype;
nvinfer1::Dims trt_dims;
int batch_size = -1;
auto shape = input_shapes.at(slot_number);
auto status = ValidateTensorProperties(
node_def.op(), node_def.attr().at("dtype").type(), shape,
/*validation_only=*/false, &trt_dtype, &trt_dims, &batch_size);
if (!status.ok()) {
const string error_message =
StrCat("Validation failed for ", node_name, " and input slot ",
slot_number, ": ", status.error_message());
LOG(WARNING) << error_message;
return Status(status.code(), error_message);
}
VLOG(2) << "Adding engine input tensor " << node_name << " with shape "
<< DebugString(trt_dims);
// TODO(laigd): the conversion should always happen at runtime where all
// the shapes are known, and we can provide a mode to generate the
// engines offline, by calling sess.run() and cache/serialize the engines.
TF_RETURN_IF_ERROR(
converter.AddInputTensor(node_name, trt_dtype, trt_dims, batch_size));
} else if (tensorflow::str_util::StartsWith(node_name, kOutputPHName) &&
(node_def.op() == "Identity")) {
int32 slot_number = -1;
if (!tensorflow::strings::safe_strto32( // non-absl ok
node_name.c_str() + strlen(kOutputPHName), &slot_number)) {
return tensorflow::errors::InvalidArgument(
"Failed to parse slot number from ", node_name);
}
// Get output type that TensorFlow expects
TFAttrs attrs(node_def);
tensorflow::DataType tf_dtype = attrs.get<tensorflow::DataType>("T");
nvinfer1::DataType trt_dtype;
TF_RETURN_IF_ERROR(ConvertDType(tf_dtype, &trt_dtype));
      if (static_cast<int32>(output_tensors.size()) <= slot_number) {
output_tensors.resize(slot_number + 1);
}
output_tensors.at(slot_number) = {node_def.input(0), node_name,
trt_dtype};
} else {
VLOG(2) << "Converting node: " << node_def.name() << " , "
<< node_def.op();
TF_RETURN_IF_ERROR(converter.ConvertNode(node_def));
}
}
TF_RETURN_IF_ERROR(converter.RenameAndMarkOutputTensors(output_tensors));
if (convert_successfully) *convert_successfully = true;
// Apply user provided quantization ranges to tensors
converter.MaybeApplyQuantizationRanges();
// Build the engine.
VLOG(1) << "Starting engine creation";
engine->reset(builder->buildCudaEngine(*converter.network()));
if (engine->get() == nullptr) {
return tensorflow::errors::Internal("Failed to build TensorRT engine");
}
VLOG(1) << "Finished conversion";
return tensorflow::Status::OK();
}
tensorflow::Status ConvertSegmentToGraphDef(
const tensorflow::Graph* graph,
const tensorflow::grappler::GraphProperties& graph_properties,
const std::vector<const Node*>& subgraph_nodes, // In topological order
std::vector<EngineConnection>* connections,
tensorflow::GraphDef* segment_def, string* common_scope) {
std::set<string> marker_nodes;
// Update connection shapes/data types and add corresponding input/output
// nodes in the segment graphdef.
for (size_t i = 0; i < connections->size(); ++i) {
auto& connection = connections->at(i);
if (connection.is_control_edge()) continue;
auto outside_node = graph->FindNodeId(connection.outside_id);
if (!outside_node) {
// This should never happen, unless the original graph is problematic.
return tensorflow::errors::NotFound(
"Cannot find node with id ", connection.outside_id, " in the graph.");
}
// Updates the shape and data types of input/output connections.
tensorflow::DataType dtype;
tensorflow::PartialTensorShape partial_shape;
if (connection.is_input_edge) {
GetOutputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.outside_shape = partial_shape;
} else {
GetInputProperties(graph_properties,
graph->FindNodeId(connection.outside_id),
connection.outside_port, &partial_shape, &dtype);
connection.inside_shape = partial_shape;
}
connection.connection_type = dtype;
// Add dummy input/output nodes to the segment graphdef.
if (connection.is_input_edge) {
const string node_name = StrCat(kInputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing input " << node_name << " for the edge "
<< connection.outside_node_name << ":"
<< connection.outside_port << " -> "
<< connection.inside_node_name << ":" << connection.inside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
tensorflow::NodeDefBuilder builder(node_name, "Placeholder");
auto status = builder.Attr("shape", partial_shape)
.Attr("dtype", dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing input " << node_name << " for the edge "
<< connection.outside_node_name << ":" << connection.outside_port
<< " -> " << connection.inside_node_name << ":"
<< connection.inside_port;
} else {
const string node_name = StrCat(kOutputPHName, connection.port_number);
if (marker_nodes.count(node_name)) {
VLOG(1) << "Reusing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
continue;
}
marker_nodes.insert(node_name);
auto seg_node = segment_def->add_node();
tensorflow::NodeDefBuilder builder(node_name, "Identity");
auto status =
builder
.Input(connection.inside_node_name, connection.inside_port, dtype)
.Finalize(seg_node);
VLOG(1) << "Constructing output " << node_name << " for the edge "
<< connection.inside_node_name << ":" << connection.inside_port
<< " -> " << connection.outside_node_name << ":"
<< connection.outside_port;
}
} // for each connection.
std::unordered_map<int, int> old_to_new_id_map;
// Copy internal nodes to new graphdef
string local_scope = subgraph_nodes.front()->name();
for (const Node* node : subgraph_nodes) {
local_scope = GetCommonNameScope(local_scope, node->name());
old_to_new_id_map[node->id()] = segment_def->node_size();
auto snode = segment_def->add_node();
snode->CopyFrom(node->def());
VLOG(2) << "Copying " << snode->name() << " to subgraph";
}
// Update the inputs of the new input nodes to point to placeholder nodes.
for (int i = 0; i < connections->size(); ++i) {
auto& connection = connections->at(i);
if (connection.is_control_edge() || !connection.is_input_edge) continue;
auto snode =
segment_def->mutable_node(old_to_new_id_map[connection.inside_id]);
const string placeholder_name =
StrCat(kInputPHName, connection.port_number);
VLOG(1) << "Updating " << snode->name() << ":" << connection.inside_port
<< " from " << snode->input(connection.inside_port) << " to "
<< placeholder_name;
snode->set_input(connection.inside_port, placeholder_name);
}
std::set<string> subgraph_node_names;
for (const Node* node : subgraph_nodes) {
subgraph_node_names.insert(node->name());
}
// Remove control inputs that are not inside the segment.
for (int i = 0; i < segment_def->node_size(); ++i) {
auto snode = segment_def->mutable_node(i);
const int input_size = snode->input_size();
int input_idx = 0;
int actual_input_idx = 0;
while (input_idx < input_size) {
TensorId input = ParseTensorName(snode->input(input_idx));
if (!subgraph_node_names.count(
string(input.first.data(), input.first.size())) &&
!str_util::StartsWith(input.first, kInputPHName)) {
if (input.second == Graph::kControlSlot) {
VLOG(1) << "... removing control inputs " << input.first
<< " from subgraph.";
++input_idx;
continue;
} else {
return tensorflow::errors::InvalidArgument(
"Found non control input outside the segment that is not an "
"engine connection to ",
snode->name(), ": ", input.first);
}
}
if (actual_input_idx != input_idx) {
snode->set_input(actual_input_idx, snode->input(input_idx));
}
++input_idx;
++actual_input_idx;
}
for (int remove = input_size - actual_input_idx; remove > 0; --remove) {
snode->mutable_input()->RemoveLast();
}
}
*common_scope = local_scope;
VLOG(1) << "Converted TensorRT candidate segment @scope '" << local_scope
<< "' to a GraphDef";
return tensorflow::Status::OK();
}
bool OutputEdgeValidator::operator()(const tensorflow::Edge* out_edge) const {
if (out_edge->IsControlEdge()) return true;
if (out_edge->src()->type_string() == "Const") {
VLOG(1) << "--> Need to remove output node " << out_edge->src()->name()
<< " which is a Const.";
return false;
}
return true;
}
} // namespace convert
} // namespace tensorrt
} // namespace tensorflow
#endif // GOOGLE_TENSORRT
#endif // GOOGLE_CUDA
|
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef NT2_EXPONENTIAL_CONSTANTS_TWOTOMNMBO_3_HPP_INCLUDED
#define NT2_EXPONENTIAL_CONSTANTS_TWOTOMNMBO_3_HPP_INCLUDED
#include <nt2/include/functor.hpp>
#include <boost/simd/constant/hierarchy.hpp>
#include <boost/simd/constant/register.hpp>
namespace nt2
{
namespace tag
{
/*!
@brief Twotomnmbo_3 generic tag
Represents the Twotomnmbo_3 constant in generic contexts.
@par Models:
Hierarchy
**/
BOOST_SIMD_CONSTANT_REGISTER( Twotomnmbo_3, double
, 0, 0x3ba14518LL
, 0x3ed428a2f98d7286ULL
)
}
namespace ext
{
template<class Site, class... Ts>
BOOST_FORCEINLINE generic_dispatcher<tag::Twotomnmbo_3, Site> dispatching_Twotomnmbo_3(adl_helper, boost::dispatch::meta::unknown_<Site>, boost::dispatch::meta::unknown_<Ts>...)
{
return generic_dispatcher<tag::Twotomnmbo_3, Site>();
}
template<class... Args>
struct impl_Twotomnmbo_3;
}
/*!
    Generates the constant \f$2^{-nmb/3}\f$.
    @par Semantic:
    \f$2^{-nmb/3}\f$
@code
T r = twotomnmbo_3<T>();
@endcode
is similar to:
@code
if T is float
r = 4.921566601151848e-03f
else
r = 4.806217383937348e-06
@endcode
**/
BOOST_SIMD_CONSTANT_IMPLEMENTATION(tag::Twotomnmbo_3, Twotomnmbo_3);
}
#endif
|
#include "BuildingSymbolizer.h"
#include "ParserUtils.h"
#include <cmath>
namespace carto { namespace mvt {
BuildingSymbolizer::FeatureProcessor BuildingSymbolizer::createFeatureProcessor(const ExpressionContext& exprContext, const SymbolizerContext& symbolizerContext) const {
vt::FloatFunction fillOpacityFunc = _fillOpacity.getFunction(exprContext);
vt::ColorFunction fillColorFunc = _fill.getFunction(exprContext);
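        // A constant zero opacity or an all-zero color function means nothing
        // would be drawn, so skip creating a feature processor entirely.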
if (fillOpacityFunc == vt::FloatFunction(0) || fillColorFunc == vt::ColorFunction(vt::Color())) {
return FeatureProcessor();
}
vt::ColorFunction fillFunc = _fillFuncBuilder.createColorOpacityFunction(fillColorFunc, fillOpacityFunc);
std::optional<vt::Transform> geometryTransform = _geometryTransform.getValue(exprContext);
float height = _height.getValue(exprContext);
float minHeight = _minHeight.getValue(exprContext);
vt::Polygon3DStyle style(fillFunc, geometryTransform);
return [style, height, minHeight, this](const FeatureCollection& featureCollection, vt::TileLayerBuilder& layerBuilder) {
bool suppressWarning = false;
if (auto polygon3DProcessor = layerBuilder.createPolygon3DProcessor(style)) {
for (std::size_t featureIndex = 0; featureIndex < featureCollection.size(); featureIndex++) {
if (auto polygonGeometry = featureCollection.getPolygonGeometry(featureIndex)) {
for (const auto& verticesList : polygonGeometry->getPolygonList()) {
polygon3DProcessor(featureCollection.getLocalId(featureIndex), verticesList, minHeight, height);
}
}
else if (!suppressWarning) {
_logger->write(Logger::Severity::WARNING, "Unsupported geometry for BuildingSymbolizer");
suppressWarning = true;
}
}
}
};
}
} }
|
class Solution {
public:
    int FirstNotRepeatingChar(string str) {
        if (str.empty()) return -1;
        // Count occurrences of each byte value; index with unsigned char so
        // high-bit characters do not produce a negative array index.
        int counts[256] = {0};
        for (size_t i = 0; i < str.size(); i++) {
            counts[static_cast<unsigned char>(str[i])]++;
        }
        // The first character whose count is exactly 1 is the answer.
        for (size_t i = 0; i < str.size(); i++) {
            if (counts[static_cast<unsigned char>(str[i])] == 1) {
                return static_cast<int>(i);
            }
        }
        return -1;
    }
};
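// Example: FirstNotRepeatingChar("google") returns 4, the index of 'l'.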
// Such a weird problem...
|
#include "gamescreen.h"
#include <fstream>
#include "frame.h"
#include "titlescreen.h"
#include "gameplay.h"
#include "spritedef.h"
GameScreen::GameScreen(sf::RenderWindow * window,
float initialVelocity,
float maxVelocity,
float acceleration,
const std::shared_ptr<Gameplay> & gameplay,
const boost::property_tree::ptree & config):
_window(window),
_overlay(gameplay),
_initialVelocity(initialVelocity),
_maxVelocity(maxVelocity),
_acceleration(acceleration),
_gameplay(gameplay),
_config(config),
_paddleId(_entityIdGenerator.generate()),
_ballId(_entityIdGenerator.generate()),
_mouseX(350),
_status(RUNNING),
_soundSubsystem(config.get_child("sounds"))
{
_window->setMouseCursorVisible(false);
_physicSubsystem.onMove().connect(&_graphicSubsystem, &GraphicSubsystem::onMove);
_physicSubsystem.onMove().connect(this, &GameScreen::onMove);
_physicSubsystem.onCollision().connect(&_soundSubsystem, &SoundSubsystem::onCollision);
_physicSubsystem.onDestroy().connect(&_graphicSubsystem, &GraphicSubsystem::onDestroy);
_physicSubsystem.onDestroy().connect(&_soundSubsystem, &SoundSubsystem::onDestroy);
_physicSubsystem.onDestroy().connect(this, &GameScreen::onDestroy);
makeLevel();
}
std::shared_ptr<Screen> GameScreen::onMouseMove(int x, int)
{
if(_status == RUNNING)
{
_mouseX = x;
_physicSubsystem.moveObstacle(_paddleId, sf::Vector2f(x, 560));
if(_physicSubsystem.isStatic(_ballId))
{
_physicSubsystem.moveDynamic(_ballId, sf::Vector2f(x, 540));
}
}
return nullptr;
}
std::shared_ptr<Screen> GameScreen::onMouseClick(sf::Mouse::Button, int, int)
{
if(_status == RUNNING && _physicSubsystem.isStatic(_ballId))
{
_physicSubsystem.setDynamic(_ballId, normalize(sf::Vector2f(1, -1)) * _initialVelocity);
}
return nullptr;
}
std::shared_ptr<Screen> GameScreen::onKey(const sf::Event::KeyEvent & key)
{
if(key.code == sf::Keyboard::Escape)
{
return std::make_shared<TitleScreen>(_window);
}
else return nullptr;
}
std::shared_ptr<Screen> GameScreen::onFrame(sf::Time elapsed)
{
if(_status == FAIL)
{
        // We ran out of lives; reset the game
return std::make_shared<TitleScreen>(_window);
}
else if(_entitiesToDelete.empty())
{
// Success! Go to the next stage
_gameplay->success();
return std::make_shared<GameScreen>(_window, _initialVelocity, _maxVelocity, _acceleration, _gameplay, _config);
}
else if(_status == PAUSE && _pauseTime > elapsed)
{
// Paused
_pauseTime -= elapsed;
}
else
{
_status = RUNNING;
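            // Consume any leftover pause time so that only the portion of
            // this frame actually spent running is simulated below.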
elapsed -= _pauseTime;
_pauseTime = sf::seconds(0);
_overlay.clear();
}
if(_status == RUNNING && elapsed > sf::seconds(0))
{
// Status is RUNNING
_physicSubsystem.simulate(elapsed);
}
return nullptr;
}
void GameScreen::draw()
{
_window->draw(_graphicSubsystem);
_window->draw(_overlay);
}
void GameScreen::onMove(EntityId entityId, const sf::Vector2f & position)
{
if(entityId == _ballId && position.y > 650)
{
if(_gameplay->failure())
{
_status = FAIL;
}
else
{
_physicSubsystem.setStatic(_ballId);
_physicSubsystem.moveDynamic(_ballId, sf::Vector2f(_mouseX, 540));
_status = PAUSE;
_pauseTime = sf::seconds(1);
_overlay.onFailure();
}
}
}
void GameScreen::onDestroy(EntityId entityId)
{
_entitiesToDelete.erase(entityId);
}
void GameScreen::makeLevel()
{
std::ifstream level(_gameplay->getCurrentLevel().getLevelFilename().c_str());
if(!level.good())
throw std::runtime_error("Cannot find level description file " + _gameplay->getCurrentLevel().getLevelFilename());
// Add bricks
int row = 0;
std::string line;
while(level.good())
{
std::getline(level, line, '\n');
int col = 0;
std::istringstream linestr(line);
std::for_each(std::istream_iterator<int>(linestr),
std::istream_iterator<int>(),
[&](int brickType)
{
if(brickType > 0)
{
EntityId entityId = _entityIdGenerator.generate();
_entitiesToDelete.insert(entityId);
sf::Vector2f position(col * 50, row * 30 + 50);
_graphicSubsystem.add(entityId,
SpriteDef("resources/brick_sprite_sheet.png",
sf::IntRect(0, (brickType - 1) * 30, 50, 30)),
position);
_physicSubsystem.addObstacle(entityId,
std::make_shared<OutsideRectangle>(position,
position + sf::Vector2f(50, 30)),
false,
brickType == 5 ? 3 : 1,
false);
_soundSubsystem.add(entityId,
SoundSubsystem::Brick);
}
++col;
});
++row;
}
// Add surrounding walls
EntityId wallId = _entityIdGenerator.generate();
_physicSubsystem.addObstacle(wallId,
std::make_shared<InsideRectangle>(sf::Vector2f(0, 0),
sf::Vector2f(800, 1000)),
false,
0,
true);
_soundSubsystem.add(wallId, SoundSubsystem::Wall);
// Add ball
sf::Vector2f ballPosition(50, 50);
_graphicSubsystem.add(_ballId,
SpriteDef("resources/bille.png",
sf::IntRect(0, 0, 20, 20),
sf::Vector2f(10, 10)),
ballPosition);
_physicSubsystem.addDynamic(_ballId, Disc(ballPosition, 10));
_soundSubsystem.add(_ballId, SoundSubsystem::Ball);
// Add paddle
sf::Vector2f paddlePosition(_mouseX, 560);
_graphicSubsystem.add(_paddleId,
SpriteDef("resources/plateau.png",
sf::IntRect(0, 0, 60, 20),
sf::Vector2f(30, 10)),
paddlePosition);
_physicSubsystem.addObstacle(_paddleId,
std::make_shared<OutsideRectangle>(paddlePosition + sf::Vector2f(-30, -10),
paddlePosition + sf::Vector2f(30, 10)),
true,
0,
true);
_soundSubsystem.add(_paddleId, SoundSubsystem::Paddle);
onMouseMove(_mouseX, 0);
}
|
/********************************************************************************
* Copyright 2009 The Robotics Group, The Maersk Mc-Kinney Moller Institute,
* Faculty of Engineering, University of Southern Denmark
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************/
#include "ProximityCache.hpp"
|
#ifndef SPROUT_ALGORITHM_SHUFFLE_HPP
#define SPROUT_ALGORITHM_SHUFFLE_HPP
#include <sprout/config.hpp>
#include <sprout/algorithm/fixed/shuffle.hpp>
#include <sprout/algorithm/fit/shuffle.hpp>
#endif // #ifndef SPROUT_ALGORITHM_SHUFFLE_HPP
|
/*=========================================================================
Program: Visualization Toolkit
  Module:    TestGPURayCastPerspectiveParallel.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// This test covers the switch from perspective to parallel projection.
// It volume-renders a synthetic dataset with unsigned char values,
// using the composite method.
#include "vtkSphere.h"
#include "vtkSampleFunction.h"
#include "vtkGPUVolumeRayCastMapper.h"
#include "vtkTestUtilities.h"
#include "vtkColorTransferFunction.h"
#include "vtkPiecewiseFunction.h"
#include "vtkRenderer.h"
#include "vtkRenderWindow.h"
#include "vtkRenderWindowInteractor.h"
#include "vtkVolumeProperty.h"
#include "vtkCamera.h"
#include "vtkRegressionTestImage.h"
#include "vtkImageShiftScale.h"
#include "vtkImageData.h"
#include "vtkPointData.h"
#include "vtkDataArray.h"
int TestGPURayCastPerspectiveParallel(int argc,
char *argv[])
{
cout << "CTEST_FULL_OUTPUT (Avoid ctest truncation of output)" << endl;
// Create a spherical implicit function.
vtkSphere *shape=vtkSphere::New();
shape->SetRadius(0.1);
shape->SetCenter(0.0,0.0,0.0);
vtkSampleFunction *source=vtkSampleFunction::New();
source->SetImplicitFunction(shape);
shape->Delete();
source->SetOutputScalarTypeToDouble();
source->SetSampleDimensions(127,127,127); // intentional NPOT dimensions.
source->SetModelBounds(-1.0,1.0,-1.0,1.0,-1.0,1.0);
source->SetCapping(false);
source->SetComputeNormals(false);
source->SetScalarArrayName("values");
source->Update();
vtkDataArray *a=source->GetOutput()->GetPointData()->GetScalars("values");
double range[2];
a->GetRange(range);
vtkImageShiftScale *t=vtkImageShiftScale::New();
t->SetInputConnection(source->GetOutputPort());
source->Delete();
t->SetShift(-range[0]);
double magnitude=range[1]-range[0];
if(magnitude==0.0)
{
magnitude=1.0;
}
t->SetScale(255.0/magnitude);
t->SetOutputScalarTypeToUnsignedChar();
t->Update();
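  // At this point the scalars have been remapped: SetShift(-range[0]) moves
  // the minimum to 0 and SetScale(255/(range[1]-range[0])) stretches the
  // data onto [0, 255] before the unsigned-char conversion. For example, a
  // sampled range of [-0.01, 2.99] yields shift 0.01 and scale 255/3 = 85.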
vtkRenderWindow *renWin=vtkRenderWindow::New();
vtkRenderer *ren1=vtkRenderer::New();
ren1->SetBackground(0.1,0.4,0.2);
renWin->AddRenderer(ren1);
ren1->Delete();
renWin->SetSize(301,300); // intentional odd and NPOT width/height
vtkRenderWindowInteractor *iren=vtkRenderWindowInteractor::New();
iren->SetRenderWindow(renWin);
renWin->Delete();
renWin->Render(); // make sure we have an OpenGL context.
vtkGPUVolumeRayCastMapper *volumeMapper;
vtkVolumeProperty *volumeProperty;
vtkVolume *volume;
volumeMapper=vtkGPUVolumeRayCastMapper::New();
volumeMapper->SetBlendModeToComposite();
volumeMapper->SetInputConnection(
t->GetOutputPort());
volumeProperty=vtkVolumeProperty::New();
volumeProperty->ShadeOff();
volumeProperty->SetInterpolationType(VTK_LINEAR_INTERPOLATION);
vtkPiecewiseFunction *compositeOpacity = vtkPiecewiseFunction::New();
compositeOpacity->AddPoint(0.0,0.0);
compositeOpacity->AddPoint(80.0,1.0);
compositeOpacity->AddPoint(80.1,0.0);
compositeOpacity->AddPoint(255.0,0.0);
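  // Net effect: opacity ramps linearly from 0 at scalar 0 up to 1 at 80,
  // then drops to 0 at 80.1 and stays transparent through 255, so only a
  // narrow band of values around 80 is rendered.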
volumeProperty->SetScalarOpacity(compositeOpacity);
vtkColorTransferFunction *color=vtkColorTransferFunction::New();
color->AddRGBPoint(0.0 ,0.0,0.0,1.0);
color->AddRGBPoint(40.0 ,1.0,0.0,0.0);
color->AddRGBPoint(255.0,1.0,1.0,1.0);
volumeProperty->SetColor(color);
color->Delete();
volume=vtkVolume::New();
volume->SetMapper(volumeMapper);
volume->SetProperty(volumeProperty);
ren1->AddViewProp(volume);
int valid=volumeMapper->IsRenderSupported(renWin,volumeProperty);
int retVal;
if(valid)
{
ren1->ResetCamera();
    // Render composite. Default camera is perspective.
renWin->Render();
// Switch to parallel
vtkCamera *c=ren1->GetActiveCamera();
c->SetParallelProjection(true);
renWin->Render();
retVal = vtkTesting::Test(argc, argv, renWin, 75);
if (retVal == vtkRegressionTester::DO_INTERACTOR)
{
iren->Start();
}
}
else
{
retVal=vtkTesting::PASSED;
cout << "Required extensions not supported." << endl;
}
volumeMapper->Delete();
volumeProperty->Delete();
volume->Delete();
iren->Delete();
t->Delete();
compositeOpacity->Delete();
return !((retVal == vtkTesting::PASSED) || (retVal == vtkTesting::DO_INTERACTOR));
}
|
//===- unittest/Tooling/QualTypeNameTest.cpp ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "clang/AST/QualTypeNames.h"
#include "TestVisitor.h"
using namespace clang;
namespace {
struct TypeNameVisitor : TestVisitor<TypeNameVisitor> {
llvm::StringMap<std::string> ExpectedQualTypeNames;
bool WithGlobalNsPrefix = false;
// ValueDecls are the least-derived decl with both a qualtype and a
// name.
bool traverseDecl(Decl *D) {
return true; // Always continue
}
bool VisitValueDecl(const ValueDecl *VD) {
std::string ExpectedName =
ExpectedQualTypeNames.lookup(VD->getNameAsString());
if (ExpectedName != "") {
PrintingPolicy Policy(Context->getPrintingPolicy());
Policy.SuppressScope = false;
Policy.AnonymousTagLocations = true;
Policy.PolishForDeclaration = true;
Policy.SuppressUnwrittenScope = true;
std::string ActualName = TypeName::getFullyQualifiedName(
VD->getType(), *Context, Policy, WithGlobalNsPrefix);
if (ExpectedName != ActualName) {
// A custom message makes it much easier to see what declaration
// failed compared to EXPECT_EQ.
EXPECT_TRUE(false) << "Typename::getFullyQualifiedName failed for "
<< VD->getQualifiedNameAsString() << std::endl
<< " Actual: " << ActualName << std::endl
<< " Exepcted: " << ExpectedName;
}
}
return true;
}
};
// named namespaces inside anonymous namespaces
TEST(QualTypeNameTest, getFullyQualifiedName) {
TypeNameVisitor Visitor;
// Simple case to test the test framework itself.
Visitor.ExpectedQualTypeNames["CheckInt"] = "int";
// Keeping the names of the variables whose types we check unique
// within the entire test--regardless of their own scope--makes it
// easier to diagnose test failures.
// Simple namespace qualifier
Visitor.ExpectedQualTypeNames["CheckA"] = "A::B::Class0";
  // Look up the enclosing scopes, then down another one. (These
  // appear as an elaborated type in the AST. In that case--even if
  // policy.SuppressScope = 0--qual_type.getAsString(policy) only
  // gives the name as it appears in the source, not the full name.)
Visitor.ExpectedQualTypeNames["CheckB"] = "A::B::C::Class1";
// Template parameter expansion.
Visitor.ExpectedQualTypeNames["CheckC"] =
"A::B::Template0<A::B::C::MyInt, A::B::AnotherClass>";
// Recursive template parameter expansion.
Visitor.ExpectedQualTypeNames["CheckD"] =
"A::B::Template0<A::B::Template1<A::B::C::MyInt, A::B::AnotherClass>, "
"A::B::Template0<int, long> >";
// Variadic Template expansion.
Visitor.ExpectedQualTypeNames["CheckE"] =
"A::Variadic<int, A::B::Template0<int, char>, "
"A::B::Template1<int, long>, A::B::C::MyInt>";
// Using declarations should be fully expanded.
Visitor.ExpectedQualTypeNames["CheckF"] = "A::B::Class0";
// Elements found within "using namespace foo;" should be fully
// expanded.
Visitor.ExpectedQualTypeNames["CheckG"] = "A::B::C::MyInt";
// Type inside function
Visitor.ExpectedQualTypeNames["CheckH"] = "struct X";
// Anonymous Namespaces
Visitor.ExpectedQualTypeNames["CheckI"] = "aClass";
// Keyword inclusion with namespaces
Visitor.ExpectedQualTypeNames["CheckJ"] = "struct A::aStruct";
// Anonymous Namespaces nested in named namespaces and vice-versa.
Visitor.ExpectedQualTypeNames["CheckK"] = "D::aStruct";
// Namespace alias
Visitor.ExpectedQualTypeNames["CheckL"] = "A::B::C::MyInt";
Visitor.ExpectedQualTypeNames["non_dependent_type_var"] =
"Foo<X>::non_dependent_type";
Visitor.ExpectedQualTypeNames["AnEnumVar"] = "EnumScopeClass::AnEnum";
Visitor.ExpectedQualTypeNames["AliasTypeVal"] = "A::B::C::InnerAlias<int>";
Visitor.ExpectedQualTypeNames["CheckM"] = "const A::B::Class0 *";
Visitor.ExpectedQualTypeNames["CheckN"] = "const X *";
Visitor.runOver(
"int CheckInt;\n"
"template <typename T>\n"
"class OuterTemplateClass { };\n"
"namespace A {\n"
" namespace B {\n"
" class Class0 { };\n"
" namespace C {\n"
" typedef int MyInt;"
" template <typename T>\n"
" using InnerAlias = OuterTemplateClass<T>;\n"
" InnerAlias<int> AliasTypeVal;\n"
" }\n"
" template<class X, class Y> class Template0;"
" template<class X, class Y> class Template1;"
" typedef B::Class0 AnotherClass;\n"
" void Function1(Template0<C::MyInt,\n"
" AnotherClass> CheckC);\n"
" void Function2(Template0<Template1<C::MyInt, AnotherClass>,\n"
" Template0<int, long> > CheckD);\n"
" void Function3(const B::Class0* CheckM);\n"
" }\n"
"template<typename... Values> class Variadic {};\n"
"Variadic<int, B::Template0<int, char>, "
" B::Template1<int, long>, "
" B::C::MyInt > CheckE;\n"
" namespace BC = B::C;\n"
" BC::MyInt CheckL;\n"
"}\n"
"using A::B::Class0;\n"
"void Function(Class0 CheckF);\n"
"using namespace A::B::C;\n"
"void Function(MyInt CheckG);\n"
"void f() {\n"
" struct X {} CheckH;\n"
"}\n"
"struct X;\n"
"void f(const ::X* CheckN) {}\n"
"namespace {\n"
" class aClass {};\n"
" aClass CheckI;\n"
"}\n"
"namespace A {\n"
" struct aStruct {} CheckJ;\n"
"}\n"
"namespace {\n"
" namespace D {\n"
" namespace {\n"
" class aStruct {};\n"
" aStruct CheckK;\n"
" }\n"
" }\n"
"}\n"
"template<class T> struct Foo {\n"
" typedef typename T::A dependent_type;\n"
" typedef int non_dependent_type;\n"
" dependent_type dependent_type_var;\n"
" non_dependent_type non_dependent_type_var;\n"
"};\n"
"struct X { typedef int A; };"
"Foo<X> var;"
"void F() {\n"
" var.dependent_type_var = 0;\n"
"var.non_dependent_type_var = 0;\n"
"}\n"
"class EnumScopeClass {\n"
"public:\n"
" enum AnEnum { ZERO, ONE };\n"
"};\n"
"EnumScopeClass::AnEnum AnEnumVar;\n",
TypeNameVisitor::Lang_CXX11
);
TypeNameVisitor Complex;
Complex.ExpectedQualTypeNames["CheckTX"] = "B::TX";
Complex.runOver(
"namespace A {"
" struct X {};"
"}"
"using A::X;"
"namespace fake_std {"
" template<class... Types > class tuple {};"
"}"
"namespace B {"
" using fake_std::tuple;"
" typedef tuple<X> TX;"
" TX CheckTX;"
" struct A { typedef int X; };"
"}");
TypeNameVisitor GlobalNsPrefix;
GlobalNsPrefix.WithGlobalNsPrefix = true;
GlobalNsPrefix.ExpectedQualTypeNames["IntVal"] = "int";
GlobalNsPrefix.ExpectedQualTypeNames["BoolVal"] = "bool";
GlobalNsPrefix.ExpectedQualTypeNames["XVal"] = "::A::B::X";
GlobalNsPrefix.ExpectedQualTypeNames["IntAliasVal"] = "::A::B::Alias<int>";
GlobalNsPrefix.ExpectedQualTypeNames["ZVal"] = "::A::B::Y::Z";
GlobalNsPrefix.ExpectedQualTypeNames["GlobalZVal"] = "::Z";
GlobalNsPrefix.ExpectedQualTypeNames["CheckK"] = "D::aStruct";
GlobalNsPrefix.ExpectedQualTypeNames["YZMPtr"] = "::A::B::X ::A::B::Y::Z::*";
GlobalNsPrefix.runOver(
"namespace A {\n"
" namespace B {\n"
" int IntVal;\n"
" bool BoolVal;\n"
" struct X {};\n"
" X XVal;\n"
" template <typename T> class CCC { };\n"
" template <typename T>\n"
" using Alias = CCC<T>;\n"
" Alias<int> IntAliasVal;\n"
" struct Y { struct Z { X YZIPtr; }; };\n"
" Y::Z ZVal;\n"
" X Y::Z::*YZMPtr;\n"
" }\n"
"}\n"
"struct Z {};\n"
"Z GlobalZVal;\n"
"namespace {\n"
" namespace D {\n"
" namespace {\n"
" class aStruct {};\n"
" aStruct CheckK;\n"
" }\n"
" }\n"
"}\n"
);
TypeNameVisitor InlineNamespace;
InlineNamespace.ExpectedQualTypeNames["c"] = "B::C";
InlineNamespace.runOver("inline namespace A {\n"
" namespace B {\n"
" class C {};\n"
" }\n"
"}\n"
"using namespace A::B;\n"
"C c;\n",
TypeNameVisitor::Lang_CXX11);
TypeNameVisitor AnonStrucs;
AnonStrucs.ExpectedQualTypeNames["a"] = "short";
AnonStrucs.ExpectedQualTypeNames["un_in_st_1"] =
"union (anonymous struct at input.cc:1:1)::(anonymous union at "
"input.cc:2:27)";
AnonStrucs.ExpectedQualTypeNames["b"] = "short";
AnonStrucs.ExpectedQualTypeNames["un_in_st_2"] =
"union (anonymous struct at input.cc:1:1)::(anonymous union at "
"input.cc:5:27)";
AnonStrucs.ExpectedQualTypeNames["anon_st"] =
"struct (anonymous struct at input.cc:1:1)";
AnonStrucs.runOver(R"(struct {
union {
short a;
} un_in_st_1;
union {
short b;
} un_in_st_2;
} anon_st;)");
}
} // end anonymous namespace
|
// slam3d - Frontend for graph-based SLAM
// Copyright (C) 2017 S. Kasperski
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef SLAM_MAPPER_HPP
#define SLAM_MAPPER_HPP
#include "Sensor.hpp"
#include "PoseSensor.hpp"
#include "Graph.hpp"
namespace slam3d
{
class Mapper
{
public:
Mapper(Graph* graph, Logger* log);
~Mapper();
/**
* @brief Access to the graph structure.
* @return graph
*/
Graph* getGraph() { return mGraph; }
/**
* @brief Register a pose sensor to create spatial constraints.
* @details For each node that is added by a registered sensor, each
* registered pose sensor will be triggered to create additional edges
	 * in the graph, e.g. an odometry sensor will add an edge between the last
* and the new vertex holding the odometry data.
* @param s pose sensor to be registered for mapping
*/
void registerPoseSensor(PoseSensor* s);
/**
* @brief Register a sensor, so its data can be added to the graph.
* @details Multiple sensors can be used, but in this case at least one pose
* sensor is required for the mapping to work correctly. Matching is currently
* done only between measurements of the same sensor.
* @param s sensor to be registered for mapping
*/
void registerSensor(Sensor* s);
/**
* @brief Add a new measurement to the graph.
* @details Creates a new node in the graph, adds the given measurement to it
* and calls each registered PoseSensor to create spatial constraints.
* @param m pointer to a new measurement
* @return id of the newly added vertex
*/
IdType addMeasurement(Measurement::Ptr m);
/**
* @brief Add a new measurement from another robot.
* @details The new measurement is added to the graph and directly
	 * linked to the measurement with the given uuid. This ensures that
	 * the graph stays connected even when external measurements cannot
	 * be linked to local ones.
* @param measurement pointer to a new measurement
* @param source_uuid uuid of another measurement
* @param twc transform between measurement and source
* @param sensor name of sensor that created the constraint (not the measurement!)
*/
virtual void addExternalMeasurement(Measurement::Ptr measurement,
boost::uuids::uuid source_uuid,
const TransformWithCovariance& twc,
const std::string& sensor);
/**
* @brief Add a constraint from another robot between two measurements.
* @param source uuid of a measurement
* @param target uuid of a measurement
* @param twc transform from source to target
* @param sensor name of sensor that created the constraint
*/
void addExternalConstraint(boost::uuids::uuid source,
boost::uuids::uuid target,
const TransformWithCovariance& twc,
const std::string& sensor);
/**
* @brief Get the current pose of the robot within the generated map.
* @details The pose is updated at least whenever a new node is added.
* @return current robot pose in map coordinates
*/
virtual Transform getCurrentPose();
/**
	 * @brief Get the last vertex that was locally added to the graph.
* @details This will not return external vertices from other robots.
* @return last added vertex
*/
virtual const VertexObject& getLastVertex() const;
protected:
SensorList mSensors;
PoseSensorList mPoseSensors;
Logger* mLogger;
Graph* mGraph;
IdType mLastIndex;
};
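	// Usage sketch (illustrative only; the concrete Graph, Logger and Sensor
	// instances come from elsewhere in slam3d, and the variable names below
	// are assumptions):
	//
	//   Mapper mapper(graph, logger);
	//   mapper.registerSensor(&laserSensor);       // hypothetical Sensor
	//   mapper.registerPoseSensor(&odometry);      // hypothetical PoseSensor
	//   IdType id = mapper.addMeasurement(scan);   // scan is a Measurement::Ptr
	//   Transform pose = mapper.getCurrentPose();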
}
#endif
|
#include "biggles/observation.hpp"
#include "biggles/detail/random.hpp"
#include "biggles/mh_moves/mh_moves.hpp"
#include "biggles/mh_moves/utility.hpp"
#include "biggles/track.hpp"
#include "biggles/tracker.hpp"
#include "biggles/kalman_filter.hpp"
#include "biggles/simulate.hpp"
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <algorithm>
#include <numeric>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <limits>
#include <string>
// include this last to stop pre-processor macros breaking things
extern "C" {
#include <ccan/tap/tap.h>
}
using namespace biggles;
typedef boost::shared_ptr< const kalman_filter > shared_const_kalman_filter_ptr;
std::ostream& operator<<(std::ostream& os, const kalman_filter::state_vector& state) {
os << boost::format("[% .4f, % .4f, % .4f, % .4f]") % state[0] % state[1] % state[2] % state[3];
return os;
}
typedef std::deque<float>::const_iterator data_iter;
// Mean and population standard deviation of the values in [b, e).
std::pair<float, float> mean_stdev(data_iter b, data_iter e) {
float sum = std::accumulate(b, e, 0.0f);
float mean = sum / std::distance(b, e);
float sq_sum = std::inner_product(b, e, b, 0.0f);
float stdev = std::sqrt(sq_sum / std::distance(b, e) - mean * mean);
return std::make_pair(mean, stdev);
}
// Maximum distance of any observation from the straight line joining the
// first and the last observation of the collection.
float max_deviation_from_line( const observation_collection &oc) {
time_stamp start = oc.first_time_stamp();
time_stamp end = oc.last_time_stamp();
float x_off = x(*oc.begin());
float y_off = y(*oc.begin());
float delta_x = (x(oc.back()) - x_off)/float(end-start-1);
float delta_y = (y(oc.back()) - y_off)/float(end-start-1);
float result = 0.f;
for (time_stamp ts = start; ts != end; ++ts) {
observation_collection::const_range obs_at_t = oc.at_time_stamp(ts);
float line_x = x_off + (ts - start) * delta_x;
float line_y = y_off + (ts - start) * delta_y;
for (; obs_at_t.first != obs_at_t.second; ++(obs_at_t.first)) {
float diff = hypotf(line_x - x(*obs_at_t.first), line_y - y(*obs_at_t.first));
if (diff > result) result = diff;
}
}
return result;
}
shared_const_track_ptr generate_rotating_track(int duration) {
const float delta_angle = 2.0f*3.1415f/float(duration);
observation_collection oc;
for (time_stamp t = 0; t < duration; ++t) {
float angle = t*delta_angle;
oc.insert(new_obs(std::cos(angle), std::sin(angle), t));
}
return shared_const_track_ptr(new track(0, duration, oc.begin(), oc.end(), 1.0f));
}
shared_const_track_ptr generate_linear_track(int duration, float dx, float dy) {
observation_collection oc;
for (time_stamp t = 0; t < duration; ++t) oc.insert(new_obs( t*dx, t*dy, t));
return shared_const_track_ptr(new track(0, duration, oc.begin(), oc.end(), 1.0f));
}
shared_const_track_ptr generate_linear_track(
time_stamp begin_ts, time_stamp end_ts, float x0, float y0, float dx, float dy, float loc_err)
{
observation_collection oc;
for (time_stamp ts = begin_ts; ts < end_ts; ++ts) oc.insert(new_obs(
x0 + (ts - begin_ts)*dx + sampling::sample_normal() * loc_err,
y0 + (ts - begin_ts)*dy + sampling::sample_normal() * loc_err,
ts));
return shared_const_track_ptr(new track(begin_ts, end_ts, oc.begin(), oc.end(), 1.0f));
}
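// For example, generate_linear_track(0, 50, 0.f, 0.f, 1.f, 0.f, 0.3f) (as
// used in test3) produces a 50-frame track moving one unit per frame along
// x with Gaussian localization noise of standard deviation 0.3.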
boost::shared_ptr<const observation_collection> generate_observations(
time_stamp begin_ts, time_stamp end_ts, float x0, float y0, float stdev, float lambda)
{
boost::shared_ptr<observation_collection> oc_ptr(new observation_collection);
for (time_stamp ts = begin_ts; ts < end_ts; ++ts)
{
int num_obs = sampling::poisson(lambda);
for (int i = 0; i < num_obs; ++i) {
oc_ptr->insert(
new_obs(x0 + sampling::sample_normal() * stdev, y0 + sampling::sample_normal() * stdev, ts));
}
}
return oc_ptr;
}
void print_kalman_filter_states(const kalman_filter::states_and_cov_deque& sc) {
kalman_filter::states_and_cov_deque::const_iterator sc_iter;
int i = 0;
for (sc_iter = sc.begin(); sc_iter != sc.end(); ++sc_iter)
std::cout
<< boost::format("%3d: ") % i++
<< sc_iter->first
<< ", "
<< boost::format("v = % .4f") % hypotf(sc_iter->first[1], sc_iter->first[3])
<< ", "
<< boost::format("log(||C||) = % .4f") % logf(sc_iter->second.determinant())
<< std::endl;
}
std::string print_parameters(const model::parameters& params) {
std::stringstream para_ss;
para_ss << boost::format("b = %.2f, c = %.2f, o = %.2f%%, s = %.2f%%, cr = %.2f")
% model::mean_new_tracks_per_frame(params)
% model::mean_false_observations_per_frame(params)
% (model::generate_observation_probability(params) * 100.f)
% (model::frame_to_frame_survival_probability(params) * 100.f)
% model::constraint_radius(params)
<< ", "
<< boost::format("R = ((%.2f, %.2f), (%.2f, %.2f))")
% model::observation_error_covariance(params)(0, 0)
% model::observation_error_covariance(params)(0, 1)
% model::observation_error_covariance(params)(1, 0)
% model::observation_error_covariance(params)(1, 1)
;
return para_ss.str();
}
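// Random-restart search for model parameters: repeatedly sample parameters
// conditioned on the fixed partition and keep the sample with the highest
// log partition density.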
float find_good_parameters(partition_ptr_t& const_partition, model::parameters& good_parameters, const int nloops) {
    float best_pdf = -std::numeric_limits<float>::max();
model::parameters target_parameters;
model::process_noise_covariance(target_parameters) = detail::initQ();
for (int i = 0; i < nloops; ++i) {
sample_model_parameters_given_partition(*const_partition, target_parameters);
float log_pdf = model::log_partition_given_parameters_and_data_density(*const_partition, target_parameters);
if (log_pdf > best_pdf) {
best_pdf = log_pdf;
good_parameters = target_parameters;
}
}
return best_pdf;
}
bool generate_parameters(model::parameters& params) {
model::mean_new_tracks_per_frame(params) = 1.2f;
model::mean_false_observations_per_frame(params) = 0.001f;
model::frame_to_frame_survival_probability(params) = 0.9;
model::generate_observation_probability(params) = 0.9f;
float sigma_R = 0.3f;
model::observation_error_covariance(params) << sigma_R * sigma_R, 0, 0, sigma_R * sigma_R;
model::process_noise_covariance(params) = detail::initQ();
model::constraint_radius(params) = sampling::uniform_real(0, 10);
return true;
}
std::string partition_properties(partition_ptr_t &const_partition) {
const track_collection &tracks(const_partition->tracks());
size_t num_tracks = tracks.size();
track_collection::const_iterator tr_iter;
float avg_track_duration = 0.0f;
float avg_track_size = 0.0f;
for (tr_iter = tracks.begin(); tr_iter != tracks.end(); ++tr_iter) {
avg_track_duration += float((*tr_iter)->duration());
avg_track_size += float((*tr_iter)->size());
}
avg_track_duration /= float(num_tracks);
avg_track_size /= float(num_tracks);
float o_obs = avg_track_size/avg_track_duration;
float b_obs = float(num_tracks)/float(const_partition->duration());
float c_obs = float(const_partition->clutter().size())/float(const_partition->duration());
float s_obs = 1.f - 1.f/avg_track_duration;
std::stringstream pp_ss;
pp_ss << boost::format("num.tracks = %d, avg.length = %.1f, avg.obs.count = %.1f, "
"observed: b = %.2f, c = %.2f, o = %.2f, s = %.2f")
% num_tracks % avg_track_duration % avg_track_size % b_obs % c_obs % o_obs % s_obs
;
return pp_ss.str();
}
int plot_tracks(const track_collection &tracks, const model::parameters &const_parameters) {
track_collection::const_iterator tr_iter;
for (tr_iter = tracks.begin(); tr_iter != tracks.end(); ++tr_iter) {
const track & track0(**tr_iter);
shared_const_kalman_filter_ptr kf = track0.make_kalman_filter(const_parameters);
float log_posterior = track0.log_posterior(const_parameters);
const kalman_filter::states_and_cov_deque & sncs(kf->corrections());
const observation_collection &oc(track0.observations());
std::deque <float> diffs;
time_stamp ts=track0.first_time_stamp();
std::stringstream diff_ss;
std::stringstream cov_ss;
std::stringstream track_ss;
diff_ss << "| ";
track_ss << "| ";
track_ss << std::string(ts, ' ');
diff_ss << std::string(ts, ' ');
cov_ss << "log(cov) = ";
for (size_t i=0; i < sncs.size(); ++i, ++ts) {
kalman_filter::state_vector state = sncs[i].first;
kalman_filter::covariance_matrix cov = sncs[i].second;
observation_collection::const_range obs_at_t(oc.at_time_stamp(ts));
cov_ss << boost::format(" %.1f") % log10f(cov.determinant());
if (obs_at_t.first != obs_at_t.second) {
observation obs = *obs_at_t.first;
float diff = hypotf(x(obs)-state[0], y(obs)-state[2]);
diff_ss << boost::format("%d") % int(diff);
diffs.push_back(diff);
track_ss << "O";
}
else {
diff_ss << "_";
track_ss << "_";
}
}
diag("log(PDF) = %8.2f %s", log_posterior, track_ss.str().c_str());
diag(" %s", diff_ss.str().c_str());
diag("%s", cov_ss.str().c_str());
float max_diff = *std::max_element(diffs.begin(), diffs.end());
float radius = track0.radius();
float max_dev = max_deviation_from_line(oc);
if (max_diff >= max_dev or max_dev >= radius) {
//diag("max diff = %.2f, dev from line = %.2f, radius = %.2f", max_diff, max_dev, radius);
//diag("%s", diff_ss.str().c_str());
//diag("length = %ld, observations = %.2f%%", track0.duration(), 100.f*float(track0.size())/float(track0.duration()));
}
}
return 0;
}
int plot_states_of_tracks(const track_collection &tracks, const model::parameters &const_parameters) {
track_collection::const_iterator tr_iter;
for (tr_iter = tracks.begin(); tr_iter != tracks.end(); ++tr_iter) {
const track& track0(**tr_iter);
shared_const_kalman_filter_ptr kf = track0.make_kalman_filter(const_parameters);
diag("track [%ld, %ld]", track0.first_time_stamp(), track0.last_time_stamp());
print_kalman_filter_states(kf->corrections());
}
return 0;
}
struct olink {
observation to, from;
olink(const observation &t, const observation &f) : to(t), from(f) {}
bool operator<(const olink &o) const { return to < o.to or (to == o.to and from < o.from); }
};
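// test1() uses olink to count how often each directed (from -> to)
// observation pair appears across the sampled partitions and then prints
// per-time-stamp link statistics.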
int test1() {
const int total = 1;
const time_stamp last_ts(80);
boost::shared_ptr<const observation_collection> clutter_obs(generate_observations(0, last_ts, 0.f, 0.f, 1.5f, 3.f));
clutter_ptr clutter(new clutter_t(clutter_obs->begin(), clutter_obs->end()));
boost::shared_ptr<const track_collection> tracks(new track_collection);
partition_ptr_t original_part_ptr(new partition(tracks, clutter));
for (int i=0; i< total; ++i) {
diag(" *** iteration %d *** ", i);
model::parameters params;
model::process_noise_covariance(params) = detail::initQ();
std::map<olink, float> link_count;
generate_parameters(params);
const std::string bsp("\b\b\b\b\b\b");
const int nloops(5000);
diag("number of loops: = %d", nloops);
tracker tracking(params, original_part_ptr);
for(int loop=0; loop<nloops; ++loop) {
//if (loop % 500 == 0) {
// std::clog << bsp << boost::format("%3d%%") % (100*loop/nloops) << std::flush;
//}
tracking();
BOOST_FOREACH(const shared_const_track_ptr& t_p, tracking.last_partition()->tracks()) {
const track tr0(*t_p);
BOOST_ASSERT(tr0.size()>1);
observation_collection::const_iterator from_it = tr0.observations().begin();
observation_collection::const_iterator to_it(from_it);
std::advance(to_it, 1);
for (; to_it != tr0.observations().end(); ++to_it, ++from_it) link_count[olink(*to_it, *from_it)]++;
}
}
//std::clog << bsp;
//
std::map<olink, float>::iterator it = link_count.begin();
std::map<int, float> dist_count;
std::map<int, float> from_count;
std::map<int, float> to_count;
std::map<int, float> dist_occur;
std::map<int, float> from_occur;
std::map<int, float> to_occur;
for (; it != link_count.end(); ++it) {
to_count[t(it->first.to)] += it->second;
dist_count[t(it->first.to) - t(it->first.from)] += it->second;
from_count[t(it->first.from)] += it->second;
to_occur[t(it->first.to)] ++;
dist_occur[t(it->first.to) - t(it->first.from)] ++;
from_occur[t(it->first.from)] ++;
}
std::map<int, float>::iterator fp;
diag("distance");
for (int i = 0; i < int(last_ts); ++i) {
float d_occur = dist_occur[i] == 0.f ? 1.f : float(nloops) * dist_occur[i];
float f_occur = from_occur[i] == 0.f ? 1.f : float(nloops) * from_occur[i];
float t_occur = to_occur[i] == 0.f ? 1.f : float(nloops) * to_occur[i];
std::stringstream sstr;
sstr
<< boost::format("%2d => %5.2f%%, %5.2f%%, %5.2f%%")
% i
% (dist_count[i]*100.f/ d_occur)
% (from_count[i]*100.f/ f_occur)
% ( to_count[i]*100.f/ t_occur)
;
diag("%s", sstr.str().c_str());
}
}
return 0;
}
int test2() {
observation_collection t1_obs;
//observation_collection cl_obs;
t1_obs.insert(new_obs(0, 0, 0));
t1_obs.insert(new_obs(0, 0, 1));
t1_obs.insert(new_obs(0, 0, 2));
t1_obs.insert(new_obs(0, 0, 3));
track track1(0, 5, t1_obs.begin(), t1_obs.end(), 1.0);
/*
cl_obs.insert(new_obs(0, 0, 4));
cl_obs.insert(new_obs(1, 1, 5));
cl_obs.insert(new_obs(1, 1, 6));
cl_obs.insert(new_obs(1, 1, 7));
cl_obs.insert(new_obs(1, 1, 8));
cl_obs.insert(new_obs(1, 1, 9));
boost::shared_ptr<track_collection> tracks(new track_collection());
tracks->insert(track1);
boost::shared_ptr<const observation_collection> clutter(new observation_collection(cl_obs.begin(), cl_obs.end()));
const partition original_partition(tracks, clutter);
*/
model::parameters params;
model::mean_new_tracks_per_frame(params) = 2.f;
model::mean_false_observations_per_frame(params) = 1.f;
model::frame_to_frame_survival_probability(params) = 0.95f;
model::generate_observation_probability(params) = 0.9f;
model::constraint_radius(params) = 100.0f;
model::observation_error_covariance(params) << 0.001f, 0.0f, 0.0f, 0.001f;
model::process_noise_covariance(params) = detail::initQ();
print_kalman_filter_states( track1.make_kalman_filter(params)->corrections() );
diag("forward:");
print_kalman_filter_states( track1.make_kalman_filter(params)->predictions() );
diag("backward");
print_kalman_filter_states( generate_rotating_track(20)->make_kalman_filter(params)->corrections() );
diag("forward");
print_kalman_filter_states( generate_rotating_track(20)->make_kalman_filter(params)->predictions() );
diag("backward linear:");
print_kalman_filter_states( generate_linear_track(40, 1.f, 0.f)->make_kalman_filter(params)->corrections() );
diag("forward linear:");
print_kalman_filter_states( generate_linear_track(40, 1.f, 0.f)->make_kalman_filter(params)->predictions() );
return 0;
}
int test3() {
const float loc_err(.3f);
std::deque<float> sing_pdf;
for (int i=0; i<1; ++i) {
boost::shared_ptr<track_collection> tracks(new track_collection());
tracks->insert(*generate_linear_track(0, 50, 0.f, 0.f, 1.f, 0.f, loc_err));
clutter_ptr clutter(new clutter_t());
partition_ptr_t original_part_ptr(new partition(tracks, clutter));
model::parameters params;
model::process_noise_covariance(params) = detail::initQ();
float log_pdf = find_good_parameters(original_part_ptr, params, 1000);
model::observation_error_covariance(params) << loc_err * loc_err, 0, 0, loc_err * loc_err;
diag("%s, log(PDF)=%.2f", print_parameters(params).c_str(), log_pdf);
plot_tracks(*tracks, params);
plot_states_of_tracks(*tracks, params);
sing_pdf.push_back(log_pdf);
}
std::deque<float> quad_pdf;
for (int i=0; i<1; ++i) {
boost::shared_ptr<track_collection> tracks(new track_collection());
int step = 5;
for (int j=0; j < 50; j+= step)
tracks->insert(*generate_linear_track(j, j+step, float(j), 0.f, 1.f, 0.f, loc_err));
clutter_ptr clutter(new clutter_t());
partition_ptr_t original_part_ptr(new partition(tracks, clutter));
model::parameters params;
model::process_noise_covariance(params) = detail::initQ();
float log_pdf = find_good_parameters(original_part_ptr, params, 1000);
model::observation_error_covariance(params) << loc_err * loc_err, 0, 0, loc_err * loc_err;
diag("%s, log(PDF)=%.2f", print_parameters(params).c_str(), log_pdf);
plot_tracks(*tracks, params);
plot_states_of_tracks(*tracks, params);
quad_pdf.push_back(log_pdf);
}
/*
std::pair<float, float> sing_stat = mean_stdev(sing_pdf.begin(), sing_pdf.end());
std::pair<float, float> quad_stat = mean_stdev(quad_pdf.begin(), quad_pdf.end());
diag("sing %.2f +/- %.2f", sing_stat.first, sing_stat.second);
diag("quad %.2f +/- %.2f", quad_stat.first, quad_stat.second);
*/
return 0;
}
int main(int argc, char** argv) {
    // Optionally seed the PRNG here for reproducible results across runs:
//biggles::detail::seed_prng(0xfacedead);
plan_no_plan();
test1();
ok1(true);
return exit_status();
}
|
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include <grpc/impl/codegen/port_platform.h>
#include "src/core/lib/channel/channelz.h"
#include <grpc/grpc.h>
#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "src/core/lib/channel/channelz_registry.h"
#include "src/core/lib/channel/status_util.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/atomic.h"
#include "src/core/lib/gprpp/host_port.h"
#include "src/core/lib/gprpp/memory.h"
#include "src/core/lib/iomgr/error.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/surface/channel.h"
#include "src/core/lib/surface/server.h"
#include "src/core/lib/transport/connectivity_state.h"
#include "src/core/lib/transport/error_utils.h"
#include "src/core/lib/uri/uri_parser.h"
namespace grpc_core {
namespace channelz {
//
// channel arg code
//
namespace {
void* parent_uuid_copy(void* p) { return p; }
void parent_uuid_destroy(void* /*p*/) {}
int parent_uuid_cmp(void* p1, void* p2) { return GPR_ICMP(p1, p2); }
const grpc_arg_pointer_vtable parent_uuid_vtable = {
parent_uuid_copy, parent_uuid_destroy, parent_uuid_cmp};
} // namespace
grpc_arg MakeParentUuidArg(intptr_t parent_uuid) {
// We would ideally like to store the uuid in an integer argument.
// Unfortunately, that won't work, because intptr_t (the type used for
// uuids) doesn't fit in an int (the type used for integer args).
// So instead, we use a hack to store it as a pointer, because
// intptr_t should be the same size as void*.
static_assert(sizeof(intptr_t) <= sizeof(void*),
"can't fit intptr_t inside of void*");
return grpc_channel_arg_pointer_create(
const_cast<char*>(GRPC_ARG_CHANNELZ_PARENT_UUID),
reinterpret_cast<void*>(parent_uuid), &parent_uuid_vtable);
}
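// Usage sketch (assuming the caller is assembling channel args for a child
// channel; the surrounding plumbing is elided):
//   grpc_arg arg = MakeParentUuidArg(parent_node->uuid());
//   // Append `arg` to the grpc_channel_args used to create the child
//   // channel; the child recovers it via GetParentUuidFromArgs(args).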
intptr_t GetParentUuidFromArgs(const grpc_channel_args& args) {
const grpc_arg* arg =
grpc_channel_args_find(&args, GRPC_ARG_CHANNELZ_PARENT_UUID);
if (arg == nullptr || arg->type != GRPC_ARG_POINTER) return 0;
return reinterpret_cast<intptr_t>(arg->value.pointer.p);
}
//
// BaseNode
//
BaseNode::BaseNode(EntityType type, std::string name)
: type_(type), uuid_(-1), name_(std::move(name)) {
// The registry will set uuid_ under its lock.
ChannelzRegistry::Register(this);
}
BaseNode::~BaseNode() { ChannelzRegistry::Unregister(uuid_); }
std::string BaseNode::RenderJsonString() {
Json json = RenderJson();
return json.Dump();
}
//
// CallCountingHelper
//
CallCountingHelper::CallCountingHelper() {
num_cores_ = GPR_MAX(1, gpr_cpu_num_cores());
per_cpu_counter_data_storage_.reserve(num_cores_);
for (size_t i = 0; i < num_cores_; ++i) {
per_cpu_counter_data_storage_.emplace_back();
}
}
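// Design note: call counters are sharded per CPU core so that concurrent
// calls running on different cores do not contend on one cache line;
// CollectData() later folds the shards into a single CounterData snapshot.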
void CallCountingHelper::RecordCallStarted() {
AtomicCounterData& data =
per_cpu_counter_data_storage_[ExecCtx::Get()->starting_cpu()];
data.calls_started.FetchAdd(1, MemoryOrder::RELAXED);
data.last_call_started_cycle.Store(gpr_get_cycle_counter(),
MemoryOrder::RELAXED);
}
void CallCountingHelper::RecordCallFailed() {
per_cpu_counter_data_storage_[ExecCtx::Get()->starting_cpu()]
.calls_failed.FetchAdd(1, MemoryOrder::RELAXED);
}
void CallCountingHelper::RecordCallSucceeded() {
per_cpu_counter_data_storage_[ExecCtx::Get()->starting_cpu()]
.calls_succeeded.FetchAdd(1, MemoryOrder::RELAXED);
}
void CallCountingHelper::CollectData(CounterData* out) {
for (size_t core = 0; core < num_cores_; ++core) {
AtomicCounterData& data = per_cpu_counter_data_storage_[core];
out->calls_started += data.calls_started.Load(MemoryOrder::RELAXED);
out->calls_succeeded +=
per_cpu_counter_data_storage_[core].calls_succeeded.Load(
MemoryOrder::RELAXED);
out->calls_failed += per_cpu_counter_data_storage_[core].calls_failed.Load(
MemoryOrder::RELAXED);
const gpr_cycle_counter last_call =
per_cpu_counter_data_storage_[core].last_call_started_cycle.Load(
MemoryOrder::RELAXED);
if (last_call > out->last_call_started_cycle) {
out->last_call_started_cycle = last_call;
}
}
}
void CallCountingHelper::PopulateCallCounts(Json::Object* object) {
CounterData data;
CollectData(&data);
if (data.calls_started != 0) {
(*object)["callsStarted"] = std::to_string(data.calls_started);
gpr_timespec ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(data.last_call_started_cycle),
GPR_CLOCK_REALTIME);
char* ts_str = gpr_format_timespec(ts);
(*object)["lastCallStartedTimestamp"] = ts_str;
gpr_free(ts_str);
}
if (data.calls_succeeded != 0) {
(*object)["callsSucceeded"] = std::to_string(data.calls_succeeded);
}
if (data.calls_failed) {
(*object)["callsFailed"] = std::to_string(data.calls_failed);
}
}
//
// ChannelNode
//
ChannelNode::ChannelNode(std::string target, size_t channel_tracer_max_nodes,
intptr_t parent_uuid)
: BaseNode(parent_uuid == 0 ? EntityType::kTopLevelChannel
: EntityType::kInternalChannel,
target),
target_(std::move(target)),
trace_(channel_tracer_max_nodes),
parent_uuid_(parent_uuid) {}
const char* ChannelNode::GetChannelConnectivityStateChangeString(
grpc_connectivity_state state) {
switch (state) {
case GRPC_CHANNEL_IDLE:
return "Channel state change to IDLE";
case GRPC_CHANNEL_CONNECTING:
return "Channel state change to CONNECTING";
case GRPC_CHANNEL_READY:
return "Channel state change to READY";
case GRPC_CHANNEL_TRANSIENT_FAILURE:
return "Channel state change to TRANSIENT_FAILURE";
case GRPC_CHANNEL_SHUTDOWN:
return "Channel state change to SHUTDOWN";
}
GPR_UNREACHABLE_CODE(return "UNKNOWN");
}
Json ChannelNode::RenderJson() {
Json::Object data = {
{"target", target_},
};
// Connectivity state.
// If low-order bit is on, then the field is set.
int state_field = connectivity_state_.Load(MemoryOrder::RELAXED);
if ((state_field & 1) != 0) {
grpc_connectivity_state state =
static_cast<grpc_connectivity_state>(state_field >> 1);
data["state"] = Json::Object{
{"state", ConnectivityStateName(state)},
};
}
// Fill in the channel trace if applicable.
Json trace_json = trace_.RenderJson();
if (trace_json.type() != Json::Type::JSON_NULL) {
data["trace"] = std::move(trace_json);
}
// Ask CallCountingHelper to populate call count data.
call_counter_.PopulateCallCounts(&data);
// Construct outer object.
Json::Object json = {
{"ref",
Json::Object{
{"channelId", std::to_string(uuid())},
}},
{"data", std::move(data)},
};
// Template method. Child classes may override this to add their specific
// functionality.
PopulateChildRefs(&json);
return json;
}
void ChannelNode::PopulateChildRefs(Json::Object* json) {
MutexLock lock(&child_mu_);
if (!child_subchannels_.empty()) {
Json::Array array;
for (const auto& p : child_subchannels_) {
array.emplace_back(Json::Object{
{"subchannelId", std::to_string(p.first)},
});
}
(*json)["subchannelRef"] = std::move(array);
}
if (!child_channels_.empty()) {
Json::Array array;
for (const auto& p : child_channels_) {
array.emplace_back(Json::Object{
{"channelId", std::to_string(p.first)},
});
}
(*json)["channelRef"] = std::move(array);
}
}
void ChannelNode::SetConnectivityState(grpc_connectivity_state state) {
// Store with low-order bit set to indicate that the field is set.
int state_field = (state << 1) + 1;
connectivity_state_.Store(state_field, MemoryOrder::RELAXED);
}
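// Encoding example: for state == GRPC_CHANNEL_READY (value 2) the stored
// field is (2 << 1) + 1 == 5; RenderJson() tests bit 0 to see whether the
// state was ever set and recovers the enum with state_field >> 1.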
void ChannelNode::AddChildChannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_channels_.insert(std::make_pair(child_uuid, true));
}
void ChannelNode::RemoveChildChannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_channels_.erase(child_uuid);
}
void ChannelNode::AddChildSubchannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_subchannels_.insert(std::make_pair(child_uuid, true));
}
void ChannelNode::RemoveChildSubchannel(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_subchannels_.erase(child_uuid);
}
//
// ServerNode
//
ServerNode::ServerNode(grpc_server* /*server*/, size_t channel_tracer_max_nodes)
: BaseNode(EntityType::kServer, ""), trace_(channel_tracer_max_nodes) {}
ServerNode::~ServerNode() {}
void ServerNode::AddChildSocket(RefCountedPtr<SocketNode> node) {
MutexLock lock(&child_mu_);
child_sockets_.insert(std::make_pair(node->uuid(), std::move(node)));
}
void ServerNode::RemoveChildSocket(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_sockets_.erase(child_uuid);
}
void ServerNode::AddChildListenSocket(RefCountedPtr<ListenSocketNode> node) {
MutexLock lock(&child_mu_);
child_listen_sockets_.insert(std::make_pair(node->uuid(), std::move(node)));
}
void ServerNode::RemoveChildListenSocket(intptr_t child_uuid) {
MutexLock lock(&child_mu_);
child_listen_sockets_.erase(child_uuid);
}
std::string ServerNode::RenderServerSockets(intptr_t start_socket_id,
intptr_t max_results) {
  // If the user does not set max_results, default to 500.
size_t pagination_limit = max_results == 0 ? 500 : max_results;
Json::Object object;
{
MutexLock lock(&child_mu_);
size_t sockets_rendered = 0;
if (!child_sockets_.empty()) {
// Create list of socket refs.
Json::Array array;
const size_t limit = GPR_MIN(child_sockets_.size(), pagination_limit);
for (auto it = child_sockets_.lower_bound(start_socket_id);
it != child_sockets_.end() && sockets_rendered < limit;
++it, ++sockets_rendered) {
array.emplace_back(Json::Object{
{"socketId", std::to_string(it->first)},
{"name", it->second->name()},
});
}
object["socketRef"] = std::move(array);
}
if (sockets_rendered == child_sockets_.size()) object["end"] = true;
}
Json json = std::move(object);
return json.Dump();
}
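// Pagination sketch: a client walks the socket list by passing the largest
// socket id it has seen plus one as start_socket_id, repeating until the
// rendered JSON contains "end": true.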
Json ServerNode::RenderJson() {
Json::Object data;
// Fill in the channel trace if applicable.
Json trace_json = trace_.RenderJson();
if (trace_json.type() != Json::Type::JSON_NULL) {
data["trace"] = std::move(trace_json);
}
// Ask CallCountingHelper to populate call count data.
call_counter_.PopulateCallCounts(&data);
// Construct top-level object.
Json::Object object = {
{"ref",
Json::Object{
{"serverId", std::to_string(uuid())},
}},
{"data", std::move(data)},
};
// Render listen sockets.
{
MutexLock lock(&child_mu_);
if (!child_listen_sockets_.empty()) {
Json::Array array;
for (const auto& it : child_listen_sockets_) {
array.emplace_back(Json::Object{
{"socketId", std::to_string(it.first)},
{"name", it.second->name()},
});
}
object["listenSocket"] = std::move(array);
}
}
return object;
}
//
// SocketNode
//
namespace {
void PopulateSocketAddressJson(Json::Object* json, const char* name,
const char* addr_str) {
if (addr_str == nullptr) return;
Json::Object data;
grpc_uri* uri = grpc_uri_parse(addr_str, true);
if ((uri != nullptr) && ((strcmp(uri->scheme, "ipv4") == 0) ||
(strcmp(uri->scheme, "ipv6") == 0))) {
const char* host_port = uri->path;
if (*host_port == '/') ++host_port;
grpc_core::UniquePtr<char> host;
grpc_core::UniquePtr<char> port;
GPR_ASSERT(SplitHostPort(host_port, &host, &port));
int port_num = -1;
if (port != nullptr) {
port_num = atoi(port.get());
}
char* b64_host =
grpc_base64_encode(host.get(), strlen(host.get()), false, false);
data["tcpip_address"] = Json::Object{
{"port", port_num},
{"ip_address", b64_host},
};
gpr_free(b64_host);
} else if (uri != nullptr && strcmp(uri->scheme, "unix") == 0) {
data["uds_address"] = Json::Object{
{"filename", uri->path},
};
} else {
data["other_address"] = Json::Object{
{"name", addr_str},
};
}
grpc_uri_destroy(uri);
(*json)[name] = std::move(data);
}
} // namespace
SocketNode::SocketNode(std::string local, std::string remote, std::string name)
: BaseNode(EntityType::kSocket, std::move(name)),
local_(std::move(local)),
remote_(std::move(remote)) {}
void SocketNode::RecordStreamStartedFromLocal() {
streams_started_.FetchAdd(1, MemoryOrder::RELAXED);
last_local_stream_created_cycle_.Store(gpr_get_cycle_counter(),
MemoryOrder::RELAXED);
}
void SocketNode::RecordStreamStartedFromRemote() {
streams_started_.FetchAdd(1, MemoryOrder::RELAXED);
last_remote_stream_created_cycle_.Store(gpr_get_cycle_counter(),
MemoryOrder::RELAXED);
}
void SocketNode::RecordMessagesSent(uint32_t num_sent) {
messages_sent_.FetchAdd(num_sent, MemoryOrder::RELAXED);
last_message_sent_cycle_.Store(gpr_get_cycle_counter(), MemoryOrder::RELAXED);
}
void SocketNode::RecordMessageReceived() {
messages_received_.FetchAdd(1, MemoryOrder::RELAXED);
last_message_received_cycle_.Store(gpr_get_cycle_counter(),
MemoryOrder::RELAXED);
}
Json SocketNode::RenderJson() {
// Create and fill the data child.
Json::Object data;
gpr_timespec ts;
int64_t streams_started = streams_started_.Load(MemoryOrder::RELAXED);
if (streams_started != 0) {
data["streamsStarted"] = std::to_string(streams_started);
gpr_cycle_counter last_local_stream_created_cycle =
last_local_stream_created_cycle_.Load(MemoryOrder::RELAXED);
if (last_local_stream_created_cycle != 0) {
ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(last_local_stream_created_cycle),
GPR_CLOCK_REALTIME);
char* ts_str = gpr_format_timespec(ts);
data["lastLocalStreamCreatedTimestamp"] = ts_str;
gpr_free(ts_str);
}
gpr_cycle_counter last_remote_stream_created_cycle =
last_remote_stream_created_cycle_.Load(MemoryOrder::RELAXED);
if (last_remote_stream_created_cycle != 0) {
ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(last_remote_stream_created_cycle),
GPR_CLOCK_REALTIME);
char* ts_str = gpr_format_timespec(ts);
data["lastRemoteStreamCreatedTimestamp"] = ts_str;
gpr_free(ts_str);
}
}
int64_t streams_succeeded = streams_succeeded_.Load(MemoryOrder::RELAXED);
if (streams_succeeded != 0) {
data["streamsSucceeded"] = std::to_string(streams_succeeded);
}
int64_t streams_failed = streams_failed_.Load(MemoryOrder::RELAXED);
if (streams_failed != 0) {
data["streamsFailed"] = std::to_string(streams_failed);
}
int64_t messages_sent = messages_sent_.Load(MemoryOrder::RELAXED);
if (messages_sent != 0) {
data["messagesSent"] = std::to_string(messages_sent);
ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(
last_message_sent_cycle_.Load(MemoryOrder::RELAXED)),
GPR_CLOCK_REALTIME);
char* ts_str = gpr_format_timespec(ts);
data["lastMessageSentTimestamp"] = ts_str;
gpr_free(ts_str);
}
int64_t messages_received = messages_received_.Load(MemoryOrder::RELAXED);
if (messages_received != 0) {
data["messagesReceived"] = std::to_string(messages_received);
ts = gpr_convert_clock_type(
gpr_cycle_counter_to_time(
last_message_received_cycle_.Load(MemoryOrder::RELAXED)),
GPR_CLOCK_REALTIME);
char* ts_str = gpr_format_timespec(ts);
data["lastMessageReceivedTimestamp"] = ts_str;
gpr_free(ts_str);
}
int64_t keepalives_sent = keepalives_sent_.Load(MemoryOrder::RELAXED);
if (keepalives_sent != 0) {
data["keepAlivesSent"] = std::to_string(keepalives_sent);
}
// Create and fill the parent object.
Json::Object object = {
{"ref",
Json::Object{
{"socketId", std::to_string(uuid())},
{"name", name()},
}},
{"data", std::move(data)},
};
PopulateSocketAddressJson(&object, "remote", remote_.c_str());
PopulateSocketAddressJson(&object, "local", local_.c_str());
return object;
}
//
// ListenSocketNode
//
ListenSocketNode::ListenSocketNode(std::string local_addr, std::string name)
: BaseNode(EntityType::kSocket, std::move(name)),
local_addr_(std::move(local_addr)) {}
Json ListenSocketNode::RenderJson() {
Json::Object object = {
{"ref",
Json::Object{
{"socketId", std::to_string(uuid())},
{"name", name()},
}},
};
PopulateSocketAddressJson(&object, "local", local_addr_.c_str());
return object;
}
} // namespace channelz
} // namespace grpc_core
|
// Copyright (c) 2017-2018 Hartmut Kaiser
// 2018 R. Tohid
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if !defined(PHYLANX_PRIMITIVES_PRIMITIVES_HPP)
#define PHYLANX_PRIMITIVES_PRIMITIVES_HPP
#include <phylanx/execution_tree/primitives/access_argument.hpp>
#include <phylanx/execution_tree/primitives/add_dimension.hpp>
#include <phylanx/execution_tree/primitives/add_operation.hpp>
#include <phylanx/execution_tree/primitives/all_operation.hpp>
#include <phylanx/execution_tree/primitives/and_operation.hpp>
#include <phylanx/execution_tree/primitives/any_operation.hpp>
#include <phylanx/execution_tree/primitives/apply.hpp>
#include <phylanx/execution_tree/primitives/argmax.hpp>
#include <phylanx/execution_tree/primitives/argmin.hpp>
#include <phylanx/execution_tree/primitives/block_operation.hpp>
#include <phylanx/execution_tree/primitives/car_cdr_operation.hpp>
#include <phylanx/execution_tree/primitives/column_set.hpp>
#include <phylanx/execution_tree/primitives/column_slicing.hpp>
#include <phylanx/execution_tree/primitives/console_output.hpp>
#include <phylanx/execution_tree/primitives/constant.hpp>
#include <phylanx/execution_tree/primitives/cross_operation.hpp>
#include <phylanx/execution_tree/primitives/debug_output.hpp>
#include <phylanx/execution_tree/primitives/define_function.hpp>
#include <phylanx/execution_tree/primitives/define_variable.hpp>
#include <phylanx/execution_tree/primitives/determinant.hpp>
#include <phylanx/execution_tree/primitives/diag_operation.hpp>
#include <phylanx/execution_tree/primitives/div_operation.hpp>
#include <phylanx/execution_tree/primitives/dot_operation.hpp>
#include <phylanx/execution_tree/primitives/enable_tracing.hpp>
#include <phylanx/execution_tree/primitives/equal.hpp>
#include <phylanx/execution_tree/primitives/exponential_operation.hpp>
#include <phylanx/execution_tree/primitives/extract_shape.hpp>
#include <phylanx/execution_tree/primitives/file_read.hpp>
#include <phylanx/execution_tree/primitives/file_read_csv.hpp>
#include <phylanx/execution_tree/primitives/file_read_hdf5.hpp>
#include <phylanx/execution_tree/primitives/file_write.hpp>
#include <phylanx/execution_tree/primitives/file_write_csv.hpp>
#include <phylanx/execution_tree/primitives/file_write_hdf5.hpp>
#include <phylanx/execution_tree/primitives/filter_operation.hpp>
#include <phylanx/execution_tree/primitives/fold_left_operation.hpp>
#include <phylanx/execution_tree/primitives/fold_right_operation.hpp>
#include <phylanx/execution_tree/primitives/for_operation.hpp>
#include <phylanx/execution_tree/primitives/function_reference.hpp>
#include <phylanx/execution_tree/primitives/gradient_operation.hpp>
#include <phylanx/execution_tree/primitives/greater.hpp>
#include <phylanx/execution_tree/primitives/greater_equal.hpp>
#include <phylanx/execution_tree/primitives/hstack_operation.hpp>
#include <phylanx/execution_tree/primitives/identity.hpp>
#include <phylanx/execution_tree/primitives/if_conditional.hpp>
#include <phylanx/execution_tree/primitives/inverse_operation.hpp>
#include <phylanx/execution_tree/primitives/less.hpp>
#include <phylanx/execution_tree/primitives/less_equal.hpp>
#include <phylanx/execution_tree/primitives/linearmatrix.hpp>
#include <phylanx/execution_tree/primitives/linspace.hpp>
#include <phylanx/execution_tree/primitives/make_list.hpp>
#include <phylanx/execution_tree/primitives/map_operation.hpp>
#include <phylanx/execution_tree/primitives/mean_operation.hpp>
#include <phylanx/execution_tree/primitives/mul_operation.hpp>
#include <phylanx/execution_tree/primitives/not_equal.hpp>
#include <phylanx/execution_tree/primitives/or_operation.hpp>
#include <phylanx/execution_tree/primitives/parallel_block_operation.hpp>
#include <phylanx/execution_tree/primitives/power_operation.hpp>
#include <phylanx/execution_tree/primitives/random.hpp>
#include <phylanx/execution_tree/primitives/row_set.hpp>
#include <phylanx/execution_tree/primitives/row_slicing.hpp>
#include <phylanx/execution_tree/primitives/set_operation.hpp>
#include <phylanx/execution_tree/primitives/shuffle_operation.hpp>
#include <phylanx/execution_tree/primitives/slicing_operation.hpp>
#include <phylanx/execution_tree/primitives/square_root_operation.hpp>
#include <phylanx/execution_tree/primitives/store_operation.hpp>
#include <phylanx/execution_tree/primitives/string_output.hpp>
#include <phylanx/execution_tree/primitives/sub_operation.hpp>
#include <phylanx/execution_tree/primitives/sum_operation.hpp>
#include <phylanx/execution_tree/primitives/transpose_operation.hpp>
#include <phylanx/execution_tree/primitives/unary_minus_operation.hpp>
#include <phylanx/execution_tree/primitives/unary_not_operation.hpp>
#include <phylanx/execution_tree/primitives/variable.hpp>
#include <phylanx/execution_tree/primitives/vstack_operation.hpp>
#include <phylanx/execution_tree/primitives/while_operation.hpp>
#include <phylanx/execution_tree/primitives/wrapped_function.hpp>
#include <phylanx/execution_tree/primitives/wrapped_variable.hpp>
#endif
|
#include "daScript/misc/platform.h"
#include "module_builtin.h"
#include "daScript/simulate/simulate_nodes.h"
#include "daScript/simulate/sim_policy.h"
#include "daScript/simulate/aot.h"
#include "daScript/ast/ast_interop.h"
#include "daScript/ast/ast_policy_types.h"
namespace das
{
// string
DEFINE_OP2_EVAL_BASIC_POLICY(char *);
DEFINE_OP2_EVAL_ORDERED_POLICY(char *);
DEFINE_OP2_EVAL_GROUPBYADD_POLICY(char *);
template <typename QQ>
struct cast <EnumStubAny<QQ>> {
static __forceinline struct EnumStubAny<QQ> to(vec4f x) { return prune<EnumStubAny<QQ>,vec4f>::from(x); }
static __forceinline vec4f from ( EnumStubAny<QQ> x ) { return prune<vec4f, EnumStubAny<QQ>>::from(x); }
};
template<typename QQ>
struct SimPolicy<EnumStubAny<QQ>> {
static __forceinline auto to_enum ( vec4f val ) {
return cast<QQ>::to(val);
}
static __forceinline bool Equ ( vec4f a, vec4f b, Context & ) {
return to_enum(a) == to_enum(b);
}
static __forceinline bool NotEqu ( vec4f a, vec4f b, Context & ) {
return to_enum(a) != to_enum(b);
}
};
template <> struct typeName<EnumStub> { constexpr static const char * name() { return "enum"; } };
template <> struct typeName<EnumStub8> { constexpr static const char * name() { return "enum8"; } };
template <> struct typeName<EnumStub16> { constexpr static const char * name() { return "enum16"; } };
IMPLEMENT_OP2_EVAL_BOOL_POLICY(Equ,EnumStub);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(NotEqu,EnumStub);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(Equ,EnumStub8);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(NotEqu,EnumStub8);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(Equ,EnumStub16);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(NotEqu,EnumStub16);
template <>
struct SimPolicy<Func> {
static __forceinline int32_t to_func ( vec4f val ) {
return cast<Func>::to(val).index;
}
static __forceinline bool Equ ( vec4f a, vec4f b, Context & ) {
return to_func(a) == to_func(b);
}
static __forceinline bool NotEqu ( vec4f a, vec4f b, Context & ) {
return to_func(a) != to_func(b);
}
};
IMPLEMENT_OP2_EVAL_BOOL_POLICY(Equ,Func);
IMPLEMENT_OP2_EVAL_BOOL_POLICY(NotEqu,Func);
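// The following SimNodes compare a function or lambda value against a raw
// pointer operand. As their inline comments note, the only meaningful case
// is comparison with null: the values compare equal exactly when both sides
// are null.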
struct Sim_EqFunPtr : SimNode_Op2 {
DAS_BOOL_NODE;
Sim_EqFunPtr ( const LineInfo & at ) : SimNode_Op2(at) {}
virtual SimNode * visit ( SimVisitor & vis ) override {
return visitOp2(vis, "EqFunPtr", sizeof(Func), "Func");
}
__forceinline bool compute ( Context & context ) {
DAS_PROFILE_NODE
auto lv = cast<Func>::to(l->eval(context));
auto rv = r->evalPtr(context);
return !rv && lv.index==0; // they are only equal if both are null
}
};
struct Sim_NEqFunPtr : SimNode_Op2 {
DAS_BOOL_NODE;
Sim_NEqFunPtr ( const LineInfo & at ) : SimNode_Op2(at) {}
virtual SimNode * visit ( SimVisitor & vis ) override {
return visitOp2(vis, "NEqFunPtr", sizeof(Func), "Func");
}
__forceinline bool compute ( Context & context ) {
DAS_PROFILE_NODE
auto lv = cast<Func>::to(l->eval(context));
auto rv = r->evalPtr(context);
return rv || lv.index;
}
};
struct Sim_EqLambdaPtr : SimNode_Op2 {
DAS_BOOL_NODE;
Sim_EqLambdaPtr ( const LineInfo & at ) : SimNode_Op2(at) {}
virtual SimNode * visit ( SimVisitor & vis ) override {
return visitOp2(vis, "EqLambdaPtr", sizeof(Lambda), "Lambda");
}
__forceinline bool compute ( Context & context ) {
DAS_PROFILE_NODE
auto lv = cast<Lambda>::to(l->eval(context));
auto rv = r->evalPtr(context);
return !rv && !lv.capture; // they are only equal if both are null
}
};
struct Sim_NEqLambdaPtr : SimNode_Op2 {
DAS_BOOL_NODE;
Sim_NEqLambdaPtr ( const LineInfo & at ) : SimNode_Op2(at) {}
virtual SimNode * visit ( SimVisitor & vis ) override {
return visitOp2(vis, "NEqLambdaPtr", sizeof(Lambda), "Lambda");
}
__forceinline bool compute ( Context & context ) {
DAS_PROFILE_NODE
auto lv = cast<Lambda>::to(l->eval(context));
auto rv = r->evalPtr(context);
return rv || lv.capture;
}
};
void Module_BuiltIn::addMiscTypes(ModuleLibrary & lib) {
// enum
addFunctionBasic<EnumStub>(*this,lib);
addExtern<DAS_BIND_FUN(enum_to_int)>(*this, lib, "int", SideEffects::none, "int32_t");
addExtern<DAS_BIND_FUN(enum_to_uint)>(*this, lib, "uint", SideEffects::none, "uint32_t");
// enum8
addFunctionBasic<EnumStub8>(*this,lib);
addExtern<DAS_BIND_FUN(enum8_to_int)>(*this, lib, "int", SideEffects::none, "int32_t");
addExtern<DAS_BIND_FUN(enum8_to_uint)>(*this, lib, "uint", SideEffects::none, "uint32_t");
// enum16
addFunctionBasic<EnumStub16>(*this,lib);
addExtern<DAS_BIND_FUN(enum16_to_int)>(*this, lib, "int", SideEffects::none, "int32_t");
addExtern<DAS_BIND_FUN(enum16_to_uint)>(*this, lib, "uint", SideEffects::none, "uint32_t");
// function
addFunctionBasic<Func>(*this,lib);
addFunction( make_smart<BuiltInFn<Sim_EqFunPtr, bool,const Func,const void *>>("==",lib,"==",false) );
addFunction( make_smart<BuiltInFn<Sim_NEqFunPtr,bool,const Func,const void *>>("!=",lib,"!=",false) );
// lambda
addFunction( make_smart<BuiltInFn<Sim_EqLambdaPtr, bool,const Lambda,const void *>>("==",lib,"==",false) );
addFunction( make_smart<BuiltInFn<Sim_NEqLambdaPtr,bool,const Lambda,const void *>>("!=",lib,"!=",false) );
// string
addFunctionBasic<char *>(*this,lib);
addFunctionOrdered<char *>(*this,lib);
addFunctionConcat<char *>(*this,lib);
addFunction ( make_smart<BuiltInFn<SimNode_LexicalCast<int32_t>, char *,int32_t,Context *>> ("string",lib,"das_lexical_cast",false) );
addFunction ( make_smart<BuiltInFn<SimNode_LexicalCast<uint32_t>, char *,uint32_t,Context *>> ("string",lib,"das_lexical_cast",false) );
addFunction ( make_smart<BuiltInFn<SimNode_LexicalCast<int64_t>, char *,int64_t,Context *>> ("string",lib,"das_lexical_cast",false) );
addFunction ( make_smart<BuiltInFn<SimNode_LexicalCast<uint64_t>, char *,uint64_t,Context *>> ("string",lib,"das_lexical_cast",false) );
addFunction ( make_smart<BuiltInFn<SimNode_LexicalCast<float>, char *,float,Context *>> ("string",lib,"das_lexical_cast",false) );
}
}
|
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_USE_DSO
#include <mutex>
#include "hl_dso_loader.h"
/**
 * cudart wrapper: for dynamically loading libcudart.so.
 * When nvcc compiles CUDA kernels, it inserts some
 * built-in runtime routines, which we must provide
 * ourselves if PADDLE_USE_DSO is true. If PADDLE_USE_DSO
 * is false, all of them must be omitted to avoid
 * multiple definitions.
 */
namespace dynload {
extern std::once_flag cudart_dso_flag;
extern void *cudart_dso_handle;
/**
 * The following macro generates a struct (one per
 * function) that dynamically loads the corresponding
 * cuda routine via operator overloading.
 **/
#define DYNAMIC_LOAD_CUDART_WRAP(__name, __type) \
struct DynLoad__##__name { \
template <typename... Args> \
__type operator()(Args... args) { \
typedef __type (*cudartFunc)(Args...); \
std::call_once(cudart_dso_flag, GetCudartDsoHandle, &cudart_dso_handle); \
void *p_##__name = dlsym(cudart_dso_handle, #__name); \
return reinterpret_cast<cudartFunc>(p_##__name)(args...); \
} \
} __name; /* struct DynLoad__##__name */
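/* For reference, DYNAMIC_LOAD_CUDART_WRAP(cudaLaunch, cudaError_t) expands
 * (roughly) to:
 *
 *   struct DynLoad__cudaLaunch {
 *     template <typename... Args>
 *     cudaError_t operator()(Args... args) {
 *       typedef cudaError_t (*cudartFunc)(Args...);
 *       std::call_once(cudart_dso_flag, GetCudartDsoHandle, &cudart_dso_handle);
 *       void *p_cudaLaunch = dlsym(cudart_dso_handle, "cudaLaunch");
 *       return reinterpret_cast<cudartFunc>(p_cudaLaunch)(args...);
 *     }
 *   } cudaLaunch;
 *
 * so each wrapped name becomes a callable object that lazily resolves the
 * real symbol from libcudart.so on first use.
 */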
/* include all needed cuda functions in HPPL */
// clang-format off
#define CUDA_ROUTINE_EACH(__macro) \
__macro(cudaLaunch, cudaError_t) \
__macro(cudaSetupArgument, cudaError_t) \
__macro(cudaConfigureCall, cudaError_t) \
__macro(__cudaRegisterFatBinary, void**) \
__macro(__cudaUnregisterFatBinary, void) \
__macro(__cudaRegisterFunction, void) \
__macro(__cudaRegisterVar, void) \
__macro(__cudaRegisterManagedVar, void) \
__macro(__cudaInitModule, char) \
__macro(__cudaRegisterTexture, void) \
__macro(__cudaRegisterSurface, void)
// clang-format on
CUDA_ROUTINE_EACH(DYNAMIC_LOAD_CUDART_WRAP)
#if CUDART_VERSION >= 7000
DYNAMIC_LOAD_CUDART_WRAP(cudaLaunchKernel, cudaError_t)
#endif
#undef CUDA_ROUTINE_EACH
} /* namespace dynload */
#if CUDART_VERSION >= 7000
__host__ cudaError_t CUDARTAPI cudaLaunchKernel(const void *func,
dim3 gridDim,
dim3 blockDim,
void **args,
size_t sharedMem,
cudaStream_t stream) {
return dynload::cudaLaunchKernel(
func, gridDim, blockDim, args, sharedMem, stream);
}
#endif /* CUDART_VERSION >= 7000 */
__host__ cudaError_t CUDARTAPI cudaLaunch(const void *func) {
return dynload::cudaLaunch(func);
}
__host__ cudaError_t CUDARTAPI cudaSetupArgument(const void *arg,
size_t size,
size_t offset) {
return dynload::cudaSetupArgument(arg, size, offset);
}
__host__ cudaError_t CUDARTAPI cudaConfigureCall(dim3 gridDim,
dim3 blockDim,
size_t sharedMem,
cudaStream_t stream) {
return dynload::cudaConfigureCall(gridDim, blockDim, sharedMem, stream);
}
extern "C" {
void **CUDARTAPI __cudaRegisterFatBinary(void *fatCubin) {
return dynload::__cudaRegisterFatBinary(fatCubin);
}
void CUDARTAPI __cudaUnregisterFatBinary(void **fatCubinHandle) {
return dynload::__cudaUnregisterFatBinary(fatCubinHandle);
}
void CUDARTAPI __cudaRegisterFunction(void **fatCubinHandle,
const char *hostFun,
char *deviceFun,
const char *deviceName,
int thread_limit,
uint3 *tid,
uint3 *bid,
dim3 *bDim,
dim3 *gDim,
int *wSize) {
return dynload::__cudaRegisterFunction(fatCubinHandle,
hostFun,
deviceFun,
deviceName,
thread_limit,
tid,
bid,
bDim,
gDim,
wSize);
}
void CUDARTAPI __cudaRegisterVar(void **fatCubinHandle,
char *hostVar,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global) {
return dynload::__cudaRegisterVar(fatCubinHandle,
hostVar,
deviceAddress,
deviceName,
ext,
size,
constant,
global);
}
extern void CUDARTAPI __cudaRegisterManagedVar(void **fatCubinHandle,
void **hostVarPtrAddress,
char *deviceAddress,
const char *deviceName,
int ext,
int size,
int constant,
int global) {
return dynload::__cudaRegisterManagedVar(fatCubinHandle,
hostVarPtrAddress,
deviceAddress,
deviceName,
ext,
size,
constant,
global);
}
char CUDARTAPI __cudaInitModule(void **fatCubinHandle) {
return dynload::__cudaInitModule(fatCubinHandle);
}
void CUDARTAPI __cudaRegisterTexture(void **fatCubinHandle,
const struct textureReference *hostVar,
const void **deviceAddress,
const char *deviceName,
int dim,
int norm,
int ext) {
return dynload::__cudaRegisterTexture(
fatCubinHandle, hostVar, deviceAddress, deviceName, dim, norm, ext);
}
void CUDARTAPI __cudaRegisterSurface(void **fatCubinHandle,
const struct surfaceReference *hostVar,
const void **deviceAddress,
const char *deviceName,
int dim,
int ext) {
return dynload::__cudaRegisterSurface(
fatCubinHandle, hostVar, deviceAddress, deviceName, dim, ext);
}
} /* extern "C" */
#endif
|
// typedef CStringT<TCHAR, StrTraitATL<TCHAR, ChTraitsCRT<TCHAR>>> CAtlString;
CAtlString str;
str = _T("\t\t ****Soccer is best!");
_tprintf_s(_T("Before: \"%s\"\n"), (LPCTSTR)str);
_tprintf_s(_T("After: \"%s\"\n"), (LPCTSTR)str.TrimLeft(_T("\t *")));
// Output:
// --------------------------
// Before: "		 ****Soccer is best!"
// After: "Soccer is best!"
|
//===-test_unsqueeze_axis_0_popart.cc-----------------------------------------------------------===//
//
// Copyright (C) 2019-2020 Alibaba Group Holding Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
// clang-format off
// Testing CXX Code Gen using ODLA API on popart
// RUN: %halo_compiler -target cxx -o %data_path/test_unsqueeze_axis_0/test_data_set_0/input_0.cc -x onnx -emit-data-as-c %data_path/test_unsqueeze_axis_0/test_data_set_0/input_0.pb
// RUN: %halo_compiler -target cxx -o %data_path/test_unsqueeze_axis_0/test_data_set_0/output_0.cc -x onnx -emit-data-as-c %data_path/test_unsqueeze_axis_0/test_data_set_0/output_0.pb
// RUN: %halo_compiler -target cxx -o %data_path/test_unsqueeze_axis_0/test_data_set_0/input_1.cc -x onnx -emit-data-as-c %data_path/test_unsqueeze_axis_0/test_data_set_0/input_1.pb
// RUN: %halo_compiler -target cxx -batch-size 1 %halo_compile_flags %data_path/test_unsqueeze_axis_0/model.onnx -o %t.cc
// RUN: %cxx -c -fPIC -o %t.o %t.cc -I%odla_path/include
// RUN: %cxx -g %s %t.o %t.bin -I%T -I%odla_path/include -I%unittests_path -I%data_path/test_unsqueeze_axis_0/test_data_set_0 %odla_link %device_link -lodla_popart -o %t_popart.exe -Wno-deprecated-declarations
// RUN: %t_popart.exe 0.0001 0 popart %data_path/test_unsqueeze_axis_0 | FileCheck %s
// CHECK: Result Pass
// clang-format on
// XFAIL: *
#include "test_unsqueeze_axis_0_popart.cc.tmp.main.cc.in"
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This transformation pass prepares for legalization to the TFLite dialect by
// converting operations in TensorFlow dialect into operations that can be
// legalized to TensorFlow Lite dialect with simple replacements. The newly
// created operations are in the TensorFlow dialect if the operation can be
// represented using a TensorFlow op. Otherwise, a TensorFlow Lite dialect op
// is used. For example, TFLite's Conv2D uses the OHWI data format for
// filters, which TensorFlow cannot represent because it requires filters in
// the HWIO data format.
//
// The motivation for preparing for TFLite legalization before the actual
// legalization is to exploit constant-folding opportunities in any newly
// created ops by leveraging constant-folding support for the TensorFlow ops.
// This way TFLite can be used as a serialization format only and optimizations
// do not require access to the TFLite runtime, as the TFLite team requires.
#include <climits>
#include <cstdint>
#include "absl/memory/memory.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "mlir/Analysis/LoopAnalysis.h" // TF:llvm-project
#include "mlir/Dialect/QuantOps/FakeQuantSupport.h" // TF:llvm-project
#include "mlir/Dialect/QuantOps/UniformSupport.h" // TF:llvm-project
#include "mlir/IR/Attributes.h" // TF:llvm-project
#include "mlir/IR/MLIRContext.h" // TF:llvm-project
#include "mlir/IR/PatternMatch.h" // TF:llvm-project
#include "mlir/IR/StandardTypes.h" // TF:llvm-project
#include "mlir/Pass/Pass.h" // TF:llvm-project
#include "mlir/Support/Functional.h" // TF:llvm-project
#include "mlir/Support/LLVM.h" // TF:llvm-project
#include "mlir/Support/LogicalResult.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_utils.h"
#include "tensorflow/compiler/mlir/lite/transforms/dilated_conv.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/lite/transforms/unroll_batch_matmul.h"
#include "tensorflow/compiler/mlir/lite/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/lite/utils/validators.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#define DEBUG_TYPE "tf-tfl-legalization"
namespace mlir {
namespace TFL {
//===----------------------------------------------------------------------===//
// The actual PrepareTF Pass.
//
// TODO(hinsu): Add and use TensorFlow dialect ops for the ops created in this
// pass.
namespace {
// Prepare TF operations in functions for subsequent legalization.
class PrepareTFPass : public FunctionPass<PrepareTFPass> {
public:
explicit PrepareTFPass() : unfold_batch_matmul_(true) {}
explicit PrepareTFPass(bool unfold_batch_matmul)
: unfold_batch_matmul_(unfold_batch_matmul) {}
void runOnFunction() override;
private:
bool unfold_batch_matmul_;
};
// TODO(fengliuai): move this rule to PreparePatterns.td
// TODO(b/140968741): propagate the sign from the command line. Currently all
// the FakeQuant ops are assumed to target UINT8, but the per-channel kernel is
// actually INT8.
// Inserts a "tfl.quantize" and "tfl.dequantize" op pair (QDQs) after the
// "tf.FakeQuantWithMinMaxVarsOp" to be constant folded. Since the constant
// folding logic will use a "std.constant" op to replace the
// "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve
// the quantization parameters as a TypeAttr and "tfl.dequantize" op used to
// convert the output type to the next op. Here are the transformations:
//
//   input   min cst       max cst              input   min cst       max cst
//    \       |             |                    \       |             |
//     \  (tf.Identity) (tf.Identity)      =>     \  (tf.Identity) (tf.Identity)
//      \     |             |                      \     |             |
//       tf.FakeQuantWithMinMaxVars                 tf.FakeQuantWithMinMaxVars
//                                                          |
//                                                    tfl.quantize
//                                                          |
//                                                   tfl.dequantize
//                                                          |
//
// If the input is a constant, the result pattern will eventually be converted
// to:
//
//       quant-emulated input
//               |
//          tfl.quantize
//               |
//         tfl.dequantize
//               |
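// A schematic sketch of the rewrite on IR (illustrative only; exact types
// and attribute details are elided):
//
//   %fq = "tf.FakeQuantWithMinMaxVars"(%input, %min, %max) -> tensor<...xf32>
//   %q  = "tfl.quantize"(%fq) {qtype = ...}   // preserves quantization params
//   %dq = "tfl.dequantize"(%q) -> tensor<...xf32>
//   <all former users of %fq now consume %dq>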
template <typename TFFakeQuantOp, bool PerAxis>
struct InsertTFLQuantOpsAfterTFFakeQuantOp
: public OpRewritePattern<TFFakeQuantOp> {
using BaseType = InsertTFLQuantOpsAfterTFFakeQuantOp<TFFakeQuantOp, PerAxis>;
explicit InsertTFLQuantOpsAfterTFFakeQuantOp(MLIRContext *ctx)
: OpRewritePattern<TFFakeQuantOp>(ctx) {}
PatternMatchResult matchAndRewrite(TFFakeQuantOp tf_op,
PatternRewriter &rewriter) const override {
// We don't want to insert quantize/dequantize if the quantize op exists.
auto res = tf_op.outputs();
if (!res.hasOneUse() || isa<QuantizeOp>(*res.user_begin()))
return this->matchFailure();
// Extract the min/max constant values from the operands. We also consider
// a special case that there are tf.Identity ops between the min/max
// constants and the tf.FakeQuantWithMinMaxVarsOp.
Value min = tf_op.min(), max = tf_op.max();
DenseFPElementsAttr min_value, max_value;
if (auto id1 = dyn_cast_or_null<TF::IdentityOp>(min.getDefiningOp()))
min = id1.input();
if (auto id2 = dyn_cast_or_null<TF::IdentityOp>(max.getDefiningOp()))
max = id2.input();
if (!matchPattern(min, m_Constant(&min_value))) return this->matchFailure();
if (!matchPattern(max, m_Constant(&max_value))) return this->matchFailure();
int quant_dim = -1;
if (PerAxis) {
// This is a special case where the quant_dim is the last dimension.
quant_dim = res.getType().template cast<ShapedType>().getRank() - 1;
}
// Use the min/max from the operands and the num_bits and narrow_range
// attribute to create the quantization parameter for the new quantize op.
rewriter.setInsertionPointAfter(tf_op);
IntegerAttr num_bits =
rewriter.getI64IntegerAttr(tf_op.num_bits().getSExtValue());
BoolAttr narrow_range = rewriter.getBoolAttr(tf_op.narrow_range());
Type res_type = tf_op.getType();
TypeAttr qtype = quant::GetQuantizedTypeAttr(
rewriter, res_type, min_value, max_value, quant_dim, num_bits,
narrow_range, /*is_signed=*/false);
if (!qtype) return this->matchFailure();
// Finally, use the quantization parameter to create the quantize and
// dequantize ops, and insert them between the tf.FakeQuantWithMinMaxVarsOp
// and its users.
Value value = tf_op.outputs();
auto quantize = rewriter.create<TFL::QuantizeOp>(
tf_op.getLoc(), qtype.getValue(), value, qtype);
auto dequantize = rewriter.create<TFL::DequantizeOp>(
tf_op.getLoc(), res_type, quantize.output());
value.replaceAllUsesWith(dequantize);
quantize.getOperation()->replaceUsesOfWith(dequantize, value);
return this->matchSuccess();
}
};
using PreparePerTensorFakeQuant =
InsertTFLQuantOpsAfterTFFakeQuantOp<TF::FakeQuantWithMinMaxVarsOp, false>;
using PreparePerChannelFakeQuant =
InsertTFLQuantOpsAfterTFFakeQuantOp<TF::FakeQuantWithMinMaxVarsPerChannelOp,
true>;
// Templated class for declaring a converter from some TensorFlow convolution
// op into its counterpart in TensorFlow Lite.
//
// The `ConcreteType` deriving from this template must provide the following
// method for constructing TensorFlow Lite op:
//
// TFL::[op] createTFLOp(ConvertTFConvOpMatchState *state,
// PatternRewriter &rewriter, Location loc,
// Type result_type, Value input,
// Value filter, Value bias) const;
//
// And also the following method for getting the dimension for bias tensor:
//
// int64_t getBiasDim(ArrayRef<int64_t> filterShape) const;
template <typename ConcreteType, typename TFConvOpType>
struct ConvertTFConvOp : public RewritePattern {
// Transient state for preserving data from match to rewrite
struct ConvertTFConvOpMatchState : public PatternState {
IntegerAttr dilation_height_factor;
IntegerAttr dilation_width_factor;
StringAttr padding;
IntegerAttr stride_height;
IntegerAttr stride_width;
};
ConvertTFConvOp(MLIRContext *context)
: RewritePattern(TFConvOpType::getOperationName(), 1, context),
intAttrOne(Builder(context).getI32IntegerAttr(1)) {}
PatternMatchResult match(Operation *op) const override {
// Assumes TensorFlow convolution op is already verified to be
// in valid form.
// Match a TFConvOpType under the following conditions:
// * The 'T' attribute must exist and be of value DT_FLOAT.
// * The 'data_format' attribute must exist and be of value "NHWC".
// * The 'strides' attribute must exist and is of the form [1, X, Y, 1].
// * The 'dilations' attribute is optional, but it must be of the form
// [1, X, Y, 1] if it exists.
TFConvOpType tf_op = cast<TFConvOpType>(op);
if (!TFTypeIsFloatTensor(tf_op.input()) || !TFDataFormatIsNHWC(op))
return matchFailure();
IntegerAttr height, width;
if (!TFIntListIs1XY1(op, "strides", &height, &width)) return matchFailure();
auto state = std::make_unique<ConvertTFConvOpMatchState>();
state->stride_height = height;
state->stride_width = width;
if (TFIntListIs1XY1(op, "dilations", &height, &width)) {
state->dilation_height_factor = height;
state->dilation_width_factor = width;
} else {
// If the 'dilations' attribute is missing, we use the default value (1)
// for both dilation height and width factor.
state->dilation_height_factor = intAttrOne;
state->dilation_width_factor = intAttrOne;
}
StringAttr padding_attr;
if (!TFPaddingIsSameOrValid(op, &padding_attr)) return matchFailure();
state->padding = padding_attr;
// Additionally, we require the filter operand to be of 4-D tensor type so
// that we can extract info from the shape (e.g., for constructing bias
// tensor, for setting depth_multiplier attribute, etc.).
auto filter_type =
tf_op.filter().getType().template dyn_cast<RankedTensorType>();
if (filter_type && filter_type.getRank() == 4)
return matchSuccess(std::move(state));
return matchFailure();
}
void rewrite(Operation *op, std::unique_ptr<PatternState> state,
PatternRewriter &rewriter) const override {
// TensorFlow convolution op only has two inputs, while the TFLite one has
// three, with the bias vector marked as optional. However, TOCO has a
// dedicated pass, EnsureBiasVectors, to create default bias vectors for all
// those missing. So we model the TFLite convolution op as requiring three
// inputs to achieve the legalization task of EnsureBiasVectors. This
// requires the filter tensor to have a static shape.
// TODO(antiagainst): also handle the case of tf.Add(tf.[op], <bias>)
TFConvOpType tf_op = cast<TFConvOpType>(op);
// Get a splat zero tensor with the expected dimension for the bias tensor
auto filter = tf_op.filter();
auto filter_type = filter.getType().template cast<RankedTensorType>();
auto elem_type = filter_type.getElementType();
auto bias_dim = static_cast<const ConcreteType *>(this)->getBiasDim(
filter_type.getShape());
auto bias_type = RankedTensorType::get({bias_dim}, elem_type);
auto bias_attr = rewriter.getZeroAttr(bias_type);
auto bias =
rewriter.create<TF::ConstOp>(op->getLoc(), bias_type, bias_attr);
auto *conv_state = static_cast<ConvertTFConvOpMatchState *>(state.get());
auto conv_op = static_cast<const ConcreteType *>(this)->createTFLOp(
conv_state, rewriter, op->getLoc(), tf_op.getType(), tf_op.input(),
filter, bias);
rewriter.replaceOp(op, conv_op.getResult());
}
const IntegerAttr intAttrOne;
};
class ConvertTFConv2D : public ConvertTFConvOp<ConvertTFConv2D, TF::Conv2DOp> {
public:
using BaseType = ConvertTFConvOp<ConvertTFConv2D, TF::Conv2DOp>;
ConvertTFConv2D(MLIRContext *context) : BaseType(context) {}
int64_t getBiasDim(ArrayRef<int64_t> filterShape) const {
return filterShape.back();
}
TFL::Conv2DOp createTFLOp(ConvertTFConvOpMatchState *state,
PatternRewriter &rewriter, Location loc,
Type result_type, Value input, Value filter,
Value bias) const {
filter = legalizeFilter(rewriter, loc, filter);
return rewriter.create<TFL::Conv2DOp>(
loc, result_type, input, filter, bias,
/*dilation_h_factor=*/state->dilation_height_factor,
/*dilation_w_factor=*/state->dilation_width_factor,
/*fused_activation_function=*/rewriter.getStringAttr("NONE"),
/*padding=*/state->padding,
/*stride_h=*/state->stride_height,
/*stride_w=*/state->stride_width);
}
private:
// Legalize the given filter by converting it from TensorFlow filter data
// format HWIO to TFLite Conv2D op filter data format OHWI and return Value
// for the converted filter. Requires that filter is verified by the match
// method that it is a 4-D RankedTensorType.
Value legalizeFilter(PatternRewriter &rewriter, Location loc,
Value filter) const {
// Create a constant op for HWIO to OHWI transpose permutation.
SmallVector<int, 4> perm = {3, 0, 1, 2};
auto perm_type = RankedTensorType::get({static_cast<int>(perm.size())},
rewriter.getIntegerType(32));
auto perm_attr =
DenseElementsAttr::get(perm_type, llvm::makeArrayRef<int>(perm));
auto perm_op = rewriter.create<TF::ConstOp>(loc, perm_type, perm_attr);
// Create tensor type for the transpose result.
auto filter_type = filter.getType().cast<RankedTensorType>();
auto result_shape = functional::map(
[filter_type](int64_t dim) { return filter_type.getDimSize(dim); },
perm);
auto elem_type = filter_type.getElementType();
auto result_type = RankedTensorType::get(result_shape, elem_type);
return rewriter.create<TF::TransposeOp>(loc, result_type, filter, perm_op);
}
};
class ConvertTFDepthwiseConv2dNative
: public ConvertTFConvOp<ConvertTFDepthwiseConv2dNative,
TF::DepthwiseConv2dNativeOp> {
public:
using BaseType = ConvertTFConvOp<ConvertTFDepthwiseConv2dNative,
TF::DepthwiseConv2dNativeOp>;
ConvertTFDepthwiseConv2dNative(MLIRContext *context) : BaseType(context) {}
int64_t getBiasDim(ArrayRef<int64_t> filterShape) const {
return filterShape[2] * filterShape[3];
}
TFL::DepthwiseConv2DOp createTFLOp(ConvertTFConvOpMatchState *state,
PatternRewriter &rewriter, Location loc,
Type result_type, Value input,
Value filter, Value bias) const {
// Compared to tfl.conv_2d, tfl.depthwise_conv_2d has an additional
// 'depth_multiplier' attribute. However, tf.DepthwiseConv2dNative does not
// have a corresponding 'depth_multiplier' attribute; the multiplier is the
// fourth dimension in the 4-D filter tensor. We query the multiplier from
// tf.DepthwiseConv2dNative and set it as the attribute value accordingly.
auto multiplier = filter.getType().cast<RankedTensorType>().getDimSize(3);
filter = legalizeFilter(rewriter, loc, filter);
return rewriter.create<TFL::DepthwiseConv2DOp>(
loc, result_type, input, filter, bias,
/*dilation_h_factor=*/state->dilation_height_factor,
/*dilation_w_factor=*/state->dilation_width_factor,
/*fused_activation_function=*/rewriter.getStringAttr("NONE"),
/*padding=*/state->padding,
/*stride_h=*/state->stride_height,
/*stride_w=*/state->stride_width,
/*depth_multiplier=*/rewriter.getI32IntegerAttr(multiplier));
}
private:
/// Legalize the given filter by converting it from TensorFlow filter data
/// format to TFLite DepthwiseConv2D op filter data format and return Value
/// for the converted filter. TensorFlow filter data format is
/// [filter_height, filter_width, in_channels, channel_multiplier] and TFLite
/// filter data format is [1, filter_height, filter_width, out_channels].
/// Requires that filter is verified by the match method that it is a 4-D
/// RankedTensorType.
Value legalizeFilter(PatternRewriter &rewriter, Location loc,
Value filter) const {
auto filter_type = filter.getType().cast<RankedTensorType>();
auto filterShape = filter_type.getShape();
SmallVector<int64_t, 4> result_shape = {1, filterShape[0], filterShape[1],
filterShape[2] * filterShape[3]};
auto elem_type = filter_type.getElementType();
auto result_type = RankedTensorType::get(result_shape, elem_type);
// The TensorFlow Lite `Reshape` op currently only supports int32 shape tensors.
auto shape_type = RankedTensorType::get({4}, rewriter.getIntegerType(32));
SmallVector<Attribute, 4> result_shape_data(4);
for (int i = 0; i < 4; ++i) {
result_shape_data[i] =
rewriter.getI32IntegerAttr(static_cast<int32_t>(result_shape[i]));
}
auto shape_attr = DenseElementsAttr::get(shape_type, result_shape_data);
auto shape = rewriter.create<TF::ConstOp>(loc, shape_type, shape_attr);
return rewriter.create<TF::ReshapeOp>(loc, result_type, filter, shape);
}
};
// StridedSlice can have complicated attributes like begin_axis_mask,
// end_axis_mask, ellipsis_axis_mask, new_axis_mask, shrink_axis_mask. These
// masks complicate the strided_slice computation logic; we can simplify it
// by inserting a reshape op to pad the inputs so the strided_slice becomes
// easier to handle.
//
// So the graph may look like below:
// original_input -> strided_slice -> output
// (transforms)
// original_input -> reshape -> strided_slice -> output
//
// And the new shape is computed based on the masks.
//
// An example for new_axis_mask: say the new_axis_mask is 9, which represents
// [1 0 0 1], meaning we insert two new axes at dims 0 & 3. So if the
// original shape is [2, 3], we reshape it into [1, 2, 3, 1].
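// Worked example of the reshape computation in matchAndRewrite below
// (mask = 9 = 0b1001, original shape [2, 3]):
//   bit 0 set   -> append 1              new_shape = [1]
//   bit 1 clear -> append shape[0] = 2   new_shape = [1, 2]
//   bit 2 clear -> append shape[1] = 3   new_shape = [1, 2, 3]
//   bit 3 set   -> append 1              new_shape = [1, 2, 3, 1]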
struct ConvertTFStridedSlice : public RewritePattern {
explicit ConvertTFStridedSlice(MLIRContext *context)
: RewritePattern(TF::StridedSliceOp::getOperationName(), 2, context) {}
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
// TODO(renjieliu): Consider expanding the transformation for ellipsis &
// shrink masks as well.
TF::StridedSliceOp strided_slice_op = llvm::cast<TF::StridedSliceOp>(op);
uint64_t new_axis_mask = strided_slice_op.new_axis_mask().getZExtValue();
if (new_axis_mask == 0) return matchFailure();
// Insert a new reshape op.
Value original_input = strided_slice_op.input();
RankedTensorType original_input_type =
original_input.getType().cast<RankedTensorType>();
const ArrayRef<int64_t> &original_input_shape =
original_input_type.getShape();
SmallVector<int64_t, 4> new_shape;
size_t index = 0;
while (index < original_input_shape.size() || new_axis_mask) {
if (new_axis_mask & 1) {
new_shape.emplace_back(1);
} else {
new_shape.emplace_back(original_input_shape[index++]);
}
new_axis_mask >>= 1;
}
const int dim_size = new_shape.size();
Location loc = strided_slice_op.getLoc();
auto shape_type =
RankedTensorType::get({dim_size}, rewriter.getIntegerType(32));
SmallVector<Attribute, 4> result_shape_data(dim_size);
for (int i = 0; i < dim_size; ++i) {
result_shape_data[i] =
rewriter.getI32IntegerAttr(static_cast<int32_t>(new_shape[i]));
}
auto shape_attr = DenseElementsAttr::get(shape_type, result_shape_data);
auto shape = rewriter.create<ConstantOp>(loc, shape_type, shape_attr);
auto new_output_type =
RankedTensorType::get(new_shape, original_input_type.getElementType());
TF::ReshapeOp reshape = rewriter.create<TF::ReshapeOp>(
loc, new_output_type, original_input, shape);
// Replace the original strided_slice.
llvm::APInt new_begin_mask = strided_slice_op.begin_mask();
llvm::APInt new_end_mask = strided_slice_op.end_mask();
// Since we expand the dims, we need to apply them to the begin_mask &
// end_mask.
new_begin_mask |= strided_slice_op.new_axis_mask();
new_end_mask |= strided_slice_op.new_axis_mask();
auto attribute_type = rewriter.getIntegerType(64);
rewriter.replaceOpWithNewOp<TF::StridedSliceOp>(
op, strided_slice_op.getType(), reshape, strided_slice_op.begin(),
strided_slice_op.end(), strided_slice_op.strides(),
rewriter.getIntegerAttr(attribute_type, new_begin_mask),
rewriter.getIntegerAttr(attribute_type, new_end_mask),
rewriter.getIntegerAttr(attribute_type,
strided_slice_op.ellipsis_mask()),
rewriter.getI64IntegerAttr(0),
rewriter.getIntegerAttr(attribute_type,
strided_slice_op.shrink_axis_mask()));
return matchSuccess();
}
};
#include "tensorflow/compiler/mlir/lite/transforms/generated_prepare_tf.inc"
void PrepareTFPass::runOnFunction() {
OwningRewritePatternList patterns;
auto func = getFunction();
MLIRContext *ctx = &getContext();
// This pattern is intended to use TFL QDQs to preserve the quantization
// parameters from the TF Quant ops, so it should run in the first
// `applyPatternsGreedily` call; otherwise constant folding would remove
// the TF FakeQuant ops.
patterns.insert<PreparePerTensorFakeQuant, PreparePerChannelFakeQuant>(ctx);
// This pattern will try to identify and optimize for dilated convolution.
// e.g. Patterns like "SpaceToBatchND -> Conv2D -> BatchToSpaceND" will be
// replaced with a single Conv op with dilation parameter.
patterns.insert<ConvertTFDilatedConvOp<TF::Conv2DOp>,
ConvertTFDilatedConvOp<TF::DepthwiseConv2dNativeOp>>(ctx);
TFL::populateWithGenerated(ctx, &patterns);
// TODO(karimnosseir): Split into a separate pass, probably after
// deciding on a long-term plan for this optimization.
// This will allow optimizing any TF_Mul->TF_Conv in the graph,
// including any expanded from FusedBatchNorm. We need to do this
// before converting TF_Conv to TFL_Conv.
applyPatternsGreedily(func, patterns);
// Load the generated patterns again, so the new quantization pass-through
// will be applied.
patterns.clear();
TFL::populateWithGenerated(ctx, &patterns);
if (unfold_batch_matmul_) {
patterns.insert<ConvertTFBatchMatMulOp<TF::BatchMatMulOp>,
ConvertTFBatchMatMulOp<TF::BatchMatMulV2Op>>(ctx);
}
patterns.insert<ConvertTFConv2D, ConvertTFDepthwiseConv2dNative,
ConvertTFStridedSlice>(ctx);
applyPatternsGreedily(func, patterns);
}
} // namespace
// Creates an instance of the TensorFlow Lite dialect PrepareTF pass.
std::unique_ptr<OpPassBase<FuncOp>> CreatePrepareTFPass(
bool unfold_batch_matmul) {
return std::make_unique<PrepareTFPass>(unfold_batch_matmul);
}
static PassRegistration<PrepareTFPass> pass(
"tfl-prepare-tf", "Prepare TF for legalization to TensorFlow Lite dialect");
} // namespace TFL
} // namespace mlir
|
#include <iostream>
#include <stdlib.h>
using namespace std;
void stck();
void que();
void stck_list();
void que_list();
void stck_ins_arr();
void stck_del_arr();
void stck_print();
void que_ins_arr();
void que_del_arr();
void que_print();
void stck_ins_list();
void stck_del_list();
void stck_print_list();
void que_ins_list();
void que_del_list();
void que_print_list();
int top = -1;
int st[100];
int fron = -1;
int endi = -1;
int q[100];
struct node
{
int data;
node *next;
};
node *head = NULL;
void stck_ins_arr()
{
system("cls");
int data;
cout << "Enter the data: ";
cin >> data;
if (top == 99)
{
cout << "\nStack overflow\n";
}
else
{
// Both original branches performed the same push, so one path suffices.
top++;
st[top] = data;
cout << "\nElement inserted successfully\n";
}
getchar();
stck();
}
void stck_del_arr()
{
system("cls");
if (top == -1)
{
cout << "No elemnts found to delete\n";
}
else
{
top--;
cout << "\nElement deleted succesfully\n";
}
getchar();
stck();
}
void stck_print()
{
system("cls");
cout << "\n\n\n\t\tThe Representation of stack is as follows\n\n";
for (int i = top; i >= 0; i--)
{
if (i == top)
{
cout << "\t\t\t" << st[i] << "<-"
<< "\n"
<< "\t\t\t-"
<< "\n";
}
else
{
cout << "\t\t\t" << st[i] << "\n"
<< "\t\t\t-"
<< "\n";
}
}
getchar();
stck();
}
void stck()
{
system("cls");
cout << "\n\t\t STACK AS ARRAY\n";
cout << "Choose any option\n";
cout << "\n1. Insert\n2. Delete\n3. Print\n";
int n;
cin >> n;
switch (n)
{
case 1:
stck_ins_arr();
break;
case 2:
stck_del_arr();
break;
case 3:
stck_print();
break;
default:
cout << "\nInvalid choice\n";
break;
}
}
void que_ins_arr()
{
system("cls");
int data;
cout << "\nEnter data to be inserted: ";
cin >> data;
if (endi == 99)
{
cout << "\nQueue overflow\n";
}
else
{
if (fron == -1)
{
endi = fron = 0;
}
else
{
endi++;
}
q[endi] = data;
cout << "\nElement inserted successfully\n";
}
getchar();
que();
}
void que_del_arr()
{
system("cls");
if (fron == -1 || fron > endi)
{
cout << "\nNot enough elements to delete\n";
}
else
{
fron++;
cout << "\nElement deleted successfully\n";
}
getchar();
que();
}
void que_print()
{
system("cls");
if (fron == -1 || fron > endi)
{
cout << "\n\t\tQUEUE EMPTY!!\n";
}
else
{
cout << "\n\n\n\t\tThe Representation of queue is as follows\n\n";
cout << "\t\t\t\t";
for (int i = endi; i >= fron; i--)
{
cout << q[i] << " | ";
}
}
getchar();
que();
}
void que()
{
system("cls");
cout << "\n\t\t QUEUE AS ARRAY\n";
cout << "Choose any option\n";
cout << "\n1. Insert\n2. Delete\n3. Print\n";
int n;
cin >> n;
switch (n)
{
case 1:
que_ins_arr();
break;
case 2:
que_del_arr();
break;
case 3:
que_print();
break;
default:
cout << "\nInvalid choice\n";
break;
}
}
void stck_ins_list()
{
system("cls");
int data;
cout << "\nEnter data: ";
cin >> data;
// Both original branches did the same thing: when head is NULL,
// temp->next = head is also NULL, so one path suffices.
node *temp = new node;
temp->data = data;
temp->next = head;
head = temp;
cout << "\nElement inserted successfully\n";
getchar();
stck_list();
}
void stck_del_list()
{
system("cls");
node *temp;
if (head == NULL)
{
cout << "\nNot enough nodes!!\n";
}
else
{
temp = head;
head = head->next;
delete temp; // the node was allocated with new, so use delete, not free
cout << "\nElement deleted successfully!!\n";
}
getchar();
stck_list();
}
void stck_print_list()
{
system("cls");
node *temp;
temp = head;
if (head == NULL)
{
cout << "\n\t\tLIST EMPTY!!\n";
}
else
{
cout << "\n\n\n\t\tThe Representation of stack(linked list) is as follows\n\n";
while (temp != NULL)
{
cout << "\t\t\t" << temp->data << "\n"
<< "\t\t\t-"
<< "\n";
temp = temp->next;
}
}
getchar();
stck_list();
}
void stck_list()
{
system("cls");
cout << "\n\t\t STACK AS LINKED LIST\n";
cout << "Choose any option\n";
cout << "\n1. Insert\n2. Delete\n3. Print\n";
int n;
cin >> n;
switch (n)
{
case 1:
stck_ins_list();
break;
case 2:
stck_del_list();
break;
case 3:
stck_print_list();
break;
default:
cout << "\nInvalid choice\n";
break;
}
}
void que_ins_list()
{
system("cls");
int data;
cout << "\nEnter data: ";
cin >> data;
// As in stck_ins_list, one path covers both cases: when head is NULL,
// temp->next = head is also NULL.
node *temp = new node;
temp->data = data;
temp->next = head;
head = temp;
cout << "\nElement inserted successfully\n";
getchar();
que_list();
}
void que_del_list()
{
system("cls");
if (head == NULL)
{
cout << "\nNot enough nodes!!\n";
}
else if (head->next == NULL)
{
// Only one node: deleting it empties the queue.
delete head;
head = NULL;
cout << "\nElement deleted successfully!!\n";
}
else
{
// Walk to the second-to-last node and unlink the last one.
node *temp = head;
while (temp->next->next != NULL)
{
temp = temp->next;
}
delete temp->next;
temp->next = NULL;
cout << "\nElement deleted successfully!!\n";
}
getchar();
que_list();
}
void que_print_list()
{
system("cls");
node *temp;
temp = head;
if (head == NULL)
{
cout << "\n\t\tLIST EMPTY!!\n";
}
else
{
cout << "\n\n\n\t\tThe Representation of queue is as follows\n\n";
cout << "\t\t\t\t";
while (temp != NULL)
{
cout << temp->data << " | ";
temp = temp->next;
}
}
getchar();
que_list();
}
void que_list()
{
system("cls");
cout << "\n\t\t QUEUE AS LINKED LIST\n";
cout << "Choose any option\n";
cout << "\n1. Insert\n2. Delete\n3. Print\n";
int n;
cin >> n;
switch (n)
{
case 1:
que_ins_list();
break;
case 2:
que_del_list();
break;
case 3:
que_print_list();
break;
default:
cout << "\nInvalid choice\n";
break;
}
}
int main()
{
cout << "\nYou want to work with: ";
cout << "\n1. Stack(as array)\n2. Queue(as array)\n3. Stack(as linked list)\n4. Queue(as linked list)\n";
cout << "Enter choice: ";
int n;
cin >> n;
switch (n)
{
case 1:
stck();
break;
case 2:
que();
break;
case 3:
stck_list();
break;
case 4:
que_list();
break;
default:
cout << "Invalid Choice";
break;
}
return 0;
}
|
//*********************************************************
//
// Copyright (c) Microsoft. All rights reserved.
// This code is licensed under the MIT License (MIT).
// THIS CODE IS PROVIDED *AS IS* WITHOUT WARRANTY OF
// ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING ANY
// IMPLIED WARRANTIES OF FITNESS FOR A PARTICULAR
// PURPOSE, MERCHANTABILITY, OR NON-INFRINGEMENT.
//
//*********************************************************
#include "pch.h"
#include "AudioCapturePermissions.h"
using namespace SDKTemplate;
using namespace Concurrency;
using namespace Platform;
using namespace Windows::Foundation;
using namespace Windows::Media::Capture;
/// <summary>
/// On desktop/tablet systems, users are prompted to give permission to use capture devices on a
/// per-app basis. Along with declaring the microphone DeviceCapability in the package manifest,
/// this method tests the privacy setting for microphone access for this application.
/// Note that this only checks the Settings->Privacy->Microphone setting; it does not handle
/// the Cortana/Dictation privacy check (under Settings->Privacy->Speech, Inking and Typing).
///
/// Developers should ideally perform a check like this every time their app gains focus, in order to
/// check if the user has changed the setting while the app was suspended or not in focus.
/// </summary>
/// <returns>true if the microphone can be accessed without any permissions problems.</returns>
IAsyncOperation<bool>^ AudioCapturePermissions::RequestMicrophonePermissionAsync()
{
return create_async([]()
{
try
{
// Request access to the microphone only, to limit the number of capabilities we need
// to request in the package manifest.
MediaCaptureInitializationSettings^ settings = ref new MediaCaptureInitializationSettings();
settings->StreamingCaptureMode = StreamingCaptureMode::Audio;
settings->MediaCategory = MediaCategory::Speech;
MediaCapture^ capture = ref new MediaCapture();
return create_task(capture->InitializeAsync(settings))
.then([](task<void> previousTask) -> bool
{
try
{
previousTask.get();
}
catch (AccessDeniedException^)
{
// The user has turned off access to the microphone. If this occurs, we should show an error, or disable
// functionality within the app to ensure that further exceptions aren't generated when
// recognition is attempted.
return false;
}
catch (Exception^ exception)
{
// This can be replicated by using remote desktop to a system, but not redirecting the microphone input.
// Can also occur if using the virtual machine console tool to access a VM instead of using remote desktop.
if (exception->HResult == AudioCapturePermissions::NoCaptureDevicesHResult)
{
auto messageDialog = ref new Windows::UI::Popups::MessageDialog("No Audio Capture devices are present on this system.");
create_task(messageDialog->ShowAsync());
return false;
}
throw;
}
return true;
});
}
catch (Platform::ClassNotRegisteredException^ ex)
{
// If media player components are unavailable (e.g., on an N SKU of Windows), we may
// get ClassNotRegisteredException when trying to check if we have permission to use
// the microphone.
auto messageDialog = ref new Windows::UI::Popups::MessageDialog("Media Player Components unavailable.");
create_task(messageDialog->ShowAsync());
return create_task([] {return false; });
}
});
}
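// Minimal usage sketch (illustrative; the handler body is hypothetical):
//
//   create_task(AudioCapturePermissions::RequestMicrophonePermissionAsync())
//       .then([](bool permissionGained)
//   {
//       if (!permissionGained)
//       {
//           // Show an error or disable speech features in the app.
//       }
//   });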
|
/*
* (c) Copyright 2019 Xilinx, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef _XFCOMPRESSION_STREAM_DOWNSIZER_HPP_
#define _XFCOMPRESSION_STREAM_DOWNSIZER_HPP_
/**
* @file stream_downsizer.hpp
* @brief Header for stream downsizer module.
*
* This file is part of Vitis Data Compression Library.
*/
#include "common.h"
namespace xf {
namespace compression {
template <class SIZE_DT, int IN_WIDTH, int OUT_WIDTH>
void streamDownsizer(hls::stream<ap_uint<IN_WIDTH> >& inStream,
hls::stream<ap_uint<OUT_WIDTH> >& outStream,
SIZE_DT input_size) {
/**
* @brief This module reads IN_WIDTH-wide words from the input stream,
* downsizes them to OUT_WIDTH-wide words, and writes them to the output stream
*
* @tparam SIZE_DT data size
* @tparam IN_WIDTH input width
* @tparam OUT_WIDTH output width
*
* @param inStream input stream
* @param outStream output stream
* @param input_size input size
*/
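// Worked example (illustrative): with IN_WIDTH = 64, OUT_WIDTH = 8 and
// input_size = 20 bytes, c_outWord = 1, sizeOutputV = 20 and factor = 8,
// so the loop reads a new 64-bit word from inStream every 8 iterations
// and emits one byte per iteration.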
if (input_size == 0) // changed for gzip
return;
const int c_byteWidth = 8;
const int c_inputWord = IN_WIDTH / c_byteWidth;
const int c_outWord = OUT_WIDTH / c_byteWidth;
uint32_t sizeOutputV = (input_size - 1) / c_outWord + 1;
int factor = c_inputWord / c_outWord;
ap_uint<IN_WIDTH> inBuffer = 0;
convInWidthtoV:
for (int i = 0; i < sizeOutputV; i++) {
#pragma HLS PIPELINE II = 1
int idx = i % factor;
if (idx == 0) inBuffer = inStream.read();
ap_uint<OUT_WIDTH> tmpValue = inBuffer.range((idx + 1) * OUT_WIDTH - 1, idx * OUT_WIDTH);
outStream << tmpValue;
}
}
template <class SIZE_DT, int IN_WIDTH, int OUT_WIDTH>
void streamDownsizerP2P(hls::stream<ap_uint<IN_WIDTH> >& inStream,
hls::stream<ap_uint<OUT_WIDTH> >& outStream,
SIZE_DT input_size,
SIZE_DT input_start_idx) {
/**
* @brief This module reads IN_WIDTH-wide words from the input stream,
* downsizes them to OUT_WIDTH-wide words, and writes them to the output stream
*
* @tparam SIZE_DT data size
* @tparam IN_WIDTH input width
* @tparam OUT_WIDTH output width
*
* @param inStream input stream
* @param outStream output stream
* @param input_size input size
* @param input_start_idx input starting index
*/
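// Worked example (illustrative): with IN_WIDTH = 64, OUT_WIDTH = 8 and
// input_start_idx = 3, offset = 3; the first word is read at i == offset
// and output starts at out-word index 3 within that word.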
const int c_byteWidth = 8;
const int c_inputWord = IN_WIDTH / c_byteWidth;
const int c_outWord = OUT_WIDTH / c_byteWidth;
uint32_t sizeOutputV = (input_size - 1) / c_outWord + 1;
int factor = c_inputWord / c_outWord;
ap_uint<IN_WIDTH> inBuffer = 0;
int offset = input_start_idx % c_inputWord;
convInWidthtoV:
for (int i = offset; i < (sizeOutputV + offset); i++) {
#pragma HLS PIPELINE II = 1
int idx = i % factor;
if (idx == 0 || i == offset) inBuffer = inStream.read();
ap_uint<OUT_WIDTH> tmpValue = inBuffer.range((idx + 1) * OUT_WIDTH - 1, idx * OUT_WIDTH);
outStream << tmpValue;
}
}
} // namespace compression
} // namespace xf
#endif
|
/*
*
* Copyright (c) 2020 Project CHIP Authors
* Copyright (c) 2017 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* Source implementation for a generic shell API for CHIP examples.
*/
#include "shell.h"
#include "commands.h"
#include <core/CHIPError.h>
#include <support/CodeUtils.h>
#include <support/logging/CHIPLogging.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
using namespace chip::Logging;
namespace chip {
namespace Shell {
Shell Shell::theShellRoot;
int shell_line_read(char * buffer, size_t max)
{
int read = 0;
bool done = false;
char * inptr = buffer;
// Read in characters until we get a new line or we hit our max size.
while (((inptr - buffer) < (int) max) && !done)
{
if (read == 0)
{
read = streamer_read(streamer_get(), inptr, 1);
}
// Process any characters we just read in.
while (read > 0)
{
switch (*inptr)
{
case '\r':
case '\n':
streamer_printf(streamer_get(), "\r\n");
*inptr = 0; // null terminate
done = true;
break;
case 0x7F:
// Backspace/delete: drop the control character just stored and the character before it
inptr -= 2;
if (inptr >= buffer - 1)
{
streamer_printf(streamer_get(), "\b \b");
}
else
{
inptr = buffer - 1;
}
break;
default:
if (isprint((int) *inptr) || *inptr == '\t')
{
streamer_printf(streamer_get(), "%c", *inptr);
}
else
{
inptr--;
}
break;
}
inptr++;
read--;
}
}
return (inptr - buffer);
}
void Shell::ForEachCommand(shell_command_iterator_t * on_command, void * arg)
{
for (unsigned i = 0; i < _commandSetCount; i++)
{
for (unsigned j = 0; j < _commandSetSize[i]; j++)
{
if (on_command(&_commandSet[i][j], arg))
{
return;
}
}
}
}
void Shell::RegisterCommands(shell_command_t * command_set, unsigned count)
{
if (_commandSetCount >= CHIP_SHELL_MAX_MODULES)
{
ChipLogError(Shell, "Max number of modules reached\n");
assert(0);
}
_commandSet[_commandSetCount] = command_set;
_commandSetSize[_commandSetCount] = count;
++_commandSetCount;
}
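// Illustrative registration sketch (the command name, handler, and field
// order are assumptions for the example, not part of this file):
//
//   static int HandleEchoCommand(int argc, char * argv[]) { return 0; }
//   static shell_command_t sEchoCommands[] = {
//       { &HandleEchoCommand, "echo", "Echo the arguments" },
//   };
//   Shell::theShellRoot.RegisterCommands(sEchoCommands, 1);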
int Shell::ExecCommand(int argc, char * argv[])
{
int retval = CHIP_ERROR_INVALID_ARGUMENT;
// Find the command
for (unsigned i = 0; i < _commandSetCount; i++)
{
for (unsigned j = 0; j < _commandSetSize[i]; j++)
{
if (strcmp(argv[0], _commandSet[i][j].cmd_name) == 0)
{
// Execute the command!
retval = _commandSet[i][j].cmd_func(argc - 1, argv + 1);
break;
}
}
}
return retval;
}
static bool IsSeparator(char aChar)
{
return (aChar == ' ') || (aChar == '\t') || (aChar == '\r') || (aChar == '\n');
}
static bool IsEscape(char aChar)
{
return (aChar == '\\');
}
static bool IsEscapable(char aChar)
{
return IsSeparator(aChar) || IsEscape(aChar);
}
int Shell::TokenizeLine(char * buffer, char ** tokens, int max_tokens)
{
int len = strlen(buffer);
int cursor = 0;
int i = 0;
// Strip leading spaces
while (buffer[i] && buffer[i] == ' ')
{
i++;
}
VerifyOrExit((len - i) > 0, cursor = 0);
// The first token starts at the beginning.
tokens[cursor++] = &buffer[i];
for (; i < len && cursor < max_tokens; i++)
{
if (IsEscape(buffer[i]) && IsEscapable(buffer[i + 1]))
{
// include the null terminator: strlen(cmd) = strlen(cmd + 1) + 1
memmove(&buffer[i], &buffer[i + 1], strlen(&buffer[i]));
}
else if (IsSeparator(buffer[i]))
{
buffer[i] = 0;
if (!IsSeparator(buffer[i + 1]))
{
tokens[cursor++] = &buffer[i + 1];
}
}
}
tokens[cursor] = 0;
exit:
return cursor;
}
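// Illustrative example (hypothetical input): the buffer
//   "cmd  arg\ one  two"
// tokenizes into tokens[0] = "cmd", tokens[1] = "arg one", tokens[2] = "two";
// the escape is removed in place and the escaped space stays inside the
// second token.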
void Shell::TaskLoop(void * arg)
{
int retval;
int argc;
char * argv[CHIP_SHELL_MAX_TOKENS];
char line[CHIP_SHELL_MAX_LINE_SIZE];
// Initialize the default streamer that was linked.
streamer_init(streamer_get());
theShellRoot.RegisterDefaultCommands();
while (1)
{
streamer_printf(streamer_get(), CHIP_SHELL_PROMPT);
shell_line_read(line, sizeof(line));
argc = shell_line_tokenize(line, argv, CHIP_SHELL_MAX_TOKENS);
if (argc > 0)
{
retval = theShellRoot.ExecCommand(argc, argv);
if (retval)
{
char errorStr[160];
bool errorStrFound = FormatCHIPError(errorStr, sizeof(errorStr), retval);
if (!errorStrFound)
{
errorStr[0] = 0;
}
streamer_printf(streamer_get(), "Error %s: %s\r\n", argv[0], errorStr);
}
else
{
streamer_printf(streamer_get(), "Done\r\n", argv[0]);
}
}
else
{
// Empty input has no output -- just display prompt
}
}
}
} // namespace Shell
} // namespace chip
|
// Copyright (c) 2018, The Safex Project
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other
// materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be
// used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
// THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
// THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Parts of this file are originally copyright (c) 2017-2018 The Monero Project
#include "daemon_handler.h"
// likely included by daemon_handler.h's includes,
// but including here for clarity
#include "cryptonote_core/cryptonote_core.h"
#include "cryptonote_basic/cryptonote_format_utils.h"
#include "cryptonote_basic/blobdatatype.h"
#include "ringct/rctSigs.h"
namespace cryptonote
{
namespace rpc
{
void DaemonHandler::handle(const GetHeight::Request& req, GetHeight::Response& res)
{
res.height = m_core.get_current_blockchain_height();
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlocksFast::Request& req, GetBlocksFast::Response& res)
{
std::list<std::pair<blobdata, std::list<blobdata> > > blocks;
if(!m_core.find_blockchain_supplement(req.start_height, req.block_ids, blocks, res.current_height, res.start_height, COMMAND_RPC_GET_BLOCKS_FAST_MAX_COUNT))
{
res.status = Message::STATUS_FAILED;
res.error_details = "core::find_blockchain_supplement() returned false";
return;
}
res.blocks.resize(blocks.size());
res.output_indices.resize(blocks.size());
//TODO: really need to switch uses of std::list to std::vector unless
// it's a huge performance concern
auto it = blocks.begin();
uint64_t block_count = 0;
while (it != blocks.end())
{
cryptonote::rpc::block_with_transactions& bwt = res.blocks[block_count];
if (!parse_and_validate_block_from_blob(it->first, bwt.block))
{
res.blocks.clear();
res.output_indices.clear();
res.status = Message::STATUS_FAILED;
res.error_details = "failed retrieving a requested block";
return;
}
if (it->second.size() != bwt.block.tx_hashes.size())
{
res.blocks.clear();
res.output_indices.clear();
res.status = Message::STATUS_FAILED;
res.error_details = "incorrect number of transactions retrieved for block";
return;
}
std::list<transaction> txs;
for (const auto& blob : it->second)
{
txs.resize(txs.size() + 1);
if (!parse_and_validate_tx_from_blob(blob, txs.back()))
{
res.blocks.clear();
res.output_indices.clear();
res.status = Message::STATUS_FAILED;
res.error_details = "failed retrieving a requested transaction";
return;
}
}
cryptonote::rpc::block_output_indices& indices = res.output_indices[block_count];
// miner tx output indices
{
cryptonote::rpc::tx_output_indices tx_indices;
bool r = m_core.get_tx_outputs_gindexs(get_transaction_hash(bwt.block.miner_tx), tx_indices);
if (!r)
{
res.status = Message::STATUS_FAILED;
res.error_details = "core::get_tx_outputs_gindexs() returned false";
return;
}
indices.push_back(tx_indices);
}
// assume each block returned is returned with all its transactions
// in the correct order.
auto tx_it = txs.begin();
for (const crypto::hash& h : bwt.block.tx_hashes)
{
bwt.transactions.emplace(h, *tx_it);
tx_it++;
cryptonote::rpc::tx_output_indices tx_indices;
bool r = m_core.get_tx_outputs_gindexs(h, tx_indices);
if (!r)
{
res.status = Message::STATUS_FAILED;
res.error_details = "core::get_tx_outputs_gindexs() returned false";
return;
}
indices.push_back(tx_indices);
}
it++;
block_count++;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetHashesFast::Request& req, GetHashesFast::Response& res)
{
res.start_height = req.start_height;
auto& chain = m_core.get_blockchain_storage();
if (!chain.find_blockchain_supplement(req.known_hashes, res.hashes, res.start_height, res.current_height))
{
res.status = Message::STATUS_FAILED;
res.error_details = "Blockchain::find_blockchain_supplement() returned false";
return;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetTransactions::Request& req, GetTransactions::Response& res)
{
std::list<cryptonote::transaction> found_txs;
std::list<crypto::hash> missed_hashes;
bool r = m_core.get_transactions(req.tx_hashes, found_txs, missed_hashes);
// TODO: consider fixing core::get_transactions to not hide exceptions
if (!r)
{
res.status = Message::STATUS_FAILED;
res.error_details = "core::get_transactions() returned false (exception caught there)";
return;
}
size_t num_found = found_txs.size();
// std::list is annoying
std::vector<cryptonote::transaction> found_txs_vec
{
std::make_move_iterator(std::begin(found_txs)),
std::make_move_iterator(std::end(found_txs))
};
std::vector<crypto::hash> missed_vec
{
std::make_move_iterator(std::begin(missed_hashes)),
std::make_move_iterator(std::end(missed_hashes))
};
std::vector<uint64_t> heights(num_found);
std::vector<bool> in_pool(num_found, false);
std::vector<crypto::hash> found_hashes(num_found);
for (size_t i=0; i < num_found; i++)
{
found_hashes[i] = get_transaction_hash(found_txs_vec[i]);
heights[i] = m_core.get_blockchain_storage().get_db().get_tx_block_height(found_hashes[i]);
}
// if any missing from blockchain, check in tx pool
if (!missed_vec.empty())
{
std::list<cryptonote::transaction> pool_txs;
m_core.get_pool_transactions(pool_txs);
for (const auto& tx : pool_txs)
{
crypto::hash h = get_transaction_hash(tx);
auto itr = std::find(missed_vec.begin(), missed_vec.end(), h);
if (itr != missed_vec.end())
{
found_hashes.push_back(h);
found_txs_vec.push_back(tx);
heights.push_back(std::numeric_limits<uint64_t>::max());
in_pool.push_back(true);
missed_vec.erase(itr);
}
}
}
for (size_t i=0; i < found_hashes.size(); i++)
{
cryptonote::rpc::transaction_info info;
info.height = heights[i];
info.in_pool = in_pool[i];
info.transaction = std::move(found_txs_vec[i]);
res.txs.emplace(found_hashes[i], std::move(info));
}
res.missed_hashes = std::move(missed_vec);
res.status = Message::STATUS_OK;
}
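  // Note on the std::list -> std::vector conversion above (illustrative
  // standard-library behavior, not project-specific): wrapping the list
  // iterators in std::make_move_iterator makes the vector's range
  // constructor move each element instead of copying it:
  //
  //   std::list<std::string> src{"a", "b"};
  //   std::vector<std::string> dst{std::make_move_iterator(src.begin()),
  //                                std::make_move_iterator(src.end())};
  //   // src's elements are now moved-from; dst owns the data.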
void DaemonHandler::handle(const KeyImagesSpent::Request& req, KeyImagesSpent::Response& res)
{
res.spent_status.resize(req.key_images.size(), KeyImagesSpent::STATUS::UNSPENT);
std::vector<bool> chain_spent_status;
std::vector<bool> pool_spent_status;
m_core.are_key_images_spent(req.key_images, chain_spent_status);
m_core.are_key_images_spent_in_pool(req.key_images, pool_spent_status);
if ((chain_spent_status.size() != req.key_images.size()) || (pool_spent_status.size() != req.key_images.size()))
{
res.status = Message::STATUS_FAILED;
res.error_details = "tx_pool::have_key_images_as_spent() gave vectors of wrong size(s).";
return;
}
for(size_t i=0; i < req.key_images.size(); i++)
{
if ( chain_spent_status[i] )
{
res.spent_status[i] = KeyImagesSpent::STATUS::SPENT_IN_BLOCKCHAIN;
}
else if ( pool_spent_status[i] )
{
res.spent_status[i] = KeyImagesSpent::STATUS::SPENT_IN_POOL;
}
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetTxGlobalOutputIndices::Request& req, GetTxGlobalOutputIndices::Response& res)
{
if (!m_core.get_tx_outputs_gindexs(req.tx_hash, res.output_indices))
{
res.status = Message::STATUS_FAILED;
res.error_details = "core::get_tx_outputs_gindexs() returned false";
return;
}
res.status = Message::STATUS_OK;
}
//TODO: handle "restricted" RPC
void DaemonHandler::handle(const GetRandomOutputsForAmounts::Request& req, GetRandomOutputsForAmounts::Response& res)
{
auto& chain = m_core.get_blockchain_storage();
try
{
for (const uint64_t& amount : req.amounts)
{
std::vector<uint64_t> indices = chain.get_random_outputs(amount, req.count, req.output_type);
outputs_for_amount ofa;
ofa.resize(indices.size());
for (size_t i = 0; i < indices.size(); i++)
{
crypto::public_key key = chain.get_output_key(amount, indices[i], req.output_type);
ofa[i].amount_index = indices[i];
ofa[i].key = key;
}
amount_with_random_outputs amt;
amt.amount = amount;
amt.outputs = ofa;
res.amounts_with_outputs.push_back(amt);
}
res.status = Message::STATUS_OK;
}
catch (const std::exception& e)
{
res.status = Message::STATUS_FAILED;
res.error_details = e.what();
}
}
void DaemonHandler::handle(const SendRawTx::Request& req, SendRawTx::Response& res)
{
auto tx_blob = cryptonote::tx_to_blob(req.tx);
cryptonote_connection_context fake_context = AUTO_VAL_INIT(fake_context);
tx_verification_context tvc = AUTO_VAL_INIT(tvc);
if(!m_core.handle_incoming_tx(tx_blob, tvc, false, false, !req.relay) || tvc.m_verifivation_failed)
{
if (tvc.m_verifivation_failed)
{
LOG_PRINT_L0("[on_send_raw_tx]: tx verification failed");
}
else
{
LOG_PRINT_L0("[on_send_raw_tx]: Failed to process tx");
}
res.status = Message::STATUS_FAILED;
res.error_details = "";
if (tvc.m_low_mixin)
{
res.error_details = "mixin too low";
}
    if (tvc.m_double_spend)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "double spend";
    }
    if (tvc.m_invalid_input)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "invalid input";
    }
    if (tvc.m_invalid_output)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "invalid output";
    }
    if (tvc.m_too_big)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "too big";
    }
    if (tvc.m_overspend)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "overspend";
    }
    if (tvc.m_fee_too_low)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "fee too low";
    }
    if (tvc.m_not_rct)
    {
      if (!res.error_details.empty()) res.error_details += " and ";
      res.error_details += "tx is not ringct";
    }
if (res.error_details.empty())
{
res.error_details = "an unknown issue was found with the transaction";
}
return;
}
if(!tvc.m_should_be_relayed || !req.relay)
{
LOG_PRINT_L0("[on_send_raw_tx]: tx accepted, but not relayed");
res.error_details = "Not relayed";
res.relayed = false;
res.status = Message::STATUS_OK;
return;
}
NOTIFY_NEW_TRANSACTIONS::request r;
r.txs.push_back(tx_blob);
m_core.get_protocol()->relay_transactions(r, fake_context);
//TODO: make sure that tx has reached other nodes here, probably wait to receive reflections from other nodes
res.status = Message::STATUS_OK;
res.relayed = true;
return;
}
void DaemonHandler::handle(const StartMining::Request& req, StartMining::Response& res)
{
cryptonote::address_parse_info info;
if(!get_account_address_from_str(info, m_core.get_nettype(), req.miner_address))
{
res.error_details = "Failed, wrong address";
LOG_PRINT_L0(res.error_details);
res.status = Message::STATUS_FAILED;
return;
}
if (info.is_subaddress)
{
res.error_details = "Failed, mining to subaddress isn't supported yet";
LOG_PRINT_L0(res.error_details);
res.status = Message::STATUS_FAILED;
return;
}
unsigned int concurrency_count = boost::thread::hardware_concurrency() * 4;
// if we couldn't detect threads, set it to a ridiculously high number
if(concurrency_count == 0)
{
concurrency_count = 257;
}
// if there are more threads requested than the hardware supports
// then we fail and log that.
if(req.threads_count > concurrency_count)
{
res.error_details = "Failed, too many threads relative to CPU cores.";
LOG_PRINT_L0(res.error_details);
res.status = Message::STATUS_FAILED;
return;
}
boost::thread::attributes attrs;
attrs.set_stack_size(THREAD_STACK_SIZE);
if(!m_core.get_miner().start(info.address, static_cast<size_t>(req.threads_count), attrs, req.do_background_mining, req.ignore_battery))
{
res.error_details = "Failed, mining not started";
LOG_PRINT_L0(res.error_details);
res.status = Message::STATUS_FAILED;
return;
}
res.status = Message::STATUS_OK;
res.error_details = "";
}
void DaemonHandler::handle(const GetInfo::Request& req, GetInfo::Response& res)
{
res.info.height = m_core.get_current_blockchain_height();
res.info.target_height = m_core.get_target_blockchain_height();
if (res.info.height > res.info.target_height)
{
res.info.target_height = res.info.height;
}
auto& chain = m_core.get_blockchain_storage();
res.info.difficulty = chain.get_difficulty_for_next_block();
res.info.target = chain.get_difficulty_target();
res.info.tx_count = chain.get_total_transactions() - res.info.height; //without coinbase
res.info.tx_pool_size = m_core.get_pool_transactions_count();
res.info.alt_blocks_count = chain.get_alternative_blocks_count();
uint64_t total_conn = m_p2p.get_connections_count();
res.info.outgoing_connections_count = m_p2p.get_outgoing_connections_count();
res.info.incoming_connections_count = total_conn - res.info.outgoing_connections_count;
res.info.white_peerlist_size = m_p2p.get_peerlist_manager().get_white_peers_count();
res.info.grey_peerlist_size = m_p2p.get_peerlist_manager().get_gray_peers_count();
res.info.mainnet = m_core.get_nettype() == MAINNET;
res.info.testnet = m_core.get_nettype() == TESTNET;
res.info.stagenet = m_core.get_nettype() == STAGENET;
res.info.cumulative_difficulty = m_core.get_blockchain_storage().get_db().get_block_cumulative_difficulty(res.info.height - 1);
res.info.block_size_limit = m_core.get_blockchain_storage().get_current_cumulative_blocksize_limit();
res.info.start_time = (uint64_t)m_core.get_start_time();
res.status = Message::STATUS_OK;
res.error_details = "";
}
void DaemonHandler::handle(const StopMining::Request& req, StopMining::Response& res)
{
if(!m_core.get_miner().stop())
{
res.error_details = "Failed, mining not stopped";
LOG_PRINT_L0(res.error_details);
res.status = Message::STATUS_FAILED;
return;
}
res.status = Message::STATUS_OK;
res.error_details = "";
}
void DaemonHandler::handle(const MiningStatus::Request& req, MiningStatus::Response& res)
{
const cryptonote::miner& lMiner = m_core.get_miner();
res.active = lMiner.is_mining();
res.is_background_mining_enabled = lMiner.get_is_background_mining_enabled();
if ( lMiner.is_mining() ) {
res.speed = lMiner.get_speed();
res.threads_count = lMiner.get_threads_count();
const account_public_address& lMiningAdr = lMiner.get_mining_address();
res.address = get_account_address_as_str(m_core.get_nettype(), false, lMiningAdr);
}
res.status = Message::STATUS_OK;
res.error_details = "";
}
void DaemonHandler::handle(const SaveBC::Request& req, SaveBC::Response& res)
{
if (!m_core.get_blockchain_storage().store_blockchain())
{
res.status = Message::STATUS_FAILED;
res.error_details = "Error storing the blockchain";
}
else
{
res.status = Message::STATUS_OK;
}
}
void DaemonHandler::handle(const GetBlockHash::Request& req, GetBlockHash::Response& res)
{
if (m_core.get_current_blockchain_height() <= req.height)
{
res.hash = crypto::null_hash;
res.status = Message::STATUS_FAILED;
res.error_details = "height given is higher than current chain height";
return;
}
res.hash = m_core.get_block_id_by_height(req.height);
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlockTemplate::Request& req, GetBlockTemplate::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const SubmitBlock::Request& req, SubmitBlock::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const GetLastBlockHeader::Request& req, GetLastBlockHeader::Response& res)
{
const crypto::hash block_hash = m_core.get_tail_id();
if (!getBlockHeaderByHash(block_hash, res.header))
{
res.status = Message::STATUS_FAILED;
res.error_details = "Requested block does not exist";
return;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlockHeaderByHash::Request& req, GetBlockHeaderByHash::Response& res)
{
if (!getBlockHeaderByHash(req.hash, res.header))
{
res.status = Message::STATUS_FAILED;
res.error_details = "Requested block does not exist";
return;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlockHeaderByHeight::Request& req, GetBlockHeaderByHeight::Response& res)
{
const crypto::hash block_hash = m_core.get_block_id_by_height(req.height);
if (!getBlockHeaderByHash(block_hash, res.header))
{
res.status = Message::STATUS_FAILED;
res.error_details = "Requested block does not exist";
return;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlockHeadersByHeight::Request& req, GetBlockHeadersByHeight::Response& res)
{
res.headers.resize(req.heights.size());
for (size_t i=0; i < req.heights.size(); i++)
{
const crypto::hash block_hash = m_core.get_block_id_by_height(req.heights[i]);
if (!getBlockHeaderByHash(block_hash, res.headers[i]))
{
res.status = Message::STATUS_FAILED;
res.error_details = "A requested block does not exist";
return;
}
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBlock::Request& req, GetBlock::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const GetPeerList::Request& req, GetPeerList::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const SetLogHashRate::Request& req, SetLogHashRate::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const SetLogLevel::Request& req, SetLogLevel::Response& res)
{
if (req.level < 0 || req.level > 4)
{
res.status = Message::STATUS_FAILED;
res.error_details = "Error: log level not valid";
}
else
{
res.status = Message::STATUS_OK;
mlog_set_log_level(req.level);
}
}
void DaemonHandler::handle(const GetTransactionPool::Request& req, GetTransactionPool::Response& res)
{
bool r = m_core.get_pool_for_rpc(res.transactions, res.key_images);
if (!r) res.status = Message::STATUS_FAILED;
else res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetConnections::Request& req, GetConnections::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const GetBlockHeadersRange::Request& req, GetBlockHeadersRange::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const StopDaemon::Request& req, StopDaemon::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const StartSaveGraph::Request& req, StartSaveGraph::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const StopSaveGraph::Request& req, StopSaveGraph::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const HardForkInfo::Request& req, HardForkInfo::Response& res)
{
const Blockchain &blockchain = m_core.get_blockchain_storage();
uint8_t version = req.version > 0 ? req.version : blockchain.get_ideal_hard_fork_version();
res.info.version = blockchain.get_current_hard_fork_version();
res.info.enabled = blockchain.get_hard_fork_voting_info(version, res.info.window, res.info.votes, res.info.threshold, res.info.earliest_height, res.info.voting);
res.info.state = blockchain.get_hard_fork_state();
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetBans::Request& req, GetBans::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const SetBans::Request& req, SetBans::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const FlushTransactionPool::Request& req, FlushTransactionPool::Response& res)
{
res.status = Message::STATUS_FAILED;
res.error_details = "RPC method not yet implemented.";
}
void DaemonHandler::handle(const GetOutputHistogram::Request& req, GetOutputHistogram::Response& res)
{
std::map<uint64_t, std::tuple<uint64_t, uint64_t, uint64_t> > histogram;
try
{
histogram = m_core.get_blockchain_storage().get_output_histogram(req.amounts, req.unlocked, req.recent_cutoff, req.output_type);
}
catch (const std::exception &e)
{
res.status = Message::STATUS_FAILED;
res.error_details = e.what();
return;
}
res.histogram.clear();
res.histogram.reserve(histogram.size());
for (const auto &i: histogram)
{
if (std::get<0>(i.second) >= req.min_count && (std::get<0>(i.second) <= req.max_count || req.max_count == 0))
res.histogram.emplace_back(output_amount_count{i.first, std::get<0>(i.second), std::get<1>(i.second), std::get<2>(i.second)});
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetOutputKeys::Request& req, GetOutputKeys::Response& res)
{
try
{
for (const auto& i : req.outputs)
{
crypto::public_key key;
rct::key mask;
bool unlocked;
m_core.get_blockchain_storage().get_output_key_mask_unlocked(i.amount, i.index, key, mask, unlocked, i.output_type);
res.keys.emplace_back(output_key_mask_unlocked{key, mask, unlocked});
}
}
catch (const std::exception& e)
{
res.status = Message::STATUS_FAILED;
res.error_details = e.what();
return;
}
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetRPCVersion::Request& req, GetRPCVersion::Response& res)
{
res.version = DAEMON_RPC_VERSION_ZMQ;
res.status = Message::STATUS_OK;
}
void DaemonHandler::handle(const GetPerKBFeeEstimate::Request& req, GetPerKBFeeEstimate::Response& res)
{
res.estimated_fee_per_kb = m_core.get_blockchain_storage().get_dynamic_per_kb_fee_estimate(req.num_grace_blocks);
res.status = Message::STATUS_OK;
}
bool DaemonHandler::getBlockHeaderByHash(const crypto::hash& hash_in, cryptonote::rpc::BlockHeaderResponse& header)
{
block b;
if (!m_core.get_block_by_hash(hash_in, b))
{
return false;
}
header.hash = hash_in;
if (b.miner_tx.vin.size() != 1 || b.miner_tx.vin.front().type() != typeid(txin_gen))
{
return false;
}
header.height = boost::get<txin_gen>(b.miner_tx.vin.front()).height;
header.major_version = b.major_version;
header.minor_version = b.minor_version;
header.timestamp = b.timestamp;
header.nonce = b.nonce;
header.prev_id = b.prev_id;
header.depth = m_core.get_current_blockchain_height() - header.height - 1;
header.reward = 0;
for (const auto& out : b.miner_tx.vout)
{
header.reward += out.amount;
}
header.difficulty = m_core.get_blockchain_storage().block_difficulty(header.height);
return true;
}
std::string DaemonHandler::handle(const std::string& request)
{
MDEBUG("Handling RPC request: " << request);
Message* resp_message = NULL;
try
{
FullMessage req_full(request, true);
rapidjson::Value& req_json = req_full.getMessage();
const std::string request_type = req_full.getRequestType();
// create correct Message subclass and call handle() on it
REQ_RESP_TYPES_MACRO(request_type, GetHeight, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetBlocksFast, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetHashesFast, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetTransactions, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, KeyImagesSpent, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetTxGlobalOutputIndices, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetRandomOutputsForAmounts, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, SendRawTx, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetInfo, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, StartMining, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, StopMining, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, MiningStatus, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, SaveBC, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetBlockHash, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetLastBlockHeader, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetBlockHeaderByHash, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetBlockHeaderByHeight, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetBlockHeadersByHeight, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetPeerList, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, SetLogLevel, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetTransactionPool, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, HardForkInfo, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetOutputHistogram, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetOutputKeys, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetRPCVersion, req_json, resp_message, handle);
REQ_RESP_TYPES_MACRO(request_type, GetPerKBFeeEstimate, req_json, resp_message, handle);
// if none of the request types matches
if (resp_message == NULL)
{
return BAD_REQUEST(request_type, req_full.getID());
}
FullMessage resp_full = FullMessage::responseMessage(resp_message, req_full.getID());
const std::string response = resp_full.getJson();
delete resp_message;
resp_message = NULL;
MDEBUG("Returning RPC response: " << response);
return response;
}
catch (const std::exception& e)
{
if (resp_message)
{
delete resp_message;
}
return BAD_JSON(e.what());
}
}
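  // Note on dispatch: REQ_RESP_TYPES_MACRO is defined elsewhere in the RPC
  // sources. A plausible expansion for a single message type, shown purely as
  // an illustrative sketch (the real macro body may differ), is:
  //
  //   if (request_type == GetHeight::name)
  //   {
  //     GetHeight::Request request;
  //     request.fromJson(req_json);
  //     auto* response = new GetHeight::Response();
  //     handle(request, *response);
  //     resp_message = response;
  //   }
  //
  // i.e. each invocation compares the runtime request name against one
  // message type, deserializes the request, calls the matching handle()
  // overload above, and stores the heap-allocated response in resp_message.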
} // namespace rpc
} // namespace cryptonote
|
#include <bits/stdc++.h>
const int Nmax = 1e6;
using namespace std;
/**
   Input encoding: 'I' = 1, 'E' = 0.
**/
char str[Nmax];
int N, K;
const int DIGITS = 4;           // radix-sort passes
const int R = 32 / DIGITS;      // bits per pass
const int radix = 1 << R;       // buckets per pass (256)
const int mask = radix - 1;
int A[Nmax], B[Nmax], buckets[radix], newN;
void RadixSort()
{
    // LSD radix sort of A[0..newN) in DIGITS passes of R bits each.
    for ( int d = 0, shift = 0; d < DIGITS; d++, shift += R )
    {
        // Count occurrences of each R-bit digit.
        for ( int i = 0; i < radix; ++i )
            buckets[i] = 0;
        for ( int i = 0; i < newN; ++i )
            buckets[ ( A[i] >> shift ) & mask ]++;
        // Prefix sums turn counts into one-past-the-end positions.
        for ( int i = 1; i < radix; ++i )
            buckets[i] += buckets[i - 1];
        // Place elements stably, scanning right to left.
        for ( int i = newN - 1; i >= 0; i-- )
            B[ --buckets[ ( A[i] >> shift ) & mask ] ] = A[i];
        for ( int i = 0; i < newN; ++i )
            A[i] = B[i];
    }
}
// True if bit i of n is set.
inline bool bit( int n, int i )
{
    return ( n & ( 1 << i ) ) != 0;
}
int main()
{
FILE *f = fopen("rocker.in", "r");
FILE *g = fopen("rocker.out", "w");
fscanf(f, "%d %d\n", &N, &K);
fscanf(f, "%s", str);
    int pw = 1 << K;
    // Pack the first K characters into a K-bit integer ('I' -> 1, 'E' -> 0).
    int sir = 0;
    for ( int i = 0; i < K; ++i )
    {
        sir = sir * 2 + ( str[i] == 'I' );
    }
    A[ newN++ ] = sir;
    // Slide the window across the rest of the string, dropping the
    // oldest (top) bit at each step.
    for ( int i = K; i < N; ++i )
    {
        sir = sir * 2 + ( str[i] == 'I' );
        if ( sir & pw )
            sir ^= pw;
        A[ newN++ ] = sir;
    }
RadixSort();
    // Adjacent windows in sorted order share the longest prefixes; count
    // matching bits from the most significant (bit K-1) downward.
    for ( int i = 0; i < newN - 1; ++i )
{
int sol = 0;
for ( int j = K - 1; j >= 0; j-- )
{
if ( bit( A[i], j ) != bit( A[i + 1], j ) )
break;
else
sol++;
}
fprintf(g, "%d\n", sol);
}
return 0;
}
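/*
   Worked example (illustrative): with K = 3 the window "IEI" packs to
   binary 101 = 5 ('I' -> 1, 'E' -> 0), and the program builds all
   N - K + 1 such windows. Radix sorting places the most similar windows
   next to each other, so the final loop prints, for each adjacent pair in
   sorted order, the length of their longest common leading bit-string.
*/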
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ui/gfx/skbitmap_operations.h"
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include "base/logging.h"
#include "third_party/skia/include/core/SkBitmap.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkColorFilter.h"
#include "third_party/skia/include/core/SkColorPriv.h"
#include "third_party/skia/include/core/SkUnPreMultiply.h"
#include "third_party/skia/include/effects/SkBlurImageFilter.h"
#include "ui/gfx/geometry/insets.h"
#include "ui/gfx/geometry/point.h"
#include "ui/gfx/geometry/size.h"
// static
SkBitmap SkBitmapOperations::CreateInvertedBitmap(const SkBitmap& image) {
DCHECK(image.colorType() == kN32_SkColorType);
SkAutoLockPixels lock_image(image);
SkBitmap inverted;
inverted.allocN32Pixels(image.width(), image.height());
for (int y = 0; y < image.height(); ++y) {
uint32_t* image_row = image.getAddr32(0, y);
uint32_t* dst_row = inverted.getAddr32(0, y);
for (int x = 0; x < image.width(); ++x) {
uint32_t image_pixel = image_row[x];
dst_row[x] = (image_pixel & 0xFF000000) |
(0x00FFFFFF - (image_pixel & 0x00FFFFFF));
}
}
return inverted;
}
// static
SkBitmap SkBitmapOperations::CreateBlendedBitmap(const SkBitmap& first,
const SkBitmap& second,
double alpha) {
DCHECK((alpha >= 0) && (alpha <= 1));
DCHECK(first.width() == second.width());
DCHECK(first.height() == second.height());
DCHECK(first.bytesPerPixel() == second.bytesPerPixel());
DCHECK(first.colorType() == kN32_SkColorType);
// Optimize for case where we won't need to blend anything.
static const double alpha_min = 1.0 / 255;
static const double alpha_max = 254.0 / 255;
if (alpha < alpha_min)
return first;
else if (alpha > alpha_max)
return second;
SkAutoLockPixels lock_first(first);
SkAutoLockPixels lock_second(second);
SkBitmap blended;
blended.allocN32Pixels(first.width(), first.height());
double first_alpha = 1 - alpha;
for (int y = 0; y < first.height(); ++y) {
uint32_t* first_row = first.getAddr32(0, y);
uint32_t* second_row = second.getAddr32(0, y);
uint32_t* dst_row = blended.getAddr32(0, y);
for (int x = 0; x < first.width(); ++x) {
uint32_t first_pixel = first_row[x];
uint32_t second_pixel = second_row[x];
int a = static_cast<int>((SkColorGetA(first_pixel) * first_alpha) +
(SkColorGetA(second_pixel) * alpha));
int r = static_cast<int>((SkColorGetR(first_pixel) * first_alpha) +
(SkColorGetR(second_pixel) * alpha));
int g = static_cast<int>((SkColorGetG(first_pixel) * first_alpha) +
(SkColorGetG(second_pixel) * alpha));
int b = static_cast<int>((SkColorGetB(first_pixel) * first_alpha) +
(SkColorGetB(second_pixel) * alpha));
dst_row[x] = SkColorSetARGB(a, r, g, b);
}
}
return blended;
}
// static
SkBitmap SkBitmapOperations::CreateMaskedBitmap(const SkBitmap& rgb,
const SkBitmap& alpha) {
DCHECK(rgb.width() == alpha.width());
DCHECK(rgb.height() == alpha.height());
DCHECK(rgb.bytesPerPixel() == alpha.bytesPerPixel());
DCHECK(rgb.colorType() == kN32_SkColorType);
DCHECK(alpha.colorType() == kN32_SkColorType);
SkBitmap masked;
masked.allocN32Pixels(rgb.width(), rgb.height());
SkAutoLockPixels lock_rgb(rgb);
SkAutoLockPixels lock_alpha(alpha);
SkAutoLockPixels lock_masked(masked);
for (int y = 0; y < masked.height(); ++y) {
uint32_t* rgb_row = rgb.getAddr32(0, y);
uint32_t* alpha_row = alpha.getAddr32(0, y);
uint32_t* dst_row = masked.getAddr32(0, y);
for (int x = 0; x < masked.width(); ++x) {
unsigned alpha = SkGetPackedA32(alpha_row[x]);
unsigned scale = SkAlpha255To256(alpha);
dst_row[x] = SkAlphaMulQ(rgb_row[x], scale);
}
}
return masked;
}
// static
SkBitmap SkBitmapOperations::CreateButtonBackground(SkColor color,
const SkBitmap& image,
const SkBitmap& mask) {
// Despite this assert, it seems like image is actually unpremultiplied.
// The math producing dst_row[x] below is a correct SrcOver when
// bg_* are premultiplied and img_* are unpremultiplied.
DCHECK(image.colorType() == kN32_SkColorType);
DCHECK(mask.colorType() == kN32_SkColorType);
SkBitmap background;
background.allocN32Pixels(mask.width(), mask.height());
double bg_a = SkColorGetA(color);
double bg_r = SkColorGetR(color) * (bg_a / 255.0);
double bg_g = SkColorGetG(color) * (bg_a / 255.0);
double bg_b = SkColorGetB(color) * (bg_a / 255.0);
SkAutoLockPixels lock_mask(mask);
SkAutoLockPixels lock_image(image);
SkAutoLockPixels lock_background(background);
for (int y = 0; y < mask.height(); ++y) {
uint32_t* dst_row = background.getAddr32(0, y);
uint32_t* image_row = image.getAddr32(0, y % image.height());
uint32_t* mask_row = mask.getAddr32(0, y);
for (int x = 0; x < mask.width(); ++x) {
uint32_t image_pixel = image_row[x % image.width()];
double img_a = SkColorGetA(image_pixel);
double img_r = SkColorGetR(image_pixel);
double img_g = SkColorGetG(image_pixel);
double img_b = SkColorGetB(image_pixel);
double img_alpha = img_a / 255.0;
double img_inv = 1 - img_alpha;
double mask_a = static_cast<double>(SkColorGetA(mask_row[x])) / 255.0;
dst_row[x] = SkColorSetARGB(
// This is pretty weird; why not the usual SrcOver alpha?
static_cast<int>(std::min(255.0, bg_a + img_a) * mask_a),
static_cast<int>(((bg_r * img_inv) + (img_r * img_alpha)) * mask_a),
static_cast<int>(((bg_g * img_inv) + (img_g * img_alpha)) * mask_a),
static_cast<int>(((bg_b * img_inv) + (img_b * img_alpha)) * mask_a));
}
}
return background;
}
namespace {
namespace HSLShift {
// TODO(viettrungluu): Some things have yet to be optimized at all.
// Notes on and conventions used in the following code
//
// Conventions:
// - R, G, B, A = obvious; as variables: |r|, |g|, |b|, |a| (see also below)
// - H, S, L = obvious; as variables: |h|, |s|, |l| (see also below)
// - variables derived from S, L shift parameters: |sdec| and |sinc| for S
// increase and decrease factors, |ldec| and |linc| for L (see also below)
//
// To try to optimize HSL shifts, we do several things:
// - Avoid unpremultiplying (then processing) then premultiplying. This means
// that R, G, B values (and also L, but not H and S) should be treated as
// having a range of 0..A (where A is alpha).
// - Do things in integer/fixed-point. This avoids costly conversions between
// floating-point and integer, though I should study the tradeoff more
// carefully (presumably, at some point of processing complexity, converting
// and processing using simpler floating-point code will begin to win in
// performance). Also to be studied is the speed/type of floating point
// conversions; see, e.g., <http://www.stereopsis.com/sree/fpu2006.html>.
//
// Conventions for fixed-point arithmetic
// - Each function has a constant denominator (called |den|, which should be a
// power of 2), appropriate for the computations done in that function.
// - A value |x| is then typically represented by a numerator, named |x_num|,
// so that its actual value is |x_num / den| (casting to floating-point
// before division).
// - To obtain |x_num| from |x|, simply multiply by |den|, i.e., |x_num = x *
// den| (casting appropriately).
// - When necessary, a value |x| may also be represented as a numerator over
//   the denominator squared (set |den2 = den * den|). In such a case, the
//   corresponding variable is called |x_num2| (so that its actual value is
//   |x_num2 / den2|).
// - The representation of the product of |x| and |y| is called |x_y_num| if
//   |x * y == x_y_num / den|, and |x_y_num2| if |x * y == x_y_num2 / den2|. In
//   the latter case, notice that one can calculate |x_y_num2 = x_num * y_num|.
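// Worked example of the convention above (illustrative): with den = 65536
// and a lightness shift l = 0.25, the numerator is
// l_num = static_cast<uint32_t>(0.25 * 2 * den) = 32768; scaling a channel
// value r by 2*l is then just r * l_num / den in integer arithmetic, with
// the single division done last to limit rounding error.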
// Routine used to process a line; typically specialized for specific kinds of
// HSL shifts (to optimize).
typedef void (*LineProcessor)(const color_utils::HSL&,
const SkPMColor*,
SkPMColor*,
int width);
enum OperationOnH { kOpHNone = 0, kOpHShift, kNumHOps };
enum OperationOnS { kOpSNone = 0, kOpSDec, kOpSInc, kNumSOps };
enum OperationOnL { kOpLNone = 0, kOpLDec, kOpLInc, kNumLOps };
// Epsilon used to judge when shift values are close enough to various critical
// values (typically 0.5, which yields a no-op for S and L shifts). 1/256 should
// be small enough, but let's play it safe.
const double epsilon = 0.0005;
// Line processor: default/universal (i.e., old-school).
void LineProcDefault(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
for (int x = 0; x < width; x++) {
out[x] = SkPreMultiplyColor(color_utils::HSLShift(
SkUnPreMultiply::PMColorToColor(in[x]), hsl_shift));
}
}
// Line processor: no-op (i.e., copy).
void LineProcCopy(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s < 0 || fabs(hsl_shift.s - 0.5) < HSLShift::epsilon);
DCHECK(hsl_shift.l < 0 || fabs(hsl_shift.l - 0.5) < HSLShift::epsilon);
memcpy(out, in, static_cast<size_t>(width) * sizeof(out[0]));
}
// Line processor: H no-op, S no-op, L decrease.
void LineProcHnopSnopLdec(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
const uint32_t den = 65536;
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s < 0 || fabs(hsl_shift.s - 0.5) < HSLShift::epsilon);
DCHECK(hsl_shift.l <= 0.5 - HSLShift::epsilon && hsl_shift.l >= 0);
uint32_t ldec_num = static_cast<uint32_t>(hsl_shift.l * 2 * den);
for (int x = 0; x < width; x++) {
uint32_t a = SkGetPackedA32(in[x]);
uint32_t r = SkGetPackedR32(in[x]);
uint32_t g = SkGetPackedG32(in[x]);
uint32_t b = SkGetPackedB32(in[x]);
r = r * ldec_num / den;
g = g * ldec_num / den;
b = b * ldec_num / den;
out[x] = SkPackARGB32(a, r, g, b);
}
}
// Line processor: H no-op, S no-op, L increase.
void LineProcHnopSnopLinc(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
const uint32_t den = 65536;
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s < 0 || fabs(hsl_shift.s - 0.5) < HSLShift::epsilon);
DCHECK(hsl_shift.l >= 0.5 + HSLShift::epsilon && hsl_shift.l <= 1);
uint32_t linc_num = static_cast<uint32_t>((hsl_shift.l - 0.5) * 2 * den);
for (int x = 0; x < width; x++) {
uint32_t a = SkGetPackedA32(in[x]);
uint32_t r = SkGetPackedR32(in[x]);
uint32_t g = SkGetPackedG32(in[x]);
uint32_t b = SkGetPackedB32(in[x]);
r += (a - r) * linc_num / den;
g += (a - g) * linc_num / den;
b += (a - b) * linc_num / den;
out[x] = SkPackARGB32(a, r, g, b);
}
}
// Saturation changes: modifications in RGB
//
// (Note that as a further complication, the values we deal in are
// premultiplied, so R/G/B values must be in the range 0..A. For mathematical
// purposes, one may as well use r=R/A, g=G/A, b=B/A. Without loss of
// generality, assume that R/G/B values are in the range 0..1.)
//
// Let Max = max(R,G,B), Min = min(R,G,B), and Med be the median value. Then L =
// (Max+Min)/2. If L is to remain constant, Max+Min must also remain constant.
//
// For H to remain constant, first, the (numerical) order of R/G/B (from
// smallest to largest) must remain the same. Second, all the ratios
// (R-G)/(Max-Min), (R-B)/(Max-Min), (G-B)/(Max-Min) must remain constant (of
// course, if Max = Min, then S = 0 and no saturation change is well-defined,
// since H is not well-defined).
//
// Let C_max be a colour with value Max, C_min be one with value Min, and C_med
// the remaining colour. Increasing saturation (to the maximum) is accomplished
// by increasing the value of C_max while simultaneously decreasing C_min and
// changing C_med so that the ratios are maintained; for the latter, it suffices
// to keep (C_med-C_min)/(C_max-C_min) constant (and equal to
// (Med-Min)/(Max-Min)).
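// Worked example (illustrative): for premultiplied (R, G, B) =
// (0.8, 0.5, 0.2), Max = 0.8, Min = 0.2, Med = 0.5, so L = (Max+Min)/2 =
// 0.5 and (Med-Min)/(Max-Min) = 0.5. Full desaturation sends all three
// channels to L = 0.5; full saturation sends Max -> 1 and Min -> 0 while
// Med stays at 0.5, preserving the 0.5 ratio and therefore the hue.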
// Line processor: H no-op, S decrease, L no-op.
void LineProcHnopSdecLnop(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s >= 0 && hsl_shift.s <= 0.5 - HSLShift::epsilon);
DCHECK(hsl_shift.l < 0 || fabs(hsl_shift.l - 0.5) < HSLShift::epsilon);
const int32_t denom = 65536;
int32_t s_numer = static_cast<int32_t>(hsl_shift.s * 2 * denom);
for (int x = 0; x < width; x++) {
int32_t a = static_cast<int32_t>(SkGetPackedA32(in[x]));
int32_t r = static_cast<int32_t>(SkGetPackedR32(in[x]));
int32_t g = static_cast<int32_t>(SkGetPackedG32(in[x]));
int32_t b = static_cast<int32_t>(SkGetPackedB32(in[x]));
int32_t vmax, vmin;
if (r > g) { // This uses 3 compares rather than 4.
vmax = std::max(r, b);
vmin = std::min(g, b);
} else {
vmax = std::max(g, b);
vmin = std::min(r, b);
}
// Use denom * L to avoid rounding.
int32_t denom_l = (vmax + vmin) * (denom / 2);
int32_t s_numer_l = (vmax + vmin) * s_numer / 2;
r = (denom_l + r * s_numer - s_numer_l) / denom;
g = (denom_l + g * s_numer - s_numer_l) / denom;
b = (denom_l + b * s_numer - s_numer_l) / denom;
out[x] = SkPackARGB32(a, r, g, b);
}
}
// Line processor: H no-op, S decrease, L decrease.
void LineProcHnopSdecLdec(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s >= 0 && hsl_shift.s <= 0.5 - HSLShift::epsilon);
DCHECK(hsl_shift.l >= 0 && hsl_shift.l <= 0.5 - HSLShift::epsilon);
// Can't be too big since we need room for denom*denom and a bit for sign.
const int32_t denom = 1024;
int32_t l_numer = static_cast<int32_t>(hsl_shift.l * 2 * denom);
int32_t s_numer = static_cast<int32_t>(hsl_shift.s * 2 * denom);
for (int x = 0; x < width; x++) {
int32_t a = static_cast<int32_t>(SkGetPackedA32(in[x]));
int32_t r = static_cast<int32_t>(SkGetPackedR32(in[x]));
int32_t g = static_cast<int32_t>(SkGetPackedG32(in[x]));
int32_t b = static_cast<int32_t>(SkGetPackedB32(in[x]));
int32_t vmax, vmin;
if (r > g) { // This uses 3 compares rather than 4.
vmax = std::max(r, b);
vmin = std::min(g, b);
} else {
vmax = std::max(g, b);
vmin = std::min(r, b);
}
// Use denom * L to avoid rounding.
int32_t denom_l = (vmax + vmin) * (denom / 2);
int32_t s_numer_l = (vmax + vmin) * s_numer / 2;
r = (denom_l + r * s_numer - s_numer_l) * l_numer / (denom * denom);
g = (denom_l + g * s_numer - s_numer_l) * l_numer / (denom * denom);
b = (denom_l + b * s_numer - s_numer_l) * l_numer / (denom * denom);
out[x] = SkPackARGB32(a, r, g, b);
}
}
// Line processor: H no-op, S decrease, L increase.
void LineProcHnopSdecLinc(const color_utils::HSL& hsl_shift,
const SkPMColor* in,
SkPMColor* out,
int width) {
DCHECK(hsl_shift.h < 0);
DCHECK(hsl_shift.s >= 0 && hsl_shift.s <= 0.5 - HSLShift::epsilon);
DCHECK(hsl_shift.l >= 0.5 + HSLShift::epsilon && hsl_shift.l <= 1);
// Can't be too big since we need room for denom*denom and a bit for sign.
const int32_t denom = 1024;
int32_t l_numer = static_cast<int32_t>((hsl_shift.l - 0.5) * 2 * denom);
int32_t s_numer = static_cast<int32_t>(hsl_shift.s * 2 * denom);
for (int x = 0; x < width; x++) {
int32_t a = static_cast<int32_t>(SkGetPackedA32(in[x]));
int32_t r = static_cast<int32_t>(SkGetPackedR32(in[x]));
int32_t g = static_cast<int32_t>(SkGetPackedG32(in[x]));
int32_t b = static_cast<int32_t>(SkGetPackedB32(in[x]));
int32_t vmax, vmin;
if (r > g) { // This uses 3 compares rather than 4.
vmax = std::max(r, b);
vmin = std::min(g, b);
} else {
vmax = std::max(g, b);
vmin = std::min(r, b);
}
// Use denom * L to avoid rounding.
int32_t denom_l = (vmax + vmin) * (denom / 2);
int32_t s_numer_l = (vmax + vmin) * s_numer / 2;
r = denom_l + r * s_numer - s_numer_l;
g = denom_l + g * s_numer - s_numer_l;
b = denom_l + b * s_numer - s_numer_l;
r = (r * denom + (a * denom - r) * l_numer) / (denom * denom);
g = (g * denom + (a * denom - g) * l_numer) / (denom * denom);
b = (b * denom + (a * denom - b) * l_numer) / (denom * denom);
out[x] = SkPackARGB32(a, r, g, b);
}
}
const LineProcessor kLineProcessors[kNumHOps][kNumSOps][kNumLOps] = {
{ // H: kOpHNone
{ // S: kOpSNone
LineProcCopy, // L: kOpLNone
LineProcHnopSnopLdec, // L: kOpLDec
LineProcHnopSnopLinc // L: kOpLInc
},
{ // S: kOpSDec
LineProcHnopSdecLnop, // L: kOpLNone
LineProcHnopSdecLdec, // L: kOpLDec
LineProcHnopSdecLinc // L: kOpLInc
},
{ // S: kOpSInc
LineProcDefault, // L: kOpLNone
LineProcDefault, // L: kOpLDec
LineProcDefault // L: kOpLInc
}
},
{ // H: kOpHShift
{ // S: kOpSNone
LineProcDefault, // L: kOpLNone
LineProcDefault, // L: kOpLDec
LineProcDefault // L: kOpLInc
},
{ // S: kOpSDec
LineProcDefault, // L: kOpLNone
LineProcDefault, // L: kOpLDec
LineProcDefault // L: kOpLInc
},
{ // S: kOpSInc
LineProcDefault, // L: kOpLNone
LineProcDefault, // L: kOpLDec
LineProcDefault // L: kOpLInc
}
}
};
} // namespace HSLShift
} // namespace
// static
SkBitmap SkBitmapOperations::CreateHSLShiftedBitmap(
const SkBitmap& bitmap,
const color_utils::HSL& hsl_shift) {
// Default to NOPs.
HSLShift::OperationOnH H_op = HSLShift::kOpHNone;
HSLShift::OperationOnS S_op = HSLShift::kOpSNone;
HSLShift::OperationOnL L_op = HSLShift::kOpLNone;
if (hsl_shift.h >= 0 && hsl_shift.h <= 1)
H_op = HSLShift::kOpHShift;
// Saturation shift: 0 -> fully desaturate, 0.5 -> NOP, 1 -> fully saturate.
if (hsl_shift.s >= 0 && hsl_shift.s <= (0.5 - HSLShift::epsilon))
S_op = HSLShift::kOpSDec;
else if (hsl_shift.s >= (0.5 + HSLShift::epsilon))
S_op = HSLShift::kOpSInc;
// Lightness shift: 0 -> black, 0.5 -> NOP, 1 -> white.
if (hsl_shift.l >= 0 && hsl_shift.l <= (0.5 - HSLShift::epsilon))
L_op = HSLShift::kOpLDec;
else if (hsl_shift.l >= (0.5 + HSLShift::epsilon))
L_op = HSLShift::kOpLInc;
HSLShift::LineProcessor line_proc =
HSLShift::kLineProcessors[H_op][S_op][L_op];
DCHECK(bitmap.empty() == false);
DCHECK(bitmap.colorType() == kN32_SkColorType);
SkBitmap shifted;
shifted.allocN32Pixels(bitmap.width(), bitmap.height());
SkAutoLockPixels lock_bitmap(bitmap);
SkAutoLockPixels lock_shifted(shifted);
// Loop through the pixels of the original bitmap.
for (int y = 0; y < bitmap.height(); ++y) {
SkPMColor* pixels = bitmap.getAddr32(0, y);
SkPMColor* tinted_pixels = shifted.getAddr32(0, y);
(*line_proc)(hsl_shift, pixels, tinted_pixels, bitmap.width());
}
return shifted;
}
// static
SkBitmap SkBitmapOperations::CreateTiledBitmap(const SkBitmap& source,
int src_x, int src_y,
int dst_w, int dst_h) {
DCHECK(source.colorType() == kN32_SkColorType);
SkBitmap cropped;
cropped.allocN32Pixels(dst_w, dst_h);
SkAutoLockPixels lock_source(source);
SkAutoLockPixels lock_cropped(cropped);
// Loop through the pixels of the original bitmap.
for (int y = 0; y < dst_h; ++y) {
int y_pix = (src_y + y) % source.height();
while (y_pix < 0)
y_pix += source.height();
uint32_t* source_row = source.getAddr32(0, y_pix);
uint32_t* dst_row = cropped.getAddr32(0, y);
for (int x = 0; x < dst_w; ++x) {
int x_pix = (src_x + x) % source.width();
while (x_pix < 0)
x_pix += source.width();
dst_row[x] = source_row[x_pix];
}
}
return cropped;
}
// static
SkBitmap SkBitmapOperations::DownsampleByTwoUntilSize(const SkBitmap& bitmap,
int min_w, int min_h) {
if ((bitmap.width() <= min_w) || (bitmap.height() <= min_h) ||
(min_w < 0) || (min_h < 0))
return bitmap;
// Since bitmaps are refcounted, this copy will be fast.
SkBitmap current = bitmap;
while ((current.width() >= min_w * 2) && (current.height() >= min_h * 2) &&
(current.width() > 1) && (current.height() > 1))
current = DownsampleByTwo(current);
return current;
}
// static
SkBitmap SkBitmapOperations::DownsampleByTwo(const SkBitmap& bitmap) {
// Handle the nop case.
if ((bitmap.width() <= 1) || (bitmap.height() <= 1))
return bitmap;
SkBitmap result;
result.allocN32Pixels((bitmap.width() + 1) / 2, (bitmap.height() + 1) / 2);
SkAutoLockPixels lock(bitmap);
const int resultLastX = result.width() - 1;
const int srcLastX = bitmap.width() - 1;
for (int dest_y = 0; dest_y < result.height(); ++dest_y) {
const int src_y = dest_y << 1;
const SkPMColor* SK_RESTRICT cur_src0 = bitmap.getAddr32(0, src_y);
const SkPMColor* SK_RESTRICT cur_src1 = cur_src0;
if (src_y + 1 < bitmap.height())
cur_src1 = bitmap.getAddr32(0, src_y + 1);
SkPMColor* SK_RESTRICT cur_dst = result.getAddr32(0, dest_y);
for (int dest_x = 0; dest_x <= resultLastX; ++dest_x) {
// This code is based on downsampleby2_proc32 in SkBitmap.cpp. It is very
// clever in that it does two channels at once: alpha and green ("ag")
// and red and blue ("rb"). Each channel gets averaged across 4 pixels
// to get the result.
int bump_x = (dest_x << 1) < srcLastX;
SkPMColor tmp, ag, rb;
// Top left pixel of the 2x2 block.
tmp = cur_src0[0];
ag = (tmp >> 8) & 0xFF00FF;
rb = tmp & 0xFF00FF;
// Top right pixel of the 2x2 block.
tmp = cur_src0[bump_x];
ag += (tmp >> 8) & 0xFF00FF;
rb += tmp & 0xFF00FF;
// Bottom left pixel of the 2x2 block.
tmp = cur_src1[0];
ag += (tmp >> 8) & 0xFF00FF;
rb += tmp & 0xFF00FF;
// Bottom right pixel of the 2x2 block.
tmp = cur_src1[bump_x];
ag += (tmp >> 8) & 0xFF00FF;
rb += tmp & 0xFF00FF;
// Put the channels back together, dividing each by 4 to get the average.
      // |ag| has the alpha and green channels shifted right by 8 bits from
      // where they should end up, so shifting left by 6 puts them in the
      // correct position divided by 4.
*cur_dst++ = ((rb >> 2) & 0xFF00FF) | ((ag << 6) & 0xFF00FF00);
cur_src0 += 2;
cur_src1 += 2;
}
}
return result;
}
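// Note on the packed averaging above (illustrative): masking a 32-bit ARGB
// pixel with 0xFF00FF keeps two alternating 8-bit channels, each with 8
// empty bits above it, so summing four pixels (at most 4 * 255 = 1020 per
// channel) cannot carry into the neighboring lane. The divide-by-4 is then
// one shift per pair: (rb >> 2) for red/blue, and (ag << 6) for
// alpha/green, which folds the << 8 that restores their byte positions
// together with the >> 2 average.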
// static
SkBitmap SkBitmapOperations::UnPreMultiply(const SkBitmap& bitmap) {
if (bitmap.isNull())
return bitmap;
if (bitmap.isOpaque())
return bitmap;
const SkImageInfo& opaque_info =
bitmap.info().makeAlphaType(kOpaque_SkAlphaType);
SkBitmap opaque_bitmap;
opaque_bitmap.allocPixels(opaque_info);
{
SkAutoLockPixels bitmap_lock(bitmap);
SkAutoLockPixels opaque_bitmap_lock(opaque_bitmap);
for (int y = 0; y < opaque_bitmap.height(); y++) {
for (int x = 0; x < opaque_bitmap.width(); x++) {
uint32_t src_pixel = *bitmap.getAddr32(x, y);
uint32_t* dst_pixel = opaque_bitmap.getAddr32(x, y);
SkColor unmultiplied = SkUnPreMultiply::PMColorToColor(src_pixel);
*dst_pixel = unmultiplied;
}
}
}
return opaque_bitmap;
}
// static
SkBitmap SkBitmapOperations::CreateTransposedBitmap(const SkBitmap& image) {
DCHECK(image.colorType() == kN32_SkColorType);
SkBitmap transposed;
transposed.allocN32Pixels(image.height(), image.width());
SkAutoLockPixels lock_image(image);
SkAutoLockPixels lock_transposed(transposed);
for (int y = 0; y < image.height(); ++y) {
uint32_t* image_row = image.getAddr32(0, y);
for (int x = 0; x < image.width(); ++x) {
uint32_t* dst = transposed.getAddr32(y, x);
*dst = image_row[x];
}
}
return transposed;
}
// static
SkBitmap SkBitmapOperations::CreateColorMask(const SkBitmap& bitmap,
SkColor c) {
DCHECK(bitmap.colorType() == kN32_SkColorType);
SkBitmap color_mask;
color_mask.allocN32Pixels(bitmap.width(), bitmap.height());
color_mask.eraseARGB(0, 0, 0, 0);
SkCanvas canvas(color_mask);
SkPaint paint;
paint.setColorFilter(SkColorFilter::MakeModeFilter(c, SkBlendMode::kSrcIn));
canvas.drawBitmap(bitmap, SkIntToScalar(0), SkIntToScalar(0), &paint);
return color_mask;
}
// static
SkBitmap SkBitmapOperations::CreateDropShadow(
const SkBitmap& bitmap,
const gfx::ShadowValues& shadows) {
DCHECK(bitmap.colorType() == kN32_SkColorType);
// Shadow margin insets are negative values because they grow outside.
// Negate them here as grow direction is not important and only pixel value
// is of interest here.
gfx::Insets shadow_margin = -gfx::ShadowValue::GetMargin(shadows);
SkBitmap image_with_shadow;
image_with_shadow.allocN32Pixels(bitmap.width() + shadow_margin.width(),
bitmap.height() + shadow_margin.height());
image_with_shadow.eraseARGB(0, 0, 0, 0);
SkCanvas canvas(image_with_shadow);
canvas.translate(SkIntToScalar(shadow_margin.left()),
SkIntToScalar(shadow_margin.top()));
SkPaint paint;
for (size_t i = 0; i < shadows.size(); ++i) {
const gfx::ShadowValue& shadow = shadows[i];
SkBitmap shadow_image = SkBitmapOperations::CreateColorMask(bitmap,
shadow.color());
// The blur is halved to produce a shadow that correctly fits within the
// |shadow_margin|.
SkScalar sigma = SkDoubleToScalar(shadow.blur() / 2);
paint.setImageFilter(SkBlurImageFilter::Make(sigma, sigma, nullptr));
canvas.saveLayer(0, &paint);
canvas.drawBitmap(shadow_image,
SkIntToScalar(shadow.x()),
SkIntToScalar(shadow.y()));
canvas.restore();
}
canvas.drawBitmap(bitmap, SkIntToScalar(0), SkIntToScalar(0));
return image_with_shadow;
}
// static
SkBitmap SkBitmapOperations::Rotate(const SkBitmap& source,
RotationAmount rotation) {
// SkCanvas::drawBitmap() fails silently with unpremultiplied SkBitmap.
DCHECK_NE(source.info().alphaType(), kUnpremul_SkAlphaType);
SkBitmap result;
SkScalar angle = SkFloatToScalar(0.0f);
switch (rotation) {
case ROTATION_90_CW:
angle = SkFloatToScalar(90.0f);
result.allocN32Pixels(source.height(), source.width());
break;
case ROTATION_180_CW:
angle = SkFloatToScalar(180.0f);
result.allocN32Pixels(source.width(), source.height());
break;
case ROTATION_270_CW:
angle = SkFloatToScalar(270.0f);
result.allocN32Pixels(source.height(), source.width());
break;
}
SkCanvas canvas(result);
canvas.clear(SkColorSetARGB(0, 0, 0, 0));
canvas.translate(SkFloatToScalar(result.width() * 0.5f),
SkFloatToScalar(result.height() * 0.5f));
canvas.rotate(angle);
canvas.translate(-SkFloatToScalar(source.width() * 0.5f),
-SkFloatToScalar(source.height() * 0.5f));
canvas.drawBitmap(source, 0, 0);
canvas.flush();
return result;
}
|
//===----------------------------------------------------------------------===//
// DuckDB
//
// planner/logical_operator_visitor.hpp
//
//
//===----------------------------------------------------------------------===//
#pragma once
#include "common/common.hpp"
#include "planner/bound_tokens.hpp"
#include "planner/logical_tokens.hpp"
namespace duckdb {
//! The LogicalOperatorVisitor is an abstract base class that implements the
//! Visitor pattern on LogicalOperator.
class LogicalOperatorVisitor {
public:
	virtual ~LogicalOperatorVisitor() {}
virtual void VisitOperator(LogicalOperator &op);
virtual void VisitExpression(unique_ptr<Expression> *expression);
protected:
	//! Automatically calls the Visit method for LogicalOperator children of the current operator. Can be overridden
	//! to change this behavior.
	void VisitOperatorChildren(LogicalOperator &op);
	//! Automatically calls the Visit method for Expression children of the current operator. Can be overridden to
	//! change this behavior.
	void VisitOperatorExpressions(LogicalOperator &op);
	// The VisitExpressionChildren method is called at the end of every call to VisitExpression to recursively visit
	// all expressions in an expression tree. It can be overridden to prevent automatically visiting the entire tree.
	virtual void VisitExpressionChildren(Expression &expression);
virtual unique_ptr<Expression> VisitReplace(BoundAggregateExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundCaseExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundCastExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundColumnRefExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundComparisonExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundConjunctionExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundConstantExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundDefaultExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundFunctionExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundOperatorExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundReferenceExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundSubqueryExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundParameterExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(BoundWindowExpression &expr, unique_ptr<Expression> *expr_ptr);
virtual unique_ptr<Expression> VisitReplace(CommonSubExpression &expr, unique_ptr<Expression> *expr_ptr);
};
} // namespace duckdb
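// A minimal sketch of a concrete visitor (illustrative only; the assumption
// that returning nullptr from VisitReplace keeps the original expression in
// place follows the usual contract of this pattern and is not stated by
// this header):
//
//   class ConstantCounter : public duckdb::LogicalOperatorVisitor {
//   public:
//       size_t constants_seen = 0;
//
//   protected:
//       unique_ptr<Expression> VisitReplace(BoundConstantExpression &expr,
//                                           unique_ptr<Expression> *expr_ptr) override {
//           constants_seen++;
//           return nullptr; // assumed: keep the original expression
//       }
//   };
//
// Calling visitor.VisitOperator(root) walks the whole plan; a non-null
// return from VisitReplace substitutes the returned expression for
// *expr_ptr.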
|
//
// Copyright (C) 2017 Greg Landrum
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#define NO_IMPORT_ARRAY
#include <RDBoost/python.h>
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <numpy/arrayobject.h>
#include <boost/python/list.hpp>
#include <RDGeneral/Exceptions.h>
#include <GraphMol/RDKitBase.h>
#include <CoordGen/CoordGen.h>
namespace python = boost::python;
namespace RDKit {
namespace {
void SetCoordMap(CoordGen::CoordGenParams *self, python::dict &coordMap) {
self->coordMap.clear();
python::list ks = coordMap.keys();
for (unsigned int i = 0;
i < python::extract<unsigned int>(ks.attr("__len__")()); i++) {
unsigned int id = python::extract<unsigned int>(ks[i]);
self->coordMap[id] = python::extract<RDGeom::Point2D>(coordMap[id]);
}
}
void addCoordsHelper(ROMol &mol, python::object &params) {
CoordGen::CoordGenParams *ps = nullptr;
if (params != python::object()) {
ps = python::extract<CoordGen::CoordGenParams *>(params);
}
CoordGen::addCoords(mol, ps);
}
void SetTemplateMol(CoordGen::CoordGenParams *self, const ROMol *templ) {
self->templateMol = templ;
}
void SetDefaultTemplateFileDir(const std::string &dir) {
CoordGen::defaultParams.templateFileDir = dir;
}
} // end of anonymous namespace
struct coordgen_wrapper {
static void wrap() {
std::string docString = "";
python::class_<CoordGen::CoordGenParams>(
"CoordGenParams", "Parameters controlling coordinate generation")
.def(
"SetCoordMap", SetCoordMap,
"expects a dictionary of Point2D objects with template coordinates")
.def("SetTemplateMol", SetTemplateMol,
python::with_custodian_and_ward<1, 2>(),
"sets a molecule to be used as the template")
.def_readwrite("coordgenScaling",
&CoordGen::CoordGenParams::coordgenScaling,
"scaling factor for a single bond")
.def_readwrite("dbg_useConstrained",
&CoordGen::CoordGenParams::dbg_useConstrained,
"for debugging use")
.def_readwrite("dbg_useFixed", &CoordGen::CoordGenParams::dbg_useFixed,
"for debugging use")
.def_readwrite("templateFileDir",
&CoordGen::CoordGenParams::templateFileDir,
"directory containing the templates.mae file")
.def_readwrite("minimizeOnly", &CoordGen::CoordGenParams::minimizeOnly,
"uses coordgen's force field to cleanup the 2D "
"coordinates of the active conformation")
.def_readonly("sketcherBestPrecision",
&CoordGen::CoordGenParams::sketcherBestPrecision,
"highest quality (and slowest) precision setting")
.def_readonly("sketcherStandardPrecision",
&CoordGen::CoordGenParams::sketcherStandardPrecision,
"standard quality precision setting, the default for the "
"coordgen project")
.def_readonly("sketcherQuickPrecision",
&CoordGen::CoordGenParams::sketcherQuickPrecision,
"faster precision setting")
.def_readonly(
"sketcherCoarsePrecision",
&CoordGen::CoordGenParams::sketcherCoarsePrecision,
"\"coarse\" (fastest) precision setting, produces good-quality "
"coordinates"
" most of the time, this is the default setting for the RDKit")
.def_readwrite("minimizerPrecision",
&CoordGen::CoordGenParams::minimizerPrecision,
"controls sketcher precision")
.def_readwrite("treatBondsToMetalAsZOBs",
&CoordGen::CoordGenParams::treatBondsToMetalAsZeroOrder);
python::def("SetDefaultTemplateFileDir", SetDefaultTemplateFileDir);
docString =
"Add 2D coordinates.\n"
"ARGUMENTS:\n"
" - mol: molecule to modify\n"
" - params: (optional) parameters controlling the coordinate "
"generation\n"
"\n";
python::def("AddCoords", addCoordsHelper,
(python::arg("mol"), python::arg("params") = python::object()),
docString.c_str());
}
};
} // end of namespace RDKit
BOOST_PYTHON_MODULE(rdCoordGen) {
python::scope().attr("__doc__") =
"Module containing interface to the CoordGen library.";
RDKit::coordgen_wrapper::wrap();
}
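// A minimal sketch of the C++ call path this module wraps (illustrative;
// it assumes an already-constructed ROMol named `mol`):
//
//   RDKit::CoordGen::CoordGenParams params;
//   params.minimizerPrecision = params.sketcherStandardPrecision;
//   RDKit::CoordGen::addCoords(mol, &params); // adds 2D coordinates to mol
//
// The Python-visible AddCoords(mol, params) forwards to the same
// CoordGen::addCoords() call through addCoordsHelper() above.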
|
// Copyright (c) 2005-2014 Code Synthesis Tools CC
//
// This program was generated by CodeSynthesis XSD, an XML Schema to
// C++ data binding compiler.
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
// In addition, as a special exception, Code Synthesis Tools CC gives
// permission to link this program with the Xerces-C++ library (or with
// modified versions of Xerces-C++ that use the same license as Xerces-C++),
// and distribute linked combinations including the two. You must obey
// the GNU General Public License version 2 in all respects for all of
// the code used other than Xerces-C++. If you modify this copy of the
// program, you may extend this exception to your version of the program,
// but you are not obligated to do so. If you do not wish to do so, delete
// this exception statement from your version.
//
// Furthermore, Code Synthesis Tools CC makes a special exception for
// the Free/Libre and Open Source Software (FLOSS) which is described
// in the accompanying FLOSSE file.
//
#ifndef SIM_FLOW_COMPOUND_EQMT_VIRTUAL_SUPPLY_SUBSYSTEMS_COIL_SYSTEM_HEATING_DX_HXX
#define SIM_FLOW_COMPOUND_EQMT_VIRTUAL_SUPPLY_SUBSYSTEMS_COIL_SYSTEM_HEATING_DX_HXX
#ifndef XSD_USE_CHAR
#define XSD_USE_CHAR
#endif
#ifndef XSD_CXX_TREE_USE_CHAR
#define XSD_CXX_TREE_USE_CHAR
#endif
// Begin prologue.
//
//
// End prologue.
#include <xsd/cxx/config.hxx>
#if (XSD_INT_VERSION != 4000000L)
#error XSD runtime version mismatch
#endif
#include <xsd/cxx/pre.hxx>
#include <xsd/cxx/xml/char-utf8.hxx>
#include <xsd/cxx/tree/exceptions.hxx>
#include <xsd/cxx/tree/elements.hxx>
#include <xsd/cxx/tree/types.hxx>
#include <xsd/cxx/xml/error-handler.hxx>
#include <xsd/cxx/xml/dom/auto-ptr.hxx>
#include <xsd/cxx/tree/parsing.hxx>
#include <xsd/cxx/tree/parsing/byte.hxx>
#include <xsd/cxx/tree/parsing/unsigned-byte.hxx>
#include <xsd/cxx/tree/parsing/short.hxx>
#include <xsd/cxx/tree/parsing/unsigned-short.hxx>
#include <xsd/cxx/tree/parsing/int.hxx>
#include <xsd/cxx/tree/parsing/unsigned-int.hxx>
#include <xsd/cxx/tree/parsing/long.hxx>
#include <xsd/cxx/tree/parsing/unsigned-long.hxx>
#include <xsd/cxx/tree/parsing/boolean.hxx>
#include <xsd/cxx/tree/parsing/float.hxx>
#include <xsd/cxx/tree/parsing/double.hxx>
#include <xsd/cxx/tree/parsing/decimal.hxx>
namespace xml_schema
{
// anyType and anySimpleType.
//
typedef ::xsd::cxx::tree::type type;
typedef ::xsd::cxx::tree::simple_type< char, type > simple_type;
typedef ::xsd::cxx::tree::type container;
// 8-bit
//
typedef signed char byte;
typedef unsigned char unsigned_byte;
// 16-bit
//
typedef short short_;
typedef unsigned short unsigned_short;
// 32-bit
//
typedef int int_;
typedef unsigned int unsigned_int;
// 64-bit
//
typedef long long long_;
typedef unsigned long long unsigned_long;
// Supposed to be arbitrary-length integral types.
//
typedef long long integer;
typedef long long non_positive_integer;
typedef unsigned long long non_negative_integer;
typedef unsigned long long positive_integer;
typedef long long negative_integer;
// Boolean.
//
typedef bool boolean;
// Floating-point types.
//
typedef float float_;
typedef double double_;
typedef double decimal;
// String types.
//
typedef ::xsd::cxx::tree::string< char, simple_type > string;
typedef ::xsd::cxx::tree::normalized_string< char, string > normalized_string;
typedef ::xsd::cxx::tree::token< char, normalized_string > token;
typedef ::xsd::cxx::tree::name< char, token > name;
typedef ::xsd::cxx::tree::nmtoken< char, token > nmtoken;
typedef ::xsd::cxx::tree::nmtokens< char, simple_type, nmtoken > nmtokens;
typedef ::xsd::cxx::tree::ncname< char, name > ncname;
typedef ::xsd::cxx::tree::language< char, token > language;
// ID/IDREF.
//
typedef ::xsd::cxx::tree::id< char, ncname > id;
typedef ::xsd::cxx::tree::idref< char, ncname, type > idref;
typedef ::xsd::cxx::tree::idrefs< char, simple_type, idref > idrefs;
// URI.
//
typedef ::xsd::cxx::tree::uri< char, simple_type > uri;
// Qualified name.
//
typedef ::xsd::cxx::tree::qname< char, simple_type, uri, ncname > qname;
// Binary.
//
typedef ::xsd::cxx::tree::buffer< char > buffer;
typedef ::xsd::cxx::tree::base64_binary< char, simple_type > base64_binary;
typedef ::xsd::cxx::tree::hex_binary< char, simple_type > hex_binary;
// Date/time.
//
typedef ::xsd::cxx::tree::time_zone time_zone;
typedef ::xsd::cxx::tree::date< char, simple_type > date;
typedef ::xsd::cxx::tree::date_time< char, simple_type > date_time;
typedef ::xsd::cxx::tree::duration< char, simple_type > duration;
typedef ::xsd::cxx::tree::gday< char, simple_type > gday;
typedef ::xsd::cxx::tree::gmonth< char, simple_type > gmonth;
typedef ::xsd::cxx::tree::gmonth_day< char, simple_type > gmonth_day;
typedef ::xsd::cxx::tree::gyear< char, simple_type > gyear;
typedef ::xsd::cxx::tree::gyear_month< char, simple_type > gyear_month;
typedef ::xsd::cxx::tree::time< char, simple_type > time;
// Entity.
//
typedef ::xsd::cxx::tree::entity< char, ncname > entity;
typedef ::xsd::cxx::tree::entities< char, simple_type, entity > entities;
typedef ::xsd::cxx::tree::content_order content_order;
// Flags and properties.
//
typedef ::xsd::cxx::tree::flags flags;
typedef ::xsd::cxx::tree::properties< char > properties;
// Parsing/serialization diagnostics.
//
typedef ::xsd::cxx::tree::severity severity;
typedef ::xsd::cxx::tree::error< char > error;
typedef ::xsd::cxx::tree::diagnostics< char > diagnostics;
// Exceptions.
//
typedef ::xsd::cxx::tree::exception< char > exception;
typedef ::xsd::cxx::tree::bounds< char > bounds;
typedef ::xsd::cxx::tree::duplicate_id< char > duplicate_id;
typedef ::xsd::cxx::tree::parsing< char > parsing;
typedef ::xsd::cxx::tree::expected_element< char > expected_element;
typedef ::xsd::cxx::tree::unexpected_element< char > unexpected_element;
typedef ::xsd::cxx::tree::expected_attribute< char > expected_attribute;
typedef ::xsd::cxx::tree::unexpected_enumerator< char > unexpected_enumerator;
typedef ::xsd::cxx::tree::expected_text_content< char > expected_text_content;
typedef ::xsd::cxx::tree::no_prefix_mapping< char > no_prefix_mapping;
typedef ::xsd::cxx::tree::no_type_info< char > no_type_info;
typedef ::xsd::cxx::tree::not_derived< char > not_derived;
// Error handler callback interface.
//
typedef ::xsd::cxx::xml::error_handler< char > error_handler;
// DOM interaction.
//
namespace dom
{
// Automatic pointer for DOMDocument.
//
using ::xsd::cxx::xml::dom::auto_ptr;
#ifndef XSD_CXX_TREE_TREE_NODE_KEY__XML_SCHEMA
#define XSD_CXX_TREE_TREE_NODE_KEY__XML_SCHEMA
// DOM user data key for back pointers to tree nodes.
//
const XMLCh* const tree_node_key = ::xsd::cxx::tree::user_data_keys::node;
#endif
}
}
// Forward declarations.
//
namespace schema
{
namespace simxml
{
namespace MepModel
{
class SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX;
}
}
}
#include <memory> // ::std::auto_ptr
#include <limits> // std::numeric_limits
#include <algorithm> // std::binary_search
#include <xsd/cxx/xml/char-utf8.hxx>
#include <xsd/cxx/tree/exceptions.hxx>
#include <xsd/cxx/tree/elements.hxx>
#include <xsd/cxx/tree/containers.hxx>
#include <xsd/cxx/tree/list.hxx>
#include <xsd/cxx/xml/dom/parsing-header.hxx>
#include "simflowcompoundeqmt_virtualsupplysubsystems.hxx"
namespace schema
{
namespace simxml
{
namespace MepModel
{
class SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX: public ::schema::simxml::MepModel::SimFlowCompoundEqmt_VirtualSupplySubsystems
{
public:
// Constructors.
//
SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX ();
SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX (const RefId_type&);
SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX (const ::xercesc::DOMElement& e,
::xml_schema::flags f = 0,
::xml_schema::container* c = 0);
SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX (const SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX& x,
::xml_schema::flags f = 0,
::xml_schema::container* c = 0);
virtual SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX*
_clone (::xml_schema::flags f = 0,
::xml_schema::container* c = 0) const;
virtual
~SimFlowCompoundEqmt_VirtualSupplySubsystems_CoilSystemHeatingDX ();
};
}
}
}
#include <iosfwd>
#include <xercesc/sax/InputSource.hpp>
#include <xercesc/dom/DOMDocument.hpp>
#include <xercesc/dom/DOMErrorHandler.hpp>
namespace schema
{
namespace simxml
{
namespace MepModel
{
}
}
}
#include <xsd/cxx/post.hxx>
// Begin epilogue.
//
//
// End epilogue.
#endif // SIM_FLOW_COMPOUND_EQMT_VIRTUAL_SUPPLY_SUBSYSTEMS_COIL_SYSTEM_HEATING_DX_HXX
|
// Geometric Tools, LLC
// Copyright (c) 1998-2012
// Distributed under the Boost Software License, Version 1.0.
// http://www.boost.org/LICENSE_1_0.txt
// http://www.geometrictools.com/License/Boost/LICENSE_1_0.txt
//
// File Version: 5.0.0 (2010/01/01)
#include "MTIVertex.h"
//----------------------------------------------------------------------------
MTIVertex::MTIVertex (int label)
:
mLabel(label)
{
}
//----------------------------------------------------------------------------
int MTIVertex::GetLabel () const
{
return mLabel;
}
//----------------------------------------------------------------------------
bool MTIVertex::operator< (const MTIVertex& vertex) const
{
return mLabel < vertex.mLabel;
}
//----------------------------------------------------------------------------
bool MTIVertex::operator== (const MTIVertex& vertex) const
{
return mLabel == vertex.mLabel;
}
//----------------------------------------------------------------------------
bool MTIVertex::operator!= (const MTIVertex& vertex) const
{
return mLabel != vertex.mLabel;
}
//----------------------------------------------------------------------------
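// Usage sketch (illustrative, not part of the original file): operator< and
// operator== above are exactly what sorted standard containers need, so an
// MTIVertex can serve as a std::set key directly:
//
//   #include <set>
//   std::set<MTIVertex> vertices;
//   vertices.insert(MTIVertex(3));
//   vertices.insert(MTIVertex(1));
//   // iteration visits labels in ascending order: 1, then 3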
|
//Binary Search Tree Program
#include <iostream>
#include <cstdlib>
using namespace std;
class BinarySearchTree {
private:
struct tree_node {
tree_node* left;
tree_node* right;
int data;
};
tree_node* root;
// Recursively free a subtree; used by the destructor below.
static void destroy(tree_node* p) {
    if (p == NULL) return;
    destroy(p->left);
    destroy(p->right);
    delete p;
}
public:
BinarySearchTree() {
    root = NULL;
}
// Release every node on destruction so the tree doesn't leak memory.
~BinarySearchTree() {
    destroy(root);
}
bool isEmpty() const {
return root == NULL;
}
void print_inorder();
void inorder(tree_node*);
void print_preorder();
void preorder(tree_node*);
void print_postorder();
void postorder(tree_node*);
void insert(int);
void remove(int);
};
// Smaller elements go left
// larger elements go right
void BinarySearchTree::insert(int d) {
tree_node* t = new tree_node;
tree_node* parent;
t->data = d;
t->left = NULL;
t->right = NULL;
parent = NULL;
// is this a new tree?
if (isEmpty()) root = t;
else {
//Note: ALL insertions are as leaf nodes
tree_node* curr;
curr = root;
// Find the Node's parent
while (curr) {
parent = curr;
if (t->data > curr->data) curr = curr->right;
else curr = curr->left;
}
if (t->data < parent->data)
parent->left = t;
else
parent->right = t;
}
}
void BinarySearchTree::remove(int d) {
//Locate the element
bool found = false;
if (isEmpty()) {
cout << " This Tree is empty! " << endl;
return;
}
tree_node* curr;
tree_node* parent = NULL; // stays NULL while curr points at the root
curr = root;
while (curr != NULL) {
if (curr->data == d) {
found = true;
break;
} else {
parent = curr;
if (d > curr->data) curr = curr->right;
else curr = curr->left;
}
}
if (!found) {
cout << " Data not found! " << endl;
return;
}
// Three cases:
// 1. We're removing a leaf node
// 2. We're removing a node with a single child
// 3. We're removing a node with 2 children
// Node with a single child: splice the child into the parent's slot
if ((curr->left == NULL && curr->right != NULL)
    || (curr->left != NULL && curr->right == NULL)) {
    tree_node* child = (curr->left != NULL) ? curr->left : curr->right;
    if (parent == NULL) root = child; // curr was the root
    else if (parent->left == curr) parent->left = child;
    else parent->right = child;
    delete curr;
    return;
}
// We're looking at a leaf node
if (curr->left == NULL && curr->right == NULL) {
    if (parent == NULL) root = NULL; // curr was the only node
    else if (parent->left == curr) parent->left = NULL;
    else parent->right = NULL;
    delete curr;
    return;
}
// Node with 2 children:
// replace the node's data with the smallest value in its right subtree,
// then unlink that successor node
if (curr->left != NULL && curr->right != NULL) {
    tree_node* chkr = curr->right;
    if ((chkr->left == NULL) && (chkr->right == NULL)) {
        // right child is a leaf: copy its data up, then remove it
        curr->data = chkr->data;
        curr->right = NULL;
        delete chkr;
    } else if ((curr->right)->left != NULL) {
        // the node's right child has a left child:
        // move all the way down left to locate the smallest element
        tree_node* lcurrp = curr->right;
        tree_node* lcurr = (curr->right)->left;
        while (lcurr->left != NULL) {
            lcurrp = lcurr;
            lcurr = lcurr->left;
        }
        curr->data = lcurr->data;
        lcurrp->left = lcurr->right; // keep the successor's right subtree
        delete lcurr;
    } else {
        // right child has no left child: splice it out
        tree_node* tmp = curr->right;
        curr->data = tmp->data;
        curr->right = tmp->right;
        delete tmp;
    }
    return;
}
}
void BinarySearchTree::print_inorder() {
inorder(root);
}
void BinarySearchTree::inorder(tree_node* p) {
if (p != NULL) {
if (p->left) inorder(p->left);
cout << " " << p->data << " ";
if (p->right) inorder(p->right);
} else return;
}
void BinarySearchTree::print_preorder() {
preorder(root);
}
void BinarySearchTree::preorder(tree_node* p) {
if (p != NULL) {
cout << " " << p->data << " ";
if (p->left) preorder(p->left);
if (p->right) preorder(p->right);
} else return;
}
void BinarySearchTree::print_postorder() {
postorder(root);
}
void BinarySearchTree::postorder(tree_node* p) {
if (p != NULL) {
if (p->left) postorder(p->left);
if (p->right) postorder(p->right);
cout << " " << p->data << " ";
} else return;
}
int main() {
BinarySearchTree b;
int ch, tmp, tmp1;
while (1) {
cout << endl << endl;
cout << " Binary Search Tree Operations " << endl;
cout << " ----------------------------- " << endl;
cout << " 1. Insertion/Creation " << endl;
cout << " 2. In-Order Traversal " << endl;
cout << " 3. Pre-Order Traversal " << endl;
cout << " 4. Post-Order Traversal " << endl;
cout << " 5. Removal " << endl;
cout << " 6. Exit " << endl;
cout << " Enter your choice : ";
cin >> ch;
if (!cin) return 0; // avoid an infinite loop on invalid or exhausted input
switch (ch) {
case 1: cout << " Enter Number to be inserted : ";
cin >> tmp;
b.insert(tmp);
break;
case 2: cout << endl;
cout << " In-Order Traversal " << endl;
cout << " -------------------" << endl;
b.print_inorder();
break;
case 3: cout << endl;
cout << " Pre-Order Traversal " << endl;
cout << " -------------------" << endl;
b.print_preorder();
break;
case 4: cout << endl;
cout << " Post-Order Traversal " << endl;
cout << " --------------------" << endl;
b.print_postorder();
break;
case 5: cout << " Enter data to be deleted : ";
cin >> tmp1;
b.remove(tmp1);
break;
case 6:
return 0;
}
}
}
|
//=================================================================================================
/*!
// \file src/mathtest/dmatsmatsub/LDbHCa.cpp
// \brief Source file for the LDbHCa dense matrix/sparse matrix subtraction math test
//
// Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/DynamicMatrix.h>
#include <blaze/math/HermitianMatrix.h>
#include <blaze/math/LowerMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/dmatsmatsub/OperationTest.h>
#include <blazetest/system/MathTest.h>
#ifdef BLAZE_USE_HPX_THREADS
# include <hpx/hpx_main.hpp>
#endif
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
int main()
{
std::cout << " Running 'LDbHCa'..." << std::endl;
using blazetest::mathtest::NumericA;
using blazetest::mathtest::NumericB;
try
{
// Matrix type definitions
using LDb = blaze::LowerMatrix< blaze::DynamicMatrix<NumericB> >;
using HCa = blaze::HermitianMatrix< blaze::CompressedMatrix<NumericA> >;
// Creator type definitions
using CLDb = blazetest::Creator<LDb>;
using CHCa = blazetest::Creator<HCa>;
// Running tests with small matrices
for( size_t i=0UL; i<=6UL; ++i ) {
for( size_t j=0UL; j<=i*i; ++j ) {
RUN_DMATSMATSUB_OPERATION_TEST( CLDb( i ), CHCa( i, j ) );
}
}
// Running tests with large matrices
RUN_DMATSMATSUB_OPERATION_TEST( CLDb( 67UL ), CHCa( 67UL, 7UL ) );
RUN_DMATSMATSUB_OPERATION_TEST( CLDb( 128UL ), CHCa( 128UL, 16UL ) );
}
catch( std::exception& ex ) {
std::cerr << "\n\n ERROR DETECTED during dense matrix/sparse matrix subtraction:\n"
<< ex.what() << "\n";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
//*************************************************************************************************
|
#include "replication.h"
#include <signal.h>
#include <arpa/inet.h>
#include <netinet/tcp.h>
#include <future>
#include <string>
#include <thread>
#include <event2/buffer.h>
#include <event2/bufferevent.h>
#include <event2/event.h>
#include <glog/logging.h>
#include "redis_reply.h"
#include "rocksdb_crc32c.h"
#include "util.h"
#include "status.h"
#include "server.h"
FeedSlaveThread::~FeedSlaveThread() {
delete conn_;
}
Status FeedSlaveThread::Start() {
try {
t_ = std::thread([this]() {
Util::ThreadSetName("feed-slave-thread");
sigset_t mask, omask;
sigemptyset(&mask);
sigemptyset(&omask);
sigaddset(&mask, SIGCHLD);
sigaddset(&mask, SIGHUP);
sigaddset(&mask, SIGPIPE);
pthread_sigmask(SIG_BLOCK, &mask, &omask);
// Sleep briefly so this thread is scheduled only after the fd has been made
// blocking and the "+OK\r\n" response to the psync command has been written.
usleep(10000);
this->loop();
});
} catch (const std::system_error &e) {
conn_ = nullptr; // avoid double-freeing the connection when the thread fails to start
return Status(Status::NotOK, e.what());
}
return Status::OK();
}
void FeedSlaveThread::Stop() {
stop_ = true;
LOG(WARNING) << "Slave thread was terminated, would stop feeding the slave: " << conn_->GetAddr();
}
void FeedSlaveThread::Join() {
if (t_.joinable()) t_.join();
}
void FeedSlaveThread::checkLivenessIfNeed() {
// Ping only every 1000th call (roughly every 2 seconds with the 2 ms poll interval below)
if (++interval % 1000) return;
const auto ping_command = Redis::BulkString("ping");
auto s = Util::SockSend(conn_->GetFD(), ping_command);
if (!s.IsOK()) {
LOG(ERROR) << "Ping slave[" << conn_->GetAddr() << "] err: " << s.Msg()
<< ", would stop the thread";
Stop();
}
}
void FeedSlaveThread::loop() {
uint32_t yield_microseconds = 2000; // usleep() takes microseconds: a 2 ms poll interval
std::vector<std::string> batch_list;
while (!IsStopped()) {
if (!iter_ || !iter_->Valid()) {
if (iter_) LOG(INFO) << "WAL was rotated, would reopen again";
if (!srv_->storage_->WALHasNewData(next_repl_seq_)
|| !srv_->storage_->GetWALIter(next_repl_seq_, &iter_).IsOK()) {
iter_ = nullptr;
usleep(yield_microseconds);
checkLivenessIfNeed();
continue;
}
}
// iter_ is always valid here
auto batch = iter_->GetBatch();
if (batch.sequence != next_repl_seq_) {
LOG(ERROR) << "Fatal error encountered, WAL iterator is discrete, some seq might be lost"
<< ", sequence " << next_repl_seq_ << " expectd, but got " << batch.sequence;
Stop();
return;
}
auto data = batch.writeBatchPtr->Data();
batch_list.emplace_back(Redis::BulkString(data));
// Flush the buffered batches to the slave once it has nearly caught up with
// the master (lag <= 20) or once the buffer is full (>= 20 entries)
auto latest_seq = srv_->storage_->LatestSeq();
if (latest_seq - batch.sequence <= 20 || batch_list.size() >= 20) {
for (const auto &bulk_str : batch_list) {
auto s = Util::SockSend(conn_->GetFD(), bulk_str);
if (!s.IsOK()) {
LOG(ERROR) << "Write error while sending batch to slave: " << s.Msg() << ". batch: 0x"
<< Util::StringToHex(data);
Stop();
return;
}
}
batch_list.clear();
}
next_repl_seq_ = batch.sequence + batch.writeBatchPtr->Count();
while (!IsStopped() && !srv_->storage_->WALHasNewData(next_repl_seq_)) {
usleep(yield_microseconds);
checkLivenessIfNeed();
}
iter_->Next();
}
}
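// Worked example of the batching rule above (illustrative): with the master at
// sequence 1000 and the slave's iterator at sequence 500, the lag (500) exceeds
// 20, so batches are buffered and flushed only once 20 entries accumulate;
// when the lag drops to 20 or less, every batch is flushed immediately so the
// slave stays near-realtime.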
void send_string(bufferevent *bev, const std::string &data) {
auto output = bufferevent_get_output(bev);
evbuffer_add(output, data.c_str(), data.length());
}
void ReplicationThread::CallbacksStateMachine::ConnEventCB(
bufferevent *bev, int16_t events, void *state_machine_ptr) {
if (events & BEV_EVENT_CONNECTED) {
// call write_cb when connected
bufferevent_data_cb write_cb;
bufferevent_getcb(bev, nullptr, &write_cb, nullptr, nullptr);
if (write_cb) write_cb(bev, state_machine_ptr);
return;
}
if (events & (BEV_EVENT_ERROR | BEV_EVENT_EOF)) {
LOG(ERROR) << "[replication] connection error/eof, reconnect the master";
// Wait a bit and reconnect
auto state_m = static_cast<CallbacksStateMachine *>(state_machine_ptr);
state_m->repl_->repl_state_ = kReplConnecting;
std::this_thread::sleep_for(std::chrono::seconds(1));
state_m->Stop();
state_m->Start();
}
}
void ReplicationThread::CallbacksStateMachine::SetReadCB(
bufferevent *bev, bufferevent_data_cb cb, void *state_machine_ptr) {
bufferevent_enable(bev, EV_READ);
bufferevent_setcb(bev, cb, nullptr, ConnEventCB, state_machine_ptr);
}
void ReplicationThread::CallbacksStateMachine::SetWriteCB(
bufferevent *bev, bufferevent_data_cb cb, void *state_machine_ptr) {
bufferevent_enable(bev, EV_WRITE);
bufferevent_setcb(bev, nullptr, cb, ConnEventCB, state_machine_ptr);
}
ReplicationThread::CallbacksStateMachine::CallbacksStateMachine(
ReplicationThread *repl,
ReplicationThread::CallbacksStateMachine::CallbackList &&handlers)
: repl_(repl), handlers_(std::move(handlers)) {
if (!repl_->auth_.empty()) {
handlers_.emplace_front(CallbacksStateMachine::READ, "auth read", authReadCB);
handlers_.emplace_front(CallbacksStateMachine::WRITE, "auth write", authWriteCB);
}
}
void ReplicationThread::CallbacksStateMachine::EvCallback(bufferevent *bev,
void *ctx) {
auto self = static_cast<CallbacksStateMachine *>(ctx);
LOOP_LABEL:
assert(self->handler_idx_ <= self->handlers_.size());
DLOG(INFO) << "[replication] Execute handler[" << self->getHandlerName(self->handler_idx_) << "]";
auto st = self->getHandlerFunc(self->handler_idx_)(bev, self->repl_);
time(&self->repl_->last_io_time_);
switch (st) {
case CBState::NEXT:
++self->handler_idx_;
if (self->getHandlerEventType(self->handler_idx_) == WRITE) {
SetWriteCB(bev, EvCallback, ctx);
} else {
SetReadCB(bev, EvCallback, ctx);
}
// invoke the read handler (of next step) directly, as the bev might
// have the data already.
goto LOOP_LABEL;
case CBState::AGAIN:
break;
case CBState::QUIT: // a state that cannot be retried, or all steps have been executed
bufferevent_free(bev);
self->bev_ = nullptr;
self->repl_->repl_state_ = kReplError;
break;
case CBState::RESTART: // state that can be retried some time later
self->Stop();
if (self->repl_->stop_flag_) {
LOG(INFO) << "[replication] Wouldn't restart while the replication thread was stopped";
break;
}
LOG(INFO) << "[replication] Retry in 10 seconds";
std::this_thread::sleep_for(std::chrono::seconds(10));
self->Start();
}
}
void ReplicationThread::CallbacksStateMachine::Start() {
if (handlers_.empty()) {
return;
}
auto sockaddr_inet = Util::NewSockaddrInet(repl_->host_, repl_->port_);
auto bev = bufferevent_socket_new(repl_->base_, -1, BEV_OPT_CLOSE_ON_FREE);
if (bufferevent_socket_connect(bev,
reinterpret_cast<sockaddr *>(&sockaddr_inet),
sizeof(sockaddr_inet)) != 0) {
// NOTE: A connection error will not surface here; network errors are reported
// in ConnEventCB. An error reaching this point is something fatal.
LOG(ERROR) << "[replication] Failed to start state machine, err: " << strerror(errno);
}
handler_idx_ = 0;
if (getHandlerEventType(0) == WRITE) {
SetWriteCB(bev, EvCallback, this);
} else {
SetReadCB(bev, EvCallback, this);
}
bev_ = bev;
}
void ReplicationThread::CallbacksStateMachine::Stop() {
if (bev_) {
bufferevent_free(bev_);
bev_ = nullptr;
}
}
ReplicationThread::ReplicationThread(std::string host, uint32_t port,
Server *srv, std::string auth)
: host_(std::move(host)),
port_(port),
auth_(std::move(auth)),
srv_(srv),
storage_(srv->storage_),
repl_state_(kReplConnecting),
psync_steps_(this,
CallbacksStateMachine::CallbackList{
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::WRITE, "dbname write", checkDBNameWriteCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::READ, "dbname read", checkDBNameReadCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::WRITE, "replconf write", replConfWriteCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::READ, "replconf read", replConfReadCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::WRITE, "psync write", tryPSyncWriteCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::READ, "psync read", tryPSyncReadCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::READ, "batch loop", incrementBatchLoopCB
}
}),
fullsync_steps_(this,
CallbacksStateMachine::CallbackList{
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::WRITE, "fullsync write", fullSyncWriteCB
},
CallbacksStateMachine::CallbackType{
CallbacksStateMachine::READ, "fullsync read", fullSyncReadCB}
}) {
}
Status ReplicationThread::Start(std::function<void()> &&pre_fullsync_cb,
std::function<void()> &&post_fullsync_cb) {
pre_fullsync_cb_ = std::move(pre_fullsync_cb);
post_fullsync_cb_ = std::move(post_fullsync_cb);
// cleanup the old backups, so we can start replication in a clean state
storage_->PurgeOldBackups(0, 0);
try {
t_ = std::thread([this]() {
Util::ThreadSetName("master-repl");
this->run();
assert(stop_flag_);
});
} catch (const std::system_error &e) {
return Status(Status::NotOK, e.what());
}
return Status::OK();
}
void ReplicationThread::Stop() {
if (stop_flag_) return;
stop_flag_ = true; // Stopping procedure is asynchronous,
// handled by timer
t_.join();
LOG(INFO) << "[replication] Stopped";
}
/*
 * Connect to the master, and run the following steps asynchronously:
 * - CheckDBName
 * - TryPsync
 *   - on success: IncrementBatchLoop
 *   - on failure: FullSync, then restart TryPsync when done
 */
void ReplicationThread::run() {
base_ = event_base_new();
if (base_ == nullptr) {
LOG(ERROR) << "[replication] Failed to create new ev base";
return;
}
psync_steps_.Start();
auto timer = event_new(base_, -1, EV_PERSIST, EventTimerCB, this);
timeval tmo{0, 100000}; // 100 ms
evtimer_add(timer, &tmo);
event_base_dispatch(base_);
event_free(timer);
event_base_free(base_);
}
ReplicationThread::CBState ReplicationThread::authWriteCB(bufferevent *bev,
void *ctx) {
auto self = static_cast<ReplicationThread *>(ctx);
send_string(bev, Redis::MultiBulkString({"AUTH", self->auth_}));
LOG(INFO) << "[replication] Auth request was sent, waiting for response";
self->repl_state_ = kReplSendAuth;
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::authReadCB(bufferevent *bev,
void *ctx) {
char *line;
size_t line_len;
auto input = bufferevent_get_input(bev);
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (strncmp(line, "+OK", 3) != 0) {
// Auth failed
LOG(ERROR) << "[replication] Auth failed: " << line;
free(line);
return CBState::RESTART;
}
free(line);
LOG(INFO) << "[replication] Auth response was received, continue...";
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::checkDBNameWriteCB(
bufferevent *bev, void *ctx) {
send_string(bev, Redis::MultiBulkString({"_db_name"}));
auto self = static_cast<ReplicationThread *>(ctx);
self->repl_state_ = kReplCheckDBName;
LOG(INFO) << "[replication] Check db name request was sent, waiting for response";
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::checkDBNameReadCB(
bufferevent *bev, void *ctx) {
char *line;
size_t line_len;
auto input = bufferevent_get_input(bev);
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (line[0] == '-') {
if (isRestoringError(line)) {
LOG(WARNING) << "The master was restoring the db, retry later";
} else {
LOG(ERROR) << "Failed to get the db name, " << line;
}
free(line);
return CBState::RESTART;
}
auto self = static_cast<ReplicationThread *>(ctx);
std::string db_name = self->storage_->GetName();
if (line_len == db_name.size() && !strncmp(line, db_name.data(), line_len)) {
// DB name match, we should continue to next step: TryPsync
free(line);
LOG(INFO) << "[replication] DB name is valid, continue...";
return CBState::NEXT;
}
LOG(ERROR) << "[replication] Mismatched the db name, local: " << db_name << ", remote: " << line;
free(line);
return CBState::RESTART;
}
ReplicationThread::CBState ReplicationThread::replConfWriteCB(
bufferevent *bev, void *ctx) {
auto self = static_cast<ReplicationThread *>(ctx);
send_string(bev,
Redis::MultiBulkString({"replconf", "listening-port", std::to_string(self->srv_->GetConfig()->port)}));
self->repl_state_ = kReplReplConf;
LOG(INFO) << "[replication] replconf request was sent, waiting for response";
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::replConfReadCB(
bufferevent *bev, void *ctx) {
char *line;
size_t line_len;
auto input = bufferevent_get_input(bev);
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (line[0] == '-' && isRestoringError(line)) {
free(line);
LOG(WARNING) << "The master was restoring the db, retry later";
return CBState::RESTART;
}
if (strncmp(line, "+OK", 3) != 0) {
LOG(WARNING) << "[replication] Failed to replconf: " << line+1;
free(line);
// stay backward compatible with old versions that don't support the replconf command
return CBState::NEXT;
} else {
free(line);
LOG(INFO) << "[replication] replconf is ok, start psync";
return CBState::NEXT;
}
}
ReplicationThread::CBState ReplicationThread::tryPSyncWriteCB(
bufferevent *bev, void *ctx) {
auto self = static_cast<ReplicationThread *>(ctx);
auto next_seq = self->storage_->LatestSeq() + 1;
send_string(bev, Redis::MultiBulkString({"PSYNC", std::to_string(next_seq)}));
self->repl_state_ = kReplSendPSync;
LOG(INFO) << "[replication] Try to use psync, next seq: " << next_seq;
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::tryPSyncReadCB(bufferevent *bev,
void *ctx) {
char *line;
size_t line_len;
auto self = static_cast<ReplicationThread *>(ctx);
auto input = bufferevent_get_input(bev);
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (line[0] == '-' && isRestoringError(line)) {
free(line);
LOG(WARNING) << "The master was restoring the db, retry later";
return CBState::RESTART;
}
if (strncmp(line, "+OK", 3) != 0) {
// PSYNC isn't OK, we should use FullSync
// Switch to fullsync state machine
self->fullsync_steps_.Start();
LOG(INFO) << "[replication] Failed to psync, switch to fullsync";
free(line);
return CBState::QUIT;
} else {
// PSYNC is OK, use IncrementBatchLoop
free(line);
LOG(INFO) << "[replication] PSync is ok, start increment batch loop";
return CBState::NEXT;
}
}
ReplicationThread::CBState ReplicationThread::incrementBatchLoopCB(
bufferevent *bev, void *ctx) {
char *line = nullptr;
size_t line_len = 0;
char *bulk_data = nullptr;
auto self = static_cast<ReplicationThread *>(ctx);
self->repl_state_ = kReplConnected;
auto input = bufferevent_get_input(bev);
while (true) {
switch (self->incr_state_) {
case Incr_batch_size:
// Read bulk length
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
self->incr_bulk_len_ = line_len > 0 ? std::strtoull(line + 1, nullptr, 10) : 0;
free(line);
if (self->incr_bulk_len_ == 0) {
LOG(ERROR) << "[replication] Invalid increment data size";
return CBState::RESTART;
}
self->incr_state_ = Incr_batch_data;
break;
case Incr_batch_data:
// Read bulk data (batch data)
if (self->incr_bulk_len_+2 <= evbuffer_get_length(input)) { // We got enough data
bulk_data = reinterpret_cast<char *>(evbuffer_pullup(input, self->incr_bulk_len_ + 2));
std::string bulk_string = std::string(bulk_data, self->incr_bulk_len_);
// The master sends ping heartbeat packets to check whether the slave is
// alive; don't write those pings to the db here.
if (bulk_string != "ping") {
auto s = self->storage_->WriteBatch(bulk_string);
if (!s.IsOK()) {
LOG(ERROR) << "[replication] CRITICAL - Failed to write batch to local, " << s.Msg() << ". batch: 0x"
<< Util::StringToHex(bulk_string);
return CBState::RESTART;
}
self->ParseWriteBatch(bulk_string);
}
evbuffer_drain(input, self->incr_bulk_len_ + 2);
self->incr_state_ = Incr_batch_size;
} else {
return CBState::AGAIN;
}
break;
}
}
}
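// Wire format handled by incrementBatchLoopCB above (for reference): each
// increment arrives as a RESP bulk string -- a size line "$<n>\r\n" followed
// by <n> bytes of serialized rocksdb::WriteBatch data and a trailing "\r\n",
// which is why the code drains incr_bulk_len_ + 2 bytes per payload.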
ReplicationThread::CBState ReplicationThread::fullSyncWriteCB(
bufferevent *bev, void *ctx) {
send_string(bev, Redis::MultiBulkString({"_fetch_meta"}));
auto self = static_cast<ReplicationThread *>(ctx);
self->repl_state_ = kReplFetchMeta;
LOG(INFO) << "[replication] Start syncing data with fullsync";
return CBState::NEXT;
}
ReplicationThread::CBState ReplicationThread::fullSyncReadCB(bufferevent *bev,
void *ctx) {
char *line;
size_t line_len;
auto self = static_cast<ReplicationThread *>(ctx);
auto input = bufferevent_get_input(bev);
switch (self->fullsync_state_) {
case kFetchMetaID:
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (line[0] == '-') {
LOG(ERROR) << "[replication] Failed to fetch meta id: " << line;
free(line);
return CBState::RESTART;
}
self->fullsync_meta_id_ = static_cast<rocksdb::BackupID>(
line_len > 0 ? std::strtoul(line, nullptr, 10) : 0);
free(line);
if (self->fullsync_meta_id_ == 0) {
LOG(ERROR) << "[replication] Invalid meta id received";
return CBState::RESTART;
}
self->storage_->PurgeBackupIfNeed(self->fullsync_meta_id_);
self->fullsync_state_ = kFetchMetaSize;
LOG(INFO) << "[replication] Success to fetch meta id: " << self->fullsync_meta_id_;
case kFetchMetaSize:
line = evbuffer_readln(input, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) return CBState::AGAIN;
if (line[0] == '-') {
LOG(ERROR) << "[replication] Failed to fetch meta size: " << line;
free(line);
return CBState::RESTART;
}
self->fullsync_filesize_ = line_len > 0 ? std::strtoull(line, nullptr, 10) : 0;
free(line);
if (self->fullsync_filesize_ == 0) {
LOG(ERROR) << "[replication] Invalid meta file size received";
return CBState::RESTART;
}
self->fullsync_state_ = kFetchMetaContent;
LOG(INFO) << "[replication] Success to fetch meta size: " << self->fullsync_filesize_;
case kFetchMetaContent:
if (evbuffer_get_length(input) < self->fullsync_filesize_) {
return CBState::AGAIN;
}
auto meta = Engine::Storage::BackupManager::ParseMetaAndSave(
self->storage_, self->fullsync_meta_id_, input);
assert(evbuffer_get_length(input) == 0);
self->fullsync_state_ = kFetchMetaID;
LOG(INFO) << "[replication] Succeeded fetching meta file, fetching files in parallel";
self->repl_state_ = kReplFetchSST;
auto s = self->parallelFetchFile(meta.files);
if (!s.IsOK()) {
LOG(ERROR) << "[replication] Failed to parallel fetch files while " + s.Msg();
return CBState::RESTART;
}
LOG(INFO) << "[replication] Succeeded fetching files in parallel, restoring the backup";
// Restore DB from backup
self->pre_fullsync_cb_();
s = self->storage_->RestoreFromBackup();
if (!s.IsOK()) {
LOG(ERROR) << "[replication] Failed to restore backup while " + s.Msg() + ", restart fullsync";
return CBState::RESTART;
}
LOG(INFO) << "[replication] Succeeded restoring the backup, fullsync was finish";
self->post_fullsync_cb_();
// Switch to psync state machine again
self->psync_steps_.Start();
return CBState::QUIT;
}
LOG(ERROR) << "Should not arrive here";
assert(false);
return CBState::QUIT;
}
Status ReplicationThread::parallelFetchFile(const std::vector<std::pair<std::string, uint32_t>> &files) {
size_t concurrency = 1;
if (files.size() > 20) {
// Use 4 threads to download files in parallel
concurrency = 4;
}
std::atomic<uint32_t> fetch_cnt = {0};
std::atomic<uint32_t> skip_cnt = {0};
std::vector<std::future<Status>> results;
for (size_t tid = 0; tid < concurrency; ++tid) {
results.push_back(std::async(
std::launch::async, [this, &files, tid, concurrency, &fetch_cnt, &skip_cnt]() -> Status {
if (this->stop_flag_) {
return Status(Status::NotOK, "replication thread was stopped");
}
int sock_fd;
Status s = Util::SockConnect(this->host_, this->port_, &sock_fd);
if (!s.IsOK()) {
return Status(Status::NotOK, "connect the server err: " + s.Msg());
}
s = this->sendAuth(sock_fd);
if (!s.IsOK()) {
close(sock_fd);
return Status(Status::NotOK, "sned the auth command err: " + s.Msg());
}
for (auto f_idx = tid; f_idx < files.size(); f_idx += concurrency) {
if (this->stop_flag_) {
return Status(Status::NotOK, "replication thread was stopped");
}
const auto &f_name = files[f_idx].first;
const auto &f_crc = files[f_idx].second;
// Don't fetch existing files
if (Engine::Storage::BackupManager::FileExists(this->storage_, f_name, f_crc)) {
skip_cnt.fetch_add(1);
uint32_t cur_skip_cnt = skip_cnt.load();
uint32_t cur_fetch_cnt = fetch_cnt.load();
LOG(INFO) << "[skip] "<< f_name << " " << f_crc
<< ", skip count: " << cur_skip_cnt << ", fetch count: " << cur_fetch_cnt
<< ", progress: " << cur_skip_cnt+cur_fetch_cnt<< "/" << files.size();
continue;
}
fetch_cnt.fetch_add(1);
uint32_t cur_skip_cnt = skip_cnt.load();
uint32_t cur_fetch_cnt = fetch_cnt.load();
LOG(INFO) << "[fetch] " << f_name << " " << f_crc
<< ", skip count: " << cur_skip_cnt << ", fetch count: " << cur_fetch_cnt
<< ", progress: " << cur_skip_cnt+cur_fetch_cnt<< "/" << files.size();
s = this->fetchFile(sock_fd, f_name, f_crc);
if (!s.IsOK()) {
close(sock_fd);
return Status(Status::NotOK, "fetch file err: " + s.Msg());
}
}
close(sock_fd);
return Status::OK();
}));
}
// Wait until all fetch workers finish
for (auto &f : results) {
Status s = f.get();
if (!s.IsOK()) return s;
}
return Status::OK();
}
Status ReplicationThread::sendAuth(int sock_fd) {
size_t line_len;
// Send auth when needed
if (!auth_.empty()) {
evbuffer *evbuf = evbuffer_new();
const auto auth_command = Redis::MultiBulkString({"AUTH", auth_});
auto s = Util::SockSend(sock_fd, auth_command);
if (!s.IsOK()) return Status(Status::NotOK, "send auth command err:"+s.Msg());
while (true) {
if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
evbuffer_free(evbuf);
return Status(Status::NotOK, std::string("read auth response err: ")+strerror(errno));
}
char *line = evbuffer_readln(evbuf, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) continue;
if (strncmp(line, "+OK", 3) != 0) {
free(line);
evbuffer_free(evbuf);
return Status(Status::NotOK, "auth got invalid response");
}
free(line);
break;
}
evbuffer_free(evbuf);
}
return Status::OK();
}
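// For reference: the AUTH command above goes out as a RESP multi-bulk string,
// e.g. "*2\r\n$4\r\nAUTH\r\n$6\r\nsecret\r\n" for the password "secret", and a
// successful reply is the simple string "+OK\r\n" checked by the strncmp above.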
Status ReplicationThread::fetchFile(int sock_fd, std::string path,
uint32_t crc) {
size_t line_len, file_size;
const auto fetch_command = Redis::MultiBulkString({"_fetch_file", path});
auto s = Util::SockSend(sock_fd, fetch_command);
if (!s.IsOK()) return Status(Status::NotOK, "send fetch file command err: "+s.Msg());
evbuffer *evbuf = evbuffer_new();
// Read file size line
while (true) {
if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
evbuffer_free(evbuf);
return Status(Status::NotOK, std::string("read size line err: ")+strerror(errno));
}
char *line = evbuffer_readln(evbuf, &line_len, EVBUFFER_EOL_CRLF_STRICT);
if (!line) continue;
if (*line == '-') {
    // build the error message before freeing the line to avoid a use-after-free
    std::string err_msg = std::string("_fetch_file got err: ") + line;
    free(line);
    evbuffer_free(evbuf);
    return Status(Status::NotOK, err_msg);
}
file_size = line_len > 0 ? std::strtoull(line, nullptr, 10) : 0;
free(line);
break;
}
// Write to tmp file
auto tmp_file = Engine::Storage::BackupManager::NewTmpFile(storage_, path);
if (!tmp_file) {
evbuffer_free(evbuf);
return Status(Status::NotOK, "unable to create tmp file");
}
size_t seen_bytes = 0;
uint32_t tmp_crc = 0;
char data[1024];
while (seen_bytes < file_size) {
if (evbuffer_get_length(evbuf) > 0) {
auto data_len = evbuffer_remove(evbuf, data, 1024);
if (data_len == 0) continue;
if (data_len < 0) {
evbuffer_free(evbuf);
return Status(Status::NotOK, "read sst file data error");
}
tmp_file->Append(rocksdb::Slice(data, data_len));
tmp_crc = rocksdb::crc32c::Extend(tmp_crc, data, data_len);
seen_bytes += data_len;
} else {
if (evbuffer_read(evbuf, sock_fd, -1) <= 0) {
evbuffer_free(evbuf);
return Status(Status::NotOK, std::string("read sst file data, err: ")+strerror(errno));
}
}
}
if (crc != tmp_crc) {
evbuffer_free(evbuf);
return Status(Status::NotOK, "CRC mismatch");
}
evbuffer_free(evbuf);
// File is OK, rename to formal name
return Engine::Storage::BackupManager::SwapTmpFile(storage_, path);
}
// Check whether stop_flag_ is set; if so, tear down the replication
void ReplicationThread::EventTimerCB(int, int16_t, void *ctx) {
// DLOG(INFO) << "[replication] timer";
auto self = static_cast<ReplicationThread *>(ctx);
if (self->stop_flag_) {
LOG(INFO) << "[replication] Stop ev loop";
event_base_loopbreak(self->base_);
self->psync_steps_.Stop();
self->fullsync_steps_.Stop();
}
}
rocksdb::Status ReplicationThread::ParseWriteBatch(const std::string &batch_string) {
rocksdb::WriteBatch write_batch(batch_string);
WriteBatchHandler write_batch_handler;
rocksdb::Status status;
status = write_batch.Iterate(&write_batch_handler);
if (!status.ok()) return status;
if (write_batch_handler.IsPublish()) {
srv_->PublishMessage(write_batch_handler.GetPublishChannel().ToString(),
write_batch_handler.GetPublishValue().ToString());
}
return rocksdb::Status::OK();
}
bool ReplicationThread::isRestoringError(const char *err) {
return std::string(err) == "-ERR restoring the db from backup";
}
rocksdb::Status WriteBatchHandler::PutCF(uint32_t column_family_id, const rocksdb::Slice &key,
const rocksdb::Slice &value) {
if (column_family_id != kColumnFamilyIDPubSub) {
return rocksdb::Status::OK();
}
publish_message_ = std::make_pair(key.ToString(), value.ToString());
is_publish_ = true;
return rocksdb::Status::OK();
}
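// Caller-side sketch (illustrative; the surrounding server wiring is assumed,
// not shown in this file):
//
//   auto repl = new ReplicationThread(master_host, master_port, srv, auth);
//   Status s = repl->Start(
//       [srv]() { /* pre-fullsync: e.g. pause serving reads */ },
//       [srv]() { /* post-fullsync: e.g. resume serving */ });
//   if (!s.IsOK()) LOG(ERROR) << "failed to start replication: " << s.Msg();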
|
// UserFunctionExpr.cpp
// This file is part of the EScript programming language (https://github.com/EScript)
//
// Copyright (C) 2012-2013 Claudius Jähn <ClaudiusJ@live.de>
// Copyright (C) 2012 Benjamin Eikel <benjamin@eikel.org>
//
// Licensed under the MIT License. See LICENSE file for details.
// ---------------------------------------------------------------------------------
#include "UserFunctionExpr.h"
#include "../../Basics.h"
#include "Block.h"
#include <sstream>
namespace EScript{
namespace AST{
//! (ctor) UserFunctionExpr::Parameter
UserFunctionExpr::Parameter::Parameter(const StringId & _name,ptr_t defaultValueExpression,refArray_t && _typeExpressions):
name(_name),defaultValueExpressionRef(defaultValueExpression),typeExpressions(_typeExpressions),multiParam(false){
}
// ------------------------------------------------------------
//! (ctor)
UserFunctionExpr::UserFunctionExpr(AST::Block * block,const refArray_t & _sConstrExpressions,int _line):
ASTNode(TYPE_USER_FUNCTION_EXPRESSION,true,_line),
blockRef(block), sConstrExpressions(_sConstrExpressions){
//ctor
}
}
}
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "exec/kudu-scanner.h"
#include <string>
#include <vector>
#include <kudu/client/row_result.h>
#include <kudu/client/value.h>
#include <thrift/protocol/TDebugProtocol.h>
#include "exec/exec-node.inline.h"
#include "exec/kudu-util.h"
#include "exprs/scalar-expr-evaluator.h"
#include "exprs/scalar-expr.h"
#include "exprs/slot-ref.h"
#include "gutil/gscoped_ptr.h"
#include "gutil/strings/substitute.h"
#include "kudu/util/block_bloom_filter.h"
#include "kudu/util/slice.h"
#include "runtime/mem-pool.h"
#include "runtime/mem-tracker.h"
#include "runtime/raw-value.h"
#include "runtime/row-batch.h"
#include "runtime/runtime-filter.inline.h"
#include "runtime/runtime-state.h"
#include "runtime/string-value.h"
#include "runtime/timestamp-value.inline.h"
#include "runtime/tuple-row.h"
#include "util/bloom-filter.h"
#include "util/debug-util.h"
#include "util/jni-util.h"
#include "util/min-max-filter.h"
#include "util/periodic-counter-updater.h"
#include "util/runtime-profile-counters.h"
#include "common/names.h"
using kudu::client::KuduClient;
using kudu::client::KuduPredicate;
using kudu::client::KuduScanBatch;
using kudu::client::KuduSchema;
using kudu::client::KuduTable;
using kudu::client::KuduValue;
DEFINE_string(kudu_read_mode, "READ_LATEST", "(Advanced) Sets the Kudu scan ReadMode. "
"Supported Kudu read modes are READ_LATEST and READ_AT_SNAPSHOT. Can be overridden "
"with the query option of the same name.");
DEFINE_int32(kudu_scanner_keep_alive_period_sec, 15,
"The period at which Kudu Scanners should send keep-alive requests to the tablet "
"server to ensure that scanners do not time out.");
DECLARE_int32(kudu_operation_timeout_ms);
namespace impala {
KuduScanner::KuduScanner(KuduScanNodeBase* scan_node, RuntimeState* state)
: scan_node_(scan_node),
state_(state),
expr_perm_pool_(new MemPool(scan_node->expr_mem_tracker())),
expr_results_pool_(new MemPool(scan_node->expr_mem_tracker())),
cur_kudu_batch_num_read_(0),
last_alive_time_micros_(0) {
}
Status KuduScanner::Open() {
for (int i = 0; i < scan_node_->tuple_desc()->slots().size(); ++i) {
const SlotDescriptor* slot = scan_node_->tuple_desc()->slots()[i];
if (slot->type().type == TYPE_TIMESTAMP) {
timestamp_slots_.push_back(slot);
} else if (slot->type().type == TYPE_VARCHAR) {
varchar_slots_.push_back(slot);
}
}
return ScalarExprEvaluator::Clone(&obj_pool_, state_, expr_perm_pool_.get(),
expr_results_pool_.get(), scan_node_->conjunct_evals(), &conjunct_evals_);
}
void KuduScanner::KeepKuduScannerAlive() {
if (scanner_ == nullptr) return;
int64_t now = MonotonicMicros();
int64_t keepalive_us = FLAGS_kudu_scanner_keep_alive_period_sec * 1e6;
if (now < last_alive_time_micros_ + keepalive_us) {
return;
}
// If we fail to send a keepalive, it isn't a big deal. The Kudu
// client code doesn't handle cross-replica failover or retries when
// the server is busy, so it's better to just ignore errors here. In
// the worst case, we will just fail next time we try to fetch a batch
// if the scan is unrecoverable.
kudu::Status s = scanner_->KeepAlive();
if (!s.ok()) {
VLOG(1) << "Unable to keep the Kudu scanner alive: " << s.ToString();
return;
}
last_alive_time_micros_ = now;
}
Status KuduScanner::GetNextWithCountStarOptimization(RowBatch* row_batch, bool* eos) {
int64_t counter = 0;
while (scanner_->HasMoreRows()) {
RETURN_IF_CANCELLED(state_);
RETURN_IF_ERROR(GetNextScannerBatch());
cur_kudu_batch_num_read_ = static_cast<int64_t>(cur_kudu_batch_.NumRows());
counter += cur_kudu_batch_num_read_;
}
*eos = true;
int64_t tuple_buffer_size;
uint8_t* tuple_buffer;
int capacity = 1;
RETURN_IF_ERROR(row_batch->ResizeAndAllocateTupleBuffer(state_,
row_batch->tuple_data_pool(), row_batch->row_desc()->GetRowSize(), &capacity,
&tuple_buffer_size, &tuple_buffer));
Tuple* tuple = reinterpret_cast<Tuple*>(tuple_buffer);
Tuple::ClearNullBits(tuple, scan_node_->tuple_desc()->null_bytes_offset(),
scan_node_->tuple_desc()->num_null_bytes());
int64_t* counter_slot = tuple->GetBigIntSlot(scan_node_->count_star_slot_offset());
*counter_slot = counter;
TupleRow* dst_row = row_batch->GetRow(row_batch->AddRow());
dst_row->SetTuple(0, tuple);
row_batch->CommitLastRow();
CloseCurrentClientScanner();
return Status::OK();
}
Status KuduScanner::GetNext(RowBatch* row_batch, bool* eos) {
SCOPED_TIMER(scan_node_->materialize_tuple_timer());
// Optimized scan for count(*): only the row count needs to be materialized
if (scan_node_->optimize_count_star()) {
return GetNextWithCountStarOptimization(row_batch, eos);
}
int64_t tuple_buffer_size;
uint8_t* tuple_buffer;
RETURN_IF_ERROR(
row_batch->ResizeAndAllocateTupleBuffer(state_, &tuple_buffer_size, &tuple_buffer));
// Main scan loop:
// Tries to fill 'row_batch' with rows from cur_kudu_batch_.
// If there are no rows to decode, tries to get the next row batch from kudu.
// If this scanner has no more rows, the scanner is closed and eos is returned.
Tuple* tuple = reinterpret_cast<Tuple*>(tuple_buffer);
while (!*eos) {
RETURN_IF_CANCELLED(state_);
if (cur_kudu_batch_num_read_ < cur_kudu_batch_.NumRows()) {
RETURN_IF_ERROR(DecodeRowsIntoRowBatch(row_batch, &tuple));
if (row_batch->AtCapacity()) break;
}
if (scanner_->HasMoreRows() && !scan_node_->ReachedLimitShared()) {
RETURN_IF_ERROR(GetNextScannerBatch());
continue;
}
CloseCurrentClientScanner();
*eos = true;
}
return Status::OK();
}
void KuduScanner::Close() {
if (scanner_) CloseCurrentClientScanner();
ScalarExprEvaluator::Close(conjunct_evals_, state_);
expr_perm_pool_->FreeAll();
expr_results_pool_->FreeAll();
}
Status KuduScanner::OpenNextScanToken(const string& scan_token, bool* eos) {
DCHECK(scanner_ == nullptr);
kudu::client::KuduScanner* scanner;
KUDU_RETURN_IF_ERROR(kudu::client::KuduScanToken::DeserializeIntoScanner(
scan_node_->kudu_client(), scan_token, &scanner),
BuildErrorString("Unable to deserialize scan token"));
scanner_.reset(scanner);
if (state_->query_options().kudu_replica_selection
== TKuduReplicaSelection::LEADER_ONLY) {
KUDU_RETURN_IF_ERROR(scanner_->SetSelection(kudu::client::KuduClient::LEADER_ONLY),
BuildErrorString("Could not set replica selection"));
}
kudu::client::KuduScanner::ReadMode mode;
RETURN_IF_ERROR(StringToKuduReadMode(FLAGS_kudu_read_mode, &mode));
if (state_->query_options().kudu_read_mode != TKuduReadMode::DEFAULT) {
RETURN_IF_ERROR(StringToKuduReadMode(
PrintThriftEnum(state_->query_options().kudu_read_mode), &mode));
}
KUDU_RETURN_IF_ERROR(
scanner_->SetReadMode(mode), BuildErrorString("Could not set scanner ReadMode"));
if (state_->query_options().kudu_snapshot_read_timestamp_micros > 0) {
KUDU_RETURN_IF_ERROR(scanner_->SetSnapshotMicros(
state_->query_options().kudu_snapshot_read_timestamp_micros),
BuildErrorString("Could not set snapshot timestamp"));
}
KUDU_RETURN_IF_ERROR(scanner_->SetTimeoutMillis(FLAGS_kudu_operation_timeout_ms),
BuildErrorString("Could not set scanner timeout"));
VLOG_ROW << "Starting KuduScanner with ReadMode=" << mode
<< " timeout=" << FLAGS_kudu_operation_timeout_ms
<< " node with id=" << scan_node_->id()
<< " Kudu table=" << scan_node_->table_desc()->table_name();
if (!timestamp_slots_.empty()) {
uint64_t row_format_flags =
kudu::client::KuduScanner::PAD_UNIXTIME_MICROS_TO_16_BYTES;
scanner_->SetRowFormatFlags(row_format_flags);
}
if (scan_node_->filter_ctxs_.size() > 0) {
for (const FilterContext& ctx : scan_node_->filter_ctxs_) {
if (!ctx.filter->HasFilter() || ctx.filter->AlwaysTrue()) {
// If it's always true, the filter won't actually remove any rows so we
// don't need to push it down to Kudu.
continue;
} else if (ctx.filter->AlwaysFalse()) {
// We can skip this entire scan if it's always false.
CloseCurrentClientScanner();
*eos = true;
return Status::OK();
}
auto it = ctx.filter->filter_desc().planid_to_target_ndx.find(scan_node_->id());
const TRuntimeFilterTargetDesc& target_desc =
ctx.filter->filter_desc().targets[it->second];
const string& col_name = target_desc.kudu_col_name;
DCHECK(col_name != "");
if (ctx.filter->is_bloom_filter()) {
BloomFilter* filter = ctx.filter->get_bloom_filter();
DCHECK(filter != nullptr);
kudu::BlockBloomFilter* bbf = filter->GetBlockBloomFilter();
vector<kudu::Slice> bbf_vec = {
kudu::Slice(reinterpret_cast<const uint8_t*>(bbf), sizeof(*bbf))};
KUDU_RETURN_IF_ERROR(
scanner_->AddConjunctPredicate(
scanner_->GetKuduTable()->NewInBloomFilterPredicate(col_name, bbf_vec)),
BuildErrorString("Failed to add bloom filter predicate"));
} else {
DCHECK(ctx.filter->is_min_max_filter());
MinMaxFilter* filter = ctx.filter->get_min_max();
DCHECK(filter != nullptr);
const void* min = filter->GetMin();
const void* max = filter->GetMax();
// If the type of the filter is not the same as the type of the target column,
// there must be an implicit integer cast and we need to ensure the min/max we
// pass to Kudu are within the range of the target column.
int64_t int_min;
int64_t int_max;
const ColumnType& col_type = ColumnType::FromThrift(target_desc.kudu_col_type);
if (col_type.type != filter->type()) {
DCHECK(col_type.IsIntegerType());
if (!filter->GetCastIntMinMax(col_type, &int_min, &int_max)) {
// The min/max for this filter is outside the range for the target column,
// so all rows are filtered out and we can skip the scan.
CloseCurrentClientScanner();
*eos = true;
return Status::OK();
}
min = &int_min;
max = &int_max;
}
KuduValue* min_value;
RETURN_IF_ERROR(CreateKuduValue(col_type, min, &min_value));
KUDU_RETURN_IF_ERROR(scanner_->AddConjunctPredicate(
scanner_->GetKuduTable()->NewComparisonPredicate(
col_name, KuduPredicate::ComparisonOp::GREATER_EQUAL, min_value)),
BuildErrorString("Failed to add min predicate"));
KuduValue* max_value;
RETURN_IF_ERROR(CreateKuduValue(col_type, max, &max_value));
KUDU_RETURN_IF_ERROR(scanner_->AddConjunctPredicate(
scanner_->GetKuduTable()->NewComparisonPredicate(
col_name, KuduPredicate::ComparisonOp::LESS_EQUAL, max_value)),
BuildErrorString("Failed to add max predicate"));
}
}
}
if (scan_node_->limit() != -1 && conjunct_evals_.empty()) {
KUDU_RETURN_IF_ERROR(scanner_->SetLimit(scan_node_->limit()),
BuildErrorString("Failed to set limit on scan"));
}
{
SCOPED_TIMER2(state_->total_storage_wait_timer(), scan_node_->kudu_client_time());
KUDU_RETURN_IF_ERROR(scanner_->Open(), BuildErrorString("Unable to open scanner"));
}
*eos = false;
return Status::OK();
}
void KuduScanner::CloseCurrentClientScanner() {
DCHECK_NOTNULL(scanner_.get());
scanner_->Close();
scanner_.reset();
}
Status KuduScanner::HandleEmptyProjection(RowBatch* row_batch) {
int num_rows_remaining = cur_kudu_batch_.NumRows() - cur_kudu_batch_num_read_;
int rows_to_add = std::min(row_batch->capacity() - row_batch->num_rows(),
num_rows_remaining);
int num_to_commit = 0;
if (LIKELY(conjunct_evals_.empty())) {
num_to_commit = rows_to_add;
} else {
for (int i = 0; i < rows_to_add; ++i) {
if (ExecNode::EvalConjuncts(conjunct_evals_.data(),
conjunct_evals_.size(), nullptr)) {
++num_to_commit;
}
}
}
for (int i = 0; i < num_to_commit; ++i) {
// IMPALA-6258: Initialize tuple ptrs to non-null value
TupleRow* row = row_batch->GetRow(row_batch->AddRow());
row->SetTuple(0, Tuple::POISON);
row_batch->CommitLastRow();
}
cur_kudu_batch_num_read_ += rows_to_add;
return Status::OK();
}
Status KuduScanner::DecodeRowsIntoRowBatch(RowBatch* row_batch, Tuple** tuple_mem) {
// Short-circuit for empty projection cases.
if (scan_node_->tuple_desc()->slots().empty()) {
return HandleEmptyProjection(row_batch);
}
// Iterate through the Kudu rows, evaluate conjuncts and deep-copy survivors into
// 'row_batch'.
bool has_conjuncts = !conjunct_evals_.empty();
int num_rows = cur_kudu_batch_.NumRows();
for (int krow_idx = cur_kudu_batch_num_read_; krow_idx < num_rows; ++krow_idx) {
Tuple* kudu_tuple = const_cast<Tuple*>(
reinterpret_cast<const Tuple*>(cur_kudu_batch_.direct_data().data()
+ (krow_idx * scan_node_->row_desc()->GetRowSize())));
++cur_kudu_batch_num_read_;
// Kudu tuples containing TIMESTAMP columns (UNIXTIME_MICROS in Kudu, stored as an
// int64) have 8 bytes of padding following the timestamp. Because this padding is
// provided, Impala can convert these unixtime values to Impala's TimestampValue
// format in place and copy the rows to Impala row batches.
// TODO: avoid mem copies with a Kudu mem 'release' mechanism, attaching mem to the
// batch.
// TODO: consider codegen for this per-timestamp col fixup
for (const SlotDescriptor* slot : timestamp_slots_) {
DCHECK(slot->type().type == TYPE_TIMESTAMP);
if (slot->is_nullable() && kudu_tuple->IsNull(slot->null_indicator_offset())) {
continue;
}
int64_t ts_micros = *reinterpret_cast<int64_t*>(
kudu_tuple->GetSlot(slot->tuple_offset()));
TimestampValue tv = TimestampValue::UtcFromUnixTimeMicros(ts_micros);
if (tv.HasDateAndTime()) {
RawValue::Write(&tv, kudu_tuple, slot, nullptr);
} else {
kudu_tuple->SetNull(slot->null_indicator_offset());
RETURN_IF_ERROR(state_->LogOrReturnError(
ErrorMsg::Init(TErrorCode::KUDU_TIMESTAMP_OUT_OF_RANGE,
scan_node_->table_desc()->table_name(),
scanner_->GetKuduTable()->schema().Column(slot->col_pos()).name())));
}
}
// Kudu tuples containing VARCHAR columns use characters instead of bytes to limit
// the length. In the case of ASCII values there is no difference. However, if
// multi-byte characters are written to Kudu the length could be longer than allowed.
// This checks the actual length and truncates the value length if it is too long.
// TODO(IMPALA-5675): Remove this when Impala supports UTF-8 character VARCHAR length.
for (const SlotDescriptor* slot : varchar_slots_) {
DCHECK(slot->type().type == TYPE_VARCHAR);
if (slot->is_nullable() && kudu_tuple->IsNull(slot->null_indicator_offset())) {
continue;
}
StringValue* sv = reinterpret_cast<StringValue*>(
kudu_tuple->GetSlot(slot->tuple_offset()));
int src_len = sv->len;
int dst_len = slot->type().len;
if (src_len > dst_len) {
sv->len = dst_len;
}
}
// Evaluate the conjuncts that haven't been pushed down to Kudu. Conjunct evaluation
// is performed directly on the Kudu tuple because its memory layout is identical to
// Impala's. We only copy the surviving tuples to Impala's output row batch.
if (has_conjuncts && !ExecNode::EvalConjuncts(conjunct_evals_.data(),
conjunct_evals_.size(), reinterpret_cast<TupleRow*>(&kudu_tuple))) {
continue;
}
// Deep copy the tuple, set it in a new row, and commit the row.
kudu_tuple->DeepCopy(*tuple_mem, *scan_node_->tuple_desc(),
row_batch->tuple_data_pool());
TupleRow* row = row_batch->GetRow(row_batch->AddRow());
row->SetTuple(0, *tuple_mem);
row_batch->CommitLastRow();
// If we've reached the capacity, or the LIMIT for the scan, return.
if (row_batch->AtCapacity() || scan_node_->ReachedLimitShared()) break;
// Move to the next tuple in the tuple buffer.
*tuple_mem = next_tuple(*tuple_mem);
}
expr_results_pool_->Clear();
// Check the status in case an error status was set during conjunct evaluation.
return state_->GetQueryStatus();
}
Status KuduScanner::GetNextScannerBatch() {
SCOPED_TIMER2(state_->total_storage_wait_timer(), scan_node_->kudu_client_time());
int64_t now = MonotonicMicros();
KUDU_RETURN_IF_ERROR(scanner_->NextBatch(&cur_kudu_batch_),
BuildErrorString("Unable to advance iterator"));
COUNTER_ADD(scan_node_->kudu_round_trips(), 1);
cur_kudu_batch_num_read_ = 0;
COUNTER_ADD(scan_node_->rows_read_counter(), cur_kudu_batch_.NumRows());
last_alive_time_micros_ = now;
return Status::OK();
}
string KuduScanner::BuildErrorString(const char* msg) {
return Substitute("$0 for node with id '$1' for Kudu table '$2'", msg, scan_node_->id(),
scan_node_->table_desc()->table_name());
}
} // namespace impala
|
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2021 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "DocumentExpressionContext.h"
#include "Aql/AqlValue.h"
using namespace arangodb::aql;
DocumentExpressionContext::DocumentExpressionContext(arangodb::transaction::Methods& trx,
QueryContext& query,
AqlFunctionsInternalCache& cache,
arangodb::velocypack::Slice document) noexcept
: QueryExpressionContext(trx, query, cache), _document(document) {}
AqlValue DocumentExpressionContext::getVariableValue(Variable const*, bool doCopy,
bool& mustDestroy) const {
if (doCopy) {
mustDestroy = true; // as we are copying
return AqlValue(AqlValueHintCopy(_document.start()));
}
mustDestroy = false;
return AqlValue(AqlValueHintDocumentNoCopy(_document.start()));
}
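// A minimal caller-side sketch of the ownership contract above: with
// doCopy == true the returned AqlValue is an owned copy and mustDestroy comes
// back true, so the caller has to release it. AqlValue::destroy() is assumed
// here as the release hook (ArangoDB's AqlValueGuard wraps the same pattern).
static void exampleUse(DocumentExpressionContext& ctx, Variable const* var) {
  bool mustDestroy = false;
  AqlValue v = ctx.getVariableValue(var, /*doCopy=*/true, mustDestroy);
  // ... read from v ...
  if (mustDestroy) {
    v.destroy();
  }
}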
|
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
#include "SAddContentDialog.h"
#include "Widgets/Layout/SBorder.h"
#include "EditorStyleSet.h"
#include "SAddContentWidget.h"
#define LOCTEXT_NAMESPACE "AddContentDialog"
void SAddContentDialog::Construct(const FArguments& InArgs)
{
SAssignNew(AddContentWidget, SAddContentWidget);
SWindow::Construct(SWindow::FArguments()
.Title(LOCTEXT("AddContentDialogTitle", "Add Content to the Project"))
.SizingRule(ESizingRule::UserSized)
.ClientSize(FVector2D(900, 500))
.SupportsMinimize(false)
.SupportsMaximize(false)
[
SNew(SBorder)
.BorderImage(FEditorStyle::GetBrush("ToolPanel.GroupBorder"))
.Padding(FMargin(15))
[
SNew(SVerticalBox)
// Add content widget.
+ SVerticalBox::Slot()
[
AddContentWidget.ToSharedRef()
]
]
]);
}
SAddContentDialog::~SAddContentDialog()
{
}
#undef LOCTEXT_NAMESPACE
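// A minimal sketch of opening this dialog from editor code, assuming a context
// where Slate is initialized (requires "Framework/Application/SlateApplication.h");
// parenting the window to the main frame is omitted here.
static void ShowAddContentDialogExample()
{
	TSharedRef<SAddContentDialog> Dialog = SNew(SAddContentDialog);
	FSlateApplication::Get().AddWindow(Dialog);
}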
|
//-----------------------------------------------------------------------------
// Copyright (c) 2018 Benjamin Buch
//
// https://github.com/bebuch/webservice
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt)
//-----------------------------------------------------------------------------
#ifndef _webservice__json_ws_service__hpp_INCLUDED_
#define _webservice__json_ws_service__hpp_INCLUDED_
#include "json_conversion.hpp"
#include "basic_ws_service.hpp"
namespace webservice{
template <
typename Value,
typename SendBinaryType,
typename ReceiveBinaryType = SendBinaryType >
class basic_json_ws_service
: public basic_ws_service< Value,
nlohmann::json, SendBinaryType, nlohmann::json, ReceiveBinaryType >
{
using basic_ws_service< Value, nlohmann::json, SendBinaryType,
nlohmann::json, ReceiveBinaryType >::basic_ws_service;
};
class json_ws_service
: public basic_json_ws_service< none_t, std::vector< std::uint8_t > >
{
using basic_json_ws_service::basic_json_ws_service;
/// \brief Create a new ws_session
void on_server_connect(
boost::asio::ip::tcp::socket&& socket,
http_request&& req
){
async_server_connect(std::move(socket), std::move(req));
}
/// \brief Create a new client websocket session
void on_client_connect(
std::string&& host,
std::string&& port,
std::string&& resource
){
async_client_connect(std::move(host), std::move(port),
std::move(resource));
}
};
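	// A minimal sketch of instantiating the class template above with a custom
	// per-service value type; 'session_state' is hypothetical. The resulting
	// service exchanges nlohmann::json text messages and
	// std::vector< std::uint8_t > binary messages, as the template arguments
	// specify.
	struct session_state{
		int message_count = 0;
	};

	using counting_json_ws_service =
		basic_json_ws_service< session_state, std::vector< std::uint8_t > >;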
}
#endif
|
// Description: segment tree with point update and range query
// TimeComplexity: initialization $\mathcal{O}(n)$, update and query $\mathcal{O}(\log n)$
// Verified: AOJ DSL_2_A
template <int depth> struct Segment_tree {
const static int h = depth;
const static int n = 1 << h;
using T = long long;
T data[2 * n];
const T out = (1LL << 31) - 1;
inline T vmerge(T l, T r) {return min(l, r);}
void init() {
fill_n(data, 2 * n, out);
}
void update(int p, T x) { // set value at position p
for (data[p += n] = x; p > 1; p >>= 1){
data[p >> 1] = vmerge(data[p], data[p ^ 1]);
}
}
T query(int l, int r) { // sum on interval [l, r)
T resl = out, resr = out;
for (l += n, r += n; l < r; l >>= 1, r >>= 1) {
if (l & 1) resl = vmerge(data[l++], resl);
if (r & 1) resr = vmerge(resr, data[--r]);
}
return vmerge(resl, resr);
}
};
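// Example usage for the structure above (assumes the usual competitive-
// programming preamble the struct itself relies on for min() and fill_n():
// #include <bits/stdc++.h> and using namespace std).
int main() {
  static Segment_tree<17> st; // 2^17 = 131072 leaves; static: ~2 MB of tree data
  st.init();                  // every position starts at the identity, 2^31 - 1
  st.update(3, 5);            // a[3] = 5
  st.update(6, 2);            // a[6] = 2
  printf("%lld\n", st.query(0, 8)); // minimum over [0, 8) -> 2
  printf("%lld\n", st.query(4, 8)); // minimum over [4, 8) -> 2
  return 0;
}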
|
// Copyright (c) 2014-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <chain.h>
#include <versionbits.h>
#include <test/test_drivechain.h>
#include <chainparams.h>
#include <validation.h>
#include <consensus/params.h>
#include <boost/test/unit_test.hpp>
/* Define a virtual block time, one block per 10 minutes after Nov 14 2014, 0:55:36am */
int32_t TestTime(int nHeight) { return 1415926536 + 600 * nHeight; }
static const Consensus::Params paramsDummy = Consensus::Params();
class TestConditionChecker : public AbstractThresholdConditionChecker
{
private:
mutable ThresholdConditionCache cache;
public:
int64_t BeginTime(const Consensus::Params& params) const override { return TestTime(10000); }
int64_t EndTime(const Consensus::Params& params) const override { return TestTime(20000); }
int Period(const Consensus::Params& params) const override { return 1000; }
int Threshold(const Consensus::Params& params) const override { return 900; }
bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return (pindex->nVersion & 0x100); }
ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, paramsDummy, cache); }
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, paramsDummy, cache); }
};
class TestAlwaysActiveConditionChecker : public TestConditionChecker
{
public:
int64_t BeginTime(const Consensus::Params& params) const override { return Consensus::BIP9Deployment::ALWAYS_ACTIVE; }
};
#define CHECKERS 6
class VersionBitsTester
{
// A fake blockchain
std::vector<CBlockIndex*> vpblock;
// 6 independent checkers for the same bit.
// The first one performs all checks, the second only 50%, the third only 25%, etc...
// This is to test whether lack of cached information leads to the same results.
TestConditionChecker checker[CHECKERS];
// Another 6 that assume always active activation
TestAlwaysActiveConditionChecker checker_always[CHECKERS];
// Test counter (to identify failures)
int num;
public:
VersionBitsTester() : num(0) {}
VersionBitsTester& Reset() {
for (unsigned int i = 0; i < vpblock.size(); i++) {
delete vpblock[i];
}
for (unsigned int i = 0; i < CHECKERS; i++) {
checker[i] = TestConditionChecker();
checker_always[i] = TestAlwaysActiveConditionChecker();
}
vpblock.clear();
return *this;
}
~VersionBitsTester() {
Reset();
}
VersionBitsTester& Mine(unsigned int height, int32_t nTime, int32_t nVersion) {
while (vpblock.size() < height) {
CBlockIndex* pindex = new CBlockIndex();
pindex->nHeight = vpblock.size();
pindex->pprev = vpblock.size() > 0 ? vpblock.back() : nullptr;
pindex->nTime = nTime;
pindex->nVersion = nVersion;
pindex->BuildSkip();
vpblock.push_back(pindex);
}
return *this;
}
VersionBitsTester& TestStateSinceHeight(int height) {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateSinceHeightFor(vpblock.empty() ? nullptr : vpblock.back()) == height, strprintf("Test %i for StateSinceHeight", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateSinceHeightFor(vpblock.empty() ? nullptr : vpblock.back()) == 0, strprintf("Test %i for StateSinceHeight (always active)", num));
}
}
num++;
return *this;
}
VersionBitsTester& TestDefined() {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_DEFINED, strprintf("Test %i for DEFINED", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num));
}
}
num++;
return *this;
}
VersionBitsTester& TestStarted() {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_STARTED, strprintf("Test %i for STARTED", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num));
}
}
num++;
return *this;
}
VersionBitsTester& TestLockedIn() {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_LOCKED_IN, strprintf("Test %i for LOCKED_IN", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num));
}
}
num++;
return *this;
}
VersionBitsTester& TestActive() {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num));
}
}
num++;
return *this;
}
VersionBitsTester& TestFailed() {
for (int i = 0; i < CHECKERS; i++) {
if (InsecureRandBits(i) == 0) {
BOOST_CHECK_MESSAGE(checker[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_FAILED, strprintf("Test %i for FAILED", num));
BOOST_CHECK_MESSAGE(checker_always[i].GetStateFor(vpblock.empty() ? nullptr : vpblock.back()) == THRESHOLD_ACTIVE, strprintf("Test %i for ACTIVE (always active)", num));
}
}
num++;
return *this;
}
CBlockIndex * Tip() { return vpblock.size() ? vpblock.back() : nullptr; }
};
BOOST_FIXTURE_TEST_SUITE(versionbits_tests, TestingSetup)
BOOST_AUTO_TEST_CASE(versionbits_test)
{
for (int i = 0; i < 64; i++) {
// DEFINED -> FAILED
VersionBitsTester().TestDefined().TestStateSinceHeight(0)
.Mine(1, TestTime(1), 0x100).TestDefined().TestStateSinceHeight(0)
.Mine(11, TestTime(11), 0x100).TestDefined().TestStateSinceHeight(0)
.Mine(989, TestTime(989), 0x100).TestDefined().TestStateSinceHeight(0)
.Mine(999, TestTime(20000), 0x100).TestDefined().TestStateSinceHeight(0)
.Mine(1000, TestTime(20000), 0x100).TestFailed().TestStateSinceHeight(1000)
.Mine(1999, TestTime(30001), 0x100).TestFailed().TestStateSinceHeight(1000)
.Mine(2000, TestTime(30002), 0x100).TestFailed().TestStateSinceHeight(1000)
.Mine(2001, TestTime(30003), 0x100).TestFailed().TestStateSinceHeight(1000)
.Mine(2999, TestTime(30004), 0x100).TestFailed().TestStateSinceHeight(1000)
.Mine(3000, TestTime(30005), 0x100).TestFailed().TestStateSinceHeight(1000)
// DEFINED -> STARTED -> FAILED
.Reset().TestDefined().TestStateSinceHeight(0)
.Mine(1, TestTime(1), 0).TestDefined().TestStateSinceHeight(0)
    .Mine(1000, TestTime(10000) - 1, 0x100).TestDefined().TestStateSinceHeight(0) // One second more and it would be started
.Mine(2000, TestTime(10000), 0x100).TestStarted().TestStateSinceHeight(2000) // So that's what happens the next period
.Mine(2051, TestTime(10010), 0).TestStarted().TestStateSinceHeight(2000) // 51 old blocks
.Mine(2950, TestTime(10020), 0x100).TestStarted().TestStateSinceHeight(2000) // 899 new blocks
.Mine(3000, TestTime(20000), 0).TestFailed().TestStateSinceHeight(3000) // 50 old blocks (so 899 out of the past 1000)
.Mine(4000, TestTime(20010), 0x100).TestFailed().TestStateSinceHeight(3000)
// DEFINED -> STARTED -> FAILED while threshold reached
.Reset().TestDefined().TestStateSinceHeight(0)
.Mine(1, TestTime(1), 0).TestDefined().TestStateSinceHeight(0)
    .Mine(1000, TestTime(10000) - 1, 0x101).TestDefined().TestStateSinceHeight(0) // One second more and it would be started
.Mine(2000, TestTime(10000), 0x101).TestStarted().TestStateSinceHeight(2000) // So that's what happens the next period
.Mine(2999, TestTime(30000), 0x100).TestStarted().TestStateSinceHeight(2000) // 999 new blocks
.Mine(3000, TestTime(30000), 0x100).TestFailed().TestStateSinceHeight(3000) // 1 new block (so 1000 out of the past 1000 are new)
.Mine(3999, TestTime(30001), 0).TestFailed().TestStateSinceHeight(3000)
.Mine(4000, TestTime(30002), 0).TestFailed().TestStateSinceHeight(3000)
.Mine(14333, TestTime(30003), 0).TestFailed().TestStateSinceHeight(3000)
.Mine(24000, TestTime(40000), 0).TestFailed().TestStateSinceHeight(3000)
// DEFINED -> STARTED -> LOCKEDIN at the last minute -> ACTIVE
.Reset().TestDefined()
.Mine(1, TestTime(1), 0).TestDefined().TestStateSinceHeight(0)
    .Mine(1000, TestTime(10000) - 1, 0x101).TestDefined().TestStateSinceHeight(0) // One second more and it would be started
.Mine(2000, TestTime(10000), 0x101).TestStarted().TestStateSinceHeight(2000) // So that's what happens the next period
.Mine(2050, TestTime(10010), 0x200).TestStarted().TestStateSinceHeight(2000) // 50 old blocks
.Mine(2950, TestTime(10020), 0x100).TestStarted().TestStateSinceHeight(2000) // 900 new blocks
.Mine(2999, TestTime(19999), 0x200).TestStarted().TestStateSinceHeight(2000) // 49 old blocks
.Mine(3000, TestTime(29999), 0x200).TestLockedIn().TestStateSinceHeight(3000) // 1 old block (so 900 out of the past 1000)
.Mine(3999, TestTime(30001), 0).TestLockedIn().TestStateSinceHeight(3000)
.Mine(4000, TestTime(30002), 0).TestActive().TestStateSinceHeight(4000)
.Mine(14333, TestTime(30003), 0).TestActive().TestStateSinceHeight(4000)
.Mine(24000, TestTime(40000), 0).TestActive().TestStateSinceHeight(4000)
// DEFINED multiple periods -> STARTED multiple periods -> FAILED
.Reset().TestDefined().TestStateSinceHeight(0)
.Mine(999, TestTime(999), 0).TestDefined().TestStateSinceHeight(0)
.Mine(1000, TestTime(1000), 0).TestDefined().TestStateSinceHeight(0)
.Mine(2000, TestTime(2000), 0).TestDefined().TestStateSinceHeight(0)
.Mine(3000, TestTime(10000), 0).TestStarted().TestStateSinceHeight(3000)
.Mine(4000, TestTime(10000), 0).TestStarted().TestStateSinceHeight(3000)
.Mine(5000, TestTime(10000), 0).TestStarted().TestStateSinceHeight(3000)
.Mine(6000, TestTime(20000), 0).TestFailed().TestStateSinceHeight(6000)
.Mine(7000, TestTime(20000), 0x100).TestFailed().TestStateSinceHeight(6000);
}
// Sanity checks of version bit deployments
const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);
const Consensus::Params &mainnetParams = chainParams->GetConsensus();
for (int i=0; i<(int) Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) {
uint32_t bitmask = VersionBitsMask(mainnetParams, static_cast<Consensus::DeploymentPos>(i));
// Make sure that no deployment tries to set an invalid bit.
BOOST_CHECK_EQUAL(bitmask & ~(uint32_t)VERSIONBITS_TOP_MASK, bitmask);
        // Verify that the deployment windows of different deployments using the
        // same bit are disjoint.
// This test may need modification at such time as a new deployment
// is proposed that reuses the bit of an activated soft fork, before the
// end time of that soft fork. (Alternatively, the end time of that
// activated soft fork could be later changed to be earlier to avoid
// overlap.)
for (int j=i+1; j<(int) Consensus::MAX_VERSION_BITS_DEPLOYMENTS; j++) {
if (VersionBitsMask(mainnetParams, static_cast<Consensus::DeploymentPos>(j)) == bitmask) {
BOOST_CHECK(mainnetParams.vDeployments[j].nStartTime > mainnetParams.vDeployments[i].nTimeout ||
mainnetParams.vDeployments[i].nStartTime > mainnetParams.vDeployments[j].nTimeout);
}
}
}
}
BOOST_AUTO_TEST_CASE(versionbits_computeblockversion)
{
// Check that ComputeBlockVersion will set the appropriate bit correctly
// on mainnet.
const auto chainParams = CreateChainParams(CBaseChainParams::MAIN);
const Consensus::Params &mainnetParams = chainParams->GetConsensus();
// Use the TESTDUMMY deployment for testing purposes.
int64_t bit = mainnetParams.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit;
int64_t nStartTime = mainnetParams.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime;
int64_t nTimeout = mainnetParams.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nTimeout;
assert(nStartTime < nTimeout);
// In the first chain, test that the bit is set by CBV until it has failed.
// In the second chain, test the bit is set by CBV while STARTED and
// LOCKED-IN, and then no longer set while ACTIVE.
VersionBitsTester firstChain, secondChain;
// Start generating blocks before nStartTime
int64_t nTime = nStartTime - 1;
// Before MedianTimePast of the chain has crossed nStartTime, the bit
// should not be set.
CBlockIndex *lastBlock = nullptr;
lastBlock = firstChain.Mine(2016, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
// Mine 2011 more blocks at the old time, and check that CBV isn't setting the bit yet.
for (int i=1; i<2012; i++) {
lastBlock = firstChain.Mine(2016+i, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
// This works because VERSIONBITS_LAST_OLD_BLOCK_VERSION happens
// to be 4, and the bit we're testing happens to be bit 28.
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
}
// Now mine 5 more blocks at the start time -- MTP should not have passed yet, so
// CBV should still not yet set the bit.
nTime = nStartTime;
for (int i=2012; i<=2016; i++) {
lastBlock = firstChain.Mine(2016+i, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
}
// Advance to the next period and transition to STARTED,
lastBlock = firstChain.Mine(6048, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
// so ComputeBlockVersion should now set the bit,
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
// and should also be using the VERSIONBITS_TOP_BITS.
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
// Check that ComputeBlockVersion will set the bit until nTimeout
nTime += 600;
int blocksToMine = 4032; // test blocks for up to 2 time periods
int nHeight = 6048;
// These blocks are all before nTimeout is reached.
while (nTime < nTimeout && blocksToMine > 0) {
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
blocksToMine--;
nTime += 600;
nHeight += 1;
}
nTime = nTimeout;
// FAILED is only triggered at the end of a period, so CBV should be setting
// the bit until the period transition.
for (int i=0; i<2015; i++) {
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
nHeight += 1;
}
    // Mining one more block should cause the bit to no longer be set.
lastBlock = firstChain.Mine(nHeight+1, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
// On a new chain:
// verify that the bit will be set after lock-in, and then stop being set
// after activation.
nTime = nStartTime;
// Mine one period worth of blocks, and check that the bit will be on for the
// next period.
lastBlock = secondChain.Mine(2016, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
// Mine another period worth of blocks, signaling the new bit.
lastBlock = secondChain.Mine(4032, nTime, VERSIONBITS_TOP_BITS | (1<<bit)).Tip();
// After one period of setting the bit on each block, it should have locked in.
// We keep setting the bit for one more period though, until activation.
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
    // Now check that we keep setting the bit until the end of this period, and
// then stop at the beginning of the next period.
lastBlock = secondChain.Mine(6047, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK((ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit)) != 0);
lastBlock = secondChain.Mine(6048, nTime, VERSIONBITS_LAST_OLD_BLOCK_VERSION).Tip();
BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & (1<<bit), 0);
// Finally, verify that after a soft fork has activated, CBV no longer uses
// VERSIONBITS_LAST_OLD_BLOCK_VERSION.
//BOOST_CHECK_EQUAL(ComputeBlockVersion(lastBlock, mainnetParams) & VERSIONBITS_TOP_MASK, VERSIONBITS_TOP_BITS);
}
BOOST_AUTO_TEST_SUITE_END()
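// A simplified, self-contained sketch of the threshold state machine these
// tests exercise. It abstracts away the chain: the caller supplies, at each
// 1000-block period boundary, the median-time-past and the number of
// signalling blocks in the period just ended. Constants mirror
// TestConditionChecker above; the real checker derives them per deployment.
enum class SketchState { DEFINED, STARTED, LOCKED_IN, ACTIVE, FAILED };

SketchState NextPeriodState(SketchState cur, int64_t median_time_past,
                            int signalling_blocks) {
    const int64_t begin_time = TestTime(10000);
    const int64_t end_time = TestTime(20000);
    const int threshold = 900;  // out of a 1000-block period
    switch (cur) {
    case SketchState::DEFINED:
        if (median_time_past >= end_time) return SketchState::FAILED;
        if (median_time_past >= begin_time) return SketchState::STARTED;
        return SketchState::DEFINED;
    case SketchState::STARTED:
        // Timeout is checked before the threshold, which is why the
        // "FAILED while threshold reached" chain above never locks in.
        if (median_time_past >= end_time) return SketchState::FAILED;
        if (signalling_blocks >= threshold) return SketchState::LOCKED_IN;
        return SketchState::STARTED;
    case SketchState::LOCKED_IN:
        return SketchState::ACTIVE;  // activation follows one period later
    default:
        return cur;  // ACTIVE and FAILED are terminal
    }
}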
|
/*
* Copyright 2009-2017 Alibaba Cloud All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <alibabacloud/live/model/DescribeLiveStreamsOnlineListResult.h>
#include <json/json.h>
using namespace AlibabaCloud::Live;
using namespace AlibabaCloud::Live::Model;
DescribeLiveStreamsOnlineListResult::DescribeLiveStreamsOnlineListResult() :
ServiceResult()
{}
DescribeLiveStreamsOnlineListResult::DescribeLiveStreamsOnlineListResult(const std::string &payload) :
ServiceResult()
{
parse(payload);
}
DescribeLiveStreamsOnlineListResult::~DescribeLiveStreamsOnlineListResult()
{}
void DescribeLiveStreamsOnlineListResult::parse(const std::string &payload)
{
Json::Reader reader;
Json::Value value;
reader.parse(payload, value);
setRequestId(value["RequestId"].asString());
auto allOnlineInfoNode = value["OnlineInfo"]["LiveStreamOnlineInfo"];
for (auto valueOnlineInfoLiveStreamOnlineInfo : allOnlineInfoNode)
{
LiveStreamOnlineInfo onlineInfoObject;
if(!valueOnlineInfoLiveStreamOnlineInfo["DomainName"].isNull())
onlineInfoObject.domainName = valueOnlineInfoLiveStreamOnlineInfo["DomainName"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["AppName"].isNull())
onlineInfoObject.appName = valueOnlineInfoLiveStreamOnlineInfo["AppName"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["StreamName"].isNull())
onlineInfoObject.streamName = valueOnlineInfoLiveStreamOnlineInfo["StreamName"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["PublishTime"].isNull())
onlineInfoObject.publishTime = valueOnlineInfoLiveStreamOnlineInfo["PublishTime"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["PublishUrl"].isNull())
onlineInfoObject.publishUrl = valueOnlineInfoLiveStreamOnlineInfo["PublishUrl"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["PublishDomain"].isNull())
onlineInfoObject.publishDomain = valueOnlineInfoLiveStreamOnlineInfo["PublishDomain"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["PublishType"].isNull())
onlineInfoObject.publishType = valueOnlineInfoLiveStreamOnlineInfo["PublishType"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["Transcoded"].isNull())
onlineInfoObject.transcoded = valueOnlineInfoLiveStreamOnlineInfo["Transcoded"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["TranscodeId"].isNull())
onlineInfoObject.transcodeId = valueOnlineInfoLiveStreamOnlineInfo["TranscodeId"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["ServerIp"].isNull())
onlineInfoObject.serverIp = valueOnlineInfoLiveStreamOnlineInfo["ServerIp"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["ClientIp"].isNull())
onlineInfoObject.clientIp = valueOnlineInfoLiveStreamOnlineInfo["ClientIp"].asString();
if(!valueOnlineInfoLiveStreamOnlineInfo["VideoCodecId"].isNull())
onlineInfoObject.videoCodecId = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["VideoCodecId"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["VideoDataRate"].isNull())
onlineInfoObject.videoDataRate = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["VideoDataRate"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["FrameRate"].isNull())
onlineInfoObject.frameRate = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["FrameRate"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["Width"].isNull())
onlineInfoObject.width = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["Width"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["Height"].isNull())
onlineInfoObject.height = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["Height"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["AudioCodecId"].isNull())
onlineInfoObject.audioCodecId = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["AudioCodecId"].asString());
if(!valueOnlineInfoLiveStreamOnlineInfo["AudioDataRate"].isNull())
onlineInfoObject.audioDataRate = std::stoi(valueOnlineInfoLiveStreamOnlineInfo["AudioDataRate"].asString());
onlineInfo_.push_back(onlineInfoObject);
}
if(!value["PageNum"].isNull())
pageNum_ = std::stoi(value["PageNum"].asString());
if(!value["PageSize"].isNull())
pageSize_ = std::stoi(value["PageSize"].asString());
if(!value["TotalNum"].isNull())
totalNum_ = std::stoi(value["TotalNum"].asString());
if(!value["TotalPage"].isNull())
totalPage_ = std::stoi(value["TotalPage"].asString());
}
int DescribeLiveStreamsOnlineListResult::getTotalNum()const
{
return totalNum_;
}
int DescribeLiveStreamsOnlineListResult::getPageNum()const
{
return pageNum_;
}
int DescribeLiveStreamsOnlineListResult::getPageSize()const
{
return pageSize_;
}
int DescribeLiveStreamsOnlineListResult::getTotalPage()const
{
return totalPage_;
}
std::vector<DescribeLiveStreamsOnlineListResult::LiveStreamOnlineInfo> DescribeLiveStreamsOnlineListResult::getOnlineInfo()const
{
return onlineInfo_;
}
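// A minimal sketch of consuming this result type directly from a raw JSON
// payload. The payload below is hand-written for illustration; note that the
// parser expects the numeric fields as JSON strings, matching the std::stoi
// calls above.
#include <iostream>
static void describeOnlineListExample()
{
	std::string payload = R"({
		"RequestId": "example-request-id",
		"PageNum": "1", "PageSize": "10", "TotalNum": "1", "TotalPage": "1",
		"OnlineInfo": { "LiveStreamOnlineInfo": [ {
			"DomainName": "example.com", "AppName": "app", "StreamName": "stream",
			"Width": "1280", "Height": "720" } ] }
	})";
	DescribeLiveStreamsOnlineListResult result(payload);
	std::cout << "total: " << result.getTotalNum() << std::endl;
	for (const auto &info : result.getOnlineInfo())
		std::cout << info.domainName << "/" << info.appName << "/"
		          << info.streamName << std::endl;
}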
|
#include <stdexcept>
#include "OnlineDB/Oracle/interface/Oracle.h"
#include "OnlineDB/EcalCondDB/interface/FEConfigMainInfo.h"
#include "OnlineDB/EcalCondDB/interface/Tm.h"
#include "OnlineDB/EcalCondDB/interface/DateHandler.h"
using namespace std;
using namespace oracle::occi;
FEConfigMainInfo::FEConfigMainInfo()
{
m_env = NULL;
m_conn = NULL;
m_writeStmt = NULL;
m_readStmt = NULL;
m_ID=0;
m_version=0;
clear();
}
FEConfigMainInfo::~FEConfigMainInfo(){}
void FEConfigMainInfo::clear() {
m_description="";
m_ped_id=0;
m_lin_id=0;
m_lut_id=0;
m_sli_id=0;
m_fgr_id=0;
m_wei_id=0;
m_bxt_id=0;
m_btt_id=0;
m_tim_id=0;
m_spi_id=0;
m_bst_id=0;
m_db_time=Tm();
}
int FEConfigMainInfo::fetchNextId() noexcept(false) {
int result=0;
try {
this->checkConnection();
m_readStmt = m_conn->createStatement();
m_readStmt->setSQL("select fe_config_main_sq.NextVal from dual");
ResultSet* rset = m_readStmt->executeQuery();
while (rset->next ()){
result= rset->getInt(1);
}
m_conn->terminateStatement(m_readStmt);
return result;
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::fetchNextId(): "+e.getMessage()));
}
}
int FEConfigMainInfo::fetchID()
noexcept(false)
{
// Return from memory if available
if (m_ID>0) {
return m_ID;
}
this->checkConnection();
DateHandler dh(m_env, m_conn);
std::cout << " tag/version " << getConfigTag() <<"/"<<getVersion() << std::endl;
try {
Statement* stmt = m_conn->createStatement();
if(m_version !=0){
stmt->setSQL("SELECT conf_id from FE_CONFIG_MAIN "
"WHERE tag = :tag "
" and version = :version " );
stmt->setString(1, m_config_tag);
stmt->setInt(2, m_version);
std::cout<<" using query with version " <<endl;
} else {
// always select the last inserted one with a given tag
stmt->setSQL("SELECT conf_id from FE_CONFIG_MAIN "
"WHERE tag = :1 and version= (select max(version) from FE_CONFIG_MAIN where tag=:2) " );
stmt->setString(1, m_config_tag);
stmt->setString(2, m_config_tag);
std::cout<<" using query WITHOUT version " <<endl;
}
ResultSet* rset = stmt->executeQuery();
if (rset->next()) {
m_ID = rset->getInt(1);
} else {
m_ID = 0;
}
std::cout<<m_ID<<endl;
m_conn->terminateStatement(stmt);
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::fetchID: "+e.getMessage()));
}
setByID(m_ID);
return m_ID;
}
void FEConfigMainInfo::prepareWrite()
noexcept(false)
{
this->checkConnection();
int next_id=fetchNextId();
try {
m_writeStmt = m_conn->createStatement();
m_writeStmt->setSQL("INSERT INTO fe_config_main (conf_id, ped_conf_id, lin_conf_id, lut_conf_id, fgr_conf_id, sli_conf_id, wei_conf_id, spi_conf_id, tim_conf_id, bxt_conf_id, btt_conf_id, bst_conf_id, tag, version, description) "
" VALUES (:1, :2, :3 , :4, :5, :6 ,:7, :8, :9, :10, :11, :12, :13, :14, :15 )");
m_writeStmt->setInt(1, next_id);
m_ID=next_id;
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::prepareWrite(): "+e.getMessage()));
}
}
void FEConfigMainInfo::writeDB()
noexcept(false)
{
this->checkConnection();
this->checkPrepare();
// Validate the data, use infinity-till convention
DateHandler dh(m_env, m_conn);
try {
m_writeStmt->setInt(2, this->getPedId());
m_writeStmt->setInt(3, this->getLinId());
m_writeStmt->setInt(4, this->getLUTId());
m_writeStmt->setInt(5, this->getFgrId());
m_writeStmt->setInt(6, this->getSliId());
m_writeStmt->setInt(7, this->getWeiId());
m_writeStmt->setInt(8, this->getSpiId());
m_writeStmt->setInt(9, this->getTimId());
m_writeStmt->setInt(10, this->getBxtId());
m_writeStmt->setInt(11, this->getBttId());
m_writeStmt->setInt(12, this->getBstId());
m_writeStmt->setString(13, this->getConfigTag());
m_writeStmt->setInt(14, this->getVersion());
m_writeStmt->setString(15, this->getDescription());
m_writeStmt->executeUpdate();
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::writeDB: "+e.getMessage()));
}
// Now get the ID
if (!this->fetchID()) {
throw(std::runtime_error("FEConfigMainInfo::writeDB: Failed to write"));
}
setByID(m_ID);
cout<< "FEConfigMainInfo::writeDB>> done inserting FEConfigMainInfo with id="<<m_ID<<endl;
}
int FEConfigMainInfo::fetchIDLast()
noexcept(false)
{
this->checkConnection();
DateHandler dh(m_env, m_conn);
try {
Statement* stmt = m_conn->createStatement();
stmt->setSQL("SELECT max(conf_id) FROM fe_config_main ");
ResultSet* rset = stmt->executeQuery();
if (rset->next()) {
m_ID = rset->getInt(1);
} else {
m_ID = 0;
}
m_conn->terminateStatement(stmt);
} catch (SQLException &e) {
    throw(std::runtime_error("FEConfigMainInfo::fetchIDLast: "+e.getMessage()));
}
setByID(m_ID);
return m_ID;
}
void FEConfigMainInfo::setByID(int id)
noexcept(false)
{
this->checkConnection();
DateHandler dh(m_env, m_conn);
cout<< "FEConfigMainInfo::setByID called for id "<<id<<endl;
try {
Statement* stmt = m_conn->createStatement();
stmt->setSQL("SELECT * FROM FE_CONFIG_MAIN WHERE conf_id = :1 ");
stmt->setInt(1, id);
ResultSet* rset = stmt->executeQuery();
if (rset->next()) {
setId( rset->getInt(1) );
setPedId( rset->getInt(2) );
setLinId( rset->getInt(3) );
setLUTId( rset->getInt(4) );
setFgrId( rset->getInt(5) );
setSliId( rset->getInt(6) );
setWeiId( rset->getInt(7) );
setSpiId( rset->getInt(8) );
setTimId( rset->getInt(9) );
setBxtId( rset->getInt(10) );
setBttId( rset->getInt(11) );
setBstId( rset->getInt(12) );
setConfigTag( rset->getString(13) );
setVersion( rset->getInt(14) );
setDescription( rset->getString(15) );
Date dbdate = rset->getDate(16);
setDBTime( dh.dateToTm( dbdate ));
m_ID = id;
} else {
throw(std::runtime_error("FEConfigMainInfo::setByID: Given cycle_id is not in the database"));
}
m_conn->terminateStatement(stmt);
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::setByID: "+e.getMessage()));
}
}
void FEConfigMainInfo::fetchData(FEConfigMainInfo * result)
noexcept(false)
{
  std::cout << " ### 1 getId from FEConfigMainInfo = " << result->getId() << std::endl;
std::cout << " tag/version " << result->getConfigTag() <<"/"<<result->getVersion() << std::endl;
this->checkConnection();
DateHandler dh(m_env, m_conn);
// result->clear();
int idid=0;
if(result->getId()==0){
//throw(std::runtime_error("FEConfigMainInfo::fetchData(): no Id defined for this FEConfigMainInfo "));
idid=result->fetchID();
result->setId(idid);
}
try {
m_readStmt->setSQL("SELECT * FROM FE_CONFIG_MAIN WHERE conf_id = :1 ");
std::cout << " ### 2 getId from FEConfigMainInfo = " << result->getId() << std::endl;
m_readStmt->setInt(1, result->getId());
ResultSet* rset = m_readStmt->executeQuery();
rset->next();
result->setId( rset->getInt(1) );
    result->setPedId( rset->getInt(2) );
    result->setLinId( rset->getInt(3) );
    result->setLUTId( rset->getInt(4) );
    result->setFgrId( rset->getInt(5) );
    result->setSliId( rset->getInt(6) );
    result->setWeiId( rset->getInt(7) );
    result->setSpiId( rset->getInt(8) );
    result->setTimId( rset->getInt(9) );
    result->setBxtId( rset->getInt(10) );
    result->setBttId( rset->getInt(11) );
    result->setBstId( rset->getInt(12) );
result->setConfigTag( rset->getString(13) );
result->setVersion( rset->getInt(14) );
result->setDescription( rset->getString(15) );
Date dbdate = rset->getDate(16);
result->setDBTime( dh.dateToTm( dbdate ));
} catch (SQLException &e) {
throw(std::runtime_error("FEConfigMainInfo::fetchData(): "+e.getMessage()));
}
}
void FEConfigMainInfo::insertConfig()
noexcept(false)
{
try {
prepareWrite();
writeDB();
m_conn->commit();
terminateWriteStatement();
} catch (std::runtime_error &e) {
m_conn->rollback();
throw(e);
} catch (...) {
m_conn->rollback();
throw(std::runtime_error("FEConfigMainInfo::insertConfig: Unknown exception caught"));
}
}
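// A minimal sketch of the intended write path for this class. It assumes the
// connection members (m_env, m_conn) were already attached by a session class
// outside this file; without a live Oracle connection checkConnection() throws.
void insertExampleConfig(FEConfigMainInfo& info)
{
  info.setConfigTag("EXAMPLE_TAG");
  info.setVersion(1);
  info.setDescription("example main config");
  info.setPedId(42); // id of a previously inserted pedestal sub-configuration
  info.insertConfig(); // prepareWrite + writeDB + commit, rollback on error
  cout << "assigned conf_id = " << info.getId() << endl;
}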
|
#include "stdafx.h"
#include "interactive_motion.h"
#include "physicsshell.h"
#include "PhysicsShellHolder.h"
#include "MathUtils.h"
#include "../Include/xrRender/Kinematics.h"
interactive_motion::interactive_motion()
{
init();
}
void interactive_motion::init()
{
flags.assign(0);
}
void interactive_motion::setup(LPCSTR m,CPhysicsShell *s)
{
VERIFY(s);
motion = smart_cast<IKinematicsAnimated*>(s->PKinematics())->LL_MotionID(m);
if(motion.valid())
flags.set(fl_use_death_motion,TRUE);
}
void interactive_motion::setup(MotionID m, CPhysicsShell *s)
{
VERIFY(s);
motion = m;
if (motion.valid())
flags.set(fl_use_death_motion, TRUE);
}
void interactive_motion::anim_callback(CBlend *B)
{
VERIFY(B->CallbackParam);
((interactive_motion*)(B->CallbackParam))->flags.set(fl_switch_dm_toragdoll,TRUE);
}
void interactive_motion::play(CPhysicsShell *s)
{
VERIFY( s );
smart_cast<IKinematicsAnimated*>( s->PKinematics( ) )->PlayCycle( motion, TRUE, anim_callback, this );
state_start( s );
}
float depth = 0;
void get_depth(bool& do_colide,bool bo1,dContact& c,SGameMtl * /*material_1*/,SGameMtl * /*material_2*/)
{
save_max(depth, c.geom.depth);
}
void interactive_motion::state_start(CPhysicsShell *s)
{
s->add_ObjectContactCallback(get_depth);
collide(s);
if(flags.test(fl_switch_dm_toragdoll))
{
flags.assign(0);
s->remove_ObjectContactCallback(get_depth);
return;
}
}
void interactive_motion::state_end(CPhysicsShell *s)
{
flags.set( fl_switch_dm_toragdoll, FALSE );
flags.set( fl_use_death_motion, FALSE );
s->Enable( );
s->remove_ObjectContactCallback( get_depth );
	// set linear and angular velocities to match the animation
s->AnimToVelocityState( Device.fTimeDelta, default_l_limit * 10, default_w_limit * 10 );
}
void interactive_motion::update(CPhysicsShell *s)
{
IKinematics *K = s->PKinematics();
VERIFY(K);
K -> CalculateBones();
collide(s);
if(flags.test(fl_switch_dm_toragdoll))
{
switch_to_free(s);
} else
move_update(s);
}
void interactive_motion::switch_to_free(CPhysicsShell *s)
{
//set to normal state
state_end(s);
	// set all matrices valid
CPhysicsShellHolder *obj = s->get_ElementByStoreOrder(0)->PhysicsRefObject();
VERIFY(obj);
s->InterpolateGlobalTransform(&obj->XFORM());
IKinematics *K = s->PKinematics();
VERIFY(K);
K->CalculateBones_Invalidate();
K->CalculateBones(TRUE);
}
///////////////////////////////////////////////////////////////////////////////////////
void imotion_position::state_start(CPhysicsShell *s)
{
inherited::state_start(s);
if(!is_enabled())
return;
s->Disable();
s->EnabledCallbacks(FALSE);
}
void imotion_position::state_end(CPhysicsShell *s)
{
inherited::state_end( s );
s->ToAnimBonesPositions();
s->EnabledCallbacks(TRUE);
}
void imotion_position::move_update(CPhysicsShell *s)
{
s->Disable();
s->ToAnimBonesPositions();
}
void imotion_position::collide(CPhysicsShell *s)
{
depth = 0;
s->CollideAll();
if(depth > 0.05)
flags.set(fl_switch_dm_toragdoll,TRUE);
}
////////////////////////////////////////////////////////////////////////////////////
void imotion_velocity::state_start(CPhysicsShell *s)
{
inherited::state_start(s);
if(!is_enabled())
return;
s->set_ApplyByGravity(false);
//s->set_DynamicLimits(default_l_limit,default_w_limit * 5.f);
//s->set_DynamicScales(1,1);
//s->SetAirResistance(0,0);
}
void imotion_velocity::state_end(CPhysicsShell *s)
{
inherited::state_end( s );
s->set_ApplyByGravity(true);
}
void imotion_velocity::collide(CPhysicsShell *s)
{
}
void imotion_velocity::move_update(CPhysicsShell *s)
{
if(!s->AnimToVelocityState( Device.fTimeDelta, 2 * default_l_limit, 10.f * default_w_limit ))
flags.set(fl_switch_dm_toragdoll,TRUE);
Fmatrix sv;sv.set(s->mXFORM);
s->InterpolateGlobalTransform(&s->mXFORM);
s->mXFORM.set(sv);
}
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "velox/dwio/dwrf/common/OutputStream.h"
#include "velox/dwio/dwrf/common/wrap/dwrf-proto-wrapper.h"
#include "velox/dwio/dwrf/test/OrcTest.h"
#include <gtest/gtest.h>
using namespace facebook::dwio::common;
using namespace facebook::velox::memory;
using namespace facebook::velox::dwrf;
TEST(BufferedOutputStream, blockAligned) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t block = 10;
DataBufferHolder holder{pool, block, 0, DEFAULT_PAGE_GROW_RATIO, &memSink};
BufferedOutputStream bufStream(holder);
for (int32_t i = 0; i < 100; ++i) {
char* buf;
int32_t len;
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(10, len);
for (int32_t j = 0; j < 10; ++j) {
buf[j] = static_cast<char>('a' + j);
}
}
bufStream.flush();
ASSERT_EQ(1000, memSink.size());
for (int32_t i = 0; i < 1000; ++i) {
ASSERT_EQ(memSink.getData()[i], 'a' + i % 10);
}
}
TEST(BufferedOutputStream, blockNotAligned) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t block = 10;
DataBufferHolder holder{pool, block, 0, DEFAULT_PAGE_GROW_RATIO, &memSink};
BufferedOutputStream bufStream(holder);
char* buf;
int32_t len;
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(10, len);
for (int32_t i = 0; i < 7; ++i) {
buf[i] = static_cast<char>('a' + i);
}
bufStream.BackUp(3);
bufStream.flush();
ASSERT_EQ(7, memSink.size());
for (int32_t i = 0; i < 7; ++i) {
ASSERT_EQ(memSink.getData()[i], 'a' + i);
}
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(10, len);
for (int32_t i = 0; i < 5; ++i) {
buf[i] = static_cast<char>('a' + i);
}
bufStream.BackUp(5);
bufStream.flush();
ASSERT_EQ(12, memSink.size());
for (int32_t i = 0; i < 7; ++i) {
ASSERT_EQ(memSink.getData()[i], 'a' + i);
}
for (int32_t i = 0; i < 5; ++i) {
ASSERT_EQ(memSink.getData()[i + 7], 'a' + i);
}
}
TEST(BufferedOutputStream, protoBufSerialization) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t block = 10;
DataBufferHolder holder{pool, block, 0, DEFAULT_PAGE_GROW_RATIO, &memSink};
BufferedOutputStream bufStream(holder);
proto::PostScript ps;
ps.set_footerlength(197934);
ps.set_compression(proto::ZLIB);
ps.set_writerversion(123);
ASSERT_TRUE(ps.SerializeToZeroCopyStream(&bufStream));
bufStream.flush();
ASSERT_EQ(ps.ByteSizeLong(), memSink.size());
proto::PostScript ps2;
ps2.ParseFromArray(memSink.getData(), static_cast<int32_t>(memSink.size()));
ASSERT_EQ(ps.footerlength(), ps2.footerlength());
ASSERT_EQ(ps.compression(), ps2.compression());
ASSERT_EQ(ps.writerversion(), ps2.writerversion());
}
TEST(BufferedOutputStream, increaseSize) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t max = 512;
uint64_t min = 16;
DataBufferHolder holder{pool, max, min, DEFAULT_PAGE_GROW_RATIO, &memSink};
BufferedOutputStream bufStream(holder);
char* buf;
int32_t len;
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(16, len);
for (int32_t i = 0; i < len; ++i) {
buf[i] = static_cast<char>('a' + i);
}
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(16, len);
ASSERT_EQ(0, memSink.size());
for (int32_t i = 0; i < len; ++i) {
buf[i] = static_cast<char>('b' + i);
}
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len));
ASSERT_EQ(32, len);
ASSERT_EQ(0, memSink.size());
for (int32_t i = 0; i < len; ++i) {
buf[i] = static_cast<char>('c' + i);
}
ASSERT_TRUE(bufStream.Next(reinterpret_cast<void**>(&buf), &len, 128));
ASSERT_EQ(192, len);
ASSERT_EQ(0, memSink.size());
for (int32_t i = 0; i < len; ++i) {
buf[i] = static_cast<char>('d' + i);
}
bufStream.flush();
ASSERT_EQ(256, memSink.size());
std::array<uint64_t, 4> expected = {16, 16, 32, 192};
size_t pos = 0;
for (size_t i = 0; i < expected.size(); ++i) {
for (size_t j = 0; j < expected[i]; ++j) {
ASSERT_EQ(
memSink.getData()[pos++], static_cast<char>(('a' + i + j) % 0x100));
}
}
}
TEST(BufferedOutputStream, recordPosition) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t block = 256;
uint64_t initial = 128;
DataBufferHolder holder{
pool, block, initial, DEFAULT_PAGE_GROW_RATIO, &memSink};
BufferedOutputStream bufStream(holder);
TestPositionRecorder recorder;
EXPECT_EQ(bufStream.size(), 0);
bufStream.recordPosition(recorder, 3, 2);
{
auto& pos = recorder.getPositions();
EXPECT_EQ(pos.size(), 1);
EXPECT_EQ(pos.at(0), 0);
}
int32_t size;
void* data;
bufStream.Next(&data, &size);
EXPECT_EQ(size, initial);
recorder.addEntry();
EXPECT_EQ(bufStream.size(), initial);
bufStream.recordPosition(recorder, size, 100);
{
auto& pos = recorder.getPositions();
EXPECT_EQ(pos.size(), 1);
EXPECT_EQ(pos.at(0), 100);
}
bufStream.Next(&data, &size);
EXPECT_EQ(size, block - initial);
recorder.addEntry();
EXPECT_EQ(bufStream.size(), block);
bufStream.recordPosition(recorder, size, 100);
{
auto& pos = recorder.getPositions();
EXPECT_EQ(pos.size(), 1);
EXPECT_EQ(pos.at(0), initial + 100);
}
bufStream.Next(&data, &size);
EXPECT_EQ(size, block);
recorder.addEntry();
EXPECT_EQ(bufStream.size(), block + block);
bufStream.recordPosition(recorder, size, 100);
{
auto& pos = recorder.getPositions();
EXPECT_EQ(pos.size(), 1);
EXPECT_EQ(pos.at(0), block + 100);
}
recorder.addEntry();
bufStream.recordPosition(recorder, size, 100, 4);
auto& pos = recorder.getPositions(4);
EXPECT_EQ(pos.size(), 1);
EXPECT_EQ(pos.at(0), block + 100);
}
TEST(AppendOnlyBufferedStream, Basic) {
auto scopedPool = getDefaultScopedMemoryPool();
auto& pool = scopedPool->getPool();
MemorySink memSink(pool, 1024);
uint64_t block = 10;
DataBufferHolder holder{pool, block, 0, DEFAULT_PAGE_GROW_RATIO, &memSink};
auto bufStream = std::make_unique<BufferedOutputStream>(holder);
AppendOnlyBufferedStream appendable(std::move(bufStream));
std::array<char, 1024> data;
for (size_t i = 0; i < data.size(); ++i) {
data[i] = 'a' + i % 26;
}
appendable.write(data.data(), 173);
ASSERT_EQ(appendable.size(), 173);
TestPositionRecorder recorder;
appendable.recordPosition(recorder);
auto& pos = recorder.getPositions();
ASSERT_EQ(pos.size(), 1);
ASSERT_EQ(pos.at(0), 173);
appendable.flush();
ASSERT_EQ(memSink.size(), 173);
}
|
#include <iostream>
#include <iomanip>
#include <string>
#include <vector>
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
#include "UCTTower.hh"
#include "UCTLogging.hh"
using namespace l1tcalo;
bool UCTTower::process() {
if(region >= NRegionsInCard) {
return processHFTower();
}
if(ecalET > etInputMax) ecalET = etInputMax;
if(hcalET > etInputMax) hcalET = etInputMax;
uint32_t calibratedECALET = ecalET;
uint32_t logECALET = (uint32_t) log2((double) ecalET);
if(logECALET > erMaxV) logECALET = erMaxV;
if(ecalLUT != 0) {
uint32_t etaAddress = region * NEtaInRegion + iEta;
uint32_t fbAddress = 0;
if(ecalFG) fbAddress = 1;
uint32_t value = (*ecalLUT)[etaAddress][fbAddress][ecalET];
calibratedECALET = value & etInputMax;
logECALET = (value & 0x7000) >> 12;
}
uint32_t calibratedHCALET = hcalET;
uint32_t logHCALET = (uint32_t) log2((double) hcalET);
if(logHCALET > erMaxV) logHCALET = erMaxV;
if(hcalLUT != 0) {
uint32_t etaAddress = region * NEtaInRegion + iEta;
uint32_t fbAddress = 0;
if((hcalFB & 0x1) != 0) fbAddress = 1;
uint32_t value = (*hcalLUT)[etaAddress][fbAddress][hcalET];
calibratedHCALET = value & etInputMax;
logHCALET = (value & 0x7000) >> 12;
}
// Saturation codes implemented in fwVersion 1
if(fwVersion >= 1) {
if(calibratedECALET==0xFF && calibratedHCALET==0xFF)
towerData = 0x1FF;
else if(calibratedECALET==0xFF)
towerData = 0x1FE;
else if(calibratedHCALET==0xFF)
towerData = 0x1FD;
else
towerData = calibratedECALET + calibratedHCALET;
} else {
towerData = calibratedECALET + calibratedHCALET;
}
if(towerData > etMask) towerData = etMask;
uint32_t er = 0;
if(calibratedECALET == 0 || calibratedHCALET == 0) {
er = 0;
towerData |= zeroFlagMask;
if(calibratedHCALET == 0 && calibratedECALET != 0)
towerData |= eohrFlagMask;
}
else if(calibratedECALET == calibratedHCALET) {
er = 0;
towerData |= eohrFlagMask;
}
else if(calibratedECALET > calibratedHCALET) {
er = logECALET - logHCALET;
if(er > erMaxV) er = erMaxV;
towerData |= eohrFlagMask;
}
else {
er = logHCALET - logECALET;
if(er > erMaxV) er = erMaxV;
}
towerData |= (er << erShift);
// Unfortunately, hcalFlag is presently bogus :(
// It has never been studied nor used in Run-1
  // The same status persists in Run-2, but it is available for use.
  // For now, summarize all hcalFeatureBits in one flag bit.
if((hcalFB & 0x1) != 0) towerData |= hcalFlagMask; // FIXME - ignore top bits if(hcalFB != 0)
if(ecalFG) towerData |= ecalFlagMask;
// Store ecal and hcal calibrated ET in unused upper bits
towerData |= (calibratedECALET << ecalShift);
towerData |= (calibratedHCALET << hcalShift);
// All done!
return true;
}
bool UCTTower::processHFTower() {
uint32_t calibratedET = hcalET;
if(hfLUT != 0) {
uint32_t etaAddress = (region - NRegionsInCard) * NHFEtaInRegion + iEta;
const std::array< uint32_t, 256> a = hfLUT->at(etaAddress);
calibratedET = a[hcalET] & 0xFF;
}
uint32_t absCaloEta = abs(caloEta());
if(absCaloEta > 29 && absCaloEta < 40) {
// Divide by two (since two duplicate towers are sent)
calibratedET /= 2;
}
else if(absCaloEta == 40 || absCaloEta == 41) {
// Divide by four
calibratedET /= 4;
}
towerData = calibratedET | zeroFlagMask;
if((hcalFB & 0x1) == 0x1) towerData |= ecalFlagMask; // LSB defines short over long fiber ratio
if((hcalFB & 0x2) == 0x2) towerData |= hcalFlagMask; // MSB defines minbias flag
return true;
}
bool UCTTower::setECALData(bool eFG, uint32_t eET) {
ecalFG = eFG;
ecalET = eET;
if(eET > etInputMax) {
LOG_ERROR << "UCTTower::setData - ecalET too high " << eET << "; Pegged to etInputMax" << std::endl;
ecalET = etInputMax;
}
return true;
}
bool UCTTower::setHCALData(uint32_t hFB, uint32_t hET) {
hcalET = hET;
hcalFB = hFB;
if(hET > etInputMax) {
LOG_ERROR << "UCTTower::setData - hcalET too high " << hET << "; Pegged to etInputMax" << std::endl;
hcalET = etInputMax;
}
if(hFB > 0x3F) {
LOG_ERROR << "UCTTower::setData - too many hcalFeatureBits " << std::hex << hFB
<< "; Used only bottom 6 bits" << std::endl;
hcalFB &= 0x3F;
}
return true;
}
bool UCTTower::setHFData(uint32_t fbIn, uint32_t etIn) {
ecalFG = false; // HF has no separate ecal section
ecalET = 0;
hcalET = etIn; // We reuse HCAL place as HF
hcalFB = fbIn;
if(etIn > etInputMax) {
LOG_ERROR << "UCTTower::setData - HF ET too high " << etIn << "; Pegged to etInputMax" << std::endl;
hcalET = etInputMax;
}
if(fbIn > 0x3) {
LOG_ERROR << "UCTTower::setData - too many HF FeatureBits " << std::hex << fbIn
<< "; Used only bottom 2 bits" << std::endl;
hcalFB &= 0x3;
}
return true;
}
const uint16_t UCTTower::location() const {
uint16_t l = 0;
if(negativeEta) l = 0x8000; // Used top bit for +/- eta-side
l |= iPhi; // Max iPhi is 4, so bottom 2 bits for iPhi
l |= (iEta << 2); // Max iEta is 4, so 2 bits needed
l |= (region << 4); // Max region number 14, so 4 bits needed
l |= (card << 8); // Max card number is 6, so 3 bits needed
l |= (crate << 11); // Max crate number is 2, so 2 bits needed
return l;
}
UCTTower::UCTTower(uint16_t location, int fwv) :
fwVersion(fwv) {
if((location & 0x8000) != 0) negativeEta = true;
crate = (location & 0x1800) >> 11;
card = (location & 0x0700) >> 8;
region = (location & 0x00F0) >> 4;
iEta = (location & 0x000C) >> 2;
iPhi = (location & 0x0003);
towerData = 0;
}
const uint64_t UCTTower::extendedData() const {
uint64_t d = rawData();
uint64_t l = location();
uint64_t r = (l << 48) + d;
return r;
}
std::ostream& operator<<(std::ostream& os, const UCTTower& t) {
// if((t.ecalET + t.hcalET) == 0) return os;
os << "Side Crt Crd Rgn iEta iPhi cEta cPhi eET eFG hET hFB Summary" << std::endl;
UCTGeometry g;
std::string side = "+eta ";
if(t.negativeEta) side = "-eta ";
os << side
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.crate << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.card << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.region << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.iEta << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.iPhi << " "
<< std::setw(4) << std::setfill(' ') << std::dec
<< g.getCaloEtaIndex(t.negativeEta, t.region, t.iEta) << " "
<< std::setw(4) << std::setfill(' ') << std::dec
<< g.getCaloPhiIndex(t.crate, t.card, t.region, t.iPhi) << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.ecalET << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.ecalFG << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.hcalET << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(4) << std::hex
<< t.hcalFB << " "
<< std::showbase << std::internal << std::setfill('0') << std::setw(10) << std::hex
<< t.towerData
<< std::endl;
return os;
}
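// A small illustrative round-trip check for the packed location word: the
// constructor above decodes exactly what location() encodes. Field values are
// arbitrary but within the documented ranges; fwVersion 1 is assumed.
#include <cassert>
static void uctTowerLocationRoundTripExample() {
  // -eta side, crate 2, card 5, region 3, iEta 1, iPhi 2:
  uint16_t packed = 0x8000 | (2u << 11) | (5u << 8) | (3u << 4) | (1u << 2) | 2u;
  UCTTower tower(packed, /*fwv=*/1);
  assert(tower.location() == packed); // encode and decode are inverses
}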
|
/////////////////////////////////////////////////////////////////////////////
// Name: src/qt/taskbar.cpp
// Author: Peter Most
// Copyright: (c) Peter Most
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#include "wx/taskbar.h"
#include <QtWidgets/QSystemTrayIcon>
//=============================================================================
bool wxTaskBarIconBase::IsAvailable()
{
return QSystemTrayIcon::isSystemTrayAvailable();
}
//=============================================================================
wxIMPLEMENT_DYNAMIC_CLASS(wxTaskBarIcon, wxEvtHandler);
wxTaskBarIcon::wxTaskBarIcon(wxTaskBarIconType WXUNUSED(iconType))
{
m_qtSystemTrayIcon = new QSystemTrayIcon;
}
wxTaskBarIcon::~wxTaskBarIcon()
{
delete m_qtSystemTrayIcon;
}
bool wxTaskBarIcon::SetIcon(const wxIcon& WXUNUSED(icon),
const wxString& WXUNUSED(tooltip))
{
return false;
}
bool wxTaskBarIcon::RemoveIcon()
{
return false;
}
bool wxTaskBarIcon::PopupMenu(wxMenu *WXUNUSED(menu))
{
return false;
}
|
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
//
// TODO(kenton): Share code with the versions of this test in other languages?
// It seemed like parameterizing it would add more complexity than it is
// worth.
#include <memory>
#include <google/protobuf/compiler/cpp/cpp_generator.h>
#include <google/protobuf/compiler/command_line_interface.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/io/printer.h>
#include <google/protobuf/testing/file.h>
#include <google/protobuf/testing/googletest.h>
#include <gtest/gtest.h>
namespace google {
namespace protobuf {
namespace compiler {
namespace cpp {
namespace {
class TestGenerator : public CodeGenerator {
public:
TestGenerator() {}
~TestGenerator() {}
virtual bool Generate(const FileDescriptor* file,
const string& parameter,
GeneratorContext* context,
string* error) const {
TryInsert("test.pb.h", "includes", context);
TryInsert("test.pb.h", "namespace_scope", context);
TryInsert("test.pb.h", "global_scope", context);
TryInsert("test.pb.h", "class_scope:foo.Bar", context);
TryInsert("test.pb.h", "class_scope:foo.Bar.Baz", context);
TryInsert("test.pb.cc", "includes", context);
TryInsert("test.pb.cc", "namespace_scope", context);
TryInsert("test.pb.cc", "global_scope", context);
// Check field accessors for an optional int32:
TryInsert("test.pb.h", "field_get:foo.Bar.optInt", context);
TryInsert("test.pb.h", "field_set:foo.Bar.optInt", context);
// Check field accessors for a repeated int32:
TryInsert("test.pb.h", "field_get:foo.Bar.repeatedInt", context);
TryInsert("test.pb.h", "field_set:foo.Bar.repeatedInt", context);
// Check field accessors for a required string:
TryInsert("test.pb.h", "field_get:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_set:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_set_allocated:foo.Bar.requiredString",
context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.requiredString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.requiredString", context);
// Check field accessors for a repeated string:
TryInsert("test.pb.h", "field_get:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_set:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.repeatedString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.repeatedString", context);
// Check field accessors for an int inside oneof{}:
TryInsert("test.pb.h", "field_get:foo.Bar.oneOfInt", context);
TryInsert("test.pb.h", "field_set:foo.Bar.oneOfInt", context);
// Check field accessors for a string inside oneof{}:
TryInsert("test.pb.h", "field_get:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set_allocated:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set_char:foo.Bar.oneOfString", context);
TryInsert("test.pb.h", "field_set_pointer:foo.Bar.oneOfString", context);
// Check field accessors for an optional message:
TryInsert("test.pb.h", "field_get:foo.Bar.optMessage", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.optMessage", context);
TryInsert("test.pb.h", "field_set_allocated:foo.Bar.optMessage", context);
// Check field accessors for a repeated message:
TryInsert("test.pb.h", "field_add:foo.Bar.repeatedMessage", context);
TryInsert("test.pb.h", "field_get:foo.Bar.repeatedMessage", context);
TryInsert("test.pb.h", "field_list:foo.Bar.repeatedMessage", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.repeatedMessage", context);
TryInsert("test.pb.h", "field_mutable_list:foo.Bar.repeatedMessage",
context);
// Check field accessors for a message inside oneof{}:
TryInsert("test.pb.h", "field_get:foo.Bar.oneOfMessage", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.oneOfMessage", context);
TryInsert("test.pb.cc", "field_set_allocated:foo.Bar.oneOfMessage", context);
// Check field accessors for an optional enum:
TryInsert("test.pb.h", "field_get:foo.Bar.optEnum", context);
TryInsert("test.pb.h", "field_set:foo.Bar.optEnum", context);
// Check field accessors for a repeated enum:
TryInsert("test.pb.h", "field_get:foo.Bar.repeatedEnum", context);
TryInsert("test.pb.h", "field_set:foo.Bar.repeatedEnum", context);
TryInsert("test.pb.h", "field_add:foo.Bar.repeatedEnum", context);
TryInsert("test.pb.h", "field_list:foo.Bar.repeatedEnum", context);
TryInsert("test.pb.h", "field_mutable_list:foo.Bar.repeatedEnum", context);
// Check field accessors for an enum inside oneof{}:
TryInsert("test.pb.h", "field_get:foo.Bar.oneOfEnum", context);
TryInsert("test.pb.h", "field_set:foo.Bar.oneOfEnum", context);
// Check field accessors for a required cord:
TryInsert("test.pb.h", "field_get:foo.Bar.requiredCord", context);
TryInsert("test.pb.h", "field_set:foo.Bar.requiredCord", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.requiredCord", context);
// Check field accessors for a repeated cord:
TryInsert("test.pb.h", "field_get:foo.Bar.repeatedCord", context);
TryInsert("test.pb.h", "field_set:foo.Bar.repeatedCord", context);
TryInsert("test.pb.h", "field_add:foo.Bar.repeatedCord", context);
TryInsert("test.pb.h", "field_list:foo.Bar.repeatedCord", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.repeatedCord", context);
TryInsert("test.pb.h", "field_mutable_list:foo.Bar.repeatedCord", context);
// Check field accessors for a cord inside oneof{}:
TryInsert("test.pb.h", "field_get:foo.Bar.oneOfCord", context);
TryInsert("test.pb.h", "field_set:foo.Bar.oneOfCord", context);
TryInsert("test.pb.h", "field_mutable:foo.Bar.oneOfCord", context);
return true;
}
void TryInsert(const string& filename, const string& insertion_point,
GeneratorContext* context) const {
std::unique_ptr<io::ZeroCopyOutputStream> output(
context->OpenForInsert(filename, insertion_point));
io::Printer printer(output.get(), '$');
printer.Print("// inserted $name$\n", "name", insertion_point);
}
};
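// For reference, insertion points appear in protoc-generated files as
// specially formatted comments (an illustrative sketch, not output of this
// test):
//
//   // @@protoc_insertion_point(class_scope:foo.Bar)
//
// TryInsert() above opens each such point and appends a marker line like
// "// inserted class_scope:foo.Bar"; the test below only checks that each
// insertion succeeds, i.e. that the point exists.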
// This test verifies that all the expected insertion points exist. It does
// not verify that they are correctly placed; that would require actually
// compiling the output, which is a bit more than I care to do for this test.
TEST(CppPluginTest, PluginTest) {
GOOGLE_CHECK_OK(File::SetContents(TestTempDir() + "/test.proto",
"syntax = \"proto2\";\n"
"package foo;\n"
"\n"
"enum Thud { VALUE = 0; }\n"
"\n"
"message Bar {\n"
" message Baz {}\n"
" optional int32 optInt = 1;\n"
" repeated int32 repeatedInt = 2;\n"
"\n"
" required string requiredString = 3;\n"
" repeated string repeatedString = 4;\n"
"\n"
" optional Baz optMessage = 6;\n"
" repeated Baz repeatedMessage = 7;\n"
"\n"
" optional Thud optEnum = 8;\n"
" repeated Thud repeatedEnum = 9;\n"
"\n"
" required string requiredCord = 10 [\n"
" ctype = CORD\n"
" ];\n"
" repeated string repeatedCord = 11 [\n"
" ctype = CORD\n"
" ];\n"
"\n"
" oneof Qux {\n"
" int64 oneOfInt = 20;\n"
" string oneOfString = 21;\n"
" Baz oneOfMessage = 22;\n"
" Thud oneOfEnum = 23;"
" string oneOfCord = 24 [\n"
" ctype = CORD\n"
" ];\n"
" }\n"
"}\n",
true));
google::protobuf::compiler::CommandLineInterface cli;
cli.SetInputsAreProtoPathRelative(true);
CppGenerator cpp_generator;
TestGenerator test_generator;
cli.RegisterGenerator("--cpp_out", &cpp_generator, "");
cli.RegisterGenerator("--test_out", &test_generator, "");
string proto_path = "-I" + TestTempDir();
string cpp_out = "--cpp_out=" + TestTempDir();
string test_out = "--test_out=" + TestTempDir();
const char* argv[] = {
"protoc",
proto_path.c_str(),
cpp_out.c_str(),
test_out.c_str(),
"test.proto"
};
EXPECT_EQ(0, cli.Run(5, argv));
}
} // namespace
} // namespace cpp
} // namespace compiler
} // namespace protobuf
} // namespace google
|
#include "bindings.h"
#include <cmath>
//
// Set the tracing mode.
//
// @param newMode One of
// - OBOE_TRACE_NEVER(0) to disable tracing,
// - OBOE_TRACE_ALWAYS(1) to start a new trace if needed
//
Napi::Value setTracingMode(const Napi::CallbackInfo& info) {
const Napi::Env env = info.Env();
// Validate arguments
if (info.Length() != 1 || !info[0].IsNumber()) {
Napi::TypeError::New(env, "Invalid arguments").ThrowAsJavaScriptException();
return env.Null();
}
int mode = info[0].As<Napi::Number>().Uint32Value();
if (mode != OBOE_TRACE_NEVER && mode != OBOE_TRACE_ALWAYS) {
Napi::RangeError::New(env, "Invalid tracing mode").ThrowAsJavaScriptException();
return env.Null();
}
oboe_settings_mode_set(mode);
return env.Null();
}
//
// Set the default sample rate.
//
// This rate is used until overridden by the server. If not set, then
// oboe supplies a default value (300,000, i.e., 30%) as of this writing.
//
// The rate is interpreted as a ratio out of OBOE_SAMPLE_RESOLUTION (1,000,000).
//
// @param newRate A number between 0 (none) and OBOE_SAMPLE_RESOLUTION (a million)
//
Napi::Value setDefaultSampleRate(const Napi::CallbackInfo& info) {
// presume failure
int rateUsed = -1;
// Validate arguments; if not valid, or if the argument is NaN,
// return -1 (an impossible rate).
if (info.Length() == 1 && info[0].IsNumber()) {
double check = info[0].As<Napi::Number>().DoubleValue();
    // check for NaN while the value is still a double: converting a NaN
    // double to int is undefined behavior (and typically yields 0).
    if (!std::isnan(check)) {
      int rate = check;
// it's a valid number but maybe not in range.
if (rate < 0) {
rate = 0;
} else if (rate > OBOE_SAMPLE_RESOLUTION) {
rate = OBOE_SAMPLE_RESOLUTION;
}
rateUsed = rate;
oboe_settings_rate_set(rate);
}
}
return Napi::Number::New(info.Env(), rateUsed);
}
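// For example (a sketch of the ratio math): setDefaultSampleRate(300000)
// asks oboe to sample 300,000 out of OBOE_SAMPLE_RESOLUTION (1,000,000)
// requests, i.e. 30%; out-of-range inputs are clamped as shown above.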
// 1: OBOE_SAMPLE_RATE_SOURCE_FILE - local agent config
// 2: OBOE_SAMPLE_RATE_SOURCE_DEFAULT - compiled default
// 3: OBOE_SAMPLE_RATE_SOURCE_OBOE - remote config (layer-specific)
// 4: OBOE_SAMPLE_RATE_SOURCE_LAST_OBOE - previous oboe setting
// 5: OBOE_SAMPLE_RATE_SOURCE_DEFAULT_MISCONFIGURED - unknown source
// 6: OBOE_SAMPLE_RATE_SOURCE_OBOE_DEFAULT - remote setting (default)
// 7: OBOE_SAMPLE_RATE_SOURCE_CUSTOM - custom setting passed as a parameter
//    via oboe_tracing_decisions(); supports request-level customized
//    modes/rates.
//
// New function to start a trace. It returns all information
// necessary in a single call.
//
// getTraceSettings(object)
//
// object.xtrace - an X-Trace ID string, or undefined
// object.mode - a route-specific trace mode, 0 or 1 for 'never' or 'always'
// object.rate - a route-specific sampling rate
// object.edge - override the default edge setting.
//
Napi::Value getTraceSettings(const Napi::CallbackInfo& info) {
Napi::Env env = info.Env();
oboe_tracing_decisions_in_t in;
oboe_tracing_decisions_out_t out;
oboe_metadata_t omd;
// in defaults
bool have_metadata = false;
std::string xtrace;
std::string tracestate;
int rate = -1;
int mode = -1;
// edge back to the supplied metadata unless there is none.
bool edge = true;
// debugging booleans
//bool showIn = false;
//bool showOut = false;
//
// trigger trace extensions
//
// type_requested 0 = normal, 1 = trigger-trace
int type_requested = 0;
std::string xtraceOpts;
std::string xtraceOptsSig;
int64_t xtraceOptsTimestamp = 0;
int customTriggerMode = -1;
// caller specified values. errors are ignored and default values are used.
if (info[0].IsObject()) {
Napi::Object o = info[0].ToObject();
// is an xtrace supplied?
Napi::Value v = o.Get("xtrace");
if (v.IsString()) {
xtrace = v.As<Napi::String>();
// make sure it's the right length before calling oboe.
if (xtrace.length() == 60 || xtrace.length() == 55) {
// try to convert it to metadata. if it fails act as if no xtrace was
// supplied.
oboe_metadata_init(&omd);
int status = oboe_metadata_fromstr(&omd, xtrace.c_str(), xtrace.length());
// status can be zero with a version other than 2, so check that too.
if (status < 0) {
xtrace = "";
} else {
have_metadata = true;
}
} else {
// if it's the wrong length don't pass it to oboe
xtrace = "";
}
}
v = o.Get("tracestate");
if (v.IsString()) {
tracestate = v.As<Napi::String>();
}
// now get the much simpler integer values
v = o.Get("rate");
if (v.IsNumber()) {
rate = v.As<Napi::Number>().Int64Value();
}
v = o.Get("mode");
if (v.IsNumber()) {
mode = v.As<Napi::Number>().Int64Value();
}
// allow overriding the edge setting. it's not clear why
// this might need to be done, but it does add some control
// for testing or unforeseen cases.
if (o.Has("edge")) {
edge = o.Get("edge").ToBoolean().Value();
}
// now handle x-trace-options and x-trace-options-signature
v = o.Get("typeRequested");
if (v.IsNumber()) {
type_requested = v.As<Napi::Number>().Int64Value();
}
v = o.Get("xtraceOpts");
if (v.IsString()) {
xtraceOpts = v.As<Napi::String>();
}
v = o.Get("xtraceOptsSig");
if (v.IsString()) {
xtraceOptsSig = v.As<Napi::String>();
}
v = o.Get("xtraceOptsTimestamp");
if (v.IsNumber()) {
xtraceOptsTimestamp = v.As<Napi::Number>().Int64Value();
}
v = o.Get("customTriggerMode");
if (v.IsNumber()) {
customTriggerMode = v.As<Napi::Number>().Int32Value();
}
}
// apply default or user-specified values.
// for traceparent: type is 1 and version is 0
// for xtrace: type is 0 and version is 2
// omd is only initialized when an xtrace was successfully parsed above,
// so it must not be read otherwise.
if (have_metadata && omd.type == 0) {
in.version = 2;
} else {
in.version = 3;
}
in.service_name = "";
in.tracestate = tracestate.c_str();
in.custom_sample_rate = rate;
in.custom_tracing_mode = mode;
// oboe logs an error for an empty xtrace (and then ignores it)
// only set key when existing
if(!xtrace.empty()) {
in.in_xtrace = xtrace.c_str();
} else {
in.in_xtrace = nullptr;
}
// v2 fields (added for trigger-trace support)
in.custom_trigger_mode = customTriggerMode;
in.request_type = type_requested;
in.header_options = xtraceOpts.c_str();
in.header_signature = xtraceOptsSig.c_str();
in.header_timestamp = xtraceOptsTimestamp;
// ask for oboe's decisions on life, the universe, and everything.
out.version = 3;
int status = oboe_tracing_decisions(&in, &out);
// version 2+ of the oboe_tracing_decisions_out structure returns a
// pointer to the message string for all codes.
//
// -2 tracing-mode-disabled
// -1 xtrace-not-sampled
// 0 ok
// set the message and auth info for both error and successful returns
Napi::Object o = Napi::Object::New(env);
o.Set("status", Napi::Number::New(env, status));
o.Set("message", Napi::String::New(env, out.status_message));
o.Set("authStatus", Napi::Number::New(env, out.auth_status));
o.Set("authMessage", Napi::String::New(env, out.auth_message));
o.Set("typeProvisioned", Napi::Number::New(env, out.request_provisioned));
// status > 0 is an error return; do no additional processing.
if (status > 0) {
return o;
}
// if an x-trace was not used by oboe to make the decision then
// create metadata. oboe sets sample_source to -1 when it was a
// "continue" decision, i.e., the trace was continued using the
// supplied x-trace (no trace decision was made).
have_metadata = out.sample_source == OBOE_SAMPLE_RATE_SOURCE_CONTINUED;
if (!have_metadata) {
edge = false;
oboe_metadata_init(&omd);
oboe_metadata_random(&omd);
}
// now we have oboe_metadata_t either from a supplied xtrace id or from
// a Metadata object created for this span. set the sample bit to match
// the sample decision and create a JavaScript Metadata instance.
if (out.do_sample) {
omd.flags |= XTR_FLAGS_SAMPLED;
} else {
omd.flags &= ~XTR_FLAGS_SAMPLED;
}
Napi::Object event = Event::makeFromOboeMetadata(env, omd);
//Napi::Value v = Napi::External<oboe_metadata_t>::New(env, &omd);
//Napi::Object md = Metadata::NewInstance(env, v);
// oboe sets these if not continued, for when that change will be made
//out->sample_rate = -1;
//out->sample_source = -1;
//out->token_bucket_rate = -1;
//out->token_bucket_capacity = -1;
// augment the return object
o.Set("metadata", event);
o.Set("metadataFromXtrace", Napi::Boolean::New(env, have_metadata));
//o.Set("status", Napi::Number::New(env, status));
o.Set("edge", Napi::Boolean::New(env, edge));
o.Set("doSample", Napi::Boolean::New(env, out.do_sample));
o.Set("doMetrics", Napi::Boolean::New(env, out.do_metrics));
o.Set("source", Napi::Number::New(env, out.sample_source));
o.Set("rate", Napi::Number::New(env, out.sample_rate));
o.Set("tokenBucketRate", Napi::Number::New(env, out.token_bucket_rate));
o.Set("tokenBucketCapacity", Napi::Number::New(env, out.token_bucket_capacity));
return o;
}
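//
// Illustrative JavaScript usage (a sketch; the require path is an
// assumption, the property names follow the comment block above):
//
//   const bindings = require('./build/Release/bindings');
//   const settings = bindings.Settings.getTraceSettings({
//     xtrace: incomingXtraceHeader,  // optional X-Trace ID string
//     mode: 1,                       // route-specific 'always'
//   });
//   if (settings.doSample) {
//     // start a sampled span using settings.metadata
//   }
//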
//
// This is not a class, just a group of functions in a JavaScript namespace.
// (well, in two JavaScript namespaces for compatibility.)
//
namespace Settings {
Napi::Object Init(Napi::Env env, Napi::Object exports) {
Napi::HandleScope scope(env);
Napi::Object module = Napi::Object::New(env);
module.Set("setTracingMode", Napi::Function::New(env, setTracingMode));
module.Set("setDefaultSampleRate", Napi::Function::New(env, setDefaultSampleRate));
module.Set("getTraceSettings", Napi::Function::New(env, getTraceSettings));
exports.Set("Settings", module);
//
// for legacy compatibility, keep the Context namespace valid
//
exports.Set("Context", module);
return exports;
}
} // end namespace Settings
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin developers
// Copyright (c) 2017-2020 The PIVX developers
// Copyright (c) 2020 The earnscoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifdef HAVE_CONFIG_H
#include "config/earnscoin-config.h"
#endif
#include "netbase.h"
#include "hash.h"
#include "sync.h"
#include "uint256.h"
#include "random.h"
#include "util.h"
#include "utilstrencodings.h"
#include <atomic>
#ifdef HAVE_GETADDRINFO_A
#include <netdb.h>
#endif
#ifndef WIN32
#if HAVE_INET_PTON
#include <arpa/inet.h>
#endif
#include <fcntl.h>
#endif
#include <boost/algorithm/string/case_conv.hpp> // for to_lower()
#include <boost/algorithm/string/predicate.hpp> // for startswith() and endswith()
#include <boost/thread.hpp>
#if !defined(HAVE_MSG_NOSIGNAL) && !defined(MSG_NOSIGNAL)
#define MSG_NOSIGNAL 0
#endif
// Settings
static proxyType proxyInfo[NET_MAX];
static proxyType nameProxy;
static RecursiveMutex cs_proxyInfos;
int nConnectTimeout = DEFAULT_CONNECT_TIMEOUT;
bool fNameLookup = false;
// Need ample time for negotiation for very slow proxies such as Tor (milliseconds)
static const int SOCKS5_RECV_TIMEOUT = 20 * 1000;
enum Network ParseNetwork(std::string net)
{
boost::to_lower(net);
if (net == "ipv4") return NET_IPV4;
if (net == "ipv6") return NET_IPV6;
if (net == "tor" || net == "onion") return NET_TOR;
return NET_UNROUTABLE;
}
std::string GetNetworkName(enum Network net)
{
switch (net) {
case NET_IPV4:
return "ipv4";
case NET_IPV6:
return "ipv6";
case NET_TOR:
return "onion";
default:
return "";
}
}
void SplitHostPort(std::string in, int& portOut, std::string& hostOut)
{
size_t colon = in.find_last_of(':');
// if a : is found, and it either follows a [...], or no other : is in the string, treat it as port separator
bool fHaveColon = colon != in.npos;
bool fBracketed = fHaveColon && (in[0] == '[' && in[colon - 1] == ']'); // if there is a colon, and in[0]=='[', colon is not 0, so in[colon-1] is safe
bool fMultiColon = fHaveColon && (in.find_last_of(':', colon - 1) != in.npos);
if (fHaveColon && (colon == 0 || fBracketed || !fMultiColon)) {
int32_t n;
if (ParseInt32(in.substr(colon + 1), &n) && n > 0 && n < 0x10000) {
in = in.substr(0, colon);
portOut = n;
}
}
if (in.size() > 0 && in[0] == '[' && in[in.size() - 1] == ']')
hostOut = in.substr(1, in.size() - 2);
else
hostOut = in;
}
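// Examples of the parsing above (illustrative):
//   "example.com:18333" -> hostOut = "example.com", portOut = 18333
//   "[::1]:8333"        -> hostOut = "::1",         portOut = 8333
//   "::1"               -> hostOut = "::1",         portOut unchanged
//                          (several unbracketed colons, so no port is split)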
bool static LookupIntern(const char* pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
{
vIP.clear();
{
CNetAddr addr;
if (addr.SetSpecial(std::string(pszName))) {
vIP.push_back(addr);
return true;
}
}
#ifdef HAVE_GETADDRINFO_A
struct in_addr ipv4_addr;
#ifdef HAVE_INET_PTON
if (inet_pton(AF_INET, pszName, &ipv4_addr) > 0) {
vIP.push_back(CNetAddr(ipv4_addr));
return true;
}
struct in6_addr ipv6_addr;
if (inet_pton(AF_INET6, pszName, &ipv6_addr) > 0) {
vIP.push_back(CNetAddr(ipv6_addr));
return true;
}
#else
ipv4_addr.s_addr = inet_addr(pszName);
if (ipv4_addr.s_addr != INADDR_NONE) {
vIP.push_back(CNetAddr(ipv4_addr));
return true;
}
#endif
#endif
struct addrinfo aiHint;
memset(&aiHint, 0, sizeof(struct addrinfo));
aiHint.ai_socktype = SOCK_STREAM;
aiHint.ai_protocol = IPPROTO_TCP;
aiHint.ai_family = AF_UNSPEC;
#ifdef WIN32
aiHint.ai_flags = fAllowLookup ? 0 : AI_NUMERICHOST;
#else
aiHint.ai_flags = fAllowLookup ? AI_ADDRCONFIG : AI_NUMERICHOST;
#endif
struct addrinfo* aiRes = NULL;
#ifdef HAVE_GETADDRINFO_A
struct gaicb gcb, *query = &gcb;
memset(query, 0, sizeof(struct gaicb));
gcb.ar_name = pszName;
gcb.ar_request = &aiHint;
int nErr = getaddrinfo_a(GAI_NOWAIT, &query, 1, NULL);
if (nErr)
return false;
do {
// Set the timeout limit to a reasonable value to avoid generating
// unnecessary checking calls during the polling loop, while still
// responding to stop requests quickly enough. 2 seconds looks fine
// in our situation.
struct timespec ts = {2, 0};
gai_suspend(&query, 1, &ts);
boost::this_thread::interruption_point();
nErr = gai_error(query);
if (0 == nErr)
aiRes = query->ar_result;
} while (nErr == EAI_INPROGRESS);
#else
int nErr = getaddrinfo(pszName, NULL, &aiHint, &aiRes);
#endif
if (nErr)
return false;
struct addrinfo* aiTrav = aiRes;
while (aiTrav != NULL && (nMaxSolutions == 0 || vIP.size() < nMaxSolutions)) {
if (aiTrav->ai_family == AF_INET) {
assert(aiTrav->ai_addrlen >= sizeof(sockaddr_in));
vIP.push_back(CNetAddr(((struct sockaddr_in*)(aiTrav->ai_addr))->sin_addr));
}
if (aiTrav->ai_family == AF_INET6) {
assert(aiTrav->ai_addrlen >= sizeof(sockaddr_in6));
vIP.push_back(CNetAddr(((struct sockaddr_in6*)(aiTrav->ai_addr))->sin6_addr));
}
aiTrav = aiTrav->ai_next;
}
freeaddrinfo(aiRes);
return (vIP.size() > 0);
}
bool LookupHost(const char* pszName, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup)
{
std::string strHost(pszName);
if (strHost.empty())
return false;
if (boost::algorithm::starts_with(strHost, "[") && boost::algorithm::ends_with(strHost, "]")) {
strHost = strHost.substr(1, strHost.size() - 2);
}
return LookupIntern(strHost.c_str(), vIP, nMaxSolutions, fAllowLookup);
}
bool LookupHost(const char* pszName, CNetAddr& addr, bool fAllowLookup)
{
std::vector<CNetAddr> vIP;
LookupHost(pszName, vIP, 1, fAllowLookup);
if (vIP.empty())
return false;
addr = vIP.front();
return true;
}
bool Lookup(const char* pszName, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions)
{
if (pszName[0] == 0)
return false;
int port = portDefault;
std::string hostname = "";
SplitHostPort(std::string(pszName), port, hostname);
std::vector<CNetAddr> vIP;
bool fRet = LookupIntern(hostname.c_str(), vIP, nMaxSolutions, fAllowLookup);
if (!fRet)
return false;
vAddr.resize(vIP.size());
for (unsigned int i = 0; i < vIP.size(); i++)
vAddr[i] = CService(vIP[i], port);
return true;
}
bool Lookup(const char* pszName, CService& addr, int portDefault, bool fAllowLookup)
{
std::vector<CService> vService;
bool fRet = Lookup(pszName, vService, portDefault, fAllowLookup, 1);
if (!fRet)
return false;
addr = vService[0];
return true;
}
CService LookupNumeric(const char* pszName, int portDefault)
{
CService addr;
// "1.2:345" will fail to resolve the ip, but will still set the port.
// If the ip fails to resolve, re-init the result.
if (!Lookup(pszName, addr, portDefault, false))
addr = CService();
return addr;
}
struct timeval MillisToTimeval(int64_t nTimeout)
{
struct timeval timeout;
timeout.tv_sec = nTimeout / 1000;
timeout.tv_usec = (nTimeout % 1000) * 1000;
return timeout;
}
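// e.g. MillisToTimeval(2500) yields { tv_sec = 2, tv_usec = 500000 }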
enum class IntrRecvError {
OK,
Timeout,
Disconnected,
NetworkError,
Interrupted
};
/**
* Read bytes from socket. This will either read the full number of bytes
* requested or return an IntrRecvError on error or timeout.
* This function can be interrupted by a boost thread interruption.
*
* @param data Buffer to receive into
* @param len Length of data to receive
* @param timeout Timeout in milliseconds for receive operation
*
* @note This function requires that hSocket is in non-blocking mode.
*/
static IntrRecvError InterruptibleRecv(char* data, size_t len, int timeout, SOCKET& hSocket)
{
int64_t curTime = GetTimeMillis();
int64_t endTime = curTime + timeout;
// Maximum time to wait in one select call. It will take up until this time (in millis)
// to break off in case of an interruption.
const int64_t maxWait = 1000;
while (len > 0 && curTime < endTime) {
ssize_t ret = recv(hSocket, data, len, 0); // Optimistically try the recv first
if (ret > 0) {
len -= ret;
data += ret;
} else if (ret == 0) { // Unexpected disconnection
return IntrRecvError::Disconnected;
} else { // Other error or blocking
int nErr = WSAGetLastError();
if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL) {
if (!IsSelectableSocket(hSocket)) {
return IntrRecvError::NetworkError;
}
struct timeval tval = MillisToTimeval(std::min(endTime - curTime, maxWait));
fd_set fdset;
FD_ZERO(&fdset);
FD_SET(hSocket, &fdset);
int nRet = select(hSocket + 1, &fdset, NULL, NULL, &tval);
if (nRet == SOCKET_ERROR) {
return IntrRecvError::NetworkError;
}
} else {
return IntrRecvError::NetworkError;
}
}
boost::this_thread::interruption_point();
curTime = GetTimeMillis();
}
return len == 0 ? IntrRecvError::OK : IntrRecvError::Timeout;
}
struct ProxyCredentials
{
std::string username;
std::string password;
};
/** Connect using SOCKS5 (as described in RFC1928) */
bool static Socks5(std::string strDest, int port, const ProxyCredentials *auth, SOCKET& hSocket)
{
IntrRecvError recvr;
LogPrintf("SOCKS5 connecting %s\n", strDest);
if (strDest.size() > 255) {
CloseSocket(hSocket);
return error("Hostname too long");
}
// Accepted authentication methods
std::vector<uint8_t> vSocks5Init;
vSocks5Init.push_back(0x05);
if (auth) {
vSocks5Init.push_back(0x02); // # METHODS
vSocks5Init.push_back(0x00); // X'00' NO AUTHENTICATION REQUIRED
vSocks5Init.push_back(0x02); // X'02' USERNAME/PASSWORD (RFC1929)
} else {
vSocks5Init.push_back(0x01); // # METHODS
vSocks5Init.push_back(0x00); // X'00' NO AUTHENTICATION REQUIRED
}
ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vSocks5Init.size()) {
CloseSocket(hSocket);
return error("Error sending to proxy");
}
char pchRet1[2];
if ((recvr = InterruptibleRecv(pchRet1, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
CloseSocket(hSocket);
return error("Error reading proxy response");
}
if (pchRet1[0] != 0x05) {
CloseSocket(hSocket);
return error("Proxy failed to initialize");
}
if (pchRet1[1] == 0x02 && auth) {
// Perform username/password authentication (as described in RFC1929)
std::vector<uint8_t> vAuth;
vAuth.push_back(0x01);
if (auth->username.size() > 255 || auth->password.size() > 255)
return error("Proxy username or password too long");
vAuth.push_back(auth->username.size());
vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end());
vAuth.push_back(auth->password.size());
vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end());
ret = send(hSocket, (const char*)vAuth.data(), vAuth.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vAuth.size()) {
CloseSocket(hSocket);
return error("Error sending authentication to proxy");
}
LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password);
char pchRetA[2];
if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
CloseSocket(hSocket);
return error("Error reading proxy authentication response");
}
if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) {
CloseSocket(hSocket);
return error("Proxy authentication unsuccesful");
}
} else if (pchRet1[1] == 0x00) {
// Perform no authentication
} else {
CloseSocket(hSocket);
return error("Proxy requested wrong authentication method %02x", pchRet1[1]);
}
std::vector<uint8_t> vSocks5;
vSocks5.push_back(0x05); // VER protocol version
vSocks5.push_back(0x01); // CMD CONNECT
vSocks5.push_back(0x00); // RSV Reserved
vSocks5.push_back(0x03); // ATYP DOMAINNAME
vSocks5.push_back(strDest.size()); // Length<=255 is checked at beginning of function
vSocks5.insert(vSocks5.end(), strDest.begin(), strDest.end());
vSocks5.push_back((port >> 8) & 0xFF);
vSocks5.push_back((port >> 0) & 0xFF);
ret = send(hSocket, (const char*)vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL);
if (ret != (ssize_t)vSocks5.size()) {
CloseSocket(hSocket);
return error("Error sending to proxy");
}
char pchRet2[4];
if ((recvr = InterruptibleRecv(pchRet2, 4, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
CloseSocket(hSocket);
if (recvr == IntrRecvError::Timeout) {
/* If a timeout happens here, this effectively means we timed out while connecting
* to the remote node. This is very common for Tor, so do not print an
* error message. */
return false;
} else {
return error("Error while reading proxy response");
}
}
if (pchRet2[0] != 0x05) {
CloseSocket(hSocket);
return error("Proxy failed to accept request");
}
if (pchRet2[1] != 0x00) {
CloseSocket(hSocket);
switch (pchRet2[1]) {
case 0x01:
return error("Proxy error: general failure");
case 0x02:
return error("Proxy error: connection not allowed");
case 0x03:
return error("Proxy error: network unreachable");
case 0x04:
return error("Proxy error: host unreachable");
case 0x05:
return error("Proxy error: connection refused");
case 0x06:
return error("Proxy error: TTL expired");
case 0x07:
return error("Proxy error: protocol error");
case 0x08:
return error("Proxy error: address type not supported");
default:
return error("Proxy error: unknown");
}
}
if (pchRet2[2] != 0x00) {
CloseSocket(hSocket);
return error("Error: malformed proxy response");
}
char pchRet3[256];
switch (pchRet2[3]) {
case 0x01:
recvr = InterruptibleRecv(pchRet3, 4, SOCKS5_RECV_TIMEOUT, hSocket);
break;
case 0x04:
recvr = InterruptibleRecv(pchRet3, 16, SOCKS5_RECV_TIMEOUT, hSocket);
break;
case 0x03: {
recvr = InterruptibleRecv(pchRet3, 1, SOCKS5_RECV_TIMEOUT, hSocket);
if (recvr != IntrRecvError::OK) {
CloseSocket(hSocket);
return error("Error reading from proxy");
}
int nRecv = pchRet3[0];
recvr = InterruptibleRecv(pchRet3, nRecv, SOCKS5_RECV_TIMEOUT, hSocket);
break;
}
default:
CloseSocket(hSocket);
return error("Error: malformed proxy response");
}
if (recvr != IntrRecvError::OK) {
CloseSocket(hSocket);
return error("Error reading from proxy");
}
if ((recvr = InterruptibleRecv(pchRet3, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) {
CloseSocket(hSocket);
return error("Error reading from proxy");
}
LogPrintf("SOCKS5 connected %s\n", strDest);
return true;
}
bool static ConnectSocketDirectly(const CService& addrConnect, SOCKET& hSocketRet, int nTimeout)
{
hSocketRet = INVALID_SOCKET;
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) {
LogPrintf("Cannot connect to %s: unsupported network\n", addrConnect.ToString());
return false;
}
SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP);
if (hSocket == INVALID_SOCKET)
return false;
#ifdef SO_NOSIGPIPE
int set = 1;
// Different way of disabling SIGPIPE on BSD
setsockopt(hSocket, SOL_SOCKET, SO_NOSIGPIPE, (void*)&set, sizeof(int));
#endif
// Set to non-blocking
if (!SetSocketNonBlocking(hSocket, true))
return error("ConnectSocketDirectly: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError()));
if (connect(hSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) {
int nErr = WSAGetLastError();
// WSAEINVAL is here because some legacy version of winsock uses it
if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL) {
struct timeval timeout = MillisToTimeval(nTimeout);
fd_set fdset;
FD_ZERO(&fdset);
FD_SET(hSocket, &fdset);
int nRet = select(hSocket + 1, NULL, &fdset, NULL, &timeout);
if (nRet == 0) {
LogPrint(BCLog::NET, "connection to %s timeout\n", addrConnect.ToString());
CloseSocket(hSocket);
return false;
}
if (nRet == SOCKET_ERROR) {
LogPrintf("select() for %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
CloseSocket(hSocket);
return false;
}
socklen_t nRetSize = sizeof(nRet);
#ifdef WIN32
if (getsockopt(hSocket, SOL_SOCKET, SO_ERROR, (char*)(&nRet), &nRetSize) == SOCKET_ERROR)
#else
if (getsockopt(hSocket, SOL_SOCKET, SO_ERROR, &nRet, &nRetSize) == SOCKET_ERROR)
#endif
{
LogPrintf("getsockopt() for %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
CloseSocket(hSocket);
return false;
}
if (nRet != 0) {
LogPrintf("connect() to %s failed after select(): %s\n", addrConnect.ToString(), NetworkErrorString(nRet));
CloseSocket(hSocket);
return false;
}
}
#ifdef WIN32
else if (WSAGetLastError() != WSAEISCONN)
#else
else
#endif
{
LogPrintf("connect() to %s failed: %s\n", addrConnect.ToString(), NetworkErrorString(WSAGetLastError()));
CloseSocket(hSocket);
return false;
}
}
hSocketRet = hSocket;
return true;
}
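// The flow above is the standard non-blocking connect pattern: connect()
// returns immediately with WSAEINPROGRESS/WSAEWOULDBLOCK, select() waits
// for writability up to nTimeout, and getsockopt(SO_ERROR) then retrieves
// the final result of the connection attempt.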
bool SetProxy(enum Network net, const proxyType &addrProxy)
{
assert(net >= 0 && net < NET_MAX);
if (!addrProxy.IsValid())
return false;
LOCK(cs_proxyInfos);
proxyInfo[net] = addrProxy;
return true;
}
bool GetProxy(enum Network net, proxyType& proxyInfoOut)
{
assert(net >= 0 && net < NET_MAX);
LOCK(cs_proxyInfos);
if (!proxyInfo[net].IsValid())
return false;
proxyInfoOut = proxyInfo[net];
return true;
}
bool SetNameProxy(const proxyType &addrProxy)
{
if (!addrProxy.IsValid())
return false;
LOCK(cs_proxyInfos);
nameProxy = addrProxy;
return true;
}
bool GetNameProxy(proxyType &nameProxyOut)
{
LOCK(cs_proxyInfos);
if (!nameProxy.IsValid())
return false;
nameProxyOut = nameProxy;
return true;
}
bool HaveNameProxy()
{
LOCK(cs_proxyInfos);
return nameProxy.IsValid();
}
bool IsProxy(const CNetAddr& addr)
{
LOCK(cs_proxyInfos);
for (int i = 0; i < NET_MAX; i++) {
if (addr == (CNetAddr)proxyInfo[i].proxy)
return true;
}
return false;
}
static bool ConnectThroughProxy(const proxyType &proxy, const std::string strDest, int port, SOCKET& hSocketRet, int nTimeout, bool *outProxyConnectionFailed)
{
SOCKET hSocket = INVALID_SOCKET;
// first connect to proxy server
if (!ConnectSocketDirectly(proxy.proxy, hSocket, nTimeout)) {
if (outProxyConnectionFailed)
*outProxyConnectionFailed = true;
return false;
}
// do socks negotiation
if (proxy.randomize_credentials) {
ProxyCredentials random_auth;
static std::atomic_int counter;
random_auth.username = random_auth.password = strprintf("%i", counter++);
if (!Socks5(strDest, (unsigned short)port, &random_auth, hSocket))
return false;
} else {
if (!Socks5(strDest, (unsigned short)port, 0, hSocket))
return false;
}
hSocketRet = hSocket;
return true;
}
bool ConnectSocket(const CService &addrDest, SOCKET& hSocketRet, int nTimeout, bool *outProxyConnectionFailed)
{
proxyType proxy;
if (outProxyConnectionFailed)
*outProxyConnectionFailed = false;
if (GetProxy(addrDest.GetNetwork(), proxy))
return ConnectThroughProxy(proxy, addrDest.ToStringIP(), addrDest.GetPort(), hSocketRet, nTimeout, outProxyConnectionFailed);
else // no proxy needed (none set for target network)
return ConnectSocketDirectly(addrDest, hSocketRet, nTimeout);
}
bool ConnectSocketByName(CService& addr, SOCKET& hSocketRet, const char* pszDest, int portDefault, int nTimeout, bool* outProxyConnectionFailed)
{
std::string strDest;
int port = portDefault;
if (outProxyConnectionFailed)
*outProxyConnectionFailed = false;
SplitHostPort(std::string(pszDest), port, strDest);
proxyType nameProxy;
GetNameProxy(nameProxy);
std::vector<CService> addrResolved;
if (Lookup(strDest.c_str(), addrResolved, port, fNameLookup && !HaveNameProxy(), 256)) {
if (addrResolved.size() > 0) {
addr = addrResolved[GetRand(addrResolved.size())];
return ConnectSocket(addr, hSocketRet, nTimeout);
}
}
addr = CService();
if (!HaveNameProxy())
return false;
return ConnectThroughProxy(nameProxy, strDest, port, hSocketRet, nTimeout, outProxyConnectionFailed);
}
bool LookupSubNet(const char* pszName, CSubNet& ret)
{
std::string strSubnet(pszName);
size_t slash = strSubnet.find_last_of('/');
std::vector<CNetAddr> vIP;
std::string strAddress = strSubnet.substr(0, slash);
if (LookupHost(strAddress.c_str(), vIP, 1, false))
{
CNetAddr network = vIP[0];
if (slash != strSubnet.npos) {
std::string strNetmask = strSubnet.substr(slash + 1);
int32_t n;
// IPv4 addresses start at offset 12, and first 12 bytes must match, so just offset n
if (ParseInt32(strNetmask, &n)) { // If valid number, assume /24 syntax
ret = CSubNet(network, n);
return ret.IsValid();
} else // If not a valid number, try full netmask syntax
{
// Never allow lookup for netmask
if (LookupHost(strNetmask.c_str(), vIP, 1, false)) {
ret = CSubNet(network, vIP[0]);
return ret.IsValid();
}
}
} else {
ret = CSubNet(network);
return ret.IsValid();
}
}
return false;
}
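// Examples of accepted forms (illustrative):
//   "192.168.0.0/16"          -> subnet via CIDR suffix
//   "192.168.0.0/255.255.0.0" -> subnet via full netmask syntax
//   "10.0.0.1"                -> single-host subnet (no slash)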
#ifdef WIN32
std::string NetworkErrorString(int err)
{
char buf[256];
buf[0] = 0;
if (FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK,
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
buf, sizeof(buf), NULL)) {
return strprintf("%s (%d)", buf, err);
} else {
return strprintf("Unknown error (%d)", err);
}
}
#else
std::string NetworkErrorString(int err)
{
char buf[256];
const char* s = buf;
buf[0] = 0;
/* Too bad there are two incompatible implementations of the
* thread-safe strerror. */
#ifdef STRERROR_R_CHAR_P /* GNU variant can return a pointer outside the passed buffer */
s = strerror_r(err, buf, sizeof(buf));
#else /* POSIX variant always returns message in buffer */
if (strerror_r(err, buf, sizeof(buf)))
buf[0] = 0;
#endif
return strprintf("%s (%d)", s, err);
}
#endif
bool CloseSocket(SOCKET& hSocket)
{
if (hSocket == INVALID_SOCKET)
return false;
#ifdef WIN32
int ret = closesocket(hSocket);
#else
int ret = close(hSocket);
#endif
hSocket = INVALID_SOCKET;
return ret != SOCKET_ERROR;
}
bool SetSocketNonBlocking(SOCKET& hSocket, bool fNonBlocking)
{
if (fNonBlocking) {
#ifdef WIN32
u_long nOne = 1;
if (ioctlsocket(hSocket, FIONBIO, &nOne) == SOCKET_ERROR) {
#else
int fFlags = fcntl(hSocket, F_GETFL, 0);
if (fcntl(hSocket, F_SETFL, fFlags | O_NONBLOCK) == SOCKET_ERROR) {
#endif
CloseSocket(hSocket);
return false;
}
} else {
#ifdef WIN32
u_long nZero = 0;
if (ioctlsocket(hSocket, FIONBIO, &nZero) == SOCKET_ERROR) {
#else
int fFlags = fcntl(hSocket, F_GETFL, 0);
if (fcntl(hSocket, F_SETFL, fFlags & ~O_NONBLOCK) == SOCKET_ERROR) {
#endif
CloseSocket(hSocket);
return false;
}
}
return true;
}
|
//---------------------------------------------------------- -*- Mode: C++ -*-
// $Id$
//
// Created 2013/06/08
// Author: Mike Ovsiannikov
//
// Copyright 2013,2016 Quantcast Corporation. All rights reserved.
//
// This file is part of Kosmos File System (KFS).
//
// Licensed under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Kerberos 5 client side authentication implementation.
//
//----------------------------------------------------------------------------
#include "KrbClient.h"
#ifdef KFS_KRB_IGNORE_DEPRECATED
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif /* KFS_KRB_IGNORE_DEPRECATED */
#include "KfsKrb5.h"
#include <string.h>
#include <errno.h>
#include <time.h>
#include <string>
#include <algorithm>
namespace KFS
{
using std::string;
using std::max;
class KrbClient::Impl
{
public:
Impl()
: mServiceHost(),
mKeyTabFileName(),
mClientName(),
mCtx(),
mAuthCtx(),
mErrCode(0),
mOutBuf(),
mCreds(),
mServerPtr(0),
mKeyBlockPtr(0),
mCachePtr(0),
mInitedFlag(false),
mUseKeyTabFlag(false),
mForceCacheInitFlag(false),
mLastCredEndTime(-1),
mServiceName(),
mErrorMsg()
{
mOutBuf.data = 0;
mOutBuf.length = 0;
memset(&mCreds, 0, sizeof(mCreds));
}
~Impl()
{ Impl::CleanupSelf(); }
const char* Init(
const char* inServiceHostNamePtr,
const char* inServiceNamePtr,
const char* inKeyTabNamePtr,
const char* inClientNamePtr,
bool inForceCacheInitFlag)
{
CleanupSelf();
mServiceHost = inServiceHostNamePtr ? inServiceHostNamePtr : "";
mServiceName = inServiceNamePtr ? inServiceNamePtr : "";
mKeyTabFileName = inKeyTabNamePtr ? inKeyTabNamePtr : "";
mClientName = inClientNamePtr ? inClientNamePtr : "";
mUseKeyTabFlag = inKeyTabNamePtr != 0;
mForceCacheInitFlag = inForceCacheInitFlag;
mErrCode = 0;
mErrorMsg.clear();
InitSelf(mForceCacheInitFlag);
if (mErrCode) {
return ErrStr();
}
return 0;
}
const char* Cleanup()
{
mErrorMsg.clear();
mErrCode = CleanupSelf();
if (mErrCode) {
return ErrStr();
}
return 0;
}
const char* Request(
const char*& outDataPtr,
int& outDataLen,
const char*& outSessionKeyPtr,
int& outSessionKeyLen)
{
if (! mInitedFlag) {
mErrCode = EINVAL;
mErrorMsg = "not initialized yet, invoke KrbClient::Init";
return mErrorMsg.c_str();
}
CleanupAuth();
if (mUseKeyTabFlag && mLastCredEndTime <= time(0) + 5) {
InitCredCacheKeyTab();
if (mErrCode) {
return ErrStr();
}
}
mCreds.server = mServerPtr;
if ((mErrCode = krb5_cc_get_principal(
mCtx, mCachePtr, &mCreds.client)) != 0) {
return ErrStr();
}
krb5_creds* theCredsPtr = 0;
if ((mErrCode = krb5_get_credentials(
mCtx, 0, mCachePtr, &mCreds, &theCredsPtr)) != 0) {
return ErrStr();
}
#ifdef _KFS_KRB_CLIENT_SET_AUTH_FLAGS
krb5_int32 theFlags = 0;
if ((mErrCode = krb5_auth_con_init(mCtx, &mAuthCtx)) ||
(mErrCode = krb5_auth_con_getflags(mCtx, mAuthCtx, &theFlags))) {
krb5_free_creds(mCtx, theCredsPtr);
return ErrStr();
}
theFlags |= KRB5_AUTH_CONTEXT_DO_SEQUENCE;
theFlags &= ~(KRB5_AUTH_CONTEXT_DO_TIME | KRB5_AUTH_CONTEXT_RET_TIME);
if ((mErrCode = krb5_auth_con_setflags(mCtx, mAuthCtx, theFlags))) {
krb5_free_creds(mCtx, theCredsPtr);
return ErrStr();
}
#endif
krb5_data theAppData = { 0 };
theAppData.data = 0;
theAppData.length = 0;
mErrCode = krb5_mk_req_extended(
mCtx,
&mAuthCtx,
AP_OPTS_MUTUAL_REQUIRED,
&theAppData,
theCredsPtr,
&mOutBuf
);
if (theCredsPtr) {
mLastCredEndTime = theCredsPtr->times.endtime;
}
krb5_free_creds(mCtx, theCredsPtr);
if (mErrCode != 0) {
return ErrStr();
}
if ((mErrCode = krb5_auth_con_getkey(mCtx, mAuthCtx, &mKeyBlockPtr))) {
return ErrStr();
}
outDataPtr = (const char*)mOutBuf.data;
outDataLen = (int)mOutBuf.length;
outSessionKeyPtr = KfsKrb5::get_key_block_contents(mKeyBlockPtr);
outSessionKeyLen = KfsKrb5::get_key_block_length(mKeyBlockPtr);
return 0;
}
const char* Reply(
const char* inReplyPtr,
int inReplyLen)
{
if (! mInitedFlag) {
mErrCode = EINVAL;
mErrorMsg = "not initialized yet, invoke KrbClient::Init";
return mErrorMsg.c_str();
}
if (mOutBuf.length <= 0 || ! mOutBuf.data) {
mErrCode = EINVAL;
mErrorMsg = "not ready to process reply, invoke KrbClient::Request";
return mErrorMsg.c_str();
}
krb5_data theData = { 0 };
theData.length = max(0, inReplyLen);
theData.data = const_cast<char*>(inReplyPtr);
krb5_ap_rep_enc_part* theReplPtr = 0;
if ((mErrCode = krb5_rd_rep(
mCtx,
mAuthCtx,
&theData,
&theReplPtr))) {
return ErrStr();
}
krb5_free_ap_rep_enc_part(mCtx, theReplPtr);
KfsKrb5::free_data_contents(mCtx, &mOutBuf);
return 0;
}
int GetErrorCode() const
{ return (int)mErrCode; }
time_t GetLastCredEndTime() const
{ return mLastCredEndTime; }
Impl* Clone(
const char*& outErrMsgPtr) const
{
Impl& theRet = *(new Impl());
if (mInitedFlag) {
outErrMsgPtr = theRet.Init(
mServiceHost.c_str(),
mServiceName.c_str(),
mKeyTabFileName.c_str(),
mClientName.c_str(),
mForceCacheInitFlag
);
} else {
outErrMsgPtr = 0;
}
return &theRet;
}
private:
string mServiceHost;
string mKeyTabFileName;
string mClientName;
krb5_context mCtx;
krb5_auth_context mAuthCtx;
krb5_error_code mErrCode;
krb5_data mOutBuf;
krb5_creds mCreds;
krb5_principal mServerPtr;
krb5_keyblock* mKeyBlockPtr;
krb5_ccache mCachePtr;
bool mInitedFlag;
bool mUseKeyTabFlag;
bool mForceCacheInitFlag;
time_t mLastCredEndTime;
string mServiceName;
string mErrorMsg;
void InitCredCacheKeyTab()
{
if (! mInitedFlag) {
mErrCode = EINVAL;
return;
}
krb5_get_init_creds_opt* theInitOptionsPtr = 0;
#ifdef KFS_KRB_USE_KRB5_GET_INIT_CREDS_OPT
if ((mErrCode = krb5_get_init_creds_opt_alloc(
mCtx, &theInitOptionsPtr))) {
return;
}
if ((mErrCode = KfsKrb5::get_init_creds_opt_set_out_ccache(
mCtx, theInitOptionsPtr, mCachePtr)) == 0) {
#else
krb5_get_init_creds_opt theInitOptions;
krb5_get_init_creds_opt_init(&theInitOptions);
theInitOptionsPtr = &theInitOptions;
#endif
krb5_keytab theKeyTabPtr = 0;
if ((mErrCode = mKeyTabFileName.empty() ?
krb5_kt_default(mCtx, &theKeyTabPtr) :
krb5_kt_resolve(mCtx, mKeyTabFileName.c_str(),
&theKeyTabPtr))) {
return;
}
krb5_principal theClientPtr = 0;
if ((mErrCode = krb5_parse_name(
mCtx, mClientName.c_str(), &theClientPtr)) == 0) {
if ((mErrCode = krb5_get_init_creds_keytab(
mCtx,
&mCreds,
theClientPtr,
theKeyTabPtr,
0,
0,
theInitOptionsPtr)) == 0) {
#ifndef KFS_KRB_USE_KRB5_GET_INIT_CREDS_OPT
if ((mErrCode = krb5_cc_initialize(
mCtx, mCachePtr, mCreds.client)) == 0) {
mErrCode = krb5_cc_store_cred(
mCtx, mCachePtr, &mCreds);
}
#endif
if (theClientPtr && theClientPtr == mCreds.client) {
mCreds.client = 0;
}
krb5_free_cred_contents(mCtx, &mCreds);
}
memset(&mCreds, 0, sizeof(mCreds));
if (theClientPtr) {
krb5_free_principal(mCtx, theClientPtr);
theClientPtr = 0;
}
}
if (theKeyTabPtr) {
krb5_error_code const theErr = krb5_kt_close(
mCtx, theKeyTabPtr);
if (! mErrCode && theErr) {
mErrCode = theErr;
}
}
#ifdef KFS_KRB_USE_KRB5_GET_INIT_CREDS_OPT
}
if (theInitOptionsPtr) {
krb5_get_init_creds_opt_free(mCtx, theInitOptionsPtr);
}
#endif
}
bool ComparePrincipal(
const char* inNamePtr,
krb5_principal inPrinPtr)
{
krb5_principal thePrinPtr = 0;
if ((mErrCode = krb5_parse_name(
mCtx, inNamePtr, &thePrinPtr)) == 0) {
const bool theRet = krb5_principal_compare(
mCtx, inPrinPtr, thePrinPtr);
krb5_free_principal(mCtx, thePrinPtr);
return theRet;
}
return false;
}
void InitSelf(
bool inForceCacheInitFlag)
{
mErrCode = krb5_init_context(&mCtx);
if (mErrCode) {
return;
}
mInitedFlag = true;
memset(&mCreds, 0, sizeof(mCreds));
mCachePtr = 0;
if ((mErrCode = krb5_sname_to_principal(
mCtx,
mServiceHost.c_str(),
mServiceName.c_str(),
KRB5_NT_UNKNOWN, // KRB5_NT_SRV_HST,
&mServerPtr
))) {
return;
}
if ((mErrCode = krb5_cc_default(mCtx, &mCachePtr)) != 0) {
return;
}
if (mUseKeyTabFlag) {
const char* theDataPtr = 0;
int theDataLen = 0;
const char* theSessionKeyPtr = 0;
int theSessionKeyLen = 0;
time_t const theNow = time(0);
mLastCredEndTime = theNow + 24 * 3600;
if (inForceCacheInitFlag ||
Request(
theDataPtr,
theDataLen,
theSessionKeyPtr,
theSessionKeyLen) ||
! mCreds.client ||
mLastCredEndTime <= theNow + 10 ||
! ComparePrincipal(mClientName.c_str(),
mCreds.client)) {
CleanupAuth();
mErrorMsg.clear();
mErrCode = 0;
InitCredCacheKeyTab();
}
}
}
krb5_error_code CleanupSelf()
{
if (! mInitedFlag) {
return 0;
}
krb5_error_code theErr = CleanupAuth();
mInitedFlag = false;
memset(&mCreds, 0, sizeof(mCreds));
if (mServerPtr) {
krb5_free_principal(mCtx, mServerPtr);
mServerPtr = 0;
}
if (mCachePtr) {
krb5_error_code const theCErr = krb5_cc_close(mCtx, mCachePtr);
mCachePtr = 0;
if (! theErr) {
theErr = theCErr;
}
}
krb5_free_context(mCtx);
return theErr;
}
krb5_error_code CleanupAuth()
{
if (! mInitedFlag) {
return 0;
}
if (mCreds.client) {
krb5_free_principal(mCtx, mCreds.client);
mCreds.client = 0;
}
if (mKeyBlockPtr) {
krb5_free_keyblock(mCtx, mKeyBlockPtr);
mKeyBlockPtr = 0;
}
KfsKrb5::free_data_contents(mCtx, &mOutBuf);
if (! mAuthCtx) {
return 0;
}
const krb5_error_code theErr = krb5_auth_con_free(mCtx, mAuthCtx);
memset(&mCreds, 0, sizeof(mCreds));
mAuthCtx = 0;
return theErr;
}
const char* ErrStr()
{
mErrorMsg = ErrToStr(mErrCode);
return mErrorMsg.c_str();
}
string ErrToStr(
krb5_error_code inErrCode) const
{
if (! inErrCode) {
return string();
}
if ( ! mCtx) {
return string("no kerberos context");
}
const char* const theMsgPtr = krb5_get_error_message(mCtx, inErrCode);
const string theMsg((theMsgPtr && *theMsgPtr) ?
theMsgPtr : "unspecified kerberos error");
if (theMsgPtr) {
// cast away const to make it compatible with older krb5 releases.
krb5_free_error_message(mCtx, const_cast<char*>(theMsgPtr));
}
return theMsg;
}
private:
Impl(
const Impl& inImpl);
Impl& operator=(
const Impl& inImpl);
};
#ifdef KFS_KRB_IGNORE_DEPRECATED
#pragma clang diagnostic pop
#endif /* KFS_KRB_IGNORE_DEPRECATED */
KrbClient::KrbClient()
: mImpl(*(new Impl()))
{
}
KrbClient::KrbClient(
KrbClient::Impl& inImpl)
: mImpl(inImpl)
{
}
KrbClient::~KrbClient()
{
delete &mImpl;
}
const char*
KrbClient::Init(
const char* inServiceHostNamePtr,
const char* inServiceNamePtr,
const char* inKeyTabNamePtr,
const char* inClientNamePtr,
bool inForceCacheInitFlag)
{
return mImpl.Init(
inServiceHostNamePtr,
inServiceNamePtr,
inKeyTabNamePtr,
inClientNamePtr,
inForceCacheInitFlag
);
}
const char*
KrbClient::Cleanup()
{
return mImpl.Cleanup();
}
const char*
KrbClient::Request(
const char*& outDataPtr,
int& outDataLen,
const char*& outSessionKeyPtr,
int& outSessionKeyLen)
{
return mImpl.Request(
outDataPtr, outDataLen, outSessionKeyPtr, outSessionKeyLen);
}
const char*
KrbClient::Reply(
const char* inReplyPtr,
int inReplyLen)
{
return mImpl.Reply(
inReplyPtr, inReplyLen);
}
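// Illustrative client-side handshake (a sketch: error handling and the
// transport are elided, and the host/service/principal names below are
// assumptions):
//
//   KrbClient theClient;
//   const char* theErrMsgPtr = theClient.Init(
//       "metaserver.example.com", "kfs", 0, "client@EXAMPLE.COM", false);
//   const char* theDataPtr = 0; int theDataLen = 0;
//   const char* theKeyPtr  = 0; int theKeyLen  = 0;
//   theErrMsgPtr = theClient.Request(
//       theDataPtr, theDataLen, theKeyPtr, theKeyLen);
//   // ... send theDataLen bytes at theDataPtr to the server, read its
//   // AP-REP reply into (theReplyPtr, theReplyLen), then:
//   theErrMsgPtr = theClient.Reply(theReplyPtr, theReplyLen);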
int
KrbClient::GetErrorCode() const
{
return mImpl.GetErrorCode();
}
time_t
KrbClient::GetLastCredEndTime() const
{
return mImpl.GetLastCredEndTime();
}
KrbClient*
KrbClient::Clone(
const char*& outErrMsgPtr) const
{
return new KrbClient(*mImpl.Clone(outErrMsgPtr));
}
}
|
#include <OpenEXR/ImfTiledRgbaFile.h>
#include <cppmm_bind.hpp>
namespace cppmm_bind {
namespace OPENEXR_IMF_INTERNAL_NAMESPACE {
namespace Imf = ::OPENEXR_IMF_INTERNAL_NAMESPACE;
struct TiledRgbaOutputFile {
using BoundType = Imf::TiledRgbaOutputFile;
IMF_EXPORT
TiledRgbaOutputFile(const char name[], const Imf::Header& header,
Imf::RgbaChannels rgbaChannels, int tileXSize,
int tileYSize, Imf::LevelMode mode,
Imf::LevelRoundingMode rmode, int numThreads)
CPPMM_RENAME(with_header);
IMF_EXPORT
TiledRgbaOutputFile(Imf::OStream& os, const Imf::Header& header,
Imf::RgbaChannels rgbaChannels, int tileXSize,
int tileYSize, Imf::LevelMode mode,
Imf::LevelRoundingMode rmode, int numThreads)
CPPMM_RENAME(from_stream_with_header);
IMF_EXPORT
TiledRgbaOutputFile(const char name[], int tileXSize, int tileYSize,
Imf::LevelMode mode, Imf::LevelRoundingMode rmode,
const IMATH_NAMESPACE::Box2i& displayWindow,
const IMATH_NAMESPACE::Box2i& dataWindow,
Imf::RgbaChannels rgbaChannels, float pixelAspectRatio,
const IMATH_NAMESPACE::V2f screenWindowCenter,
float screenWindowWidth, Imf::LineOrder lineOrder,
Imf::Compression compression, int numThreads);
IMF_EXPORT
virtual ~TiledRgbaOutputFile();
IMF_EXPORT
void setFrameBuffer(const Imf::Rgba* base, size_t xStride, size_t yStride);
IMF_EXPORT
const Imf::Header& header() const;
IMF_EXPORT
const Imf::FrameBuffer& frameBuffer() const;
IMF_EXPORT
const IMATH_NAMESPACE::Box2i& displayWindow() const;
IMF_EXPORT
const IMATH_NAMESPACE::Box2i& dataWindow() const;
IMF_EXPORT
float pixelAspectRatio() const;
IMF_EXPORT
const IMATH_NAMESPACE::V2f screenWindowCenter() const;
IMF_EXPORT
float screenWindowWidth() const;
IMF_EXPORT
Imf::LineOrder lineOrder() const;
IMF_EXPORT
Imf::Compression compression() const;
IMF_EXPORT
Imf::RgbaChannels channels() const;
IMF_EXPORT
unsigned int tileXSize() const;
IMF_EXPORT
unsigned int tileYSize() const;
IMF_EXPORT
Imf::LevelMode levelMode() const;
IMF_EXPORT
Imf::LevelRoundingMode levelRoundingMode() const;
IMF_EXPORT
int numLevels() const;
IMF_EXPORT
int numXLevels() const;
IMF_EXPORT
int numYLevels() const;
IMF_EXPORT
bool isValidLevel(int lx, int ly) const;
IMF_EXPORT
int levelWidth(int lx) const;
IMF_EXPORT
int levelHeight(int ly) const;
IMF_EXPORT
int numXTiles(int lx = 0) const;
IMF_EXPORT
int numYTiles(int ly = 0) const;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForLevel(int l = 0) const CPPMM_IGNORE;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForLevel(int lx, int ly) const;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForTile(int dx, int dy,
int l = 0) const CPPMM_IGNORE;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForTile(int dx, int dy, int lx,
int ly) const;
IMF_EXPORT
void writeTile(int dx, int dy, int l = 0) CPPMM_IGNORE;
IMF_EXPORT
void writeTile(int dx, int dy, int lx, int ly);
IMF_EXPORT
void writeTiles(int dxMin, int dxMax, int dyMin, int dyMax, int lx, int ly);
IMF_EXPORT
void writeTiles(int dxMin, int dxMax, int dyMin, int dyMax,
int l = 0) CPPMM_IGNORE;
IMF_EXPORT
void updatePreviewImage(const Imf::PreviewRgba newPixels[]);
IMF_EXPORT
void breakTile(int dx, int dy, int lx, int ly, int offset, int length,
char c);
} CPPMM_OPAQUEBYTES;
struct TiledRgbaInputFile {
using BoundType = Imf::TiledRgbaInputFile;
IMF_EXPORT
TiledRgbaInputFile(const char name[], int numThreads);
IMF_EXPORT
TiledRgbaInputFile(Imf::IStream& is, int numThreads)
CPPMM_RENAME(from_stream);
IMF_EXPORT
TiledRgbaInputFile(const char name[], const std::string& layerName,
int numThreads) CPPMM_RENAME(with_layer);
IMF_EXPORT
TiledRgbaInputFile(Imf::IStream& is, const std::string& layerName,
int numThreads) CPPMM_RENAME(from_stream_with_layer);
IMF_EXPORT
virtual ~TiledRgbaInputFile();
IMF_EXPORT
void setFrameBuffer(Imf::Rgba* base, size_t xStride, size_t yStride);
IMF_EXPORT
void setLayerName(const std::string& layerName);
IMF_EXPORT
const Imf::Header& header() const;
IMF_EXPORT
const Imf::FrameBuffer& frameBuffer() const;
IMF_EXPORT
const IMATH_NAMESPACE::Box2i& displayWindow() const;
IMF_EXPORT
const IMATH_NAMESPACE::Box2i& dataWindow() const;
IMF_EXPORT
float pixelAspectRatio() const;
IMF_EXPORT
const IMATH_NAMESPACE::V2f screenWindowCenter() const;
IMF_EXPORT
float screenWindowWidth() const;
IMF_EXPORT
Imf::LineOrder lineOrder() const;
IMF_EXPORT
Imf::Compression compression() const;
IMF_EXPORT
Imf::RgbaChannels channels() const;
IMF_EXPORT
const char* fileName() const;
IMF_EXPORT
bool isComplete() const;
IMF_EXPORT
int version() const;
IMF_EXPORT
unsigned int tileXSize() const;
IMF_EXPORT
unsigned int tileYSize() const;
IMF_EXPORT
Imf::LevelMode levelMode() const;
IMF_EXPORT
Imf::LevelRoundingMode levelRoundingMode() const;
IMF_EXPORT
int numLevels() const;
IMF_EXPORT
int numXLevels() const;
IMF_EXPORT
int numYLevels() const;
IMF_EXPORT
bool isValidLevel(int lx, int ly) const;
IMF_EXPORT
int levelWidth(int lx) const;
IMF_EXPORT
int levelHeight(int ly) const;
IMF_EXPORT
int numXTiles(int lx = 0) const;
IMF_EXPORT
int numYTiles(int ly = 0) const;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForLevel(int l = 0) const CPPMM_IGNORE;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForLevel(int lx, int ly) const;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForTile(int dx, int dy,
int l = 0) const CPPMM_IGNORE;
IMF_EXPORT
IMATH_NAMESPACE::Box2i dataWindowForTile(int dx, int dy, int lx,
int ly) const;
IMF_EXPORT
void readTile(int dx, int dy, int l = 0) CPPMM_IGNORE;
IMF_EXPORT
void readTile(int dx, int dy, int lx, int ly);
IMF_EXPORT
void readTiles(int dxMin, int dxMax, int dyMin, int dyMax, int lx, int ly);
IMF_EXPORT
void readTiles(int dxMin, int dxMax, int dyMin, int dyMax,
int l = 0) CPPMM_IGNORE;
} CPPMM_OPAQUEBYTES;
} // namespace OPENEXR_IMF_INTERNAL_NAMESPACE
} // namespace cppmm_bind
|
/*
* Author: bwilliams
* Created: Oct 22, 2010
*
* Copyright (C) 2010-2018 VMware, Inc. All rights reserved. -- VMware Confidential
*/
#include "stdafx.h"
#include "Doc/PersistenceDoc/CCertCollectionDoc.h"
#include "Doc/PersistenceDoc/CLocalSecurityDoc.h"
#include "Doc/PersistenceDoc/CPersistenceDoc.h"
#include "Doc/PersistenceDoc/CPersistenceProtocolCollectionDoc.h"
#include "Doc/PersistenceDoc/CPersistenceProtocolDoc.h"
#include "Exception/CCafException.h"
#include "CConfigEnvMerge.h"
#ifdef WIN32
#include <winsock.h>
#pragma comment (lib, "wsock32.lib")
#else
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#endif
using namespace Caf;
SmartPtrCPersistenceDoc CConfigEnvMerge::mergePersistence(
const SmartPtrCPersistenceDoc& persistence,
const std::string& cacertPath,
const std::string& vcidPath) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "mergePersistence");
CAF_CM_VALIDATE_SMARTPTR(persistence);
CAF_CM_VALIDATE_STRING(cacertPath);
CAF_CM_VALIDATE_STRING(vcidPath);
const std::string localId = mergeLocalId(persistence, vcidPath);
std::string localIdDiff;
if (persistence->getLocalSecurity()->getLocalId().compare(localId) != 0) {
CAF_CM_LOG_DEBUG_VA2("LocalId changed - %s != %s",
persistence->getLocalSecurity()->getLocalId().c_str(), localId.c_str());
localIdDiff = localId;
}
const std::string cacert = loadTextFile(cacertPath);
const std::deque<SmartPtrCPersistenceProtocolDoc> persistenceProtocolCollectionInnerDiff =
mergePersistenceProtocolCollectionInner(
persistence->getPersistenceProtocolCollection()->getPersistenceProtocol(),
localId, cacert);
SmartPtrCPersistenceDoc rc;
if (! localIdDiff.empty() || ! persistenceProtocolCollectionInnerDiff.empty()) {
SmartPtrCLocalSecurityDoc localSecurity = persistence->getLocalSecurity();
if (! localIdDiff.empty()) {
CAF_CM_LOG_DEBUG_VA0("Creating local security diff");
localSecurity.CreateInstance();
localSecurity->initialize(
localIdDiff,
persistence->getLocalSecurity()->getPrivateKey(),
persistence->getLocalSecurity()->getCert(),
persistence->getLocalSecurity()->getPrivateKeyPath(),
persistence->getLocalSecurity()->getCertPath());
}
SmartPtrCPersistenceProtocolCollectionDoc persistenceProtocolCollection =
persistence->getPersistenceProtocolCollection();
if (! persistenceProtocolCollectionInnerDiff.empty()) {
CAF_CM_LOG_DEBUG_VA0("Creating persistence protocol diff");
persistenceProtocolCollection.CreateInstance();
persistenceProtocolCollection->initialize(persistenceProtocolCollectionInnerDiff);
}
rc.CreateInstance();
rc->initialize(
localSecurity,
persistence->getRemoteSecurityCollection(),
persistenceProtocolCollection,
persistence->getVersion());
}
return rc;
}
std::string CConfigEnvMerge::mergeLocalId(
const SmartPtrCPersistenceDoc& persistence,
const std::string& vcidPath) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "mergeLocalId");
CAF_CM_VALIDATE_SMARTPTR(persistence);
CAF_CM_VALIDATE_STRING(vcidPath);
std::string rc = loadTextFile(vcidPath);
if (rc.empty()) {
if (persistence->getLocalSecurity()->getLocalId().empty()) {
rc = CStringUtils::createRandomUuid();
} else {
rc = persistence->getLocalSecurity()->getLocalId();
}
}
return rc;
}
std::deque<SmartPtrCPersistenceProtocolDoc> CConfigEnvMerge::mergePersistenceProtocolCollectionInner(
const std::deque<SmartPtrCPersistenceProtocolDoc>& persistenceProtocolCollectionInner,
const std::string& localId,
const std::string& cacert) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "mergePersistenceProtocolCollectionInner");
CAF_CM_VALIDATE_BOOL(persistenceProtocolCollectionInner.size() == 1);
CAF_CM_VALIDATE_STRING(localId);
const bool isTunnelEnabled = isTunnelEnabledFunc();
std::deque<SmartPtrCPersistenceProtocolDoc> rc;
std::deque<SmartPtrCPersistenceProtocolDoc> persistenceProtocolCollectionInnerDiff;
std::deque<SmartPtrCPersistenceProtocolDoc> persistenceProtocolCollectionInnerAll;
for (TConstIterator<std::deque<SmartPtrCPersistenceProtocolDoc> > persistenceProtocolIter(persistenceProtocolCollectionInner);
persistenceProtocolIter; persistenceProtocolIter++) {
const SmartPtrCPersistenceProtocolDoc persistenceProtocol = *persistenceProtocolIter;
const std::string uriDiff = mergeUri(persistenceProtocol, localId, isTunnelEnabled);
const SmartPtrCCertCollectionDoc tlsCertCollectionDiff =
mergeTlsCertCollection(persistenceProtocol->getTlsCertCollection(), cacert);
SmartPtrCPersistenceProtocolDoc persistenceProtocolDiff;
persistenceProtocolDiff.CreateInstance();
persistenceProtocolDiff->initialize(
persistenceProtocol->getProtocolName(),
! uriDiff.empty() ? uriDiff : persistenceProtocol->getUri(),
! uriDiff.empty() && ! isTunnelEnabled ? uriDiff : persistenceProtocol->getUriAmqp(),
! uriDiff.empty() && isTunnelEnabled ? uriDiff : persistenceProtocol->getUriTunnel(),
persistenceProtocol->getTlsCert(),
persistenceProtocol->getTlsProtocol(),
persistenceProtocol->getTlsCipherCollection(),
tlsCertCollectionDiff.IsNull() ? persistenceProtocol->getTlsCertCollection() : tlsCertCollectionDiff,
persistenceProtocol->getUriAmqpPath(),
persistenceProtocol->getUriTunnelPath(),
persistenceProtocol->getTlsCertPath(),
persistenceProtocol->getTlsCertPathCollection());
persistenceProtocolCollectionInnerAll.push_back(persistenceProtocolDiff);
CAF_CM_LOG_DEBUG_VA2("uriDiff=%s, isTunnelEnabled=%s", uriDiff.c_str(), isTunnelEnabled?"true":"false" );
if (! uriDiff.empty() || ! tlsCertCollectionDiff.IsNull()) {
persistenceProtocolCollectionInnerDiff.push_back(persistenceProtocolDiff);
}
}
if (! persistenceProtocolCollectionInnerDiff.empty()) {
rc = persistenceProtocolCollectionInnerAll;
}
return rc;
}
std::string CConfigEnvMerge::mergeUri(
const SmartPtrCPersistenceProtocolDoc& persistenceProtocol,
const std::string& localId,
const bool isTunnelEnabled) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "mergeUri");
CAF_CM_VALIDATE_SMARTPTR(persistenceProtocol);
CAF_CM_VALIDATE_STRING(localId);
const std::string uri = persistenceProtocol->getUri();
const std::string uriNew = isTunnelEnabled ?
persistenceProtocol->getUriTunnel() :
persistenceProtocol->getUriAmqp();
CAF_CM_VALIDATE_STRING(uriNew);
CAF_CM_LOG_DEBUG_VA3("uri: %s, uriNew: %s, localId: %s",
uri.c_str(), uriNew.c_str(), localId.c_str());
UriUtils::SUriRecord uriDataNew;
UriUtils::parseUriString(uriNew, uriDataNew);
std::string rc;
std::string pathNew(localId);
if (isTunnelEnabled) {
pathNew += "-agentId1";
}
if ((uri.compare(uriNew) != 0) || (uriDataNew.path.compare(pathNew) != 0)) {
uriDataNew.path = pathNew;
rc = UriUtils::buildUriString(uriDataNew);
CAF_CM_LOG_DEBUG_VA4("uri changed - %s != %s || %s != %s",
uri.c_str(), rc.c_str(), pathNew.c_str(), uriDataNew.path.c_str());
}
CAF_CM_LOG_DEBUG_VA1("rc: %s", rc.c_str());
return rc;
}
SmartPtrCCertCollectionDoc CConfigEnvMerge::mergeTlsCertCollection(
const SmartPtrCCertCollectionDoc& tlsCertCollection,
const std::string& cacert) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "mergeTlsCertCollection");
CAF_CM_VALIDATE_SMARTPTR(tlsCertCollection);
SmartPtrCCertCollectionDoc rc;
if (! cacert.empty()) {
const Cdeqstr tlsCertCollectionInner = tlsCertCollection->getCert();
if (tlsCertCollectionInner.size() == 1) {
const std::string tlsCert = tlsCertCollectionInner.front();
if (tlsCert.compare(cacert) != 0) {
CAF_CM_LOG_DEBUG_VA2("cacert changed - %s != %s", cacert.c_str(), tlsCert.c_str());
Cdeqstr tlsCertCollectionInnerTmp;
tlsCertCollectionInnerTmp.push_back(cacert);
rc.CreateInstance();
rc->initialize(tlsCertCollectionInnerTmp);
}
}
}
return rc;
}
bool CConfigEnvMerge::isTunnelEnabledFunc() {
CAF_CM_STATIC_FUNC_LOG("CConfigEnvMerge", "isTunnelEnabledFunc");
bool rc = false;
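// Tunnel detection is a plain connectivity probe: if something accepts a TCP
// connection on 127.0.0.1:6672 (presumably the tunnel's local endpoint), the
// tunnel is considered enabled.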
#ifdef WIN32
try {
WSADATA wsaData;
int result = ::WSAStartup(MAKEWORD(2, 2), &wsaData);
if (result != NO_ERROR) {
CAF_CM_EXCEPTION_VA0(E_UNEXPECTED, "WSAStartup() Failed");
}
SOCKADDR_IN socketClient;
memset(&socketClient, 0, sizeof(SOCKADDR_IN));
socketClient.sin_family = AF_INET;
socketClient.sin_addr.s_addr = ::inet_addr("127.0.0.1");
socketClient.sin_port = ::htons(6672);
SOCKET socketFd = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (socketFd == INVALID_SOCKET) {
CAF_CM_EXCEPTION_VA1(E_UNEXPECTED, "Failed to open socket - %s", WSAGetLastError());
}
rc = (0 == ::connect(socketFd, (SOCKADDR*) &socketClient, sizeof(socketClient)));
if (socketFd != INVALID_SOCKET) {
::closesocket(socketFd);
}
}
CAF_CM_CATCH_CAF
CAF_CM_CATCH_DEFAULT
CAF_CM_LOG_CRIT_CAFEXCEPTION;
CAF_CM_CLEAREXCEPTION;
WSACleanup();
#else
int socketFd = -1;
try {
socketFd = ::socket(AF_INET, SOCK_STREAM, 0);
if (socketFd < 0) {
CAF_CM_EXCEPTION_VA0(E_UNEXPECTED, "Failed to open socket");
}
struct sockaddr_in socketClient;
memset(&socketClient, 0, sizeof(sockaddr_in));
socketClient.sin_family = AF_INET;
socketClient.sin_port = htons(6672);
int result = ::inet_aton("127.0.0.1", &socketClient.sin_addr);
if (0 == result) {
CAF_CM_EXCEPTION_VA0(ERROR_PATH_NOT_FOUND,
"Failed to get address of 127.0.0.1");
}
rc = (0 == ::connect(socketFd, (struct sockaddr *) &socketClient,
sizeof(socketClient)));
}
CAF_CM_CATCH_CAF
CAF_CM_CATCH_DEFAULT
CAF_CM_LOG_CRIT_CAFEXCEPTION;
CAF_CM_CLEAREXCEPTION;
if (socketFd >= 0) {
::close(socketFd);
}
#endif
return rc;
}
std::string CConfigEnvMerge::loadTextFile(
const std::string& path) {
CAF_CM_STATIC_FUNC_LOG_VALIDATE("CConfigEnvMerge", "loadTextFile");
CAF_CM_VALIDATE_STRING(path);
std::string rc;
if (FileSystemUtils::doesFileExist(path)) {
rc = FileSystemUtils::loadTextFile(path);
rc = CStringUtils::trimRight(rc);
} else {
CAF_CM_LOG_DEBUG_VA1("File does not exist - %s", path.c_str());
}
return rc;
}
|
/*############################################################################
# Copyright (C) Intel Corporation
#
# SPDX-License-Identifier: MIT
############################################################################*/
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <algorithm>
#include "vpl/mfxdispatcher.h"
#include "vpl/mfxjpeg.h"
#include "vpl/mfxstructures.h"
#include "vpl/mfxvp8.h"
#define DECODE_FOURCC(ch) ch & 0xff, ch >> 8 & 0xff, ch >> 16 & 0xff, ch >> 24 & 0xff
#define DECODE_FOURCC_2(ch, s) \
s[0] = ch & 0xff; \
s[1] = ch >> 8 & 0xff; \
s[2] = ch >> 16 & 0xff; \
s[3] = ch >> 24 & 0xff;
#define MAKEFOURCC(ch0, ch1, ch2, ch3) \
((mfxU32)(mfxU8)(ch0) | ((mfxU32)(mfxU8)(ch1) << 8) | ((mfxU32)(mfxU8)(ch2) << 16) | \
((mfxU32)(mfxU8)(ch3) << 24))
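// For example, MAKEFOURCC('N', 'V', '1', '2') packs 'N' into the low byte and
// '2' into the high byte, yielding 0x3231564E - the little-endian FOURCC "NV12".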
#define STRING_OPTION(x) \
case x: \
return #x
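// STRING_OPTION(x) expands to "case x: return #x", so the switches below map
// each enum value directly to its identifier as a string.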
const char *_print_fourcc(int ch) {
static char str[5];
if (0 == ch) {
str[0] = 'U';
str[1] = 'N';
str[2] = 'K';
str[3] = 'N';
str[4] = '\0';
}
else if (41 == ch) {
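// 41 is MFX_FOURCC_P8 (inherited from D3DFMT_P8), which has no printable
// four-character code, so it is rendered as "P8".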
str[0] = 'P';
str[1] = '8';
str[2] = '\0';
}
else {
DECODE_FOURCC_2(ch, str);
str[4] = '\0';
}
return str;
}
const char *_print_Impl(mfxIMPL impl) {
switch (impl) {
STRING_OPTION(MFX_IMPL_TYPE_SOFTWARE);
STRING_OPTION(MFX_IMPL_TYPE_HARDWARE);
}
return "<unknown implementation>";
}
const char *_print_AccelMode(mfxAccelerationMode mode) {
switch (mode) {
STRING_OPTION(MFX_ACCEL_MODE_NA);
STRING_OPTION(MFX_ACCEL_MODE_VIA_D3D9);
STRING_OPTION(MFX_ACCEL_MODE_VIA_D3D11);
STRING_OPTION(MFX_ACCEL_MODE_VIA_VAAPI);
STRING_OPTION(MFX_ACCEL_MODE_VIA_VAAPI_DRM_MODESET);
STRING_OPTION(MFX_ACCEL_MODE_VIA_VAAPI_GLX);
STRING_OPTION(MFX_ACCEL_MODE_VIA_VAAPI_X11);
STRING_OPTION(MFX_ACCEL_MODE_VIA_VAAPI_WAYLAND);
STRING_OPTION(MFX_ACCEL_MODE_VIA_HDDLUNITE);
}
return "<unknown acceleration mode>";
}
const char *_print_PoolPolicy(mfxPoolAllocationPolicy policy) {
switch (policy) {
STRING_OPTION(MFX_ALLOCATION_OPTIMAL);
STRING_OPTION(MFX_ALLOCATION_UNLIMITED);
STRING_OPTION(MFX_ALLOCATION_LIMITED);
}
return "<unknown pool allocation policy>";
}
const char *_print_MediaAdapterType(mfxMediaAdapterType type) {
switch (type) {
STRING_OPTION(MFX_MEDIA_UNKNOWN);
STRING_OPTION(MFX_MEDIA_INTEGRATED);
STRING_OPTION(MFX_MEDIA_DISCRETE);
}
return "<unknown media adapter type>";
}
const char *_print_ResourceType(mfxResourceType type) {
switch (type) {
STRING_OPTION(MFX_RESOURCE_SYSTEM_SURFACE);
STRING_OPTION(MFX_RESOURCE_VA_SURFACE_PTR);
STRING_OPTION(MFX_RESOURCE_VA_BUFFER_PTR);
STRING_OPTION(MFX_RESOURCE_DX9_SURFACE);
STRING_OPTION(MFX_RESOURCE_DX11_TEXTURE);
STRING_OPTION(MFX_RESOURCE_DX12_RESOURCE);
STRING_OPTION(MFX_RESOURCE_DMA_RESOURCE);
STRING_OPTION(MFX_RESOURCE_HDDLUNITE_REMOTE_MEMORY);
}
return "<unknown resource type>";
}
const char *_print_ProfileType(mfxU32 fourcc, mfxU32 type) {
switch (fourcc) {
case MFX_CODEC_JPEG: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_JPEG_BASELINE);
default:
return "<unknown MFX_CODEC_JPEG profile>";
}
}
case MFX_CODEC_AVC: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_AVC_BASELINE);
STRING_OPTION(MFX_PROFILE_AVC_MAIN);
STRING_OPTION(MFX_PROFILE_AVC_EXTENDED);
STRING_OPTION(MFX_PROFILE_AVC_HIGH);
STRING_OPTION(MFX_PROFILE_AVC_HIGH10);
STRING_OPTION(MFX_PROFILE_AVC_HIGH_422);
STRING_OPTION(MFX_PROFILE_AVC_CONSTRAINED_BASELINE);
STRING_OPTION(MFX_PROFILE_AVC_CONSTRAINED_HIGH);
STRING_OPTION(MFX_PROFILE_AVC_PROGRESSIVE_HIGH);
default:
return "<unknown MFX_CODEC_AVC profile>";
}
}
case MFX_CODEC_HEVC: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_HEVC_MAIN);
STRING_OPTION(MFX_PROFILE_HEVC_MAIN10);
STRING_OPTION(MFX_PROFILE_HEVC_MAINSP);
STRING_OPTION(MFX_PROFILE_HEVC_REXT);
STRING_OPTION(MFX_PROFILE_HEVC_SCC);
default:
return "<unknown MFX_CODEC_HEVC profile>";
}
}
case MFX_CODEC_MPEG2: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_MPEG2_SIMPLE);
STRING_OPTION(MFX_PROFILE_MPEG2_MAIN);
STRING_OPTION(MFX_LEVEL_MPEG2_HIGH);
STRING_OPTION(MFX_LEVEL_MPEG2_HIGH1440);
default:
return "<unknown MFX_CODEC_MPEG2 profile>";
}
}
case MFX_CODEC_VP8: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_VP8_0);
STRING_OPTION(MFX_PROFILE_VP8_1);
STRING_OPTION(MFX_PROFILE_VP8_2);
STRING_OPTION(MFX_PROFILE_VP8_3);
default:
return "<unknown MFX_CODEC_VP9 profile>";
}
}
case MFX_CODEC_VC1: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_VC1_SIMPLE);
STRING_OPTION(MFX_PROFILE_VC1_MAIN);
STRING_OPTION(MFX_PROFILE_VC1_ADVANCED);
default:
return "<unknown MFX_CODEC_VC1 profile>";
}
}
case MFX_CODEC_VP9: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_VP9_0);
STRING_OPTION(MFX_PROFILE_VP9_1);
STRING_OPTION(MFX_PROFILE_VP9_2);
STRING_OPTION(MFX_PROFILE_VP9_3);
default:
return "<unknown MFX_CODEC_VP9 profile>";
}
}
case MFX_CODEC_AV1: {
switch (type) {
STRING_OPTION(MFX_PROFILE_UNKNOWN);
STRING_OPTION(MFX_PROFILE_AV1_MAIN);
STRING_OPTION(MFX_PROFILE_AV1_HIGH);
STRING_OPTION(MFX_PROFILE_AV1_PRO);
default:
return "<unknown MFX_CODEC_AV1 profile>";
}
}
}
return "<unknown codec format>";
}
int main(int argc, char *argv[]) {
mfxLoader loader = MFXLoad();
if (loader == NULL) {
printf("Error - MFXLoad() returned null - no libraries found\n");
return -1;
}
bool bPrintImplementedFunctions = false;
bool bFullInfo = true;
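// Optional command-line flags:
//   -f  also list the functions each implementation exports
//   -b  brief output (skip the decoder/encoder/VPP capability dumps)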
if (argc == 2) {
if (!strncmp(argv[1], "-f", 2)) {
bPrintImplementedFunctions = true;
}
else if (!strncmp(argv[1], "-b", 2)) {
bFullInfo = false;
}
}
int i = 0;
mfxImplDescription *idesc;
while (MFX_ERR_NONE == MFXEnumImplementations(loader,
i,
MFX_IMPLCAPS_IMPLDESCSTRUCTURE,
reinterpret_cast<mfxHDL *>(&idesc))) {
printf("\nImplementation #%d: %s\n", i, idesc->ImplName);
// get path if supported (available starting with API 2.4)
mfxHDL hImplPath = nullptr;
if (MFX_ERR_NONE == MFXEnumImplementations(loader, i, MFX_IMPLCAPS_IMPLPATH, &hImplPath)) {
if (hImplPath) {
printf("%2sLibrary path: %s\n", "", reinterpret_cast<mfxChar *>(hImplPath));
MFXDispReleaseImplDescription(loader, hImplPath);
}
}
printf("%2sAccelerationMode: %s\n", "", _print_AccelMode(idesc->AccelerationMode));
printf("%2sApiVersion: %hu.%hu\n", "", idesc->ApiVersion.Major, idesc->ApiVersion.Minor);
printf("%2sImpl: %s\n", "", _print_Impl(idesc->Impl));
printf("%2sVendorImplID: 0x%04X\n", "", idesc->VendorImplID);
printf("%2sImplName: %s\n", "", idesc->ImplName);
printf("%2sLicense: %s\n", "", idesc->License);
printf("%2sVersion: %hu.%hu\n", "", idesc->Version.Major, idesc->Version.Minor);
printf("%2sKeywords: %s\n", "", idesc->Keywords);
printf("%2sVendorID: 0x%04X\n", "", idesc->VendorID);
/* mfxAccelerationModeDescription */
mfxAccelerationModeDescription *accel = &idesc->AccelerationModeDescription;
printf("%2smfxAccelerationModeDescription:\n", "");
printf("%4sVersion: %hu.%hu\n", "", accel->Version.Major, accel->Version.Minor);
for (int mode = 0; mode < accel->NumAccelerationModes; mode++) {
printf("%4sMode: %s\n", "", _print_AccelMode(accel->Mode[mode]));
}
/* mfxPoolPolicyDescription */
if (idesc->Version.Version >= MFX_STRUCT_VERSION(1, 2)) {
mfxPoolPolicyDescription *poolPolicies = &idesc->PoolPolicies;
printf("%2smfxPoolPolicyDescription:\n", "");
printf("%4sVersion: %hu.%hu\n",
"",
poolPolicies->Version.Major,
poolPolicies->Version.Minor);
for (int policy = 0; policy < poolPolicies->NumPoolPolicies; policy++) {
printf("%4sPolicy: %s\n", "", _print_PoolPolicy(poolPolicies->Policy[policy]));
}
}
/* mfxDeviceDescription */
mfxDeviceDescription *dev = &idesc->Dev;
printf("%2smfxDeviceDescription:\n", "");
if (dev->Version.Version >= MFX_STRUCT_VERSION(1, 1)) {
printf("%4sMediaAdapterType: %s\n",
"",
_print_MediaAdapterType((mfxMediaAdapterType)dev->MediaAdapterType));
}
printf("%4sDeviceID: %s\n", "", dev->DeviceID);
printf("%4sVersion: %hu.%hu\n", "", dev->Version.Major, dev->Version.Minor);
for (int subdevice = 0; subdevice < dev->NumSubDevices; subdevice++) {
printf("%4sIndex: %u\n", "", dev->SubDevices[subdevice].Index);
printf("%4sSubDeviceID: %s\n", "", dev->SubDevices[subdevice].SubDeviceID);
}
if (bFullInfo) {
/* mfxDecoderDescription */
mfxDecoderDescription *dec = &idesc->Dec;
printf("%2smfxDecoderDescription:\n", "");
printf("%4sVersion: %hu.%hu\n", "", dec->Version.Major, dec->Version.Minor);
for (int codec = 0; codec < dec->NumCodecs; codec++) {
printf("%4sCodecID: %c%c%c%c\n", "", DECODE_FOURCC(dec->Codecs[codec].CodecID));
printf("%4sMaxcodecLevel: %hu\n", "", dec->Codecs[codec].MaxcodecLevel);
for (int profile = 0; profile < dec->Codecs[codec].NumProfiles; profile++) {
printf("%6sProfile: %s\n",
"",
_print_ProfileType(dec->Codecs[codec].CodecID,
dec->Codecs[codec].Profiles[profile].Profile));
for (int memtype = 0;
memtype < dec->Codecs[codec].Profiles[profile].NumMemTypes;
memtype++) {
printf("%8sMemHandleType: %s\n",
"",
_print_ResourceType(dec->Codecs[codec]
.Profiles[profile]
.MemDesc[memtype]
.MemHandleType));
printf("%10sWidth Min: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Min);
printf("%10sWidth Max: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Max);
printf("%10sWidth Step: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Step);
printf("%10sHeight Min: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Min);
printf("%10sHeight Max: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Max);
printf("%10sHeight Step: %u\n",
"",
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Step);
printf("%10sColorFormats: ", "");
for (int colorformat = 0;
colorformat <
dec->Codecs[codec].Profiles[profile].MemDesc[memtype].NumColorFormats;
colorformat++) {
if (0 != colorformat)
printf(", ");
printf("%s",
_print_fourcc(dec->Codecs[codec]
.Profiles[profile]
.MemDesc[memtype]
.ColorFormats[colorformat]));
}
printf("\n");
}
}
}
/* mfxEncoderDescription */
mfxEncoderDescription *enc = &idesc->Enc;
printf("%2smfxEncoderDescription:\n", "");
printf("%4sVersion: %hu.%hu\n", "", enc->Version.Major, enc->Version.Minor);
for (int codec = 0; codec < enc->NumCodecs; codec++) {
printf("%4sCodecID: %c%c%c%c\n", "", DECODE_FOURCC(enc->Codecs[codec].CodecID));
printf("%4sMaxcodecLevel: %hu\n", "", enc->Codecs[codec].MaxcodecLevel);
printf("%4sBiDirectionalPrediction: %hu\n",
"",
enc->Codecs[codec].BiDirectionalPrediction);
for (int profile = 0; profile < enc->Codecs[codec].NumProfiles; profile++) {
printf("%6sProfile: %s\n",
"",
_print_ProfileType(enc->Codecs[codec].CodecID,
enc->Codecs[codec].Profiles[profile].Profile));
for (int memtype = 0;
memtype < enc->Codecs[codec].Profiles[profile].NumMemTypes;
memtype++) {
printf("%8sMemHandleType: %s\n",
"",
_print_ResourceType(enc->Codecs[codec]
.Profiles[profile]
.MemDesc[memtype]
.MemHandleType));
printf("%10sWidth Min: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Min);
printf("%10sWidth Max: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Max);
printf("%10sWidth Step: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Width.Step);
printf("%10sHeight Min: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Min);
printf("%10sHeight Max: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Max);
printf("%10sHeight Step: %u\n",
"",
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].Height.Step);
printf("%10sColorFormats: ", "");
for (int colorformat = 0;
colorformat <
enc->Codecs[codec].Profiles[profile].MemDesc[memtype].NumColorFormats;
colorformat++) {
if (0 != colorformat)
printf(", ");
printf("%s",
_print_fourcc(enc->Codecs[codec]
.Profiles[profile]
.MemDesc[memtype]
.ColorFormats[colorformat]));
}
printf("\n");
}
}
}
/* mfxVPPDescription */
mfxVPPDescription *vpp = &idesc->VPP;
printf("%2smfxVPPDescription:\n", "");
printf("%4sVersion: %hu.%hu\n", "", vpp->Version.Major, vpp->Version.Minor);
for (int filter = 0; filter < vpp->NumFilters; filter++) {
printf("%4sFilterFourCC: %c%c%c%c\n",
"",
DECODE_FOURCC(vpp->Filters[filter].FilterFourCC));
printf("%4sMaxDelayInFrames: %hu\n", "", vpp->Filters[filter].MaxDelayInFrames);
for (int memtype = 0; memtype < vpp->Filters[filter].NumMemTypes; memtype++) {
printf(
"%6sMemHandleType: %s\n",
"",
_print_ResourceType(vpp->Filters[filter].MemDesc[memtype].MemHandleType));
printf("%6sWidth Min: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Min);
printf("%6sWidth Max: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Max);
printf("%6sWidth Step: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Step);
printf("%6sHeight Min: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Min);
printf("%6sHeight Max: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Max);
printf("%6sHeight Step: %u\n",
"",
vpp->Filters[filter].MemDesc[memtype].Width.Step);
for (int informat = 0;
informat < vpp->Filters[filter].MemDesc[memtype].NumInFormats;
informat++) {
printf(
"%8sInFormat: %s\n",
"",
_print_fourcc(
vpp->Filters[filter].MemDesc[memtype].Formats[informat].InFormat));
printf("%10sOutFormats: ", "");
for (int outformat = 0;
outformat <
vpp->Filters[filter].MemDesc[memtype].Formats[informat].NumOutFormat;
outformat++) {
if (0 != outformat)
printf(", ");
printf("%s",
_print_fourcc(vpp->Filters[filter]
.MemDesc[memtype]
.Formats[informat]
.OutFormats[outformat]));
}
printf("\n");
}
}
}
printf("%2sNumExtParam: %d\n", "", idesc->NumExtParam);
}
MFXDispReleaseImplDescription(loader, idesc);
if (bPrintImplementedFunctions) {
mfxImplementedFunctions *fdesc;
mfxStatus sts = MFXEnumImplementations(loader,
i,
MFX_IMPLCAPS_IMPLEMENTEDFUNCTIONS,
reinterpret_cast<mfxHDL *>(&fdesc));
if (sts == MFX_ERR_NONE) {
// print out list of functions' name
printf("%2sImplemented functions:\n", "");
std::for_each(fdesc->FunctionsName,
fdesc->FunctionsName + fdesc->NumFunctions,
[](mfxChar *functionName) {
printf("%4s%s\n", "", functionName);
});
MFXDispReleaseImplDescription(loader, fdesc);
}
else {
printf("%2sWarning - MFX_IMPLCAPS_IMPLEMENTEDFUNCTIONS not supported\n", "");
}
}
i++;
}
if (i == 0)
printf("\nWarning - no implementations found by MFXEnumImplementations()\n");
else
printf("\nTotal number of implementations found = %d\n", i);
MFXUnload(loader);
return 0;
}
|
/*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2021 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#include <inviwo/dataframe/processors/dataframemetadata.h>
#include <inviwo/core/util/zip.h>
namespace inviwo {
// The Class Identifier has to be globally unique. Use a reverse DNS naming scheme
const ProcessorInfo DataFrameMetaData::processorInfo_{
"org.inviwo.DataFrameMetaData", // Class identifier
"DataFrame MetaData", // Display name
"DataFrame", // Category
CodeState::Experimental, // Code state
"CPU, DataFrame", // Tags
};
const ProcessorInfo DataFrameMetaData::getProcessorInfo() const { return processorInfo_; }
DataFrameMetaData::DataFrameMetaData()
: Processor()
, inport_("inport")
, outport_("outport")
, columns_("columns", "Column MetaData", inport_) {
addPort(inport_);
addPort(outport_);
addProperty(columns_);
}
void DataFrameMetaData::process() {
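// Work on a copy so the data on the inport is left untouched while the
// configured ranges and metadata are applied to the copy's columns.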
auto dataframe = std::make_shared<DataFrame>(*inport_.getData());
for (auto&& [index, col] : util::enumerate(*dataframe)) {
col->setRange(columns_.getRange(index));
col->copyMetaDataFrom(columns_.getColumnMetaData(index));
}
outport_.setData(dataframe);
}
} // namespace inviwo
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "net_processing.h"
#include "addrman.h"
#include "arith_uint256.h"
#include "blockencodings.h"
#include "chainparams.h"
#include "consensus/validation.h"
#include "hash.h"
#include "init.h"
#include "validation.h"
#include "merkleblock.h"
#include "net.h"
#include "netmessagemaker.h"
#include "netbase.h"
#include "policy/fees.h"
#include "policy/policy.h"
#include "primitives/block.h"
#include "primitives/transaction.h"
#include "random.h"
#include "reverse_iterator.h"
#include "scheduler.h"
#include "tinyformat.h"
#include "txmempool.h"
#include "ui_interface.h"
#include "util.h"
#include "utilmoneystr.h"
#include "utilstrencodings.h"
#include "validationinterface.h"
#if defined(NDEBUG)
# error "JattCoin cannot be compiled without assertions."
#endif
std::atomic<int64_t> nTimeBestReceived(0); // Used only to inform the wallet of when we last received a block
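// Orders map iterators by the address of the element they point to; this gives
// a stable strict weak ordering so the iterators can live in a std::set below.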
struct IteratorComparator
{
template<typename I>
bool operator()(const I& a, const I& b)
{
return &(*a) < &(*b);
}
};
struct COrphanTx {
// When modifying, adapt the copy of this definition in tests/DoS_tests.
CTransactionRef tx;
NodeId fromPeer;
int64_t nTimeExpire;
};
std::map<uint256, COrphanTx> mapOrphanTransactions GUARDED_BY(cs_main);
std::map<COutPoint, std::set<std::map<uint256, COrphanTx>::iterator, IteratorComparator>> mapOrphanTransactionsByPrev GUARDED_BY(cs_main);
void EraseOrphansFor(NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
static size_t vExtraTxnForCompactIt = 0;
static std::vector<std::pair<uint256, CTransactionRef>> vExtraTxnForCompact GUARDED_BY(cs_main);
static const uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL; // SHA256("main address relay")[0:8]
// Internal stuff
namespace {
/** Number of nodes with fSyncStarted. */
int nSyncStarted = 0;
/**
* Sources of received blocks, saved to be able to send them reject
* messages or ban them when processing happens afterwards. Protected by
* cs_main.
* Set mapBlockSource[hash].second to false if the node should not be
* punished if the block is invalid.
*/
std::map<uint256, std::pair<NodeId, bool>> mapBlockSource;
/**
* Filter for transactions that were recently rejected by
* AcceptToMemoryPool. These are not rerequested until the chain tip
* changes, at which point the entire filter is reset. Protected by
* cs_main.
*
* Without this filter we'd be re-requesting txs from each of our peers,
* increasing bandwidth consumption considerably. For instance, with 100
* peers, half of which relay a tx we don't accept, that might be a 50x
* bandwidth increase. A flooding attacker attempting to roll-over the
* filter using minimum-sized, 60-byte transactions might manage to send
* 1000/sec if we have fast peers, so we pick 120,000 to give our peers a
* two minute window to send invs to us.
*
* Decreasing the false positive rate is fairly cheap, so we pick one in a
* million to make it highly unlikely for users to have issues with this
* filter.
*
* Memory used: 1.3 MB
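*
* (Sizing sketch, not from the original source: at ~1000 tx/sec, a two
* minute window needs 1000 * 120 = 120,000 entries; a Bloom filter with
* n = 120,000 and p = 10^-6 needs about n * ln(1/p) / ln(2)^2 = ~3.45
* million bits (~0.43 MB), and the rolling variant keeps a few
* generations of that, which is roughly where the 1.3 MB figure comes from.)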
*/
std::unique_ptr<CRollingBloomFilter> recentRejects;
uint256 hashRecentRejectsChainTip;
/** Blocks that are in flight, and that are in the queue to be downloaded. Protected by cs_main. */
struct QueuedBlock {
uint256 hash;
const CBlockIndex* pindex; //!< Optional.
bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request.
std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads
};
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight;
/** Stack of nodes which we have set to announce using compact blocks */
std::list<NodeId> lNodesAnnouncingHeaderAndIDs;
/** Number of preferable block download peers. */
int nPreferredDownload = 0;
/** Number of peers from which we're downloading blocks. */
int nPeersWithValidatedDownloads = 0;
/** Number of outbound peers with m_chain_sync.m_protect. */
int g_outbound_peers_with_protect_from_disconnect = 0;
/** When our tip was last updated. */
int64_t g_last_tip_update = 0;
/** Relay map, protected by cs_main. */
typedef std::map<uint256, CTransactionRef> MapRelay;
MapRelay mapRelay;
/** Expiration-time ordered list of (expire time, relay map entry) pairs, protected by cs_main. */
std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration;
} // namespace
namespace {
struct CBlockReject {
unsigned char chRejectCode;
std::string strRejectReason;
uint256 hashBlock;
};
/**
* Maintain validation-specific state about nodes, protected by cs_main, instead
* of by CNode's own locks. This simplifies asynchronous operation, where
* processing of incoming data is done after the ProcessMessage call returns,
* and we're no longer holding the node's locks.
*/
struct CNodeState {
//! The peer's address
const CService address;
//! Whether we have a fully established connection.
bool fCurrentlyConnected;
//! Accumulated misbehaviour score for this peer.
int nMisbehavior;
//! Whether this peer should be disconnected and banned (unless whitelisted).
bool fShouldBan;
//! String name of this peer (debugging/logging purposes).
const std::string name;
//! List of asynchronously-determined block rejections to notify this peer about.
std::vector<CBlockReject> rejects;
//! The best known block we know this peer has announced.
const CBlockIndex *pindexBestKnownBlock;
//! The hash of the last unknown block this peer has announced.
uint256 hashLastUnknownBlock;
//! The last full block we both have.
const CBlockIndex *pindexLastCommonBlock;
//! The best header we have sent our peer.
const CBlockIndex *pindexBestHeaderSent;
//! Length of current-streak of unconnecting headers announcements
int nUnconnectingHeaders;
//! Whether we've started headers synchronization with this peer.
bool fSyncStarted;
//! When to potentially disconnect peer for stalling headers download
int64_t nHeadersSyncTimeout;
//! Since when we're stalling block download progress (in microseconds), or 0.
int64_t nStallingSince;
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
int64_t nDownloadingSince;
int nBlocksInFlight;
int nBlocksInFlightValidHeaders;
//! Whether we consider this a preferred download peer.
bool fPreferredDownload;
//! Whether this peer wants invs or headers (when possible) for block announcements.
bool fPreferHeaders;
//! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
bool fPreferHeaderAndIDs;
/**
* Whether this peer will send us cmpctblocks if we request them.
* This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
* but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
*/
bool fProvidesHeaderAndIDs;
//! Whether this peer can give us witnesses
bool fHaveWitness;
//! Whether this peer wants witnesses in cmpctblocks/blocktxns
bool fWantsCmpctWitness;
/**
* If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
* otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
*/
bool fSupportsDesiredCmpctVersion;
/** State used to enforce CHAIN_SYNC_TIMEOUT
* Only in effect for outbound, non-manual connections, with
* m_protect == false
* Algorithm: if a peer's best known block has less work than our tip,
* set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
* - If at timeout their best known block now has more work than our tip
* when the timeout was set, then either reset the timeout or clear it
* (after comparing against our current tip's work)
* - If at timeout their best known block still has less work than our
* tip did when the timeout was set, then send a getheaders message,
* and set a shorter timeout, HEADERS_RESPONSE_TIME seconds in future.
* If their best known block is still behind when that new timeout is
* reached, disconnect.
*/
struct ChainSyncTimeoutState {
//! A timeout used for checking whether our peer has sufficiently synced
int64_t m_timeout;
//! A header with the work we require on our peer's chain
const CBlockIndex * m_work_header;
//! After timeout is reached, set to true after sending getheaders
bool m_sent_getheaders;
//! Whether this peer is protected from disconnection due to a bad/slow chain
bool m_protect;
};
ChainSyncTimeoutState m_chain_sync;
//! Time of last new block announcement
int64_t m_last_block_announcement;
CNodeState(CAddress addrIn, std::string addrNameIn) : address(addrIn), name(addrNameIn) {
fCurrentlyConnected = false;
nMisbehavior = 0;
fShouldBan = false;
pindexBestKnownBlock = nullptr;
hashLastUnknownBlock.SetNull();
pindexLastCommonBlock = nullptr;
pindexBestHeaderSent = nullptr;
nUnconnectingHeaders = 0;
fSyncStarted = false;
nHeadersSyncTimeout = 0;
nStallingSince = 0;
nDownloadingSince = 0;
nBlocksInFlight = 0;
nBlocksInFlightValidHeaders = 0;
fPreferredDownload = false;
fPreferHeaders = false;
fPreferHeaderAndIDs = false;
fProvidesHeaderAndIDs = false;
fHaveWitness = false;
fWantsCmpctWitness = false;
fSupportsDesiredCmpctVersion = false;
m_chain_sync = { 0, nullptr, false, false };
m_last_block_announcement = 0;
}
};
/** Map maintaining per-node state. Requires cs_main. */
std::map<NodeId, CNodeState> mapNodeState;
// Requires cs_main.
CNodeState *State(NodeId pnode) {
std::map<NodeId, CNodeState>::iterator it = mapNodeState.find(pnode);
if (it == mapNodeState.end())
return nullptr;
return &it->second;
}
void UpdatePreferredDownload(CNode* node, CNodeState* state)
{
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
state->fPreferredDownload = (!node->fInbound || node->fWhitelisted) && !node->fOneShot && !node->fClient;
nPreferredDownload += state->fPreferredDownload;
}
void PushNodeVersion(CNode *pnode, CConnman* connman, int64_t nTime)
{
ServiceFlags nLocalNodeServices = pnode->GetLocalServices();
uint64_t nonce = pnode->GetLocalNonce();
int nNodeStartingHeight = pnode->GetMyStartingHeight();
NodeId nodeid = pnode->GetId();
CAddress addr = pnode->addr;
CAddress addrYou = (addr.IsRoutable() && !IsProxy(addr) ? addr : CAddress(CService(), addr.nServices));
CAddress addrMe = CAddress(CService(), nLocalNodeServices);
connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
nonce, strSubVersion, nNodeStartingHeight, ::fRelayTxes));
if (fLogIPs) {
LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
} else {
LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
}
}
// Requires cs_main.
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
bool MarkBlockAsReceived(const uint256& hash) {
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end()) {
CNodeState *state = State(itInFlight->second.first);
state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders;
if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) {
// Last validated block on the queue was received.
nPeersWithValidatedDownloads--;
}
if (state->vBlocksInFlight.begin() == itInFlight->second.second) {
// First block on the queue was received, update the start download time for the next one
state->nDownloadingSince = std::max(state->nDownloadingSince, GetTimeMicros());
}
state->vBlocksInFlight.erase(itInFlight->second.second);
state->nBlocksInFlight--;
state->nStallingSince = 0;
mapBlocksInFlight.erase(itInFlight);
return true;
}
return false;
}
// Requires cs_main.
// returns false, still setting pit, if the block was already in flight from the same peer
// pit will only be valid as long as the same cs_main lock is being held
bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
// Short-circuit most stuff in case it's from the same node
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
if (pit) {
*pit = &itInFlight->second.second;
}
return false;
}
// Make sure it's not listed somewhere already.
MarkBlockAsReceived(hash);
std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
{hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)});
state->nBlocksInFlight++;
state->nBlocksInFlightValidHeaders += it->fValidatedHeaders;
if (state->nBlocksInFlight == 1) {
// We're starting a block download (batch) from this peer.
state->nDownloadingSince = GetTimeMicros();
}
if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) {
nPeersWithValidatedDownloads++;
}
itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
if (pit)
*pit = &itInFlight->second.second;
return true;
}
/** Check whether the last unknown block a peer advertised is not yet known. */
void ProcessBlockAvailability(NodeId nodeid) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
if (!state->hashLastUnknownBlock.IsNull()) {
BlockMap::iterator itOld = mapBlockIndex.find(state->hashLastUnknownBlock);
if (itOld != mapBlockIndex.end() && itOld->second->nChainWork > 0) {
if (state->pindexBestKnownBlock == nullptr || itOld->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = itOld->second;
state->hashLastUnknownBlock.SetNull();
}
}
}
/** Update tracking information about which blocks a peer is assumed to have. */
void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
CNodeState *state = State(nodeid);
assert(state != nullptr);
ProcessBlockAvailability(nodeid);
BlockMap::iterator it = mapBlockIndex.find(hash);
if (it != mapBlockIndex.end() && it->second->nChainWork > 0) {
// An actually better block was announced.
if (state->pindexBestKnownBlock == nullptr || it->second->nChainWork >= state->pindexBestKnownBlock->nChainWork)
state->pindexBestKnownBlock = it->second;
} else {
// An unknown block was announced; just assume that the latest one is the best one.
state->hashLastUnknownBlock = hash;
}
}
void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman* connman) {
AssertLockHeld(cs_main);
CNodeState* nodestate = State(nodeid);
if (!nodestate || !nodestate->fSupportsDesiredCmpctVersion) {
// Never ask from peers who can't provide witnesses.
return;
}
if (nodestate->fProvidesHeaderAndIDs) {
for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) {
if (*it == nodeid) {
lNodesAnnouncingHeaderAndIDs.erase(it);
lNodesAnnouncingHeaderAndIDs.push_back(nodeid);
return;
}
}
connman->ForNode(nodeid, [connman](CNode* pfrom){
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1;
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
// As per BIP152, we only get 3 of our peers to announce
// blocks using compact encodings.
connman->ForNode(lNodesAnnouncingHeaderAndIDs.front(), [connman, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion](CNode* pnodeStop){
connman->PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
return true;
});
lNodesAnnouncingHeaderAndIDs.pop_front();
}
fAnnounceUsingCMPCTBLOCK = true;
connman->PushMessage(pfrom, CNetMsgMaker(pfrom->GetSendVersion()).Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
return true;
});
}
}
bool TipMayBeStale(const Consensus::Params &consensusParams)
{
AssertLockHeld(cs_main);
if (g_last_tip_update == 0) {
g_last_tip_update = GetTime();
}
return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
}
// Requires cs_main
bool CanDirectFetch(const Consensus::Params &consensusParams)
{
return chainActive.Tip()->GetBlockTime() > GetAdjustedTime() - consensusParams.nPowTargetSpacing * 20;
}
// Requires cs_main
bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex)
{
if (state->pindexBestKnownBlock && pindex == state->pindexBestKnownBlock->GetAncestor(pindex->nHeight))
return true;
if (state->pindexBestHeaderSent && pindex == state->pindexBestHeaderSent->GetAncestor(pindex->nHeight))
return true;
return false;
}
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
* at most count entries. */
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
if (count == 0)
return;
vBlocks.reserve(vBlocks.size() + count);
CNodeState *state = State(nodeid);
assert(state != nullptr);
// Make sure pindexBestKnownBlock is up to date, we'll need it.
ProcessBlockAvailability(nodeid);
if (state->pindexBestKnownBlock == nullptr || state->pindexBestKnownBlock->nChainWork < chainActive.Tip()->nChainWork || state->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
// This peer has nothing interesting.
return;
}
if (state->pindexLastCommonBlock == nullptr) {
// Bootstrap quickly by guessing a parent of our best tip is the forking point.
// Guessing wrong in either direction is not a problem.
state->pindexLastCommonBlock = chainActive[std::min(state->pindexBestKnownBlock->nHeight, chainActive.Height())];
}
// If the peer reorganized, our previous pindexLastCommonBlock may not be an ancestor
// of its current tip anymore. Go back enough to fix that.
state->pindexLastCommonBlock = LastCommonAncestor(state->pindexLastCommonBlock, state->pindexBestKnownBlock);
if (state->pindexLastCommonBlock == state->pindexBestKnownBlock)
return;
std::vector<const CBlockIndex*> vToFetch;
const CBlockIndex *pindexWalk = state->pindexLastCommonBlock;
// Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last
// linked block we have in common with this peer. The +1 is so we can detect stalling, namely if we would be able to
// download that next block if the window were 1 larger.
int nWindowEnd = state->pindexLastCommonBlock->nHeight + BLOCK_DOWNLOAD_WINDOW;
int nMaxHeight = std::min<int>(state->pindexBestKnownBlock->nHeight, nWindowEnd + 1);
NodeId waitingfor = -1;
while (pindexWalk->nHeight < nMaxHeight) {
// Read up to 128 (or more, if more blocks than that are needed) successors of pindexWalk (towards
// pindexBestKnownBlock) into vToFetch. We fetch 128, because CBlockIndex::GetAncestor may be as expensive
// as iterating over ~100 CBlockIndex* entries anyway.
int nToFetch = std::min(nMaxHeight - pindexWalk->nHeight, std::max<int>(count - vBlocks.size(), 128));
vToFetch.resize(nToFetch);
pindexWalk = state->pindexBestKnownBlock->GetAncestor(pindexWalk->nHeight + nToFetch);
vToFetch[nToFetch - 1] = pindexWalk;
for (unsigned int i = nToFetch - 1; i > 0; i--) {
vToFetch[i - 1] = vToFetch[i]->pprev;
}
// Iterate over those blocks in vToFetch (in forward direction), adding the ones that
// are not yet downloaded and not in flight to vBlocks. In the mean time, update
// pindexLastCommonBlock as long as all ancestors are already downloaded, or if it's
// already part of our chain (and therefore don't need it even if pruned).
for (const CBlockIndex* pindex : vToFetch) {
if (!pindex->IsValid(BLOCK_VALID_TREE)) {
// We consider the chain that this peer is on invalid.
return;
}
if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
// We wouldn't download this block or its descendants from this peer.
return;
}
if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
if (pindex->nChainTx)
state->pindexLastCommonBlock = pindex;
} else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) {
// The block is not already downloaded, and not yet in flight.
if (pindex->nHeight > nWindowEnd) {
// We reached the end of the window.
if (vBlocks.size() == 0 && waitingfor != nodeid) {
// We aren't able to fetch anything, but we would be if the download window was one larger.
nodeStaller = waitingfor;
}
return;
}
vBlocks.push_back(pindex);
if (vBlocks.size() == count) {
return;
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
}
}
}
}
} // namespace
// This function is used for testing the stale tip eviction logic, see
// DoS_tests.cpp
void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
{
LOCK(cs_main);
CNodeState *state = State(node);
if (state) state->m_last_block_announcement = time_in_seconds;
}
// Returns true for outbound peers, excluding manual connections, feelers, and
// one-shots
bool IsOutboundDisconnectionCandidate(const CNode *node)
{
return !(node->fInbound || node->m_manual_connection || node->fFeeler || node->fOneShot);
}
void PeerLogicValidation::InitializeNode(CNode *pnode) {
CAddress addr = pnode->addr;
std::string addrName = pnode->GetAddrName();
NodeId nodeid = pnode->GetId();
{
LOCK(cs_main);
mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName)));
}
if(!pnode->fInbound)
PushNodeVersion(pnode, connman, GetTime());
}
void PeerLogicValidation::FinalizeNode(NodeId nodeid, bool& fUpdateConnectionTime) {
fUpdateConnectionTime = false;
LOCK(cs_main);
CNodeState *state = State(nodeid);
assert(state != nullptr);
if (state->fSyncStarted)
nSyncStarted--;
if (state->nMisbehavior == 0 && state->fCurrentlyConnected) {
fUpdateConnectionTime = true;
}
for (const QueuedBlock& entry : state->vBlocksInFlight) {
mapBlocksInFlight.erase(entry.hash);
}
EraseOrphansFor(nodeid);
nPreferredDownload -= state->fPreferredDownload;
nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0);
assert(nPeersWithValidatedDownloads >= 0);
g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
assert(g_outbound_peers_with_protect_from_disconnect >= 0);
mapNodeState.erase(nodeid);
if (mapNodeState.empty()) {
// Do a consistency check after the last peer is removed.
assert(mapBlocksInFlight.empty());
assert(nPreferredDownload == 0);
assert(nPeersWithValidatedDownloads == 0);
assert(g_outbound_peers_with_protect_from_disconnect == 0);
}
LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
LOCK(cs_main);
CNodeState *state = State(nodeid);
if (state == nullptr)
return false;
stats.nMisbehavior = state->nMisbehavior;
stats.nSyncHeight = state->pindexBestKnownBlock ? state->pindexBestKnownBlock->nHeight : -1;
stats.nCommonHeight = state->pindexLastCommonBlock ? state->pindexLastCommonBlock->nHeight : -1;
for (const QueuedBlock& queue : state->vBlocksInFlight) {
if (queue.pindex)
stats.vHeightInFlight.push_back(queue.pindex->nHeight);
}
return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
void AddToCompactExtraTransactions(const CTransactionRef& tx)
{
size_t max_extra_txn = gArgs.GetArg("-blockreconstructionextratxn", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN);
if (max_extra_txn <= 0)
return;
if (!vExtraTxnForCompact.size())
vExtraTxnForCompact.resize(max_extra_txn);
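// The vector is a fixed-size ring buffer: overwrite the slot at the cursor,
// then advance the cursor modulo the configured capacity.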
vExtraTxnForCompact[vExtraTxnForCompactIt] = std::make_pair(tx->GetWitnessHash(), tx);
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % max_extra_txn;
}
bool AddOrphanTx(const CTransactionRef& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
const uint256& hash = tx->GetHash();
if (mapOrphanTransactions.count(hash))
return false;
// Ignore big transactions, to avoid a
// send-big-orphans memory exhaustion attack. If a peer has a legitimate
// large transaction with a missing parent then we assume
// it will rebroadcast it later, after the parent transaction(s)
// have been mined or received.
// 100 orphans, each of which is at most 99,999 bytes big, is
// at most 10 megabytes of orphans, plus somewhat more for the by-prev index (in the worst case):
unsigned int sz = GetTransactionWeight(*tx);
if (sz >= MAX_STANDARD_TX_WEIGHT)
{
LogPrint(BCLog::MEMPOOL, "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
return false;
}
auto ret = mapOrphanTransactions.emplace(hash, COrphanTx{tx, peer, GetTime() + ORPHAN_TX_EXPIRE_TIME});
assert(ret.second);
for (const CTxIn& txin : tx->vin) {
mapOrphanTransactionsByPrev[txin.prevout].insert(ret.first);
}
AddToCompactExtraTransactions(tx);
LogPrint(BCLog::MEMPOOL, "stored orphan tx %s (mapsz %u outsz %u)\n", hash.ToString(),
mapOrphanTransactions.size(), mapOrphanTransactionsByPrev.size());
return true;
}
int static EraseOrphanTx(uint256 hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.find(hash);
if (it == mapOrphanTransactions.end())
return 0;
for (const CTxIn& txin : it->second.tx->vin)
{
auto itPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itPrev == mapOrphanTransactionsByPrev.end())
continue;
itPrev->second.erase(it);
if (itPrev->second.empty())
mapOrphanTransactionsByPrev.erase(itPrev);
}
mapOrphanTransactions.erase(it);
return 1;
}
void EraseOrphansFor(NodeId peer)
{
int nErased = 0;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
std::map<uint256, COrphanTx>::iterator maybeErase = iter++; // increment to avoid iterator becoming invalid
if (maybeErase->second.fromPeer == peer)
{
nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
}
}
if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx from peer=%d\n", nErased, peer);
}
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
unsigned int nEvicted = 0;
static int64_t nNextSweep;
int64_t nNow = GetTime();
if (nNextSweep <= nNow) {
// Sweep out expired orphan pool entries:
int nErased = 0;
int64_t nMinExpTime = nNow + ORPHAN_TX_EXPIRE_TIME - ORPHAN_TX_EXPIRE_INTERVAL;
std::map<uint256, COrphanTx>::iterator iter = mapOrphanTransactions.begin();
while (iter != mapOrphanTransactions.end())
{
std::map<uint256, COrphanTx>::iterator maybeErase = iter++;
if (maybeErase->second.nTimeExpire <= nNow) {
nErased += EraseOrphanTx(maybeErase->second.tx->GetHash());
} else {
nMinExpTime = std::min(maybeErase->second.nTimeExpire, nMinExpTime);
}
}
// Sweep again 5 minutes after the next entry that expires in order to batch the linear scan.
nNextSweep = nMinExpTime + ORPHAN_TX_EXPIRE_INTERVAL;
if (nErased > 0) LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx due to expiration\n", nErased);
}
while (mapOrphanTransactions.size() > nMaxOrphans)
{
// Evict a random orphan:
uint256 randomhash = GetRandHash();
std::map<uint256, COrphanTx>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
if (it == mapOrphanTransactions.end())
it = mapOrphanTransactions.begin();
EraseOrphanTx(it->first);
++nEvicted;
}
return nEvicted;
}
// Requires cs_main.
void Misbehaving(NodeId pnode, int howmuch)
{
if (howmuch == 0)
return;
CNodeState *state = State(pnode);
if (state == nullptr)
return;
state->nMisbehavior += howmuch;
int banscore = gArgs.GetArg("-banscore", DEFAULT_BANSCORE_THRESHOLD);
if (state->nMisbehavior >= banscore && state->nMisbehavior - howmuch < banscore)
{
LogPrintf("%s: %s peer=%d (%d -> %d) BAN THRESHOLD EXCEEDED\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
state->fShouldBan = true;
} else
LogPrintf("%s: %s peer=%d (%d -> %d)\n", __func__, state->name, pnode, state->nMisbehavior-howmuch, state->nMisbehavior);
}
//////////////////////////////////////////////////////////////////////////////
//
// blockchain -> download logic notification
//
PeerLogicValidation::PeerLogicValidation(CConnman* connmanIn, CScheduler &scheduler) : connman(connmanIn), m_stale_tip_check_time(0) {
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
const Consensus::Params& consensusParams = Params().GetConsensus();
// Stale tip checking and peer eviction are on two different timers, but we
// don't want them to get out of sync due to drift in the scheduler, so we
// combine them in one function and schedule at the quicker (peer-eviction)
// timer.
static_assert(EXTRA_PEER_CHECK_INTERVAL < STALE_CHECK_INTERVAL, "peer eviction timer should be less than stale tip check timer");
scheduler.scheduleEvery(std::bind(&PeerLogicValidation::CheckForStaleTipAndEvictPeers, this, consensusParams), EXTRA_PEER_CHECK_INTERVAL * 1000);
}
void PeerLogicValidation::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindex, const std::vector<CTransactionRef>& vtxConflicted) {
LOCK(cs_main);
std::vector<uint256> vOrphanErase;
for (const CTransactionRef& ptx : pblock->vtx) {
const CTransaction& tx = *ptx;
// Which orphan pool entries must we evict?
for (const auto& txin : tx.vin) {
auto itByPrev = mapOrphanTransactionsByPrev.find(txin.prevout);
if (itByPrev == mapOrphanTransactionsByPrev.end()) continue;
for (auto mi = itByPrev->second.begin(); mi != itByPrev->second.end(); ++mi) {
const CTransaction& orphanTx = *(*mi)->second.tx;
const uint256& orphanHash = orphanTx.GetHash();
vOrphanErase.push_back(orphanHash);
}
}
}
// Erase orphan transactions included or precluded by this block
if (vOrphanErase.size()) {
int nErased = 0;
for (uint256 &orphanHash : vOrphanErase) {
nErased += EraseOrphanTx(orphanHash);
}
LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased);
}
g_last_tip_update = GetTime();
}
// All of the following cache a recent block, and are protected by cs_most_recent_block
static CCriticalSection cs_most_recent_block;
static std::shared_ptr<const CBlock> most_recent_block;
static std::shared_ptr<const CBlockHeaderAndShortTxIDs> most_recent_compact_block;
static uint256 most_recent_block_hash;
static bool fWitnessesPresentInMostRecentCompactBlock;
void PeerLogicValidation::NewPoWValidBlock(const CBlockIndex *pindex, const std::shared_ptr<const CBlock>& pblock) {
std::shared_ptr<const CBlockHeaderAndShortTxIDs> pcmpctblock = std::make_shared<const CBlockHeaderAndShortTxIDs> (*pblock, true);
const CNetMsgMaker msgMaker(PROTOCOL_VERSION);
LOCK(cs_main);
static int nHighestFastAnnounce = 0;
if (pindex->nHeight <= nHighestFastAnnounce)
return;
nHighestFastAnnounce = pindex->nHeight;
bool fWitnessEnabled = IsWitnessEnabled(pindex->pprev, Params().GetConsensus());
uint256 hashBlock(pblock->GetHash());
{
LOCK(cs_most_recent_block);
most_recent_block_hash = hashBlock;
most_recent_block = pblock;
most_recent_compact_block = pcmpctblock;
fWitnessesPresentInMostRecentCompactBlock = fWitnessEnabled;
}
connman->ForEachNode([this, &pcmpctblock, pindex, &msgMaker, fWitnessEnabled, &hashBlock](CNode* pnode) {
// TODO: Avoid the repeated-serialization here
if (pnode->nVersion < INVALID_CB_NO_BAN_VERSION || pnode->fDisconnect)
return;
ProcessBlockAvailability(pnode->GetId());
CNodeState &state = *State(pnode->GetId());
// If the peer has, or we announced to them the previous block already,
// but we don't think they have this one, go ahead and announce it
if (state.fPreferHeaderAndIDs && (!fWitnessEnabled || state.fWantsCmpctWitness) &&
!PeerHasHeader(&state, pindex) && PeerHasHeader(&state, pindex->pprev)) {
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", "PeerLogicValidation::NewPoWValidBlock",
hashBlock.ToString(), pnode->GetId());
connman->PushMessage(pnode, msgMaker.Make(NetMsgType::CMPCTBLOCK, *pcmpctblock));
state.pindexBestHeaderSent = pindex;
}
});
}
void PeerLogicValidation::UpdatedBlockTip(const CBlockIndex *pindexNew, const CBlockIndex *pindexFork, bool fInitialDownload) {
const int nNewHeight = pindexNew->nHeight;
connman->SetBestHeight(nNewHeight);
if (!fInitialDownload) {
// Find the hashes of all blocks that weren't previously in the best chain.
std::vector<uint256> vHashes;
const CBlockIndex *pindexToAnnounce = pindexNew;
while (pindexToAnnounce != pindexFork) {
vHashes.push_back(pindexToAnnounce->GetBlockHash());
pindexToAnnounce = pindexToAnnounce->pprev;
if (vHashes.size() == MAX_BLOCKS_TO_ANNOUNCE) {
// Limit announcements in case of a huge reorganization.
// Rely on the peer's synchronization mechanism in that case.
break;
}
}
// Relay inventory, but don't relay old inventory during initial block download.
connman->ForEachNode([nNewHeight, &vHashes](CNode* pnode) {
if (nNewHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : 0)) {
for (const uint256& hash : reverse_iterate(vHashes)) {
pnode->PushBlockHash(hash);
}
}
});
connman->WakeMessageHandler();
}
nTimeBestReceived = GetTime();
}
void PeerLogicValidation::BlockChecked(const CBlock& block, const CValidationState& state) {
LOCK(cs_main);
const uint256 hash(block.GetHash());
std::map<uint256, std::pair<NodeId, bool>>::iterator it = mapBlockSource.find(hash);
int nDoS = 0;
if (state.IsInvalid(nDoS)) {
// Don't send reject message with code 0 or an internal reject code.
if (it != mapBlockSource.end() && State(it->second.first) && state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) {
CBlockReject reject = {(unsigned char)state.GetRejectCode(), state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), hash};
State(it->second.first)->rejects.push_back(reject);
if (nDoS > 0 && it->second.second)
Misbehaving(it->second.first, nDoS);
}
}
// Check that:
// 1. The block is valid
// 2. We're not in initial block download
// 3. This is currently the best block we're aware of. We haven't updated
// the tip yet so we have no way to check this directly here. Instead we
// just check that there are currently no other blocks in flight.
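    // Note on the mapBlocksInFlight check below: count(hash) == size() can
    // only hold when the map is empty (nothing in flight) or when this hash
    // is the sole in-flight entry -- the closest available proxy here for
    // "no other blocks in flight".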
else if (state.IsValid() &&
!IsInitialBlockDownload() &&
mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) {
if (it != mapBlockSource.end()) {
MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, connman);
}
}
if (it != mapBlockSource.end())
mapBlockSource.erase(it);
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(const CInv& inv) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
switch (inv.type)
{
case MSG_TX:
case MSG_WITNESS_TX:
{
assert(recentRejects);
if (chainActive.Tip()->GetBlockHash() != hashRecentRejectsChainTip)
{
            // If the chain tip has changed, previously rejected transactions
            // might now be valid, e.g. due to a nLockTime'd tx becoming valid,
// or a double-spend. Reset the rejects filter and give those
// txs a second chance.
hashRecentRejectsChainTip = chainActive.Tip()->GetBlockHash();
recentRejects->reset();
}
return recentRejects->contains(inv.hash) ||
mempool.exists(inv.hash) ||
mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 0)) || // Best effort: only try output 0 and 1
pcoinsTip->HaveCoinInCache(COutPoint(inv.hash, 1));
}
case MSG_BLOCK:
case MSG_WITNESS_BLOCK:
return mapBlockIndex.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
static void RelayTransaction(const CTransaction& tx, CConnman* connman)
{
CInv inv(MSG_TX, tx.GetHash());
connman->ForEachNode([&inv](CNode* pnode)
{
pnode->PushInventory(inv);
});
}
static void RelayAddress(const CAddress& addr, bool fReachable, CConnman* connman)
{
unsigned int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the addrKnowns of the chosen nodes prevent repeats
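    // Sketch of the bucketing below (values illustrative): with GetTime() ==
    // 1500000000 and hashAddr == 0x1234, the second Write() input is
    // (1500000000 + 0x1234) / 86400, which only changes once per day per
    // address, so the same relay targets are chosen for ~24 hours.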
uint64_t hashAddr = addr.GetHash();
const CSipHasher hasher = connman->GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24*60*60));
FastRandomContext insecure_rand;
std::array<std::pair<uint64_t, CNode*>,2> best{{{0, nullptr}, {0, nullptr}}};
assert(nRelayNodes <= best.size());
auto sortfunc = [&best, &hasher, nRelayNodes](CNode* pnode) {
if (pnode->nVersion >= CADDR_TIME_VERSION) {
uint64_t hashKey = CSipHasher(hasher).Write(pnode->GetId()).Finalize();
for (unsigned int i = 0; i < nRelayNodes; i++) {
if (hashKey > best[i].first) {
std::copy(best.begin() + i, best.begin() + nRelayNodes - 1, best.begin() + i + 1);
best[i] = std::make_pair(hashKey, pnode);
break;
}
}
}
};
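    // sortfunc above is one step of an insertion sort: it keeps the
    // nRelayNodes largest hashKeys seen so far in best[], in descending
    // order, shifting lower-ranked entries down before overwriting a slot.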
auto pushfunc = [&addr, &best, nRelayNodes, &insecure_rand] {
for (unsigned int i = 0; i < nRelayNodes && best[i].first != 0; i++) {
best[i].second->PushAddress(addr, insecure_rand);
}
};
connman->ForEachNodeThen(std::move(sortfunc), std::move(pushfunc));
}
void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();
std::vector<CInv> vNotFound;
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
LOCK(cs_main);
while (it != pfrom->vRecvGetData.end()) {
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
break;
const CInv &inv = *it;
{
if (interruptMsgProc)
return;
it++;
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
{
bool send = false;
BlockMap::iterator mi = mapBlockIndex.find(inv.hash);
std::shared_ptr<const CBlock> a_recent_block;
std::shared_ptr<const CBlockHeaderAndShortTxIDs> a_recent_compact_block;
bool fWitnessesPresentInARecentCompactBlock;
{
LOCK(cs_most_recent_block);
a_recent_block = most_recent_block;
a_recent_compact_block = most_recent_compact_block;
fWitnessesPresentInARecentCompactBlock = fWitnessesPresentInMostRecentCompactBlock;
}
if (mi != mapBlockIndex.end())
{
if (mi->second->nChainTx && !mi->second->IsValid(BLOCK_VALID_SCRIPTS) &&
mi->second->IsValid(BLOCK_VALID_TREE)) {
// If we have the block and all of its parents, but have not yet validated it,
// we might be in the middle of connecting it (ie in the unlock of cs_main
// before ActivateBestChain but after AcceptBlock).
// In this case, we need to run ActivateBestChain prior to checking the relay
// conditions below.
CValidationState dummy;
ActivateBestChain(dummy, Params(), a_recent_block);
}
if (chainActive.Contains(mi->second)) {
send = true;
} else {
static const int nOneMonth = 30 * 24 * 60 * 60;
// To prevent fingerprinting attacks, only send blocks outside of the active
// chain if they are valid, and no more than a month older (both in time, and in
// best equivalent proof of work) than the best header chain we know about.
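                    // Illustrative: with nOneMonth == 30 days, a stale-branch
                    // block is served only if it is script-valid AND its
                    // timestamp is within 30 days of pindexBestHeader AND the
                    // work-equivalent time gap (GetBlockProofEquivalentTime)
                    // is also under 30 days.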
send = mi->second->IsValid(BLOCK_VALID_SCRIPTS) && (pindexBestHeader != nullptr) &&
(pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() < nOneMonth) &&
(GetBlockProofEquivalentTime(*pindexBestHeader, *mi->second, *pindexBestHeader, consensusParams) < nOneMonth);
if (!send) {
LogPrintf("%s: ignoring request from peer=%i for old block that isn't in the main chain\n", __func__, pfrom->GetId());
}
}
}
// disconnect node in case we have reached the outbound limit for serving historical blocks
// never disconnect whitelisted nodes
static const int nOneWeek = 7 * 24 * 60 * 60; // assume > 1 week = historical
if (send && connman->OutboundTargetReached(true) && ( ((pindexBestHeader != nullptr) && (pindexBestHeader->GetBlockTime() - mi->second->GetBlockTime() > nOneWeek)) || inv.type == MSG_FILTERED_BLOCK) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "historical block serving limit reached, disconnect peer=%d\n", pfrom->GetId());
//disconnect node
pfrom->fDisconnect = true;
send = false;
}
// Pruned nodes may have deleted the block, so check whether
// it's available before trying to send.
if (send && (mi->second->nStatus & BLOCK_HAVE_DATA))
{
std::shared_ptr<const CBlock> pblock;
if (a_recent_block && a_recent_block->GetHash() == (*mi).second->GetBlockHash()) {
pblock = a_recent_block;
} else {
// Send block from disk
std::shared_ptr<CBlock> pblockRead = std::make_shared<CBlock>();
if (!ReadBlockFromDisk(*pblockRead, (*mi).second, consensusParams))
assert(!"cannot load block from disk");
pblock = pblockRead;
}
if (inv.type == MSG_BLOCK)
connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_WITNESS_BLOCK)
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::BLOCK, *pblock));
else if (inv.type == MSG_FILTERED_BLOCK)
{
bool sendMerkleBlock = false;
CMerkleBlock merkleBlock;
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
sendMerkleBlock = true;
merkleBlock = CMerkleBlock(*pblock, *pfrom->pfilter);
}
}
if (sendMerkleBlock) {
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::MERKLEBLOCK, merkleBlock));
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
// Note that there is currently no way for a node to request any single transactions we didn't send here -
// they must either disconnect and retry or request the full block.
                        // Thus, the protocol spec allows us to provide duplicate txn here,
// however we MUST always provide at least what the remote peer needs
typedef std::pair<unsigned int, uint256> PairType;
for (PairType& pair : merkleBlock.vMatchedTxn)
connman->PushMessage(pfrom, msgMaker.Make(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::TX, *pblock->vtx[pair.first]));
}
// else
// no response
}
else if (inv.type == MSG_CMPCT_BLOCK)
{
// If a peer is asking for old blocks, we're almost guaranteed
// they won't have a useful mempool to match against a compact block,
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
int nSendFlags = fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
if (CanDirectFetch(consensusParams) && mi->second->nHeight >= chainActive.Height() - MAX_CMPCTBLOCK_DEPTH) {
if ((fPeerWantsWitness || !fWitnessesPresentInARecentCompactBlock) && a_recent_compact_block && a_recent_compact_block->header.GetHash() == mi->second->GetBlockHash()) {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *a_recent_compact_block));
} else {
CBlockHeaderAndShortTxIDs cmpctblock(*pblock, fPeerWantsWitness);
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
} else {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCK, *pblock));
}
}
// Trigger the peer node to send a getblocks request for the next batch of inventory
if (inv.hash == pfrom->hashContinue)
{
// Bypass PushInventory, this must send even if redundant,
// and we want it right after the last block so they don't
// wait for other stuff first.
std::vector<CInv> vInv;
vInv.push_back(CInv(MSG_BLOCK, chainActive.Tip()->GetBlockHash()));
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::INV, vInv));
pfrom->hashContinue.SetNull();
}
}
}
else if (inv.type == MSG_TX || inv.type == MSG_WITNESS_TX)
{
// Send stream from relay memory
bool push = false;
auto mi = mapRelay.find(inv.hash);
int nSendFlags = (inv.type == MSG_TX ? SERIALIZE_TRANSACTION_NO_WITNESS : 0);
if (mi != mapRelay.end()) {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *mi->second));
push = true;
} else if (pfrom->timeLastMempoolReq) {
auto txinfo = mempool.info(inv.hash);
// To protect privacy, do not answer getdata using the mempool when
// that TX couldn't have been INVed in reply to a MEMPOOL request.
if (txinfo.tx && txinfo.nTime <= pfrom->timeLastMempoolReq) {
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *txinfo.tx));
push = true;
}
}
if (!push) {
vNotFound.push_back(inv);
}
}
// Track requests for our stuff.
GetMainSignals().Inventory(inv.hash);
if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK || inv.type == MSG_CMPCT_BLOCK || inv.type == MSG_WITNESS_BLOCK)
break;
}
}
pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);
if (!vNotFound.empty()) {
// Let the peer know that we didn't find what it asked for, so it doesn't
// have to wait around forever. Currently only SPV clients actually care
// about this message: it's needed when they are recursively walking the
// dependencies of relevant unconfirmed transactions. SPV clients want to
// do that because they want to know about (and store and rebroadcast and
// risk analyze) the dependencies of transactions relevant to them, without
// having to download the entire memory pool.
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound));
}
}
uint32_t GetFetchFlags(CNode* pfrom) {
uint32_t nFetchFlags = 0;
if ((pfrom->GetLocalServices() & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
nFetchFlags |= MSG_WITNESS_FLAG;
}
return nFetchFlags;
}
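// Illustrative use of the returned flag: OR-ing it into an inv type upgrades
// the request to its witness variant, e.g. MSG_TX | MSG_WITNESS_FLAG ==
// MSG_WITNESS_TX, so a witness-capable peer is asked for the
// witness-serialized transaction.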
inline void static SendBlockTransactions(const CBlock& block, const BlockTransactionsRequest& req, CNode* pfrom, CConnman* connman) {
BlockTransactions resp(req);
for (size_t i = 0; i < req.indexes.size(); i++) {
if (req.indexes[i] >= block.vtx.size()) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
LogPrintf("Peer %d sent us a getblocktxn with out-of-bounds tx indices", pfrom->GetId());
return;
}
resp.txn[i] = block.vtx[req.indexes[i]];
}
LOCK(cs_main);
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
int nSendFlags = State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
connman->PushMessage(pfrom, msgMaker.Make(nSendFlags, NetMsgType::BLOCKTXN, resp));
}
bool static ProcessHeadersMessage(CNode *pfrom, CConnman *connman, const std::vector<CBlockHeader>& headers, const CChainParams& chainparams, bool punish_duplicate_invalid)
{
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
size_t nCount = headers.size();
if (nCount == 0) {
        // Nothing interesting. Stop asking this peer for more headers.
return true;
}
bool received_new_header = false;
const CBlockIndex *pindexLast = nullptr;
{
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
// If this looks like it could be a block announcement (nCount <
// MAX_BLOCKS_TO_ANNOUNCE), use special logic for handling headers that
// don't connect:
// - Send a getheaders message in response to try to connect the chain.
// - The peer can send up to MAX_UNCONNECTING_HEADERS in a row that
// don't connect before giving DoS points
// - Once a headers message is received that is valid and does connect,
// nUnconnectingHeaders gets reset back to 0.
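        // Illustrative, assuming the upstream default MAX_UNCONNECTING_HEADERS
        // of 10: a peer whose announcements repeatedly fail to connect accrues
        // 20 misbehavior points on every 10th such message, while a single
        // connecting headers message resets the counter to 0.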
if (mapBlockIndex.find(headers[0].hashPrevBlock) == mapBlockIndex.end() && nCount < MAX_BLOCKS_TO_ANNOUNCE) {
nodestate->nUnconnectingHeaders++;
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
pindexBestHeader->nHeight,
pfrom->GetId(), nodestate->nUnconnectingHeaders);
// Set hashLastUnknownBlock for this peer, so that if we
// eventually get the headers - even from a different peer -
// we can use this peer to download.
UpdateBlockAvailability(pfrom->GetId(), headers.back().GetHash());
if (nodestate->nUnconnectingHeaders % MAX_UNCONNECTING_HEADERS == 0) {
Misbehaving(pfrom->GetId(), 20);
}
return true;
}
uint256 hashLastBlock;
for (const CBlockHeader& header : headers) {
if (!hashLastBlock.IsNull() && header.hashPrevBlock != hashLastBlock) {
Misbehaving(pfrom->GetId(), 20);
return error("non-continuous headers sequence");
}
hashLastBlock = header.GetHash();
}
// If we don't have the last header, then they'll have given us
// something new (if these headers are valid).
if (mapBlockIndex.find(hashLastBlock) == mapBlockIndex.end()) {
received_new_header = true;
}
}
CValidationState state;
CBlockHeader first_invalid_header;
if (!ProcessNewBlockHeaders(headers, state, chainparams, &pindexLast, &first_invalid_header)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
LOCK(cs_main);
if (nDoS > 0) {
Misbehaving(pfrom->GetId(), nDoS);
}
if (punish_duplicate_invalid && mapBlockIndex.find(first_invalid_header.GetHash()) != mapBlockIndex.end()) {
// Goal: don't allow outbound peers to use up our outbound
// connection slots if they are on incompatible chains.
//
                // We ask the caller to set punish_duplicate_invalid appropriately based
// on the peer and the method of header delivery (compact
// blocks are allowed to be invalid in some circumstances,
// under BIP 152).
// Here, we try to detect the narrow situation that we have a
// valid block header (ie it was valid at the time the header
// was received, and hence stored in mapBlockIndex) but know the
// block is invalid, and that a peer has announced that same
// block as being on its active chain.
// Disconnect the peer in such a situation.
//
// Note: if the header that is invalid was not accepted to our
// mapBlockIndex at all, that may also be grounds for
// disconnecting the peer, as the chain they are on is likely
// to be incompatible. However, there is a circumstance where
// that does not hold: if the header's timestamp is more than
// 2 hours ahead of our current time. In that case, the header
// may become valid in the future, and we don't want to
// disconnect a peer merely for serving us one too-far-ahead
// block header, to prevent an attacker from splitting the
// network by mining a block right at the 2 hour boundary.
//
// TODO: update the DoS logic (or, rather, rewrite the
// DoS-interface between validation and net_processing) so that
// the interface is cleaner, and so that we disconnect on all the
// reasons that a peer's headers chain is incompatible
// with ours (eg block->nVersion softforks, MTP violations,
// etc), and not just the duplicate-invalid case.
pfrom->fDisconnect = true;
}
return error("invalid header received");
}
}
{
LOCK(cs_main);
CNodeState *nodestate = State(pfrom->GetId());
if (nodestate->nUnconnectingHeaders > 0) {
LogPrint(BCLog::NET, "peer=%d: resetting nUnconnectingHeaders (%d -> 0)\n", pfrom->GetId(), nodestate->nUnconnectingHeaders);
}
nodestate->nUnconnectingHeaders = 0;
assert(pindexLast);
UpdateBlockAvailability(pfrom->GetId(), pindexLast->GetBlockHash());
// From here, pindexBestKnownBlock should be guaranteed to be non-null,
// because it is set in UpdateBlockAvailability. Some nullptr checks
// are still present, however, as belt-and-suspenders.
if (received_new_header && pindexLast->nChainWork > chainActive.Tip()->nChainWork) {
nodestate->m_last_block_announcement = GetTime();
}
if (nCount == MAX_HEADERS_RESULTS) {
// Headers message had its maximum size; the peer may have more headers.
// TODO: optimize: if pindexLast is an ancestor of chainActive.Tip or pindexBestHeader, continue
// from there instead.
LogPrint(BCLog::NET, "more getheaders (%d) to end to peer=%d (startheight:%d)\n", pindexLast->nHeight, pfrom->GetId(), pfrom->nStartingHeight);
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexLast), uint256()));
}
bool fCanDirectFetch = CanDirectFetch(chainparams.GetConsensus());
// If this set of headers is valid and ends in a block with at least as
// much work as our tip, download as much as possible.
if (fCanDirectFetch && pindexLast->IsValid(BLOCK_VALID_TREE) && chainActive.Tip()->nChainWork <= pindexLast->nChainWork) {
std::vector<const CBlockIndex*> vToFetch;
const CBlockIndex *pindexWalk = pindexLast;
// Calculate all the blocks we'd need to switch to pindexLast, up to a limit.
while (pindexWalk && !chainActive.Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) &&
!mapBlocksInFlight.count(pindexWalk->GetBlockHash()) &&
(!IsWitnessEnabled(pindexWalk->pprev, chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
// We don't have this block, and it's not yet in flight.
vToFetch.push_back(pindexWalk);
}
pindexWalk = pindexWalk->pprev;
}
// If pindexWalk still isn't on our main chain, we're looking at a
// very large reorg at a time we think we're close to caught up to
// the main chain -- this shouldn't really happen. Bail out on the
// direct fetch and rely on parallel download instead.
if (!chainActive.Contains(pindexWalk)) {
LogPrint(BCLog::NET, "Large reorg, won't direct fetch to %s (%d)\n",
pindexLast->GetBlockHash().ToString(),
pindexLast->nHeight);
} else {
std::vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
}
uint32_t nFetchFlags = GetFetchFlags(pfrom);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex);
LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
pindex->GetBlockHash().ToString(), pfrom->GetId());
}
if (vGetData.size() > 1) {
LogPrint(BCLog::NET, "Downloading blocks toward %s (%d) via headers direct fetch\n",
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
// In any case, we want to download using a compact block, not a regular one
vGetData[0] = CInv(MSG_CMPCT_BLOCK, vGetData[0].hash);
}
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vGetData));
}
}
}
// If we're in IBD, we want outbound peers that will serve us a useful
// chain. Disconnect peers that are on chains with insufficient work.
if (IsInitialBlockDownload() && nCount != MAX_HEADERS_RESULTS) {
// When nCount < MAX_HEADERS_RESULTS, we know we have no more
// headers to fetch from this peer.
if (nodestate->pindexBestKnownBlock && nodestate->pindexBestKnownBlock->nChainWork < nMinimumChainWork) {
// This peer has too little work on their headers chain to help
// us sync -- disconnect if using an outbound slot (unless
// whitelisted or addnode).
// Note: We compare their tip to nMinimumChainWork (rather than
// chainActive.Tip()) because we won't start block download
// until we have a headers chain that has at least
// nMinimumChainWork, even if a peer has a chain past our tip,
// as an anti-DoS measure.
if (IsOutboundDisconnectionCandidate(pfrom)) {
LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom->GetId());
pfrom->fDisconnect = true;
}
}
}
if (!pfrom->fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr) {
// If this is an outbound peer, check to see if we should protect
// it from the bad/lagging chain logic.
if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) {
LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom->GetId());
nodestate->m_chain_sync.m_protect = true;
++g_outbound_peers_with_protect_from_disconnect;
}
}
}
return true;
}
bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStream& vRecv, int64_t nTimeReceived, const CChainParams& chainparams, CConnman* connman, const std::atomic<bool>& interruptMsgProc)
{
LogPrint(BCLog::NET, "received: %s (%u bytes) peer=%d\n", SanitizeString(strCommand), vRecv.size(), pfrom->GetId());
if (gArgs.IsArgSet("-dropmessagestest") && GetRand(gArgs.GetArg("-dropmessagestest", 0)) == 0)
{
LogPrintf("dropmessagestest DROPPING RECV MESSAGE\n");
return true;
}
if (!(pfrom->GetLocalServices() & NODE_BLOOM) &&
(strCommand == NetMsgType::FILTERLOAD ||
strCommand == NetMsgType::FILTERADD))
{
if (pfrom->nVersion >= NO_BLOOM_VERSION) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
return false;
} else {
pfrom->fDisconnect = true;
return false;
}
}
if (strCommand == NetMsgType::REJECT)
{
if (LogAcceptCategory(BCLog::NET)) {
try {
std::string strMsg; unsigned char ccode; std::string strReason;
vRecv >> LIMITED_STRING(strMsg, CMessageHeader::COMMAND_SIZE) >> ccode >> LIMITED_STRING(strReason, MAX_REJECT_MESSAGE_LENGTH);
std::ostringstream ss;
ss << strMsg << " code " << itostr(ccode) << ": " << strReason;
if (strMsg == NetMsgType::BLOCK || strMsg == NetMsgType::TX)
{
uint256 hash;
vRecv >> hash;
ss << ": hash " << hash.ToString();
}
LogPrint(BCLog::NET, "Reject %s\n", SanitizeString(ss.str()));
} catch (const std::ios_base::failure&) {
// Avoid feedback loops by preventing reject messages from triggering a new reject message.
LogPrint(BCLog::NET, "Unparseable reject message received\n");
}
}
}
else if (strCommand == NetMsgType::VERSION)
{
// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_DUPLICATE, std::string("Duplicate version message")));
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
}
int64_t nTime;
CAddress addrMe;
CAddress addrFrom;
uint64_t nNonce = 1;
uint64_t nServiceInt;
ServiceFlags nServices;
int nVersion;
int nSendVersion;
std::string strSubVer;
std::string cleanSubVer;
int nStartingHeight = -1;
bool fRelay = true;
vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
nServices = ServiceFlags(nServiceInt);
if (!pfrom->fInbound)
{
connman->SetServices(pfrom->addr, nServices);
}
if (pfrom->nServicesExpected & ~nServices)
{
LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom->GetId(), nServices, pfrom->nServicesExpected);
connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_NONSTANDARD,
strprintf("Expected to offer services %08x", pfrom->nServicesExpected)));
pfrom->fDisconnect = true;
return false;
}
if (nServices & ((1 << 7) | (1 << 5))) {
if (GetTime() < 1533096000) {
// Immediately disconnect peers that use service bits 6 or 8 until August 1st, 2018
// These bits have been used as a flag to indicate that a node is running incompatible
// consensus rules instead of changing the network magic, so we're stuck disconnecting
// based on these service bits, at least for a while.
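            // For clarity (illustrative): the mask above covers (1 << 5) and
            // (1 << 7); the comment counts bits from 1, so these correspond
            // to "service bit 6" and "service bit 8" respectively.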
pfrom->fDisconnect = true;
return false;
}
}
if (nVersion < MIN_PEER_PROTO_VERSION)
{
// disconnect from peers older than this proto version
LogPrintf("peer=%d using obsolete version %i; disconnecting\n", pfrom->GetId(), nVersion);
connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
strprintf("Version must be %d or greater", MIN_PEER_PROTO_VERSION)));
pfrom->fDisconnect = true;
return false;
}
if (nVersion == 10300)
nVersion = 300;
if (!vRecv.empty())
vRecv >> addrFrom >> nNonce;
if (!vRecv.empty()) {
vRecv >> LIMITED_STRING(strSubVer, MAX_SUBVERSION_LENGTH);
cleanSubVer = SanitizeString(strSubVer);
}
if (!vRecv.empty()) {
vRecv >> nStartingHeight;
}
if (!vRecv.empty())
vRecv >> fRelay;
// Disconnect if we connected to ourself
if (pfrom->fInbound && !connman->CheckIncomingNonce(nNonce))
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom->addr.ToString());
pfrom->fDisconnect = true;
return true;
}
if (pfrom->fInbound && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
if (pfrom->fInbound)
PushNodeVersion(pfrom, connman, GetAdjustedTime());
connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERACK));
pfrom->nServices = nServices;
pfrom->SetAddrLocal(addrMe);
{
LOCK(pfrom->cs_SubVer);
pfrom->strSubVer = strSubVer;
pfrom->cleanSubVer = cleanSubVer;
}
pfrom->nStartingHeight = nStartingHeight;
pfrom->fClient = !(nServices & NODE_NETWORK);
{
LOCK(pfrom->cs_filter);
pfrom->fRelayTxes = fRelay; // set to true after we get the first filter* message
}
// Change version
pfrom->SetSendVersion(nSendVersion);
pfrom->nVersion = nVersion;
if((nServices & NODE_WITNESS))
{
LOCK(cs_main);
State(pfrom->GetId())->fHaveWitness = true;
}
// Potentially mark this peer as a preferred download peer.
{
LOCK(cs_main);
UpdatePreferredDownload(pfrom, State(pfrom->GetId()));
}
if (!pfrom->fInbound)
{
// Advertise our address
if (fListen && !IsInitialBlockDownload())
{
CAddress addr = GetLocalAddress(&pfrom->addr, pfrom->GetLocalServices());
FastRandomContext insecure_rand;
if (addr.IsRoutable())
{
LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr, insecure_rand);
} else if (IsPeerAddrLocalGood(pfrom)) {
addr.SetIP(addrMe);
LogPrint(BCLog::NET, "ProcessMessages: advertising address %s\n", addr.ToString());
pfrom->PushAddress(addr, insecure_rand);
}
}
// Get recent addresses
if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || connman->GetAddressCount() < 1000)
{
connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make(NetMsgType::GETADDR));
pfrom->fGetAddr = true;
}
connman->MarkAddressGood(pfrom->addr);
}
std::string remoteAddr;
if (fLogIPs)
remoteAddr = ", peeraddr=" + pfrom->addr.ToString();
LogPrintf("receive version message: %s: version %d, blocks=%d, us=%s, peer=%d%s\n",
cleanSubVer, pfrom->nVersion,
pfrom->nStartingHeight, addrMe.ToString(), pfrom->GetId(),
remoteAddr);
int64_t nTimeOffset = nTime - GetTime();
pfrom->nTimeOffset = nTimeOffset;
AddTimeData(pfrom->addr, nTimeOffset);
// If the peer is old enough to have the old alert system, send it the final alert.
if (pfrom->nVersion <= 70012) {
CDataStream finalAlert(ParseHex("5c0100000015f7675900000000ffffff7f00000000ffffff7ffeffff7f0000000000ffffff7f00ffffff7f002f555247454e543a20416c657274206b657920636f6d70726f6d697365642c2075706772616465207265717569726564004630440220405f7e7572b176f3316d4e12deab75ad4ff978844f7a7bcd5ed06f6aa094eb6602207880fcc07d0a78e0f46f188d115e04ed4ad48980ea3572cb0e0cb97921048095"), SER_NETWORK, PROTOCOL_VERSION);
connman->PushMessage(pfrom, CNetMsgMaker(nSendVersion).Make("alert", finalAlert));
}
        // Feeler connections exist only to verify if an address is online.
if (pfrom->fFeeler) {
assert(pfrom->fInbound == false);
pfrom->fDisconnect = true;
}
return true;
}
else if (pfrom->nVersion == 0)
{
// Must have a version message before anything else
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
}
// At this point, the outgoing message serialization version can't change.
const CNetMsgMaker msgMaker(pfrom->GetSendVersion());
if (strCommand == NetMsgType::VERACK)
{
pfrom->SetRecvVersion(std::min(pfrom->nVersion.load(), PROTOCOL_VERSION));
if (!pfrom->fInbound) {
// Mark this node as currently connected, so we update its timestamp later.
LOCK(cs_main);
State(pfrom->GetId())->fCurrentlyConnected = true;
}
if (pfrom->nVersion >= SENDHEADERS_VERSION) {
// Tell our peer we prefer to receive headers rather than inv's
// We send this to non-NODE NETWORK peers as well, because even
// non-NODE NETWORK peers can announce blocks (such as pruning
// nodes)
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDHEADERS));
}
if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version 1 or 2 cmpctblocks
// However, we do not request new block announcements using
// cmpctblock messages.
// We send this to non-NODE NETWORK peers as well, because
// they may wish to request compact blocks from us
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 2;
if (pfrom->GetLocalServices() & NODE_WITNESS)
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
nCMPCTBLOCKVersion = 1;
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion));
}
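        // Note: version 2 is deliberately announced before version 1; the
        // SENDCMPCT handler below locks in the first version it can support,
        // so witness-capable peers settle on version 2 compact blocks.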
pfrom->fSuccessfullyConnected = true;
}
else if (!pfrom->fSuccessfullyConnected)
{
// Must have a verack message before anything else
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 1);
return false;
}
else if (strCommand == NetMsgType::ADDR)
{
std::vector<CAddress> vAddr;
vRecv >> vAddr;
// Don't want addr from older versions unless seeding
if (pfrom->nVersion < CADDR_TIME_VERSION && connman->GetAddressCount() > 1000)
return true;
if (vAddr.size() > 1000)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message addr size() = %u", vAddr.size());
}
// Store the new addresses
std::vector<CAddress> vAddrOk;
int64_t nNow = GetAdjustedTime();
int64_t nSince = nNow - 10 * 60;
for (CAddress& addr : vAddr)
{
if (interruptMsgProc)
return true;
if ((addr.nServices & REQUIRED_SERVICES) != REQUIRED_SERVICES)
continue;
if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
addr.nTime = nNow - 5 * 24 * 60 * 60;
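            // Illustrative: 100000000 seconds past the epoch is early 1973,
            // a cheap lower bound on plausible timestamps; out-of-range times
            // are rewritten to five days in the past so the address is simply
            // treated as moderately stale.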
pfrom->AddAddressKnown(addr);
bool fReachable = IsReachable(addr);
if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
{
// Relay to a limited number of other nodes
RelayAddress(addr, fReachable, connman);
}
// Do not store addresses outside our network
if (fReachable)
vAddrOk.push_back(addr);
}
connman->AddNewAddresses(vAddrOk, pfrom->addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom->fGetAddr = false;
if (pfrom->fOneShot)
pfrom->fDisconnect = true;
}
else if (strCommand == NetMsgType::SENDHEADERS)
{
LOCK(cs_main);
State(pfrom->GetId())->fPreferHeaders = true;
}
else if (strCommand == NetMsgType::SENDCMPCT)
{
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 0;
vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
if (nCMPCTBLOCKVersion == 1 || ((pfrom->GetLocalServices() & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
LOCK(cs_main);
// fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
}
if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
if (pfrom->GetLocalServices() & NODE_WITNESS)
State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
else
State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
}
}
}
else if (strCommand == NetMsgType::INV)
{
std::vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message inv size() = %u", vInv.size());
}
bool fBlocksOnly = !fRelayTxes;
// Allow whitelisted peers to send data other than blocks in blocks only mode if whitelistrelay is true
if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY))
fBlocksOnly = false;
LOCK(cs_main);
uint32_t nFetchFlags = GetFetchFlags(pfrom);
for (CInv &inv : vInv)
{
if (interruptMsgProc)
return true;
bool fAlreadyHave = AlreadyHave(inv);
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom->GetId());
if (inv.type == MSG_TX) {
inv.type |= nFetchFlags;
}
if (inv.type == MSG_BLOCK) {
UpdateBlockAvailability(pfrom->GetId(), inv.hash);
if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) {
// We used to request the full block here, but since headers-announcements are now the
// primary method of announcement on the network, and since, in the case that a node
// fell back to inv we probably have a reorg which we should get the headers for first,
// we now only provide a getheaders response here. When we receive the headers, we will
// then ask for the blocks we need.
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), inv.hash));
LogPrint(BCLog::NET, "getheaders (%d) %s to peer=%d\n", pindexBestHeader->nHeight, inv.hash.ToString(), pfrom->GetId());
}
}
else
{
pfrom->AddInventoryKnown(inv);
if (fBlocksOnly) {
LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol peer=%d\n", inv.hash.ToString(), pfrom->GetId());
} else if (!fAlreadyHave && !fImporting && !fReindex && !IsInitialBlockDownload()) {
pfrom->AskFor(inv);
}
}
// Track requests for our stuff
GetMainSignals().Inventory(inv.hash);
}
}
else if (strCommand == NetMsgType::GETDATA)
{
std::vector<CInv> vInv;
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("message getdata size() = %u", vInv.size());
}
LogPrint(BCLog::NET, "received getdata (%u invsz) peer=%d\n", vInv.size(), pfrom->GetId());
if (vInv.size() > 0) {
LogPrint(BCLog::NET, "received getdata for: %s peer=%d\n", vInv[0].ToString(), pfrom->GetId());
}
pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
}
else if (strCommand == NetMsgType::GETBLOCKS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
// We might have announced the currently-being-connected tip using a
// compact block, which resulted in the peer sending a getblocks
// request, which we would otherwise respond to without the new block.
// To avoid this situation we simply verify that we are on our best
// known chain now. This is super overkill, but we handle it better
// for getheaders requests, and there are no known nodes which support
// compact blocks but still use getblocks to request blocks.
{
std::shared_ptr<const CBlock> a_recent_block;
{
LOCK(cs_most_recent_block);
a_recent_block = most_recent_block;
}
CValidationState dummy;
ActivateBestChain(dummy, Params(), a_recent_block);
}
LOCK(cs_main);
// Find the last block the caller has in the main chain
const CBlockIndex* pindex = FindForkInGlobalIndex(chainActive, locator);
// Send the rest of the chain
if (pindex)
pindex = chainActive.Next(pindex);
int nLimit = 500;
LogPrint(BCLog::NET, "getblocks %d to %s limit %d from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), nLimit, pfrom->GetId());
for (; pindex; pindex = chainActive.Next(pindex))
{
if (pindex->GetBlockHash() == hashStop)
{
LogPrint(BCLog::NET, " getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
// If pruning, don't inv blocks unless we have on disk and are likely to still have
// for some reasonable time window (1 hour) that block relay might require.
const int nPrunedBlocksLikelyToHave = MIN_BLOCKS_TO_KEEP - 3600 / chainparams.GetConsensus().nPowTargetSpacing;
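            // Worked example, assuming upstream defaults MIN_BLOCKS_TO_KEEP ==
            // 288 and nPowTargetSpacing == 600: 288 - 3600/600 = 282 blocks,
            // i.e. a pruned node only invs blocks from roughly the last two
            // days of the chain.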
if (fPruneMode && (!(pindex->nStatus & BLOCK_HAVE_DATA) || pindex->nHeight <= chainActive.Tip()->nHeight - nPrunedBlocksLikelyToHave))
{
LogPrint(BCLog::NET, " getblocks stopping, pruned or too old block at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
break;
}
pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
if (--nLimit <= 0)
{
// When this block is requested, we'll send an inv that'll
// trigger the peer to getblocks the next batch of inventory.
LogPrint(BCLog::NET, " getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString());
pfrom->hashContinue = pindex->GetBlockHash();
break;
}
}
}
else if (strCommand == NetMsgType::GETBLOCKTXN)
{
BlockTransactionsRequest req;
vRecv >> req;
std::shared_ptr<const CBlock> recent_block;
{
LOCK(cs_most_recent_block);
if (most_recent_block_hash == req.blockhash)
recent_block = most_recent_block;
// Unlock cs_most_recent_block to avoid cs_main lock inversion
}
if (recent_block) {
SendBlockTransactions(*recent_block, req, pfrom, connman);
return true;
}
LOCK(cs_main);
BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->GetId());
return true;
}
if (it->second->nHeight < chainActive.Height() - MAX_BLOCKTXN_DEPTH) {
// If an older block is requested (should never happen in practice,
// but can happen in tests) send a block response instead of a
// blocktxn response. Sending a full block response instead of a
// small blocktxn response is preferable in the case where a peer
// might maliciously send lots of getblocktxn requests to trigger
// expensive disk reads, because it will require the peer to
// actually receive all the data read from disk over the network.
LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block > %i deep", pfrom->GetId(), MAX_BLOCKTXN_DEPTH);
CInv inv;
inv.type = State(pfrom->GetId())->fWantsCmpctWitness ? MSG_WITNESS_BLOCK : MSG_BLOCK;
inv.hash = req.blockhash;
pfrom->vRecvGetData.push_back(inv);
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
return true;
}
CBlock block;
bool ret = ReadBlockFromDisk(block, it->second, chainparams.GetConsensus());
assert(ret);
SendBlockTransactions(block, req, pfrom, connman);
}
else if (strCommand == NetMsgType::GETHEADERS)
{
CBlockLocator locator;
uint256 hashStop;
vRecv >> locator >> hashStop;
LOCK(cs_main);
if (IsInitialBlockDownload() && !pfrom->fWhitelisted) {
LogPrint(BCLog::NET, "Ignoring getheaders from peer=%d because node is in initial block download\n", pfrom->GetId());
return true;
}
CNodeState *nodestate = State(pfrom->GetId());
const CBlockIndex* pindex = nullptr;
if (locator.IsNull())
{
// If locator is null, return the hashStop block
BlockMap::iterator mi = mapBlockIndex.find(hashStop);
if (mi == mapBlockIndex.end())
return true;
pindex = (*mi).second;
}
else
{
// Find the last block the caller has in the main chain
pindex = FindForkInGlobalIndex(chainActive, locator);
if (pindex)
pindex = chainActive.Next(pindex);
}
// we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
std::vector<CBlock> vHeaders;
int nLimit = MAX_HEADERS_RESULTS;
LogPrint(BCLog::NET, "getheaders %d to %s from peer=%d\n", (pindex ? pindex->nHeight : -1), hashStop.IsNull() ? "end" : hashStop.ToString(), pfrom->GetId());
for (; pindex; pindex = chainActive.Next(pindex))
{
vHeaders.push_back(pindex->GetBlockHeader());
if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
break;
}
// pindex can be nullptr either if we sent chainActive.Tip() OR
// if our peer has chainActive.Tip() (and thus we are sending an empty
// headers message). In both cases it's safe to update
// pindexBestHeaderSent to be our tip.
//
// It is important that we simply reset the BestHeaderSent value here,
// and not max(BestHeaderSent, newHeaderSent). We might have announced
// the currently-being-connected tip using a compact block, which
// resulted in the peer sending a headers request, which we respond to
// without the new block. By resetting the BestHeaderSent, we ensure we
// will re-announce the new block via headers (or compact blocks again)
// in the SendMessages logic.
nodestate->pindexBestHeaderSent = pindex ? pindex : chainActive.Tip();
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
}
else if (strCommand == NetMsgType::TX)
{
        // Stop processing the transaction early if we are in blocks only mode
        // and the peer is either not whitelisted or whitelistrelay is off
if (!fRelayTxes && (!pfrom->fWhitelisted || !gArgs.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY)))
{
LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom->GetId());
return true;
}
std::deque<COutPoint> vWorkQueue;
std::vector<uint256> vEraseQueue;
CTransactionRef ptx;
vRecv >> ptx;
const CTransaction& tx = *ptx;
CInv inv(MSG_TX, tx.GetHash());
pfrom->AddInventoryKnown(inv);
LOCK(cs_main);
bool fMissingInputs = false;
CValidationState state;
pfrom->setAskFor.erase(inv.hash);
mapAlreadyAskedFor.erase(inv.hash);
std::list<CTransactionRef> lRemovedTxn;
if (!AlreadyHave(inv) && AcceptToMemoryPool(mempool, state, ptx, true, &fMissingInputs, &lRemovedTxn)) {
mempool.check(pcoinsTip);
RelayTransaction(tx, connman);
for (unsigned int i = 0; i < tx.vout.size(); i++) {
vWorkQueue.emplace_back(inv.hash, i);
}
pfrom->nLastTXTime = GetTime();
LogPrint(BCLog::MEMPOOL, "AcceptToMemoryPool: peer=%d: accepted %s (poolsz %u txn, %u kB)\n",
pfrom->GetId(),
tx.GetHash().ToString(),
mempool.size(), mempool.DynamicMemoryUsage() / 1000);
// Recursively process any orphan transactions that depended on this one
std::set<NodeId> setMisbehaving;
while (!vWorkQueue.empty()) {
auto itByPrev = mapOrphanTransactionsByPrev.find(vWorkQueue.front());
vWorkQueue.pop_front();
if (itByPrev == mapOrphanTransactionsByPrev.end())
continue;
for (auto mi = itByPrev->second.begin();
mi != itByPrev->second.end();
++mi)
{
const CTransactionRef& porphanTx = (*mi)->second.tx;
const CTransaction& orphanTx = *porphanTx;
const uint256& orphanHash = orphanTx.GetHash();
NodeId fromPeer = (*mi)->second.fromPeer;
bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't set up nodes to counter-DoS based on orphan
// resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
// anyone relaying LegitTxX banned)
CValidationState stateDummy;
if (setMisbehaving.count(fromPeer))
continue;
if (AcceptToMemoryPool(mempool, stateDummy, porphanTx, true, &fMissingInputs2, &lRemovedTxn)) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
RelayTransaction(orphanTx, connman);
for (unsigned int i = 0; i < orphanTx.vout.size(); i++) {
vWorkQueue.emplace_back(orphanHash, i);
}
vEraseQueue.push_back(orphanHash);
}
else if (!fMissingInputs2)
{
int nDos = 0;
if (stateDummy.IsInvalid(nDos) && nDos > 0)
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
setMisbehaving.insert(fromPeer);
LogPrint(BCLog::MEMPOOL, " invalid orphan tx %s\n", orphanHash.ToString());
}
// Has inputs but not accepted to mempool
// Probably non-standard or insufficient fee
LogPrint(BCLog::MEMPOOL, " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
if (!orphanTx.HasWitness() && !stateDummy.CorruptionPossible()) {
// Do not use rejection cache for witness transactions or
// witness-stripped transactions, as they can have been malleated.
// See https://github.com/bitcoin/bitcoin/issues/8279 for details.
assert(recentRejects);
recentRejects->insert(orphanHash);
}
}
mempool.check(pcoinsTip);
}
}
for (uint256 hash : vEraseQueue)
EraseOrphanTx(hash);
}
else if (fMissingInputs)
{
            bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
for (const CTxIn& txin : tx.vin) {
if (recentRejects->contains(txin.prevout.hash)) {
fRejectedParents = true;
break;
}
}
if (!fRejectedParents) {
uint32_t nFetchFlags = GetFetchFlags(pfrom);
for (const CTxIn& txin : tx.vin) {
CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
pfrom->AddInventoryKnown(_inv);
if (!AlreadyHave(_inv)) pfrom->AskFor(_inv);
}
AddOrphanTx(ptx, pfrom->GetId());
// DoS prevention: do not allow mapOrphanTransactions to grow unbounded
unsigned int nMaxOrphanTx = (unsigned int)std::max((int64_t)0, gArgs.GetArg("-maxorphantx", DEFAULT_MAX_ORPHAN_TRANSACTIONS));
unsigned int nEvicted = LimitOrphanTxSize(nMaxOrphanTx);
if (nEvicted > 0) {
LogPrint(BCLog::MEMPOOL, "mapOrphan overflow, removed %u tx\n", nEvicted);
}
} else {
LogPrint(BCLog::MEMPOOL, "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
// We will continue to reject this tx since it has rejected
// parents so avoid re-requesting it from other peers.
recentRejects->insert(tx.GetHash());
}
} else {
if (!tx.HasWitness() && !state.CorruptionPossible()) {
// Do not use rejection cache for witness transactions or
// witness-stripped transactions, as they can have been malleated.
// See https://github.com/bitcoin/bitcoin/issues/8279 for details.
assert(recentRejects);
recentRejects->insert(tx.GetHash());
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
} else if (tx.HasWitness() && RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
if (pfrom->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
// Always relay transactions received from whitelisted peers, even
// if they were already in the mempool or rejected from it due
// to policy, allowing the node to function as a gateway for
// nodes hidden behind it.
//
// Never relay transactions that we would assign a non-zero DoS
// score for, as we expect peers to do the same with us in that
// case.
int nDoS = 0;
if (!state.IsInvalid(nDoS) || nDoS == 0) {
LogPrintf("Force relaying tx %s from whitelisted peer=%d\n", tx.GetHash().ToString(), pfrom->GetId());
RelayTransaction(tx, connman);
} else {
LogPrintf("Not relaying invalid transaction %s from whitelisted peer=%d (%s)\n", tx.GetHash().ToString(), pfrom->GetId(), FormatStateMessage(state));
}
}
}
for (const CTransactionRef& removedTx : lRemovedTxn)
AddToCompactExtraTransactions(removedTx);
int nDoS = 0;
if (state.IsInvalid(nDoS))
{
LogPrint(BCLog::MEMPOOLREJ, "%s from peer=%d was not accepted: %s\n", tx.GetHash().ToString(),
pfrom->GetId(),
FormatStateMessage(state));
if (state.GetRejectCode() > 0 && state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash));
if (nDoS > 0) {
Misbehaving(pfrom->GetId(), nDoS);
}
}
}
else if (strCommand == NetMsgType::CMPCTBLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
{
CBlockHeaderAndShortTxIDs cmpctblock;
vRecv >> cmpctblock;
bool received_new_header = false;
{
LOCK(cs_main);
if (mapBlockIndex.find(cmpctblock.header.hashPrevBlock) == mapBlockIndex.end()) {
// Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers
if (!IsInitialBlockDownload())
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexBestHeader), uint256()));
return true;
}
if (mapBlockIndex.find(cmpctblock.header.GetHash()) == mapBlockIndex.end()) {
received_new_header = true;
}
}
const CBlockIndex *pindex = nullptr;
CValidationState state;
if (!ProcessNewBlockHeaders({cmpctblock.header}, state, chainparams, &pindex)) {
int nDoS;
if (state.IsInvalid(nDoS)) {
if (nDoS > 0) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), nDoS);
}
LogPrintf("Peer %d sent us invalid header via cmpctblock\n", pfrom->GetId());
return true;
}
}
// When we succeed in decoding a block's txids from a cmpctblock
// message we typically jump to the BLOCKTXN handling code, with a
// dummy (empty) BLOCKTXN message, to re-use the logic there in
// completing processing of the putative block (without cs_main).
bool fProcessBLOCKTXN = false;
CDataStream blockTxnMsg(SER_NETWORK, PROTOCOL_VERSION);
// If we end up treating this as a plain headers message, call that as well
// without cs_main.
bool fRevertToHeaderProcessing = false;
// Keep a CBlock for "optimistic" compactblock reconstructions (see
// below)
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
bool fBlockReconstructed = false;
{
LOCK(cs_main);
// If AcceptBlockHeader returned true, it set pindex
assert(pindex);
UpdateBlockAvailability(pfrom->GetId(), pindex->GetBlockHash());
CNodeState *nodestate = State(pfrom->GetId());
// If this was a new header with more work than our tip, update the
// peer's last block announcement time
if (received_new_header && pindex->nChainWork > chainActive.Tip()->nChainWork) {
nodestate->m_last_block_announcement = GetTime();
}
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
return true;
if (pindex->nChainWork <= chainActive.Tip()->nChainWork || // We know something better
pindex->nTx != 0) { // We had this block at some point, but pruned it
if (fAlreadyInFlight) {
// We requested this block for some reason, but our mempool will probably be useless
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
}
return true;
}
// If we're not close to tip yet, give up and let parallel block fetch work its magic
if (!fAlreadyInFlight && !CanDirectFetch(chainparams.GetConsensus()))
return true;
if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
// Don't bother trying to process compact blocks from v1 peers
// after segwit activates.
return true;
}
// We want to be a bit conservative just to be extra careful about DoS
// possibilities in compact block processing...
if (pindex->nHeight <= chainActive.Height() + 2) {
if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
(fAlreadyInFlight && blockInFlightIt->second.first == pfrom->GetId())) {
std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
if (!MarkBlockAsInFlight(pfrom->GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) {
if (!(*queuedBlockIt)->partialBlock)
(*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&mempool));
else {
// The block was already in flight using compact blocks from the same peer
LogPrint(BCLog::NET, "Peer sent us compact block we were already syncing!\n");
return true;
}
}
PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock;
ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case of whitelist
Misbehaving(pfrom->GetId(), 100);
LogPrintf("Peer %d sent us invalid compact block\n", pfrom->GetId());
return true;
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindexes, the block is now in-flight, so just request it
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return true;
}
BlockTransactionsRequest req;
for (size_t i = 0; i < cmpctblock.BlockTxCount(); i++) {
if (!partialBlock.IsTxAvailable(i))
req.indexes.push_back(i);
}
if (req.indexes.empty()) {
// Dirty hack to jump to BLOCKTXN code (TODO: move message handling into their own functions)
BlockTransactions txn;
txn.blockhash = cmpctblock.header.GetHash();
blockTxnMsg << txn;
fProcessBLOCKTXN = true;
} else {
req.blockhash = pindex->GetBlockHash();
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
}
} else {
// This block is either already in flight from a different
// peer, or this peer has too many blocks outstanding to
// download from.
// Optimistically try to reconstruct anyway since we might be
// able to without any round trips.
PartiallyDownloadedBlock tempBlock(&mempool);
ReadStatus status = tempBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status != READ_STATUS_OK) {
// TODO: don't ignore failures
return true;
}
std::vector<CTransactionRef> dummy;
status = tempBlock.FillBlock(*pblock, dummy);
if (status == READ_STATUS_OK) {
fBlockReconstructed = true;
}
}
} else {
if (fAlreadyInFlight) {
                // We requested this block, but it's far into the future, so our
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom), cmpctblock.header.GetHash());
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
return true;
} else {
// If this was an announce-cmpctblock, we want the same treatment as a header message
fRevertToHeaderProcessing = true;
}
}
} // cs_main
if (fProcessBLOCKTXN)
return ProcessMessage(pfrom, NetMsgType::BLOCKTXN, blockTxnMsg, nTimeReceived, chainparams, connman, interruptMsgProc);
if (fRevertToHeaderProcessing) {
// Headers received from HB compact block peers are permitted to be
// relayed before full validation (see BIP 152), so we don't want to disconnect
// the peer if the header turns out to be for an invalid block.
// Note that if a peer tries to build on an invalid chain, that
// will be detected and the peer will be banned.
return ProcessHeadersMessage(pfrom, connman, {cmpctblock.header}, chainparams, /*punish_duplicate_invalid=*/false);
}
if (fBlockReconstructed) {
// If we got here, we were able to optimistically reconstruct a
// block that is in flight from some other peer.
{
LOCK(cs_main);
mapBlockSource.emplace(pblock->GetHash(), std::make_pair(pfrom->GetId(), false));
}
bool fNewBlock = false;
// Setting fForceProcessing to true means that we bypass some of
// our anti-DoS protections in AcceptBlock, which filters
// unrequested blocks that might be trying to waste our resources
// (eg disk space). Because we only try to reconstruct blocks when
// we're close to caught up (via the CanDirectFetch() requirement
// above, combined with the behavior of not requesting blocks until
// we have a chain with at least nMinimumChainWork), and we ignore
// compact blocks with less work than our tip, it is safe to treat
// reconstructed compact blocks as having been requested.
ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
LOCK(cs_main);
mapBlockSource.erase(pblock->GetHash());
}
LOCK(cs_main); // hold cs_main for CBlockIndex::IsValid()
if (pindex->IsValid(BLOCK_VALID_TRANSACTIONS)) {
// Clear download state for this block, which is in
// process from some other peer. We do this after calling
// ProcessNewBlock so that a malleated cmpctblock announcement
// can't be used to interfere with block relay.
MarkBlockAsReceived(pblock->GetHash());
}
}
}
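// BLOCKTXN: the peer is answering an earlier GETBLOCKTXN request with the
// transactions we were missing, letting us finish reconstructing a compact
// block we have in flight.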
else if (strCommand == NetMsgType::BLOCKTXN && !fImporting && !fReindex) // Ignore blocks received while importing
{
BlockTransactions resp;
vRecv >> resp;
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
bool fBlockRead = false;
{
LOCK(cs_main);
std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
it->second.first != pfrom->GetId()) {
LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom->GetId());
return true;
}
PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
if (status == READ_STATUS_INVALID) {
MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case of whitelist
Misbehaving(pfrom->GetId(), 100);
LogPrintf("Peer %d sent us invalid compact block/non-matching block transactions\n", pfrom->GetId());
return true;
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom), resp.blockhash));
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
// Note that CheckBlock can only fail for one of a few reasons:
// 1. bad-proof-of-work (impossible here, because we've already
// accepted the header)
// 2. merkleroot doesn't match the transactions given (already
// caught in FillBlock with READ_STATUS_FAILED, so
// impossible here)
// 3. the block is otherwise invalid (eg invalid coinbase,
// block is too big, too many legacy sigops, etc).
// So if CheckBlock failed, #3 is the only possibility.
// Under BIP 152, we don't DoS-ban unless proof of work is
// invalid (we don't require all the stateless checks to have
// been run). This is handled below, so just treat this as
// though the block was successfully read, and rely on the
// handling in ProcessNewBlock to ensure the block index is
// updated, reject messages go out, etc.
MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer
fBlockRead = true;
// mapBlockSource is only used for sending reject messages and DoS scores,
// so the race between here and cs_main in ProcessNewBlock is fine.
// BIP 152 permits peers to relay compact blocks after validating
// the header only; we should not punish peers if the block turns
// out to be invalid.
mapBlockSource.emplace(resp.blockhash, std::make_pair(pfrom->GetId(), false));
}
} // Don't hold cs_main when we call into ProcessNewBlock
if (fBlockRead) {
bool fNewBlock = false;
// Since we requested this block (it was in mapBlocksInFlight), force it to be processed,
// even if it would not be a candidate for new tip (missing previous block, chain not long enough, etc)
// This bypasses some anti-DoS logic in AcceptBlock (eg to prevent
// disk-space attacks), but this should be safe due to the
// protections in the compact block handler -- see related comment
// in compact block optimistic reconstruction handling.
ProcessNewBlock(chainparams, pblock, /*fForceProcessing=*/true, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
LOCK(cs_main);
mapBlockSource.erase(pblock->GetHash());
}
}
}
else if (strCommand == NetMsgType::HEADERS && !fImporting && !fReindex) // Ignore headers received while importing
{
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 20);
return error("headers message size = %u", nCount);
}
headers.resize(nCount);
for (unsigned int n = 0; n < nCount; n++) {
vRecv >> headers[n];
ReadCompactSize(vRecv); // ignore tx count; assume it is 0.
}
// Headers received via a HEADERS message should be valid, and reflect
// the chain the peer is on. If we receive a known-invalid header,
// disconnect the peer if it is using one of our outbound connection
// slots.
bool should_punish = !pfrom->fInbound && !pfrom->m_manual_connection;
return ProcessHeadersMessage(pfrom, connman, headers, chainparams, should_punish);
}
else if (strCommand == NetMsgType::BLOCK && !fImporting && !fReindex) // Ignore blocks received while importing
{
std::shared_ptr<CBlock> pblock = std::make_shared<CBlock>();
vRecv >> *pblock;
LogPrint(BCLog::NET, "received block %s peer=%d\n", pblock->GetHash().ToString(), pfrom->GetId());
bool forceProcessing = false;
const uint256 hash(pblock->GetHash());
{
LOCK(cs_main);
// Also always process if we requested the block explicitly, as we may
// need it even though it is not a candidate for a new best tip.
forceProcessing |= MarkBlockAsReceived(hash);
// mapBlockSource is only used for sending reject messages and DoS scores,
// so the race between here and cs_main in ProcessNewBlock is fine.
mapBlockSource.emplace(hash, std::make_pair(pfrom->GetId(), true));
}
bool fNewBlock = false;
ProcessNewBlock(chainparams, pblock, forceProcessing, &fNewBlock);
if (fNewBlock) {
pfrom->nLastBlockTime = GetTime();
} else {
LOCK(cs_main);
mapBlockSource.erase(pblock->GetHash());
}
}
else if (strCommand == NetMsgType::GETADDR)
{
// This asymmetric behavior for inbound and outbound connections was introduced
// to prevent a fingerprinting attack: an attacker can send specific fake addresses
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
if (!pfrom->fInbound) {
LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom->GetId());
return true;
}
// Only send one GetAddr response per connection to reduce resource waste
// and discourage addr stamping of INV announcements.
if (pfrom->fSentAddr) {
LogPrint(BCLog::NET, "Ignoring repeated \"getaddr\". peer=%d\n", pfrom->GetId());
return true;
}
pfrom->fSentAddr = true;
pfrom->vAddrToSend.clear();
std::vector<CAddress> vAddr = connman->GetAddresses();
FastRandomContext insecure_rand;
for (const CAddress &addr : vAddr)
pfrom->PushAddress(addr, insecure_rand);
}
else if (strCommand == NetMsgType::MEMPOOL)
{
if (!(pfrom->GetLocalServices() & NODE_BLOOM) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "mempool request with bloom filters disabled, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
if (connman->OutboundTargetReached(false) && !pfrom->fWhitelisted)
{
LogPrint(BCLog::NET, "mempool request with bandwidth limit reached, disconnect peer=%d\n", pfrom->GetId());
pfrom->fDisconnect = true;
return true;
}
LOCK(pfrom->cs_inventory);
pfrom->fSendMempool = true;
}
else if (strCommand == NetMsgType::PING)
{
if (pfrom->nVersion > BIP0031_VERSION)
{
uint64_t nonce = 0;
vRecv >> nonce;
// Echo the message back with the nonce. This allows for two useful features:
//
// 1) A remote node can quickly check if the connection is operational
// 2) Remote nodes can measure the latency of the network thread. If this node
// is overloaded it won't respond to pings quickly and the remote node can
// avoid sending us more work, like chain download requests.
//
// The nonce stops the remote node from getting confused between different pings: without
// it, if the remote node sends a ping once per second and this node takes 5
// seconds to respond to each, the 5th ping the remote sends would appear to
// return very quickly.
connman->PushMessage(pfrom, msgMaker.Make(NetMsgType::PONG, nonce));
}
}
else if (strCommand == NetMsgType::PONG)
{
int64_t pingUsecEnd = nTimeReceived;
uint64_t nonce = 0;
size_t nAvail = vRecv.in_avail();
bool bPingFinished = false;
std::string sProblem;
if (nAvail >= sizeof(nonce)) {
vRecv >> nonce;
// Only process pong message if there is an outstanding ping (old ping without nonce should never pong)
if (pfrom->nPingNonceSent != 0) {
if (nonce == pfrom->nPingNonceSent) {
// Matching pong received, this ping is no longer outstanding
bPingFinished = true;
int64_t pingUsecTime = pingUsecEnd - pfrom->nPingUsecStart;
if (pingUsecTime > 0) {
// Successful ping time measurement, replace previous
pfrom->nPingUsecTime = pingUsecTime;
pfrom->nMinPingUsecTime = std::min(pfrom->nMinPingUsecTime.load(), pingUsecTime);
} else {
// This should never happen
sProblem = "Timing mishap";
}
} else {
// Nonce mismatches are normal when pings are overlapping
sProblem = "Nonce mismatch";
if (nonce == 0) {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Nonce zero";
}
}
} else {
sProblem = "Unsolicited pong without ping";
}
} else {
// This is most likely a bug in another implementation somewhere; cancel this ping
bPingFinished = true;
sProblem = "Short payload";
}
if (!(sProblem.empty())) {
LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n",
pfrom->GetId(),
sProblem,
pfrom->nPingNonceSent,
nonce,
nAvail);
}
if (bPingFinished) {
pfrom->nPingNonceSent = 0;
}
}
else if (strCommand == NetMsgType::FILTERLOAD)
{
CBloomFilter filter;
vRecv >> filter;
if (!filter.IsWithinSizeConstraints())
{
// There is no excuse for sending a too-large filter
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
}
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
pfrom->fRelayTxes = true;
}
}
else if (strCommand == NetMsgType::FILTERADD)
{
std::vector<unsigned char> vData;
vRecv >> vData;
// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
bool bad = false;
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true;
} else {
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
pfrom->pfilter->insert(vData);
} else {
bad = true;
}
}
if (bad) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
}
}
else if (strCommand == NetMsgType::FILTERCLEAR)
{
LOCK(pfrom->cs_filter);
if (pfrom->GetLocalServices() & NODE_BLOOM) {
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter();
}
pfrom->fRelayTxes = true;
}
else if (strCommand == NetMsgType::FEEFILTER) {
CAmount newFeeFilter = 0;
vRecv >> newFeeFilter;
if (MoneyRange(newFeeFilter)) {
{
LOCK(pfrom->cs_feeFilter);
pfrom->minFeeFilter = newFeeFilter;
}
LogPrint(BCLog::NET, "received: feefilter of %s from peer=%d\n", CFeeRate(newFeeFilter).ToString(), pfrom->GetId());
}
}
else if (strCommand == NetMsgType::NOTFOUND) {
// We do not care about the NOTFOUND message, but logging an Unknown Command
// message would be undesirable as we transmit it ourselves.
}
else {
// Ignore unknown commands for extensibility
LogPrint(BCLog::NET, "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->GetId());
}
return true;
}
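// Flush any queued block reject messages to the peer and, if its misbehavior
// score crossed the ban threshold, disconnect (and ban, unless the peer is
// whitelisted, manually added, or local). Returns true if the peer was
// flagged for banning.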
static bool SendRejectsAndCheckIfBanned(CNode* pnode, CConnman* connman)
{
AssertLockHeld(cs_main);
CNodeState &state = *State(pnode->GetId());
for (const CBlockReject& reject : state.rejects) {
connman->PushMessage(pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, (std::string)NetMsgType::BLOCK, reject.chRejectCode, reject.strRejectReason, reject.hashBlock));
}
state.rejects.clear();
if (state.fShouldBan) {
state.fShouldBan = false;
if (pnode->fWhitelisted)
LogPrintf("Warning: not punishing whitelisted peer %s!\n", pnode->addr.ToString());
else if (pnode->m_manual_connection)
LogPrintf("Warning: not punishing addnoded peer %s!\n", pnode->addr.ToString());
else {
pnode->fDisconnect = true;
if (pnode->addr.IsLocal())
LogPrintf("Warning: not banning local peer %s!\n", pnode->addr.ToString());
else
{
connman->Ban(pnode->addr, BanReasonNodeMisbehaving);
}
}
return true;
}
return false;
}
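// Pull one message off the peer's receive queue, validate its envelope
// (network magic, header, checksum), and dispatch it to ProcessMessage().
// Returns whether more work remains for this peer.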
bool PeerLogicValidation::ProcessMessages(CNode* pfrom, std::atomic<bool>& interruptMsgProc)
{
const CChainParams& chainparams = Params();
//
// Message format
// (4) message start
// (12) command
// (4) size
// (4) checksum
// (x) data
//
bool fMoreWork = false;
if (!pfrom->vRecvGetData.empty())
ProcessGetData(pfrom, chainparams.GetConsensus(), connman, interruptMsgProc);
if (pfrom->fDisconnect)
return false;
// This maintains the order of responses.
if (!pfrom->vRecvGetData.empty()) return true;
// Don't bother if send buffer is too full to respond anyway
if (pfrom->fPauseSend)
return false;
std::list<CNetMessage> msgs;
{
LOCK(pfrom->cs_vProcessMsg);
if (pfrom->vProcessMsg.empty())
return false;
// Just take one message
msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin());
pfrom->nProcessQueueSize -= msgs.front().vRecv.size() + CMessageHeader::HEADER_SIZE;
pfrom->fPauseRecv = pfrom->nProcessQueueSize > connman->GetReceiveFloodSize();
fMoreWork = !pfrom->vProcessMsg.empty();
}
CNetMessage& msg(msgs.front());
msg.SetVersion(pfrom->GetRecvVersion());
// Scan for message start
if (memcmp(msg.hdr.pchMessageStart, chainparams.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
LogPrintf("PROCESSMESSAGE: INVALID MESSAGESTART %s peer=%d\n", SanitizeString(msg.hdr.GetCommand()), pfrom->GetId());
pfrom->fDisconnect = true;
return false;
}
// Read header
CMessageHeader& hdr = msg.hdr;
if (!hdr.IsValid(chainparams.MessageStart()))
{
LogPrintf("PROCESSMESSAGE: ERRORS IN HEADER %s peer=%d\n", SanitizeString(hdr.GetCommand()), pfrom->GetId());
return fMoreWork;
}
std::string strCommand = hdr.GetCommand();
// Message size
unsigned int nMessageSize = hdr.nMessageSize;
// Checksum
CDataStream& vRecv = msg.vRecv;
const uint256& hash = msg.GetMessageHash();
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0)
{
LogPrintf("%s(%s, %u bytes): CHECKSUM ERROR expected %s was %s\n", __func__,
SanitizeString(strCommand), nMessageSize,
HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
return fMoreWork;
}
// Process message
bool fRet = false;
try
{
fRet = ProcessMessage(pfrom, strCommand, vRecv, msg.nTime, chainparams, connman, interruptMsgProc);
if (interruptMsgProc)
return false;
if (!pfrom->vRecvGetData.empty())
fMoreWork = true;
}
catch (const std::ios_base::failure& e)
{
connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_MALFORMED, std::string("error parsing message")));
if (strstr(e.what(), "end of data"))
{
// Allow exceptions from under-length message on vRecv
LogPrintf("%s(%s, %u bytes): Exception '%s' caught, normally caused by a message being shorter than its stated length\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "size too large"))
{
// Allow exceptions from over-long size
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else if (strstr(e.what(), "non-canonical ReadCompactSize()"))
{
// Allow exceptions from non-canonical encoding
LogPrintf("%s(%s, %u bytes): Exception '%s' caught\n", __func__, SanitizeString(strCommand), nMessageSize, e.what());
}
else
{
PrintExceptionContinue(&e, "ProcessMessages()");
}
}
catch (const std::exception& e) {
PrintExceptionContinue(&e, "ProcessMessages()");
} catch (...) {
PrintExceptionContinue(nullptr, "ProcessMessages()");
}
if (!fRet) {
LogPrintf("%s(%s, %u bytes) FAILED peer=%d\n", __func__, SanitizeString(strCommand), nMessageSize, pfrom->GetId());
}
LOCK(cs_main);
SendRejectsAndCheckIfBanned(pfrom, connman);
return fMoreWork;
}
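// Outbound peers are expected to announce chains with at least as much work
// as our tip. If a peer's best known block falls behind, send one getheaders
// probe and start a timeout; disconnect if the peer still hasn't caught up.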
void PeerLogicValidation::ConsiderEviction(CNode *pto, int64_t time_in_seconds)
{
AssertLockHeld(cs_main);
CNodeState &state = *State(pto->GetId());
const CNetMsgMaker msgMaker(pto->GetSendVersion());
if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
// This is an outbound peer subject to disconnection if they don't
// announce a block with as much work as the current tip within
// CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
// their chain has more work than ours, we should sync to it,
// unless it's invalid, in which case we should find that out and
// disconnect from them elsewhere).
if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= chainActive.Tip()->nChainWork) {
if (state.m_chain_sync.m_timeout != 0) {
state.m_chain_sync.m_timeout = 0;
state.m_chain_sync.m_work_header = nullptr;
state.m_chain_sync.m_sent_getheaders = false;
}
} else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
// Our best block known by this peer is behind our tip, and we're either noticing
// that for the first time, OR this peer was able to catch up to some earlier point
// where we checked against our tip.
// Either way, set a new timeout based on current tip.
state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
state.m_chain_sync.m_work_header = chainActive.Tip();
state.m_chain_sync.m_sent_getheaders = false;
} else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
// No evidence yet that our peer has synced to a chain with work equal to that
// of our tip, when we first detected it was behind. Send a single getheaders
// message to give the peer a chance to update us.
if (state.m_chain_sync.m_sent_getheaders) {
// They've run out of time to catch up!
LogPrintf("Disconnecting outbound peer %d for old chain, best known block = %s\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>");
pto->fDisconnect = true;
} else {
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto->GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
// Bump the timeout to allow a response, which could clear the timeout
// (if the response shows the peer has synced), reset the timeout (if
// the peer syncs to the required work but not to our tip), or result
// in disconnect (if we advance to the timeout and pindexBestKnownBlock
// has not sufficiently progressed)
state.m_chain_sync.m_timeout = time_in_seconds + HEADERS_RESPONSE_TIME;
}
}
}
}
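// If we hold more outbound connections than our target, disconnect the
// outbound peer that least recently announced a new block, skipping protected
// peers, very recent connections, and peers we are downloading blocks from.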
void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
// Check whether we have too many outbound peers
int extra_peers = connman->GetExtraOutboundCount();
if (extra_peers > 0) {
// If we have more outbound peers than we target, disconnect one.
// Pick the outbound peer that least recently announced
// us a new block, with ties broken by choosing the more recent
// connection (higher node id)
NodeId worst_peer = -1;
int64_t oldest_block_announcement = std::numeric_limits<int64_t>::max();
LOCK(cs_main);
connman->ForEachNode([&](CNode* pnode) {
// Ignore non-outbound peers, or nodes marked for disconnect already
if (!IsOutboundDisconnectionCandidate(pnode) || pnode->fDisconnect) return;
CNodeState *state = State(pnode->GetId());
if (state == nullptr) return; // shouldn't be possible, but just in case
// Don't evict our protected peers
if (state->m_chain_sync.m_protect) return;
if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
worst_peer = pnode->GetId();
oldest_block_announcement = state->m_last_block_announcement;
}
});
if (worst_peer != -1) {
bool disconnected = connman->ForNode(worst_peer, [&](CNode *pnode) {
// Only disconnect a peer that has been connected to us for
// some reasonable fraction of our check-frequency, to give
// it time for new information to have arrived.
// Also don't disconnect any peer we're trying to download a
// block from.
CNodeState &state = *State(pnode->GetId());
if (time_in_seconds - pnode->nTimeConnected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
pnode->fDisconnect = true;
return true;
} else {
LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n", pnode->GetId(), pnode->nTimeConnected, state.nBlocksInFlight);
return false;
}
});
if (disconnected) {
// If we disconnected an extra peer, that means we successfully
// connected to at least one peer after the last time we
// detected a stale tip. Don't try any more extra peers until
// we next detect a stale tip, to limit the load we put on the
// network from these extra connections.
connman->SetTryNewOutboundPeer(false);
}
}
}
}
void PeerLogicValidation::CheckForStaleTipAndEvictPeers(const Consensus::Params &consensusParams)
{
if (connman == nullptr) return;
int64_t time_in_seconds = GetTime();
EvictExtraOutboundPeers(time_in_seconds);
if (time_in_seconds > m_stale_tip_check_time) {
LOCK(cs_main);
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer
if (TipMayBeStale(consensusParams)) {
LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update);
connman->SetTryNewOutboundPeer(true);
} else if (connman->GetTryNewOutboundPeer()) {
connman->SetTryNewOutboundPeer(false);
}
m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
}
}
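// Heap comparator for trickled tx inventory: entries with fewer ancestors or
// higher feerate sort toward the top of the max-heap and are sent first.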
class CompareInvMempoolOrder
{
CTxMemPool *mp;
public:
CompareInvMempoolOrder(CTxMemPool *_mempool)
{
mp = _mempool;
}
bool operator()(std::set<uint256>::iterator a, std::set<uint256>::iterator b)
{
/* As std::make_heap produces a max-heap, we want the entries with the
* fewest ancestors/highest fee to sort later. */
return mp->CompareDepthAndScore(*b, *a);
}
};
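// Per-peer send path: pings, addr gossip, initial headers sync, block
// announcements (headers / compact block / inv), tx inventory trickling,
// stall and timeout detection, block and tx getdata requests, and feefilter.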
bool PeerLogicValidation::SendMessages(CNode* pto, std::atomic<bool>& interruptMsgProc)
{
const Consensus::Params& consensusParams = Params().GetConsensus();
{
// Don't send anything until the version handshake is complete
if (!pto->fSuccessfullyConnected || pto->fDisconnect)
return true;
// If we get here, the outgoing message serialization version is set and can't change.
const CNetMsgMaker msgMaker(pto->GetSendVersion());
//
// Message: ping
//
bool pingSend = false;
if (pto->fPingQueued) {
// RPC ping request by user
pingSend = true;
}
if (pto->nPingNonceSent == 0 && pto->nPingUsecStart + PING_INTERVAL * 1000000 < GetTimeMicros()) {
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
uint64_t nonce = 0;
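// Draw a nonzero nonce; 0 is reserved to mean no ping is outstanding.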
while (nonce == 0) {
GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
}
pto->fPingQueued = false;
pto->nPingUsecStart = GetTimeMicros();
if (pto->nVersion > BIP0031_VERSION) {
pto->nPingNonceSent = nonce;
connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce));
} else {
// Peer is too old to support ping command with nonce, pong will never arrive.
pto->nPingNonceSent = 0;
connman->PushMessage(pto, msgMaker.Make(NetMsgType::PING));
}
}
TRY_LOCK(cs_main, lockMain); // Acquire cs_main for IsInitialBlockDownload() and CNodeState()
if (!lockMain)
return true;
if (SendRejectsAndCheckIfBanned(pto, connman))
return true;
CNodeState &state = *State(pto->GetId());
// Address refresh broadcast
int64_t nNow = GetTimeMicros();
if (!IsInitialBlockDownload() && pto->nNextLocalAddrSend < nNow) {
AdvertiseLocal(pto);
pto->nNextLocalAddrSend = PoissonNextSend(nNow, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
//
// Message: addr
//
if (pto->nNextAddrSend < nNow) {
pto->nNextAddrSend = PoissonNextSend(nNow, AVG_ADDRESS_BROADCAST_INTERVAL);
std::vector<CAddress> vAddr;
vAddr.reserve(pto->vAddrToSend.size());
for (const CAddress& addr : pto->vAddrToSend)
{
if (!pto->addrKnown.contains(addr.GetKey()))
{
pto->addrKnown.insert(addr.GetKey());
vAddr.push_back(addr);
// receiver rejects addr messages larger than 1000
if (vAddr.size() >= 1000)
{
connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
vAddr.clear();
}
}
}
pto->vAddrToSend.clear();
if (!vAddr.empty())
connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr));
// we only send the big addr message once
if (pto->vAddrToSend.capacity() > 40)
pto->vAddrToSend.shrink_to_fit();
}
// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
state.nHeadersSyncTimeout = GetTimeMicros() + HEADERS_DOWNLOAD_TIMEOUT_BASE + HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER * (GetAdjustedTime() - pindexBestHeader->GetBlockTime())/(consensusParams.nPowTargetSpacing);
nSyncStarted++;
const CBlockIndex *pindexStart = pindexBestHeader;
/* If possible, start at the block preceding the currently
best known header. This ensures that we always get a
non-empty list of headers back as long as the peer
is up-to-date. With a non-empty response, we can initialise
the peer's known best block. This wouldn't be possible
if we requested starting at pindexBestHeader and
got back an empty response. */
if (pindexStart->pprev)
pindexStart = pindexStart->pprev;
LogPrint(BCLog::NET, "initial getheaders (%d) to peer=%d (startheight:%d)\n", pindexStart->nHeight, pto->GetId(), pto->nStartingHeight);
connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETHEADERS, chainActive.GetLocator(pindexStart), uint256()));
}
}
// Resend wallet transactions that haven't gotten in a block yet
// Except during reindex, importing and IBD, when old wallet
// transactions become unconfirmed and spam other nodes.
if (!fReindex && !fImporting && !IsInitialBlockDownload())
{
GetMainSignals().Broadcast(nTimeBestReceived, connman);
}
//
// Try sending block announcements via headers
//
{
// If we have less than MAX_BLOCKS_TO_ANNOUNCE in our
// list of block hashes we're relaying, and our peer wants
// headers announcements, then find the first header
// not yet known to our peer but would connect, and send.
// If no header would connect, or if we have too many
// blocks, or if the peer doesn't want headers, just
// add all to the inv queue.
LOCK(pto->cs_inventory);
std::vector<CBlock> vHeaders;
bool fRevertToInv = ((!state.fPreferHeaders &&
(!state.fPreferHeaderAndIDs || pto->vBlockHashesToAnnounce.size() > 1)) ||
pto->vBlockHashesToAnnounce.size() > MAX_BLOCKS_TO_ANNOUNCE);
const CBlockIndex *pBestIndex = nullptr; // last header queued for delivery
ProcessBlockAvailability(pto->GetId()); // ensure pindexBestKnownBlock is up-to-date
if (!fRevertToInv) {
bool fFoundStartingHeader = false;
// Try to find first header that our peer doesn't have, and
// then send all headers past that one. If we come across any
// headers that aren't on chainActive, give up.
for (const uint256 &hash : pto->vBlockHashesToAnnounce) {
BlockMap::iterator mi = mapBlockIndex.find(hash);
assert(mi != mapBlockIndex.end());
const CBlockIndex *pindex = mi->second;
if (chainActive[pindex->nHeight] != pindex) {
// Bail out if we reorged away from this block
fRevertToInv = true;
break;
}
if (pBestIndex != nullptr && pindex->pprev != pBestIndex) {
// This means that the blocks to announce don't
// connect to each other.
// This shouldn't really be possible to hit during
// regular operation (because reorgs should take us to
// a chain that has some block not on the prior chain,
// which should be caught by the prior check), but one
// way this could happen is by using invalidateblock /
// reconsiderblock repeatedly on the tip, causing it to
// be added multiple times to vBlockHashesToAnnounce.
// Robustly deal with this rare situation by reverting
// to an inv.
fRevertToInv = true;
break;
}
pBestIndex = pindex;
if (fFoundStartingHeader) {
// add this to the headers message
vHeaders.push_back(pindex->GetBlockHeader());
} else if (PeerHasHeader(&state, pindex)) {
continue; // keep looking for the first new block
} else if (pindex->pprev == nullptr || PeerHasHeader(&state, pindex->pprev)) {
// Peer doesn't have this header but they do have the prior one.
// Start sending headers.
fFoundStartingHeader = true;
vHeaders.push_back(pindex->GetBlockHeader());
} else {
// Peer doesn't have this header or the prior one -- nothing will
// connect, so bail out.
fRevertToInv = true;
break;
}
}
}
if (!fRevertToInv && !vHeaders.empty()) {
if (vHeaders.size() == 1 && state.fPreferHeaderAndIDs) {
// We only send up to 1 block as header-and-ids, as otherwise
// it probably means we're doing an initial-ish sync or they're slow
LogPrint(BCLog::NET, "%s sending header-and-ids %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->GetId());
int nSendFlags = state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS;
bool fGotBlockFromCache = false;
{
LOCK(cs_most_recent_block);
if (most_recent_block_hash == pBestIndex->GetBlockHash()) {
if (state.fWantsCmpctWitness || !fWitnessesPresentInMostRecentCompactBlock)
connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, *most_recent_compact_block));
else {
CBlockHeaderAndShortTxIDs cmpctblock(*most_recent_block, state.fWantsCmpctWitness);
connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
fGotBlockFromCache = true;
}
}
if (!fGotBlockFromCache) {
CBlock block;
bool ret = ReadBlockFromDisk(block, pBestIndex, consensusParams);
assert(ret);
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
connman->PushMessage(pto, msgMaker.Make(nSendFlags, NetMsgType::CMPCTBLOCK, cmpctblock));
}
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
if (vHeaders.size() > 1) {
LogPrint(BCLog::NET, "%s: %u headers, range (%s, %s), to peer=%d\n", __func__,
vHeaders.size(),
vHeaders.front().GetHash().ToString(),
vHeaders.back().GetHash().ToString(), pto->GetId());
} else {
LogPrint(BCLog::NET, "%s: sending header %s to peer=%d\n", __func__,
vHeaders.front().GetHash().ToString(), pto->GetId());
}
connman->PushMessage(pto, msgMaker.Make(NetMsgType::HEADERS, vHeaders));
state.pindexBestHeaderSent = pBestIndex;
} else
fRevertToInv = true;
}
if (fRevertToInv) {
// If falling back to using an inv, just try to inv the tip.
// The last entry in vBlockHashesToAnnounce was our tip at some point
// in the past.
if (!pto->vBlockHashesToAnnounce.empty()) {
const uint256 &hashToAnnounce = pto->vBlockHashesToAnnounce.back();
BlockMap::iterator mi = mapBlockIndex.find(hashToAnnounce);
assert(mi != mapBlockIndex.end());
const CBlockIndex *pindex = mi->second;
// Warn if we're announcing a block that is not on the main chain.
// This should be very rare and could be optimized out.
// Just log for now.
if (chainActive[pindex->nHeight] != pindex) {
LogPrint(BCLog::NET, "Announcing block %s not on main chain (tip=%s)\n",
hashToAnnounce.ToString(), chainActive.Tip()->GetBlockHash().ToString());
}
// If the peer's chain has this block, don't inv it back.
if (!PeerHasHeader(&state, pindex)) {
pto->PushInventory(CInv(MSG_BLOCK, hashToAnnounce));
LogPrint(BCLog::NET, "%s: sending inv peer=%d hash=%s\n", __func__,
pto->GetId(), hashToAnnounce.ToString());
}
}
}
pto->vBlockHashesToAnnounce.clear();
}
//
// Message: inventory
//
std::vector<CInv> vInv;
{
LOCK(pto->cs_inventory);
vInv.reserve(std::max<size_t>(pto->vInventoryBlockToSend.size(), INVENTORY_BROADCAST_MAX));
// Add blocks
for (const uint256& hash : pto->vInventoryBlockToSend) {
vInv.push_back(CInv(MSG_BLOCK, hash));
if (vInv.size() == MAX_INV_SZ) {
connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
pto->vInventoryBlockToSend.clear();
// Check whether periodic sends should happen
bool fSendTrickle = pto->fWhitelisted;
if (pto->nNextInvSend < nNow) {
fSendTrickle = true;
// Use half the delay for outbound peers, as there is less privacy concern for them.
pto->nNextInvSend = PoissonNextSend(nNow, INVENTORY_BROADCAST_INTERVAL >> !pto->fInbound);
}
// Time to send but the peer has requested we not relay transactions.
if (fSendTrickle) {
LOCK(pto->cs_filter);
if (!pto->fRelayTxes) pto->setInventoryTxToSend.clear();
}
// Respond to BIP35 mempool requests
if (fSendTrickle && pto->fSendMempool) {
auto vtxinfo = mempool.infoAll();
pto->fSendMempool = false;
CAmount filterrate = 0;
{
LOCK(pto->cs_feeFilter);
filterrate = pto->minFeeFilter;
}
LOCK(pto->cs_filter);
for (const auto& txinfo : vtxinfo) {
const uint256& hash = txinfo.tx->GetHash();
CInv inv(MSG_TX, hash);
pto->setInventoryTxToSend.erase(hash);
if (filterrate) {
if (txinfo.feeRate.GetFeePerK() < filterrate)
continue;
}
if (pto->pfilter) {
if (!pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
}
pto->filterInventoryKnown.insert(hash);
vInv.push_back(inv);
if (vInv.size() == MAX_INV_SZ) {
connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
}
pto->timeLastMempoolReq = GetTime();
}
// Determine transactions to relay
if (fSendTrickle) {
// Produce a vector with all candidates for sending
std::vector<std::set<uint256>::iterator> vInvTx;
vInvTx.reserve(pto->setInventoryTxToSend.size());
for (std::set<uint256>::iterator it = pto->setInventoryTxToSend.begin(); it != pto->setInventoryTxToSend.end(); it++) {
vInvTx.push_back(it);
}
CAmount filterrate = 0;
{
LOCK(pto->cs_feeFilter);
filterrate = pto->minFeeFilter;
}
// Topologically and fee-rate sort the inventory we send for privacy and priority reasons.
// A heap is used so that not all items need sorting if only a few are being sent.
CompareInvMempoolOrder compareInvMempoolOrder(&mempool);
std::make_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
// No reason to drain out at many times the network's capacity,
// especially since we have many peers and some will draw much shorter delays.
unsigned int nRelayedTransactions = 0;
LOCK(pto->cs_filter);
while (!vInvTx.empty() && nRelayedTransactions < INVENTORY_BROADCAST_MAX) {
// Fetch the top element from the heap
std::pop_heap(vInvTx.begin(), vInvTx.end(), compareInvMempoolOrder);
std::set<uint256>::iterator it = vInvTx.back();
vInvTx.pop_back();
uint256 hash = *it;
// Remove it from the to-be-sent set
pto->setInventoryTxToSend.erase(it);
// Check if not in the filter already
if (pto->filterInventoryKnown.contains(hash)) {
continue;
}
// Not in the mempool anymore? don't bother sending it.
auto txinfo = mempool.info(hash);
if (!txinfo.tx) {
continue;
}
if (filterrate && txinfo.feeRate.GetFeePerK() < filterrate) {
continue;
}
if (pto->pfilter && !pto->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue;
// Send
vInv.push_back(CInv(MSG_TX, hash));
nRelayedTransactions++;
{
// Expire old relay messages
while (!vRelayExpiration.empty() && vRelayExpiration.front().first < nNow)
{
mapRelay.erase(vRelayExpiration.front().second);
vRelayExpiration.pop_front();
}
auto ret = mapRelay.insert(std::make_pair(hash, std::move(txinfo.tx)));
if (ret.second) {
vRelayExpiration.push_back(std::make_pair(nNow + 15 * 60 * 1000000, ret.first));
}
}
if (vInv.size() == MAX_INV_SZ) {
connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
vInv.clear();
}
pto->filterInventoryKnown.insert(hash);
}
}
}
if (!vInv.empty())
connman->PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
// Detect whether we're stalling
nNow = GetTimeMicros();
if (state.nStallingSince && state.nStallingSince < nNow - 1000000 * BLOCK_STALLING_TIMEOUT) {
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
return true;
}
// In case there is a block that has been in flight from this peer for 2 + 0.5 * N times the block interval
// (with N the number of peers from which we're downloading validated blocks), disconnect due to timeout.
// We compensate for other peers to prevent killing off peers due to our own downstream link
// being saturated. We only count validated in-flight blocks so peers can't advertise non-existing block hashes
// to unreasonably increase our timeout.
if (state.vBlocksInFlight.size() > 0) {
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0);
if (nNow > state.nDownloadingSince + consensusParams.nPowTargetSpacing * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId());
pto->fDisconnect = true;
return true;
}
}
// Check for headers sync timeouts
if (state.fSyncStarted && state.nHeadersSyncTimeout < std::numeric_limits<int64_t>::max()) {
// Detect whether this is a stalling initial-headers-sync peer
if (pindexBestHeader->GetBlockTime() <= GetAdjustedTime() - 24*60*60) {
if (nNow > state.nHeadersSyncTimeout && nSyncStarted == 1 && (nPreferredDownload - state.fPreferredDownload >= 1)) {
// Disconnect a (non-whitelisted) peer if it is our only sync peer,
// and we have others we could be using instead.
// Note: If all our peers are inbound, then we won't
// disconnect our sync peer for stalling; we have bigger
// problems if we can't get any outbound peers.
if (!pto->fWhitelisted) {
LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
pto->fDisconnect = true;
return true;
} else {
LogPrintf("Timeout downloading headers from whitelisted peer=%d, not disconnecting\n", pto->GetId());
// Reset the headers sync state so that we have a
// chance to try downloading from a different peer.
// Note: this will also result in at least one more
// getheaders message to be sent to
// this peer (eventually).
state.fSyncStarted = false;
nSyncStarted--;
state.nHeadersSyncTimeout = 0;
}
}
} else {
// After we've caught up once, reset the timeout so we can't trigger
// disconnect later.
state.nHeadersSyncTimeout = std::numeric_limits<int64_t>::max();
}
}
// Check that outbound peers have reasonable chains
// GetTime() is used by this anti-DoS logic so we can test this using mocktime
ConsiderEviction(pto, GetTime());
//
// Message: getdata (blocks)
//
std::vector<CInv> vGetData;
if (!pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
for (const CBlockIndex *pindex : vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(pto);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex);
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->GetId());
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
State(staller)->nStallingSince = nNow;
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
}
}
}
//
// Message: getdata (non-blocks)
//
while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
{
const CInv& inv = (*pto->mapAskFor.begin()).second;
if (!AlreadyHave(inv))
{
LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId());
vGetData.push_back(inv);
if (vGetData.size() >= 1000)
{
connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
vGetData.clear();
}
} else {
//If we're not going to ask, don't expect a response.
pto->setAskFor.erase(inv.hash);
}
pto->mapAskFor.erase(pto->mapAskFor.begin());
}
if (!vGetData.empty())
connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData));
//
// Message: feefilter
//
// We don't want whitelisted peers to filter txs to us if we have -whitelistforcerelay
if (pto->nVersion >= FEEFILTER_VERSION && gArgs.GetBoolArg("-feefilter", DEFAULT_FEEFILTER) &&
!(pto->fWhitelisted && gArgs.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY))) {
CAmount currentFilter = mempool.GetMinFee(gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000).GetFeePerK();
int64_t timeNow = GetTimeMicros();
if (timeNow > pto->nextSendTimeFeeFilter) {
static CFeeRate default_feerate(DEFAULT_MIN_RELAY_TX_FEE);
static FeeFilterRounder filterRounder(default_feerate);
CAmount filterToSend = filterRounder.round(currentFilter);
// We always have a fee filter of at least minRelayTxFee
filterToSend = std::max(filterToSend, ::minRelayTxFee.GetFeePerK());
if (filterToSend != pto->lastSentFeeFilter) {
connman->PushMessage(pto, msgMaker.Make(NetMsgType::FEEFILTER, filterToSend));
pto->lastSentFeeFilter = filterToSend;
}
pto->nextSendTimeFeeFilter = PoissonNextSend(timeNow, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
else if (timeNow + MAX_FEEFILTER_CHANGE_DELAY * 1000000 < pto->nextSendTimeFeeFilter &&
(currentFilter < 3 * pto->lastSentFeeFilter / 4 || currentFilter > 4 * pto->lastSentFeeFilter / 3)) {
pto->nextSendTimeFeeFilter = timeNow + GetRandInt(MAX_FEEFILTER_CHANGE_DELAY) * 1000000;
}
}
}
return true;
}
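// Static cleanup helper: clears the orphan-transaction maps at shutdown.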
class CNetProcessingCleanup
{
public:
CNetProcessingCleanup() {}
~CNetProcessingCleanup() {
// orphan transactions
mapOrphanTransactions.clear();
mapOrphanTransactionsByPrev.clear();
}
} instance_of_cnetprocessingcleanup;
// Copyright 2014 PDFium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com
#include "xfa/fxgraphics/cxfa_graphics.h"
#include <cmath>
#include <memory>
#include "core/fxge/cfx_defaultrenderdevice.h"
#include "core/fxge/cfx_renderdevice.h"
#include "core/fxge/cfx_unicodeencoding.h"
#include "core/fxge/dib/cfx_dibitmap.h"
#include "third_party/base/ptr_util.h"
#include "xfa/fxgraphics/cxfa_gecolor.h"
#include "xfa/fxgraphics/cxfa_gepath.h"
#include "xfa/fxgraphics/cxfa_gepattern.h"
#include "xfa/fxgraphics/cxfa_geshading.h"
namespace {
struct FX_HATCHDATA {
int32_t width;
int32_t height;
uint8_t maskBits[64];
};
const FX_HATCHDATA kHatchBitmapData[] = {
{16, // Horizontal
16,
{
0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}},
{16, // Vertical
16,
{
0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00,
0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80,
0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80,
0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00,
0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00,
0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00,
}},
{16, // ForwardDiagonal
16,
{
0x80, 0x80, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x20, 0x20, 0x00,
0x00, 0x10, 0x10, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04,
0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x80,
0x80, 0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x20, 0x20, 0x00, 0x00,
0x10, 0x10, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00,
0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x00, 0x00,
}},
{16, // BackwardDiagonal
16,
{
0x01, 0x01, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x04, 0x04, 0x00,
0x00, 0x08, 0x08, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x20, 0x20,
0x00, 0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x01,
0x01, 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x04, 0x04, 0x00, 0x00,
0x08, 0x08, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x20, 0x20, 0x00,
0x00, 0x40, 0x40, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00,
}},
{16, // Cross
16,
{
0xff, 0xff, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00,
0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80,
0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0xff,
0xff, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00,
0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00,
0x00, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x00, 0x00,
}},
{16, // DiagonalCross
16,
{
0x81, 0x81, 0x00, 0x00, 0x42, 0x42, 0x00, 0x00, 0x24, 0x24, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x24, 0x24,
0x00, 0x00, 0x42, 0x42, 0x00, 0x00, 0x81, 0x81, 0x00, 0x00, 0x81,
0x81, 0x00, 0x00, 0x42, 0x42, 0x00, 0x00, 0x24, 0x24, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x24, 0x24, 0x00,
0x00, 0x42, 0x42, 0x00, 0x00, 0x81, 0x81, 0x00, 0x00,
}},
};
const FX_HATCHDATA kHatchPlaceHolder = {
0,
0,
{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}};
const FX_HATCHDATA& GetHatchBitmapData(size_t index) {
return index < FX_ArraySize(kHatchBitmapData) ? kHatchBitmapData[index]
: kHatchPlaceHolder;
}
} // namespace
CXFA_Graphics::CXFA_Graphics(CFX_RenderDevice* renderDevice)
: m_renderDevice(renderDevice) {
ASSERT(m_renderDevice);
}
CXFA_Graphics::~CXFA_Graphics() = default;
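// SaveGraphState/RestoreGraphState mirror the render device's own state
// stack while also snapshotting the CXFA-level TInfo (graph state, CTM,
// colors) so the two stay in sync.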
void CXFA_Graphics::SaveGraphState() {
m_renderDevice->SaveState();
m_infoStack.push_back(pdfium::MakeUnique<TInfo>(m_info));
}
void CXFA_Graphics::RestoreGraphState() {
m_renderDevice->RestoreState(false);
if (m_infoStack.empty())
return;
m_info = *m_infoStack.back();
m_infoStack.pop_back();
}
void CXFA_Graphics::SetLineCap(CFX_GraphStateData::LineCap lineCap) {
m_info.graphState.m_LineCap = lineCap;
}
void CXFA_Graphics::SetLineDash(float dashPhase,
const float* dashArray,
size_t dashCount) {
ASSERT(dashArray);
ASSERT(dashCount);
float scale = m_info.isActOnDash ? m_info.graphState.m_LineWidth : 1.0f;
m_info.graphState.m_DashPhase = dashPhase;
m_info.graphState.m_DashArray.resize(dashCount);
for (size_t i = 0; i < dashCount; i++)
m_info.graphState.m_DashArray[i] = dashArray[i] * scale;
}
void CXFA_Graphics::SetSolidLineDash() {
m_info.graphState.m_DashArray.clear();
}
void CXFA_Graphics::SetLineWidth(float lineWidth) {
m_info.graphState.m_LineWidth = lineWidth;
}
void CXFA_Graphics::EnableActOnDash() {
m_info.isActOnDash = true;
}
void CXFA_Graphics::SetStrokeColor(const CXFA_GEColor& color) {
m_info.strokeColor = color;
}
void CXFA_Graphics::SetFillColor(const CXFA_GEColor& color) {
m_info.fillColor = color;
}
void CXFA_Graphics::StrokePath(CXFA_GEPath* path, const CFX_Matrix* matrix) {
if (path)
RenderDeviceStrokePath(path, matrix);
}
void CXFA_Graphics::FillPath(CXFA_GEPath* path,
FX_FillMode fillMode,
const CFX_Matrix* matrix) {
if (path)
RenderDeviceFillPath(path, fillMode, matrix);
}
void CXFA_Graphics::ConcatMatrix(const CFX_Matrix* matrix) {
if (matrix)
m_info.CTM.Concat(*matrix);
}
const CFX_Matrix* CXFA_Graphics::GetMatrix() const {
return &m_info.CTM;
}
CFX_RectF CXFA_Graphics::GetClipRect() const {
FX_RECT r = m_renderDevice->GetClipBox();
return CFX_RectF(r.left, r.top, r.Width(), r.Height());
}
void CXFA_Graphics::SetClipRect(const CFX_RectF& rect) {
m_renderDevice->SetClip_Rect(
FX_RECT(FXSYS_roundf(rect.left), FXSYS_roundf(rect.top),
FXSYS_roundf(rect.right()), FXSYS_roundf(rect.bottom())));
}
CFX_RenderDevice* CXFA_Graphics::GetRenderDevice() {
return m_renderDevice;
}
void CXFA_Graphics::RenderDeviceStrokePath(const CXFA_GEPath* path,
const CFX_Matrix* matrix) {
if (m_info.strokeColor.GetType() != CXFA_GEColor::Solid)
return;
CFX_Matrix m = m_info.CTM;
if (matrix)
m.Concat(*matrix);
m_renderDevice->DrawPath(path->GetPathData(), &m, &m_info.graphState, 0x0,
m_info.strokeColor.GetArgb(), 0);
}
void CXFA_Graphics::RenderDeviceFillPath(const CXFA_GEPath* path,
FX_FillMode fillMode,
const CFX_Matrix* matrix) {
CFX_Matrix m = m_info.CTM;
if (matrix)
m.Concat(*matrix);
switch (m_info.fillColor.GetType()) {
case CXFA_GEColor::Solid:
m_renderDevice->DrawPath(path->GetPathData(), &m, &m_info.graphState,
m_info.fillColor.GetArgb(), 0x0, fillMode);
return;
case CXFA_GEColor::Pattern:
FillPathWithPattern(path, fillMode, m);
return;
case CXFA_GEColor::Shading:
FillPathWithShading(path, fillMode, m);
return;
default:
return;
}
}
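// Hatch-pattern fill: copy the device bitmap, flood the path's bounding box
// with the pattern's background color, tile the 1bpp hatch mask in the
// foreground color, then clip to the path and blit the result back.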
void CXFA_Graphics::FillPathWithPattern(const CXFA_GEPath* path,
FX_FillMode fillMode,
const CFX_Matrix& matrix) {
RetainPtr<CFX_DIBitmap> bitmap = m_renderDevice->GetBitmap();
int32_t width = bitmap->GetWidth();
int32_t height = bitmap->GetHeight();
auto bmp = pdfium::MakeRetain<CFX_DIBitmap>();
bmp->Create(width, height, FXDIB_Argb);
m_renderDevice->GetDIBits(bmp, 0, 0);
FX_HatchStyle hatchStyle = m_info.fillColor.GetPattern()->m_hatchStyle;
const FX_HATCHDATA& data =
GetHatchBitmapData(static_cast<size_t>(hatchStyle));
auto mask = pdfium::MakeRetain<CFX_DIBitmap>();
mask->Create(data.width, data.height, FXDIB_1bppMask);
memcpy(mask->GetBuffer(), data.maskBits, mask->GetPitch() * data.height);
const CFX_FloatRect rectf =
matrix.TransformRect(path->GetPathData()->GetBoundingBox());
const FX_RECT rect = rectf.ToRoundedFxRect();
CFX_DefaultRenderDevice device;
device.Attach(bmp, false, nullptr, false);
device.FillRect(rect, m_info.fillColor.GetPattern()->m_backArgb);
for (int32_t j = rect.bottom; j < rect.top; j += mask->GetHeight()) {
for (int32_t i = rect.left; i < rect.right; i += mask->GetWidth())
device.SetBitMask(mask, i, j, m_info.fillColor.GetPattern()->m_foreArgb);
}
CFX_RenderDevice::StateRestorer restorer(m_renderDevice);
m_renderDevice->SetClip_PathFill(path->GetPathData(), &matrix, fillMode);
SetDIBitsWithMatrix(bmp, CFX_Matrix());
}
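// Gradient fill: rasterize an axial or radial shading into a copy of the
// device bitmap one pixel at a time, then clip to the path and blit it back.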
void CXFA_Graphics::FillPathWithShading(const CXFA_GEPath* path,
FX_FillMode fillMode,
const CFX_Matrix& matrix) {
RetainPtr<CFX_DIBitmap> bitmap = m_renderDevice->GetBitmap();
int32_t width = bitmap->GetWidth();
int32_t height = bitmap->GetHeight();
float start_x = m_info.fillColor.GetShading()->m_beginPoint.x;
float start_y = m_info.fillColor.GetShading()->m_beginPoint.y;
float end_x = m_info.fillColor.GetShading()->m_endPoint.x;
float end_y = m_info.fillColor.GetShading()->m_endPoint.y;
auto bmp = pdfium::MakeRetain<CFX_DIBitmap>();
bmp->Create(width, height, FXDIB_Argb);
m_renderDevice->GetDIBits(bmp, 0, 0);
int32_t pitch = bmp->GetPitch();
bool result = false;
switch (m_info.fillColor.GetShading()->m_type) {
case FX_SHADING_Axial: {
float x_span = end_x - start_x;
float y_span = end_y - start_y;
float axis_len_square = (x_span * x_span) + (y_span * y_span);
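// Project each pixel onto the gradient axis; the normalized projection,
// clamped to [0, 1] when extension is enabled, selects the color step.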
for (int32_t row = 0; row < height; row++) {
uint32_t* dib_buf =
reinterpret_cast<uint32_t*>(bmp->GetBuffer() + row * pitch);
for (int32_t column = 0; column < width; column++) {
float scale = 0.0f;
if (axis_len_square) {
float y = static_cast<float>(row);
float x = static_cast<float>(column);
scale = (((x - start_x) * x_span) + ((y - start_y) * y_span)) /
axis_len_square;
if (std::isnan(scale) || scale < 0.0f) {
if (!m_info.fillColor.GetShading()->m_isExtendedBegin)
continue;
scale = 0.0f;
} else if (scale > 1.0f) {
if (!m_info.fillColor.GetShading()->m_isExtendedEnd)
continue;
scale = 1.0f;
}
}
int32_t index = static_cast<int32_t>(scale * (FX_SHADING_Steps - 1));
dib_buf[column] = m_info.fillColor.GetShading()->m_argbArray[index];
}
}
result = true;
break;
}
case FX_SHADING_Radial: {
float start_r = m_info.fillColor.GetShading()->m_beginRadius;
float end_r = m_info.fillColor.GetShading()->m_endRadius;
float a = ((start_x - end_x) * (start_x - end_x)) +
((start_y - end_y) * (start_y - end_y)) -
((start_r - end_r) * (start_r - end_r));
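// For each pixel, solve the quadratic a*s^2 + b*s + c = 0 for the gradient
// parameter s of the circle passing through it, preferring the larger root
// where the shading's extension settings allow.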
for (int32_t row = 0; row < height; row++) {
uint32_t* dib_buf =
reinterpret_cast<uint32_t*>(bmp->GetBuffer() + row * pitch);
for (int32_t column = 0; column < width; column++) {
float x = static_cast<float>(column);
float y = static_cast<float>(row);
float b = -2 * (((x - start_x) * (end_x - start_x)) +
((y - start_y) * (end_y - start_y)) +
(start_r * (end_r - start_r)));
float c = ((x - start_x) * (x - start_x)) +
((y - start_y) * (y - start_y)) - (start_r * start_r);
float s;
if (a == 0) {
s = -c / b;
} else {
float b2_4ac = (b * b) - 4 * (a * c);
if (b2_4ac < 0) {
continue;
}
float root = std::sqrt(b2_4ac);
float s1, s2;
if (a > 0) {
s1 = (-b - root) / (2 * a);
s2 = (-b + root) / (2 * a);
} else {
s2 = (-b - root) / (2 * a);
s1 = (-b + root) / (2 * a);
}
if (s2 <= 1.0f || m_info.fillColor.GetShading()->m_isExtendedEnd) {
s = s2;
} else {
s = s1;
}
if (start_r + s * (end_r - start_r) < 0) {
continue;
}
}
if (std::isnan(s) || s < 0.0f) {
if (!m_info.fillColor.GetShading()->m_isExtendedBegin)
continue;
s = 0.0f;
}
if (s > 1.0f) {
if (!m_info.fillColor.GetShading()->m_isExtendedEnd)
continue;
s = 1.0f;
}
int32_t index = static_cast<int32_t>(s * (FX_SHADING_Steps - 1));
dib_buf[column] = m_info.fillColor.GetShading()->m_argbArray[index];
}
}
result = true;
break;
}
default: {
result = false;
break;
}
}
if (result) {
CFX_RenderDevice::StateRestorer restorer(m_renderDevice);
m_renderDevice->SetClip_PathFill(path->GetPathData(), &matrix, fillMode);
SetDIBitsWithMatrix(bmp, matrix);
}
}
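// Blit a bitmap through an arbitrary matrix: scale to the source dimensions,
// flip vertically, transform, and place the result at the offset reported by
// TransformTo. The identity case is a plain SetDIBits.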
void CXFA_Graphics::SetDIBitsWithMatrix(const RetainPtr<CFX_DIBBase>& source,
const CFX_Matrix& matrix) {
if (matrix.IsIdentity()) {
m_renderDevice->SetDIBits(source, 0, 0);
} else {
CFX_Matrix m(static_cast<float>(source->GetWidth()), 0, 0,
static_cast<float>(source->GetHeight()), 0, 0);
m.Concat(matrix);
int32_t left;
int32_t top;
RetainPtr<CFX_DIBitmap> bmp1 = source->FlipImage(false, true);
RetainPtr<CFX_DIBitmap> bmp2 = bmp1->TransformTo(m, &left, &top);
m_renderDevice->SetDIBits(bmp2, left, top);
}
}
CXFA_Graphics::TInfo::TInfo()
: isActOnDash(false), strokeColor(nullptr), fillColor(nullptr) {}
CXFA_Graphics::TInfo::TInfo(const TInfo& info)
: graphState(info.graphState),
CTM(info.CTM),
isActOnDash(info.isActOnDash),
strokeColor(info.strokeColor),
fillColor(info.fillColor) {}
CXFA_Graphics::TInfo& CXFA_Graphics::TInfo::operator=(const TInfo& other) {
graphState = other.graphState;
CTM = other.CTM;
isActOnDash = other.isActOnDash;
strokeColor = other.strokeColor;
fillColor = other.fillColor;
return *this;
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "ip/ip.hpp"
namespace ip {
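// Naive reference forward inner product:
// dst(mb, oc) = bias(oc) [when FLAG_BIA] + sum over ic, ih, iw of
// src(mb, ic, ih, iw) * wei(oc, ic, ih, iw).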
void compute_ref_fwd(const prb_t *p, dnn_mem_t &src_m,
dnn_mem_t &wei_m, dnn_mem_t &bia_m, dnn_mem_t &dst_m) {
auto ker = [&](float &d, int mb, int oc) {
for (int ic = 0; ic < p->ic; ++ic) {
for (int ih = 0; ih < p->ih; ++ih) {
for (int iw = 0; iw < p->iw; ++iw) {
size_t src_off = src_off_f(p, mb, ic, ih, iw);
size_t wei_off = wei_off_f(p, oc, ic, ih, iw);
d += ((float*)src_m)[src_off] * ((float*)wei_m)[wei_off];
}
}
}
};
# pragma omp parallel for collapse(2)
for (int mb = 0; mb < p->mb; ++mb) {
for (int oc = 0; oc < p->oc; ++oc) {
size_t dst_off = dst_off_f(p, mb, oc);
size_t bia_off = bia_off_f(p, oc);
float &d = ((float*)dst_m)[dst_off];
d = (p->dir & FLAG_BIA) ? ((float*)bia_m)[bia_off] : 0;
ker(d, mb, oc);
}
}
}
#if 0
void compute_ref_convolution_bwd_d(const prb_t *p, dnn_mem_t &diff_src_m,
dnn_mem_t &wei_m, dnn_mem_t &diff_dst_m) {
auto ker = [=](float &ds, int g, int mb, int ic, int ih, int iw) {
for (int oc = 0; oc < p->oc/p->g; ++oc) {
for (int kh = 0; kh < p->kh; ++kh) {
int oh = ih - kh + p->ph;
if (oh < 0 || oh % p->sh) continue;
oh /= p->sh;
if (oh >= p->oh) continue;
for (int kw = 0; kw < p->kw; ++kw) {
int ow = iw - kw + p->pw;
if (ow < 0 || ow % p->sw) continue;
ow /= p->sw;
if (ow >= p->ow) continue;
size_t dst_off = dst_off_f(p, mb, g, oc, oh, ow);
size_t wei_off = wei_off_f(p, g, oc, ic, kh, kw);
ds += ((float*)diff_dst_m)[dst_off]
* ((float*)wei_m)[wei_off];
}
}
}
};
# pragma omp parallel for collapse(5)
for (int g = 0; g < p->g; ++g) {
for (int mb = 0; mb < p->mb; ++mb) {
for (int ic = 0; ic < p->ic/p->g; ++ic) {
for (int ih = 0; ih < p->ih; ++ih) {
for (int iw = 0; iw < p->iw; ++iw) {
size_t src_off = src_off_f(p, mb, g, ic, ih, iw);
float &ds = ((float*)diff_src_m)[src_off];
ds = 0;
ker(ds, g, mb, ic, ih, iw);
}
}
}
}
}
}
void compute_ref_convolution_bwd_w(const prb_t *p, dnn_mem_t &src_m,
dnn_mem_t &diff_wei_m, dnn_mem_t &diff_bia_m, dnn_mem_t &diff_dst_m) {
auto ker = [=](float &dw, int g, int oc, int ic, int kh, int kw) {
for (int mb = 0; mb < p->mb; ++mb) {
for (int oh = 0; oh < p->oh; ++oh) {
for (int ow = 0; ow < p->ow; ++ow) {
const int ih = oh * p->sh - p->ph + kh;
const int iw = ow * p->sw - p->pw + kw;
if (ih < 0 || ih >= p->ih) continue;
if (iw < 0 || iw >= p->iw) continue;
size_t src_off = src_off_f(p, mb, g, ic, ih, iw);
size_t dst_off = dst_off_f(p, mb, g, oc, oh, ow);
dw += ((float*)diff_dst_m)[dst_off]
* ((float*)src_m)[src_off];
}
}
}
};
# pragma omp parallel for collapse(5)
for (int g = 0; g < p->g; ++g) {
for (int oc = 0; oc < p->oc/p->g; ++oc) {
for (int ic = 0; ic < p->ic/p->g; ++ic) {
for (int kh = 0; kh < p->kh; ++kh) {
for (int kw = 0; kw < p->kw; ++kw) {
size_t wei_off = wei_off_f(p, g, oc, ic, kh, kw);
float &dw = ((float*)diff_wei_m)[wei_off];
dw = 0;
ker(dw, g, oc, ic, kh, kw);
}
}
}
}
}
if (!(p->dir & FLAG_BIA)) return;
# pragma omp parallel for collapse(2)
for (int g = 0; g < p->g; ++g) {
for (int oc = 0; oc < p->oc/p->g; ++oc) {
size_t bia_off = bia_off_f(p, g, oc);
float &db = ((float*)diff_bia_m)[bia_off];
db = 0;
for (int mb = 0; mb < p->mb; ++mb) {
for (int oh = 0; oh < p->oh; ++oh) {
for (int ow = 0; ow < p->ow; ++ow) {
size_t dst_off = dst_off_f(p, mb, g, oc, oh, ow);
db += ((float*)diff_dst_m)[dst_off];
}
}
}
}
}
}
#endif
}
|
//=================================================================================================
/*!
// \file src/mathtest/smatdmatmult/DCaD3x3b.cpp
// \brief Source file for the DCaD3x3b sparse matrix/dense matrix multiplication math test
//
// Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/DiagonalMatrix.h>
#include <blaze/math/StaticMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/smatdmatmult/OperationTest.h>
#include <blazetest/system/MathTest.h>
//=================================================================================================
//
// MAIN FUNCTION
//
//=================================================================================================
//*************************************************************************************************
int main()
{
std::cout << " Running 'DCaD3x3b'..." << std::endl;
using blazetest::mathtest::TypeA;
using blazetest::mathtest::TypeB;
try
{
// Matrix type definitions
using DCa = blaze::DiagonalMatrix< blaze::CompressedMatrix<TypeA> >;
using D3x3b = blaze::DiagonalMatrix< blaze::StaticMatrix<TypeB,3UL,3UL> >;
// Creator type definitions
using CDCa = blazetest::Creator<DCa>;
using CD3x3b = blazetest::Creator<D3x3b>;
// Running the tests
for( size_t i=0UL; i<=3UL; ++i ) {
RUN_SMATDMATMULT_OPERATION_TEST( CDCa( 3UL, i ), CD3x3b() );
}
}
catch( std::exception& ex ) {
std::cerr << "\n\n ERROR DETECTED during sparse matrix/dense matrix multiplication:\n"
<< ex.what() << "\n";
return EXIT_FAILURE;
}
return EXIT_SUCCESS;
}
//*************************************************************************************************
|
/**
* Copyright (C) 2016-2022 Xilinx, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may
* not use this file except in compliance with the License. A copy of the
* License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (C) 2015 Xilinx, Inc
*/
#include "shim.h"
#include "core/common/system.h"
#include "core/common/device.h"
int xclExportBO(xclDeviceHandle handle, unsigned int boHandle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclExportBO(boHandle);
}
unsigned int xclImportBO(xclDeviceHandle handle, int boGlobalHandle, unsigned flags)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclImportBO(boGlobalHandle,flags);
}
int xclCloseExportHandle(int ehdl)
{
// Implement per hw_em requirements
return 0;
}
int xclCopyBO(xclDeviceHandle handle, unsigned int dst_boHandle, unsigned int src_boHandle, size_t size, size_t dst_offset, size_t src_offset)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclCopyBO(dst_boHandle, src_boHandle, size, dst_offset, src_offset) : -ENODEV;
}
int xclResetDevice(xclDeviceHandle handle, xclResetKind kind)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->resetProgram();
}
int xclReClock2(xclDeviceHandle handle, unsigned short region, const unsigned short *targetFreqMHz)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
drv->resetProgram();
return 0;
}
int xclLockDevice(xclDeviceHandle handle)
{
return 0;
}
int xclUnlockDevice(xclDeviceHandle handle)
{
return 0;
}
size_t xclReadBO(xclDeviceHandle handle, unsigned int boHandle, void *dst,
size_t size, size_t skip)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -EINVAL;
return drv->xclReadBO(boHandle, dst, size, skip);
}
unsigned int xclAllocBO(xclDeviceHandle handle, size_t size, int unused, unsigned flags)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -EINVAL;
return drv->xclAllocBO(size, unused, flags);
}
void *xclMapBO(xclDeviceHandle handle, unsigned int boHandle, bool write)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return NULL;
return drv->xclMapBO(boHandle, write);
}
int xclUnmapBO(xclDeviceHandle handle, unsigned int boHandle, void* addr)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -EINVAL;
return drv->xclUnmapBO(boHandle, addr);
}
int xclSyncBO(xclDeviceHandle handle, unsigned int boHandle, xclBOSyncDirection dir, size_t size, size_t offset)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -EINVAL;
return drv->xclSyncBO(boHandle, dir , size, offset);
}
size_t xclWriteBO(xclDeviceHandle handle, unsigned int boHandle, const void *src,
size_t size, size_t seek)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -EINVAL;
return drv->xclWriteBO(boHandle, src, size, seek);
}
void xclFreeBO(xclDeviceHandle handle, unsigned int boHandle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return;
drv->xclFreeBO(boHandle);
}
int xclGetBOProperties(xclDeviceHandle handle, unsigned int boHandle, xclBOProperties *properties)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclGetBOProperties(boHandle, properties);
}
int xclExecBuf(xclDeviceHandle handle, unsigned int cmdBO)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclExecBuf(cmdBO);
}
int xclExecBufWithWaitList(xclDeviceHandle handle, unsigned int cmdBO, size_t num_bo_in_wait_list, unsigned int *bo_wait_list)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclExecBuf(cmdBO,num_bo_in_wait_list,bo_wait_list);
}
// Define the following two functions since they get called during scheduler init
int xclOpenContext(xclDeviceHandle handle, const uuid_t xclbinId, unsigned int ipIndex, bool shared)
{
return 0;
}
int xclCloseContext(xclDeviceHandle handle, const uuid_t xclbinId, unsigned ipIndex)
{
return 0;
}
int xclRegisterEventNotify(xclDeviceHandle handle, unsigned int userInterrupt, int fd)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclRegisterEventNotify(userInterrupt, fd) ;
}
int xclExecWait(xclDeviceHandle handle, int timeoutMilliSec)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclExecWait(timeoutMilliSec) ;
}
int xclUpgradeFirmware(xclDeviceHandle handle, const char *fileName)
{
return 0;
}
int xclBootFPGA(xclDeviceHandle handle)
{
return 0;
}
unsigned xclProbe()
{
if(!xclemulation::isXclEmulationModeHwEmuOrSwEmu())
{
std::string initMsg ="ERROR: [HW-EMU 08] Please set XCL_EMULATION_MODE to \"hw_emu\" to run hardware emulation. ";
std::cout<<initMsg<<std::endl;
return 0;
}
static int xclProbeCallCnt=0;
static unsigned int deviceIndex = 0;
//Ensure xclProbe is called only once, as we load all the devices in a single go
//xclProbe runs during library load, so no explicit call is needed
if (xclProbeCallCnt == 1) {
return deviceIndex;
}
std::vector<std::tuple<xclDeviceInfo2, std::list<xclemulation::DDRBank>, bool, bool, FeatureRomHeader, boost::property_tree::ptree> > devicesInfo;
getDevicesInfo(devicesInfo);
if(devicesInfo.size() == 0)
return 1; // old behavior
for(auto &it:devicesInfo)
{
xclDeviceInfo2 info = std::get<0>(it);
std::list<xclemulation::DDRBank> DDRBankList = std::get<1>(it);
bool bUnified = std::get<2>(it);
bool bXPR = std::get<3>(it);
FeatureRomHeader fRomHeader = std::get<4>(it);
boost::property_tree::ptree platformData = std::get<5>(it);
xclhwemhal2::HwEmShim *handle = new xclhwemhal2::HwEmShim(deviceIndex, info, DDRBankList, bUnified, bXPR, fRomHeader, platformData);
xclhwemhal2::devices[deviceIndex++] = handle;
}
xclProbeCallCnt++;
return deviceIndex;
}
unsigned int xclAllocUserPtrBO(xclDeviceHandle handle, void *userptr, size_t size, unsigned flags)
{
//std::cout << "xclAllocUserPtrBO called.. " << handle << std::endl;
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return mNullBO;
return drv->xclAllocUserPtrBO(userptr,size,flags);
}
xclDeviceHandle xclOpen(unsigned deviceIndex, const char *logfileName, xclVerbosityLevel level)
{
xclDeviceInfo2 info;
std::strcpy(info.mName, "xilinx:pcie-hw-em:7v3:1.0");
info.mMagic = 0X586C0C6C;
info.mHALMajorVersion = XCLHAL_MAJOR_VER;
info.mHALMinorVersion = XCLHAL_MINOR_VER;
info.mVendorId = 0x10ee;
info.mDeviceId = 0x0000;
info.mSubsystemVendorId = 0x0000;
info.mDeviceVersion = 0x0000;
info.mDDRSize = xclemulation::MEMSIZE_4G;
info.mDataAlignment = DDR_BUFFER_ALIGNMENT;
info.mDDRBankCount = 1;
for(unsigned int i = 0; i < 4 ;i++)
info.mOCLFrequency[i] = 200;
std::list<xclemulation::DDRBank> DDRBankList;
xclemulation::DDRBank bank;
bank.ddrSize = xclemulation::MEMSIZE_4G;
DDRBankList.push_back(bank);
FeatureRomHeader fRomHeader;
std::memset(&fRomHeader, 0, sizeof(FeatureRomHeader));
boost::property_tree::ptree platformData;
xclhwemhal2::HwEmShim *handle = NULL;
bool bDefaultDevice = false;
std::map<unsigned int, xclhwemhal2::HwEmShim*>::iterator it = xclhwemhal2::devices.find(deviceIndex);
if(it != xclhwemhal2::devices.end())
{
handle = (*it).second;
}
else
{
handle = new xclhwemhal2::HwEmShim(deviceIndex, info, DDRBankList, false, false, fRomHeader, platformData);
bDefaultDevice = true;
}
if (!xclhwemhal2::HwEmShim::handleCheck(handle)) {
delete handle;
handle = 0;
}
if(handle)
{
handle->xclOpen(logfileName);
if(bDefaultDevice)
{
std::string sDummyDeviceMsg ="CRITICAL WARNING: [HW-EMU 08-0] Unable to find emconfig.json. Using default device \"xilinx:pcie-hw-em:7v3:1.0\"";
handle->logMessage(sDummyDeviceMsg);
}
}
return (xclDeviceHandle)handle;
}
void xclClose(xclDeviceHandle handle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return ;
try {
drv->xclClose();
if (xclhwemhal2::HwEmShim::handleCheck(handle) && xclhwemhal2::devices.size() == 0)
delete ((xclhwemhal2::HwEmShim *)handle);
}
catch (const std::exception& ex) {
xrt_core::send_exception_message(ex.what());
}
}
int xclLoadXclBin(xclDeviceHandle handle, const xclBin *buffer)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
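// With DISABLE_DOWNLOAD_XCLBIN the bitstream download and scheduler init are
// skipped; the xclbin metadata is still registered with the core library.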
#ifdef DISABLE_DOWNLOAD_XCLBIN
int ret = 0;
#else
auto ret = drv->xclLoadXclBin(buffer);
#endif
if (!ret) {
auto device = xrt_core::get_userpf_device(drv);
device->register_axlf(buffer);
#ifndef DISABLE_DOWNLOAD_XCLBIN
ret = xrt_core::scheduler::init(handle, buffer);
#endif
}
return ret;
}
size_t xclWrite(xclDeviceHandle handle, xclAddressSpace space, uint64_t offset, const void *hostBuf, size_t size)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclWrite(space, offset, hostBuf, size);
}
size_t xclRead(xclDeviceHandle handle, xclAddressSpace space, uint64_t offset, void *hostBuf, size_t size)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclRead(space, offset, hostBuf, size);
}
int xclGetDeviceInfo2(xclDeviceHandle handle, xclDeviceInfo2 *info)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclGetDeviceInfo2(info);
}
unsigned int xclVersion ()
{
return 2;
}
size_t xclGetDeviceTimestamp(xclDeviceHandle handle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclGetDeviceTimestamp() : -1;
}
ssize_t xclUnmgdPwrite(xclDeviceHandle handle, unsigned flags, const void *buf, size_t count, uint64_t offset)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclUnmgdPwrite(flags, buf, count, offset) : -ENODEV;
}
ssize_t xclUnmgdPread(xclDeviceHandle handle, unsigned flags, void *buf, size_t count, uint64_t offset)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclUnmgdPread(flags, buf, count, offset) : -ENODEV;
}
//QDMA Support
//
int xclCreateWriteQueue(xclDeviceHandle handle, xclQueueContext *q_ctx, uint64_t *q_hdl)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclCreateWriteQueue(q_ctx, q_hdl) : -ENODEV;
}
int xclCreateReadQueue(xclDeviceHandle handle, xclQueueContext *q_ctx, uint64_t *q_hdl)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclCreateReadQueue(q_ctx, q_hdl) : -ENODEV;
}
int xclDestroyQueue(xclDeviceHandle handle, uint64_t q_hdl)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclDestroyQueue(q_hdl) : -ENODEV;
}
void *xclAllocQDMABuf(xclDeviceHandle handle, size_t size, uint64_t *buf_hdl)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclAllocQDMABuf(size, buf_hdl) : NULL;
}
int xclFreeQDMABuf(xclDeviceHandle handle, uint64_t buf_hdl)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclFreeQDMABuf(buf_hdl) : -ENODEV;
}
ssize_t xclWriteQueue(xclDeviceHandle handle, uint64_t q_hdl, xclQueueRequest *wr)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclWriteQueue(q_hdl, wr) : -ENODEV;
}
ssize_t xclReadQueue(xclDeviceHandle handle, uint64_t q_hdl, xclQueueRequest *wr)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclReadQueue(q_hdl, wr) : -ENODEV;
}
int xclPollCompletion(xclDeviceHandle handle, int min_compl, int max_compl, xclReqCompletion *comps, int* actual, int timeout)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclPollCompletion(min_compl, max_compl, comps, actual, timeout) : -ENODEV;
}
size_t xclDebugReadIPStatus(xclDeviceHandle handle, xclDebugReadType type, void* debugResults)
{
return 0;
}
double xclGetDeviceClockFreqMHz(xclDeviceHandle handle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclGetDeviceClockFreqMHz();
}
double xclGetReadMaxBandwidthMBps(xclDeviceHandle handle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclGetReadMaxBandwidthMBps();
}
double xclGetWriteMaxBandwidthMBps(xclDeviceHandle handle)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
if (!drv)
return -1;
return drv->xclGetWriteMaxBandwidthMBps();
}
/*
* API to get number of live processes.
* Applicable only for System Flow as it supports Multiple processes on same device.
* For Hardware Emulation, return 0
*/
uint32_t xclGetNumLiveProcesses(xclDeviceHandle handle)
{
return 0;
}
/*
* API to get path to the debug_ip_layout file. Needs to be implemented
*/
int xclGetDebugIPlayoutPath(xclDeviceHandle handle, char* layoutPath, size_t size)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclGetDebugIPlayoutPath(layoutPath, size) : -ENODEV;
}
int xclGetTraceBufferInfo(xclDeviceHandle handle, uint32_t nSamples, uint32_t& traceSamples, uint32_t& traceBufSz)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclGetTraceBufferInfo(nSamples, traceSamples, traceBufSz) : -ENODEV;
}
int xclReadTraceData(xclDeviceHandle handle, void* traceBuf, uint32_t traceBufSz, uint32_t numSamples, uint64_t ipBaseAddress, uint32_t& wordsPerSample)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclReadTraceData(traceBuf, traceBufSz, numSamples, ipBaseAddress, wordsPerSample) : -ENODEV;
}
int xclLogMsg(xclDeviceHandle handle, xrtLogMsgLevel level, const char* tag, const char* format, ...)
{
va_list args;
va_start(args, format);
int ret = xclhwemhal2::HwEmShim::xclLogMsg(level, tag, format, args);
va_end(args);
return ret;
}
int xclCreateProfileResults(xclDeviceHandle handle, ProfileResults** results)
{
return 0;
}
int xclGetProfileResults(xclDeviceHandle handle, ProfileResults* results)
{
return 0;
}
int xclDestroyProfileResults(xclDeviceHandle handle, ProfileResults* results)
{
return 0;
}
void
xclGetDebugIpLayout(xclDeviceHandle hdl, char* buffer, size_t size, size_t* size_ret)
{
if(size_ret)
*size_ret = 0;
return;
}
int xclGetSubdevPath(xclDeviceHandle handle, const char* subdev,
uint32_t idx, char* path, size_t size)
{
return 0;
}
//Mapped CU register space for xclRegRead/Write()
int xclRegWrite(xclDeviceHandle handle, uint32_t cu_index, uint32_t offset, uint32_t data)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclRegWrite(cu_index, offset, data) : -ENODEV;
}
int xclRegRead(xclDeviceHandle handle, uint32_t cu_index, uint32_t offset, uint32_t *datap)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclRegRead(cu_index, offset, datap) : -ENODEV;
}
int
xclP2pEnable(xclDeviceHandle handle, bool enable, bool force)
{
return -ENOSYS;
}
int
xclCmaEnable(xclDeviceHandle handle, bool enable, uint64_t force)
{
return -ENOSYS;
}
int
xclInternalResetDevice(xclDeviceHandle handle, xclResetKind kind)
{
return -ENOSYS;
}
int
xclUpdateSchedulerStat(xclDeviceHandle handle)
{
return -ENOSYS;
}
//Get CU index from IP_LAYOUT section for corresponding kernel name
int xclIPName2Index(xclDeviceHandle handle, const char *name)
{
xclhwemhal2::HwEmShim *drv = xclhwemhal2::HwEmShim::handleCheck(handle);
return drv ? drv->xclIPName2Index(name) : -ENODEV;
}
|
/*=============================================================================
Library: CppMicroServices
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================================*/
#include "usServiceException.h"
#include <ostream>
US_BEGIN_NAMESPACE
ServiceException::ServiceException(const std::string& msg, const Type& type)
: std::runtime_error(msg), type(type)
{
}
ServiceException::ServiceException(const ServiceException& o)
: std::runtime_error(o), type(o.type)
{
}
ServiceException& ServiceException::operator=(const ServiceException& o)
{
std::runtime_error::operator=(o);
this->type = o.type;
return *this;
}
ServiceException::Type ServiceException::GetType() const
{
return type;
}
US_END_NAMESPACE
US_USE_NAMESPACE
std::ostream& operator<<(std::ostream& os, const ServiceException& exc)
{
return os << "ServiceException: " << exc.what();
}
|
#include <polarbear/systems/systemmanager.hpp>
void SystemManager::AddSystem(std::shared_ptr<System>& system)
{
// Copy here: moving from an lvalue reference would silently empty the caller's pointer.
systems.push_back(system);
}
void SystemManager::AddSystem(std::shared_ptr<System>&& system)
{
systems.push_back(std::move(system));
}
void SystemManager::AddSystem(System*&& system)
{
systems.push_back(std::shared_ptr<System>(system));
}
void SystemManager::AddEntity(Entity& entity)
{
// This is good enough for now, with a mostly static list of entities and components, but in the future this will need some sort of hook on AddComponent/RemoveComponent
auto entity_ptr = std::make_shared<Entity>(std::move(entity));
for (auto &system : systems)
{
system->entity_added(entity_ptr);
}
entities.push_back(entity_ptr);
}
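// Fixed-timestep update: accumulate elapsed wall-clock time and step the
// systems in constant ms_per_update slices, so the simulation advances
// deterministically even when frame times vary (the classic accumulator
// pattern from "fix your timestep").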
void SystemManager::update(ms time_elapsed)
{
accumulated_time += time_elapsed;
while (accumulated_time > ms_per_update)
{
for (auto &system : systems)
{
system->update(ms_per_update);
}
accumulated_time -= ms_per_update;
}
}
void SystemManager::SetUpdateTime(s update_time)
{
ms_per_update = ms(update_time);
}
std::vector<std::shared_ptr<Entity>> SystemManager::GetEntities(std::bitset<max_components> mask)
{
auto matching_entities = std::vector<std::shared_ptr<Entity>>{};
for (auto& entity : entities)
{
if ((mask & entity->component_mask) == mask)
{
matching_entities.push_back(entity);
}
}
return matching_entities;
}
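// GetEntities returns every entity whose component mask is a superset of the
// query: (mask & entity->component_mask) == mask holds exactly when the
// entity owns at least each requested component. Illustrative use, assuming
// hypothetical component id constants and a SystemManager named manager
// (neither is defined in this file):
//   std::bitset<max_components> query;
//   query.set(kPositionComponent);
//   query.set(kVelocityComponent);
//   auto movable = manager.GetEntities(query);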
|
// Copyright (c) 2012-2017, The CryptoNote developers, The Bytecoin developers
// Copyright (c) 2014-2018, The Monero Project
// Copyright (c) 2018, The TurtleCoin Developers
// Copyright (c) 2018, The Plenteum Developers
//
// Please see the included LICENSE file for more information.
#include "FormatTools.h"
#include <cstdio>
#include <ctime>
#include <config/CryptoNoteConfig.h>
#include "CryptoNoteCore/Core.h"
#include "Rpc/CoreRpcServerCommandsDefinitions.h"
#include <boost/format.hpp>
namespace Common {
//--------------------------------------------------------------------------------
std::string get_mining_speed(uint32_t hr) {
if (hr>1e9) return (boost::format("%.2f GH/s") % (hr/1e9)).str();
if (hr>1e6) return (boost::format("%.2f MH/s") % (hr/1e6)).str();
if (hr>1e3) return (boost::format("%.2f KH/s") % (hr/1e3)).str();
return (boost::format("%.0f H/s") % hr).str();
}
//--------------------------------------------------------------------------------
std::string get_sync_percentage(uint64_t height, uint64_t target_height) {
/* Don't divide by zero */
if (height == 0 || target_height == 0)
{
return "0.00";
}
/* So we don't have > 100% */
if (height > target_height)
{
height = target_height;
}
float pc = 100.0f * height / target_height;
if (height < target_height && pc > 99.99f) {
pc = 99.99f; // to avoid 100% when not fully synced
}
return (boost::format("%.2f") % pc).str();
}
enum ForkStatus { UpToDate, ForkLater, ForkSoonReady, ForkSoonNotReady, OutOfDate };
ForkStatus get_fork_status(uint64_t height, std::vector<uint64_t> upgrade_heights, uint64_t supported_height)
{
/* Allow fork heights to be empty */
if (upgrade_heights.empty())
{
return UpToDate;
}
uint64_t next_fork = 0;
for (auto upgrade : upgrade_heights)
{
/* We have hit an upgrade already that the user cannot support */
if (height >= upgrade && supported_height < upgrade)
{
return OutOfDate;
}
/* Get the next fork height */
if (upgrade > height)
{
next_fork = upgrade;
break;
}
}
float days = static_cast<float>(next_fork - height) / CryptoNote::parameters::EXPECTED_NUMBER_OF_BLOCKS_PER_DAY;
/* Next fork is < 14 days away */
if (days < 14)
{
/* Software doesn't support the next fork yet */
if (supported_height < next_fork)
{
return ForkSoonNotReady;
}
else
{
return ForkSoonReady;
}
}
if (height > next_fork)
{
return UpToDate;
}
return ForkLater;
}
std::string get_fork_time(uint64_t height, std::vector<uint64_t> upgrade_heights)
{
uint64_t next_fork = 0;
for (auto upgrade : upgrade_heights)
{
/* Get the next fork height */
if (upgrade > height)
{
next_fork = upgrade;
break;
}
}
float days = static_cast<float>(next_fork - height) / CryptoNote::parameters::EXPECTED_NUMBER_OF_BLOCKS_PER_DAY;
if (height == next_fork)
{
return " (forking now),";
}
else if (days < 1)
{
return (boost::format(" (next fork in %.1f hours),") % (days * 24)).str();
}
else
{
return (boost::format(" (next fork in %.1f days),") % days).str();
}
}
std::string get_update_status(ForkStatus forkStatus, uint64_t height, std::vector<uint64_t> upgrade_heights)
{
switch(forkStatus)
{
case UpToDate:
case ForkLater:
{
return " up to date";
}
case ForkSoonReady:
{
return get_fork_time(height, upgrade_heights) + " up to date";
}
case ForkSoonNotReady:
{
return get_fork_time(height, upgrade_heights) + " update needed";
}
case OutOfDate:
{
return " out of date, likely forked";
}
default:
{
throw std::runtime_error("Unexpected case unhandled");
}
}
}
//--------------------------------------------------------------------------------
std::string get_upgrade_info(uint64_t supported_height, std::vector<uint64_t> upgrade_heights)
{
for (auto upgrade : upgrade_heights)
{
if (upgrade > supported_height)
{
return "The network forked at height " + std::to_string(upgrade) + ", please update your software: " + CryptoNote::LATEST_VERSION_URL;
}
}
/* This shouldn't happen */
return std::string();
}
//--------------------------------------------------------------------------------
std::string get_status_string(CryptoNote::COMMAND_RPC_GET_INFO::response iresp) {
std::stringstream ss;
std::time_t uptime = std::time(nullptr) - iresp.start_time;
auto forkStatus = get_fork_status(iresp.network_height, iresp.upgrade_heights, iresp.supported_height);
ss << "Height: " << iresp.height << "/" << iresp.network_height
<< " (" << get_sync_percentage(iresp.height, iresp.network_height) << "%) "
<< "on " << (iresp.testnet ? "testnet, " : "mainnet, ")
<< (iresp.synced ? "synced, " : "syncing, ")
<< "net hash " << get_mining_speed(iresp.hashrate) << ", "
<< "v" << +iresp.major_version << ","
<< get_update_status(forkStatus, iresp.network_height, iresp.upgrade_heights)
<< ", " << iresp.outgoing_connections_count << "(out)+" << iresp.incoming_connections_count << "(in) connections, "
<< "uptime " << (unsigned int)floor(uptime / 60.0 / 60.0 / 24.0)
<< "d " << (unsigned int)floor(fmod((uptime / 60.0 / 60.0), 24.0))
<< "h " << (unsigned int)floor(fmod((uptime / 60.0), 60.0))
<< "m " << (unsigned int)fmod(uptime, 60.0) << "s";
if (forkStatus == OutOfDate)
{
ss << std::endl << get_upgrade_info(iresp.supported_height, iresp.upgrade_heights);
}
return ss.str();
}
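// Illustrative output of the assembled status line (values are made up):
// "Height: 850000/850000 (100.00%) on mainnet, synced, net hash 12.34 MH/s,
// v5, up to date, 8(out)+3(in) connections, uptime 1d 2h 3m 4s"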
}
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/operators/batch_norm_op.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DDim = framework::DDim;
template <typename DeviceContext, typename T>
class BatchNormXPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto epsilon = ctx.Attr<float>("epsilon");
const auto momentum = ctx.Attr<float>("momentum");
const auto is_test = ctx.Attr<bool>("is_test");
const auto use_global_stats = ctx.Attr<bool>("use_global_stats");
const auto trainable_stats = ctx.Attr<bool>("trainable_statistics");
bool test_mode = is_test && (!trainable_stats);
bool global_stats = test_mode || use_global_stats;
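// In inference (or use_global_stats) mode the kernel normalizes with the
// running mean/variance; otherwise it computes batch statistics and updates
// the running averages with the given momentum.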
const auto& data_layout_str = ctx.Attr<std::string>("data_layout");
const auto data_layout = framework::StringToDataLayout(data_layout_str);
PADDLE_ENFORCE_EQ(data_layout, DataLayout::kNCHW,
platform::errors::InvalidArgument(
"The 'data_layout' attribute must be NCHW. But "
"received 'data_layout' is [%s].",
data_layout_str));
const auto* x = ctx.Input<Tensor>("X");
const auto& x_dims = x->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), 4,
platform::errors::InvalidArgument(
"The input tensor X's dimension must equal to 4. But "
"received X's shape = [%s], X's dimension = [%d].",
x_dims, x_dims.size()));
const int N = x_dims[0];
const int C = x_dims[1];
const int H = x_dims[2];
const int W = x_dims[3];
const auto* scale = ctx.Input<Tensor>("Scale");
const auto* bias = ctx.Input<Tensor>("Bias");
const auto* x_data = x->data<T>();
const auto* scale_data = scale->data<T>();
const auto* bias_data = bias->data<T>();
auto* y = ctx.Output<Tensor>("Y");
auto* y_data = y->mutable_data<T>(ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<DeviceContext>();
if (!global_stats) {
auto* mean_out = ctx.Output<Tensor>("MeanOut");
auto* variance_out = ctx.Output<Tensor>("VarianceOut");
auto* saved_mean = ctx.Output<Tensor>("SavedMean");
auto* saved_variance = ctx.Output<Tensor>("SavedVariance");
mean_out->mutable_data<T>(ctx.GetPlace());
variance_out->mutable_data<T>(ctx.GetPlace());
saved_mean->mutable_data<T>(ctx.GetPlace());
saved_variance->mutable_data<T>(ctx.GetPlace());
auto* mean_out_data = mean_out->data<T>();
auto* variance_out_data = variance_out->data<T>();
auto* saved_mean_data = saved_mean->data<T>();
auto* saved_variance_data = saved_variance->data<T>();
int r = xpu::batch_norm<T>(dev_ctx.x_context(), x_data, y_data, N, C, H,
W, epsilon, momentum, scale_data, bias_data,
saved_mean_data, saved_variance_data,
mean_out_data, variance_out_data, true);
PADDLE_ENFORCE_EQ(
r, XPU_SUCCESS,
platform::errors::External("XPU API(batch_norm_train_forward) returned "
"wrong value [%d]; please check whether "
"Baidu Kunlun Card is properly installed.",
r));
} else {
const auto* mean = ctx.Input<Tensor>("Mean");
const auto* variance = ctx.Input<Tensor>("Variance");
const auto* mean_data = mean->data<T>();
const auto* variance_data = variance->data<T>();
int r = xpu::batch_norm_infer_forward(
dev_ctx.x_context(), epsilon, N, C, H, W, x_data, y_data, scale_data,
bias_data, mean_data, variance_data);
PADDLE_ENFORCE_EQ(
r, XPU_SUCCESS,
platform::errors::External("XPU API(batch_norm_infer_forward) returned "
"wrong value [%d]; please check whether "
"Baidu Kunlun Card is properly installed.",
r));
}
}
};
template <typename DeviceContext, typename T>
class BatchNormGradXPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
const auto* x = ctx.Input<Tensor>("X");
const auto* dy = ctx.Input<Tensor>(framework::GradVarName("Y"));
const auto* scale = ctx.Input<Tensor>("Scale");
const auto* saved_mean = ctx.Input<Tensor>("SavedMean");
// SavedVariance have been reverted in forward operator
const auto* saved_inv_variance = ctx.Input<Tensor>("SavedVariance");
const auto& data_layout_str = ctx.Attr<std::string>("data_layout");
const auto data_layout = framework::StringToDataLayout(data_layout_str);
PADDLE_ENFORCE_EQ(data_layout, DataLayout::kNCHW,
platform::errors::InvalidArgument(
"The 'data_layout' attribute must be NCHW. But "
"received 'data_layout' is [%s].",
data_layout_str));
const auto& x_dims = x->dims();
PADDLE_ENFORCE_EQ(x_dims.size(), 4,
platform::errors::InvalidArgument(
"The input tensor X's dimension must equal to 4. But "
"received X's shape = [%s], X's dimension = [%d].",
x_dims, x_dims.size()));
const int N = x_dims[0];
const int C = x_dims[1];
const int H = x_dims[2];
const int W = x_dims[3];
const auto* x_data = x->data<T>();
const auto* dy_data = dy->data<T>();
const auto* scale_data = scale->data<T>();
const auto* saved_mean_data = saved_mean->data<T>();
const auto* saved_inv_variance_data = saved_inv_variance->data<T>();
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
auto* dscale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto* dbias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto* dx_data = dx->mutable_data<T>(ctx.GetPlace());
auto* dscale_data = dscale->mutable_data<T>(ctx.GetPlace());
auto* dbias_data = dbias->mutable_data<T>(ctx.GetPlace());
auto& dev_ctx = ctx.template device_context<DeviceContext>();
int r = xpu::batch_norm_backward(dev_ctx.x_context(), N, C, H, W, x_data,
dy_data, scale_data, saved_mean_data,
saved_inv_variance_data, dx_data,
dscale_data, dbias_data);
PADDLE_ENFORCE_EQ(
r, XPU_SUCCESS,
platform::errors::External("XPU API(batch_norm_backward) returned "
"wrong value [%d]; please check whether "
"Baidu Kunlun Card is properly installed.",
r));
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_XPU_KERNEL(
batch_norm,
ops::BatchNormXPUKernel<paddle::platform::XPUDeviceContext, float>);
REGISTER_OP_XPU_KERNEL(
batch_norm_grad,
ops::BatchNormGradXPUKernel<paddle::platform::XPUDeviceContext, float>);
#endif // PADDLE_WITH_XPU
|
#pragma once
#include <sstream>
#include <silicon/api.hh>
#include <silicon/backends/ws_api.hh>
namespace sl
{
const char* type_string(const void*) { return "void"; }
const char* type_string(const std::string*) { return "string"; }
const char* type_string(const int*) { return "int"; }
const char* type_string(const float*) { return "float"; }
const char* type_string(const double*) { return "double"; }
template <typename T>
std::string type_string(const std::vector<T>*) {
std::stringstream res;
res << "vector of " << type_string(static_cast<const T*>(nullptr));
return res.str();
}
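// e.g. type_string(static_cast<const std::vector<int>*>(nullptr)) yields
// "vector of int"; nested containers compose ("vector of vector of int").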
template <typename... T>
std::string type_string(const sio<T...>* o)
{
std::stringstream res; res << "{";
bool first = true;
foreach(*o) | [&] (auto& m) {
if (!first) res << ", ";
first = false;
res << m.symbol().name() << ": " << type_string(&m.value());
};
res << "}";
return res.str();
}
template <typename... R, typename P>
std::string procedure_description(const http_route<R...>& route, P f)
{
std::stringstream res;
res << route.verb_as_string() << ": ";
foreach(route.path) | [&] (auto e)
{
static_if<is_symbol<decltype(e)>::value>(
[&] (auto e2) { res << std::string("/") + e2.name(); },
[&] (auto e2) { res << std::string("/[") << e2.symbol().name() << ": "
<< type_string(&e2.value()) << "]"; }, e);
};
res << "(";
typedef std::remove_reference_t<decltype(f.function())> F;
typedef callable_return_type_t<F> ret_type;
first_sio_of_tuple_t<callable_arguments_tuple_t<F>> args;
auto get_post_args = iod::cat(route.get_params, route.post_params);
bool first = true;
foreach(get_post_args) | [&] (auto& a) {
if (!first) res << ", ";
first = false;
res << a.symbol().name() << ": " << type_string(&a.value());
};
res << ") -> " << type_string((ret_type*) 0);
return res.str();
}
template <typename... R, typename P>
std::string procedure_description(const ws_route<R...>& route, P f)
{
std::stringstream res;
foreach(route.path) | [&] (auto e)
{
res << std::string("/") + e.name();
};
res << "(";
typedef std::remove_reference_t<decltype(f.function())> F;
typedef callable_return_type_t<F> ret_type;
first_sio_of_tuple_t<callable_arguments_tuple_t<F>> args;
auto get_post_args = route.params;
bool first = true;
foreach(get_post_args) | [&] (auto& a) {
if (!first) res << ", ";
first = false;
res << a.symbol().name() << ": " << type_string(&a.value());
};
res << ") -> " << type_string((ret_type*) 0);
return res.str();
}
template <typename... R, typename P>
std::string procedure_description(const rmq::route<R...>& route, P f)
{
std::stringstream res;
res << route.exchange_as_string() << ": " << route.path_as_string(false);
res << "(";
typedef std::remove_reference_t<decltype(f.function())> F;
typedef callable_return_type_t<F> ret_type;
first_sio_of_tuple_t<callable_arguments_tuple_t<F>> args;
auto params = route.all_params();
bool first = true;
foreach(params) | [&] (auto& a) {
if (!first) res << ", ";
first = false;
res << a.symbol().name() << ": " << type_string(&a.value());
};
res << ") -> " << type_string((ret_type*) 0);
return res.str();
}
template <typename A>
std::string api_description(A& api)
{
std::stringstream res;
foreach(api) | [&] (auto& m)
{
static_if<is_tuple<decltype(m.content)>::value>(
[&] (auto m) { // If the content is a nested api (tuple), recurse.
res << api_description(m.content);
},
[&] (auto m) { // Else, describe the procedure.
res << procedure_description(m.route, m.content) << std::endl;
}, m);
};
return res.str();
}
}
|
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <string>
#include "base/prefs/json_pref_store.h"
#include "base/prefs/mock_pref_change_callback.h"
#include "base/prefs/pref_change_registrar.h"
#include "base/prefs/pref_registry_simple.h"
#include "base/prefs/pref_value_store.h"
#include "base/prefs/testing_pref_service.h"
#include "base/prefs/testing_pref_store.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using testing::_;
using testing::Mock;
const char kPrefName[] = "pref.name";
TEST(PrefServiceTest, NoObserverFire) {
TestingPrefServiceSimple prefs;
const char pref_name[] = "homepage";
prefs.registry()->RegisterStringPref(pref_name, std::string());
const char new_pref_value[] = "http://www.google.com/";
MockPrefChangeCallback obs(&prefs);
PrefChangeRegistrar registrar;
registrar.Init(&prefs);
registrar.Add(pref_name, obs.GetCallback());
// This should fire the checks in MockPrefChangeCallback::OnPreferenceChanged.
const base::StringValue expected_value(new_pref_value);
obs.Expect(pref_name, &expected_value);
prefs.SetString(pref_name, new_pref_value);
Mock::VerifyAndClearExpectations(&obs);
// Setting the pref to the same value should not set the pref value a second
// time.
EXPECT_CALL(obs, OnPreferenceChanged(_)).Times(0);
prefs.SetString(pref_name, new_pref_value);
Mock::VerifyAndClearExpectations(&obs);
// Clearing the pref should cause the observer to fire.
const base::StringValue expected_default_value((std::string()));
obs.Expect(pref_name, &expected_default_value);
prefs.ClearPref(pref_name);
Mock::VerifyAndClearExpectations(&obs);
// Clearing the pref again should not cause the observer to fire.
EXPECT_CALL(obs, OnPreferenceChanged(_)).Times(0);
prefs.ClearPref(pref_name);
Mock::VerifyAndClearExpectations(&obs);
}
TEST(PrefServiceTest, HasPrefPath) {
TestingPrefServiceSimple prefs;
const char path[] = "fake.path";
// Shouldn't initially have a path.
EXPECT_FALSE(prefs.HasPrefPath(path));
// Register the path. This doesn't set a value, so the path still shouldn't
// exist.
prefs.registry()->RegisterStringPref(path, std::string());
EXPECT_FALSE(prefs.HasPrefPath(path));
// Set a value and make sure we have a path.
prefs.SetString(path, "blah");
EXPECT_TRUE(prefs.HasPrefPath(path));
}
TEST(PrefServiceTest, Observers) {
const char pref_name[] = "homepage";
TestingPrefServiceSimple prefs;
prefs.SetUserPref(pref_name,
base::Value::CreateStringValue("http://www.cnn.com"));
prefs.registry()->RegisterStringPref(pref_name, std::string());
const char new_pref_value[] = "http://www.google.com/";
const base::StringValue expected_new_pref_value(new_pref_value);
MockPrefChangeCallback obs(&prefs);
PrefChangeRegistrar registrar;
registrar.Init(&prefs);
registrar.Add(pref_name, obs.GetCallback());
PrefChangeRegistrar registrar_two;
registrar_two.Init(&prefs);
// This should fire the checks in MockPrefChangeCallback::OnPreferenceChanged.
obs.Expect(pref_name, &expected_new_pref_value);
prefs.SetString(pref_name, new_pref_value);
Mock::VerifyAndClearExpectations(&obs);
// Now try adding a second pref observer.
const char new_pref_value2[] = "http://www.youtube.com/";
const base::StringValue expected_new_pref_value2(new_pref_value2);
MockPrefChangeCallback obs2(&prefs);
obs.Expect(pref_name, &expected_new_pref_value2);
obs2.Expect(pref_name, &expected_new_pref_value2);
registrar_two.Add(pref_name, obs2.GetCallback());
// This should fire the checks in obs and obs2.
prefs.SetString(pref_name, new_pref_value2);
Mock::VerifyAndClearExpectations(&obs);
Mock::VerifyAndClearExpectations(&obs2);
// Set a recommended value.
const base::StringValue recommended_pref_value("http://www.gmail.com/");
obs.Expect(pref_name, &expected_new_pref_value2);
obs2.Expect(pref_name, &expected_new_pref_value2);
// This should fire the checks in obs and obs2 but with an unchanged value
// as the recommended value is being overridden by the user-set value.
prefs.SetRecommendedPref(pref_name, recommended_pref_value.DeepCopy());
Mock::VerifyAndClearExpectations(&obs);
Mock::VerifyAndClearExpectations(&obs2);
// Make sure obs2 still works after removing obs.
registrar.Remove(pref_name);
EXPECT_CALL(obs, OnPreferenceChanged(_)).Times(0);
obs2.Expect(pref_name, &expected_new_pref_value);
// This should only fire the observer in obs2.
prefs.SetString(pref_name, new_pref_value);
Mock::VerifyAndClearExpectations(&obs);
Mock::VerifyAndClearExpectations(&obs2);
}
// Make sure that if a preference changes type, so that the wrong type is
// stored in the user pref file, the correct fallback value is used instead.
TEST(PrefServiceTest, GetValueChangedType) {
const int kTestValue = 10;
TestingPrefServiceSimple prefs;
prefs.registry()->RegisterIntegerPref(kPrefName, kTestValue);
// Check falling back to the registered default value.
prefs.SetUserPref(kPrefName,
base::Value::CreateStringValue("not an integer"));
const PrefService::Preference* pref = prefs.FindPreference(kPrefName);
ASSERT_TRUE(pref);
const base::Value* value = pref->GetValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
int actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kTestValue, actual_int_value);
}
TEST(PrefServiceTest, GetValueAndGetRecommendedValue) {
const int kDefaultValue = 5;
const int kUserValue = 10;
const int kRecommendedValue = 15;
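// Precedence exercised below: a user-set value overrides a recommended
// value, which in turn overrides the registered default.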
TestingPrefServiceSimple prefs;
prefs.registry()->RegisterIntegerPref(kPrefName, kDefaultValue);
// Create pref with a default value only.
const PrefService::Preference* pref = prefs.FindPreference(kPrefName);
ASSERT_TRUE(pref);
// Check that GetValue() returns the default value.
const base::Value* value = pref->GetValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
int actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kDefaultValue, actual_int_value);
// Check that GetRecommendedValue() returns no value.
value = pref->GetRecommendedValue();
ASSERT_FALSE(value);
// Set a user-set value.
prefs.SetUserPref(kPrefName, base::Value::CreateIntegerValue(kUserValue));
// Check that GetValue() returns the user-set value.
value = pref->GetValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kUserValue, actual_int_value);
// Check that GetRecommendedValue() returns no value.
value = pref->GetRecommendedValue();
ASSERT_FALSE(value);
// Set a recommended value.
prefs.SetRecommendedPref(kPrefName,
base::Value::CreateIntegerValue(kRecommendedValue));
// Check that GetValue() returns the user-set value.
value = pref->GetValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kUserValue, actual_int_value);
// Check that GetRecommendedValue() returns the recommended value.
value = pref->GetRecommendedValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kRecommendedValue, actual_int_value);
// Remove the user-set value.
prefs.RemoveUserPref(kPrefName);
// Check that GetValue() returns the recommended value.
value = pref->GetValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kRecommendedValue, actual_int_value);
// Check that GetRecommendedValue() returns the recommended value.
value = pref->GetRecommendedValue();
ASSERT_TRUE(value);
EXPECT_EQ(base::Value::TYPE_INTEGER, value->GetType());
actual_int_value = -1;
EXPECT_TRUE(value->GetAsInteger(&actual_int_value));
EXPECT_EQ(kRecommendedValue, actual_int_value);
}
class PrefServiceSetValueTest : public testing::Test {
protected:
static const char kName[];
static const char kValue[];
PrefServiceSetValueTest() : observer_(&prefs_) {}
TestingPrefServiceSimple prefs_;
MockPrefChangeCallback observer_;
};
const char PrefServiceSetValueTest::kName[] = "name";
const char PrefServiceSetValueTest::kValue[] = "value";
TEST_F(PrefServiceSetValueTest, SetStringValue) {
const char default_string[] = "default";
const base::StringValue default_value(default_string);
prefs_.registry()->RegisterStringPref(kName, default_string);
PrefChangeRegistrar registrar;
registrar.Init(&prefs_);
registrar.Add(kName, observer_.GetCallback());
// Changing the controlling store from default to user triggers notification.
observer_.Expect(kName, &default_value);
prefs_.Set(kName, default_value);
Mock::VerifyAndClearExpectations(&observer_);
EXPECT_CALL(observer_, OnPreferenceChanged(_)).Times(0);
prefs_.Set(kName, default_value);
Mock::VerifyAndClearExpectations(&observer_);
base::StringValue new_value(kValue);
observer_.Expect(kName, &new_value);
prefs_.Set(kName, new_value);
Mock::VerifyAndClearExpectations(&observer_);
}
TEST_F(PrefServiceSetValueTest, SetDictionaryValue) {
prefs_.registry()->RegisterDictionaryPref(kName);
PrefChangeRegistrar registrar;
registrar.Init(&prefs_);
registrar.Add(kName, observer_.GetCallback());
EXPECT_CALL(observer_, OnPreferenceChanged(_)).Times(0);
prefs_.RemoveUserPref(kName);
Mock::VerifyAndClearExpectations(&observer_);
base::DictionaryValue new_value;
new_value.SetString(kName, kValue);
observer_.Expect(kName, &new_value);
prefs_.Set(kName, new_value);
Mock::VerifyAndClearExpectations(&observer_);
EXPECT_CALL(observer_, OnPreferenceChanged(_)).Times(0);
prefs_.Set(kName, new_value);
Mock::VerifyAndClearExpectations(&observer_);
base::DictionaryValue empty;
observer_.Expect(kName, &empty);
prefs_.Set(kName, empty);
Mock::VerifyAndClearExpectations(&observer_);
}
TEST_F(PrefServiceSetValueTest, SetListValue) {
prefs_.registry()->RegisterListPref(kName);
PrefChangeRegistrar registrar;
registrar.Init(&prefs_);
registrar.Add(kName, observer_.GetCallback());
EXPECT_CALL(observer_, OnPreferenceChanged(_)).Times(0);
prefs_.RemoveUserPref(kName);
Mock::VerifyAndClearExpectations(&observer_);
base::ListValue new_value;
new_value.Append(base::Value::CreateStringValue(kValue));
observer_.Expect(kName, &new_value);
prefs_.Set(kName, new_value);
Mock::VerifyAndClearExpectations(&observer_);
EXPECT_CALL(observer_, OnPreferenceChanged(_)).Times(0);
prefs_.Set(kName, new_value);
Mock::VerifyAndClearExpectations(&observer_);
base::ListValue empty;
observer_.Expect(kName, &empty);
prefs_.Set(kName, empty);
Mock::VerifyAndClearExpectations(&observer_);
}
|
// RUN: %clang_cc1 -emit-llvm -triple %itanium_abi_triple %s -o - | \
// RUN: FileCheck -check-prefix CHECK-ITANIUM %s
// RUN: %clang_cc1 -emit-llvm -triple wasm32-unknown-unknown %s -o - | \
// RUN: FileCheck -check-prefix CHECK-WEBASSEMBLY32 %s
// RUN: %clang_cc1 -emit-llvm -triple wasm64-unknown-unknown %s -o - | \
// RUN: FileCheck -check-prefix CHECK-WEBASSEMBLY64 %s
// rdar://7268289
class t {
public:
virtual void foo(void);
void bar(void);
};
void
t::bar(void) {
// CHECK-ITANIUM: @_ZN1t3barEv({{.*}}) #0 align 2 {
// CHECK-WEBASSEMBLY32: @_ZN1t3barEv({{.*}}) #0 {
// CHECK-WEBASSEMBLY64: @_ZN1t3barEv({{.*}}) #0 {
}
void
t::foo(void) {
// CHECK-ITANIUM: @_ZN1t3fooEv({{.*}}) unnamed_addr #0 align 2 {
// CHECK-WEBASSEMBLY32: @_ZN1t3fooEv({{.*}}) unnamed_addr #0 {
// CHECK-WEBASSEMBLY64: @_ZN1t3fooEv({{.*}}) unnamed_addr #0 {
}
|
/*
* yosys -- Yosys Open SYnthesis Suite
*
* Copyright (C) 2012 Claire Xenia Wolf <claire@yosyshq.com>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#include "kernel/yosys.h"
#include "kernel/sigtools.h"
USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN
static SigSpec or_generator(Module *module, const SigSpec &sig)
{
switch (GetSize(sig))
{
case 0:
return State::S0;
case 1:
return sig;
case 2:
return module->Or(NEW_ID, sig[0], sig[1]);
default:
return module->ReduceOr(NEW_ID, sig);
}
}
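// Splits the one-hot select vector in half and recurses: the OR of the left
// half's select bits drives a 2:1 $mux between the two subtrees, turning a
// wide $pmux into a log-depth tree of muxes.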
static SigSpec recursive_mux_generator(Module *module, const SigSpec &sig_data, const SigSpec &sig_sel, SigSpec &sig_or)
{
if (GetSize(sig_sel) == 1) {
sig_or.append(sig_sel);
return sig_data;
}
int left_size = GetSize(sig_sel) / 2;
int right_size = GetSize(sig_sel) - left_size;
int stride = GetSize(sig_data) / GetSize(sig_sel);
SigSpec left_data = sig_data.extract(0, stride*left_size);
SigSpec right_data = sig_data.extract(stride*left_size, stride*right_size);
SigSpec left_sel = sig_sel.extract(0, left_size);
SigSpec right_sel = sig_sel.extract(left_size, right_size);
SigSpec left_or, left_result, right_result;
left_result = recursive_mux_generator(module, left_data, left_sel, left_or);
right_result = recursive_mux_generator(module, right_data, right_sel, sig_or);
left_or = or_generator(module, left_or);
sig_or.append(left_or);
return module->Mux(NEW_ID, right_result, left_result, left_or);
}
struct PmuxtreePass : public Pass {
PmuxtreePass() : Pass("pmuxtree", "transform $pmux cells to trees of $mux cells") { }
void help() override
{
// |---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|---v---|
log("\n");
log(" pmuxtree [selection]\n");
log("\n");
log("This pass transforms $pmux cells to trees of $mux cells.\n");
log("\n");
}
void execute(std::vector<std::string> args, RTLIL::Design *design) override
{
log_header(design, "Executing PMUXTREE pass.\n");
size_t argidx;
for (argidx = 1; argidx < args.size(); argidx++) {
break;
}
extra_args(args, argidx, design);
for (auto module : design->selected_modules())
for (auto cell : module->selected_cells())
{
if (cell->type != ID($pmux))
continue;
SigSpec sig_data = cell->getPort(ID::B);
SigSpec sig_sel = cell->getPort(ID::S);
if (!cell->getPort(ID::A).is_fully_undef()) {
sig_data.append(cell->getPort(ID::A));
SigSpec sig_sel_or = module->ReduceOr(NEW_ID, sig_sel);
sig_sel.append(module->Not(NEW_ID, sig_sel_or));
}
SigSpec result, result_or;
result = recursive_mux_generator(module, sig_data, sig_sel, result_or);
module->connect(cell->getPort(ID::Y), result);
module->remove(cell);
}
}
} PmuxtreePass;
PRIVATE_NAMESPACE_END
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
#pragma pack(push, 8)
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
// Including type: PersistentScriptableObject
#include "GlobalNamespace/PersistentScriptableObject.hpp"
// Completed includes
// Begin forward declares
// Forward declaring namespace: GlobalNamespace
namespace GlobalNamespace {
// Forward declaring type: ObjectiveValueFormatterSO
class ObjectiveValueFormatterSO;
}
// Completed forward declares
// Type namespace:
namespace GlobalNamespace {
// Autogenerated type: MissionObjectiveTypeSO
class MissionObjectiveTypeSO : public GlobalNamespace::PersistentScriptableObject {
public:
// private System.String _objectiveName
// Offset: 0x18
::Il2CppString* objectiveName;
// private System.Boolean _noConditionValue
// Offset: 0x20
bool noConditionValue;
// private ObjectiveValueFormatterSO _objectiveValueFormater
// Offset: 0x28
GlobalNamespace::ObjectiveValueFormatterSO* objectiveValueFormater;
// public System.String get_objectiveName()
// Offset: 0xC0D1DC
::Il2CppString* get_objectiveName();
// public System.String get_objectiveNameLocalized()
// Offset: 0xC0D014
::Il2CppString* get_objectiveNameLocalized();
// public System.Boolean get_noConditionValue()
// Offset: 0xC0D1E4
bool get_noConditionValue();
// public ObjectiveValueFormatterSO get_objectiveValueFormater()
// Offset: 0xC0D1EC
GlobalNamespace::ObjectiveValueFormatterSO* get_objectiveValueFormater();
// public System.Void .ctor()
// Offset: 0xC0D1F4
// Implemented from: PersistentScriptableObject
// Base method: System.Void PersistentScriptableObject::.ctor()
// Base method: System.Void ScriptableObject::.ctor()
// Base method: System.Void Object::.ctor()
// Base method: System.Void Object::.ctor()
static MissionObjectiveTypeSO* New_ctor();
}; // MissionObjectiveTypeSO
}
#include "extern/beatsaber-hook/shared/utils/il2cpp-type-check.hpp"
DEFINE_IL2CPP_ARG_TYPE(GlobalNamespace::MissionObjectiveTypeSO*, "", "MissionObjectiveTypeSO");
#pragma pack(pop)
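// A hypothetical usage sketch (not part of the generated header): assuming a
// valid instance pointer has been obtained from the game at runtime, the
// generated getters can be called directly:
//
//   GlobalNamespace::MissionObjectiveTypeSO* so = /* obtained at runtime */;
//   ::Il2CppString* name = so->get_objectiveName();
//   bool noCond = so->get_noConditionValue();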
|
#include <iostream>
#include <cstdio>    // printf
#include <cstdlib>   // malloc, free
#include <cstring>   // strcpy, strcmp, strlen
#include <cctype>    // toupper
#include <fstream>
#include <Windows.h> // Sleep
using namespace std;
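// A contact record. The 26 buckets in head[] (one per initial A-Z) each hold
// a ternary search tree: left/right order nodes by name, and mid chains
// additional contacts that share a name but have different numbers.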
struct node
{
char name[30];
char number[15];
char address[40];
char email[40];
struct node *left, *right, *mid;
};
struct node* searchin(struct node* root, char name[]);
void modify(struct node* ptr);
void savein(struct node* head, char fn[]);
void gosave();
void insertstackpush(struct node* temp);
void deletestackpush(struct node* temp);
void load();
struct node* pop(struct node* stack);
void display();
void print();
struct node* input(struct node*);
struct node* deleteNodeinside(struct node*, char key[], char number[]);
struct node* findmin(struct node* tree);
void inorderinside(struct node* head);
struct node* insertinside(struct node*, struct node*);
// insertstack records new contacts to append to disk; deletestack records
// buckets whose file must be rewritten. gosave() drains both at exit.
struct node *insertstack = NULL, *deletestack = NULL, *head[26];
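// Program flow: load() reads every bucket file into the in-memory trees and
// clears the files; the menu loop mutates the trees while recording pending
// work on the two stacks; gosave() flushes everything back to disk at exit.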
int main(){
struct node *n_node = NULL, *ptr = NULL;
char ans;
char num[15];
int hash, k = 0;
for(int i = 0; i < 26; i++)
{
head[i] = NULL;
}
int option;
char key[40];
printf("\n**********************************************************************************************PHONEBOOK************************************************************************************************************");
printf("\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------");
load();
do{
cout<<"\nMENU\n1)INSERT\n2)PRINT\n3)SEARCH AND MODIFY\n4)DELETE A CONTACT\n5)EXIT\nENTER YOUR CHOICE\n";
cin>>option;
switch(option)
{
case 1:
do{
n_node = input(n_node);
// 'A' is ASCII 65, so c % 65 maps the uppercase letters A-Z to buckets 0-25.
hash = int(n_node->name[0]) % 65;
insertstackpush(n_node);
head[hash] = insertinside(head[hash], n_node);
cout<<"\nCONTINUE ? (y/n)\t";
cin>>ans;
}while(ans == 'y' || ans == 'Y');
break;
case 2:
print();
break;
case 3:
cout<<"ENTER THE NAME TO BE SEARCHED\n";
cin>>key;
while(key[k] != '\0'){
key[k] = toupper(key[k]);
k++;
}
hash = int(key[0]) % 65;
ptr = searchin(head[hash], key);
if(ptr != NULL){
cout<<"\n\nNAME : "<< ptr->name<<endl;
cout<<"NUMBER : "<< ptr->number<<endl;
cout<<"ADDRESS : "<< ptr->address<<endl;
cout<<"E-MAIL : "<< ptr->email<<endl;
cout<<"\n\nDo you want to modify? (y/n)\n\n"<<endl;
cin>>ans;
if(ans == 'y' || ans == 'Y'){
modify(ptr);
}
}
k = 0;
break;
case 4:
cout<<"ENTER THE NAME\n";
cin>>key;
cout<<"ENTER THE NUMBER\n";
cin>>num;
while(key[k] != '\0'){
key[k] = toupper(key[k]);
k++;
}
hash = int (key[0]) % 65;
head[hash] = deleteNodeinside(head[hash],key,num);
k=0;
break;
case 5:
break;
default:
cout<<"ENTER VALID CHOICE\n";
break;
}
}while(option!=5);
gosave();
return 0;
}
struct node *insertinside(struct node *head, struct node *new_node)
{
if(head == NULL)
{
new_node->left = NULL;
new_node->right = NULL;
new_node->mid = NULL;   // must be initialised: malloc'd nodes carry garbage
head = new_node;
}
else
{
if(strcmp(head->name, new_node->name) == 0)
{
if(strcmp(head->number, new_node->number) == 0){
cout<<"\n\t\tAlready exist!!!!"<<endl;
}
else{
// Same name, different number: chain the duplicate on mid.
head->mid = insertinside(head->mid, new_node);
}
}
else if(strcmp(head->name, new_node->name) > 0)
{
head->left = insertinside(head->left, new_node);
}
else
{
head->right = insertinside(head->right, new_node);
}
}
return head;
}
void print()
{
for(int i = 0; i < 26; i++){   // exactly 26 buckets; <=26 would read past the array
if(head[i] != NULL)
inorderinside(head[i]);
}
}
void inorderinside(struct node *head)
{
if(head != NULL){
inorderinside(head -> left);
cout<<"\n\nNAME : "<< head -> name;
cout<<"\nNUMBER : "<< head -> number;
cout<<"\nADDRESS : "<< head -> address;
cout<<"\nE-MAIL : "<< head -> email;
inorderinside(head -> mid);
inorderinside(head -> right);
}
}
void load(){
struct node *new__node;
int hash;
char name[30];
char number[15];
char address[40];
char email[40];
char c[40];
cout<<"\n\n\n\t\tPlease wait for a moment\n";
cout<<"\t\tRetrieving data from file\n\n";
char fn[] = "C:/Users/Admin/Documents/PhoneBook/Directory_A.txt";
Sleep(1000);
for(char i = 'A'; i <= 'Z'; ++i){
fn[45] = i;           // patch the bucket letter into "..._A.txt"
hash = int(i) % 65;   // 'A' is 65, so this maps A-Z to 0-25
fstream ob(fn);
// Records are stored as four lines (name, number, address, email) with a
// blank separator line; testing the stream directly avoids the classic
// while(!eof()) off-by-one.
while(ob.getline(c, 40)){
if(c[0] != '\0'){
strcpy(name, c);
ob.getline(c, 40);
strcpy(number, c);
ob.getline(c, 40);
strcpy(address, c);
ob.getline(c, 40);
strcpy(email, c);
new__node = (struct node*)malloc(sizeof(struct node));
strcpy(new__node->name, name);
strcpy(new__node->number, number);
strcpy(new__node->address, address);
strcpy(new__node->email, email);
head[hash] = insertinside(head[hash], new__node);
}
}
ob.close();
ob.clear();   // clear the EOF/fail state before reopening
// Clear the file: gosave() rewrites every touched bucket on exit.
ob.open(fn, ios::out | ios::trunc);
ob.close();
}
}
struct node *input(struct node *ne_node){
char name[30];
char number[15];
char address[40];
char email[40];
int k = 0;
cin.ignore(1000, '\n');   // discard the newline left behind by the previous cin>>
cout<<"\nENTER NAME : ";
cin.getline(name, 30);
while(strlen(name) == 0){
cout<<"\nENTER VALID NAME : ";
cin.getline(name, 30);
}
while(name[k] != '\0'){
name[k] = toupper(name[k]);
k++;
}
cout<<"\nENTER NUMBER : ";
cin.getline(number, 15);
while(strlen(number) != 10){
cout<<"\nENTER A VALID 10-DIGIT NUMBER : ";
cin.getline(number, 15);
}
cout<<"\nENTER ADDRESS : ";
cin.getline(address, 40);
cout<<"\nENTER E-MAIL : ";
cin.getline(email, 40);
ne_node = (struct node*)malloc(sizeof(struct node));
strcpy(ne_node->name, name);
strcpy(ne_node->number, number);
strcpy(ne_node->address, address);
strcpy(ne_node->email, email);
return ne_node;
}
struct node* deleteNodeinside(struct node* root, char key[], char number[])
{
struct node* temp;
if(root == NULL){
return root;
}
if(strcmp(root->name, key) > 0){
root->left = deleteNodeinside(root->left, key, number);
}
else if(strcmp(root->name, key) < 0){
root->right = deleteNodeinside(root->right, key, number);
}
else{
if(strcmp(root->number, number) == 0 && root->mid != NULL){
// This node matches but has duplicates chained on mid: promote the
// first duplicate's data into this node and unlink that duplicate.
temp = root->mid;
strcpy(root->name, temp->name);
strcpy(root->number, temp->number);
strcpy(root->address, temp->address);
strcpy(root->email, temp->email);
root->mid = temp->mid;
deletestackpush(temp);
free(temp);
return root;
}
if(strcmp(root->number, number) != 0){
// Same name, different number: the match is somewhere down the mid
// chain. Unlink it before freeing (the original freed first and then
// dereferenced the dangling pointer).
temp = root;
while(temp->mid != NULL && strcmp(temp->mid->number, number) != 0){
temp = temp->mid;
}
if(temp->mid != NULL){
struct node* victim = temp->mid;
deletestackpush(victim);
temp->mid = victim->mid;
free(victim);
}
return root;
}
// Standard BST deletion for a node with no mid chain.
if(root->left == NULL){
temp = root->right;
deletestackpush(root);
free(root);
return temp;
}
else if(root->right == NULL){
temp = root->left;
deletestackpush(root);
free(root);
return temp;
}
// Two children: copy the inorder successor's data here, then delete the
// successor from the right subtree.
temp = findmin(root->right);
strcpy(root->name, temp->name);
strcpy(root->number, temp->number);
strcpy(root->address, temp->address);
strcpy(root->email, temp->email);
root->right = deleteNodeinside(root->right, temp->name, temp->number);
deletestackpush(root);
}
return root;
}
struct node* findmin(struct node* tree)
{
// The smallest name in a subtree is the leftmost node; the mid chain never
// changes ordering, so it plays no part in finding the minimum.
while(tree != NULL && tree->left != NULL)
tree = tree->left;
return tree;
}
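// The stacks reuse the left pointer as their "next" link and store copies of
// the nodes, so later tree mutations cannot corrupt the pending save data.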
void insertstackpush(struct node* temp){
struct node* newnode;
newnode = (struct node*) malloc(sizeof(struct node));
strcpy(newnode -> name,temp -> name);
strcpy(newnode -> number,temp -> number);
strcpy(newnode -> address,temp -> address);
strcpy(newnode -> email,temp -> email);
newnode->left = insertstack;
insertstack = newnode;
}
void display() {
struct node* ptr;
if(insertstack==NULL)
cout<<"stack is empty";
else {
ptr = insertstack;
cout<<"Stack elements are: ";
while (ptr != NULL) {
cout<< ptr->name <<" ";
ptr = ptr->left;
}
}
}
void deletestackpush(struct node* temp){
struct node* newnode;
newnode = (struct node*) malloc(sizeof(struct node));
strcpy(newnode -> name,temp -> name);
strcpy(newnode -> number,temp -> number);
strcpy(newnode -> address,temp -> address);
strcpy(newnode -> email,temp -> email);
newnode->left = deletestack;
deletestack = newnode;
}
void gosave(){
fstream ob;
char fn[] = "C:/Users/Admin/Documents/PhoneBook/Directory_A.txt";
int hash;
Sleep(2000);
// New contacts are simply appended to their bucket's file.
while(insertstack != NULL){
fn[45] = insertstack->name[0];
ob.open(fn, ios::app);
ob<<"\n"<<insertstack->name<<"\n"<<insertstack->number<<"\n"<<insertstack->address<<"\n"<<insertstack->email;
ob.close();
insertstack = pop(insertstack);
}
// Buckets with deletions are truncated and rewritten from the in-memory tree.
while(deletestack != NULL){
hash = int(deletestack->name[0]) % 65;
fn[45] = deletestack->name[0];
ob.open(fn, ios::out | ios::trunc);   // clear first, then let savein() append
ob.close();
savein(head[hash], fn);
deletestack = pop(deletestack);
}
}
struct node* pop(struct node* stack) {
struct node* temp;
if(stack != NULL){
temp = stack;
stack = stack->left;
free(temp);
return stack;
}
return NULL;   // popping an empty stack yields an empty stack
}
void savein(struct node *head, char fn[]){
ofstream ob;
if(head != NULL){
ob.open(fn, ios::app);
ob<<"\n"<<head->name<<"\n"<<head->number<<"\n"<<head->address<<"\n"<<head->email;
ob.close();
savein(head->mid, fn);    // the original skipped the mid chain, losing duplicates
savein(head->left, fn);
savein(head->right, fn);
}
}
struct node* searchin(struct node* root, char name[])
{
if(root == NULL)
{
cout<<"\nElement not in list";
return root;
}
if(strcmp(root->name, name) == 0)
{
return root;
}
// Recurse without assigning the result back: the original wrote the found
// node into root->left/right, corrupting the tree on every search.
if(strcmp(root->name, name) < 0)
{
return searchin(root->right, name);
}
return searchin(root->left, name);
}
void modify(struct node* ptr)
{
char a;
int hash1, hash, k = 0;
struct node* ptr1 = (struct node*)malloc(sizeof(struct node));
cout<<"\nWhich entity has to be modified??\nN - NAME\nA - ADDRESS\nP - PHONE NUMBER\nE - MAIL_ID\n";
cin>>a;
cin.ignore(1000, '\n');   // discard the newline so the getlines below work
switch(a)
{
case ('N'):
char name[30];
cout<<"\nEnter name of subscriber: ";
cin.getline(name, 30);
while(strlen(name) == 0)
{
cout<<"\nEnter name of subscriber: ";
cin.getline(name, 30);
}
while(name[k] != '\0'){
name[k] = toupper(name[k]);
k++;
}
// Renaming can move the contact to a different bucket, so insert the
// copy under the new name first, then delete the old entry.
hash1 = int(name[0]) % 65;
hash = int(ptr->name[0]) % 65;
strcpy(ptr1->name, name);
strcpy(ptr1->address, ptr->address);
strcpy(ptr1->number, ptr->number);
strcpy(ptr1->email, ptr->email);
head[hash1] = insertinside(head[hash1], ptr1);
head[hash] = deleteNodeinside(head[hash], ptr->name, ptr->number);
// deleteNodeinside() already queued the old bucket for rewriting; queue
// the new bucket too so the renamed entry reaches disk. (The original
// also pushed ptr here, reading memory the deletion may have freed.)
deletestackpush(ptr1);
break;
case ('A'):
char address[40];
cout<<"\nEnter address of subscriber: ";
cin.getline(address, 40);
while(strlen(address) == 0)
{
cout<<"\nEnter address of subscriber: ";
cin.getline(address, 40);
}
strcpy(ptr->address, address);
deletestackpush(ptr);
break;
case ('P'):
char no[20];
cout<<"\nEnter phone number of subscriber: ";
cin.getline(no, 20);
while(strlen(no) != 10)
{
cout<<"\nEnter phone number of subscriber: ";
cin.getline(no, 20);
}
strcpy(ptr->number, no);
deletestackpush(ptr);
break;
case ('E'):
char mail[40];
cout<<"\nEnter E-mail ID of subscriber: ";
cin.getline(mail, 40);
while(strlen(mail) == 0)
{
cout<<"\nEnter E-mail ID of subscriber: ";
cin.getline(mail, 40);
}
strcpy(ptr->email, mail);
deletestackpush(ptr);
break;
default:
cout<<"\nUnrecognized character entered!\nKindly re-enter character from list";
}
}
|