//===- CodeCoverage.cpp - Coverage tool based on profiling instrumentation-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The 'CodeCoverageTool' class implements a command line tool to analyze and
// report coverage information using the profiling instrumentation and code
// coverage mapping.
//
//===----------------------------------------------------------------------===//
#include "CoverageExporterJson.h"
#include "CoverageExporterLcov.h"
#include "CoverageFilters.h"
#include "CoverageReport.h"
#include "CoverageSummaryInfo.h"
#include "CoverageViewOptions.h"
#include "RenderingSupport.h"
#include "SourceCoverageView.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ProfileData/Coverage/CoverageMapping.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/ThreadPool.h"
#include "llvm/Support/Threading.h"
#include "llvm/Support/ToolOutputFile.h"
#include <functional>
#include <map>
#include <system_error>
using namespace llvm;
using namespace coverage;
/// Export coverage data as JSON to \p OS. Defined out-of-line (in the JSON
/// exporter translation unit); declared here for use by this tool.
void exportCoverageDataToJson(const coverage::CoverageMapping &CoverageMapping,
const CoverageViewOptions &Options,
raw_ostream &OS);
namespace {
/// The implementation of the coverage tool.
class CodeCoverageTool {
public:
enum Command {
/// The show command.
Show,
/// The report command.
Report,
/// The export command.
Export
};
/// Run subcommand \p Cmd with the given command line. Returns the process
/// exit code (0 on success).
int run(Command Cmd, int argc, const char **argv);
private:
/// Print the error message to the error output stream.
void error(const Twine &Message, StringRef Whence = "");
/// Print the warning message to the error output stream.
void warning(const Twine &Message, StringRef Whence = "");
/// Convert \p Path into an absolute path and append it to the list
/// of collected paths.
void addCollectedPath(const std::string &Path);
/// If \p Path is a regular file, collect the path. If it's a
/// directory, recursively collect all of the paths within the directory.
void collectPaths(const std::string &Path);
/// Return a memory buffer for the given source file.
ErrorOr<const MemoryBuffer &> getSourceFile(StringRef SourceFile);
/// Create source views for the expansions of the view.
void attachExpansionSubViews(SourceCoverageView &View,
ArrayRef<ExpansionRecord> Expansions,
const CoverageMapping &Coverage);
/// Create the source view of a particular function.
std::unique_ptr<SourceCoverageView>
createFunctionView(const FunctionRecord &Function,
const CoverageMapping &Coverage);
/// Create the main source view of a particular source file.
std::unique_ptr<SourceCoverageView>
createSourceFileView(StringRef SourceFile, const CoverageMapping &Coverage);
/// Load the coverage mapping data. Return nullptr if an error occurred.
std::unique_ptr<CoverageMapping> load();
/// Create a mapping from files in the Coverage data to local copies
/// (path-equivalence).
void remapPathNames(const CoverageMapping &Coverage);
/// Remove input source files which aren't mapped by \p Coverage.
void removeUnmappedInputs(const CoverageMapping &Coverage);
/// If a demangler is available, demangle all symbol names.
void demangleSymbols(const CoverageMapping &Coverage);
/// Write out a source file view to the filesystem.
void writeSourceFileView(StringRef SourceFile, CoverageMapping *Coverage,
CoveragePrinter *Printer, bool ShowFilenames);
/// Signature of the deferred command-line parser each subcommand invokes
/// after registering its own options.
typedef llvm::function_ref<int(int, const char **)> CommandLineParserType;
/// Implementation of the 'show' subcommand.
int doShow(int argc, const char **argv,
CommandLineParserType commandLineParser);
/// Implementation of the 'report' subcommand.
int doReport(int argc, const char **argv,
CommandLineParserType commandLineParser);
/// Implementation of the 'export' subcommand.
int doExport(int argc, const char **argv,
CommandLineParserType commandLineParser);
/// The coverage-instrumented binaries (or object files) to read mappings
/// from.
std::vector<StringRef> ObjectFilenames;
/// Rendering options shared by all subcommands.
CoverageViewOptions ViewOpts;
/// Function filters (all must match) built from -name*/-*-coverage-* flags.
CoverageFiltersMatchAll Filters;
/// Filters built from -ignore-filename-regex.
CoverageFilters IgnoreFilenameFilters;
/// The path to the indexed profile.
std::string PGOFilename;
/// A list of input source files.
std::vector<std::string> SourceFiles;
/// In -path-equivalence mode, this maps the absolute paths from the coverage
/// mapping data to the input source files.
StringMap<std::string> RemappedFilenames;
/// The coverage data path to be remapped from, and the source path to be
/// remapped to, when using -path-equivalence.
Optional<std::pair<std::string, std::string>> PathRemapping;
/// The architecture the coverage mapping data targets.
std::vector<StringRef> CoverageArches;
/// A cache for demangled symbols.
DemangleCache DC;
/// A lock which guards printing to stderr.
std::mutex ErrsLock;
/// A container for input source file buffers.
std::mutex LoadedSourceFilesLock;
/// Cache of (path, buffer) pairs for already-loaded sources; guarded by
/// LoadedSourceFilesLock.
std::vector<std::pair<std::string, std::unique_ptr<MemoryBuffer>>>
LoadedSourceFiles;
/// Whitelist from -name-whitelist to be used for filtering.
std::unique_ptr<SpecialCaseList> NameWhitelist;
};
} // end anonymous namespace
/// Format a diagnostic as "<severity>: [<whence>: ]<message>\n", where the
/// severity is "warning" when \p Warning is set and "error" otherwise.
static std::string getErrorString(const Twine &Message, StringRef Whence,
                                  bool Warning) {
  std::string Result;
  Result += Warning ? "warning" : "error";
  Result += ": ";
  if (!Whence.empty()) {
    Result += Whence.str();
    Result += ": ";
  }
  Result += Message.str();
  Result += "\n";
  return Result;
}
/// Print \p Message (optionally prefixed by \p Whence) to stderr in red.
/// ErrsLock serializes output against concurrent worker threads.
void CodeCoverageTool::error(const Twine &Message, StringRef Whence) {
  std::lock_guard<std::mutex> Lock(ErrsLock);
  ViewOpts.colored_ostream(errs(), raw_ostream::RED)
      << getErrorString(Message, Whence, /*Warning=*/false);
}
/// Print a warning for \p Message (optionally prefixed by \p Whence) to
/// stderr; uses the same red highlight as error(). ErrsLock serializes the
/// output.
void CodeCoverageTool::warning(const Twine &Message, StringRef Whence) {
  std::lock_guard<std::mutex> Lock(ErrsLock);
  ViewOpts.colored_ostream(errs(), raw_ostream::RED)
      << getErrorString(Message, Whence, /*Warning=*/true);
}
void CodeCoverageTool::addCollectedPath(const std::string &Path) {
SmallString<128> EffectivePath(Path);
if (std::error_code EC = sys::fs::make_absolute(EffectivePath)) {
error(EC.message(), Path);
return;
}
sys::path::remove_dots(EffectivePath, /*remove_dot_dots=*/true);
if (!IgnoreFilenameFilters.matchesFilename(EffectivePath))
SourceFiles.emplace_back(EffectivePath.str());
}
void CodeCoverageTool::collectPaths(const std::string &Path) {
llvm::sys::fs::file_status Status;
llvm::sys::fs::status(Path, Status);
if (!llvm::sys::fs::exists(Status)) {
if (PathRemapping)
addCollectedPath(Path);
else
warning("Source file doesn't exist, proceeded by ignoring it.", Path);
return;
}
if (llvm::sys::fs::is_regular_file(Status)) {
addCollectedPath(Path);
return;
}
if (llvm::sys::fs::is_directory(Status)) {
std::error_code EC;
for (llvm::sys::fs::recursive_directory_iterator F(Path, EC), E;
F != E; F.increment(EC)) {
auto Status = F->status();
if (!Status) {
warning(Status.getError().message(), F->path());
continue;
}
if (Status->type() == llvm::sys::fs::file_type::regular_file)
addCollectedPath(F->path());
}
}
}
/// Return the contents of \p SourceFile, consulting the path-equivalence
/// remapping and an in-memory cache. The whole lookup-or-load sequence runs
/// under LoadedSourceFilesLock so concurrent show-mode threads don't load a
/// file twice or race on the cache vector.
ErrorOr<const MemoryBuffer &>
CodeCoverageTool::getSourceFile(StringRef SourceFile) {
// If we've remapped filenames, look up the real location for this file.
std::unique_lock<std::mutex> Guard{LoadedSourceFilesLock};
if (!RemappedFilenames.empty()) {
auto Loc = RemappedFilenames.find(SourceFile);
if (Loc != RemappedFilenames.end())
SourceFile = Loc->second;
}
// Cache hit: compare by filesystem identity, not string equality, so
// different spellings of the same path share one buffer.
for (const auto &Files : LoadedSourceFiles)
if (sys::fs::equivalent(SourceFile, Files.first))
return *Files.second;
auto Buffer = MemoryBuffer::getFile(SourceFile);
if (auto EC = Buffer.getError()) {
error(EC.message(), SourceFile);
return EC;
}
// Cache miss: take ownership of the buffer and return a reference into the
// cache (stable because the unique_ptr owns the allocation).
LoadedSourceFiles.emplace_back(SourceFile, std::move(Buffer.get()));
return *LoadedSourceFiles.back().second;
}
/// Recursively attach sub-views for each macro expansion in \p Expansions to
/// \p View. No-op unless -show-expansions was given.
void CodeCoverageTool::attachExpansionSubViews(
SourceCoverageView &View, ArrayRef<ExpansionRecord> Expansions,
const CoverageMapping &Coverage) {
if (!ViewOpts.ShowExpandedRegions)
return;
for (const auto &Expansion : Expansions) {
auto ExpansionCoverage = Coverage.getCoverageForExpansion(Expansion);
if (ExpansionCoverage.empty())
continue;
auto SourceBuffer = getSourceFile(ExpansionCoverage.getFilename());
if (!SourceBuffer)
continue;
// Grab the nested expansions before ExpansionCoverage is moved into the
// sub-view below.
auto SubViewExpansions = ExpansionCoverage.getExpansions();
auto SubView =
SourceCoverageView::create(Expansion.Function.Name, SourceBuffer.get(),
ViewOpts, std::move(ExpansionCoverage));
// Recurse so expansions-within-expansions are also rendered.
attachExpansionSubViews(*SubView, SubViewExpansions, Coverage);
View.addExpansion(Expansion.Region, std::move(SubView));
}
}
/// Build a source view for \p Function (demangled when a demangler is
/// configured), with expansion sub-views attached. Returns nullptr when the
/// function has no coverage or its source cannot be read.
std::unique_ptr<SourceCoverageView>
CodeCoverageTool::createFunctionView(const FunctionRecord &Function,
                                     const CoverageMapping &Coverage) {
  auto FunctionCoverage = Coverage.getCoverageForFunction(Function);
  if (FunctionCoverage.empty())
    return nullptr;

  auto Buffer = getSourceFile(FunctionCoverage.getFilename());
  if (!Buffer)
    return nullptr;

  // Capture the expansions before FunctionCoverage is moved into the view.
  auto Expansions = FunctionCoverage.getExpansions();
  auto View =
      SourceCoverageView::create(DC.demangle(Function.Name), Buffer.get(),
                                 ViewOpts, std::move(FunctionCoverage));
  attachExpansionSubViews(*View, Expansions, Coverage);
  return View;
}
/// Build the top-level view for \p SourceFile, optionally decorated with
/// per-instantiation sub-views (templates/inlined copies) when
/// -show-instantiations is enabled. Returns nullptr if the file can't be
/// read or has no coverage.
std::unique_ptr<SourceCoverageView>
CodeCoverageTool::createSourceFileView(StringRef SourceFile,
const CoverageMapping &Coverage) {
auto SourceBuffer = getSourceFile(SourceFile);
if (!SourceBuffer)
return nullptr;
auto FileCoverage = Coverage.getCoverageForFile(SourceFile);
if (FileCoverage.empty())
return nullptr;
// Grab expansions before FileCoverage is moved into the view below.
auto Expansions = FileCoverage.getExpansions();
auto View = SourceCoverageView::create(SourceFile, SourceBuffer.get(),
ViewOpts, std::move(FileCoverage));
attachExpansionSubViews(*View, Expansions, Coverage);
if (!ViewOpts.ShowFunctionInstantiations)
return View;
for (const auto &Group : Coverage.getInstantiationGroups(SourceFile)) {
// Skip functions which have a single instantiation.
if (Group.size() < 2)
continue;
for (const FunctionRecord *Function : Group.getInstantiations()) {
// SubView stays null for never-executed instantiations; the view layer
// renders those without a per-instantiation breakdown.
std::unique_ptr<SourceCoverageView> SubView{nullptr};
StringRef Funcname = DC.demangle(Function->Name);
if (Function->ExecutionCount > 0) {
auto SubViewCoverage = Coverage.getCoverageForFunction(*Function);
auto SubViewExpansions = SubViewCoverage.getExpansions();
SubView = SourceCoverageView::create(
Funcname, SourceBuffer.get(), ViewOpts, std::move(SubViewCoverage));
attachExpansionSubViews(*SubView, SubViewExpansions, Coverage);
}
// Anchor the instantiation at the last line it covers within its primary
// file (max LineEnd over regions in that file).
unsigned FileID = Function->CountedRegions.front().FileID;
unsigned Line = 0;
for (const auto &CR : Function->CountedRegions)
if (CR.FileID == FileID)
Line = std::max(CR.LineEnd, Line);
View->addInstantiation(Funcname, Line, std::move(SubView));
}
}
return View;
}
/// Return true iff \p LHS was modified more recently than \p RHS. If either
/// file's status can't be read, conservatively return false.
static bool modifiedTimeGT(StringRef LHS, StringRef RHS) {
  sys::fs::file_status LHSStatus;
  if (sys::fs::status(LHS, LHSStatus))
    return false;
  sys::fs::file_status RHSStatus;
  if (sys::fs::status(RHS, RHSStatus))
    return false;
  return LHSStatus.getLastModificationTime() >
         RHSStatus.getLastModificationTime();
}
/// Load coverage for all object files against the indexed profile, then run
/// the post-load pipeline: path remapping, unmapped-input pruning, and
/// symbol demangling. Returns nullptr (after reporting) on load failure.
std::unique_ptr<CoverageMapping> CodeCoverageTool::load() {
// An object newer than the profile suggests the profile was collected from
// an older build; warn but proceed.
for (StringRef ObjectFilename : ObjectFilenames)
if (modifiedTimeGT(ObjectFilename, PGOFilename))
warning("profile data may be out of date - object is newer",
ObjectFilename);
auto CoverageOrErr =
CoverageMapping::load(ObjectFilenames, PGOFilename, CoverageArches);
if (Error E = CoverageOrErr.takeError()) {
error("Failed to load coverage: " + toString(std::move(E)),
join(ObjectFilenames.begin(), ObjectFilenames.end(), ", "));
return nullptr;
}
auto Coverage = std::move(CoverageOrErr.get());
// Functions whose hash doesn't match any profile record; with -dump, list
// each mismatching symbol and its hash.
unsigned Mismatched = Coverage->getMismatchedCount();
if (Mismatched) {
warning(Twine(Mismatched) + " functions have mismatched data");
if (ViewOpts.Debug) {
for (const auto &HashMismatch : Coverage->getHashMismatches())
errs() << "hash-mismatch: "
<< "No profile record found for '" << HashMismatch.first << "'"
<< " with hash = 0x" << Twine::utohexstr(HashMismatch.second)
<< '\n';
}
}
remapPathNames(*Coverage);
if (!SourceFiles.empty())
removeUnmappedInputs(*Coverage);
demangleSymbols(*Coverage);
return Coverage;
}
/// Populate RemappedFilenames from the -path-equivalence <from>,<to> pair:
/// coverage-data paths under <from> map to local paths under <to>. Also
/// rewrites any SourceFiles entries back to their coverage-data spelling so
/// later lookups against the mapping succeed.
void CodeCoverageTool::remapPathNames(const CoverageMapping &Coverage) {
if (!PathRemapping)
return;
// Convert remapping paths to native paths with trailing separators.
auto nativeWithTrailing = [](StringRef Path) -> std::string {
if (Path.empty())
return "";
SmallString<128> NativePath;
sys::path::native(Path, NativePath);
// Trailing separator makes the prefix match below directory-exact.
if (!sys::path::is_separator(NativePath.back()))
NativePath += sys::path::get_separator();
return NativePath.c_str();
};
std::string RemapFrom = nativeWithTrailing(PathRemapping->first);
std::string RemapTo = nativeWithTrailing(PathRemapping->second);
// Create a mapping from coverage data file paths to local paths.
for (StringRef Filename : Coverage.getUniqueSourceFiles()) {
SmallString<128> NativeFilename;
sys::path::native(Filename, NativeFilename);
if (NativeFilename.startswith(RemapFrom)) {
RemappedFilenames[Filename] =
RemapTo + NativeFilename.substr(RemapFrom.size()).str();
}
}
// Convert input files from local paths to coverage data file paths.
StringMap<std::string> InvRemappedFilenames;
for (const auto &RemappedFilename : RemappedFilenames)
InvRemappedFilenames[RemappedFilename.getValue()] = RemappedFilename.getKey();
for (std::string &Filename : SourceFiles) {
SmallString<128> NativeFilename;
sys::path::native(Filename, NativeFilename);
auto CovFileName = InvRemappedFilenames.find(NativeFilename);
if (CovFileName != InvRemappedFilenames.end())
Filename = CovFileName->second;
}
}
/// Drop entries from SourceFiles that don't appear in \p Coverage.
///
/// Note: std::binary_search requires CoveredFiles to be sorted — assumes
/// getUniqueSourceFiles() returns a sorted list (TODO: confirm against its
/// declaration).
void CodeCoverageTool::removeUnmappedInputs(const CoverageMapping &Coverage) {
  std::vector<StringRef> CoveredFiles = Coverage.getUniqueSourceFiles();
  // The user may have specified source files which aren't in the coverage
  // mapping. Filter these files away.
  // (Fixed: the iterator was previously dead-initialized to SourceFiles.end()
  // and immediately overwritten.)
  auto UncoveredFilesIt = std::remove_if(
      SourceFiles.begin(), SourceFiles.end(), [&](const std::string &SF) {
        return !std::binary_search(CoveredFiles.begin(), CoveredFiles.end(),
                                   SF);
      });
  SourceFiles.erase(UncoveredFilesIt, SourceFiles.end());
}
/// Run the external demangler (from -Xdemangler) over every covered symbol
/// and cache the results in DC.DemangledNames. Communication is via two
/// temporary files: symbol names in, one demangled name per line out. Any
/// failure is reported and demangling is simply skipped.
void CodeCoverageTool::demangleSymbols(const CoverageMapping &Coverage) {
if (!ViewOpts.hasDemangler())
return;
// Pass function names to the demangler in a temporary file.
int InputFD;
SmallString<256> InputPath;
std::error_code EC =
sys::fs::createTemporaryFile("demangle-in", "list", InputFD, InputPath);
if (EC) {
error(InputPath, EC.message());
return;
}
// ToolOutputFile removes the temp file on destruction (keep() is never
// called).
ToolOutputFile InputTOF{InputPath, InputFD};
unsigned NumSymbols = 0;
for (const auto &Function : Coverage.getCoveredFunctions()) {
InputTOF.os() << Function.Name << '\n';
++NumSymbols;
}
// Close so the demangler subprocess sees the full contents.
InputTOF.os().close();
// Use another temporary file to store the demangler's output.
int OutputFD;
SmallString<256> OutputPath;
EC = sys::fs::createTemporaryFile("demangle-out", "list", OutputFD,
OutputPath);
if (EC) {
error(OutputPath, EC.message());
return;
}
ToolOutputFile OutputTOF{OutputPath, OutputFD};
OutputTOF.os().close();
// Invoke the demangler with stdin/stdout redirected to the temp files.
std::vector<StringRef> ArgsV;
for (StringRef Arg : ViewOpts.DemanglerOpts)
ArgsV.push_back(Arg);
Optional<StringRef> Redirects[] = {InputPath.str(), OutputPath.str(), {""}};
std::string ErrMsg;
int RC = sys::ExecuteAndWait(ViewOpts.DemanglerOpts[0], ArgsV,
/*env=*/None, Redirects, /*secondsToWait=*/0,
/*memoryLimit=*/0, &ErrMsg);
if (RC) {
error(ErrMsg, ViewOpts.DemanglerOpts[0]);
return;
}
// Parse the demangler's output.
auto BufOrError = MemoryBuffer::getFile(OutputPath);
if (!BufOrError) {
error(OutputPath, BufOrError.getError().message());
return;
}
std::unique_ptr<MemoryBuffer> DemanglerBuf = std::move(*BufOrError);
SmallVector<StringRef, 8> Symbols;
StringRef DemanglerData = DemanglerBuf->getBuffer();
DemanglerData.split(Symbols, '\n', /*MaxSplit=*/NumSymbols,
/*KeepEmpty=*/false);
// Require exactly one output line per input symbol; otherwise the pairing
// below would associate names with the wrong functions.
if (Symbols.size() != NumSymbols) {
error("Demangler did not provide expected number of symbols");
return;
}
// Cache the demangled names.
unsigned I = 0;
for (const auto &Function : Coverage.getCoveredFunctions())
// On Windows, lines in the demangler's output file end with "\r\n".
// Splitting by '\n' keeps '\r's, so cut them now.
DC.DemangledNames[Function.Name] = Symbols[I++].rtrim();
}
/// Render the coverage view for \p SourceFile through \p Printer. Safe to run
/// from multiple threads in -output-dir mode (diagnostics and source loading
/// are internally locked; each invocation writes a distinct view file).
void CodeCoverageTool::writeSourceFileView(StringRef SourceFile,
CoverageMapping *Coverage,
CoveragePrinter *Printer,
bool ShowFilenames) {
auto View = createSourceFileView(SourceFile, *Coverage);
if (!View) {
warning("The file '" + SourceFile + "' isn't covered.");
return;
}
auto OSOrErr = Printer->createViewFile(SourceFile, /*InToplevel=*/false);
if (Error E = OSOrErr.takeError()) {
error("Could not create view file!", toString(std::move(E)));
return;
}
auto OS = std::move(OSOrErr.get());
View->print(*OS.get(), /*Wholefile=*/true,
/*ShowSourceName=*/ShowFilenames,
/*ShowTitle=*/ViewOpts.hasOutputDirectory());
Printer->closeViewFile(std::move(OS));
}
/// Register the options shared by every subcommand, build the deferred
/// command-line parser, and dispatch to the requested subcommand. The
/// subcommand registers its own options first and then invokes the parser,
/// so all options are visible to a single ParseCommandLineOptions call.
int CodeCoverageTool::run(Command Cmd, int argc, const char **argv) {
cl::opt<std::string> CovFilename(
cl::Positional, cl::desc("Covered executable or object file."));
cl::list<std::string> CovFilenames(
"object", cl::desc("Coverage executable or object file"), cl::ZeroOrMore,
cl::CommaSeparated);
cl::list<std::string> InputSourceFiles(
cl::Positional, cl::desc("<Source files>"), cl::ZeroOrMore);
cl::opt<bool> DebugDumpCollectedPaths(
"dump-collected-paths", cl::Optional, cl::Hidden,
cl::desc("Show the collected paths to source files"));
// cl::location binds the option's storage directly to this->PGOFilename.
cl::opt<std::string, true> PGOFilename(
"instr-profile", cl::Required, cl::location(this->PGOFilename),
cl::desc(
"File with the profile data obtained after an instrumented run"));
cl::list<std::string> Arches(
"arch", cl::desc("architectures of the coverage mapping binaries"));
cl::opt<bool> DebugDump("dump", cl::Optional,
cl::desc("Show internal debug dump"));
cl::opt<CoverageViewOptions::OutputFormat> Format(
"format", cl::desc("Output format for line-based coverage reports"),
cl::values(clEnumValN(CoverageViewOptions::OutputFormat::Text, "text",
"Text output"),
clEnumValN(CoverageViewOptions::OutputFormat::HTML, "html",
"HTML output"),
clEnumValN(CoverageViewOptions::OutputFormat::Lcov, "lcov",
"lcov tracefile output")),
cl::init(CoverageViewOptions::OutputFormat::Text));
cl::opt<std::string> PathRemap(
"path-equivalence", cl::Optional,
cl::desc("<from>,<to> Map coverage data paths to local source file "
"paths"));
cl::OptionCategory FilteringCategory("Function filtering options");
cl::list<std::string> NameFilters(
"name", cl::Optional,
cl::desc("Show code coverage only for functions with the given name"),
cl::ZeroOrMore, cl::cat(FilteringCategory));
cl::list<std::string> NameFilterFiles(
"name-whitelist", cl::Optional,
cl::desc("Show code coverage only for functions listed in the given "
"file"),
cl::ZeroOrMore, cl::cat(FilteringCategory));
cl::list<std::string> NameRegexFilters(
"name-regex", cl::Optional,
cl::desc("Show code coverage only for functions that match the given "
"regular expression"),
cl::ZeroOrMore, cl::cat(FilteringCategory));
cl::list<std::string> IgnoreFilenameRegexFilters(
"ignore-filename-regex", cl::Optional,
cl::desc("Skip source code files with file paths that match the given "
"regular expression"),
cl::ZeroOrMore, cl::cat(FilteringCategory));
cl::opt<double> RegionCoverageLtFilter(
"region-coverage-lt", cl::Optional,
cl::desc("Show code coverage only for functions with region coverage "
"less than the given threshold"),
cl::cat(FilteringCategory));
cl::opt<double> RegionCoverageGtFilter(
"region-coverage-gt", cl::Optional,
cl::desc("Show code coverage only for functions with region coverage "
"greater than the given threshold"),
cl::cat(FilteringCategory));
cl::opt<double> LineCoverageLtFilter(
"line-coverage-lt", cl::Optional,
cl::desc("Show code coverage only for functions with line coverage less "
"than the given threshold"),
cl::cat(FilteringCategory));
cl::opt<double> LineCoverageGtFilter(
"line-coverage-gt", cl::Optional,
cl::desc("Show code coverage only for functions with line coverage "
"greater than the given threshold"),
cl::cat(FilteringCategory));
cl::opt<cl::boolOrDefault> UseColor(
"use-color", cl::desc("Emit colored output (default=autodetect)"),
cl::init(cl::BOU_UNSET));
cl::list<std::string> DemanglerOpts(
"Xdemangler", cl::desc("<demangler-path>|<demangler-option>"));
cl::opt<bool> RegionSummary(
"show-region-summary", cl::Optional,
cl::desc("Show region statistics in summary table"),
cl::init(true));
cl::opt<bool> InstantiationSummary(
"show-instantiation-summary", cl::Optional,
cl::desc("Show instantiation statistics in summary table"));
cl::opt<bool> SummaryOnly(
"summary-only", cl::Optional,
cl::desc("Export only summary information for each source file"));
cl::opt<unsigned> NumThreads(
"num-threads", cl::init(0),
cl::desc("Number of merge threads to use (default: autodetect)"));
cl::alias NumThreadsA("j", cl::desc("Alias for --num-threads"),
cl::aliasopt(NumThreads));
// Deferred parser: parses argv and validates/propagates shared state into
// ViewOpts and the tool's members. Returns nonzero on error.
auto commandLineParser = [&, this](int argc, const char **argv) -> int {
cl::ParseCommandLineOptions(argc, argv, "LLVM code coverage tool\n");
ViewOpts.Debug = DebugDump;
// Object files come from both the positional argument and -object; at
// least one is required.
if (!CovFilename.empty())
ObjectFilenames.emplace_back(CovFilename);
for (const std::string &Filename : CovFilenames)
ObjectFilenames.emplace_back(Filename);
if (ObjectFilenames.empty()) {
errs() << "No filenames specified!\n";
::exit(1);
}
// Resolve -use-color per output format: text autodetects, HTML forces
// color on, lcov forces it off.
ViewOpts.Format = Format;
switch (ViewOpts.Format) {
case CoverageViewOptions::OutputFormat::Text:
ViewOpts.Colors = UseColor == cl::BOU_UNSET
? sys::Process::StandardOutHasColors()
: UseColor == cl::BOU_TRUE;
break;
case CoverageViewOptions::OutputFormat::HTML:
if (UseColor == cl::BOU_FALSE)
errs() << "Color output cannot be disabled when generating html.\n";
ViewOpts.Colors = true;
break;
case CoverageViewOptions::OutputFormat::Lcov:
if (UseColor == cl::BOU_TRUE)
errs() << "Color output cannot be enabled when generating lcov.\n";
ViewOpts.Colors = false;
break;
}
// If path-equivalence was given and is a comma seperated pair then set
// PathRemapping.
auto EquivPair = StringRef(PathRemap).split(',');
if (!(EquivPair.first.empty() && EquivPair.second.empty()))
PathRemapping = EquivPair;
// If a demangler is supplied, check if it exists and register it.
if (!DemanglerOpts.empty()) {
auto DemanglerPathOrErr = sys::findProgramByName(DemanglerOpts[0]);
if (!DemanglerPathOrErr) {
error("Could not find the demangler!",
DemanglerPathOrErr.getError().message());
return 1;
}
DemanglerOpts[0] = *DemanglerPathOrErr;
ViewOpts.DemanglerOpts.swap(DemanglerOpts);
}
// Read in -name-whitelist files.
if (!NameFilterFiles.empty()) {
std::string SpecialCaseListErr;
NameWhitelist =
SpecialCaseList::create(NameFilterFiles, SpecialCaseListErr);
if (!NameWhitelist)
error(SpecialCaseListErr);
}
// Create the function filters
if (!NameFilters.empty() || NameWhitelist || !NameRegexFilters.empty()) {
auto NameFilterer = llvm::make_unique<CoverageFilters>();
for (const auto &Name : NameFilters)
NameFilterer->push_back(llvm::make_unique<NameCoverageFilter>(Name));
if (NameWhitelist)
NameFilterer->push_back(
llvm::make_unique<NameWhitelistCoverageFilter>(*NameWhitelist));
for (const auto &Regex : NameRegexFilters)
NameFilterer->push_back(
llvm::make_unique<NameRegexCoverageFilter>(Regex));
Filters.push_back(std::move(NameFilterer));
}
// Statistics thresholds only apply when explicitly passed, hence the
// getNumOccurrences() checks rather than value comparisons.
if (RegionCoverageLtFilter.getNumOccurrences() ||
RegionCoverageGtFilter.getNumOccurrences() ||
LineCoverageLtFilter.getNumOccurrences() ||
LineCoverageGtFilter.getNumOccurrences()) {
auto StatFilterer = llvm::make_unique<CoverageFilters>();
if (RegionCoverageLtFilter.getNumOccurrences())
StatFilterer->push_back(llvm::make_unique<RegionCoverageFilter>(
RegionCoverageFilter::LessThan, RegionCoverageLtFilter));
if (RegionCoverageGtFilter.getNumOccurrences())
StatFilterer->push_back(llvm::make_unique<RegionCoverageFilter>(
RegionCoverageFilter::GreaterThan, RegionCoverageGtFilter));
if (LineCoverageLtFilter.getNumOccurrences())
StatFilterer->push_back(llvm::make_unique<LineCoverageFilter>(
LineCoverageFilter::LessThan, LineCoverageLtFilter));
if (LineCoverageGtFilter.getNumOccurrences())
StatFilterer->push_back(llvm::make_unique<LineCoverageFilter>(
RegionCoverageFilter::GreaterThan, LineCoverageGtFilter));
Filters.push_back(std::move(StatFilterer));
}
// Create the ignore filename filters.
for (const auto &RE : IgnoreFilenameRegexFilters)
IgnoreFilenameFilters.push_back(
llvm::make_unique<NameRegexCoverageFilter>(RE));
// When -arch is used, each object file needs a matching architecture.
if (!Arches.empty()) {
for (const std::string &Arch : Arches) {
if (Triple(Arch).getArch() == llvm::Triple::ArchType::UnknownArch) {
error("Unknown architecture: " + Arch);
return 1;
}
CoverageArches.emplace_back(Arch);
}
if (CoverageArches.size() != ObjectFilenames.size()) {
error("Number of architectures doesn't match the number of objects");
return 1;
}
}
// IgnoreFilenameFilters are applied even when InputSourceFiles specified.
for (const std::string &File : InputSourceFiles)
collectPaths(File);
if (DebugDumpCollectedPaths) {
for (const std::string &SF : SourceFiles)
outs() << SF << '\n';
::exit(0);
}
ViewOpts.ShowRegionSummary = RegionSummary;
ViewOpts.ShowInstantiationSummary = InstantiationSummary;
ViewOpts.ExportSummaryOnly = SummaryOnly;
ViewOpts.NumThreads = NumThreads;
return 0;
};
switch (Cmd) {
case Show:
return doShow(argc, argv, commandLineParser);
case Report:
return doReport(argc, argv, commandLineParser);
case Export:
return doExport(argc, argv, commandLineParser);
}
return 0;
}
/// Implementation of 'llvm-cov show': registers show-specific options, loads
/// coverage, and renders per-file (or per-matching-function) source views,
/// optionally in parallel when writing to an output directory.
int CodeCoverageTool::doShow(int argc, const char **argv,
CommandLineParserType commandLineParser) {
cl::OptionCategory ViewCategory("Viewing options");
cl::opt<bool> ShowLineExecutionCounts(
"show-line-counts", cl::Optional,
cl::desc("Show the execution counts for each line"), cl::init(true),
cl::cat(ViewCategory));
cl::opt<bool> ShowRegions(
"show-regions", cl::Optional,
cl::desc("Show the execution counts for each region"),
cl::cat(ViewCategory));
cl::opt<bool> ShowBestLineRegionsCounts(
"show-line-counts-or-regions", cl::Optional,
cl::desc("Show the execution counts for each line, or the execution "
"counts for each region on lines that have multiple regions"),
cl::cat(ViewCategory));
cl::opt<bool> ShowExpansions("show-expansions", cl::Optional,
cl::desc("Show expanded source regions"),
cl::cat(ViewCategory));
cl::opt<bool> ShowInstantiations("show-instantiations", cl::Optional,
cl::desc("Show function instantiations"),
cl::init(true), cl::cat(ViewCategory));
cl::opt<std::string> ShowOutputDirectory(
"output-dir", cl::init(""),
cl::desc("Directory in which coverage information is written out"));
cl::alias ShowOutputDirectoryA("o", cl::desc("Alias for --output-dir"),
cl::aliasopt(ShowOutputDirectory));
cl::opt<uint32_t> TabSize(
"tab-size", cl::init(2),
cl::desc(
"Set tab expansion size for html coverage reports (default = 2)"));
cl::opt<std::string> ProjectTitle(
"project-title", cl::Optional,
cl::desc("Set project title for the coverage report"));
auto Err = commandLineParser(argc, argv);
if (Err)
return Err;
// lcov output is export-only.
if (ViewOpts.Format == CoverageViewOptions::OutputFormat::Lcov) {
error("Lcov format should be used with 'llvm-cov export'.");
return 1;
}
ViewOpts.ShowLineNumbers = true;
// Line stats are on by default; they are suppressed only when -show-regions
// is given without an explicit line-count request.
ViewOpts.ShowLineStats = ShowLineExecutionCounts.getNumOccurrences() != 0 ||
!ShowRegions || ShowBestLineRegionsCounts;
ViewOpts.ShowRegionMarkers = ShowRegions || ShowBestLineRegionsCounts;
ViewOpts.ShowExpandedRegions = ShowExpansions;
ViewOpts.ShowFunctionInstantiations = ShowInstantiations;
ViewOpts.ShowOutputDirectory = ShowOutputDirectory;
ViewOpts.TabSize = TabSize;
ViewOpts.ProjectTitle = ProjectTitle;
if (ViewOpts.hasOutputDirectory()) {
if (auto E = sys::fs::create_directories(ViewOpts.ShowOutputDirectory)) {
error("Could not create output directory!", E.message());
return 1;
}
}
// Use the profile's mtime (up to minute precision — seconds are trimmed at
// the last ':') as the report's "Created:" timestamp.
sys::fs::file_status Status;
if (sys::fs::status(PGOFilename, Status)) {
error("profdata file error: can not get the file status. \n");
return 1;
}
auto ModifiedTime = Status.getLastModificationTime();
std::string ModifiedTimeStr = to_string(ModifiedTime);
size_t found = ModifiedTimeStr.rfind(':');
ViewOpts.CreatedTimeStr = (found != std::string::npos)
? "Created: " + ModifiedTimeStr.substr(0, found)
: "Created: " + ModifiedTimeStr;
auto Coverage = load();
if (!Coverage)
return 1;
auto Printer = CoveragePrinter::create(ViewOpts);
if (SourceFiles.empty())
// Get the source files from the function coverage mapping.
for (StringRef Filename : Coverage->getUniqueSourceFiles()) {
if (!IgnoreFilenameFilters.matchesFilename(Filename))
SourceFiles.push_back(Filename);
}
// Create an index out of the source files.
if (ViewOpts.hasOutputDirectory()) {
if (Error E = Printer->createIndexFile(SourceFiles, *Coverage, Filters)) {
error("Could not create index file!", toString(std::move(E)));
return 1;
}
}
// With function filters active, render only the matching functions grouped
// by file, then return without emitting whole-file views.
if (!Filters.empty()) {
// Build the map of filenames to functions.
std::map<llvm::StringRef, std::vector<const FunctionRecord *>>
FilenameFunctionMap;
for (const auto &SourceFile : SourceFiles)
for (const auto &Function : Coverage->getCoveredFunctions(SourceFile))
if (Filters.matches(*Coverage.get(), Function))
FilenameFunctionMap[SourceFile].push_back(&Function);
// Only print filter matching functions for each file.
for (const auto &FileFunc : FilenameFunctionMap) {
StringRef File = FileFunc.first;
const auto &Functions = FileFunc.second;
auto OSOrErr = Printer->createViewFile(File, /*InToplevel=*/false);
if (Error E = OSOrErr.takeError()) {
error("Could not create view file!", toString(std::move(E)));
return 1;
}
auto OS = std::move(OSOrErr.get());
bool ShowTitle = ViewOpts.hasOutputDirectory();
for (const auto *Function : Functions) {
auto FunctionView = createFunctionView(*Function, *Coverage);
if (!FunctionView) {
warning("Could not read coverage for '" + Function->Name + "'.");
continue;
}
FunctionView->print(*OS.get(), /*WholeFile=*/false,
/*ShowSourceName=*/true, ShowTitle);
// Only the first function view per file carries the title.
ShowTitle = false;
}
Printer->closeViewFile(std::move(OS));
}
return 0;
}
// Show files
bool ShowFilenames =
(SourceFiles.size() != 1) || ViewOpts.hasOutputDirectory() ||
(ViewOpts.Format == CoverageViewOptions::OutputFormat::HTML);
auto NumThreads = ViewOpts.NumThreads;
// If NumThreads is not specified, auto-detect a good default.
if (NumThreads == 0)
NumThreads =
std::max(1U, std::min(llvm::heavyweight_hardware_concurrency(),
unsigned(SourceFiles.size())));
// Serial rendering when writing to stdout (interleaving would garble it) or
// when only one thread is requested.
if (!ViewOpts.hasOutputDirectory() || NumThreads == 1) {
for (const std::string &SourceFile : SourceFiles)
writeSourceFileView(SourceFile, Coverage.get(), Printer.get(),
ShowFilenames);
} else {
// In -output-dir mode, it's safe to use multiple threads to print files.
ThreadPool Pool(NumThreads);
for (const std::string &SourceFile : SourceFiles)
Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile,
Coverage.get(), Printer.get(), ShowFilenames);
Pool.wait();
}
return 0;
}
/// Implementation of 'llvm-cov report': prints per-file summaries, or
/// per-function summaries when -show-functions is given (which then requires
/// explicit source files). Text output only.
int CodeCoverageTool::doReport(int argc, const char **argv,
CommandLineParserType commandLineParser) {
cl::opt<bool> ShowFunctionSummaries(
"show-functions", cl::Optional, cl::init(false),
cl::desc("Show coverage summaries for each function"));
auto Err = commandLineParser(argc, argv);
if (Err)
return Err;
// Reports only support textual output; reject html/lcov up front.
if (ViewOpts.Format == CoverageViewOptions::OutputFormat::HTML) {
error("HTML output for summary reports is not yet supported.");
return 1;
} else if (ViewOpts.Format == CoverageViewOptions::OutputFormat::Lcov) {
error("Lcov format should be used with 'llvm-cov export'.");
return 1;
}
auto Coverage = load();
if (!Coverage)
return 1;
CoverageReport Report(ViewOpts, *Coverage.get());
if (!ShowFunctionSummaries) {
// Per-file mode: with no explicit sources, report everything that isn't
// filtered out by -ignore-filename-regex.
if (SourceFiles.empty())
Report.renderFileReports(llvm::outs(), IgnoreFilenameFilters);
else
Report.renderFileReports(llvm::outs(), SourceFiles);
} else {
if (SourceFiles.empty()) {
error("Source files must be specified when -show-functions=true is "
"specified");
return 1;
}
Report.renderFunctionReports(SourceFiles, DC, llvm::outs());
}
return 0;
}
// Implements the `llvm-cov export` subcommand: dump the loaded coverage
// mapping as textual JSON or as an lcov tracefile, optionally restricted to
// the requested source files.
int CodeCoverageTool::doExport(int argc, const char **argv,
                               CommandLineParserType commandLineParser) {
  cl::OptionCategory ExportCategory("Exporting options");

  cl::opt<bool> SkipExpansions("skip-expansions", cl::Optional,
                               cl::desc("Don't export expanded source regions"),
                               cl::cat(ExportCategory));

  cl::opt<bool> SkipFunctions("skip-functions", cl::Optional,
                              cl::desc("Don't export per-function data"),
                              cl::cat(ExportCategory));

  auto Err = commandLineParser(argc, argv);
  if (Err)
    return Err;

  ViewOpts.SkipExpansions = SkipExpansions;
  ViewOpts.SkipFunctions = SkipFunctions;

  // Only the text (JSON) and lcov formats can be exported; reject anything
  // else before paying the cost of loading coverage data.
  if (ViewOpts.Format != CoverageViewOptions::OutputFormat::Text &&
      ViewOpts.Format != CoverageViewOptions::OutputFormat::Lcov) {
    error("Coverage data can only be exported as textual JSON or an "
          "lcov tracefile.");
    return 1;
  }

  auto Coverage = load();
  if (!Coverage) {
    error("Could not load coverage information");
    return 1;
  }

  std::unique_ptr<CoverageExporter> Exporter;

  switch (ViewOpts.Format) {
  case CoverageViewOptions::OutputFormat::Text:
    Exporter = llvm::make_unique<CoverageExporterJson>(*Coverage.get(),
                                                       ViewOpts, outs());
    break;
  case CoverageViewOptions::OutputFormat::HTML:
    // Unreachable because we should have gracefully terminated with an error
    // above.
    llvm_unreachable("Export in HTML is not supported!");
  case CoverageViewOptions::OutputFormat::Lcov:
    Exporter = llvm::make_unique<CoverageExporterLcov>(*Coverage.get(),
                                                       ViewOpts, outs());
    break;
  }

  // With no explicit file list, export everything that survives the
  // ignore-filename filters.
  if (SourceFiles.empty())
    Exporter->renderRoot(IgnoreFilenameFilters);
  else
    Exporter->renderRoot(SourceFiles);

  return 0;
}
int showMain(int argc, const char *argv[]) {
CodeCoverageTool Tool;
return Tool.run(CodeCoverageTool::Show, argc, argv);
}
int reportMain(int argc, const char *argv[]) {
CodeCoverageTool Tool;
return Tool.run(CodeCoverageTool::Report, argc, argv);
}
int exportMain(int argc, const char *argv[]) {
CodeCoverageTool Tool;
return Tool.run(CodeCoverageTool::Export, argc, argv);
}
|
#include "common/upstream/logical_dns_cluster.h"
#include <chrono>
#include <list>
#include <string>
#include <vector>
#include "common/common/fmt.h"
#include "common/config/utility.h"
#include "common/network/address_impl.h"
#include "common/network/utility.h"
#include "common/protobuf/protobuf.h"
#include "common/protobuf/utility.h"
namespace Envoy {
namespace Upstream {
// Construct a logical DNS cluster: validates the single-host config, derives
// the DNS lookup family and target URL, and prepares per-thread storage for
// the currently resolved address. Throws EnvoyException on invalid config.
LogicalDnsCluster::LogicalDnsCluster(const envoy::api::v2::Cluster& cluster,
                                     Runtime::Loader& runtime, Stats::Store& stats,
                                     Ssl::ContextManager& ssl_context_manager,
                                     Network::DnsResolverSharedPtr dns_resolver,
                                     ThreadLocal::SlotAllocator& tls, ClusterManager& cm,
                                     Event::Dispatcher& dispatcher, bool added_via_api)
    : ClusterImplBase(cluster, cm.sourceAddress(), runtime, stats, ssl_context_manager,
                      added_via_api),
      dns_resolver_(dns_resolver),
      // Re-resolution interval; defaults to 5s when not set in the proto.
      dns_refresh_rate_ms_(
          std::chrono::milliseconds(PROTOBUF_GET_MS_OR_DEFAULT(cluster, dns_refresh_rate, 5000))),
      tls_(tls.allocateSlot()),
      resolve_timer_(dispatcher.createTimer([this]() -> void { startResolve(); })) {
  // A logical DNS cluster points at exactly one hostname.
  const auto& hosts = cluster.hosts();
  if (hosts.size() != 1) {
    throw EnvoyException("logical_dns clusters must have a single host");
  }

  switch (cluster.dns_lookup_family()) {
  case envoy::api::v2::Cluster::V6_ONLY:
    dns_lookup_family_ = Network::DnsLookupFamily::V6Only;
    break;
  case envoy::api::v2::Cluster::V4_ONLY:
    dns_lookup_family_ = Network::DnsLookupFamily::V4Only;
    break;
  case envoy::api::v2::Cluster::AUTO:
    dns_lookup_family_ = Network::DnsLookupFamily::Auto;
    break;
  default:
    NOT_REACHED;
  }

  const auto& socket_address = hosts[0].socket_address();
  dns_url_ = fmt::format("tcp://{}:{}", socket_address.address(), socket_address.port_value());
  hostname_ = Network::Utility::hostFromTcpUrl(dns_url_);
  // NOTE(review): result deliberately discarded — presumably this call
  // validates the port portion of the URL early (throwing on a malformed
  // value) so later uses in startResolve() cannot fail. Confirm against
  // Network::Utility::portFromTcpUrl's contract.
  Network::Utility::portFromTcpUrl(dns_url_);

  // Each worker thread keeps its own view of the current resolved address.
  tls_->set([](Event::Dispatcher&) -> ThreadLocal::ThreadLocalObjectSharedPtr {
    return std::make_shared<PerThreadCurrentHostData>();
  });
}
// Kick off the first DNS resolution as part of cluster pre-initialization.
void LogicalDnsCluster::startPreInit() {
  startResolve();
}
// Cancel any in-flight DNS query so its completion callback cannot fire
// into a destroyed cluster.
LogicalDnsCluster::~LogicalDnsCluster() {
  if (active_dns_query_ != nullptr) {
    active_dns_query_->cancel();
  }
}
// Resolve the cluster's hostname asynchronously. On completion, publishes any
// changed address to all worker threads, lazily creates the single logical
// host on first success, signals pre-init completion, and re-arms the
// refresh timer. The callback clears active_dns_query_, so the destructor's
// cancel() only applies to queries still in flight.
void LogicalDnsCluster::startResolve() {
  std::string dns_address = Network::Utility::hostFromTcpUrl(dns_url_);
  ENVOY_LOG(debug, "starting async DNS resolution for {}", dns_address);
  info_->stats().update_attempt_.inc();

  active_dns_query_ = dns_resolver_->resolve(
      dns_address, dns_lookup_family_,
      [this,
       dns_address](std::list<Network::Address::InstanceConstSharedPtr>&& address_list) -> void {
        active_dns_query_ = nullptr;
        ENVOY_LOG(debug, "async DNS resolution complete for {}", dns_address);
        info_->stats().update_success_.inc();

        if (!address_list.empty()) {
          // TODO(mattklein123): Move port handling into the DNS interface.
          ASSERT(address_list.front() != nullptr);
          // Combine the resolved IP with the port from the configured URL.
          Network::Address::InstanceConstSharedPtr new_address =
              Network::Utility::getAddressWithPort(*address_list.front(),
                                                   Network::Utility::portFromTcpUrl(dns_url_));
          // Only fan out to workers when the address actually changed.
          if (!current_resolved_address_ || !(*new_address == *current_resolved_address_)) {
            current_resolved_address_ = new_address;
            // Capture URL to avoid a race with another update.
            tls_->runOnAllThreads([this, new_address]() -> void {
              tls_->getTyped<PerThreadCurrentHostData>().current_resolved_address_ = new_address;
            });
          }

          // First successful resolve: create the single logical host.
          if (!logical_host_) {
            // TODO(mattklein123): The logical host is only used in /clusters admin output. We used
            // to show the friendly DNS name in that output, but currently there is no way to
            // express a DNS name inside of an Address::Instance. For now this is OK but we might
            // want to do better again later.
            switch (address_list.front()->ip()->version()) {
            case Network::Address::IpVersion::v4:
              logical_host_.reset(
                  new LogicalHost(info_, hostname_, Network::Utility::getIpv4AnyAddress(), *this));
              break;
            case Network::Address::IpVersion::v6:
              logical_host_.reset(
                  new LogicalHost(info_, hostname_, Network::Utility::getIpv6AnyAddress(), *this));
              break;
            }

            HostVectorSharedPtr new_hosts(new std::vector<HostSharedPtr>());
            new_hosts->emplace_back(logical_host_);
            // Given the current config, only EDS clusters support multiple priorities.
            ASSERT(priority_set_.hostSetsPerPriority().size() == 1);
            auto& first_host_set = priority_set_.getOrCreateHostSet(0);
            first_host_set.updateHosts(new_hosts, createHealthyHostList(*new_hosts),
                                       empty_host_lists_, empty_host_lists_, *new_hosts, {});
          }
        }

        // Runs even on empty results: pre-init completes and the next
        // resolution is scheduled regardless of success.
        onPreInitComplete();
        resolve_timer_->enableTimer(dns_refresh_rate_ms_);
      });
}
// Open a connection to whatever address this worker thread currently has for
// the logical host. The per-thread data must already hold a resolved address
// (guaranteed once the first resolution published one); the returned host
// description pins that address for the connection's lifetime.
Upstream::Host::CreateConnectionData LogicalDnsCluster::LogicalHost::createConnection(
    Event::Dispatcher& dispatcher,
    const Network::ConnectionSocket::OptionsSharedPtr& options) const {
  PerThreadCurrentHostData& data = parent_.tls_->getTyped<PerThreadCurrentHostData>();
  ASSERT(data.current_resolved_address_);
  return {HostImpl::createConnection(dispatcher, *parent_.info_, data.current_resolved_address_,
                                     options),
          HostDescriptionConstSharedPtr{
              new RealHostDescription(data.current_resolved_address_, shared_from_this())}};
}
} // namespace Upstream
} // namespace Envoy
|
#ifndef DISPLAY_INCLUDE_GUARD_
#define DISPLAY_INCLUDE_GUARD_
#include <stdint.h>
//*****************************************************************************
//
// Make sure all of the definitions in this header have a C binding.
//
//*****************************************************************************
// NOTE(review): the banner above claims C linkage, but there is no
// extern "C" wrapper, and clear_vram/blit_vram are overloaded — which is
// only legal in C++. Confirm whether the extern "C" block was dropped or
// the banner is stale.
// Board/panel configuration.
#define CONFIG_WROVER_KIT_V2 1
#define CONFIG_LCD_USE_FAST_PINS 0
// Panel dimensions in pixels.
#define DISPLAY_WIDTH 240
#define DISPLAY_HEIGHT 320
// 2D screen coordinate (pixel units).
typedef struct s_point_s {
  uint16_t x;
  uint16_t y;
} point_s;
// Backing video RAM and palette, defined in the display implementation.
extern uint8_t vram[];
extern uint16_t myPalette[];
// low level screen functions
// Initialize the ILI9341 LCD controller.
void ili9341_init();
// VRAM functions
// Clear the entire VRAM buffer.
void clear_vram();
// Clear only the given rectangular region of VRAM.
void clear_vram(
    const uint16_t x,
    const uint16_t y,
    const uint16_t width,
    const uint16_t height);
// Push the whole VRAM buffer to the panel.
void display_vram();
// Push only the given rectangular region of VRAM to the panel.
void blit_vram(
    const uint16_t x,
    const uint16_t y,
    const uint16_t width,
    const uint16_t height);
// text functions
// Draw a single 8x12 glyph from `_char_matrix` at (x_start, y_start) in
// palette color `clr`.
void Draw_8x12_char(
    char* _char_matrix,
    int x_start,
    int y_start,
    unsigned char clr);
// Draw `len` characters of `str` using the 8x12 font.
void Draw_8x12_string(
    char* str,
    unsigned char len,
    int x_start,
    int y_start,
    unsigned char clr);
// Draw a single 5x8 glyph from `_char_matrix` at (x_start, y_start).
void Draw_5x8_char(
    char* _char_matrix,
    int x_start,
    int y_start,
    unsigned char clr);
// Draw `len` characters of `str` using the 5x8 font.
void Draw_5x8_string(
    char* str,
    unsigned char len,
    int x_start,
    int y_start,
    unsigned char clr);
// drawing functions
// Rectangle with separate outline and fill palette colors.
void draw_rectangle(
    const point_s pos,
    const uint16_t width,
    const uint16_t height,
    const uint8_t outline,
    const uint8_t fill);
// Circle centered at `pos` with separate outline and fill colors.
void draw_circle(
    const point_s pos,
    const uint16_t radius,
    const uint8_t outline,
    const uint8_t fill);
// Straight line from `start` to `end` in palette color `color`.
void draw_line(
    const point_s start,
    const point_s end,
    const uint8_t color);
#endif //DISPLAY_INCLUDE_GUARD_
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#include "Peephole.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "ControlFlow.h"
#include "DexClass.h"
#include "IRInstruction.h"
#include "PassManager.h"
#include "DexUtil.h"
#include "Walkers.h"
namespace {
// Names of the metrics this pass reports through the PassManager.
constexpr const char* METRIC_SIMPLE_CLS_NAME_REMOVED =
    "num_simple_cls_name_removed";
constexpr const char* METRIC_CHECK_CASTS_REMOVED =
    "num_redundant_check_cast_removed";
constexpr const char* METRIC_CHECK_CASTS_SUPER_REMOVED =
    "num_redundant_check_casts_super_removed";
// Cached handle for Ljava/lang/Class;.getSimpleName(). Arguments to
// make_method are: containing class, method name, return type, and the
// (empty) argument type list.
const DexMethod* method_Class_getSimpleName() {
  static auto const ret = DexMethod::make_method("Ljava/lang/Class;",
                                                 "getSimpleName",
                                                 "Ljava/lang/String;",
                                                 {}); // no arguments
  return ret;
}
// Extract the unqualified class name from a JVM type descriptor, e.g.
// "Lcom/foo/Bar;" -> "Bar".
DexString* get_simple_name(const DexType* type) {
  std::string full(type->get_name()->c_str());
  auto lpos = full.rfind('/');
  // The substr also strips the trailing ';'. When there is no '/' at all,
  // lpos == npos and the unsigned arithmetic deliberately wraps:
  // lpos + 1 == 0 and full.size() - lpos - 2 == full.size() - 1, i.e. the
  // whole descriptor minus the trailing ';'. Do not "simplify" this.
  auto simple = full.substr(lpos + 1, full.size() - lpos - 2);
  return DexString::make_string(simple.c_str(), (uint32_t) simple.size());
}
/**
* Peephole optimizations find dataflow patterns within a single basic block
* and replace them with faster patterns. The general pattern is to replace
* the final operation with a faster one, which may no longer depend on all the
* intermediate instructions in the pattern. The peepholer doesn't clean up
* these intermediate instructions, under the assumption that dead code
* elimination will do so later.
*/
/**
 * Peephole optimizations find dataflow patterns within a single basic block
 * and replace them with faster patterns. The general pattern is to replace
 * the final operation with a faster one, which may no longer depend on all the
 * intermediate instructions in the pattern. The peepholer doesn't clean up
 * these intermediate instructions, under the assumption that dead code
 * elimination will do so later.
 */
class PeepholeOptimizer {
 private:
  using RegWriters = std::vector<ssize_t>;
  using DataflowSources =
      std::vector<std::pair<IRInstruction*, std::vector<ssize_t>>>;

  // Sentinel index meaning "no known writer / no preceding call".
  const ssize_t kInvalid = -1;

  const std::vector<DexClass*>& m_scope;
  PassManager& m_pass_mgr;
  // Maps original instruction -> replacement; applied in bulk per method.
  std::unordered_map<IRInstruction*, IRInstruction*> m_replacements;
  // Index (into m_dataflow_sources) of the most recent invoke in the block.
  ssize_t m_last_call;
  // For each register, index of the last instruction that wrote it.
  RegWriters m_last_writer;
  DataflowSources m_dataflow_sources;
  int m_stats_check_casts_removed;
  int m_stats_check_casts_super_removed;
  int m_stats_simple_name;

  /**
   * Explicitly clear the dataflow analysis structures to (a) avoid passing
   * them around all over the place, and (b) avoid re-allocating them for every
   * block.
   *
   * BUGFIX: `regs` is now uint16_t to match the register counts the caller
   * passes. The previous int16_t parameter turned counts above 32767
   * negative, which corrupted the resize below.
   */
  void init_dataflow(uint16_t regs) {
    m_last_call = kInvalid;
    m_last_writer.resize(regs);
    std::fill(m_last_writer.begin(), m_last_writer.end(), kInvalid);
    m_dataflow_sources.clear();
  }

  // Try to match the patterns rooted at `insn`. Returns a brand-new
  // replacement instruction when a pattern fires, or `insn` unchanged.
  IRInstruction* peephole_patterns(IRInstruction* insn) {
    if (is_move_result(insn->opcode())) {
      /*
       * const-class vA, Lsome/Class;
       * invoke-virtual {vA} Ljava/lang/Class;.getSimpleName()
       * move-result vB
       */
      if (m_last_call < 0) {
        return insn;
      }
      auto invokep = m_dataflow_sources[m_last_call];
      auto invoke = invokep.first;
      auto const& invoke_srcs = invokep.second;
      if (invoke->get_method() != method_Class_getSimpleName()) {
        return insn;
      }
      if (invoke_srcs[0] < 0) {
        return insn;
      }
      auto const_class = m_dataflow_sources[invoke_srcs[0]].first;
      if (const_class->opcode() != OPCODE_CONST_CLASS) {
        return insn;
      }
      // Fold the whole pattern into a const-string of the simple name.
      auto clstype = const_class->get_type();
      m_stats_simple_name++;
      return (new IRInstruction(OPCODE_CONST_STRING))
          ->set_string(get_simple_name(clstype))
          ->set_dest(insn->dest());
    }
    if (insn->opcode() == OPCODE_CHECK_CAST) {
      /*
       * invoke-virtual Lsome/Class;
       * move-result vA;
       * check-cast vA, Lsome/Class;
       *
       */
      auto move_result_idx = m_last_writer[insn->src(0)];
      if (move_result_idx == kInvalid) {
        return insn;
      }
      auto move_resultp = m_dataflow_sources[move_result_idx];
      auto move_result = move_resultp.first;
      if (!is_move_result(move_result->opcode())) {
        return insn;
      }
      auto const& move_result_srcs = move_resultp.second;
      if (move_result_srcs[0] == kInvalid) {
        return insn;
      }
      auto invokep = m_dataflow_sources[move_result_srcs[0]];
      auto invoke = invokep.first;
      auto invoke_return_type = invoke->get_method()->get_proto()->get_rtype();
      auto check_type = insn->get_type();
      if (check_type != invoke_return_type) {
        // The cast is also redundant when casting to a supertype of the
        // invoke's declared return type.
        if (!check_cast(invoke_return_type, check_type)) {
          return insn;
        }
        m_stats_check_casts_super_removed++;
      }
      m_stats_check_casts_removed++;
      return (new IRInstruction(OPCODE_NOP));
    }
    return insn;
  }

  /**
   * Build a dataflow graph for this block. Keep track of the last writer of
   * each register and use that to compute the source instructions for each
   * subsequent instruction. (NB: we could do away with storing the sources if
   * the patterns were only two instructions deep.) For compactness we use
   * vectors indexed by opcode position rather than maps.
   */
  void peephole_block(Block* block, uint16_t regs) {
    init_dataflow(regs);
    size_t index = 0;
    for (auto& mei : *block) {
      if (mei.type == MFLOW_OPCODE) {
        auto newop = peephole_patterns(mei.insn);
        if (newop != mei.insn) {
          m_replacements.emplace(mei.insn, newop);
        }
        if (is_invoke(newop->opcode())) {
          m_last_call = index;
        }
        m_dataflow_sources.push_back(
            std::make_pair(newop, std::vector<ssize_t>()));
        auto& sources = m_dataflow_sources.back().second;
        // A move-result's (implicit) source is the preceding invoke.
        if (is_move_result(newop->opcode())) {
          sources.push_back(m_last_call);
        }
        for (unsigned i = 0; i < newop->srcs_size(); ++i) {
          sources.push_back(m_last_writer[newop->src(i)]);
        }
        if (newop->dests_size()) {
          m_last_writer[newop->dest()] = index;
        }
        ++index;
      }
    }
  }

  // Commit all recorded replacements into the method's IR.
  void apply_peepholes(IRCode* transform) {
    for (auto rep : m_replacements) {
      transform->replace_opcode(rep.first, rep.second);
    }
  }

  // Run the peepholer over every basic block of one method.
  void peephole(DexMethod* method) {
    auto transform = method->get_code();
    m_replacements.clear();
    transform->build_cfg();
    auto const& blocks = transform->cfg().blocks();
    for (auto const& block : blocks) {
      peephole_block(block, method->get_code()->get_registers_size());
    }
    apply_peepholes(&*transform);
  }

  // Emit trace output and PassManager metrics for this run.
  void print_stats() {
    TRACE(PEEPHOLE, 1,
          "%d SimpleClassName instances removed \n", m_stats_simple_name);
    TRACE(PEEPHOLE, 1,
          "%d redundant check-cast instances removed \n",
          m_stats_check_casts_removed);
    TRACE(PEEPHOLE, 1,
          "%d redundant check-cast instances from super removed \n",
          m_stats_check_casts_super_removed);
    m_pass_mgr.incr_metric(
        METRIC_SIMPLE_CLS_NAME_REMOVED, m_stats_simple_name);
    m_pass_mgr.incr_metric(
        METRIC_CHECK_CASTS_REMOVED, m_stats_check_casts_removed);
    m_pass_mgr.incr_metric(
        METRIC_CHECK_CASTS_SUPER_REMOVED, m_stats_check_casts_super_removed);
  }

 public:
  PeepholeOptimizer(const std::vector<DexClass*>& scope, PassManager& mgr)
      : m_scope(scope),
        m_pass_mgr(mgr),
        m_last_call(kInvalid),
        m_stats_check_casts_removed(0),
        m_stats_check_casts_super_removed(0),
        m_stats_simple_name(0) {}

  // Optimize every method with code in the scope, then report stats.
  void run() {
    walk_methods(m_scope,
                 [&](DexMethod* m) {
                   if (m->get_code()) {
                     peephole(m);
                   }
                 });
    print_stats();
  }
};
}
////////////////////////////////////////////////////////////////////////////////
// Pass entry point: build the full class scope from the input stores and run
// the peephole optimizer over it, reporting metrics through `mgr`.
void PeepholePass::run_pass(DexStoresVector& stores, ConfigFiles& cfg, PassManager& mgr) {
  auto scope = build_class_scope(stores);
  PeepholeOptimizer(scope, mgr).run();
}

// Static registration of the pass with the pass framework.
static PeepholePass s_pass;
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include <algorithm>
#include <iostream>
#include <set>
#include <unistd.h>
#include <unordered_set>
#include "db/dbformat.h"
#include "db/db_impl.h"
#include "db/filename.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "rocksdb/cache.h"
#include "rocksdb/compaction_filter.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/slice.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"
#include "table/block_based_table_factory.h"
#include "table/plain_table_factory.h"
#include "util/hash.h"
#include "util/hash_linklist_rep.h"
#include "utilities/merge_operators.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/statistics.h"
#include "util/testharness.h"
#include "util/sync_point.h"
#include "util/testutil.h"
namespace rocksdb {
// Probe whether a compression algorithm is usable in this build by invoking
// the corresponding port-layer hook on a small constant payload. All the
// port::*_Compress hooks share one signature and return false when the
// library is not compiled in, so a single driver serves every algorithm —
// this replaces five copies of the identical function body.
template <typename CompressFunc>
static bool CompressionProbe(CompressFunc compress,
                             const CompressionOptions& options) {
  std::string out;
  Slice in = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
  return compress(options, in.data(), in.size(), &out);
}
static bool SnappyCompressionSupported(const CompressionOptions& options) {
  return CompressionProbe(port::Snappy_Compress, options);
}
static bool ZlibCompressionSupported(const CompressionOptions& options) {
  return CompressionProbe(port::Zlib_Compress, options);
}
static bool BZip2CompressionSupported(const CompressionOptions& options) {
  return CompressionProbe(port::BZip2_Compress, options);
}
static bool LZ4CompressionSupported(const CompressionOptions& options) {
  return CompressionProbe(port::LZ4_Compress, options);
}
static bool LZ4HCCompressionSupported(const CompressionOptions& options) {
  return CompressionProbe(port::LZ4HC_Compress, options);
}
// Produce a random string of `len` bytes using the shared test helper.
static std::string RandomString(Random* rnd, int len) {
  std::string result;
  test::RandomString(rnd, len, &result);
  return result;
}
namespace anon {

// Minimal mutex-guarded counter used by the tests to tally events
// (random reads, sleeps, ...) coming from arbitrary threads.
class AtomicCounter {
 private:
  port::Mutex mu_;
  int count_;

 public:
  AtomicCounter() : count_(0) {}

  // Bump the counter by one.
  void Increment() {
    MutexLock l(&mu_);
    ++count_;
  }

  // Return the current value.
  int Read() {
    MutexLock l(&mu_);
    return count_;
  }

  // Reset the counter back to zero.
  void Reset() {
    MutexLock l(&mu_);
    count_ = 0;
  }
};

}  // namespace anon
// Special Env used to delay background operations
//
// Wraps the real Env and injects failures/delays controlled by the atomic
// flag pointers below: tests flip a flag with Release_Store(env) to arm a
// fault and Release_Store(nullptr) to disarm it.
class SpecialEnv : public EnvWrapper {
 public:
  // sstable Sync() calls are blocked while this pointer is non-nullptr.
  port::AtomicPointer delay_sstable_sync_;

  // Simulate no-space errors while this pointer is non-nullptr.
  port::AtomicPointer no_space_;

  // Simulate non-writable file system while this pointer is non-nullptr
  port::AtomicPointer non_writable_;

  // Force sync of manifest files to fail while this pointer is non-nullptr
  port::AtomicPointer manifest_sync_error_;

  // Force write to manifest files to fail while this pointer is non-nullptr
  port::AtomicPointer manifest_write_error_;

  // Force write to log files to fail while this pointer is non-nullptr
  port::AtomicPointer log_write_error_;

  // When true, every random read is tallied in random_read_counter_.
  bool count_random_reads_;
  anon::AtomicCounter random_read_counter_;

  // Counts calls to SleepForMicroseconds().
  anon::AtomicCounter sleep_counter_;

  explicit SpecialEnv(Env* base) : EnvWrapper(base) {
    delay_sstable_sync_.Release_Store(nullptr);
    no_space_.Release_Store(nullptr);
    non_writable_.Release_Store(nullptr);
    count_random_reads_ = false;
    manifest_sync_error_.Release_Store(nullptr);
    manifest_write_error_.Release_Store(nullptr);
    log_write_error_.Release_Store(nullptr);
  }

  // Wrap the newly created writable file in a fault-injecting proxy chosen
  // by filename pattern (.sst / MANIFEST / log).
  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
                         const EnvOptions& soptions) {
    // Drops writes when no_space_ is armed; blocks Sync() while
    // delay_sstable_sync_ is armed.
    class SSTableFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;

     public:
      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
          : env_(env),
            base_(std::move(base)) {
      }
      Status Append(const Slice& data) {
        if (env_->no_space_.Acquire_Load() != nullptr) {
          // Drop writes on the floor
          return Status::OK();
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        while (env_->delay_sstable_sync_.Acquire_Load() != nullptr) {
          env_->SleepForMicroseconds(100000);
        }
        return base_->Sync();
      }
    };

    // Fails Append()/Sync() when the corresponding manifest flag is armed.
    class ManifestFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
     public:
      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) { }
      Status Append(const Slice& data) {
        if (env_->manifest_write_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() {
        if (env_->manifest_sync_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated sync error");
        } else {
          return base_->Sync();
        }
      }
    };

    // Fails Append() when log_write_error_ is armed.
    class LogFile : public WritableFile {
     private:
      SpecialEnv* env_;
      unique_ptr<WritableFile> base_;
     public:
      LogFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
          : env_(env), base_(std::move(b)) { }
      Status Append(const Slice& data) {
        if (env_->log_write_error_.Acquire_Load() != nullptr) {
          return Status::IOError("simulated writer error");
        } else {
          return base_->Append(data);
        }
      }
      Status Close() { return base_->Close(); }
      Status Flush() { return base_->Flush(); }
      Status Sync() { return base_->Sync(); }
    };

    if (non_writable_.Acquire_Load() != nullptr) {
      return Status::IOError("simulated write error");
    }

    Status s = target()->NewWritableFile(f, r, soptions);
    if (s.ok()) {
      if (strstr(f.c_str(), ".sst") != nullptr) {
        r->reset(new SSTableFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
        r->reset(new ManifestFile(this, std::move(*r)));
      } else if (strstr(f.c_str(), "log") != nullptr) {
        r->reset(new LogFile(this, std::move(*r)));
      }
    }
    return s;
  }

  // Wrap random-access files in a read-counting proxy when
  // count_random_reads_ is set.
  Status NewRandomAccessFile(const std::string& f,
                             unique_ptr<RandomAccessFile>* r,
                             const EnvOptions& soptions) {
    class CountingFile : public RandomAccessFile {
     private:
      unique_ptr<RandomAccessFile> target_;
      anon::AtomicCounter* counter_;
     public:
      CountingFile(unique_ptr<RandomAccessFile>&& target,
                   anon::AtomicCounter* counter)
          : target_(std::move(target)), counter_(counter) {
      }
      virtual Status Read(uint64_t offset, size_t n, Slice* result,
                          char* scratch) const {
        counter_->Increment();
        return target_->Read(offset, n, result, scratch);
      }
    };

    Status s = target()->NewRandomAccessFile(f, r, soptions);
    if (s.ok() && count_random_reads_) {
      r->reset(new CountingFile(std::move(*r), &random_read_counter_));
    }
    return s;
  }

  // Counts sleeps before delegating, so tests can assert background
  // throttling happened.
  virtual void SleepForMicroseconds(int micros) {
    sleep_counter_.Increment();
    target()->SleepForMicroseconds(micros);
  }
};
// Shared fixture for the DB tests: owns a fault-injecting SpecialEnv, a
// scratch database directory, and the machinery to re-run each test under
// many option configurations (see ChangeOptions/CurrentOptions).
class DBTest {
 private:
  const FilterPolicy* filter_policy_;

 protected:
  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault,
    kPlainTableFirstBytePrefix,
    kPlainTableAllBytesPrefix,
    kVectorRep,
    kHashLinkList,
    kMergePut,
    kFilter,
    kUncompressed,
    kNumLevel_3,
    kDBLogDir,
    kWalDir,
    kManifestFileSize,
    kCompactOnFlush,
    kPerfOptions,
    kDeletesFilterFirst,
    kHashSkipList,
    kUniversalCompaction,
    kCompressedBlockCache,
    kInfiniteMaxOpenFiles,
    kEnd
  };
  // Currently active OptionConfig value.
  int option_config_;

 public:
  std::string dbname_;
  SpecialEnv* env_;
  DB* db_;

  // Options the DB was last (re)opened with.
  Options last_options_;

  // Skip some options, as they may not be applicable to a specific test.
  // To add more skip constants, use values 4, 8, 16, etc.
  enum OptionSkip {
    kNoSkip = 0,
    kSkipDeletesFilterFirst = 1,
    kSkipUniversalCompaction = 2,
    kSkipMergePut = 4,
    kSkipPlainTable = 8
  };

  // Wipes any database left over in the scratch directory, then opens a
  // fresh one with the default configuration.
  DBTest() : option_config_(kDefault),
             env_(new SpecialEnv(Env::Default())) {
    last_options_.max_background_flushes = 0;
    filter_policy_ = NewBloomFilterPolicy(10);
    dbname_ = test::TmpDir() + "/db_test";
    ASSERT_OK(DestroyDB(dbname_, Options()));
    db_ = nullptr;
    Reopen();
  }
  // Tear down: close the DB, wipe its files, and release the owned env and
  // filter policy.
  ~DBTest() {
    delete db_;
    ASSERT_OK(DestroyDB(dbname_, Options()));
    delete env_;
    delete filter_policy_;
  }
  // Switch to a fresh database with the next option configuration to
  // test. Return false if there are no more configurations to test.
  // `skip_mask` is an OR of OptionSkip bits naming configurations that do
  // not apply to the calling test.
  bool ChangeOptions(int skip_mask = kNoSkip) {
    // skip some options
    for(option_config_++; option_config_ < kEnd; option_config_++) {
      if ((skip_mask & kSkipDeletesFilterFirst) &&
          option_config_ == kDeletesFilterFirst) {
        continue;
      }
      if ((skip_mask & kSkipUniversalCompaction) &&
          option_config_ == kUniversalCompaction) {
        continue;
      }
      if ((skip_mask & kSkipMergePut) && option_config_ == kMergePut) {
        continue;
      }
      if ((skip_mask & kSkipPlainTable)
          && (option_config_ == kPlainTableAllBytesPrefix
              || option_config_ == kPlainTableFirstBytePrefix)) {
        continue;
      }
      break;
    }

    if (option_config_ >= kEnd) {
      // Exhausted: clean up the final database and stop the loop.
      Destroy(&last_options_);
      return false;
    } else {
      DestroyAndReopen();
      return true;
    }
  }
  // Switch between different compaction styles (we have only 2 now).
  // First call moves from the default (level) style to universal compaction
  // and reopens; the second call returns false to end the loop.
  bool ChangeCompactOptions(Options* prev_options = nullptr) {
    if (option_config_ == kDefault) {
      option_config_ = kUniversalCompaction;
      if (prev_options == nullptr) {
        prev_options = &last_options_;
      }
      Destroy(prev_options);
      TryReopen();
      return true;
    } else {
      return false;
    }
  }
// Return the current option configuration.
Options CurrentOptions() {
Options options;
options.paranoid_checks = false;
options.max_background_flushes = 0;
switch (option_config_) {
case kHashSkipList:
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.memtable_factory.reset(NewHashSkipListRepFactory());
break;
case kPlainTableFirstBytePrefix:
options.table_factory.reset(new PlainTableFactory());
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.allow_mmap_reads = true;
options.max_sequential_skip_in_iterations = 999999;
break;
case kPlainTableAllBytesPrefix:
options.table_factory.reset(new PlainTableFactory());
options.prefix_extractor.reset(NewNoopTransform());
options.allow_mmap_reads = true;
options.max_sequential_skip_in_iterations = 999999;
break;
case kMergePut:
options.merge_operator = MergeOperators::CreatePutOperator();
break;
case kFilter:
options.filter_policy = filter_policy_;
break;
case kUncompressed:
options.compression = kNoCompression;
break;
case kNumLevel_3:
options.num_levels = 3;
break;
case kDBLogDir:
options.db_log_dir = test::TmpDir();
break;
case kWalDir:
options.wal_dir = "/tmp/wal";
break;
case kManifestFileSize:
options.max_manifest_file_size = 50; // 50 bytes
case kCompactOnFlush:
options.purge_redundant_kvs_while_flush =
!options.purge_redundant_kvs_while_flush;
break;
case kPerfOptions:
options.hard_rate_limit = 2.0;
options.rate_limit_delay_max_milliseconds = 2;
// TODO -- test more options
break;
case kDeletesFilterFirst:
options.filter_deletes = true;
break;
case kVectorRep:
options.memtable_factory.reset(new VectorRepFactory(100));
break;
case kHashLinkList:
options.prefix_extractor.reset(NewFixedPrefixTransform(1));
options.memtable_factory.reset(NewHashLinkListRepFactory(4));
break;
case kUniversalCompaction:
options.compaction_style = kCompactionStyleUniversal;
break;
case kCompressedBlockCache:
options.allow_mmap_writes = true;
options.block_cache_compressed = NewLRUCache(8*1024*1024);
break;
case kInfiniteMaxOpenFiles:
options.max_open_files = -1;
break;
default:
break;
}
return options;
}
DBImpl* dbfull() {
return reinterpret_cast<DBImpl*>(db_);
}
  // Reopen the database, asserting success.
  void Reopen(Options* options = nullptr) {
    ASSERT_OK(TryReopen(options));
  }

  // Close the database without destroying its files.
  void Close() {
    delete db_;
    db_ = nullptr;
  }

  // Wipe the on-disk state (using the options it was created with) and open
  // a fresh database with `options`.
  void DestroyAndReopen(Options* options = nullptr) {
    // Destroy using last options
    Destroy(&last_options_);
    ASSERT_OK(TryReopen(options));
  }

  // Close and delete all files belonging to the current database.
  void Destroy(Options* options) {
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, *options));
  }

  // Open the database into `db` without touching this fixture's handle.
  Status PureReopen(Options* options, DB** db) {
    return DB::Open(*options, dbname_, db);
  }

  // Open the fixture's database in read-only mode.
  Status ReadOnlyReopen(Options* options) {
    return DB::OpenForReadOnly(*options, dbname_, &db_);
  }

  // Close and reopen, returning the status instead of asserting. With no
  // explicit options, uses CurrentOptions() plus create_if_missing.
  Status TryReopen(Options* options = nullptr) {
    delete db_;
    db_ = nullptr;
    Options opts;
    if (options != nullptr) {
      opts = *options;
    } else {
      opts = CurrentOptions();
      opts.create_if_missing = true;
    }
    last_options_ = opts;
    return DB::Open(opts, dbname_, &db_);
  }

  // Write k->v. Under the kMergePut config, a Merge (with a put-semantics
  // operator) stands in for Put so merge paths get exercised too.
  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
    if (kMergePut == option_config_ ) {
      return db_->Merge(wo, k, v);
    } else {
      return db_->Put(wo, k, v);
    }
  }

  // Delete key `k` with default write options.
  Status Delete(const std::string& k) {
    return db_->Delete(WriteOptions(), k);
  }

  // Read key `k` (optionally at `snapshot`), folding "not found" and errors
  // into the returned string for easy ASSERT_EQ comparisons.
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.verify_checksums = true;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }
  // Return a string that contains all key,value pairs in order,
  // formatted like "(k1->v1)(k2->v2)".
  // As a side effect, also asserts that reverse iteration yields exactly the
  // forward results in reverse order.
  std::string Contents() {
    std::vector<std::string> forward;
    std::string result;
    Iterator* iter = db_->NewIterator(ReadOptions());
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      std::string s = IterStatus(iter);
      result.push_back('(');
      result.append(s);
      result.push_back(')');
      forward.push_back(s);
    }

    // Check reverse iteration results are the reverse of forward results
    unsigned int matched = 0;
    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
      ASSERT_LT(matched, forward.size());
      ASSERT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
      matched++;
    }
    ASSERT_EQ(matched, forward.size());

    delete iter;
    return result;
  }
  // Render every internal entry (all versions: values, merges, deletions)
  // for `user_key` as e.g. "[ v2, DEL, v1 ]", using the internal iterator.
  // Used to verify compaction left exactly the expected entries behind.
  std::string AllEntriesFor(const Slice& user_key) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    // Seek to the newest possible entry for the key.
    InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
    iter->Seek(target.Encode());
    std::string result;
    if (!iter->status().ok()) {
      result = iter->status().ToString();
    } else {
      result = "[ ";
      bool first = true;
      while (iter->Valid()) {
        ParsedInternalKey ikey(Slice(), 0, kTypeValue);
        if (!ParseInternalKey(iter->key(), &ikey)) {
          result += "CORRUPTED";
        } else {
          // Stop once we walk past this user key's entries.
          if (last_options_.comparator->Compare(ikey.user_key, user_key) != 0) {
            break;
          }
          if (!first) {
            result += ", ";
          }
          first = false;
          switch (ikey.type) {
            case kTypeValue:
              result += iter->value().ToString();
              break;
            case kTypeMerge:
              // keep it the same as kTypeValue for testing kMergePut
              result += iter->value().ToString();
              break;
            case kTypeDeletion:
              result += "DEL";
              break;
            default:
              assert(false);
              break;
          }
        }
        iter->Next();
      }
      if (!first) {
        result += " ";
      }
      result += "]";
    }
    delete iter;
    return result;
  }
  // Number of table files at the given LSM level, read from the DB property.
  int NumTableFilesAtLevel(int level) {
    std::string property;
    ASSERT_TRUE(
        db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(level),
                         &property));
    return atoi(property.c_str());
  }

  // Total table files across all levels.
  int TotalTableFiles() {
    int result = 0;
    for (int level = 0; level < db_->NumberLevels(); level++) {
      result += NumTableFilesAtLevel(level);
    }
    return result;
  }

  // Return spread of files per level
  // e.g. "2,0,1" — trailing all-zero levels are trimmed off.
  std::string FilesPerLevel() {
    std::string result;
    int last_non_zero_offset = 0;
    for (int level = 0; level < db_->NumberLevels(); level++) {
      int f = NumTableFilesAtLevel(level);
      char buf[100];
      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
      result += buf;
      if (f > 0) {
        last_non_zero_offset = result.size();
      }
    }
    result.resize(last_non_zero_offset);
    return result;
  }

  // Count every file in the db dir plus, if separate, the WAL dir.
  int CountFiles() {
    std::vector<std::string> files;
    env_->GetChildren(dbname_, &files);

    std::vector<std::string> logfiles;
    if (dbname_ != last_options_.wal_dir) {
      env_->GetChildren(last_options_.wal_dir, &logfiles);
    }

    return static_cast<int>(files.size() + logfiles.size());
  }

  // Count files the DB itself still considers live.
  int CountLiveFiles() {
    std::vector<std::string> files;
    uint64_t manifest_file_size;
    db_->GetLiveFiles(files, &manifest_file_size);
    return files.size();
  }

  // Approximate on-disk size of the key range [start, limit).
  uint64_t Size(const Slice& start, const Slice& limit) {
    Range r(start, limit);
    uint64_t size;
    db_->GetApproximateSizes(&r, 1, &size);
    return size;
  }

  // Manually compact the given key range.
  void Compact(const Slice& start, const Slice& limit) {
    db_->CompactRange(&start, &limit);
  }
// Do n memtable compactions, each of which produces an sstable
// covering the range [small,large].
void MakeTables(int n, const std::string& small, const std::string& large) {
for (int i = 0; i < n; i++) {
Put(small, "begin");
Put(large, "end");
dbfull()->TEST_FlushMemTable();
}
}
  // Prevent pushing of new sstables into deeper levels by adding
  // tables that cover a specified range to all levels.
  void FillLevels(const std::string& smallest, const std::string& largest) {
    // One table per configured level, each spanning [smallest, largest].
    MakeTables(db_->NumberLevels(), smallest, largest);
  }
  // Debug helper: print the max next-level overlap and the per-level file
  // counts (only levels with at least one file) to stderr.
  void DumpFileCounts(const char* label) {
    fprintf(stderr, "---\n%s:\n", label);
    fprintf(stderr, "maxoverlap: %lld\n",
        static_cast<long long>(
            dbfull()->TEST_MaxNextLevelOverlappingBytes()));
    for (int level = 0; level < db_->NumberLevels(); level++) {
      int num = NumTableFilesAtLevel(level);
      if (num > 0) {
        fprintf(stderr, "  level %3d : %d files\n", level, num);
      }
    }
  }
std::string DumpSSTableList() {
std::string property;
db_->GetProperty("rocksdb.sstables", &property);
return property;
}
std::string IterStatus(Iterator* iter) {
std::string result;
if (iter->Valid()) {
result = iter->key().ToString() + "->" + iter->value().ToString();
} else {
result = "(invalid)";
}
return result;
}
  // Options tuned for transaction-log-iterator tests: a long WAL TTL keeps
  // log files around so GetUpdatesSince can replay them.
  Options OptionsForLogIterTest() {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.WAL_ttl_seconds = 1000;
    return options;
  }
  // Open a transaction-log iterator positioned at sequence number `seq`,
  // asserting that it opened successfully and is usable.
  std::unique_ptr<TransactionLogIterator> OpenTransactionLogIter(
      const SequenceNumber seq) {
    unique_ptr<TransactionLogIterator> iter;
    Status status = dbfull()->GetUpdatesSince(seq, &iter);
    ASSERT_OK(status);
    ASSERT_TRUE(iter->Valid());
    // Transfer ownership of the unique_ptr to the caller.
    return std::move(iter);
  }
std::string DummyString(size_t len, char c = 'a') {
return std::string(len, c);
}
  // Assert that the DB's last entry (via SeekToLast) renders as
  // `expected_key` in "key->value" form, or "(invalid)" if the DB is empty.
  void VerifyIterLast(std::string expected_key) {
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), expected_key);
    delete iter;
  }
  // Used to test InplaceUpdate.
  // If there is no previous value, requests a new value of delta.size()
  // 'c' characters. Otherwise shrinks the existing slot by one byte and
  // refills it with 'b' characters (in-place update).
  static UpdateStatus
      updateInPlaceSmallerSize(char* prevValue, uint32_t* prevSize,
                               Slice delta, std::string* newValue) {
    if (prevValue == nullptr) {
      *newValue = std::string(delta.size(), 'c');
      return UpdateStatus::UPDATED;
    } else {
      *prevSize = *prevSize - 1;
      std::string str_b = std::string(*prevSize, 'b');
      memcpy(prevValue, str_b.c_str(), str_b.size());
      return UpdateStatus::UPDATED_INPLACE;
    }
  }
static UpdateStatus
updateInPlaceSmallerVarintSize(char* prevValue, uint32_t* prevSize,
Slice delta, std::string* newValue) {
if (prevValue == nullptr) {
*newValue = std::string(delta.size(), 'c');
return UpdateStatus::UPDATED;
} else {
*prevSize = 1;
std::string str_b = std::string(*prevSize, 'b');
memcpy(prevValue, str_b.c_str(), str_b.size());
return UpdateStatus::UPDATED_INPLACE;
}
}
  // In-place updater that always needs more room: ignores the previous
  // value and requests a new slot of delta.size() 'c' characters.
  static UpdateStatus
      updateInPlaceLargerSize(char* prevValue, uint32_t* prevSize,
                              Slice delta, std::string* newValue) {
    *newValue = std::string(delta.size(), 'c');
    return UpdateStatus::UPDATED;
  }
  // In-place updater that unconditionally refuses the update.
  static UpdateStatus
      updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
                            Slice delta, std::string* newValue) {
    return UpdateStatus::UPDATE_FAILED;
  }
  // Utility method to test InplaceUpdate
  // Walks the internal iterator and verifies exactly `numValues` entries
  // exist, with strictly descending sequence numbers numValues..1 —
  // i.e. in-place updates did not add extra internal entries.
  void validateNumberOfEntries(int numValues) {
    Iterator* iter = dbfull()->TEST_NewInternalIterator();
    iter->SeekToFirst();
    ASSERT_EQ(iter->status().ok(), true);
    int seq = numValues;
    while (iter->Valid()) {
      ParsedInternalKey ikey;
      ikey.sequence = -1;  // sentinel; overwritten by ParseInternalKey
      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
      // checks sequence number for updates
      ASSERT_EQ(ikey.sequence, (unsigned)seq--);
      iter->Next();
    }
    delete iter;
    // Every expected entry was consumed.
    ASSERT_EQ(0, seq);
  }
  // Copy `size` bytes (default 0 = the whole file) from `source` to
  // `destination` through the test Env, asserting on any I/O failure.
  void CopyFile(const std::string& source, const std::string& destination,
                uint64_t size = 0) {
    const EnvOptions soptions;
    unique_ptr<SequentialFile> srcfile;
    ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
    unique_ptr<WritableFile> destfile;
    ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
    if (size == 0) {
      // default argument means copy everything
      ASSERT_OK(env_->GetFileSize(source, &size));
    }
    char buffer[4096];
    Slice slice;
    while (size > 0) {
      // Copy in at-most-4KB chunks; Read may return fewer bytes than asked,
      // so decrement by the actual slice size.
      uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
      ASSERT_OK(srcfile->Read(one, &slice, buffer));
      ASSERT_OK(destfile->Append(slice));
      size -= slice.size();
    }
    ASSERT_OK(destfile->Close());
  }
};
// Produce a fixed-width test key of the form "key000042" for index i.
static std::string Key(int i) {
  char formatted[100];
  snprintf(formatted, sizeof(formatted), "key%06d", i);
  return std::string(formatted);
}
// Read the current value of `ticker_type` from the statistics object
// attached to `options`; options.statistics must be non-null.
static long TestGetTickerCount(const Options& options, Tickers ticker_type) {
  return options.statistics->getTickerCount(ticker_type);
}
// A helper function that ensures the table properties returned in
// `GetPropertiesOfAllTablesTest` is correct.
// This test assumes entries size is different for each of the tables.
void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
  TablePropertiesCollection props;
  ASSERT_OK(db->GetPropertiesOfAllTables(&props));
  // The caller always creates exactly four tables. The previous raw
  // assert() duplicated this check and aborted the process instead of
  // failing the test, so only the test macro is kept.
  ASSERT_EQ(4U, props.size());
  std::unordered_set<uint64_t> unique_entries;
  // Indirect test: because each table has a distinct entry count, equal
  // set sizes prove every table reported its own properties.
  uint64_t sum = 0;
  for (const auto& item : props) {
    unique_entries.insert(item.second->num_entries);
    sum += item.second->num_entries;
  }
  ASSERT_EQ(props.size(), unique_entries.size());
  ASSERT_EQ(expected_entries_size, sum);
}
// Snapshot the memtable's memory accounting into a name -> bytes map.
std::unordered_map<std::string, size_t> GetMemoryUsage(MemTable* memtable) {
  const auto& arena = memtable->TEST_GetArena();
  std::unordered_map<std::string, size_t> usage;
  usage["memtable.approximate.usage"] = memtable->ApproximateMemoryUsage();
  usage["arena.approximate.usage"] = arena.ApproximateMemoryUsage();
  usage["arena.allocated.memory"] = arena.MemoryAllocatedBytes();
  usage["arena.unused.bytes"] = arena.AllocatedAndUnused();
  usage["irregular.blocks"] = arena.IrregularBlockNum();
  return usage;
}
// Dump each usage entry to stdout as "\t<name>: <bytes>", one per line.
void PrintMemoryUsage(const std::unordered_map<std::string, size_t>& usage) {
  for (auto it = usage.begin(); it != usage.end(); ++it) {
    std::cout << "\t" << it->first << ": " << it->second << std::endl;
  }
}
// Insert one randomly generated key/value pair into `memtable`.
// Values range up to roughly a quarter of arena_block_size scaled by 1.15,
// so some entries deliberately exceed the arena block size.
// NOTE(review): the double expression narrows back to an integer length
// at the call — presumably intentional; confirm against RandomString.
void AddRandomKV(MemTable* memtable, Random* rnd, size_t arena_block_size) {
  memtable->Add(0, kTypeValue, RandomString(rnd, 20) /* key */,
                // make sure we will be able to generate some over sized entries
                RandomString(rnd, rnd->Uniform(arena_block_size / 4) * 1.15 +
                                      10) /* value */);
}
// NOTE(review): despite its name, this body is identical to
// GetFromImmutableLayer below (write, block sstable syncs, read back) —
// confirm whether a dedicated empty-DB check was intended here.
TEST(DBTest, Empty) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(env_);  // Block sync calls
    Put("k1", std::string(100000, 'x'));  // Fill memtable
    Put("k2", std::string(100000, 'y'));  // Trigger compaction
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(nullptr);  // Release sync calls
  } while (ChangeOptions());
}
// Reopen the DB read-only and verify point reads and full iteration see
// the latest committed values (overwrites collapse to one entry per key).
TEST(DBTest, ReadOnlyDB) {
  ASSERT_OK(Put("foo", "v1"));
  ASSERT_OK(Put("bar", "v2"));
  ASSERT_OK(Put("foo", "v3"));
  Close();
  Options options;
  ASSERT_OK(ReadOnlyReopen(&options));
  ASSERT_EQ("v3", Get("foo"));
  ASSERT_EQ("v2", Get("bar"));
  Iterator* iter = db_->NewIterator(ReadOptions());
  int count = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    ASSERT_OK(iter->status());
    ++count;
  }
  // Two distinct user keys despite three Puts.
  ASSERT_EQ(count, 2);
  delete iter;
}
// Make sure that when options.block_cache is set, after a new table is
// created its index/filter blocks are added to block cache.
TEST(DBTest, IndexAndFilterBlocksOfNewTableAddedToCache) {
  Options options = CurrentOptions();
  std::unique_ptr<const FilterPolicy> filter_policy(NewBloomFilterPolicy(20));
  options.filter_policy = filter_policy.get();
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  BlockBasedTableOptions table_options;
  table_options.cache_index_and_filter_blocks = true;
  options.table_factory.reset(new BlockBasedTableFactory(table_options));
  DestroyAndReopen(&options);
  ASSERT_OK(db_->Put(WriteOptions(), "key", "val"));
  // Create a new table.
  ASSERT_OK(dbfull()->Flush(FlushOptions()));
  // index/filter blocks added to block cache right after table creation.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, /* only index/filter were added */
            TestGetTickerCount(options, BLOCK_CACHE_ADD));
  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS));
  // Make sure filter block is in cache.
  std::string value;
  db_->KeyMayExist(ReadOptions(), "key", &value);
  // Miss count should remain the same.
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
  db_->KeyMayExist(ReadOptions(), "key", &value);
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
  // Make sure index block is in cache.
  // NOTE(review): this section samples BLOCK_CACHE_FILTER_HIT; if an
  // index-specific hit ticker exists, that may be the intended counter.
  auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
  value = Get("key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(index_block_hit + 1,
            TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
  value = Get("key");
  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
  ASSERT_EQ(index_block_hit + 2,
            TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
}
// Verify GetPropertiesOfAllTables is correct whether properties come from
// files on disk, a partially warm table cache, or a fully warm table cache.
TEST(DBTest, GetPropertiesOfAllTablesTest) {
  Options options = CurrentOptions();
  Reopen(&options);
  // Create 4 tables with 10..13 entries respectively, so every table has
  // a distinct entry count (required by VerifyTableProperties).
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
      db_->Put(WriteOptions(), std::to_string(table * 100 + i), "val");
    }
    db_->Flush(FlushOptions());
  }
  // 1. Read table properties directly from file
  Reopen(&options);
  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
  // 2. Put two tables to table cache and
  Reopen(&options);
  // fetch key from 1st and 2nd table, which will internally place that table to
  // the table cache.
  for (int i = 0; i < 2; ++i) {
    Get(std::to_string(i * 100 + 0));
  }
  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
  // 3. Put all tables to table cache
  Reopen(&options);
  // fetch a key from each of the four tables, which will internally place
  // that table to the table cache.
  for (int i = 0; i < 4; ++i) {
    Get(std::to_string(i * 100 + 0));
  }
  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
}
// Reopening with fewer levels than the DB already uses must fail with
// InvalidArgument; raising num_levels again must succeed.
TEST(DBTest, LevelLimitReopen) {
  Options options = CurrentOptions();
  Reopen(&options);
  const std::string value(1024 * 1024, ' ');
  int i = 0;
  // Write 1MB values until at least one file reaches level 2.
  while (NumTableFilesAtLevel(2) == 0) {
    ASSERT_OK(Put(Key(i++), value));
  }
  options.num_levels = 1;
  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
  Status s = TryReopen(&options);
  ASSERT_EQ(s.IsInvalidArgument(), true);
  ASSERT_EQ(s.ToString(),
            "Invalid argument: db has more levels than options.num_levels");
  options.num_levels = 10;
  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
  ASSERT_OK(TryReopen(&options));
}
// Verify WritableFile preallocation advances by whole blocks as data of
// various sizes is appended.
TEST(DBTest, Preallocation) {
  const std::string src = dbname_ + "/alloc_test";
  unique_ptr<WritableFile> srcfile;
  const EnvOptions soptions;
  ASSERT_OK(env_->NewWritableFile(src, &srcfile, soptions));
  srcfile->SetPreallocationBlockSize(1024 * 1024);
  // No writes should mean no preallocation
  size_t block_size, last_allocated_block;
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 0UL);
  // Small write should preallocate one block
  srcfile->Append("test");
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 1UL);
  // Write an entire preallocation block, make sure we increased by two.
  std::string buf(block_size, ' ');
  srcfile->Append(buf);
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 2UL);
  // Write five more blocks at once, ensure we're where we need to be.
  buf = std::string(block_size * 5, ' ');
  srcfile->Append(buf);
  srcfile->GetPreallocationStatus(&block_size, &last_allocated_block);
  ASSERT_EQ(last_allocated_block, 7UL);
}
// Basic Put/overwrite/Delete round-trip across all option configurations.
TEST(DBTest, PutDeleteGet) {
  do {
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_OK(db_->Delete(WriteOptions(), "foo"));
    ASSERT_EQ("NOT_FOUND", Get("foo"));
  } while (ChangeOptions());
}
// A key must stay readable from the immutable memtable while sstable
// syncs are blocked and further writes force new memtables.
TEST(DBTest, GetFromImmutableLayer) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000;  // Small write buffer
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(env_);  // Block sync calls
    Put("k1", std::string(100000, 'x'));  // Fill memtable
    Put("k2", std::string(100000, 'y'));  // Trigger compaction
    ASSERT_EQ("v1", Get("foo"));
    env_->delay_sstable_sync_.Release_Store(nullptr);  // Release sync calls
  } while (ChangeOptions());
}
// A key flushed to an sstable must remain readable after the flush.
TEST(DBTest, GetFromVersions) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("v1", Get("foo"));
  } while (ChangeOptions());
}
// Reads through a snapshot must keep returning the pre-snapshot value,
// both before and after the memtable is flushed.
TEST(DBTest, GetSnapshot) {
  do {
    // Try with both a short key and a long key
    for (int i = 0; i < 2; i++) {
      std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x');
      ASSERT_OK(Put(key, "v1"));
      const Snapshot* s1 = db_->GetSnapshot();
      ASSERT_OK(Put(key, "v2"));
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      dbfull()->TEST_FlushMemTable();
      ASSERT_EQ("v2", Get(key));
      ASSERT_EQ("v1", Get(key, s1));
      db_->ReleaseSnapshot(s1);
    }
  } while (ChangeOptions());
}
TEST(DBTest, GetLevel0Ordering) {
  do {
    // Check that we process level-0 files in correct order. The code
    // below generates two level-0 files where the earlier one comes
    // before the later one in the level-0 file list since the earlier
    // one has a smaller "smallest" key.
    ASSERT_OK(Put("bar", "b"));
    ASSERT_OK(Put("foo", "v1"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("foo", "v2"));
    dbfull()->TEST_FlushMemTable();
    // The newer file must win for "foo".
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// A memtable/level-0 value must shadow an older compacted value for the
// same key at a deeper level.
TEST(DBTest, GetOrderedByLevels) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    Compact("a", "z");
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_OK(Put("foo", "v2"));
    ASSERT_EQ("v2", Get("foo"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("v2", Get("foo"));
  } while (ChangeOptions());
}
// With multiple non-overlapping files in a non-level-0 level, Get must
// consult the file whose range contains the key.
TEST(DBTest, GetPicksCorrectFile) {
  do {
    // Arrange to have multiple files in a non-level-0 level.
    ASSERT_OK(Put("a", "va"));
    Compact("a", "b");
    ASSERT_OK(Put("x", "vx"));
    Compact("x", "y");
    ASSERT_OK(Put("f", "vf"));
    Compact("f", "g");
    ASSERT_EQ("va", Get("a"));
    ASSERT_EQ("vf", Get("f"));
    ASSERT_EQ("vx", Get("x"));
  } while (ChangeOptions());
}
TEST(DBTest, GetEncountersEmptyLevel) {
  do {
    // Arrange for the following to happen:
    //   * sstable A in level 0
    //   * nothing in level 1
    //   * sstable B in level 2
    // Then do enough Get() calls to arrange for an automatic compaction
    // of sstable A.  A bug would cause the compaction to be marked as
    // occurring at level 1 (instead of the correct level 0).
    // Step 1: First place sstables in levels 0 and 2
    int compaction_count = 0;
    while (NumTableFilesAtLevel(0) == 0 ||
           NumTableFilesAtLevel(2) == 0) {
      ASSERT_LE(compaction_count, 100) << "could not fill levels 0 and 2";
      compaction_count++;
      Put("a", "begin");
      Put("z", "end");
      dbfull()->TEST_FlushMemTable();
    }
    // Step 2: clear level 1 if necessary.
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    ASSERT_EQ(NumTableFilesAtLevel(2), 1);
    // Step 3: read a bunch of times
    for (int i = 0; i < 1000; i++) {
      ASSERT_EQ("NOT_FOUND", Get("missing"));
    }
    // Step 4: Wait for compaction to finish
    env_->SleepForMicroseconds(1000000);
    ASSERT_EQ(NumTableFilesAtLevel(0), 1);  // XXX
  } while (ChangeOptions(kSkipUniversalCompaction));
}
// KeyMayExist can lead to a few false positives, but not false negatives.
// To make test deterministic, use a much larger number of bits per key-20 than
// bits in the key, so that false positives are eliminated
TEST(DBTest, KeyMayExist) {
  do {
    ReadOptions ropts;
    std::string value;
    Options options = CurrentOptions();
    options.filter_policy = NewBloomFilterPolicy(20);
    options.statistics = rocksdb::CreateDBStatistics();
    Reopen(&options);
    // Absent key: must be definitively "may not exist".
    ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
    ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));
    bool value_found = false;
    ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
    ASSERT_TRUE(value_found);
    ASSERT_EQ("b", value);
    dbfull()->Flush(FlushOptions());
    value.clear();
    long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    ASSERT_TRUE(db_->KeyMayExist(ropts, "a", &value, &value_found));
    ASSERT_TRUE(!value_found);
    // assert that no new files were opened and no new blocks were
    // read into block cache.
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_OK(db_->Delete(WriteOptions(), "a"));
    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    dbfull()->Flush(FlushOptions());
    dbfull()->CompactRange(nullptr, nullptr);
    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    ASSERT_TRUE(!db_->KeyMayExist(ropts, "a", &value));
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    ASSERT_OK(db_->Delete(WriteOptions(), "c"));
    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    ASSERT_TRUE(!db_->KeyMayExist(ropts, "c", &value));
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    // The filter policy was allocated with raw new above; free it here.
    delete options.filter_policy;
    // KeyMayExist function only checks data in block caches, which is not used
    // by plain table format.
  } while (ChangeOptions(kSkipPlainTable));
}
// A kBlockCacheTier (non-blocking) iterator must serve data that is in
// the memtable or block cache, report Incomplete otherwise, and never
// touch storage.
TEST(DBTest, NonBlockingIteration) {
  do {
    ReadOptions non_blocking_opts, regular_opts;
    Options options = CurrentOptions();
    options.statistics = rocksdb::CreateDBStatistics();
    non_blocking_opts.read_tier = kBlockCacheTier;
    Reopen(&options);
    // write one kv to the database.
    ASSERT_OK(db_->Put(WriteOptions(), "a", "b"));
    // scan using non-blocking iterator. We should find it because
    // it is in memtable.
    Iterator* iter = db_->NewIterator(non_blocking_opts);
    int count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ASSERT_OK(iter->status());
      count++;
    }
    ASSERT_EQ(count, 1);
    delete iter;
    // flush memtable to storage. Now, the key should not be in the
    // memtable neither in the block cache.
    dbfull()->Flush(FlushOptions());
    // verify that a non-blocking iterator does not find any
    // kvs. Neither does it do any IOs to storage.
    long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    iter = db_->NewIterator(non_blocking_opts);
    count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      count++;
    }
    ASSERT_EQ(count, 0);
    // Non-blocking reads that would need IO surface as Incomplete status.
    ASSERT_TRUE(iter->status().IsIncomplete());
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    delete iter;
    // read in the specified block via a regular get
    ASSERT_EQ(Get("a"), "b");
    // verify that we can find it via a non-blocking scan
    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
    iter = db_->NewIterator(non_blocking_opts);
    count = 0;
    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
      ASSERT_OK(iter->status());
      count++;
    }
    ASSERT_EQ(count, 1);
    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
    delete iter;
    // This test verifies block cache behaviors, which is not used by plain
    // table format.
  } while (ChangeOptions(kSkipPlainTable));
}
// A delete is skipped for key if KeyMayExist(key) returns False
// Tests Writebatch consistency and proper delete behaviour
TEST(DBTest, FilterDeletes) {
  do {
    Options options = CurrentOptions();
    options.filter_policy = NewBloomFilterPolicy(20);
    options.filter_deletes = true;
    Reopen(&options);
    WriteBatch batch;
    // Delete of a never-written key: filter_deletes drops the tombstone.
    batch.Delete("a");
    dbfull()->Write(WriteOptions(), &batch);
    ASSERT_EQ(AllEntriesFor("a"), "[ ]");  // Delete skipped
    batch.Clear();
    // Put then Delete in the same batch: key exists, so tombstone written.
    batch.Put("a", "b");
    batch.Delete("a");
    dbfull()->Write(WriteOptions(), &batch);
    ASSERT_EQ(Get("a"), "NOT_FOUND");
    ASSERT_EQ(AllEntriesFor("a"), "[ DEL, b ]");  // Delete issued
    batch.Clear();
    // Delete before the Put in the same batch: key absent at delete time.
    batch.Delete("c");
    batch.Put("c", "d");
    dbfull()->Write(WriteOptions(), &batch);
    ASSERT_EQ(Get("c"), "d");
    ASSERT_EQ(AllEntriesFor("c"), "[ d ]");  // Delete skipped
    batch.Clear();
    dbfull()->Flush(FlushOptions());  // A stray Flush
    batch.Delete("c");
    dbfull()->Write(WriteOptions(), &batch);
    ASSERT_EQ(AllEntriesFor("c"), "[ DEL, d ]");  // Delete issued
    batch.Clear();
    // Filter policy was allocated with raw new above; free it here.
    delete options.filter_policy;
  } while (ChangeCompactOptions());
}
// Regression smoke test: Seek followed by Prev across flushed-file
// boundaries must not crash (no value assertions by design).
TEST(DBTest, IterSeekBeforePrev) {
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put("0", "f"));
  ASSERT_OK(Put("1", "h"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put("2", "j"));
  auto iter = db_->NewIterator(ReadOptions());
  iter->Seek(Slice("c"));
  iter->Prev();
  iter->Seek(Slice("a"));
  iter->Prev();
  delete iter;
}
// Build a key consisting of `length` copies of the byte `c`.
std::string MakeLongKey(size_t length, char c) {
  std::string key;
  key.append(length, c);
  return key;
}
// Iterate over keys of many different lengths (20..127 bytes) spanning a
// flushed file and the memtable; order must follow key bytes, not length.
TEST(DBTest, IterLongKeys) {
  ASSERT_OK(Put(MakeLongKey(20, 0), "0"));
  ASSERT_OK(Put(MakeLongKey(32, 2), "2"));
  ASSERT_OK(Put("a", "b"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put(MakeLongKey(50, 1), "1"));
  ASSERT_OK(Put(MakeLongKey(127, 3), "3"));
  ASSERT_OK(Put(MakeLongKey(64, 4), "4"));
  auto iter = db_->NewIterator(ReadOptions());
  // Create a key that needs to be skipped for Seq too new
  iter->Seek(MakeLongKey(20, 0));
  ASSERT_EQ(IterStatus(iter), MakeLongKey(20, 0) + "->0");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(64, 4) + "->4");
  delete iter;
  iter = db_->NewIterator(ReadOptions());
  iter->Seek(MakeLongKey(50, 1));
  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
  delete iter;
}
// Next() must skip over a key ("b") whose versions are all newer than the
// iterator's snapshot, even past the sequential-skip threshold.
TEST(DBTest, IterNextWithNewerSeq) {
  ASSERT_OK(Put("0", "0"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  ASSERT_OK(Put("d", "e"));
  auto iter = db_->NewIterator(ReadOptions());
  // Create a key that needs to be skipped for Seq too new
  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
       i++) {
    ASSERT_OK(Put("b", "f"));
  }
  iter->Seek(Slice("a"));
  ASSERT_EQ(IterStatus(iter), "a->b");
  iter->Next();
  ASSERT_EQ(IterStatus(iter), "c->d");
  delete iter;
}
// Prev() must skip over a key ("b") whose versions are all newer than the
// iterator's snapshot, even past the sequential-skip threshold.
TEST(DBTest, IterPrevWithNewerSeq) {
  ASSERT_OK(Put("0", "0"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  ASSERT_OK(Put("d", "e"));
  auto iter = db_->NewIterator(ReadOptions());
  // Create a key that needs to be skipped for Seq too new
  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
       i++) {
    ASSERT_OK(Put("b", "f"));
  }
  iter->Seek(Slice("d"));
  ASSERT_EQ(IterStatus(iter), "d->e");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "c->d");
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->b");
  iter->Prev();
  delete iter;
}
// Same as IterPrevWithNewerSeq, but the newer versions of "b" are written
// while the iterator is already positioned, before calling Prev().
TEST(DBTest, IterPrevWithNewerSeq2) {
  ASSERT_OK(Put("0", "0"));
  dbfull()->Flush(FlushOptions());
  ASSERT_OK(Put("a", "b"));
  ASSERT_OK(Put("c", "d"));
  ASSERT_OK(Put("d", "e"));
  auto iter = db_->NewIterator(ReadOptions());
  iter->Seek(Slice("c"));
  ASSERT_EQ(IterStatus(iter), "c->d");
  // Create a key that needs to be skipped for Seq too new
  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
       i++) {
    ASSERT_OK(Put("b", "f"));
  }
  iter->Prev();
  ASSERT_EQ(IterStatus(iter), "a->b");
  iter->Prev();
  delete iter;
}
// On an empty DB every positioning operation must leave the iterator invalid.
TEST(DBTest, IterEmpty) {
  do {
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->Seek("foo");
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    delete iter;
  } while (ChangeCompactOptions());
}
// With exactly one entry, every positioning operation must land on it and
// every step off it must invalidate the iterator.
TEST(DBTest, IterSingle) {
  do {
    ASSERT_OK(Put("a", "va"));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    // Seek targets before, at, and after the single key.
    iter->Seek("");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->Seek("a");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    delete iter;
  } while (ChangeCompactOptions());
}
// Exercise forward/backward iteration, seeks, direction switches, and
// snapshot isolation over a three-entry DB.
TEST(DBTest, IterMulti) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    // Seek lands on the first key >= target.
    iter->Seek("");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Seek("a");
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Seek("ax");
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Seek("b");
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Seek("z");
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    // Switch from reverse to forward
    iter->SeekToLast();
    iter->Prev();
    iter->Prev();
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    // Switch from forward to reverse
    iter->SeekToFirst();
    iter->Next();
    iter->Next();
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    // Make sure iter stays at snapshot
    ASSERT_OK(Put("a", "va2"));
    ASSERT_OK(Put("a2", "va3"));
    ASSERT_OK(Put("b", "vb2"));
    ASSERT_OK(Put("c", "vc2"));
    ASSERT_OK(Delete("b"));
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->vb");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    delete iter;
  } while (ChangeCompactOptions());
}
// Check that we can skip over a run of user keys
// by using reseek rather than sequential scan
// (max_sequential_skip_in_iterations = 3, so a 4th duplicate triggers it).
TEST(DBTest, IterReseek) {
  Options options = CurrentOptions();
  options.max_sequential_skip_in_iterations = 3;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();
  DestroyAndReopen(&options);
  // insert two keys with same userkey and verify that
  // reseek is not invoked. For each of these test cases,
  // verify that we can find the next key "b".
  ASSERT_OK(Put("a", "one"));
  ASSERT_OK(Put("a", "two"));
  ASSERT_OK(Put("b", "bone"));
  Iterator* iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "a->two");
  iter->Next();
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;
  // insert a total of three keys with same userkey and verify
  // that reseek is still not invoked.
  ASSERT_OK(Put("a", "three"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->three");
  iter->Next();
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;
  // insert a total of four keys with same userkey and verify
  // that reseek is invoked.
  ASSERT_OK(Put("a", "four"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  ASSERT_EQ(IterStatus(iter), "a->four");
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0);
  iter->Next();
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1);
  ASSERT_EQ(IterStatus(iter), "b->bone");
  delete iter;
  // Testing reverse iterator
  // At this point, we have three versions of "a" and one version of "b".
  // The reseek statistics is already at 1.
  int num_reseeks =
      (int)TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION);
  // Insert another version of b and assert that reseek is not invoked
  ASSERT_OK(Put("b", "btwo"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "b->btwo");
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
            num_reseeks);
  iter->Prev();
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
            num_reseeks + 1);
  ASSERT_EQ(IterStatus(iter), "a->four");
  delete iter;
  // insert two more versions of b. This makes a total of 4 versions
  // of b and 4 versions of a.
  ASSERT_OK(Put("b", "bthree"));
  ASSERT_OK(Put("b", "bfour"));
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToLast();
  ASSERT_EQ(IterStatus(iter), "b->bfour");
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
            num_reseeks + 2);
  iter->Prev();
  // the previous Prev call should have invoked reseek
  ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION),
            num_reseeks + 3);
  ASSERT_EQ(IterStatus(iter), "a->four");
  delete iter;
}
// Iterate both directions over a mix of tiny and 100KB values.
TEST(DBTest, IterSmallAndLargeMix) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", std::string(100000, 'b')));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Put("d", std::string(100000, 'd')));
    ASSERT_OK(Put("e", std::string(100000, 'e')));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->SeekToFirst();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
    iter->Next();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    iter->SeekToLast();
    ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "c->vc");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b'));
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "a->va");
    iter->Prev();
    ASSERT_EQ(IterStatus(iter), "(invalid)");
    delete iter;
  } while (ChangeCompactOptions());
}
// Iteration must skip a deleted key in both directions.
TEST(DBTest, IterMultiWithDelete) {
  do {
    ASSERT_OK(Put("a", "va"));
    ASSERT_OK(Put("b", "vb"));
    ASSERT_OK(Put("c", "vc"));
    ASSERT_OK(Delete("b"));
    ASSERT_EQ("NOT_FOUND", Get("b"));
    Iterator* iter = db_->NewIterator(ReadOptions());
    iter->Seek("c");
    ASSERT_EQ(IterStatus(iter), "c->vc");
    if (!CurrentOptions().merge_operator) {
      // TODO: merge operator does not support backward iteration yet
      iter->Prev();
      // Prev skips the deleted "b" and lands on "a".
      ASSERT_EQ(IterStatus(iter), "a->va");
    }
    delete iter;
  } while (ChangeOptions());
}
// SeekToLast must keep finding the correct last live entry as keys are
// deleted one by one, even with multiple versions per key.
TEST(DBTest, IterPrevMaxSkip) {
  do {
    // Two rounds of Puts give each key two versions.
    for (int i = 0; i < 2; i++) {
      db_->Put(WriteOptions(), "key1", "v1");
      db_->Put(WriteOptions(), "key2", "v2");
      db_->Put(WriteOptions(), "key3", "v3");
      db_->Put(WriteOptions(), "key4", "v4");
      db_->Put(WriteOptions(), "key5", "v5");
    }
    VerifyIterLast("key5->v5");
    ASSERT_OK(db_->Delete(WriteOptions(), "key5"));
    VerifyIterLast("key4->v4");
    ASSERT_OK(db_->Delete(WriteOptions(), "key4"));
    VerifyIterLast("key3->v3");
    ASSERT_OK(db_->Delete(WriteOptions(), "key3"));
    VerifyIterLast("key2->v2");
    ASSERT_OK(db_->Delete(WriteOptions(), "key2"));
    VerifyIterLast("key1->v1");
    ASSERT_OK(db_->Delete(WriteOptions(), "key1"));
    VerifyIterLast("(invalid)");
  } while (ChangeOptions(kSkipMergePut));
}
// An iterator pinned to an explicit snapshot must not see keys written
// after the snapshot was taken.
TEST(DBTest, IterWithSnapshot) {
  do {
    ASSERT_OK(Put("key1", "val1"));
    ASSERT_OK(Put("key2", "val2"));
    ASSERT_OK(Put("key3", "val3"));
    ASSERT_OK(Put("key4", "val4"));
    ASSERT_OK(Put("key5", "val5"));
    const Snapshot *snapshot = db_->GetSnapshot();
    ReadOptions options;
    options.snapshot = snapshot;
    Iterator* iter = db_->NewIterator(options);
    // Put more values after the snapshot
    ASSERT_OK(Put("key100", "val100"));
    ASSERT_OK(Put("key101", "val101"));
    iter->Seek("key5");
    ASSERT_EQ(IterStatus(iter), "key5->val5");
    if (!CurrentOptions().merge_operator) {
      // TODO: merge operator does not support backward iteration yet
      iter->Prev();
      ASSERT_EQ(IterStatus(iter), "key4->val4");
      iter->Prev();
      ASSERT_EQ(IterStatus(iter), "key3->val3");
      iter->Next();
      ASSERT_EQ(IterStatus(iter), "key4->val4");
      iter->Next();
      ASSERT_EQ(IterStatus(iter), "key5->val5");
      iter->Next();
      // key100/key101 are invisible: written after the snapshot.
      ASSERT_TRUE(!iter->Valid());
    }
    db_->ReleaseSnapshot(snapshot);
    delete iter;
  } while (ChangeOptions());
}
// Basic WAL recovery: values written before Reopen() must survive it, and
// overwrites performed after a recovery must persist across another one.
TEST(DBTest, Recover) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));
    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    // NOTE(review): "foo" is read twice — presumably deliberate, to
    // exercise a repeated read right after recovery.
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v5", Get("baz"));
    ASSERT_OK(Put("bar", "v2"));
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_OK(Put("foo", "v4"));
    ASSERT_EQ("v4", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v5", Get("baz"));
  } while (ChangeOptions());
}
// After recovery the file metadata should carry cached table reader
// handles only when max_open_files is unlimited (kInfiniteMaxOpenFiles);
// otherwise handles must be left null and opened lazily.
TEST(DBTest, RecoverWithTableHandle) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.write_buffer_size = 100;
    options.disable_auto_compactions = true;
    DestroyAndReopen(&options);
    // Two explicit flushes plus the memtable flushed on reopen -> 3 files.
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("bar", "v2"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("foo", "v3"));
    ASSERT_OK(Put("bar", "v4"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("big", std::string(100, 'a')));
    Reopen();
    std::vector<std::vector<FileMetaData>> files;
    dbfull()->TEST_GetFilesMetaData(&files);
    int total_files = 0;
    for (const auto& level : files) {
      total_files += level.size();
    }
    ASSERT_EQ(total_files, 3);
    for (const auto& level : files) {
      for (const auto& file : level) {
        if (kInfiniteMaxOpenFiles == option_config_) {
          ASSERT_TRUE(file.table_reader_handle != nullptr);
        } else {
          ASSERT_TRUE(file.table_reader_handle == nullptr);
        }
      }
    }
  } while (ChangeOptions());
}
// WAL files that were already recovered must not be replayed again on the
// next open. A double replay would apply the (non-idempotent) uint64-add
// merge operands twice and corrupt the values. The test copies WAL files
// aside, restores them after recovery, and checks the values are unchanged.
TEST(DBTest, IgnoreRecoveredLog) {
  std::string backup_logs = dbname_ + "/backup_logs";
  // delete old files in backup_logs directory
  env_->CreateDirIfMissing(backup_logs);
  std::vector<std::string> old_files;
  env_->GetChildren(backup_logs, &old_files);
  for (auto& file : old_files) {
    if (file != "." && file != "..") {
      env_->DeleteFile(backup_logs + "/" + file);
    }
  }
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.merge_operator = MergeOperators::CreateUInt64AddOperator();
    options.wal_dir = dbname_ + "/logs";
    DestroyAndReopen(&options);
    // fill up the DB
    std::string one, two;
    PutFixed64(&one, 1);
    PutFixed64(&two, 2);
    // foo = 1 + 1 = 2, bar = 1; replaying the log again would double them.
    ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one)));
    ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one)));
    ASSERT_OK(db_->Merge(WriteOptions(), Slice("bar"), Slice(one)));
    // copy the logs to backup
    std::vector<std::string> logs;
    env_->GetChildren(options.wal_dir, &logs);
    for (auto& log : logs) {
      if (log != ".." && log != ".") {
        CopyFile(options.wal_dir + "/" + log, backup_logs + "/" + log);
      }
    }
    // recover the DB
    Reopen(&options);
    ASSERT_EQ(two, Get("foo"));
    ASSERT_EQ(one, Get("bar"));
    Close();
    // copy the logs from backup back to wal dir
    for (auto& log : logs) {
      if (log != ".." && log != ".") {
        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
      }
    }
    // this should ignore the log files, recovery should not happen again
    // if the recovery happens, the same merge operator would be called twice,
    // leading to incorrect results
    Reopen(&options);
    ASSERT_EQ(two, Get("foo"));
    ASSERT_EQ(one, Get("bar"));
    Close();
    Destroy(&options);
    // copy the logs from backup back to wal dir
    env_->CreateDirIfMissing(options.wal_dir);
    for (auto& log : logs) {
      if (log != ".." && log != ".") {
        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
        // we won't need this file any more
        env_->DeleteFile(backup_logs + "/" + log);
      }
    }
    // assert that we successfully recovered only from logs, even though we
    // destroyed the DB
    Reopen(&options);
    ASSERT_EQ(two, Get("foo"));
    ASSERT_EQ(one, Get("bar"));
    Close();
  } while (ChangeOptions());
}
// Data must survive many consecutive Reopen() calls (each of which rolls
// the WAL), both before and after an overwrite.
TEST(DBTest, RollLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("baz", "v5"));
    Reopen();
    for (int i = 0; i < 10; i++) {
      Reopen();
    }
    ASSERT_OK(Put("foo", "v4"));
    for (int i = 0; i < 10; i++) {
      Reopen();
    }
  } while (ChangeOptions());
}
// Interleaves writes with the WAL disabled and enabled; after each
// Reopen() all values must be present regardless of which writes bypassed
// the WAL.
TEST(DBTest, WAL) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
    Reopen();
    // Both value's should be present.
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ("v2", Get("foo"));
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
    Reopen();
    // again both values should be present.
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v3", Get("bar"));
  } while (ChangeCompactOptions());
}
// A second concurrent open of the same database must fail (the DB holds a
// file lock while open).
TEST(DBTest, CheckLock) {
  do {
    DB* localdb;
    Options options = CurrentOptions();
    ASSERT_OK(TryReopen(&options));
    // second open should fail
    ASSERT_TRUE(!(PureReopen(&options, &localdb)).ok());
  } while (ChangeCompactOptions());
}
// With several write buffers allowed and min_write_buffer_number_to_merge
// set, reads must still see values that sit in unflushed/immutable
// memtables, and explicit Flush() calls must succeed in that state.
TEST(DBTest, FlushMultipleMemtable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    Reopen(&options);
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    dbfull()->Flush(FlushOptions());
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));
    dbfull()->Flush(FlushOptions());
  } while (ChangeCompactOptions());
}
// Tracks the "rocksdb.num-immutable-mem-table" property as oversized
// writes seal memtables, and uses perf_context.get_from_memtable_count to
// verify how many memtables each Get() has to consult.
// Fix: removed a stray double semicolon after SetPerfLevel().
TEST(DBTest, NumImmutableMemTable) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    options.max_write_buffer_number = 4;
    options.min_write_buffer_number_to_merge = 3;
    options.write_buffer_size = 1000000;
    Reopen(&options);
    // Each value is twice the write buffer size, so every Put fills and
    // seals the active memtable.
    std::string big_value(1000000 * 2, 'x');
    std::string num;
    SetPerfLevel(kEnableTime);
    ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    perf_context.Reset();
    Get("k1");
    // k1 is in the (single) active memtable: one memtable consulted.
    ASSERT_EQ(1, (int) perf_context.get_from_memtable_count);
    ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "1");
    perf_context.Reset();
    Get("k1");
    // k1 now sits behind one newer memtable: two lookups needed.
    ASSERT_EQ(2, (int) perf_context.get_from_memtable_count);
    perf_context.Reset();
    Get("k2");
    ASSERT_EQ(1, (int) perf_context.get_from_memtable_count);
    ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
    // Only the success of this property call is checked; `num` is
    // overwritten by the following GetProperty().
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cur-size-active-mem-table",
                                      &num));
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "2");
    perf_context.Reset();
    Get("k2");
    ASSERT_EQ(2, (int) perf_context.get_from_memtable_count);
    perf_context.Reset();
    Get("k3");
    ASSERT_EQ(1, (int) perf_context.get_from_memtable_count);
    perf_context.Reset();
    Get("k1");
    ASSERT_EQ(3, (int) perf_context.get_from_memtable_count);
    dbfull()->Flush(FlushOptions());
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
    ASSERT_EQ(num, "0");
    ASSERT_TRUE(dbfull()->GetProperty("rocksdb.cur-size-active-mem-table",
                                      &num));
    // "208" is the size of the metadata of an empty skiplist, this would
    // break if we change the default skiplist implementation
    ASSERT_EQ(num, "208");
    SetPerfLevel(kDisable);
  } while (ChangeCompactOptions());
}
// Occupies a background thread until WakeUp() is called. Tests schedule it
// on an Env thread pool to block background work so that pending
// flush/compaction state can be observed deterministically.
class SleepingBackgroundTask {
 public:
  SleepingBackgroundTask() : bg_cv_(&mutex_), should_sleep_(true) {}
  // Blocks the calling (background) thread until WakeUp() is invoked.
  void DoSleep() {
    MutexLock l(&mutex_);
    while (should_sleep_) {
      bg_cv_.Wait();
    }
  }
  // Releases any thread blocked in DoSleep().
  void WakeUp() {
    MutexLock l(&mutex_);
    should_sleep_ = false;
    bg_cv_.SignalAll();
  }
  // Adapter matching the void(void*) signature expected by Env::Schedule().
  static void DoSleepTask(void* arg) {
    reinterpret_cast<SleepingBackgroundTask*>(arg)->DoSleep();
  }
 private:
  port::Mutex mutex_;
  port::CondVar bg_cv_;  // Signalled when background work finishes
  bool should_sleep_;    // Guarded by mutex_
};
// Verifies the pending-work DB properties (num-immutable-mem-table,
// mem-table-flush-pending, compaction-pending). Both background thread
// pools are first blocked by SleepingBackgroundTask so that pending state
// cannot be drained before it is observed.
TEST(DBTest, GetProperty) {
  // Set sizes to both background thread pool to be 1 and block them.
  env_->SetBackgroundThreads(1, Env::HIGH);
  env_->SetBackgroundThreads(1, Env::LOW);
  SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
                 Env::Priority::LOW);
  SleepingBackgroundTask sleeping_task_high;
  env_->Schedule(&SleepingBackgroundTask::DoSleepTask, &sleeping_task_high,
                 Env::Priority::HIGH);
  Options options = CurrentOptions();
  WriteOptions writeOpt = WriteOptions();
  writeOpt.disableWAL = true;
  options.compaction_style = kCompactionStyleUniversal;
  options.level0_file_num_compaction_trigger = 1;
  options.compaction_options_universal.size_ratio = 50;
  options.max_background_compactions = 1;
  options.max_background_flushes = 1;
  options.max_write_buffer_number = 10;
  options.min_write_buffer_number_to_merge = 1;
  options.write_buffer_size = 1000000;
  Reopen(&options);
  // Each value is twice the write buffer size, so every Put seals the
  // active memtable.
  std::string big_value(1000000 * 2, 'x');
  std::string num;
  SetPerfLevel(kEnableTime);
  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "0");
  perf_context.Reset();
  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "1");
  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
  ASSERT_EQ(num, "2");
  // Flushes cannot run while the HIGH pool is blocked, so a flush is
  // reported as pending.
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "1");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "0");
  // Unblock the flush pool; after flushing, more writes make a compaction
  // pending while the LOW (compaction) pool is still blocked.
  sleeping_task_high.WakeUp();
  dbfull()->TEST_WaitForFlushMemTable();
  ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
  ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
  dbfull()->TEST_WaitForFlushMemTable();
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
  ASSERT_EQ(num, "0");
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
  ASSERT_EQ(num, "1");
  sleeping_task_low.WakeUp();
}
// Mixes disableWAL on/off with explicit Flush() calls and checks that all
// values survive Reopen(); perf_context is used to confirm that post-flush
// reads are served from table files rather than the memtable.
// Fix: removed a stray double semicolon after SetPerfLevel().
TEST(DBTest, FLUSH) {
  do {
    Options options = CurrentOptions();
    WriteOptions writeOpt = WriteOptions();
    writeOpt.disableWAL = true;
    SetPerfLevel(kEnableTime);
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v1"));
    // this will now also flush the last 2 writes
    dbfull()->Flush(FlushOptions());
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v1"));
    perf_context.Reset();
    Get("foo");
    // "foo" was flushed, so the read must have touched an output file.
    ASSERT_TRUE((int) perf_context.get_from_output_files_time > 0);
    Reopen();
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v1", Get("bar"));
    writeOpt.disableWAL = true;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v2"));
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v2"));
    dbfull()->Flush(FlushOptions());
    Reopen();
    ASSERT_EQ("v2", Get("bar"));
    perf_context.Reset();
    ASSERT_EQ("v2", Get("foo"));
    ASSERT_TRUE((int) perf_context.get_from_output_files_time > 0);
    writeOpt.disableWAL = false;
    ASSERT_OK(dbfull()->Put(writeOpt, "bar", "v3"));
    ASSERT_OK(dbfull()->Put(writeOpt, "foo", "v3"));
    dbfull()->Flush(FlushOptions());
    Reopen();
    // 'foo' should be there because its put
    // has WAL enabled.
    ASSERT_EQ("v3", Get("foo"));
    ASSERT_EQ("v3", Get("bar"));
    SetPerfLevel(kDisable);
  } while (ChangeCompactOptions());
}
// Back-to-back Reopen() calls make the second recovery see an empty WAL;
// a subsequent write and one more recovery must still work correctly.
TEST(DBTest, RecoveryWithEmptyLog) {
  do {
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_OK(Put("foo", "v2"));
    Reopen();
    Reopen();
    ASSERT_OK(Put("foo", "v3"));
    Reopen();
    ASSERT_EQ("v3", Get("foo"));
  } while (ChangeOptions());
}
// Check that writes done during a memtable compaction are recovered
// if the database is shutdown during the memtable compaction.
TEST(DBTest, RecoverDuringMemtableCompaction) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    // Small buffer relative to "big1" below forces a memtable switch.
    options.write_buffer_size = 1000000;
    Reopen(&options);
    // Trigger a long memtable compaction and reopen the database during it
    ASSERT_OK(Put("foo", "v1"));                         // Goes to 1st log file
    ASSERT_OK(Put("big1", std::string(10000000, 'x')));  // Fills memtable
    ASSERT_OK(Put("big2", std::string(1000, 'y')));      // Triggers compaction
    ASSERT_OK(Put("bar", "v2"));                         // Goes to new log file
    // Reopening mid-compaction: every write above must still be recovered.
    Reopen(&options);
    ASSERT_EQ("v1", Get("foo"));
    ASSERT_EQ("v2", Get("bar"));
    ASSERT_EQ(std::string(10000000, 'x'), Get("big1"));
    ASSERT_EQ(std::string(1000, 'y'), Get("big2"));
  } while (ChangeOptions());
}
// With a tiny write buffer, writing N ~1KB values must produce new table
// files (minor compactions / flushes), and all data must remain readable
// both before and after a Reopen().
TEST(DBTest, MinorCompactionsHappen) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 10000;
    Reopen(&options);
    const int N = 500;
    int starting_num_tables = TotalTableFiles();
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), Key(i) + std::string(1000, 'v')));
    }
    int ending_num_tables = TotalTableFiles();
    // The writes exceed the buffer many times over, so flushes must have
    // created new table files.
    ASSERT_GT(ending_num_tables, starting_num_tables);
    for (int i = 0; i < N; i++) {
      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
    }
    Reopen();
    for (int i = 0; i < N; i++) {
      ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
    }
  } while (ChangeCompactOptions());
}
// With max_manifest_file_size set to 10 bytes, every LogAndApply (flush,
// reopen) must roll over to a new manifest file, and data must still be
// intact after the rollovers.
TEST(DBTest, ManifestRollOver) {
  do {
    Options options = CurrentOptions();
    options.max_manifest_file_size = 10 ;  // 10 bytes
    Reopen(&options);
    {
      ASSERT_OK(Put("manifest_key1", std::string(1000, '1')));
      ASSERT_OK(Put("manifest_key2", std::string(1000, '2')));
      ASSERT_OK(Put("manifest_key3", std::string(1000, '3')));
      uint64_t manifest_before_flush =
        dbfull()->TEST_Current_Manifest_FileNo();
      dbfull()->Flush(FlushOptions());  // This should trigger LogAndApply.
      uint64_t manifest_after_flush =
        dbfull()->TEST_Current_Manifest_FileNo();
      ASSERT_GT(manifest_after_flush, manifest_before_flush);
      Reopen(&options);
      ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(),
                manifest_after_flush);
      // check if a new manifest file got inserted or not.
      ASSERT_EQ(std::string(1000, '1'), Get("manifest_key1"));
      ASSERT_EQ(std::string(1000, '2'), Get("manifest_key2"));
      ASSERT_EQ(std::string(1000, '3'), Get("manifest_key3"));
    }
  } while (ChangeCompactOptions());
}
// The DB identity must be stable across restarts, and must be regenerated
// (i.e. change) when the IDENTITY file is deleted before a reopen.
TEST(DBTest, IdentityAcrossRestarts) {
  do {
    std::string id1;
    ASSERT_OK(db_->GetDbIdentity(id1));
    Options options = CurrentOptions();
    Reopen(&options);
    std::string id2;
    ASSERT_OK(db_->GetDbIdentity(id2));
    // id1 should match id2 because identity was not regenerated
    ASSERT_EQ(id1.compare(id2), 0);
    std::string idfilename = IdentityFileName(dbname_);
    ASSERT_OK(env_->DeleteFile(idfilename));
    Reopen(&options);
    std::string id3;
    ASSERT_OK(db_->GetDbIdentity(id3));
    // id1 should NOT match id3 because identity was regenerated
    ASSERT_NE(id1.compare(id3), 0);
  } while (ChangeCompactOptions());
}
// Recovering a large WAL with a small write buffer must flush table files
// mid-recovery instead of buffering the whole log in one memtable.
TEST(DBTest, RecoverWithLargeLog) {
  do {
    {
      Options options = CurrentOptions();
      Reopen(&options);
      ASSERT_OK(Put("big1", std::string(200000, '1')));
      ASSERT_OK(Put("big2", std::string(200000, '2')));
      ASSERT_OK(Put("small3", std::string(10, '3')));
      ASSERT_OK(Put("small4", std::string(10, '4')));
      // Everything is still in the memtable/WAL: no table files yet.
      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    }
    // Make sure that if we re-open with a small write buffer size that
    // we flush table files in the middle of a large log file.
    Options options = CurrentOptions();
    options.write_buffer_size = 100000;
    Reopen(&options);
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);
    ASSERT_EQ(std::string(200000, '1'), Get("big1"));
    ASSERT_EQ(std::string(200000, '2'), Get("big2"));
    ASSERT_EQ(std::string(10, '3'), Get("small3"));
    ASSERT_EQ(std::string(10, '4'), Get("small4"));
    // NOTE(review): redundant given the ASSERT_EQ(..., 3) above.
    ASSERT_GT(NumTableFilesAtLevel(0), 1);
  } while (ChangeCompactOptions());
}
// A manual compaction of a large level-0 file set must split the output
// into multiple level-1 files, and no data may be lost.
TEST(DBTest, CompactionsGenerateMultipleFiles) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100000000;        // Large write buffer
  Reopen(&options);
  Random rnd(301);
  // Write 8MB (80 values, each 100K)
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  std::vector<std::string> values;
  for (int i = 0; i < 80; i++) {
    values.push_back(RandomString(&rnd, 100000));
    ASSERT_OK(Put(Key(i), values[i]));
  }
  // Reopening moves updates to level-0
  Reopen(&options);
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_GT(NumTableFilesAtLevel(1), 1);
  for (int i = 0; i < 80; i++) {
    ASSERT_EQ(Get(Key(i)), values[i]);
  }
}
// Level-0 compaction must kick in exactly when the number of level-0 files
// reaches level0_file_num_compaction_trigger, moving everything to level 1.
TEST(DBTest, CompactionTrigger) {
  Options options = CurrentOptions();
  options.write_buffer_size = 100<<10; //100KB
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  options.level0_file_num_compaction_trigger = 3;
  Reopen(&options);
  Random rnd(301);
  // Build up trigger-1 level-0 files without starting a compaction.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    std::vector<std::string> values;
    // Write 120KB (12 values, each 10K)
    for (int i = 0; i < 12; i++) {
      values.push_back(RandomString(&rnd, 10000));
      ASSERT_OK(Put(Key(i), values[i]));
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }
  //generate one more file in level-0, and should trigger level-0 compaction
  std::vector<std::string> values;
  for (int i = 0; i < 12; i++) {
    values.push_back(RandomString(&rnd, 10000));
    ASSERT_OK(Put(Key(i), values[i]));
  }
  dbfull()->TEST_WaitForCompact();
  // Level 0 drained into a single level-1 file.
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 1);
}
// Shared state for the test CompactionFilter implementations below.
// cfilter_count counts how many key/value pairs the filters were asked
// about; NEW_VALUE is the replacement value written by ChangeFilter.
static int cfilter_count;
static std::string NEW_VALUE = "NewValue";
class KeepFilter : public CompactionFilter {
public:
virtual bool Filter(int level, const Slice& key, const Slice& value,
std::string* new_value, bool* value_changed) const
override {
cfilter_count++;
return false;
}
virtual const char* Name() const override { return "KeepFilter"; }
};
class DeleteFilter : public CompactionFilter {
public:
virtual bool Filter(int level, const Slice& key, const Slice& value,
std::string* new_value, bool* value_changed) const
override {
cfilter_count++;
return true;
}
virtual const char* Name() const override { return "DeleteFilter"; }
};
class ChangeFilter : public CompactionFilter {
public:
explicit ChangeFilter() {}
virtual bool Filter(int level, const Slice& key, const Slice& value,
std::string* new_value, bool* value_changed) const
override {
assert(new_value != nullptr);
*new_value = NEW_VALUE;
*value_changed = true;
return false;
}
virtual const char* Name() const override { return "ChangeFilter"; }
};
// Factory producing KeepFilter instances. When constructed with
// check_context = true, it additionally asserts that the compaction
// context flags match the expectations set in expect_full_compaction_ /
// expect_manual_compaction_ before each filter is created.
class KeepFilterFactory : public CompactionFilterFactory {
 public:
  explicit KeepFilterFactory(bool check_context = false)
      : check_context_(check_context) {}
  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
      const CompactionFilter::Context& context) override {
    if (check_context_) {
      ASSERT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
      ASSERT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
    }
    return std::unique_ptr<CompactionFilter>(new KeepFilter());
  }
  virtual const char* Name() const override { return "KeepFilterFactory"; }
  bool check_context_;
  // Expected context flags; tests set these before triggering compactions.
  std::atomic_bool expect_full_compaction_;
  std::atomic_bool expect_manual_compaction_;
};
class DeleteFilterFactory : public CompactionFilterFactory {
public:
virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
const CompactionFilter::Context& context) override {
if (context.is_manual_compaction) {
return std::unique_ptr<CompactionFilter>(new DeleteFilter());
} else {
return std::unique_ptr<CompactionFilter>(nullptr);
}
}
virtual const char* Name() const override { return "DeleteFilterFactory"; }
};
class ChangeFilterFactory : public CompactionFilterFactory {
public:
explicit ChangeFilterFactory() {}
virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
const CompactionFilter::Context& context) override {
return std::unique_ptr<CompactionFilter>(new ChangeFilter());
}
virtual const char* Name() const override { return "ChangeFilterFactory"; }
};
// TODO(kailiu) The tests on UniversalCompaction have some issues:
// 1. A lot of magic numbers ("11" or "12").
// 2. They make assumptions about the memtable flush conditions, which may
//    change from time to time.
// End-to-end check of universal compaction triggering, in five stages:
// full compactions, partial compactions, and a no-op trigger, verifying
// level-0 file counts after each stage. KeepFilterFactory (check_context
// mode) additionally asserts the is_full_compaction/is_manual_compaction
// context flags are set as expected.
TEST(DBTest, UniversalCompactionTrigger) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  KeepFilterFactory* filter = new KeepFilterFactory(true);
  filter->expect_manual_compaction_.store(false);
  options.compaction_filter_factory.reset(filter);
  Reopen(&options);
  Random rnd(301);
  int key_idx = 0;
  filter->expect_full_compaction_.store(true);
  // Stage 1:
  //   Generate a set of files at level 0, but don't trigger level-0
  //   compaction.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-1;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Suppose each file flushed from mem table has size 1. Now we compact
  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
  // Stage 2:
  // Now we have one file at level 0, with size 4. We also have some data in
  // mem table. Let's continue generating new files at level 0, but don't
  // trigger level-0 compaction.
  // First, clean up memtable before inserting new data. This will generate
  // a level-0 file, with size around 0.4 (according to previously written
  // data amount).
  filter->expect_full_compaction_.store(false);
  dbfull()->Flush(FlushOptions());
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-3;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 2 files, with size 4, 2.4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
  // Stage 3:
  // Now we have 2 files at level 0, with size 4 and 2.4. Continue
  // generating new files at level 0.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-3;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 12; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 2.4, 2.
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
  // Stage 4:
  // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
  // new file of size 1.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
  // Stage 5:
  // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
  // a new file of size 1.
  filter->expect_full_compaction_.store(true);
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // All files at level 0 will be compacted into a single one.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
}
// Universal compaction must trigger on size amplification (>110% here)
// even when the size-ratio condition alone would not fire: two similar
// files plus a tiny flushed remainder collapse into a single file.
TEST(DBTest, UniversalCompactionSizeAmplification) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 3;
  // Trigger compaction if size amplification exceeds 110%
  options.compaction_options_universal.
    max_size_amplification_percent = 110;
  Reopen(&options);
  Random rnd(301);
  int key_idx = 0;
  //   Generate two files in Level 0. Both files are approx the same size.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-1;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  // Flush whatever is remaining in memtable. This is typically
  // small, which should not trigger size ratio based compaction
  // but will instead trigger size amplification.
  dbfull()->Flush(FlushOptions());
  dbfull()->TEST_WaitForCompact();
  // Verify that size amplification did occur
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
}
// With compression disabled (compression_size_percent = -1) and a single
// level, hitting the file-number trigger must compact all level-0 files
// into one.
TEST(DBTest, UniversalCompactionOptions) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 4;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = -1;
  Reopen(&options);
  Random rnd(301);
  int key_idx = 0;
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    // Before the trigger fires, files simply accumulate at level 0.
    if (num < options.level0_file_num_compaction_trigger - 1) {
      ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
    }
  }
  dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  for (int i = 1; i < options.num_levels ; i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }
}
// With stop_style = kCompactionStopStyleSimilarSize, universal compaction
// only merges runs of similarly-sized files, so a much larger file (size 4)
// is left alone while the similar-sized newer files are merged.
TEST(DBTest, UniversalCompactionStopStyleSimilarSize) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  // trigger compaction if there are >= 4 files
  options.level0_file_num_compaction_trigger = 4;
  options.compaction_options_universal.size_ratio = 10;
  options.compaction_options_universal.stop_style = kCompactionStopStyleSimilarSize;
  options.num_levels=1;
  Reopen(&options);
  Random rnd(301);
  int key_idx = 0;
  // Stage 1:
  //   Generate a set of files at level 0, but don't trigger level-0
  //   compaction.
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-1;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Suppose each file flushed from mem table has size 1. Now we compact
  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
  // file of size 4.
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
  // Stage 2:
  // Now we have one file at level 0, with size 4. We also have some data in
  // mem table. Let's continue generating new files at level 0, but don't
  // trigger level-0 compaction.
  // First, clean up memtable before inserting new data. This will generate
  // a level-0 file, with size around 0.4 (according to previously written
  // data amount).
  dbfull()->Flush(FlushOptions());
  for (int num = 0;
       num < options.level0_file_num_compaction_trigger-3;
       num++) {
    // Write 110KB (11 values, each 10K)
    for (int i = 0; i < 11; i++) {
      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
  }
  // Generate one more file at level-0, which should trigger level-0
  // compaction.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
  // After compaction, we should have 3 files, with size 4, 0.4, 2.
  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
  // Stage 3:
  //   Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
  //   more file at level-0, which should trigger level-0 compaction.
  for (int i = 0; i < 11; i++) {
    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
    key_idx++;
  }
  dbfull()->TEST_WaitForCompact();
  // Level-0 compaction is triggered, but no file will be picked up.
  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
}
#if defined(SNAPPY) && defined(ZLIB) && defined(BZIP2)
// Exercises the three block-cache configurations (uncompressed only,
// compressed only, both) and verifies via cache-miss tickers that the
// expected cache(s) were actually exercised.
TEST(DBTest, CompressedCache) {
  int num_iter = 80;
  // Run this test three iterations.
  // Iteration 1: only a uncompressed block cache
  // Iteration 2: only a compressed block cache
  // Iteration 3: both block cache and compressed cache
  for (int iter = 0; iter < 3; iter++) {
    Options options = CurrentOptions();
    options.write_buffer_size = 64*1024;        // small write buffer
    options.statistics = rocksdb::CreateDBStatistics();
    switch (iter) {
      case 0:
        // only uncompressed block cache
        options.block_cache = NewLRUCache(8*1024);
        options.block_cache_compressed = nullptr;
        break;
      case 1:
        // no block cache, only compressed cache
        options.no_block_cache = true;
        options.block_cache = nullptr;
        options.block_cache_compressed = NewLRUCache(8*1024);
        break;
      case 2:
        // both compressed and uncompressed block cache
        options.block_cache = NewLRUCache(1024);
        options.block_cache_compressed = NewLRUCache(8*1024);
        break;
      default:
        ASSERT_TRUE(false);
    }
    Reopen(&options);
    Random rnd(301);
    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    std::vector<std::string> values;
    std::string str;
    for (int i = 0; i < num_iter; i++) {
      // Reuse each random string for 4 consecutive keys so the data is
      // compressible.
      if (i % 4 == 0) {        // high compression ratio
        str = RandomString(&rnd, 1000);
      }
      values.push_back(str);
      ASSERT_OK(Put(Key(i), values[i]));
    }
    // flush all data from memtable so that reads are from block cache
    dbfull()->Flush(FlushOptions());
    for (int i = 0; i < num_iter; i++) {
      ASSERT_EQ(Get(Key(i)), values[i]);
    }
    // check that we triggered the appropriate code paths in the cache
    switch (iter) {
      case 0:
        // only uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 1:
        // no block cache, only compressed cache
        ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      case 2:
        // both compressed and uncompressed block cache
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0);
        ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0);
        break;
      default:
        ASSERT_TRUE(false);
    }
  }
}
// Builds a random string of `len` bytes that compresses to roughly 80%
// of its original size (via the shared test helper).
static std::string CompressibleString(Random* rnd, int len) {
  std::string result;
  test::CompressibleString(rnd, 0.8, len, &result);
  return result;
}
// With compression_size_percent = 70, universal compaction compresses the
// output only while the newly compacted data stays under 70% of the total;
// the final large compaction therefore stays uncompressed.
TEST(DBTest, UniversalCompactionCompressRatio1) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = 70;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;

  // Writes `rounds` batches of 11 compressible 10K values (110KB each) and
  // waits for the resulting flush + compaction after every batch.
  auto write_rounds = [&](int rounds) {
    for (int round = 0; round < rounds; round++) {
      for (int i = 0; i < 11; i++) {
        ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
        key_idx++;
      }
      dbfull()->TEST_WaitForFlushMemTable();
      dbfull()->TEST_WaitForCompact();
    }
  };

  // The first compaction (2) is compressed.
  write_rounds(2);
  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 2 * 0.9);

  // The second compaction (4) is compressed.
  write_rounds(2);
  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 4 * 0.9);

  // The third compaction (2 4) is compressed since this time it is
  // (1 1 3.2) and 3.2/5.2 doesn't reach ratio.
  write_rounds(2);
  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(), 110000 * 6 * 0.9);

  // When we get to the compaction up to (2 4 8), the latest compaction
  // output is no longer compressed.
  write_rounds(8);
  ASSERT_GT((int)dbfull()->TEST_GetLevel0TotalSize(),
            110000 * 11 * 0.8 + 110000 * 2);
}
// With compression_size_percent = 95 even the largest (2 4 8) compaction
// stays under the threshold, so everything ends up compressed.
TEST(DBTest, UniversalCompactionCompressRatio2) {
  Options options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 2;
  options.num_levels = 1;
  options.compaction_options_universal.compression_size_percent = 95;
  Reopen(&options);

  Random rnd(301);
  int key_idx = 0;
  for (int round = 0; round < 14; round++) {
    // Write 120KB (12 values, each 10K) per round, then let the flush and
    // any triggered compaction finish before the next round.
    for (int i = 0; i < 12; i++) {
      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
      key_idx++;
    }
    dbfull()->TEST_WaitForFlushMemTable();
    dbfull()->TEST_WaitForCompact();
  }
  ASSERT_LT((int)dbfull()->TEST_GetLevel0TotalSize(),
            120000 * 12 * 0.8 + 120000 * 2);
}
#endif
// Verifies migration of an existing DB from level-style to universal-style
// compaction:
//   Stage 1: build a multi-level DB under level compaction.
//   Stage 2: reopening it directly with universal compaction must fail
//            (universal compaction expects all data in level 0).
//   Stage 3: compact everything into a single file moved to level 0.
//   Stage 4: reopen with universal compaction and keep operating on it.
TEST(DBTest, ConvertCompactionStyle) {
  Random rnd(301);
  int max_key_level_insert = 200;
  int max_key_universal_insert = 600;

  // Stage 1: generate a db with level compaction
  Options options = CurrentOptions();
  options.write_buffer_size = 100<<10; //100KB
  options.num_levels = 4;
  options.level0_file_num_compaction_trigger = 3;
  options.max_bytes_for_level_base = 500<<10; // 500KB
  options.max_bytes_for_level_multiplier = 1;
  options.target_file_size_base = 200<<10; // 200KB
  options.target_file_size_multiplier = 1;
  Reopen(&options);

  for (int i = 0; i <= max_key_level_insert; i++) {
    // each value is 10K
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
  }
  dbfull()->Flush(FlushOptions());
  dbfull()->TEST_WaitForCompact();

  ASSERT_GT(TotalTableFiles(), 1);
  int non_level0_num_files = 0;
  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
    non_level0_num_files += NumTableFilesAtLevel(i);
  }
  // Level compaction must have pushed some files below level 0.
  ASSERT_GT(non_level0_num_files, 0);

  // Stage 2: reopen with universal compaction - should fail
  options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  Status s = TryReopen(&options);
  ASSERT_TRUE(s.IsInvalidArgument());

  // Stage 3: compact into a single file and move the file to level 0
  options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.target_file_size_base = INT_MAX;
  options.target_file_size_multiplier = 1;
  options.max_bytes_for_level_base = INT_MAX;
  options.max_bytes_for_level_multiplier = 1;
  Reopen(&options);

  dbfull()->CompactRange(nullptr, nullptr,
                         true /* reduce level */,
                         0 /* reduce to level 0 */);

  // After the forced compaction the whole DB is exactly one file in level 0.
  for (int i = 0; i < dbfull()->NumberLevels(); i++) {
    int num = NumTableFilesAtLevel(i);
    if (i == 0) {
      ASSERT_EQ(num, 1);
    } else {
      ASSERT_EQ(num, 0);
    }
  }

  // Stage 4: re-open in universal compaction style and do some db operations
  options = CurrentOptions();
  options.compaction_style = kCompactionStyleUniversal;
  options.write_buffer_size = 100<<10; //100KB
  options.level0_file_num_compaction_trigger = 3;
  Reopen(&options);

  // Overlap with the old key range so both generations of data are read back.
  for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, 10000)));
  }
  dbfull()->Flush(FlushOptions());
  dbfull()->TEST_WaitForCompact();

  // Universal compaction keeps all files in level 0.
  for (int i = 1; i < dbfull()->NumberLevels(); i++) {
    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
  }

  // verify keys inserted in both level compaction style and universal
  // compaction style
  std::string keys_in_db;
  Iterator* iter = dbfull()->NewIterator(ReadOptions());
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    keys_in_db.append(iter->key().ToString());
    keys_in_db.push_back(',');
  }
  delete iter;

  std::string expected_keys;
  for (int i = 0; i <= max_key_universal_insert; i++) {
    expected_keys.append(Key(i));
    expected_keys.push_back(',');
  }

  ASSERT_EQ(keys_in_db, expected_keys);
}
// Fills level-0 with files of ~120KB (12 values of 10K each) until one file
// short of the compaction trigger, then writes one more file and checks that
// the resulting level-0 compaction produced exactly one level-1 file.
void MinLevelHelper(DBTest* self, Options& options) {
  Random rnd(301);

  // Writes one memtable's worth of data (12 random 10K values).
  auto write_one_file = [&]() {
    std::vector<std::string> values;
    for (int i = 0; i < 12; i++) {
      values.push_back(RandomString(&rnd, 10000));
      ASSERT_OK(self->Put(Key(i), values[i]));
    }
  };

  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
       num++) {
    write_one_file();
    self->dbfull()->TEST_WaitForFlushMemTable();
    ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
  }

  // One more level-0 file should trigger the level-0 compaction.
  write_one_file();
  self->dbfull()->TEST_WaitForCompact();
  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
}
// Configures `options` for the MinLevelToCompress tests: small write buffer,
// 3 levels, and per-level compression that leaves L0 uncompressed while all
// deeper levels use the first compression library available in this build
// (reported through `type`).
// Returns false if the calling test should be skipped because no compression
// library is compiled in.
bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
                        int lev, int strategy) {
  // Fixed: the log message previously ended with a stray '}' that had no
  // matching opening brace.
  fprintf(stderr,
          "Test with compression options : window_bits = %d, level = %d, "
          "strategy = %d\n",
          wbits, lev, strategy);
  options.write_buffer_size = 100<<10; //100KB
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  options.level0_file_num_compaction_trigger = 3;
  options.create_if_missing = true;

  // Probe the supported compression libraries in order of preference.
  if (SnappyCompressionSupported(CompressionOptions(wbits, lev, strategy))) {
    type = kSnappyCompression;
    fprintf(stderr, "using snappy\n");
  } else if (ZlibCompressionSupported(
                 CompressionOptions(wbits, lev, strategy))) {
    type = kZlibCompression;
    fprintf(stderr, "using zlib\n");
  } else if (BZip2CompressionSupported(
                 CompressionOptions(wbits, lev, strategy))) {
    type = kBZip2Compression;
    fprintf(stderr, "using bzip2\n");
  } else if (LZ4CompressionSupported(
                 CompressionOptions(wbits, lev, strategy))) {
    type = kLZ4Compression;
    fprintf(stderr, "using lz4\n");
  } else if (LZ4HCCompressionSupported(
                 CompressionOptions(wbits, lev, strategy))) {
    type = kLZ4HCCompression;
    fprintf(stderr, "using lz4hc\n");
  } else {
    fprintf(stderr, "skipping test, compression disabled\n");
    return false;
  }

  options.compression_per_level.resize(options.num_levels);
  // do not compress L0
  options.compression_per_level[0] = kNoCompression;
  for (int i = 1; i < options.num_levels; i++) {
    options.compression_per_level[i] = type;
  }
  return true;
}
// Runs MinLevelHelper twice: first with only L0 uncompressed, then with
// both L0 and L1 uncompressed (window_bits = -14 variant).
TEST(DBTest, MinLevelToCompress1) {
  Options options = CurrentOptions();
  CompressionType type;
  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
    return;  // no compression library compiled in
  }
  Reopen(&options);
  MinLevelHelper(this, options);

  // Second pass: leave L0 and L1 uncompressed, compress deeper levels.
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  for (int level = 2; level < options.num_levels; level++) {
    options.compression_per_level[level] = type;
  }
  DestroyAndReopen(&options);
  MinLevelHelper(this, options);
}
// Same as MinLevelToCompress1, but with window_bits = 15.
TEST(DBTest, MinLevelToCompress2) {
  Options options = CurrentOptions();
  CompressionType type;
  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
    return;  // no compression library compiled in
  }
  Reopen(&options);
  MinLevelHelper(this, options);

  // Second pass: leave L0 and L1 uncompressed, compress deeper levels.
  options.compression_per_level[0] = kNoCompression;
  options.compression_per_level[1] = kNoCompression;
  for (int level = 2; level < options.num_levels; level++) {
    options.compression_per_level[level] = type;
  }
  DestroyAndReopen(&options);
  MinLevelHelper(this, options);
}
// Overwriting a single key repeatedly must not accumulate table files
// beyond the per-level bound.
TEST(DBTest, RepeatedWritesToSameKey) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.write_buffer_size = 100000; // Small write buffer
    Reopen(&options);

    // We must have at most one file per level except for level-0,
    // which may have up to kL0_StopWritesTrigger files.
    const int kMaxFiles =
        dbfull()->NumberLevels() + dbfull()->Level0StopWriteTrigger();

    Random rnd(301);
    std::string big_value = RandomString(&rnd, 2 * options.write_buffer_size);
    for (int attempt = 0; attempt < 5 * kMaxFiles; attempt++) {
      Put("key", big_value);
      ASSERT_LE(TotalTableFiles(), kMaxFiles);
    }
  } while (ChangeCompactOptions());
}
// With inplace_update_support, shrinking overwrites of one key should
// collapse to a single memtable entry.
TEST(DBTest, InPlaceUpdate) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    Reopen(&options);

    // Overwrite the same key with progressively smaller values.
    const int kNumValues = 10;
    for (int size = kNumValues; size > 0; size--) {
      std::string value = DummyString(size, 'a');
      ASSERT_OK(Put("key", value));
      ASSERT_EQ(value, Get("key"));
    }

    // Only 1 instance for that key.
    validateNumberOfEntries(1);
  } while (ChangeCompactOptions());
}
// Growing overwrites cannot be done in place, so each update leaves its
// own entry in the memtable.
TEST(DBTest, InPlaceUpdateLargeNewValue) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    Reopen(&options);

    // Overwrite the same key with progressively larger values.
    const int kNumValues = 10;
    for (int size = 0; size < kNumValues; size++) {
      std::string value = DummyString(size, 'a');
      ASSERT_OK(Put("key", value));
      ASSERT_EQ(value, Get("key"));
    }

    // All 10 updates exist in the internal iterator
    validateNumberOfEntries(kNumValues);
  } while (ChangeCompactOptions());
}
// Exercises the inplace_callback that shrinks the stored value by one byte;
// all updates land on the single existing entry.
TEST(DBTest, InPlaceUpdateCallbackSmallerSize) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    options.inplace_callback = rocksdb::DBTest::updateInPlaceSmallerSize;
    Reopen(&options);

    // Seed the key; the callback stores kNumValues 'c' characters.
    const int kNumValues = 10;
    ASSERT_OK(Put("key", DummyString(kNumValues, 'a')));
    ASSERT_EQ(DummyString(kNumValues, 'c'), Get("key"));

    // Each smaller update is rewritten by the callback to one fewer 'b'.
    for (int size = kNumValues; size > 0; size--) {
      ASSERT_OK(Put("key", DummyString(size, 'a')));
      ASSERT_EQ(DummyString(size - 1, 'b'), Get("key"));
    }

    // Only 1 instance for that key.
    validateNumberOfEntries(1);
  } while (ChangeCompactOptions());
}
// Like InPlaceUpdateCallbackSmallerSize, but the callback shrinks the value
// enough that its varint length encoding also shrinks (265 -> 1 byte).
TEST(DBTest, InPlaceUpdateCallbackSmallerVarintSize) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    options.inplace_callback = rocksdb::DBTest::updateInPlaceSmallerVarintSize;
    Reopen(&options);

    // Seed the key; the callback stores kNumValues 'c' characters.
    const int kNumValues = 265;
    ASSERT_OK(Put("key", DummyString(kNumValues, 'a')));
    ASSERT_EQ(DummyString(kNumValues, 'c'), Get("key"));

    // Every subsequent update is rewritten by the callback to a single 'b'.
    for (int size = kNumValues; size > 0; size--) {
      ASSERT_OK(Put("key", DummyString(size, 'a')));
      ASSERT_EQ(DummyString(1, 'b'), Get("key"));
    }

    // Only 1 instance for that key.
    validateNumberOfEntries(1);
  } while (ChangeCompactOptions());
}
// The callback requests a larger replacement value, which cannot be written
// in place, so every update becomes a fresh entry with a new seq number.
TEST(DBTest, InPlaceUpdateCallbackLargeNewValue) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    options.inplace_callback = rocksdb::DBTest::updateInPlaceLargerSize;
    Reopen(&options);

    // Update key with values of larger size; the callback stores 'c's.
    const int kNumValues = 10;
    for (int size = 0; size < kNumValues; size++) {
      ASSERT_OK(Put("key", DummyString(size, 'a')));
      ASSERT_EQ(DummyString(size, 'c'), Get("key"));
    }

    // No inplace updates. All updates are puts with new seq number
    // All 10 updates exist in the internal iterator
    validateNumberOfEntries(kNumValues);
  } while (ChangeCompactOptions());
}
// A callback that requests no action means the put is dropped entirely, so
// the key is never stored.
TEST(DBTest, InPlaceUpdateCallbackNoAction) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.inplace_update_support = true;
    options.env = env_;
    options.write_buffer_size = 100000;
    options.inplace_callback = rocksdb::DBTest::updateInPlaceNoAction;
    Reopen(&options);

    // Callback function requests no actions from db
    ASSERT_OK(Put("key", DummyString(1, 'a')));
    ASSERT_EQ(Get("key"), "NOT_FOUND");
  } while (ChangeCompactOptions());
}
// End-to-end exercise of the V1 CompactionFilter interface: a keep-all
// filter must be invoked for every key during manual compactions, and a
// delete-all filter must leave the database completely empty.
TEST(DBTest, CompactionFilter) {
  Options options = CurrentOptions();
  options.max_open_files = -1;
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
  Reopen(&options);

  // Write 100K keys, these are written to a few files in L0.
  const std::string value(10, 'x');
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();

  // Push all files to the highest level L2. Verify that
  // the compaction is each level invokes the filter for
  // all the keys in that level.
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_NE(NumTableFilesAtLevel(2), 0);
  cfilter_count = 0;

  // All the files are in the lowest level.
  // Verify that all but the 100001st record
  // has sequence number zero. The 100001st record
  // is at the tip of this snapshot and cannot
  // be zeroed out.
  // TODO: figure out sequence number squashtoo
  int count = 0;
  int total = 0;
  Iterator* iter = dbfull()->TEST_NewInternalIterator();
  iter->SeekToFirst();
  ASSERT_OK(iter->status());
  while (iter->Valid()) {
    ParsedInternalKey ikey(Slice(), 0, kTypeValue);
    ikey.sequence = -1;  // sentinel; overwritten by ParseInternalKey below
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
    total++;
    if (ikey.sequence != 0) {
      count++;
    }
    iter->Next();
  }
  ASSERT_EQ(total, 100000);
  ASSERT_EQ(count, 1);
  delete iter;

  // overwrite all the 100K keys once again.
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();

  // push all files to the highest level L2. This
  // means that all keys should pass at least once
  // via the compaction filter
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_NE(NumTableFilesAtLevel(2), 0);

  // create a new database with the compaction
  // filter in such a way that it deletes all keys
  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
  options.create_if_missing = true;
  DestroyAndReopen(&options);

  // write all the keys once again.
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%010d", i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();
  ASSERT_NE(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);

  // Push all files to the highest level L2. This
  // triggers the compaction filter to delete all keys,
  // verify that at the end of the compaction process,
  // nothing is left.
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  ASSERT_EQ(cfilter_count, 100000);
  cfilter_count = 0;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  // The L0 compaction dropped every key, so the L1 pass sees none.
  ASSERT_EQ(cfilter_count, 0);
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);

  // Scan the entire database to ensure that nothing is left
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  count = 0;
  while (iter->Valid()) {
    count++;
    iter->Next();
  }
  ASSERT_EQ(count, 0);
  delete iter;

  // The sequence number of the remaining record
  // is not zeroed out even though it is at the
  // level Lmax because this record is at the tip
  // TODO: remove the following or design a different
  // test
  count = 0;
  iter = dbfull()->TEST_NewInternalIterator();
  iter->SeekToFirst();
  ASSERT_OK(iter->status());
  while (iter->Valid()) {
    ParsedInternalKey ikey(Slice(), 0, kTypeValue);
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
    ASSERT_NE(ikey.sequence, (unsigned)0);
    count++;
    iter->Next();
  }
  // Everything was deleted, so the internal scan is empty as well.
  ASSERT_EQ(count, 0);
  delete iter;
}
// A value-changing compaction filter must rewrite every non-snapshot-pinned
// key once it has been pushed through the lower levels twice.
TEST(DBTest, CompactionFilterWithValueChange) {
  do {
    Options options = CurrentOptions();
    options.num_levels = 3;
    options.max_mem_compaction_level = 0;
    options.compaction_filter_factory =
        std::make_shared<ChangeFilterFactory>();
    Reopen(&options);

    // Write 100K+1 keys, these are written to a few files
    // in L0. We do this so that the current snapshot points
    // to the 100001 key.The compaction filter is not invoked
    // on keys that are visible via a snapshot because we
    // anyways cannot delete it.
    const std::string value(10, 'x');
    auto write_all_keys = [&]() {
      for (int i = 0; i < 100001; i++) {
        char key[100];
        snprintf(key, sizeof(key), "B%010d", i);
        Put(key, value);
      }
    };
    auto push_to_bottom = [&]() {
      dbfull()->TEST_FlushMemTable();
      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
      dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    };

    write_all_keys();
    push_to_bottom();

    // Re-write all data and push it down again; this invokes the
    // compaction filter for all 100000 non-pinned keys.
    write_all_keys();
    push_to_bottom();

    // verify that all keys now have the new value that
    // was set by the compaction process.
    for (int i = 0; i < 100001; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      std::string newvalue = Get(key);
      ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
    }
  } while (ChangeCompactOptions());
}
TEST(DBTest, CompactionFilterContextManual) {
KeepFilterFactory* filter = new KeepFilterFactory();
Options options = CurrentOptions();
options.compaction_style = kCompactionStyleUniversal;
options.compaction_filter_factory.reset(filter);
options.compression = kNoCompression;
options.level0_file_num_compaction_trigger = 8;
Reopen(&options);
int num_keys_per_file = 400;
for (int j = 0; j < 3; j++) {
// Write several keys.
const std::string value(10, 'x');
for (int i = 0; i < num_keys_per_file; i++) {
char key[100];
snprintf(key, sizeof(key), "B%08d%02d", i, j);
Put(key, value);
}
dbfull()->TEST_FlushMemTable();
// Make sure next file is much smaller so automatic compaction will not
// be triggered.
num_keys_per_file /= 2;
}
// Force a manual compaction
cfilter_count = 0;
filter->expect_manual_compaction_.store(true);
filter->expect_full_compaction_.store(false); // Manual compaction always
// set this flag.
dbfull()->CompactRange(nullptr, nullptr);
ASSERT_EQ(cfilter_count, 700);
ASSERT_EQ(NumTableFilesAtLevel(0), 1);
// Verify total number of keys is correct after manual compaction.
int count = 0;
int total = 0;
Iterator* iter = dbfull()->TEST_NewInternalIterator();
iter->SeekToFirst();
ASSERT_OK(iter->status());
while (iter->Valid()) {
ParsedInternalKey ikey(Slice(), 0, kTypeValue);
ikey.sequence = -1;
ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
total++;
if (ikey.sequence != 0) {
count++;
}
iter->Next();
}
ASSERT_EQ(total, 700);
ASSERT_EQ(count, 1);
delete iter;
}
// Batched (V2) compaction filter that keeps every key-value pair untouched
// while counting invocations in the global cfilter_count.
class KeepFilterV2 : public CompactionFilterV2 {
 public:
  virtual std::vector<bool> Filter(int level,
                                   const SliceVector& keys,
                                   const SliceVector& existing_values,
                                   std::vector<std::string>* new_values,
                                   std::vector<bool>* values_changed)
      const override {
    cfilter_count++;
    new_values->clear();
    // Nothing is rewritten and nothing is removed.
    values_changed->assign(keys.size(), false);
    return std::vector<bool>(keys.size(), false);
  }

  virtual const char* Name() const override {
    return "KeepFilterV2";
  }
};
// Batched (V2) compaction filter that removes every key-value pair while
// counting invocations in the global cfilter_count.
class DeleteFilterV2 : public CompactionFilterV2 {
 public:
  virtual std::vector<bool> Filter(int level,
                                   const SliceVector& keys,
                                   const SliceVector& existing_values,
                                   std::vector<std::string>* new_values,
                                   std::vector<bool>* values_changed)
      const override {
    cfilter_count++;
    new_values->clear();
    // No value is rewritten, but every key is marked for removal.
    values_changed->assign(keys.size(), false);
    return std::vector<bool>(keys.size(), true);
  }

  virtual const char* Name() const override {
    return "DeleteFilterV2";
  }
};
// Batched (V2) compaction filter that replaces every value with NEW_VALUE
// and removes nothing.
class ChangeFilterV2 : public CompactionFilterV2 {
 public:
  virtual std::vector<bool> Filter(int level,
                                   const SliceVector& keys,
                                   const SliceVector& existing_values,
                                   std::vector<std::string>* new_values,
                                   std::vector<bool>* values_changed)
      const override {
    // Rewrite every value; keep every key.
    new_values->assign(keys.size(), NEW_VALUE);
    values_changed->assign(keys.size(), true);
    return std::vector<bool>(keys.size(), false);
  }

  virtual const char* Name() const override {
    return "ChangeFilterV2";
  }
};
// Factory producing KeepFilterV2 instances for the given prefix extractor.
class KeepFilterFactoryV2 : public CompactionFilterFactoryV2 {
 public:
  explicit KeepFilterFactoryV2(const SliceTransform* prefix_extractor)
      : CompactionFilterFactoryV2(prefix_extractor) {}

  virtual std::unique_ptr<CompactionFilterV2> CreateCompactionFilterV2(
      const CompactionFilterContext& context) override {
    return std::unique_ptr<CompactionFilterV2>(new KeepFilterV2());
  }

  virtual const char* Name() const override { return "KeepFilterFactoryV2"; }
};
// Factory producing DeleteFilterV2 instances for the given prefix extractor.
class DeleteFilterFactoryV2 : public CompactionFilterFactoryV2 {
 public:
  explicit DeleteFilterFactoryV2(const SliceTransform* prefix_extractor)
      : CompactionFilterFactoryV2(prefix_extractor) {}

  virtual std::unique_ptr<CompactionFilterV2> CreateCompactionFilterV2(
      const CompactionFilterContext& context) override {
    return std::unique_ptr<CompactionFilterV2>(new DeleteFilterV2());
  }

  virtual const char* Name() const override { return "DeleteFilterFactoryV2"; }
};
// Factory producing ChangeFilterV2 instances for the given prefix extractor.
class ChangeFilterFactoryV2 : public CompactionFilterFactoryV2 {
 public:
  explicit ChangeFilterFactoryV2(const SliceTransform* prefix_extractor)
      : CompactionFilterFactoryV2(prefix_extractor) {}

  virtual std::unique_ptr<CompactionFilterV2> CreateCompactionFilterV2(
      const CompactionFilterContext& context) override {
    return std::unique_ptr<CompactionFilterV2>(new ChangeFilterV2());
  }

  virtual const char* Name() const override { return "ChangeFilterFactoryV2"; }
};
// Same exercise as CompactionFilter, but through the batched V2 interface
// with an 8-byte fixed-prefix extractor: a keep-all filter leaves all keys
// intact, then a delete-all filter empties the database.
TEST(DBTest, CompactionFilterV2) {
  Options options = CurrentOptions();
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  // extract prefix
  std::unique_ptr<const SliceTransform> prefix_extractor;
  prefix_extractor.reset(NewFixedPrefixTransform(8));
  options.compaction_filter_factory_v2
    = std::make_shared<KeepFilterFactoryV2>(prefix_extractor.get());
  // In a testing environment, we can only flush the application
  // compaction filter buffer using universal compaction
  option_config_ = kUniversalCompaction;
  options.compaction_style = (rocksdb::CompactionStyle)1;
  Reopen(&options);

  // Write 100K keys, these are written to a few files in L0.
  const std::string value(10, 'x');
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%08d%010d", i , i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();

  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(0), 1);

  // All the files are in the lowest level.
  int count = 0;
  int total = 0;
  Iterator* iter = dbfull()->TEST_NewInternalIterator();
  iter->SeekToFirst();
  ASSERT_OK(iter->status());
  while (iter->Valid()) {
    ParsedInternalKey ikey(Slice(), 0, kTypeValue);
    ikey.sequence = -1;  // sentinel; overwritten by ParseInternalKey
    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
    total++;
    if (ikey.sequence != 0) {
      count++;
    }
    iter->Next();
  }
  ASSERT_EQ(total, 100000);
  // 1 snapshot only. Since we are using universal compacton,
  // the sequence no is cleared for better compression
  ASSERT_EQ(count, 1);
  delete iter;

  // create a new database with the compaction
  // filter in such a way that it deletes all keys
  options.compaction_filter_factory_v2 =
    std::make_shared<DeleteFilterFactoryV2>(prefix_extractor.get());
  options.create_if_missing = true;
  DestroyAndReopen(&options);

  // write all the keys once again.
  for (int i = 0; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%08d%010d", i, i);
    Put(key, value);
  }
  dbfull()->TEST_FlushMemTable();
  ASSERT_NE(NumTableFilesAtLevel(0), 0);

  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);

  // Scan the entire database to ensure that nothing is left
  iter = db_->NewIterator(ReadOptions());
  iter->SeekToFirst();
  count = 0;
  while (iter->Valid()) {
    count++;
    iter->Next();
  }
  ASSERT_EQ(count, 0);
  delete iter;
}
// A value-changing V2 filter must rewrite every key after the data is
// pushed through the lower levels.
TEST(DBTest, CompactionFilterV2WithValueChange) {
  Options options = CurrentOptions();
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  std::unique_ptr<const SliceTransform> prefix_extractor;
  prefix_extractor.reset(NewFixedPrefixTransform(8));
  options.compaction_filter_factory_v2 =
      std::make_shared<ChangeFilterFactoryV2>(prefix_extractor.get());
  // In a testing environment, we can only flush the application
  // compaction filter buffer using universal compaction
  option_config_ = kUniversalCompaction;
  options.compaction_style = (rocksdb::CompactionStyle)1;
  Reopen(&options);

  // Load 100K+1 keys so the newest record sits at the snapshot tip.
  const std::string value(10, 'x');
  for (int i = 0; i < 100001; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%08d%010d", i, i);
    Put(key, value);
  }

  // push all files to lower levels
  dbfull()->TEST_FlushMemTable();
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
  dbfull()->TEST_CompactRange(1, nullptr, nullptr);

  // Every key must now hold the value installed by the change filter.
  for (int i = 0; i < 100001; i++) {
    char key[100];
    snprintf(key, sizeof(key), "B%08d%010d", i, i);
    std::string newvalue = Get(key);
    ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
  }
}
// Exercises the V2 filter with keys whose 8-byte prefix is the literal
// "NULL0000" in addition to purely numeric prefixes; the change filter must
// rewrite all of them.
TEST(DBTest, CompactionFilterV2NULLPrefix) {
  Options options = CurrentOptions();
  options.num_levels = 3;
  options.max_mem_compaction_level = 0;
  std::unique_ptr<const SliceTransform> prefix_extractor;
  prefix_extractor.reset(NewFixedPrefixTransform(8));
  options.compaction_filter_factory_v2 =
      std::make_shared<ChangeFilterFactoryV2>(prefix_extractor.get());
  // In a testing environment, we can only flush the application
  // compaction filter buffer using universal compaction
  option_config_ = kUniversalCompaction;
  options.compaction_style = (rocksdb::CompactionStyle)1;
  Reopen(&options);

  // Bracket the numeric keys with two "NULL0000"-prefixed keys so the filter
  // also sees a prefix group of exactly two entries.
  const std::string value(10, 'x');
  char first_key[100];
  snprintf(first_key, sizeof(first_key), "%s0000%010d", "NULL", 1);
  Put(first_key, value);
  for (int i = 1; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "%08d%010d", i, i);
    Put(key, value);
  }
  char last_key[100];
  snprintf(last_key, sizeof(last_key), "%s0000%010d", "NULL", 2);
  Put(last_key, value);

  // push all files to lower levels
  dbfull()->TEST_FlushMemTable();
  dbfull()->TEST_CompactRange(0, nullptr, nullptr);

  // All keys, including the "NULL"-prefixed pair, must carry the value the
  // compaction filter installed.
  std::string newvalue = Get(first_key);
  ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
  newvalue = Get(last_key);
  ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
  for (int i = 1; i < 100000; i++) {
    char key[100];
    snprintf(key, sizeof(key), "%08d%010d", i, i);
    std::string numeric_value = Get(key);
    ASSERT_EQ(numeric_value.compare(NEW_VALUE), 0);
  }
}
// After sparse updates across a large "B" key range, compactions must never
// create a file that overlaps more than ~20MB of data at the next level.
TEST(DBTest, SparseMerge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen(&options);

    FillLevels("A", "Z");

    // Suppose there is:
    //    small amount of data with prefix A
    //    large amount of data with prefix B
    //    small amount of data with prefix C
    // and that recent updates have made small changes to all three prefixes.
    // Check that we do not do a compaction that merges all of B in one shot.
    const std::string value(1000, 'x');
    Put("A", "va");
    // Write approximately 100MB of "B" values
    for (int i = 0; i < 100000; i++) {
      char key[100];
      snprintf(key, sizeof(key), "B%010d", i);
      Put(key, value);
    }
    Put("C", "vc");
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);

    // Make sparse update
    Put("A", "va2");
    Put("B100", "bvalue2");
    Put("C", "vc2");
    dbfull()->TEST_FlushMemTable();

    // Compactions should not cause us to create a situation where
    // a file overlaps too much data at the next level.
    auto assert_bounded_overlap = [&]() {
      ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
    };
    assert_bounded_overlap();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    assert_bounded_overlap();
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    assert_bounded_overlap();
  } while (ChangeCompactOptions());
}
// Returns true iff `val` lies in the inclusive range [low, high]; logs the
// out-of-range value to stderr so the failing ASSERT has context.
static bool Between(uint64_t val, uint64_t low, uint64_t high) {
  if (val < low || val > high) {
    fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n",
            (unsigned long long)(val),
            (unsigned long long)(low),
            (unsigned long long)(high));
    return false;
  }
  return true;
}
// Checks GetApproximateSizes() accuracy while data migrates from the
// memtable into table files, across several reopen + partial-compaction
// cycles.
TEST(DBTest, ApproximateSizes) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;        // Large write buffer
    options.compression = kNoCompression;
    DestroyAndReopen();

    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));
    Reopen(&options);
    ASSERT_TRUE(Between(Size("", "xyz"), 0, 0));

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    const int N = 80;
    static const int S1 = 100000;
    static const int S2 = 105000;       // Allow some expansion from metadata
    Random rnd(301);
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), RandomString(&rnd, S1)));
    }

    // 0 because GetApproximateSizes() does not account for memtable space
    ASSERT_TRUE(Between(Size("", Key(50)), 0, 0));

    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);

      for (int compact_start = 0; compact_start < N; compact_start += 10) {
        for (int i = 0; i < N; i += 10) {
          // The size up to Key(i) should be roughly i values' worth of
          // data, within the S1..S2 per-value metadata slack.
          ASSERT_TRUE(Between(Size("", Key(i)), S1*i, S2*i));
          ASSERT_TRUE(Between(Size("", Key(i)+".suffix"), S1*(i+1), S2*(i+1)));
          ASSERT_TRUE(Between(Size(Key(i), Key(i+10)), S1*10, S2*10));
        }
        ASSERT_TRUE(Between(Size("", Key(50)), S1*50, S2*50));
        ASSERT_TRUE(Between(Size("", Key(50)+".suffix"), S1*50, S2*50));

        // Compact one 10-key slice per pass so the estimates are checked
        // while files migrate between levels.
        std::string cstart_str = Key(compact_start);
        std::string cend_str = Key(compact_start + 9);
        Slice cstart = cstart_str;
        Slice cend = cend_str;
        dbfull()->TEST_CompactRange(0, &cstart, &cend);
      }

      ASSERT_EQ(NumTableFilesAtLevel(0), 0);
      ASSERT_GT(NumTableFilesAtLevel(1), 0);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipPlainTable));
}
// Same as ApproximateSizes but with values of very different sizes, so the
// per-key offsets are non-uniform. The expected windows below are the
// cumulative value sizes (keys 0..7: 10K, 10K, 100K, 10K, 100K, 10K, 300K,
// 10K) plus some slack for metadata.
TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
  do {
    Options options = CurrentOptions();
    options.compression = kNoCompression;
    Reopen();
    Random rnd(301);
    std::string big1 = RandomString(&rnd, 100000);
    ASSERT_OK(Put(Key(0), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(1), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(2), big1));
    ASSERT_OK(Put(Key(3), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(4), big1));
    ASSERT_OK(Put(Key(5), RandomString(&rnd, 10000)));
    ASSERT_OK(Put(Key(6), RandomString(&rnd, 300000)));
    ASSERT_OK(Put(Key(7), RandomString(&rnd, 10000)));
    // Check sizes across recovery by reopening a few times
    for (int run = 0; run < 3; run++) {
      Reopen(&options);
      ASSERT_TRUE(Between(Size("", Key(0)), 0, 0));
      ASSERT_TRUE(Between(Size("", Key(1)), 10000, 11000));
      ASSERT_TRUE(Between(Size("", Key(2)), 20000, 21000));
      ASSERT_TRUE(Between(Size("", Key(3)), 120000, 121000));
      ASSERT_TRUE(Between(Size("", Key(4)), 130000, 131000));
      ASSERT_TRUE(Between(Size("", Key(5)), 230000, 231000));
      ASSERT_TRUE(Between(Size("", Key(6)), 240000, 241000));
      ASSERT_TRUE(Between(Size("", Key(7)), 540000, 541000));
      ASSERT_TRUE(Between(Size("", Key(8)), 550000, 560000));
      ASSERT_TRUE(Between(Size(Key(3), Key(5)), 110000, 111000));
      // Estimates must hold both before and after compaction.
      dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    }
    // ApproximateOffsetOf() is not yet implemented in plain table format.
  } while (ChangeOptions(kSkipPlainTable));
}
// An iterator must pin the DB state it was created over: writes and
// compactions performed after NewIterator() must not be visible through it.
TEST(DBTest, IteratorPinsRef) {
  do {
    Put("foo", "hello");
    // Get iterator that will yield the current contents of the DB.
    Iterator* iter = db_->NewIterator(ReadOptions());
    // Write to force compactions
    Put("foo", "newvalue1");
    for (int i = 0; i < 100; i++) {
      ASSERT_OK(Put(Key(i), Key(i) + std::string(100000, 'v')));  // 100K values
    }
    Put("foo", "newvalue2");
    // The iterator still sees exactly the one pre-existing entry.
    iter->SeekToFirst();
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ("foo", iter->key().ToString());
    ASSERT_EQ("hello", iter->value().ToString());
    iter->Next();
    ASSERT_TRUE(!iter->Valid());
    delete iter;
  } while (ChangeCompactOptions());
}
// Each snapshot must keep returning the value that was current when it was
// taken, independent of later writes and of releasing other snapshots.
TEST(DBTest, Snapshot) {
  do {
    Put("foo", "v1");
    const Snapshot* s1 = db_->GetSnapshot();
    Put("foo", "v2");
    const Snapshot* s2 = db_->GetSnapshot();
    Put("foo", "v3");
    const Snapshot* s3 = db_->GetSnapshot();
    Put("foo", "v4");
    // All snapshots plus the live view observe their own versions.
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v3", Get("foo", s3));
    ASSERT_EQ("v4", Get("foo"));
    // Releasing one snapshot must not disturb the others.
    db_->ReleaseSnapshot(s3);
    ASSERT_EQ("v1", Get("foo", s1));
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s1);
    ASSERT_EQ("v2", Get("foo", s2));
    ASSERT_EQ("v4", Get("foo"));
    db_->ReleaseSnapshot(s2);
    ASSERT_EQ("v4", Get("foo"));
  } while (ChangeOptions());
}
// A value that is only kept alive by a snapshot must be dropped by
// compaction once that snapshot is released.
TEST(DBTest, HiddenValuesAreRemoved) {
  do {
    Random rnd(301);
    FillLevels("a", "z");
    std::string big = RandomString(&rnd, 50000);
    Put("foo", big);
    Put("pastfoo", "v");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "tiny");
    Put("pastfoo2", "v2");  // Advance sequence number one more
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_GT(NumTableFilesAtLevel(0), 0);
    // While the snapshot lives, the 50K value is retained and counted.
    ASSERT_EQ(big, Get("foo", snapshot));
    ASSERT_TRUE(Between(Size("", "pastfoo"), 50000, 60000));
    db_->ReleaseSnapshot(snapshot);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny, " + big + " ]");
    Slice x("x");
    // First compaction (level 0) drops the now-obsolete big value.
    dbfull()->TEST_CompactRange(0, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_GE(NumTableFilesAtLevel(1), 1);
    dbfull()->TEST_CompactRange(1, nullptr, &x);
    ASSERT_EQ(AllEntriesFor("foo"), "[ tiny ]");
    // Range size should now be tiny: the 50K value is physically gone.
    ASSERT_TRUE(Between(Size("", "pastfoo"), 0, 1000));
    // ApproximateOffsetOf() is not yet implemented in plain table format,
    // which is used by Size().
  } while (ChangeOptions(kSkipUniversalCompaction | kSkipPlainTable));
}
// Compaction must keep exactly one value per live snapshot (the newest value
// visible to that snapshot) plus the latest value, and drop the rest.
TEST(DBTest, CompactBetweenSnapshots) {
  do {
    Random rnd(301);
    FillLevels("a", "z");
    Put("foo", "first");
    const Snapshot* snapshot1 = db_->GetSnapshot();
    Put("foo", "second");
    Put("foo", "third");
    Put("foo", "fourth");
    const Snapshot* snapshot2 = db_->GetSnapshot();
    Put("foo", "fifth");
    Put("foo", "sixth");
    // All entries (including duplicates) exist
    // before any compaction is triggered.
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ("first", Get("foo", snapshot1));
    ASSERT_EQ(AllEntriesFor("foo"),
              "[ sixth, fifth, fourth, third, second, first ]");
    // After a compaction, "second", "third" and "fifth" should
    // be removed
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ("first", Get("foo", snapshot1));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth, first ]");
    // after we release the snapshot1, only two values left
    db_->ReleaseSnapshot(snapshot1);
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);
    // We have only one valid snapshot snapshot2. Since snapshot1 is
    // not valid anymore, "first" should be removed by a compaction.
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ("fourth", Get("foo", snapshot2));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth, fourth ]");
    // after we release the snapshot2, only one value should be left
    db_->ReleaseSnapshot(snapshot2);
    FillLevels("a", "z");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ("sixth", Get("foo"));
    ASSERT_EQ(AllEntriesFor("foo"), "[ sixth ]");
  } while (ChangeOptions());
}
// A deletion marker shadowed by a newer Put can be dropped during
// compaction even when overlapping files remain at deeper levels.
// NOTE(review): assumes MaxMemCompactionLevel() >= 2 so that last-2 is a
// valid level -- matches the harness configuration used elsewhere here.
TEST(DBTest, DeletionMarkers1) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  const int last = dbfull()->MaxMemCompactionLevel();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_FlushMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
  Delete("foo");
  Put("foo", "v2");
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
  // Whether the DEL survives the flush depends on this option.
  if (CurrentOptions().purge_redundant_kvs_while_flush) {
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  } else {
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
  }
  Slice z("z");
  dbfull()->TEST_CompactRange(last-2, nullptr, &z);
  // DEL eliminated, but v1 remains because we aren't compacting that level
  // (DEL can be eliminated because v2 hides v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed. (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");
}
// Unlike DeletionMarkers1, here the DEL is NOT shadowed by a newer Put, so
// it must be kept as long as an older version may exist at a deeper level.
TEST(DBTest, DeletionMarkers2) {
  Put("foo", "v1");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());
  const int last = dbfull()->MaxMemCompactionLevel();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo => v1 is now in last level
  // Place a table at level last-1 to prevent merging with preceding mutation
  Put("a", "begin");
  Put("z", "end");
  dbfull()->TEST_FlushMemTable();
  ASSERT_EQ(NumTableFilesAtLevel(last), 1);
  ASSERT_EQ(NumTableFilesAtLevel(last-1), 1);
  Delete("foo");
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  ASSERT_OK(dbfull()->TEST_FlushMemTable());  // Moves to level last-2
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-2, nullptr, nullptr);
  // DEL kept: "last" file overlaps
  ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v1 ]");
  dbfull()->TEST_CompactRange(last-1, nullptr, nullptr);
  // Merging last-1 w/ last, so we are the base level for "foo", so
  // DEL is removed. (as is v1).
  ASSERT_EQ(AllEntriesFor("foo"), "[ ]");
}
// Regression test: a memtable flush must notice overlap with existing
// level-0 files and must not push a deletion past the data it deletes.
TEST(DBTest, OverlapInLevel0) {
  do {
    int tmp = dbfull()->MaxMemCompactionLevel();
    ASSERT_EQ(tmp, 2) << "Fix test to match config";
    // Fill levels 1 and 2 to disable the pushing of new memtables to
    // levels > 0.
    ASSERT_OK(Put("100", "v100"));
    ASSERT_OK(Put("999", "v999"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Delete("100"));
    ASSERT_OK(Delete("999"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("0,1,1", FilesPerLevel());
    // Make files spanning the following ranges in level-0:
    //  files[0]  200 .. 900
    //  files[1]  300 .. 500
    // Note that files are sorted by smallest key.
    ASSERT_OK(Put("300", "v300"));
    ASSERT_OK(Put("500", "v500"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_OK(Put("200", "v200"));
    ASSERT_OK(Put("600", "v600"));
    ASSERT_OK(Put("900", "v900"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("2,1,1", FilesPerLevel());
    // Compact away the placeholder files we created initially
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    dbfull()->TEST_CompactRange(2, nullptr, nullptr);
    ASSERT_EQ("2", FilesPerLevel());
    // Do a memtable compaction.  Before bug-fix, the compaction would
    // not detect the overlap with level-0 files and would incorrectly place
    // the deletion in a deeper level.
    ASSERT_OK(Delete("600"));
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("3", FilesPerLevel());
    ASSERT_EQ("NOT_FOUND", Get("600"));
  } while (ChangeOptions(kSkipUniversalCompaction));
}
// Regression test for leveldb issue 44 (case a): a specific sequence of
// puts/deletes interleaved with reopens must not resurrect deleted keys
// after background compaction runs.
TEST(DBTest, L0_CompactionBug_Issue44_a) {
  do {
    Reopen();
    ASSERT_OK(Put("b", "v"));
    Reopen();
    ASSERT_OK(Delete("b"));
    ASSERT_OK(Delete("a"));
    Reopen();
    ASSERT_OK(Delete("a"));
    Reopen();
    ASSERT_OK(Put("a", "v"));
    Reopen();
    Reopen();
    // Contents must be stable before and after compaction finishes.
    ASSERT_EQ("(a->v)", Contents());
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    ASSERT_EQ("(a->v)", Contents());
  } while (ChangeCompactOptions());
}
// Regression test for leveldb issue 44 (case b): same class of bug as
// case a, exercised with empty keys/values and more reopen churn.
TEST(DBTest, L0_CompactionBug_Issue44_b) {
  do {
    Reopen();
    Put("", "");
    Reopen();
    Delete("e");
    Put("", "");
    Reopen();
    Put("c", "cv");
    Reopen();
    Put("", "");
    Reopen();
    Put("", "");
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    Reopen();
    Put("d", "dv");
    Reopen();
    Put("", "");
    Reopen();
    Delete("d");
    Delete("b");
    Reopen();
    // Only the empty key and "c" survive; result must be stable across
    // a further compaction pass.
    ASSERT_EQ("(->)(c->cv)", Contents());
    env_->SleepForMicroseconds(1000000);  // Wait for compaction to finish
    ASSERT_EQ("(->)(c->cv)", Contents());
  } while (ChangeCompactOptions());
}
// Opening an existing DB with a comparator whose Name() differs from the
// one the DB was created with must fail with an error mentioning
// "comparator".
TEST(DBTest, ComparatorCheck) {
  // Behaves exactly like the bytewise comparator but reports a different
  // name, which is what the open-time check keys on.
  class NewComparator : public Comparator {
   public:
    virtual const char* Name() const { return "rocksdb.NewComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return BytewiseComparator()->Compare(a, b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      BytewiseComparator()->FindShortestSeparator(s, l);
    }
    virtual void FindShortSuccessor(std::string* key) const {
      BytewiseComparator()->FindShortSuccessor(key);
    }
  };
  Options new_options;
  NewComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.comparator = &cmp;
    Status s = TryReopen(&new_options);
    ASSERT_TRUE(!s.ok());
    ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
        << s.ToString();
  } while (ChangeCompactOptions(&new_options));
}
// End-to-end use of a custom comparator: keys are "[<number>]" strings
// ordered numerically, so e.g. "[10]" and "[0xa]" are the same key.
TEST(DBTest, CustomComparator) {
  class NumberComparator : public Comparator {
   public:
    virtual const char* Name() const { return "test.NumberComparator"; }
    virtual int Compare(const Slice& a, const Slice& b) const {
      return ToNumber(a) - ToNumber(b);
    }
    virtual void FindShortestSeparator(std::string* s, const Slice& l) const {
      ToNumber(*s);  // Check format
      ToNumber(l);   // Check format
    }
    virtual void FindShortSuccessor(std::string* key) const {
      ToNumber(*key);  // Check format
    }

   private:
    // Parses "[<number>]" (decimal or 0x-hex via %i). The sscanf result
    // must be exactly 1: a trailing %c matching would mean extra
    // characters after the closing bracket.
    static int ToNumber(const Slice& x) {
      // Check that there are no extra characters.
      ASSERT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size()-1] == ']')
          << EscapeString(x);
      int val;
      char ignored;
      ASSERT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1)
          << EscapeString(x);
      return val;
    }
  };
  Options new_options;
  NumberComparator cmp;
  do {
    new_options = CurrentOptions();
    new_options.create_if_missing = true;
    new_options.comparator = &cmp;
    new_options.filter_policy = nullptr;   // Cannot use bloom filters
    new_options.write_buffer_size = 1000;  // Compact more often
    DestroyAndReopen(&new_options);
    ASSERT_OK(Put("[10]", "ten"));
    ASSERT_OK(Put("[0x14]", "twenty"));
    for (int i = 0; i < 2; i++) {
      // Decimal and hex spellings of the same number are the same key.
      ASSERT_EQ("ten", Get("[10]"));
      ASSERT_EQ("ten", Get("[0xa]"));
      ASSERT_EQ("twenty", Get("[20]"));
      ASSERT_EQ("twenty", Get("[0x14]"));
      ASSERT_EQ("NOT_FOUND", Get("[15]"));
      ASSERT_EQ("NOT_FOUND", Get("[0xf]"));
      Compact("[0]", "[9999]");
    }
    // Bulk-load enough keys to force multiple compactions under the
    // custom ordering.
    for (int run = 0; run < 2; run++) {
      for (int i = 0; i < 1000; i++) {
        char buf[100];
        snprintf(buf, sizeof(buf), "[%d]", i*10);
        ASSERT_OK(Put(buf, buf));
      }
      Compact("[0]", "[1000000]");
    }
  } while (ChangeCompactOptions(&new_options));
}
// Manual CompactRange(): ranges that miss all files are no-ops, ranges that
// overlap push files to the bottom level. Runs once with 7 levels and once
// with 3 (the DestroyAndReopen at the end of iteration 0 switches configs).
TEST(DBTest, ManualCompaction) {
  ASSERT_EQ(dbfull()->MaxMemCompactionLevel(), 2)
      << "Need to update this test to match kMaxMemCompactLevel";
  // iter - 0 with 7 levels
  // iter - 1 with 3 levels
  for (int iter = 0; iter < 2; ++iter) {
    MakeTables(3, "p", "q");
    ASSERT_EQ("1,1,1", FilesPerLevel());
    // Compaction range falls before files
    Compact("", "c");
    ASSERT_EQ("1,1,1", FilesPerLevel());
    // Compaction range falls after files
    Compact("r", "z");
    ASSERT_EQ("1,1,1", FilesPerLevel());
    // Compaction range overlaps files
    Compact("p1", "p9");
    ASSERT_EQ("0,0,1", FilesPerLevel());
    // Populate a different range
    MakeTables(3, "c", "e");
    ASSERT_EQ("1,1,2", FilesPerLevel());
    // Compact just the new range
    Compact("b", "f");
    ASSERT_EQ("0,0,2", FilesPerLevel());
    // Compact all
    MakeTables(1, "a", "z");
    ASSERT_EQ("0,1,2", FilesPerLevel());
    db_->CompactRange(nullptr, nullptr);
    ASSERT_EQ("0,0,1", FilesPerLevel());
    if (iter == 0) {
      // Switch to the 3-level configuration for the second iteration.
      Options options = CurrentOptions();
      options.num_levels = 3;
      options.create_if_missing = true;
      DestroyAndReopen(&options);
    }
  }
}
// Exercises the create_if_missing / error_if_exists combinations of
// DB::Open against a missing and then an existing database.
TEST(DBTest, DBOpen_Options) {
  std::string dbname = test::TmpDir() + "/db_options_test";
  ASSERT_OK(DestroyDB(dbname, Options()));

  DB* db = nullptr;
  // Small helper: open `dbname` with the given flag combination.
  auto open_with = [&](bool create_if_missing, bool error_if_exists) {
    Options opts;
    opts.create_if_missing = create_if_missing;
    opts.error_if_exists = error_if_exists;
    return DB::Open(opts, dbname, &db);
  };

  // Does not exist, and create_if_missing == false: error
  Status s = open_with(false, false);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does not exist, and create_if_missing == true: OK
  s = open_with(true, false);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  delete db;
  db = nullptr;

  // Does exist, and error_if_exists == true: error
  s = open_with(false, true);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr);
  ASSERT_TRUE(db == nullptr);

  // Does exist, and error_if_exists == false: OK
  s = open_with(true, false);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  delete db;
  db = nullptr;
}
// Reopening a database with fewer levels than its data already occupies
// must fail with an "Invalid argument" error.
TEST(DBTest, DBOpen_Change_NumLevels) {
  std::string dbname = test::TmpDir() + "/db_change_num_levels";
  ASSERT_OK(DestroyDB(dbname, Options()));

  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = true;
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);

  // Put some data and compact so table files land below level 0.
  db->Put(WriteOptions(), "a", "123");
  db->Put(WriteOptions(), "b", "234");
  db->CompactRange(nullptr, nullptr);
  delete db;
  db = nullptr;

  // Now try to reopen with only two levels.
  opts.create_if_missing = false;
  opts.num_levels = 2;
  s = DB::Open(opts, dbname, &db);
  ASSERT_TRUE(strstr(s.ToString().c_str(), "Invalid argument") != nullptr);
  ASSERT_TRUE(db == nullptr);
}
// DestroyDB on a database must also destroy its meta databases,
// recursively (meta database of the meta database included).
TEST(DBTest, DestroyDBMetaDatabase) {
  std::string dbname = test::TmpDir() + "/db_meta";
  std::string metadbname = MetaDatabaseName(dbname, 0);
  std::string metametadbname = MetaDatabaseName(metadbname, 0);

  // Destroy previous versions if they exist, innermost first.
  ASSERT_OK(DestroyDB(metametadbname, Options()));
  ASSERT_OK(DestroyDB(metadbname, Options()));
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Create the database and both levels of meta database.
  Options opts;
  opts.create_if_missing = true;
  DB* db = nullptr;
  for (const std::string& name : {dbname, metadbname, metametadbname}) {
    ASSERT_OK(DB::Open(opts, name, &db));
    delete db;
    db = nullptr;
  }

  // Destroying the primary database alone must take the meta databases
  // down with it.
  ASSERT_OK(DestroyDB(dbname, Options()));

  // Verify all three are gone: opening without create_if_missing fails.
  opts.create_if_missing = false;
  ASSERT_TRUE(!(DB::Open(opts, dbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(opts, metadbname, &db)).ok());
  ASSERT_TRUE(!(DB::Open(opts, metametadbname, &db)).ok());
}
// Check that number of files does not grow when we are out of space
// When the env reports out-of-space, compactions must fail gracefully:
// the background-error counter is bumped, retries back off with sleeps,
// and the number of files does not grow unboundedly.
TEST(DBTest, NoSpace) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    ASSERT_EQ("v1", Get("foo"));
    Compact("a", "z");
    const int num_files = CountFiles();
    env_->no_space_.Release_Store(env_);  // Force out-of-space errors
    env_->sleep_counter_.Reset();
    // Attempt 5 rounds of manual compaction over every level.
    for (int i = 0; i < 5; i++) {
      for (int level = 0; level < dbfull()->NumberLevels()-1; level++) {
        dbfull()->TEST_CompactRange(level, nullptr, nullptr);
      }
    }
    std::string property_value;
    // Each failed round registers exactly one background error.
    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
    ASSERT_EQ("5", property_value);
    env_->no_space_.Release_Store(nullptr);
    // File count must stay bounded despite the failed attempts.
    ASSERT_LT(CountFiles(), num_files + 3);
    // Check that compaction attempts slept after errors
    ASSERT_GE(env_->sleep_counter_.Read(), 5);
  } while (ChangeCompactOptions());
}
// Check background error counter bumped on flush failures.
// A failed background flush (out-of-space) must bump the
// "rocksdb.background-errors" property from 0 to 1.
TEST(DBTest, NoSpaceFlush) {
  do {
    Options options = CurrentOptions();
    options.env = env_;
    options.max_background_flushes = 1;
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    env_->no_space_.Release_Store(env_);  // Force out-of-space errors
    std::string property_value;
    // Background error count is 0 now.
    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
    ASSERT_EQ("0", property_value);
    // Trigger a non-blocking flush; it will fail in the background.
    dbfull()->TEST_FlushMemTable(false);
    // Wait 300 milliseconds or background-errors turned 1 from 0.
    int time_to_sleep_limit = 300000;
    while (time_to_sleep_limit > 0) {
      // Poll in 1ms slices so we notice the error promptly.
      int to_sleep = (time_to_sleep_limit > 1000) ? 1000 : time_to_sleep_limit;
      time_to_sleep_limit -= to_sleep;
      env_->SleepForMicroseconds(to_sleep);
      ASSERT_TRUE(
          db_->GetProperty("rocksdb.background-errors", &property_value));
      if (property_value == "1") {
        break;
      }
    }
    ASSERT_EQ("1", property_value);
    env_->no_space_.Release_Store(nullptr);
  } while (ChangeCompactOptions());
}
// With new-file creation forced to fail, writes that need a new log or
// table file must surface errors rather than succeed silently.
TEST(DBTest, NonWritableFileSystem) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 1000;  // tiny buffer => frequent new files
    options.env = env_;
    Reopen(&options);
    ASSERT_OK(Put("foo", "v1"));
    // Force errors for new files
    env_->non_writable_.Release_Store(env_);
    const std::string big(100000, 'x');
    int failures = 0;
    for (int attempt = 0; attempt < 20; attempt++) {
      if (Put("foo", big).ok()) {
        continue;
      }
      failures++;
      env_->SleepForMicroseconds(100000);
    }
    ASSERT_GT(failures, 0);
    env_->non_writable_.Release_Store(nullptr);
  } while (ChangeCompactOptions());
}
// Test for the following problem:
// (a) Compaction produces file F
// (b) Log record containing F is written to MANIFEST file, but Sync() fails
// (c) GC deletes F
// (d) After reopening DB, reads fail since deleted F is named in log record
TEST(DBTest, ManifestWriteError) {
  // We iterate twice.  In the second iteration, everything is the
  // same except the log record never makes it to the MANIFEST file.
  for (int iter = 0; iter < 2; iter++) {
    // iter 0 injects a MANIFEST sync failure, iter 1 a write failure.
    port::AtomicPointer* error_type = (iter == 0)
        ? &env_->manifest_sync_error_
        : &env_->manifest_write_error_;
    // Insert foo=>bar mapping
    Options options = CurrentOptions();
    options.env = env_;
    options.create_if_missing = true;
    options.error_if_exists = false;
    DestroyAndReopen(&options);
    ASSERT_OK(Put("foo", "bar"));
    ASSERT_EQ("bar", Get("foo"));
    // Memtable compaction (will succeed)
    dbfull()->TEST_FlushMemTable();
    ASSERT_EQ("bar", Get("foo"));
    const int last = dbfull()->MaxMemCompactionLevel();
    ASSERT_EQ(NumTableFilesAtLevel(last), 1);  // foo=>bar is now in last level
    // Merging compaction (will fail)
    error_type->Release_Store(env_);
    dbfull()->TEST_CompactRange(last, nullptr, nullptr);  // Should fail
    // The failed compaction must not have corrupted the visible state.
    ASSERT_EQ("bar", Get("foo"));
    // Recovery: should not lose data
    error_type->Release_Store(nullptr);
    Reopen(&options);
    ASSERT_EQ("bar", Get("foo"));
  }
}
// Test the following:
// (a) A random put fails in paranoid mode (simulate by sync fail)
// (b) All other puts have to fail, even if writes would succeed
// (c) All of that should happen ONLY if paranoid_checks = true
TEST(DBTest, PutFailsParanoid) {
  Options options = CurrentOptions();
  options.env = env_;
  options.create_if_missing = true;
  options.error_if_exists = false;
  options.paranoid_checks = true;
  DestroyAndReopen(&options);
  Status s;
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Put("foo1", "bar1"));
  // simulate error
  env_->log_write_error_.Release_Store(env_);
  s = Put("foo2", "bar2");
  ASSERT_TRUE(!s.ok());
  env_->log_write_error_.Release_Store(nullptr);
  s = Put("foo3", "bar3");
  // the next put should fail, too: paranoid mode latches the error even
  // though the injected failure has been cleared.
  ASSERT_TRUE(!s.ok());
  // but we're still able to read
  ASSERT_EQ("bar", Get("foo"));

  // do the same thing with paranoid checks off
  options.paranoid_checks = false;
  DestroyAndReopen(&options);
  ASSERT_OK(Put("foo", "bar"));
  ASSERT_OK(Put("foo1", "bar1"));
  // simulate error
  env_->log_write_error_.Release_Store(env_);
  s = Put("foo2", "bar2");
  ASSERT_TRUE(!s.ok());
  env_->log_write_error_.Release_Store(nullptr);
  s = Put("foo3", "bar3");
  // the next put should NOT fail
  ASSERT_TRUE(s.ok());
}
// After repeated overwrite + compaction cycles, the number of live files
// must stay constant: obsolete files have to be cleaned up each cycle.
TEST(DBTest, FilesDeletedAfterCompaction) {
  do {
    ASSERT_OK(Put("foo", "v2"));
    Compact("a", "z");
    const int baseline = CountLiveFiles();
    for (int cycle = 0; cycle < 10; cycle++) {
      ASSERT_OK(Put("foo", "v2"));
      Compact("a", "z");
    }
    ASSERT_EQ(CountLiveFiles(), baseline);
  } while (ChangeCompactOptions());
}
// Bloom filters should let point lookups skip sstables that cannot contain
// the key: present keys cost ~1 read each, missing keys almost none.
TEST(DBTest, BloomFilter) {
  do {
    env_->count_random_reads_ = true;
    Options options = CurrentOptions();
    options.env = env_;
    options.no_block_cache = true;  // every read hits the env, so counts are exact
    options.filter_policy = NewBloomFilterPolicy(10);
    Reopen(&options);

    // Populate multiple layers
    const int N = 10000;
    for (int i = 0; i < N; i++) {
      ASSERT_OK(Put(Key(i), Key(i)));
    }
    Compact("a", "z");
    for (int i = 0; i < N; i += 100) {
      ASSERT_OK(Put(Key(i), Key(i)));
    }
    dbfull()->TEST_FlushMemTable();

    // Prevent auto compactions triggered by seeks
    env_->delay_sstable_sync_.Release_Store(env_);

    // Lookup present keys.  Should rarely read from small sstable.
    env_->random_read_counter_.Reset();
    for (int i = 0; i < N; i++) {
      ASSERT_EQ(Key(i), Get(Key(i)));
    }
    int reads = env_->random_read_counter_.Read();
    fprintf(stderr, "%d present => %d reads\n", N, reads);
    ASSERT_GE(reads, N);
    ASSERT_LE(reads, N + 2*N/100);

    // Lookup missing keys.  Should rarely read from either sstable.
    env_->random_read_counter_.Reset();
    for (int i = 0; i < N; i++) {
      ASSERT_EQ("NOT_FOUND", Get(Key(i) + ".missing"));
    }
    reads = env_->random_read_counter_.Read();
    fprintf(stderr, "%d missing => %d reads\n", N, reads);
    ASSERT_LE(reads, 3*N/100);

    env_->delay_sstable_sync_.Release_Store(nullptr);
    Close();
    delete options.filter_policy;
  } while (ChangeCompactOptions());
}
// Backup-via-GetLiveFiles: take a file-level snapshot of the DB, copy the
// listed files to a new directory, mutate the original DB, then verify the
// copy opens and still holds the pre-mutation data.
//
// Bug fix: the two "release file snapshot" sites previously called
// DisableFileDeletions() a second time instead of EnableFileDeletions(),
// so file deletions were never re-enabled after the snapshot was taken.
TEST(DBTest, SnapshotFiles) {
  do {
    Options options = CurrentOptions();
    options.write_buffer_size = 100000000;  // Large write buffer
    Reopen(&options);
    Random rnd(301);

    // Write 8MB (80 values, each 100K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    std::vector<std::string> values;
    for (int i = 0; i < 80; i++) {
      values.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put(Key(i), values[i]));
    }

    // assert that nothing makes it to disk yet.
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);

    // get a file snapshot
    uint64_t manifest_number = 0;
    uint64_t manifest_size = 0;
    std::vector<std::string> files;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(files, &manifest_size);

    // CURRENT, MANIFEST, *.sst files
    ASSERT_EQ(files.size(), 3U);

    uint64_t number = 0;
    FileType type;

    // copy these files to a new snapshot directory
    std::string snapdir = dbname_ + ".snapdir/";
    std::string mkdir = "mkdir -p " + snapdir;
    ASSERT_EQ(system(mkdir.c_str()), 0);

    for (unsigned int i = 0; i < files.size(); i++) {
      // our clients require that GetLiveFiles returns
      // files with "/" as first character!
      ASSERT_EQ(files[i][0], '/');
      std::string src = dbname_ + files[i];
      std::string dest = snapdir + files[i];

      uint64_t size;
      ASSERT_OK(env_->GetFileSize(src, &size));

      // record the number and the size of the
      // latest manifest file
      if (ParseFileName(files[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > manifest_number) {
            manifest_number = number;
            ASSERT_GE(size, manifest_size);
            size = manifest_size;  // copy only valid MANIFEST data
          }
        }
      }
      CopyFile(src, dest, size);
    }

    // release file snapshot (was DisableFileDeletions(): deletions stayed
    // disabled forever)
    dbfull()->EnableFileDeletions();

    // overwrite one key, this key should not appear in the snapshot
    std::vector<std::string> extras;
    for (unsigned int i = 0; i < 1; i++) {
      extras.push_back(RandomString(&rnd, 100000));
      ASSERT_OK(Put(Key(i), extras[i]));
    }

    // verify that data in the snapshot are correct
    Options opts;
    DB* snapdb;
    opts.create_if_missing = false;
    Status stat = DB::Open(opts, snapdir, &snapdb);
    ASSERT_OK(stat);

    ReadOptions roptions;
    std::string val;
    for (unsigned int i = 0; i < 80; i++) {
      stat = snapdb->Get(roptions, Key(i), &val);
      ASSERT_EQ(values[i].compare(val), 0);
    }
    delete snapdb;

    // look at the new live files after we added an 'extra' key
    // and after we took the first snapshot.
    uint64_t new_manifest_number = 0;
    uint64_t new_manifest_size = 0;
    std::vector<std::string> newfiles;
    dbfull()->DisableFileDeletions();
    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);

    // find the new manifest file. assert that this manifest file is
    // the same one as in the previous snapshot. But its size should be
    // larger because we added an extra key after taking the
    // previous shapshot.
    for (unsigned int i = 0; i < newfiles.size(); i++) {
      std::string src = dbname_ + "/" + newfiles[i];
      // record the lognumber and the size of the
      // latest manifest file
      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
        if (type == kDescriptorFile) {
          if (number > new_manifest_number) {
            uint64_t size;
            new_manifest_number = number;
            ASSERT_OK(env_->GetFileSize(src, &size));
            ASSERT_GE(size, new_manifest_size);
          }
        }
      }
    }
    ASSERT_EQ(manifest_number, new_manifest_number);
    ASSERT_GT(new_manifest_size, manifest_size);

    // release file snapshot (see fix note above)
    dbfull()->EnableFileDeletions();
  } while (ChangeCompactOptions());
}
// With purge_redundant_kvs_while_flush enabled (and auto compactions off),
// flushing the memtable should already drop redundant entries; a manual
// CompactRange then removes whatever the flush had to keep.
TEST(DBTest, CompactOnFlush) {
  do {
    Options options = CurrentOptions();
    options.purge_redundant_kvs_while_flush = true;
    options.disable_auto_compactions = true;
    Reopen(&options);
    Put("foo", "v1");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v1 ]");

    // Write two new keys
    Put("a", "begin");
    Put("z", "end");
    dbfull()->TEST_FlushMemTable();

    // Case1: Delete followed by a put
    Delete("foo");
    Put("foo", "v2");
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, DEL, v1 ]");
    // After the current memtable is flushed, the DEL should
    // have been removed
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2, v1 ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ v2 ]");

    // Case 2: Delete followed by another delete
    Delete("foo");
    Delete("foo");
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, DEL, v2 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v2 ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 3: Put followed by a delete
    Put("foo", "v3");
    Delete("foo");
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL, v3 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ DEL ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 4: Put followed by another Put
    Put("foo", "v4");
    Put("foo", "v5");
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5, v4 ]");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ v5 ]");

    // clear database
    Delete("foo");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 5: Put followed by snapshot followed by another Put
    // Both puts should remain.
    Put("foo", "v6");
    const Snapshot* snapshot = db_->GetSnapshot();
    Put("foo", "v7");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v7, v6 ]");
    db_->ReleaseSnapshot(snapshot);

    // clear database
    Delete("foo");
    dbfull()->CompactRange(nullptr, nullptr);
    ASSERT_EQ(AllEntriesFor("foo"), "[ ]");

    // Case 6: snapshot followed by a put followed by another Put
    // Only the last put should remain.
    const Snapshot* snapshot1 = db_->GetSnapshot();
    Put("foo", "v8");
    Put("foo", "v9");
    ASSERT_OK(dbfull()->TEST_FlushMemTable());
    ASSERT_EQ(AllEntriesFor("foo"), "[ v9 ]");
    db_->ReleaseSnapshot(snapshot1);
  } while (ChangeCompactOptions());
}
// Collects the file numbers of all live WAL files (kLogFile) found directly
// in `path`. Entries whose names do not parse, or that are not log files,
// are ignored. Returns the numbers in directory-listing order.
std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
  std::vector<std::string> files;
  std::vector<uint64_t> log_files;
  env->GetChildren(path, &files);
  uint64_t number;
  FileType type;
  for (size_t i = 0; i < files.size(); ++i) {
    if (ParseFileName(files[i], &number, &type) && type == kLogFile) {
      log_files.push_back(number);
    }
  }
  // Plain return: NRVO/implicit move applies. The previous
  // `return std::move(log_files);` pessimized copy elision
  // (-Wpessimizing-move).
  return log_files;
}
// TEST : Create DB with a ttl and no size limit.
// Put some keys. Count the log files present in the DB just after insert.
// Re-open db. Causes deletion/archival to take place.
// Assert that the files moved under "/archive".
// Reopen db with small ttl.
// Assert that archive was removed.
TEST(DBTest, WALArchivalTtl) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.WAL_ttl_seconds = 1000;  // generous ttl: nothing expires yet
    DestroyAndReopen(&options);

    std::string archiveDir = ArchivalDirectory(dbname_);

    for (int i = 0; i < 10; ++i) {
      for (int j = 0; j < 10; ++j) {
        ASSERT_OK(Put(Key(10 * i + j), DummyString(1024)));
      }

      // Logs live in the DB dir before the reopen...
      std::vector<uint64_t> log_files = ListLogFiles(env_, dbname_);

      options.create_if_missing = false;
      Reopen(&options);

      // ...and must all show up in the archive dir afterwards.
      std::vector<uint64_t> logs = ListLogFiles(env_, archiveDir);
      std::set<uint64_t> archivedFiles(logs.begin(), logs.end());

      for (auto& log : log_files) {
        ASSERT_TRUE(archivedFiles.find(log) != archivedFiles.end());
      }
    }

    std::vector<uint64_t> log_files = ListLogFiles(env_, archiveDir);
    ASSERT_TRUE(log_files.size() > 0);

    // Shrink the ttl, wait past it, and reopen: the archive is purged.
    options.WAL_ttl_seconds = 1;
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    Reopen(&options);

    log_files = ListLogFiles(env_, archiveDir);
    ASSERT_TRUE(log_files.empty());
  } while (ChangeCompactOptions());
}
uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
uint64_t dir_size = 0;
std::vector<std::string> files;
env->GetChildren(dir_path, &files);
for (auto& f : files) {
uint64_t number;
FileType type;
if (ParseFileName(f, &number, &type) && type == kLogFile) {
std::string const file_path = dir_path + "/" + f;
uint64_t file_size;
env->GetFileSize(file_path, &file_size);
dir_size += file_size;
}
}
return dir_size;
}
// TEST : Create DB with huge size limit and no ttl.
// Put some keys. Count the archived log files present in the DB
// just after insert. Assert that there are many enough.
// Change size limit. Re-open db.
// Assert that archive is not greater than WAL_size_limit_MB.
// Set ttl and time_to_check_ to small values. Re-open db.
// Assert that there are no archived logs left.
TEST(DBTest, WALArchivalSizeLimit) {
  do {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.WAL_ttl_seconds = 0;
    options.WAL_size_limit_MB = 1000;

    DestroyAndReopen(&options);
    // ~16MB of data, enough to produce several WAL files.
    for (int i = 0; i < 128 * 128; ++i) {
      ASSERT_OK(Put(Key(i), DummyString(1024)));
    }
    Reopen(&options);

    std::string archive_dir = ArchivalDirectory(dbname_);
    std::vector<std::uint64_t> log_files = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(log_files.size() > 2);

    // Tighten the size limit and force a purge pass.
    options.WAL_size_limit_MB = 8;
    Reopen(&options);
    // (sic: "Obsoletete" is the actual TEST_ API name.)
    dbfull()->TEST_PurgeObsoleteteWAL();

    uint64_t archive_size = GetLogDirSize(archive_dir, env_);
    ASSERT_TRUE(archive_size <= options.WAL_size_limit_MB * 1024 * 1024);

    // Now expire everything via a tiny ttl.
    options.WAL_ttl_seconds = 1;
    dbfull()->TEST_SetDefaultTimeToCheck(1);
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    Reopen(&options);
    dbfull()->TEST_PurgeObsoleteteWAL();

    log_files = ListLogFiles(env_, archive_dir);
    ASSERT_TRUE(log_files.empty());
  } while (ChangeCompactOptions());
}
// Drains `iter`, storing the number of batches read into `count` and
// asserting that batch sequence numbers are strictly increasing and that
// the iterator status stays OK. Returns the sequence number of the last
// batch read.
// NOTE(review): if the iterator is never valid, the return value comes
// from a default-constructed BatchResult -- presumably sequence 0; confirm
// against BatchResult's definition.
SequenceNumber ReadRecords(
    std::unique_ptr<TransactionLogIterator>& iter,
    int& count) {
  count = 0;
  SequenceNumber lastSequence = 0;
  BatchResult res;
  while (iter->Valid()) {
    res = iter->GetBatch();
    ASSERT_TRUE(res.sequence > lastSequence);
    ++count;
    lastSequence = res.sequence;
    ASSERT_OK(iter->status());
    iter->Next();
  }
  return res.sequence;
}
// Drains `iter` and asserts it yielded exactly `expected_no_records` batches.
void ExpectRecords(
    const int expected_no_records,
    std::unique_ptr<TransactionLogIterator>& iter) {
  int num_records;
  ReadRecords(iter, num_records);
  ASSERT_EQ(num_records, expected_no_records);
}
// Basic TransactionLogIterator behavior: all writes are visible from
// sequence 0, both before and after a reopen.
TEST(DBTest, TransactionLogIterator) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));
    Put("key2", DummyString(1024));
    Put("key2", DummyString(1024));
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
    {
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(3, iter);
    }
    Reopen(&options);
    // Write three more records after the reopen (and a short sleep so the
    // new WAL gets a distinct timestamp).
    env_->SleepForMicroseconds(2 * 1000 * 1000);
    {
      Put("key4", DummyString(1024));
      Put("key5", DummyString(1024));
      Put("key6", DummyString(1024));
    }
    {
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(6, iter);
    }
  } while (ChangeCompactOptions());
}
// Regression test: a log file being moved to the archive dir in the middle
// of GetSortedWalFiles must not cause the iterator to drop records.
TEST(DBTest, TransactionLogIteratorRace) {
  // Setup sync point dependency to reproduce the race condition of
  // a log file moved to archived dir, in the middle of GetSortedWalFiles
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
    { { "DBImpl::GetSortedWalFiles:1", "DBImpl::PurgeObsoleteFiles:1" },
      { "DBImpl::PurgeObsoleteFiles:2", "DBImpl::GetSortedWalFiles:2" },
    });
  do {
    // Reset sync-point state for each options configuration.
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    // Each Put+Flush produces a separate WAL file.
    Put("key1", DummyString(1024));
    dbfull()->Flush(FlushOptions());
    Put("key2", DummyString(1024));
    dbfull()->Flush(FlushOptions());
    Put("key3", DummyString(1024));
    dbfull()->Flush(FlushOptions());
    Put("key4", DummyString(1024));
    ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U);
    {
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(4, iter);
    }
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
    // trigger async flush, and log move. Well, log move will
    // wait until the GetSortedWalFiles:1 to reproduce the race
    // condition
    FlushOptions flush_options;
    flush_options.wait = false;
    dbfull()->Flush(flush_options);
    // "key5" would be written in a new memtable and log
    Put("key5", DummyString(1024));
    {
      // this iter would miss "key4" if not fixed
      auto iter = OpenTransactionLogIter(0);
      ExpectRecords(5, iter);
    }
  } while (ChangeCompactOptions());
}
// The iterator must skip over WAL files that contain zero records
// (created here by back-to-back reopens with no intervening writes).
TEST(DBTest, TransactionLogIteratorMoveOverZeroFiles) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    // Do a plain Reopen.
    Put("key1", DummyString(1024));
    // Two reopens should create a zero record WAL file.
    Reopen(&options);
    Reopen(&options);
    Put("key2", DummyString(1024));
    auto iter = OpenTransactionLogIter(0);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
// TODO(kailiu) Disable this test on non-Linux platforms to temporarily work
// around the unit test failure there.
#ifdef OS_LINUX
// After the iterator has been exhausted, a subsequent write followed by
// Next() must make the iterator valid again (tailing behavior), rather than
// leaving it permanently at end.
TEST(DBTest, TransactionLogIteratorStallAtLastRecord) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));
    auto iter = OpenTransactionLogIter(0);
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
    iter->Next();
    // Exhausted: one record written, one consumed.
    ASSERT_TRUE(!iter->Valid());
    ASSERT_OK(iter->status());
    Put("key2", DummyString(1024));
    iter->Next();
    ASSERT_OK(iter->status());
    ASSERT_TRUE(iter->Valid());
  } while (ChangeCompactOptions());
}
#endif
// GetUpdatesSince on a freshly created, empty DB must succeed and return an
// iterator that is immediately at end.
TEST(DBTest, TransactionLogIteratorJustEmptyFile) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    unique_ptr<TransactionLogIterator> iter;
    Status status = dbfull()->GetUpdatesSince(0, &iter);
    // FIX: `status` was previously computed but never checked; a failed call
    // would have left `iter` null and crashed the Valid() check below.
    ASSERT_OK(status);
    // Check that an empty iterator is returned
    ASSERT_TRUE(!iter->Valid());
  } while (ChangeCompactOptions());
}
// Records written and flushed before a restart must still be readable from
// the transaction log after Reopen.
TEST(DBTest, TransactionLogIteratorCheckAfterRestart) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    Put("key1", DummyString(1024));
    Put("key2", DummyString(1023));
    dbfull()->Flush(FlushOptions());
    Reopen(&options);
    auto iter = OpenTransactionLogIter(0);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
// A truncated (corrupted) WAL file must make the iterator stop at the
// corruption gap rather than fail, and seeking past the gap must still work.
// NOTE: uses POSIX truncate(), hence effectively Unix-only.
TEST(DBTest, TransactionLogIteratorCorruptedLog) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    for (int i = 0; i < 1024; i++) {
      Put("key"+std::to_string(i), DummyString(10));
    }
    dbfull()->Flush(FlushOptions());
    // Corrupt this log to create a gap
    rocksdb::VectorLogPtr wal_files;
    ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
    const auto logfilePath = dbname_ + "/" + wal_files.front()->PathName();
    // Chop the first WAL in half, destroying the tail records.
    ASSERT_EQ(
      0,
      truncate(logfilePath.c_str(), wal_files.front()->SizeFileBytes() / 2));
    // Insert a new entry to a new log file
    Put("key1025", DummyString(10));
    // Try to read from the beginning. Should stop before the gap and read less
    // than 1025 entries
    auto iter = OpenTransactionLogIter(0);
    int count;
    // NOTE(review): SequenceNumber is narrowed into an int here -- fine for
    // this test's small sequence range, but worth confirming.
    int last_sequence_read = ReadRecords(iter, count);
    ASSERT_LT(last_sequence_read, 1025);
    // Try to read past the gap, should be able to seek to key1025
    auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
    ExpectRecords(1, iter2);
  } while (ChangeCompactOptions());
}
// Iterating from a sequence number inside a write batch returns that whole
// batch plus subsequent writes: starting at 3 yields the 4-op batch and the
// later single Put (2 records total).
TEST(DBTest, TransactionLogIteratorBatchOperations) {
  do {
    Options options = OptionsForLogIterTest();
    DestroyAndReopen(&options);
    WriteBatch batch;
    batch.Put("key1", DummyString(1024));
    batch.Put("key2", DummyString(1024));
    batch.Put("key3", DummyString(1024));
    batch.Delete("key2");
    dbfull()->Write(WriteOptions(), &batch);
    dbfull()->Flush(FlushOptions());
    Reopen(&options);
    Put("key4", DummyString(1024));
    auto iter = OpenTransactionLogIter(3);
    ExpectRecords(2, iter);
  } while (ChangeCompactOptions());
}
// PutLogData blobs must survive the round trip through the WAL and be
// replayed in their original position inside the batch.
TEST(DBTest, TransactionLogIteratorBlobs) {
  Options options = OptionsForLogIterTest();
  DestroyAndReopen(&options);
  {
    WriteBatch batch;
    batch.Put("key1", DummyString(1024));
    batch.Put("key2", DummyString(1024));
    batch.PutLogData(Slice("blob1"));
    batch.Put("key3", DummyString(1024));
    batch.PutLogData(Slice("blob2"));
    batch.Delete("key2");
    dbfull()->Write(WriteOptions(), &batch);
    Reopen(&options);
  }
  auto res = OpenTransactionLogIter(0)->GetBatch();
  // Records each batch operation as a string so the exact order and payload
  // sizes can be asserted below.
  struct Handler : public WriteBatch::Handler {
    std::string seen;
    virtual void Put(const Slice& key, const Slice& value) {
      seen += "Put(" + key.ToString() + ", " + std::to_string(value.size()) +
          ")";
    }
    virtual void Merge(const Slice& key, const Slice& value) {
      seen += "Merge(" + key.ToString() + ", " + std::to_string(value.size()) +
          ")";
    }
    virtual void LogData(const Slice& blob) {
      seen += "LogData(" + blob.ToString() + ")";
    }
    virtual void Delete(const Slice& key) {
      seen += "Delete(" + key.ToString() + ")";
    }
  } handler;
  res.writeBatchPtr->Iterate(&handler);
  ASSERT_EQ("Put(key1, 1024)"
            "Put(key2, 1024)"
            "LogData(blob1)"
            "Put(key3, 1024)"
            "LogData(blob2)"
            "Delete(key2)", handler.seen);
}
// Verifies that repeated reads trigger a seek/read compaction: after many
// Get() calls, the file count must drop in at least one of levels 0-2.
TEST(DBTest, ReadCompaction) {
  std::string value(4096, '4');  // a string of size 4K
  {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    options.max_open_files = 20;  // only 10 file in file-cache
    options.target_file_size_base = 512;
    options.write_buffer_size = 64 * 1024;
    options.filter_policy = nullptr;
    options.block_size = 4096;
    options.no_block_cache = true;
    options.disable_seek_compaction = false;
    Reopen(&options);
    // Write 8MB (2000 values, each 4K)
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    std::vector<std::string> values;
    for (int i = 0; i < 2000; i++) {
      ASSERT_OK(Put(Key(i), value));
    }
    // clear level 0 and 1 if necessary.
    dbfull()->TEST_FlushMemTable();
    dbfull()->TEST_CompactRange(0, nullptr, nullptr);
    dbfull()->TEST_CompactRange(1, nullptr, nullptr);
    ASSERT_EQ(NumTableFilesAtLevel(0), 0);
    ASSERT_EQ(NumTableFilesAtLevel(1), 0);
    // write some new keys into level 0
    for (int i = 0; i < 2000; i = i + 16) {
      ASSERT_OK(Put(Key(i), value));
    }
    dbfull()->Flush(FlushOptions());
    // Wait for any write compaction to finish
    dbfull()->TEST_WaitForCompact();
    // Remember the number of files in each level before the reads.
    // FIX: the last snapshot previously read level 3 (`NumTableFilesAtLevel(3)`)
    // even though the final assertion compares against level 2; the locals are
    // also renamed to match the level they actually hold.
    int l0 = NumTableFilesAtLevel(0);
    int l1 = NumTableFilesAtLevel(1);
    int l2 = NumTableFilesAtLevel(2);
    ASSERT_NE(NumTableFilesAtLevel(0), 0);
    ASSERT_NE(NumTableFilesAtLevel(1), 0);
    ASSERT_NE(NumTableFilesAtLevel(2), 0);
    // read a bunch of times, trigger read compaction
    for (int j = 0; j < 100; j++) {
      for (int i = 0; i < 2000; i++) {
        Get(Key(i));
      }
    }
    // wait for read compaction to finish
    env_->SleepForMicroseconds(1000000);
    // verify that the number of files have decreased
    // in some level, indicating that there was a compaction
    ASSERT_TRUE(NumTableFilesAtLevel(0) < l0 ||
                NumTableFilesAtLevel(1) < l1 ||
                NumTableFilesAtLevel(2) < l2);
  }
}
// Multi-threaded test:
namespace {

// Multi-threaded smoke test parameters.
static const int kNumThreads = 4;
static const int kTestSeconds = 10;
static const int kNumKeys = 1000;

// State shared between the test driver and its worker threads. AtomicPointer
// is used as a lock-free flag/counter primitive (pre-C++11 atomics).
struct MTState {
  DBTest* test;
  port::AtomicPointer stop;                      // non-null => workers should exit
  port::AtomicPointer counter[kNumThreads];      // ops completed, per thread
  port::AtomicPointer thread_done[kNumThreads];  // non-null => thread finished
};

// Per-thread argument handed to MTThreadBody.
struct MTThread {
  MTState* state;
  int id;
};

// Worker body: until `stop` is set, randomly either writes a value of the
// form "<key>.<writer id>.<counter>" or reads a key and validates that the
// embedded counter does not exceed the writer thread's published counter.
static void MTThreadBody(void* arg) {
  MTThread* t = reinterpret_cast<MTThread*>(arg);
  int id = t->id;
  DB* db = t->state->test->db_;
  uintptr_t counter = 0;
  fprintf(stderr, "... starting thread %d\n", id);
  Random rnd(1000 + id);
  std::string value;
  char valbuf[1500];
  while (t->state->stop.Acquire_Load() == nullptr) {
    // Publish progress before each op so readers can bound what they see.
    t->state->counter[id].Release_Store(reinterpret_cast<void*>(counter));
    int key = rnd.Uniform(kNumKeys);
    char keybuf[20];
    snprintf(keybuf, sizeof(keybuf), "%016d", key);
    if (rnd.OneIn(2)) {
      // Write values of the form <key, my id, counter>.
      // We add some padding for force compactions.
      snprintf(valbuf, sizeof(valbuf), "%d.%d.%-1000d",
               key, id, static_cast<int>(counter));
      ASSERT_OK(t->state->test->Put(Slice(keybuf), Slice(valbuf)));
    } else {
      // Read a value and verify that it matches the pattern written above.
      Status s = db->Get(ReadOptions(), Slice(keybuf), &value);
      if (s.IsNotFound()) {
        // Key has not yet been written
      } else {
        // Check that the writer thread counter is >= the counter in the value
        ASSERT_OK(s);
        int k, w, c;
        ASSERT_EQ(3, sscanf(value.c_str(), "%d.%d.%d", &k, &w, &c)) << value;
        ASSERT_EQ(k, key);
        ASSERT_GE(w, 0);
        ASSERT_LT(w, kNumThreads);
        ASSERT_LE((unsigned int)c, reinterpret_cast<uintptr_t>(
            t->state->counter[w].Acquire_Load()));
      }
    }
    counter++;
  }
  // Signal completion (any non-null value works as the flag).
  t->state->thread_done[id].Release_Store(t);
  fprintf(stderr, "... stopping thread %d after %d ops\n", id, int(counter));
}

}  // namespace
// Launches kNumThreads readers/writers against the same DB for kTestSeconds
// and relies on the assertions inside MTThreadBody for validation.
TEST(DBTest, MultiThreaded) {
  do {
    // Initialize state
    MTState mt;
    mt.test = this;
    mt.stop.Release_Store(0);
    for (int id = 0; id < kNumThreads; id++) {
      mt.counter[id].Release_Store(0);
      mt.thread_done[id].Release_Store(0);
    }
    // Start threads
    MTThread thread[kNumThreads];
    for (int id = 0; id < kNumThreads; id++) {
      thread[id].state = &mt;
      thread[id].id = id;
      env_->StartThread(MTThreadBody, &thread[id]);
    }
    // Let them run for a while
    env_->SleepForMicroseconds(kTestSeconds * 1000000);
    // Stop the threads and wait for them to finish
    mt.stop.Release_Store(&mt);
    for (int id = 0; id < kNumThreads; id++) {
      // Busy-poll each thread's done flag (set at the end of MTThreadBody).
      while (mt.thread_done[id].Acquire_Load() == nullptr) {
        env_->SleepForMicroseconds(100000);
      }
    }
  } while (ChangeOptions());
}
// Group commit test:
namespace {

// Group-commit test parameters.
static const int kGCNumThreads = 4;
static const int kGCNumKeys = 1000;

// Per-thread argument/state for GCThreadBody.
struct GCThread {
  DB* db;
  int id;
  std::atomic<bool> done;  // set by the worker when all its Puts completed
};

// Worker body: writes kGCNumKeys keys from this thread's disjoint numeric
// range (id * kGCNumKeys .. id * kGCNumKeys + kGCNumKeys - 1), key == value.
static void GCThreadBody(void* arg) {
  GCThread* t = reinterpret_cast<GCThread*>(arg);
  int id = t->id;
  DB* db = t->db;
  WriteOptions wo;
  for (int i = 0; i < kGCNumKeys; ++i) {
    std::string kv(std::to_string(i + id * kGCNumKeys));
    ASSERT_OK(db->Put(wo, kv, kv));
  }
  t->done = true;
}

}  // namespace
// Concurrent writers exercise the group-commit path; afterwards the DB must
// contain exactly the union of all threads' keys, in sorted order.
TEST(DBTest, GroupCommitTest) {
  do {
    // Start threads
    GCThread thread[kGCNumThreads];
    for (int id = 0; id < kGCNumThreads; id++) {
      thread[id].id = id;
      thread[id].db = db_;
      thread[id].done = false;
      env_->StartThread(GCThreadBody, &thread[id]);
    }
    // Busy-poll until every writer has finished.
    for (int id = 0; id < kGCNumThreads; id++) {
      while (thread[id].done == false) {
        env_->SleepForMicroseconds(100000);
      }
    }
    // Build the expected key set: all integers written by all threads,
    // sorted lexicographically (string order, not numeric order).
    std::vector<std::string> expected_db;
    for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
      expected_db.push_back(std::to_string(i));
    }
    sort(expected_db.begin(), expected_db.end());
    Iterator* itr = db_->NewIterator(ReadOptions());
    itr->SeekToFirst();
    for (auto x : expected_db) {
      ASSERT_TRUE(itr->Valid());
      ASSERT_EQ(itr->key().ToString(), x);
      ASSERT_EQ(itr->value().ToString(), x);
      itr->Next();
    }
    ASSERT_TRUE(!itr->Valid());
    delete itr;
  } while (ChangeOptions());
}
namespace {
// Backing store type for the in-memory ModelDB below.
typedef std::map<std::string, std::string> KVMap;
}
// An in-memory reference implementation of the DB interface backed by a
// std::map, used by the Randomized test to cross-check the real DB. Reads are
// only supported through iterators/snapshots; point Get/MultiGet are
// deliberately unsupported. Most other DB entry points are inert stubs.
class ModelDB: public DB {
 public:
  // A snapshot is simply a full copy of the map at snapshot time.
  class ModelSnapshot : public Snapshot {
   public:
    KVMap map_;
  };

  explicit ModelDB(const Options& options): options_(options) { }
  // Single-op mutations funnel through DB's default WriteBatch-based helpers,
  // which end up in Write() below.
  virtual Status Put(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Put(o, k, v);
  }
  virtual Status Merge(const WriteOptions& o, const Slice& k, const Slice& v) {
    return DB::Merge(o, k, v);
  }
  virtual Status Delete(const WriteOptions& o, const Slice& key) {
    return DB::Delete(o, key);
  }
  // Point lookups are not modeled; the Randomized test compares via iterators.
  virtual Status Get(const ReadOptions& options,
                     const Slice& key, std::string* value) {
    return Status::NotSupported(key);
  }
  virtual std::vector<Status> MultiGet(const ReadOptions& options,
                                       const std::vector<Slice>& keys,
                                       std::vector<std::string>* values) {
    std::vector<Status> s(keys.size(),
                          Status::NotSupported("Not implemented."));
    return s;
  }
  virtual Status GetPropertiesOfAllTables(TablePropertiesCollection* props) {
    return Status();
  }
  // Conservative stub: always claims the key may exist.
  virtual bool KeyMayExist(const ReadOptions& options,
                           const Slice& key,
                           std::string* value,
                           bool* value_found = nullptr) {
    if (value_found != nullptr) {
      *value_found = false;
    }
    return true; // Not Supported directly
  }
  // Without a snapshot, iterate over a private copy of the current map (the
  // iterator owns and frees it); with a snapshot, iterate its stored map.
  virtual Iterator* NewIterator(const ReadOptions& options) {
    if (options.snapshot == nullptr) {
      KVMap* saved = new KVMap;
      *saved = map_;
      return new ModelIter(saved, true);
    } else {
      const KVMap* snapshot_state =
          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
      return new ModelIter(snapshot_state, false);
    }
  }
  virtual const Snapshot* GetSnapshot() {
    ModelSnapshot* snapshot = new ModelSnapshot;
    snapshot->map_ = map_;
    return snapshot;
  }
  virtual void ReleaseSnapshot(const Snapshot* snapshot) {
    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
  }
  // Applies a WriteBatch to the map via the batch-replay handler.
  virtual Status Write(const WriteOptions& options, WriteBatch* batch) {
    class Handler : public WriteBatch::Handler {
     public:
      KVMap* map_;
      virtual void Put(const Slice& key, const Slice& value) {
        (*map_)[key.ToString()] = value.ToString();
      }
      virtual void Merge(const Slice& key, const Slice& value) {
        // ignore merge for now
        //(*map_)[key.ToString()] = value.ToString();
      }
      virtual void Delete(const Slice& key) {
        map_->erase(key.ToString());
      }
    };
    Handler handler;
    handler.map_ = &map_;
    return batch->Iterate(&handler);
  }
  virtual bool GetProperty(const Slice& property, std::string* value) {
    return false;
  }
  virtual void GetApproximateSizes(const Range* r, int n, uint64_t* sizes) {
    for (int i = 0; i < n; i++) {
      sizes[i] = 0;
    }
  }
  virtual Status CompactRange(const Slice* start, const Slice* end,
                              bool reduce_level, int target_level) {
    return Status::NotSupported("Not supported operation.");
  }
  virtual int NumberLevels()
  {
    return 1;
  }
  virtual int MaxMemCompactionLevel()
  {
    return 1;
  }
  virtual int Level0StopWriteTrigger()
  {
    return -1;
  }
  virtual const std::string& GetName() const {
    return name_;
  }
  virtual Env* GetEnv() const {
    return nullptr;
  }
  virtual const Options& GetOptions() const {
    return options_;
  }
  // Remaining DB entry points are no-op stubs returning success/defaults.
  virtual Status Flush(const rocksdb::FlushOptions& options) {
    Status ret;
    return ret;
  }
  virtual Status DisableFileDeletions() {
    return Status::OK();
  }
  virtual Status EnableFileDeletions(bool force) {
    return Status::OK();
  }
  virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size,
                              bool flush_memtable = true) {
    return Status::OK();
  }
  virtual Status GetSortedWalFiles(VectorLogPtr& files) {
    return Status::OK();
  }
  virtual Status DeleteFile(std::string name) {
    return Status::OK();
  }
  virtual Status GetDbIdentity(std::string& identity) {
    return Status::OK();
  }
  virtual SequenceNumber GetLatestSequenceNumber() const {
    return 0;
  }
  virtual Status GetUpdatesSince(
      rocksdb::SequenceNumber, unique_ptr<rocksdb::TransactionLogIterator>*,
      const TransactionLogIterator::ReadOptions&
          read_options = TransactionLogIterator::ReadOptions()) {
    return Status::NotSupported("Not supported in Model DB");
  }

 private:
  // Iterator over a KVMap; optionally owns (and deletes) the map.
  class ModelIter: public Iterator {
   public:
    ModelIter(const KVMap* map, bool owned)
        : map_(map), owned_(owned), iter_(map_->end()) {
    }
    ~ModelIter() {
      if (owned_) delete map_;
    }
    virtual bool Valid() const { return iter_ != map_->end(); }
    virtual void SeekToFirst() { iter_ = map_->begin(); }
    virtual void SeekToLast() {
      if (map_->empty()) {
        iter_ = map_->end();
      } else {
        // Re-find the last key to get a forward iterator positioned on it.
        iter_ = map_->find(map_->rbegin()->first);
      }
    }
    virtual void Seek(const Slice& k) {
      iter_ = map_->lower_bound(k.ToString());
    }
    virtual void Next() { ++iter_; }
    virtual void Prev() { --iter_; }
    virtual Slice key() const { return iter_->first; }
    virtual Slice value() const { return iter_->second; }
    virtual Status status() const { return Status::OK(); }
   private:
    const KVMap* const map_;
    const bool owned_;  // Do we own map_
    KVMap::const_iterator iter_;
  };
  const Options options_;
  KVMap map_;
  std::string name_ = "";
};
// Produces a random test key of length >= `minimum`. Lengths are biased
// toward very short keys (to encourage collisions), with an occasional
// skewed long length. RNG call order matches the original exactly.
static std::string RandomKey(Random* rnd, int minimum = 0) {
  int len = 0;
  for (;;) {
    if (rnd->OneIn(3)) {
      len = 1;  // Short sometimes to encourage collisions
    } else if (rnd->OneIn(100)) {
      len = rnd->Skewed(10);
    } else {
      len = rnd->Uniform(10);
    }
    if (len >= minimum) {
      break;
    }
  }
  return test::RandomKey(rnd, len);
}
// Walks both DBs' iterators in lockstep (optionally under the given
// snapshots) and reports whether they agree on every key/value pair and on
// where iteration ends. Returns true iff the two iterations match.
static bool CompareIterators(int step,
                             DB* model,
                             DB* db,
                             const Snapshot* model_snap,
                             const Snapshot* db_snap) {
  ReadOptions options;
  options.snapshot = model_snap;
  Iterator* miter = model->NewIterator(options);
  options.snapshot = db_snap;
  Iterator* dbiter = db->NewIterator(options);
  bool ok = true;
  int count = 0;
  for (miter->SeekToFirst(), dbiter->SeekToFirst();
       ok && miter->Valid() && dbiter->Valid();
       miter->Next(), dbiter->Next()) {
    count++;
    if (miter->key().compare(dbiter->key()) != 0) {
      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n",
              step,
              EscapeString(miter->key()).c_str(),
              EscapeString(dbiter->key()).c_str());
      ok = false;
      break;
    }
    if (miter->value().compare(dbiter->value()) != 0) {
      // FIX: the second value printed was miter->value() again, so the
      // diagnostic always showed two identical strings; print dbiter's value.
      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
              step,
              EscapeString(miter->key()).c_str(),
              EscapeString(miter->value()).c_str(),
              EscapeString(dbiter->value()).c_str());
      ok = false;
    }
  }
  if (ok) {
    // Both iterators must be exhausted at the same time.
    if (miter->Valid() != dbiter->Valid()) {
      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
              step, miter->Valid(), dbiter->Valid());
      ok = false;
    }
  }
  delete miter;
  delete dbiter;
  return ok;
}
// Randomized differential test: applies the same random Put/Delete/batch
// stream to both the real DB and the in-memory ModelDB, periodically
// comparing full iterations (live state, saved snapshots, and post-reopen).
TEST(DBTest, Randomized) {
  Random rnd(test::RandomSeed());
  do {
    ModelDB model(CurrentOptions());
    const int N = 10000;
    const Snapshot* model_snap = nullptr;
    const Snapshot* db_snap = nullptr;
    std::string k, v;
    for (int step = 0; step < N; step++) {
      // TODO(sanjay): Test Get() works
      int p = rnd.Uniform(100);
      // Prefix-based memtable configs cannot handle zero-length keys.
      int minimum = 0;
      if (option_config_ == kHashSkipList ||
          option_config_ == kHashLinkList ||
          option_config_ == kPlainTableFirstBytePrefix) {
        minimum = 1;
      }
      if (p < 45) {  // Put
        k = RandomKey(&rnd, minimum);
        v = RandomString(&rnd,
                         rnd.OneIn(20)
                         ? 100 + rnd.Uniform(100)
                         : rnd.Uniform(8));
        ASSERT_OK(model.Put(WriteOptions(), k, v));
        ASSERT_OK(db_->Put(WriteOptions(), k, v));
      } else if (p < 90) {  // Delete
        k = RandomKey(&rnd, minimum);
        ASSERT_OK(model.Delete(WriteOptions(), k));
        ASSERT_OK(db_->Delete(WriteOptions(), k));
      } else {  // Multi-element batch
        WriteBatch b;
        const int num = rnd.Uniform(8);
        for (int i = 0; i < num; i++) {
          if (i == 0 || !rnd.OneIn(10)) {
            k = RandomKey(&rnd, minimum);
          } else {
            // Periodically re-use the same key from the previous iter, so
            // we have multiple entries in the write batch for the same key
          }
          if (rnd.OneIn(2)) {
            v = RandomString(&rnd, rnd.Uniform(10));
            b.Put(k, v);
          } else {
            b.Delete(k);
          }
        }
        ASSERT_OK(model.Write(WriteOptions(), &b));
        ASSERT_OK(db_->Write(WriteOptions(), &b));
      }
      if ((step % 100) == 0) {
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
        // Save a snapshot from each DB this time that we'll use next
        // time we compare things, to make sure the current state is
        // preserved with the snapshot
        if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
        if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
        Reopen();
        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
        model_snap = model.GetSnapshot();
        db_snap = db_->GetSnapshot();
      }
    }
    if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
    if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
  } while (ChangeOptions(kSkipDeletesFilterFirst));
}
// MultiGet over a mix of live keys, a deleted key, and a never-written key:
// per-key statuses and values must line up positionally with `keys`.
TEST(DBTest, MultiGetSimple) {
  do {
    ASSERT_OK(db_->Put(WriteOptions(),"k1","v1"));
    ASSERT_OK(db_->Put(WriteOptions(),"k2","v2"));
    ASSERT_OK(db_->Put(WriteOptions(),"k3","v3"));
    ASSERT_OK(db_->Put(WriteOptions(),"k4","v4"));
    ASSERT_OK(db_->Delete(WriteOptions(),"k4"));
    ASSERT_OK(db_->Put(WriteOptions(),"k5","v5"));
    ASSERT_OK(db_->Delete(WriteOptions(),"no_key"));
    std::vector<Slice> keys(6);
    keys[0] = "k1";
    keys[1] = "k2";
    keys[2] = "k3";
    keys[3] = "k4";
    keys[4] = "k5";
    keys[5] = "no_key";
    // Deliberately over-sized and pre-filled: MultiGet must resize the output
    // to keys.size() and overwrite the placeholder contents.
    std::vector<std::string> values(20,"Temporary data to be overwritten");
    std::vector<Status> s = db_->MultiGet(ReadOptions(),keys,&values);
    ASSERT_EQ(values.size(),keys.size());
    ASSERT_EQ(values[0], "v1");
    ASSERT_EQ(values[1], "v2");
    ASSERT_EQ(values[2], "v3");
    ASSERT_EQ(values[4], "v5");
    ASSERT_OK(s[0]);
    ASSERT_OK(s[1]);
    ASSERT_OK(s[2]);
    ASSERT_TRUE(s[3].IsNotFound());
    ASSERT_OK(s[4]);
    ASSERT_TRUE(s[5].IsNotFound());
  } while (ChangeCompactOptions());
}
// MultiGet edge cases: empty key set (on a populated and an empty DB) and
// lookups against an empty DB.
TEST(DBTest, MultiGetEmpty) {
  do {
    // Empty Key Set
    std::vector<Slice> keys;
    std::vector<std::string> values;
    std::vector<Status> s = db_->MultiGet(ReadOptions(),keys,&values);
    ASSERT_EQ((int)s.size(),0);
    // Empty Database, Empty Key Set
    DestroyAndReopen();
    s = db_->MultiGet(ReadOptions(), keys, &values);
    ASSERT_EQ((int)s.size(),0);
    // Empty Database, Search for Keys
    keys.resize(2);
    keys[0] = "a";
    keys[1] = "b";
    s = db_->MultiGet(ReadOptions(),keys,&values);
    ASSERT_EQ((int)s.size(), 2);
    ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
  } while (ChangeCompactOptions());
}
// Populates the DB with 11 SST files laid out so a prefix-filtered seek needs
// only 2 random reads while an unfiltered seek needs 11 (see GROUP ranges
// below). Used by the PrefixScan test.
void PrefixScanInit(DBTest *dbtest) {
  char buf[100];
  std::string keystr;
  const int small_range_sstfiles = 5;
  const int big_range_sstfiles = 5;
  // Generate 11 sst files with the following prefix ranges.
  // GROUP 0: [0,10]                                        (level 1)
  // GROUP 1: [1,2], [2,3], [3,4], [4,5], [5, 6]            (level 0)
  // GROUP 2: [0,6], [0,7], [0,8], [0,9], [0,10]            (level 0)
  //
  // A seek with the previous API would do 11 random I/Os (to all the
  // files).  With the new API and a prefix filter enabled, we should
  // only do 2 random I/O, to the 2 files containing the key.

  // GROUP 0
  snprintf(buf, sizeof(buf), "%02d______:start", 0);
  keystr = std::string(buf);
  ASSERT_OK(dbtest->Put(keystr, keystr));
  snprintf(buf, sizeof(buf), "%02d______:end", 10);
  keystr = std::string(buf);
  ASSERT_OK(dbtest->Put(keystr, keystr));
  dbtest->dbfull()->TEST_FlushMemTable();
  dbtest->dbfull()->CompactRange(nullptr, nullptr); // move to level 1

  // GROUP 1
  for (int i = 1; i <= small_range_sstfiles; i++) {
    snprintf(buf, sizeof(buf), "%02d______:start", i);
    keystr = std::string(buf);
    ASSERT_OK(dbtest->Put(keystr, keystr));
    snprintf(buf, sizeof(buf), "%02d______:end", i+1);
    keystr = std::string(buf);
    ASSERT_OK(dbtest->Put(keystr, keystr));
    dbtest->dbfull()->TEST_FlushMemTable();
  }

  // GROUP 2
  // FIX: this loop previously declared a second `std::string keystr;`
  // shadowing the function-scope one -- redundant and misleading; removed.
  for (int i = 1; i <= big_range_sstfiles; i++) {
    snprintf(buf, sizeof(buf), "%02d______:start", 0);
    keystr = std::string(buf);
    ASSERT_OK(dbtest->Put(keystr, keystr));
    snprintf(buf, sizeof(buf), "%02d______:end",
             small_range_sstfiles+i+1);
    keystr = std::string(buf);
    ASSERT_OK(dbtest->Put(keystr, keystr));
    dbtest->dbfull()->TEST_FlushMemTable();
  }
}
// Measures random reads during prefix scans: with a prefix bloom filter only
// 2 of the 11 SST files from PrefixScanInit are touched; without a prefix,
// all 11 are. (Uses the old ReadOptions::prefix API.)
TEST(DBTest, PrefixScan) {
  ReadOptions ro = ReadOptions();
  int count;
  Slice prefix;
  Slice key;
  char buf[100];
  Iterator* iter;
  snprintf(buf, sizeof(buf), "03______:");
  // prefix = "03______" (8 bytes), key = "03______:" (9 bytes).
  prefix = Slice(buf, 8);
  key = Slice(buf, 9);
  // db configs
  env_->count_random_reads_ = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.no_block_cache = true;
  options.filter_policy = NewBloomFilterPolicy(10);
  options.prefix_extractor.reset(NewFixedPrefixTransform(8));
  options.whole_key_filtering = false;
  options.disable_auto_compactions = true;
  options.max_background_compactions = 2;
  options.create_if_missing = true;
  options.disable_seek_compaction = true;
  // Tricky: options.prefix_extractor will be released by
  // NewHashSkipListRepFactory after use.
  options.memtable_factory.reset(NewHashSkipListRepFactory());

  // prefix specified, with blooms: 2 RAND I/Os
  // SeekToFirst
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  ro.prefix = &prefix;
  iter = db_->NewIterator(ro);
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    assert(iter->key().starts_with(prefix));
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 2);

  // prefix specified, with blooms: 2 RAND I/Os
  // Seek
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  ro.prefix = &prefix;
  iter = db_->NewIterator(ro);
  for (iter->Seek(key); iter->Valid(); iter->Next()) {
    assert(iter->key().starts_with(prefix));
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 2);

  // no prefix specified: 11 RAND I/Os
  DestroyAndReopen(&options);
  PrefixScanInit(this);
  count = 0;
  env_->random_read_counter_.Reset();
  iter = db_->NewIterator(ReadOptions());
  for (iter->Seek(prefix); iter->Valid(); iter->Next()) {
    if (! iter->key().starts_with(prefix)) {
      break;
    }
    count++;
  }
  ASSERT_OK(iter->status());
  delete iter;
  ASSERT_EQ(count, 2);
  ASSERT_EQ(env_->random_read_counter_.Read(), 11);
  Close();
  // filter_policy is a raw owning pointer in this API; free it explicitly.
  delete options.filter_policy;
}
// Formats `num` as a fixed-width, zero-padded 16-digit decimal key.
std::string MakeKey(unsigned int num) {
  char formatted[30];
  const int written = snprintf(formatted, sizeof(formatted), "%016u", num);
  return std::string(formatted, written);
}
// Micro-benchmark (not a unit test): measures VersionSet::LogAndApply cost.
// Builds a base version with `num_base_files` files, then applies `iters`
// single-file delete+add edits, reporting microseconds per iteration.
void BM_LogAndApply(int iters, int num_base_files) {
  std::string dbname = test::TmpDir() + "/rocksdb_test_benchmark";
  ASSERT_OK(DestroyDB(dbname, Options()));
  DB* db = nullptr;
  Options opts;
  opts.create_if_missing = true;
  // Open and close once so the manifest/CURRENT files exist for Recover().
  Status s = DB::Open(opts, dbname, &db);
  ASSERT_OK(s);
  ASSERT_TRUE(db != nullptr);
  delete db;
  db = nullptr;
  Env* env = Env::Default();
  // LogAndApply requires the caller to hold this mutex.
  port::Mutex mu;
  MutexLock l(&mu);
  InternalKeyComparator cmp(BytewiseComparator());
  Options options;
  EnvOptions sopt;
  VersionSet vset(dbname, &options, sopt, nullptr, &cmp);
  ASSERT_OK(vset.Recover());
  // Seed level 2 with num_base_files dummy files with disjoint key ranges.
  VersionEdit vbase;
  uint64_t fnum = 1;
  for (int i = 0; i < num_base_files; i++) {
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vbase.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
  }
  ASSERT_OK(vset.LogAndApply(&vbase, &mu));
  // Timed region: one delete+add edit per iteration.
  uint64_t start_micros = env->NowMicros();
  for (int i = 0; i < iters; i++) {
    VersionEdit vedit;
    vedit.DeleteFile(2, fnum);
    InternalKey start(MakeKey(2*fnum), 1, kTypeValue);
    InternalKey limit(MakeKey(2*fnum+1), 1, kTypeDeletion);
    vedit.AddFile(2, fnum++, 1 /* file size */, start, limit, 1, 1);
    vset.LogAndApply(&vedit, &mu);
  }
  uint64_t stop_micros = env->NowMicros();
  unsigned int us = stop_micros - start_micros;
  char buf[16];
  snprintf(buf, sizeof(buf), "%d", num_base_files);
  fprintf(stderr,
          "BM_LogAndApply/%-6s   %8d iters : %9u us (%7.0f us / iter)\n",
          buf, iters, us, ((float)us) / iters);
}
// A tailing iterator created on an empty DB must see a record written after
// its creation once re-seeked.
TEST(DBTest, TailingIteratorSingle) {
  ReadOptions read_options;
  read_options.tailing = true;
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  iter->SeekToFirst();
  ASSERT_TRUE(!iter->Valid());
  // add a record and check that iter can see it
  ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor"));
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ(iter->key().ToString(), "mirko");
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
}
// A single tailing iterator must keep seeing each newly written key as it is
// added, across many writes (spanning memtable flushes).
TEST(DBTest, TailingIteratorKeepAdding) {
  ReadOptions read_options;
  read_options.tailing = true;
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  std::string value(1024, 'a');
  const int num_records = 10000;
  for (int i = 0; i < num_records; ++i) {
    // 16-char zero-padded key, matching the Slice length below.
    char buf[32];
    snprintf(buf, sizeof(buf), "%016d", i);
    Slice key(buf, 16);
    ASSERT_OK(db_->Put(WriteOptions(), key, value));
    iter->Seek(key);
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(iter->key().compare(key), 0);
  }
}
// A tailing iterator positioned on a key that is subsequently deleted must
// still advance cleanly over it and see all later writes.
TEST(DBTest, TailingIteratorDeletes) {
  ReadOptions read_options;
  read_options.tailing = true;
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  // write a single record, read it using the iterator, then delete it
  ASSERT_OK(db_->Put(WriteOptions(), "0test", "test"));
  iter->SeekToFirst();
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ(iter->key().ToString(), "0test");
  ASSERT_OK(db_->Delete(WriteOptions(), "0test"));
  // write many more records
  const int num_records = 10000;
  std::string value(1024, 'A');
  for (int i = 0; i < num_records; ++i) {
    // Keys sort after "0test" ("1" prefix), 16 chars to match the Slice.
    char buf[32];
    snprintf(buf, sizeof(buf), "1%015d", i);
    Slice key(buf, 16);
    ASSERT_OK(db_->Put(WriteOptions(), key, value));
  }
  // force a flush to make sure that no records are read from memtable
  dbfull()->TEST_FlushMemTable();
  // skip "0test"
  iter->Next();
  // make sure we can read all new records using the existing iterator
  int count = 0;
  for (; iter->Valid(); iter->Next(), ++count) ;
  ASSERT_EQ(count, num_records);
}
// With prefix_seek, a tailing iterator's Seek must stay confined to the
// seek key's prefix: seeking "0102" finds nothing even though "0202" exists.
TEST(DBTest, TailingIteratorPrefixSeek) {
  ReadOptions read_options;
  read_options.tailing = true;
  read_options.prefix_seek = true;
  Options options = CurrentOptions();
  options.env = env_;
  options.create_if_missing = true;
  options.disable_auto_compactions = true;
  // 2-byte fixed prefix with a hash-skiplist memtable.
  options.prefix_extractor.reset(NewFixedPrefixTransform(2));
  options.memtable_factory.reset(NewHashSkipListRepFactory());
  DestroyAndReopen(&options);
  std::unique_ptr<Iterator> iter(db_->NewIterator(read_options));
  ASSERT_OK(db_->Put(WriteOptions(), "0101", "test"));
  dbfull()->TEST_FlushMemTable();
  ASSERT_OK(db_->Put(WriteOptions(), "0202", "test"));
  // Seek(0102) shouldn't find any records since 0202 has a different prefix
  iter->Seek("0102");
  ASSERT_TRUE(!iter->Valid());
  iter->Seek("0202");
  ASSERT_TRUE(iter->Valid());
  ASSERT_EQ(iter->key().ToString(), "0202");
  iter->Next();
  ASSERT_TRUE(!iter->Valid());
}
} // namespace rocksdb
// Entry point: with "--benchmark", run the LogAndApply micro-benchmarks and
// exit; otherwise run the full test suite.
int main(int argc, char** argv) {
  if (argc > 1 && std::string(argv[1]) == "--benchmark") {
    rocksdb::BM_LogAndApply(1000, 1);
    rocksdb::BM_LogAndApply(1000, 100);
    rocksdb::BM_LogAndApply(1000, 10000);
    rocksdb::BM_LogAndApply(100, 100000);
    return 0;
  }
  return rocksdb::test::RunAllTests();
}
|
#include "TFOpacityWidget.h"
#include "TFOpacityInfoWidget.h"
#include "TFUtils.h"
#include <QAction>
#include <QPaintEvent>
#include <QPainter>
#include <glm/glm.hpp>
#include <vapor/ParamsMgr.h>
#include <vapor/RenderParams.h>
using namespace VAPoR;
using glm::clamp;
using glm::vec2;
using std::vector;
// Conversions between Qt point types and glm::vec2 (and back).
static vec2 qvec2(const QPoint &qp) { return vec2(qp.x(), qp.y()); }
static vec2 qvec2(const QPointF &qp) { return vec2(qp.x(), qp.y()); }
static QPointF qvec2(const vec2 &v) { return QPointF(v.x, v.y); }
// Projects point p onto the infinite line through a and b.
// NOTE(review): glm::normalize(b - a) yields NaNs when a == b; callers must
// not pass degenerate segments -- confirm.
static vec2 Project(vec2 a, vec2 b, vec2 p) {
    vec2 n = glm::normalize(b - a);
    float t = glm::dot(n, p - a);
    return n * t + a;
}
// Distance from point p to the line *segment* (a, b): the distance to p's
// projection when it falls within the segment, otherwise the distance to the
// nearer endpoint.
static float DistanceToLine(vec2 a, vec2 b, vec2 p) {
    // FIX: guard the degenerate segment; glm::normalize(b - a) would produce
    // NaNs when a == b, making every comparison below fail.
    if (a == b)
        return glm::distance(a, p);
    vec2 n = glm::normalize(b - a);
    float t = glm::dot(n, p - a);
    // Projection falls before a or past b => clamp to the endpoints.
    if (t < 0)
        return glm::distance(a, p);
    if (t > glm::distance(a, b))
        return glm::distance(b, p);
    vec2 projection = n * t + a;
    return glm::distance(projection, p);
}
#define CONTROL_POINT_RADIUS (4.0f)
#define PADDING (CONTROL_POINT_RADIUS + 1.0f)
// Opacity-map editor widget; all state setup is delegated to the TFMap base.
TFOpacityMap::TFOpacityMap(const std::string &variableNameTag, TFMapWidget *parent)
    : TFMap(variableNameTag, parent) {}

// Minimum sensible editing area for the opacity curve.
QSize TFOpacityMap::minimumSizeHint() const { return QSize(100, 75); }

// Clear the selection when the widget loses focus.
void TFOpacityMap::LostFocus() { DeselectControlPoint(); }
#define PROPERTY_INDEX ("index")
#define PROPERTY_LOCATION ("location")
// Context menu at pixel position p: offer "Delete" when a control point is
// under the cursor (its index stored on the action), otherwise "Add" (the
// click location, in NDC, stored on the action).
void TFOpacityMap::PopulateContextMenu(QMenu *menu, const glm::vec2 &p) {
    auto selected = findSelectedControlPoint(p);

    if (selected != _controlPoints.EndPoints())
        menu->addAction("Delete control point", this, SLOT(menuDeleteSelectedControlPoint()))
            ->setProperty(PROPERTY_INDEX, QVariant(selected.Index()));
    else
        menu->addAction("Add control point", this, SLOT(menuAddControlPoint()))
            ->setProperty(PROPERTY_LOCATION, QVariant(qvec2(PixelToNDC(p))));
}
// Settings (gear) menu: save/load the whole transfer function to/from disk.
void TFOpacityMap::PopulateSettingsMenu(QMenu *menu) const {
    menu->addAction("Save Transfer Function", this, SLOT(menuSave()));
    menu->addAction("Load Transfer Function", this, SLOT(menuLoad()));
}
void TFOpacityMap::paramsUpdate() {
MapperFunction *mf = getRenderParams()->GetMapperFunc(getVariableName());
// TODO Multiple opacity maps?
// int n = mf->getNumOpacityMaps();
// printf("# opacity maps = %i\n", n);
OpacityMap *om = mf->GetOpacityMap(0);
vector<double> cp = om->GetControlPoints();
_controlPoints.Resize(cp.size() / 2);
for (int i = 0; i < cp.size(); i += 2) {
_controlPoints[i / 2].y = cp[i];
_controlPoints[i / 2].x = cp[i + 1];
}
update();
if (_selectedControl > -1)
UpdateInfo(_controlPoints[_selectedControl].x, _controlPoints[_selectedControl].y);
}
// Create the companion info widget and wire up the two-way signal/slot
// connections: edits in the widget flow back via UpdateFromInfo, and
// selection changes here flow out via UpdateInfo/ControlPointDeselected.
TFInfoWidget *TFOpacityMap::createInfoWidget() {
    TFOpacityInfoWidget *info = new TFOpacityInfoWidget(getVariableNameTag());

    connect(info, SIGNAL(ControlPointChanged(float, float)), this,
            SLOT(UpdateFromInfo(float, float)));
    connect(this, SIGNAL(UpdateInfo(float, float)), info, SLOT(SetControlPoint(float, float)));
    connect(this, SIGNAL(ControlPointDeselected()), info, SLOT(DeselectControlPoint()));

    return info;
}
// Paint the opacity curve: one line per segment, then the control-point
// handles on top. Handles are drawn last-to-first so earlier points render
// over later ones; the selected handle is drawn highlighted.
void TFOpacityMap::paintEvent(QPainter &p) {
    //     p.setViewport(10, 10, 30, 30);
    //     p.setWindow(10, 10, 30, 30);
    //      p.fillRect(event->rect(), QBrush(QColor(64, 32, 64)));

    if (_controlPoints.Size()) {
        ControlPointList &cp = _controlPoints;

        for (auto it = cp.BeginLines(); it != cp.EndLines(); ++it) {
            p.drawLine(NDCToQPixel(it.a()), NDCToQPixel(it.b()));
            //            p.drawEllipse(qvec2(Project(NDCToPixel(it.a()), NDCToPixel(it.b()), m)),
            //            2, 2);
        }

        // Reverse iteration; stops one step before BeginPoints().
        for (auto it = --cp.EndPoints(); it != --cp.BeginPoints(); --it)
            drawControl(p, NDCToQPixel(*it), it.Index() == _selectedControl);
    }
}
// Begin a drag on whatever the press hit: a control point (also takes the
// selection) or a line segment (drags both endpoints). Presses on empty
// canvas clear the selection. Drags open a ParamsMgr undo state group that is
// closed in mouseReleaseEvent (or deleteControlPoint).
void TFOpacityMap::mousePressEvent(QMouseEvent *event) {
    emit Activated(this);
    vec2 mouse = qvec2(event->localPos());
    auto it = findSelectedControlPoint(mouse);
    auto lineIt = findSelectedControlLine(mouse);
    if (it != _controlPoints.EndPoints()) {
        _draggedControl = it;
        // Offset between the point and the cursor, so the point doesn't jump.
        _dragOffset = NDCToPixel(*it) - mouse;
        _isDraggingControl = true;
        selectControlPoint(it);
        getParamsMgr()->BeginSaveStateGroup("Move opacity control point");
    } else if (lineIt != _controlPoints.EndLines()) {
        _draggedLine = lineIt;
        // Remember the offset of BOTH endpoints so the segment keeps its shape.
        _dragOffset = NDCToPixel(lineIt.a()) - mouse;
        _dragOffsetB = NDCToPixel(lineIt.b()) - mouse;
        _isDraggingLine = true;
        getParamsMgr()->BeginSaveStateGroup("Move opacity control line");
    } else {
        DeselectControlPoint();
        event->ignore();
    }
}
// Finish a control-point/line drag: commit the opacities and close the undo
// state group opened in mousePressEvent. Non-drag releases go back to Qt.
void TFOpacityMap::mouseReleaseEvent(QMouseEvent *event) {
    const bool wasDragging = _isDraggingControl || _isDraggingLine;
    if (!wasDragging) {
        event->ignore();
    } else {
        opacityChanged();
        getParamsMgr()->EndSaveStateGroup();
    }
    _isDraggingControl = false;
    _isDraggingLine = false;
}
// Drag handler. A dragged point is clamped between its neighbors' x values
// (and to [0,1] in y) so the list stays x-sorted; a dragged line clamps both
// endpoints against the adjacent segments. IntermediateChange() gives live
// undo-group feedback without committing a state.
void TFOpacityMap::mouseMoveEvent(QMouseEvent *event) {
    vec2 mouse = qvec2(event->pos());
    m = mouse;
    //    const int i = _draggedID;
    //    ControlPointList &cp = _controlPoints;
    //    const int N = cp.Size();

    if (_isDraggingControl) {
        const auto &it = _draggedControl;
        // Clamp x between the previous and next point (or 0/1 at the ends).
        vec2 newVal =
            glm::clamp(PixelToNDC(mouse + _dragOffset), vec2(it.IsFirst() ? 0 : (*(it - 1)).x, 0),
                       vec2(it.IsLast() ? 1 : (*(it + 1)).x, 1));

        *_draggedControl = newVal;
        emit UpdateInfo(newVal.x, newVal.y);

        update();
        opacityChanged();
        getParamsMgr()->IntermediateChange();
    } else if (_isDraggingLine) {
        auto &it = _draggedLine;
        // Both endpoints share the same clamp range so the segment translates
        // rigidly within the neighboring segments' bounds.
        it.setA(glm::clamp(PixelToNDC(mouse + _dragOffset),
                           vec2(it.IsFirst() ? 0 : (it - 1).a().x, 0),
                           vec2(it.IsLast() ? 1 : (it + 1).b().x, 1)));
        it.setB(glm::clamp(PixelToNDC(mouse + _dragOffsetB),
                           vec2(it.IsFirst() ? 0 : (it - 1).a().x, 0),
                           vec2(it.IsLast() ? 1 : (it + 1).b().x, 1)));
        update();
        opacityChanged();
        getParamsMgr()->IntermediateChange();
    } else {
        event->ignore();
    }
}
// Double-click: on a control point, delete it; on a line segment, insert a
// new point at the projection of the click onto that segment; else ignore.
void TFOpacityMap::mouseDoubleClickEvent(QMouseEvent *event) {
    const vec2 mouse = qvec2(event->pos());
    ControlPointList &points = _controlPoints;

    auto pointIt = findSelectedControlPoint(mouse);
    if (pointIt != points.EndPoints()) {
        deleteControlPoint(pointIt);
        return;
    }

    auto lineIt = findSelectedControlLine(mouse);
    if (lineIt == points.EndLines()) {
        event->ignore();
        return;
    }

    const vec2 pixelA = NDCToPixel(lineIt.a());
    const vec2 pixelB = NDCToPixel(lineIt.b());
    addControlPoint(PixelToNDC(Project(pixelA, pixelB, mouse)));
}
void TFOpacityMap::opacityChanged() {
if (!getRenderParams())
return;
MapperFunction *mf = getRenderParams()->GetMapperFunc(getVariableName());
OpacityMap *om = mf->GetOpacityMap(0);
vector<double> cp(_controlPoints.Size() * 2);
for (int i = 0; i < _controlPoints.Size(); i++) {
cp[i * 2] = _controlPoints[i].y;
cp[i * 2 + 1] = _controlPoints[i].x;
}
om->SetControlPoints(cp);
}
// True when the pixel position lies within the clickable radius of the
// control point cp (cp is given in normalized coordinates).
bool TFOpacityMap::controlPointContainsPixel(const vec2 &cp, const vec2 &pixel) const {
    const float dist = glm::distance(pixel, NDCToPixel(cp));
    return dist <= GetControlPointRadius();
}
// First control point whose handle contains the mouse pixel position, or
// EndPoints() when none does.
ControlPointList::PointIterator TFOpacityMap::findSelectedControlPoint(const glm::vec2 &mouse) {
    auto it = _controlPoints.BeginPoints();
    const auto end = _controlPoints.EndPoints();
    while (it != end && !controlPointContainsPixel(*it, mouse))
        ++it;
    return it;
}
// First line segment within the control-point radius of the mouse position,
// or EndLines() when none qualifies.
ControlPointList::LineIterator TFOpacityMap::findSelectedControlLine(const glm::vec2 &mouse) {
    ControlPointList &points = _controlPoints;
    const float hitRadius = GetControlPointRadius();

    auto it = points.BeginLines();
    for (; it != points.EndLines(); ++it) {
        const vec2 pixelA = NDCToPixel(it.a());
        const vec2 pixelB = NDCToPixel(it.b());
        if (DistanceToLine(pixelA, pixelB, mouse) <= hitRadius)
            break;
    }
    return it;
}
// Make the given point the current selection, repaint, and notify the info
// widget of the newly selected (value, opacity) pair.
void TFOpacityMap::selectControlPoint(ControlPointList::PointIterator it) {
    _selectedControl = it.Index();
    update();
    emit UpdateInfo((*it).x, (*it).y);
}
// Remove a control point. Any in-progress drag is terminated first (closing
// the undo state group), and the selection index is adjusted so it keeps
// referring to the same logical point after the removal.
void TFOpacityMap::deleteControlPoint(ControlPointList::PointIterator it) {
    if (_isDraggingControl || _isDraggingLine) {
        getParamsMgr()->EndSaveStateGroup();
        _isDraggingControl = false;
        _isDraggingLine = false;
    }
    if (_selectedControl == it.Index())
        DeselectControlPoint();
    else if (_selectedControl > it.Index())
        _selectedControl--;  // removal shifts later indices down by one
    _controlPoints.Remove(it);
    update();
    opacityChanged();
}
// Insert a new control point at the given normalized position, select it,
// repaint, and push the change to the params.
void TFOpacityMap::addControlPoint(const glm::vec2 &ndc) {
    ControlPointList &cp = _controlPoints;
    int index = cp.Add(ndc);
    selectControlPoint(cp.BeginPoints() + index);
    update();
    opacityChanged();
}
void TFOpacityMap::menuDeleteSelectedControlPoint() {
emit Activated(this);
QVariant indexVariant = sender()->property(PROPERTY_INDEX);
if (indexVariant.isValid()) {
int index = indexVariant.toInt();
if (index >= 0 && index < _controlPoints.Size())
deleteControlPoint(_controlPoints.BeginPoints() + index);
}
}
// Context-menu slot: add a point at the NDC location stored on the QAction
// by PopulateContextMenu.
void TFOpacityMap::menuAddControlPoint() {
    emit Activated(this);
    QVariant location = sender()->property(PROPERTY_LOCATION);
    if (location.isValid())
        addControlPoint(qvec2(location.toPointF()));
}
void TFOpacityMap::menuLoad() {
RenderParams *rp = getRenderParams();
if (!rp)
return;
TFUtils::LoadTransferFunction(getParamsMgr(), rp->GetMapperFunc(getVariableName()));
}
void TFOpacityMap::menuSave() {
RenderParams *rp = getRenderParams();
if (!rp)
return;
TFUtils::SaveTransferFunction(getParamsMgr(), rp->GetMapperFunc(getVariableName()));
}
// Clear the selection, repaint, and tell the info widget to blank itself.
void TFOpacityMap::DeselectControlPoint() {
    _selectedControl = -1;
    update();
    emit ControlPointDeselected();
}
// Slot: the info widget changed the selected point's (value, opacity). The
// point is removed and re-Added rather than mutated so the list preserves its
// x-ordering; Add() returns the point's new index.
void TFOpacityMap::UpdateFromInfo(float value, float opacity) {
    assert(_selectedControl >= 0);
    assert(value >= 0 && value <= 1);
    assert(opacity >= 0 && opacity <= 1);
    _controlPoints.Remove(_controlPoints.BeginPoints() + _selectedControl);
    _selectedControl = _controlPoints.Add(vec2(value, opacity));
    opacityChanged();
}
// Thin container widget that hosts a single TFOpacityMap for the variable.
TFOpacityWidget::TFOpacityWidget(const std::string &variableNameTag)
    : TFMapWidget(new TFOpacityMap(variableNameTag, this)) {
    this->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::MinimumExpanding);
    //    this->setFrameStyle(QFrame::Box);
}
|
// Geometric Tools, Inc.
// http://www.geometrictools.com
// Copyright (c) 1998-2006. All Rights Reserved
//
// The Wild Magic Version 4 Restricted Libraries source code is supplied
// under the terms of the license agreement
// http://www.geometrictools.com/License/Wm4RestrictedLicense.pdf
// and may not be copied or disclosed except in accordance with the terms
// of that agreement.
#include "VertexCollapse.h"
#include "Wm4DistVector3Segment3.h"
//----------------------------------------------------------------------------
// Build the collapse (level-of-detail) order for a polyline of iVQuantity
// vertices. On output: raiEdge holds vertex-index pairs for the full-detail
// edges, riEQuantity their count (iVQuantity closed, iVQuantity-1 open), and
// raiMap per-vertex indices into raiEdge used for dynamic LOD switching.
// rakVertex is reordered in place by ReorderVertices. All output arrays are
// allocated here with WM4_NEW; the caller owns them.
VertexCollapse::VertexCollapse (int iVQuantity, Vector3f*& rakVertex,
    bool bClosed, int*& raiMap, int& riEQuantity, int*& raiEdge)
{
    raiMap = WM4_NEW int[iVQuantity];

    if (bClosed)
    {
        riEQuantity = iVQuantity;
        raiEdge = WM4_NEW int[2*riEQuantity];

        // A triangle cannot be collapsed further; emit it directly.
        if (iVQuantity == 3)
        {
            raiMap[0] = 0;
            raiMap[1] = 1;
            raiMap[2] = 3;  // first slot in raiEdge that holds vertex 2
            raiEdge[0] = 0;  raiEdge[1] = 1;
            raiEdge[2] = 1;  raiEdge[3] = 2;
            raiEdge[4] = 2;  raiEdge[5] = 0;
            return;
        }
    }
    else
    {
        riEQuantity = iVQuantity-1;
        raiEdge = WM4_NEW int[2*riEQuantity];

        // A single segment cannot be collapsed; emit it directly.
        if (iVQuantity == 2)
        {
            raiMap[0] = 0;
            raiMap[1] = 1;
            raiEdge[0] = 0;  raiEdge[1] = 1;
            return;
        }
    }

    // create the heap of records
    InitializeHeap(iVQuantity,rakVertex,bClosed);
    BuildHeap();
    assert( IsValid() );

    // create the level of detail information for the polyline
    int* aiCollapse = WM4_NEW int[iVQuantity];
    CollapseVertices(iVQuantity,rakVertex,aiCollapse);
    ComputeEdges(iVQuantity,bClosed,aiCollapse,raiMap,riEQuantity,raiEdge);
    ReorderVertices(iVQuantity,rakVertex,aiCollapse,riEQuantity,raiEdge);
    WM4_DELETE[] aiCollapse;
}
//----------------------------------------------------------------------------
// Release the heap bookkeeping arrays allocated by InitializeHeap.
VertexCollapse::~VertexCollapse ()
{
    WM4_DELETE[] m_akRecord;
    WM4_DELETE[] m_apkHeap;
}
//----------------------------------------------------------------------------
// Collapse weight of vertex iZ between its neighbors iM and iP: the distance
// of iZ to the segment joining the neighbors, normalized by segment length.
// Nearly collinear vertices get small weights and collapse first; a
// degenerate (zero-length) segment yields MAX_REAL so iZ is never picked.
float VertexCollapse::GetWeight (int iM, int iZ, int iP, Vector3f* akVertex)
{
    Segment3f kSegment;
    kSegment.Origin = 0.5f*(akVertex[iM] + akVertex[iP]);
    kSegment.Direction = akVertex[iP] - akVertex[iM];
    float fLength = kSegment.Direction.Normalize();
    kSegment.Extent = 0.5f*fLength;
    float fDist = DistVector3Segment3f(akVertex[iZ],kSegment).Get();
    return (fLength > 0.0f ? fDist/fLength : Mathf::MAX_REAL);
}
//----------------------------------------------------------------------------
// Create one Record per vertex, link them into a circular doubly-linked list
// of neighbors, and compute the initial collapse weights. Endpoints of an
// open polyline get MAX_REAL so they are never collapsed.
void VertexCollapse::InitializeHeap (int iVQuantity, Vector3f* akVertex,
    bool bClosed)
{
    // Build the initial heap of weights, a max heap.  The weights are set
    // to negative values so that we get a min heap.  TO DO:  Modify the
    // code to directly implement a min heap.
    m_iHQuantity = iVQuantity;
    m_akRecord = WM4_NEW Record[m_iHQuantity];
    m_apkHeap = WM4_NEW Record*[m_iHQuantity];

    int i;
    for (i = 0; i < m_iHQuantity; i++)
    {
        m_akRecord[i].m_iVIndex = i;
        m_akRecord[i].m_iHIndex = i;
        // Modular arithmetic links the list circularly even for open
        // polylines; the endpoint weights below neutralize the wraparound.
        m_akRecord[i].m_pkLAdj = &m_akRecord[(m_iHQuantity+i-1)%m_iHQuantity];
        m_akRecord[i].m_pkRAdj = &m_akRecord[(i+1)%m_iHQuantity];
        m_apkHeap[i] = &m_akRecord[i];
    }

    int iQm1 = m_iHQuantity - 1;
    if (bClosed)
    {
        int iQm2 = m_iHQuantity - 2;
        m_akRecord[0].m_fWeight = GetWeight(iQm1,0,1,akVertex);
        m_akRecord[iQm1].m_fWeight = GetWeight(iQm2,iQm1,0,akVertex);
    }
    else
    {
        // Open polyline: never collapse the endpoints.
        m_akRecord[0].m_fWeight = Mathf::MAX_REAL;
        m_akRecord[iQm1].m_fWeight = Mathf::MAX_REAL;
    }

    // Interior vertices weighted by their two immediate neighbors.
    for (int iM = 0, iZ = 1, iP = 2; iZ < iQm1; iM++, iZ++, iP++)
    {
        m_akRecord[iZ].m_fWeight = GetWeight(iM,iZ,iP,akVertex);
    }
}
//----------------------------------------------------------------------------
// Bottom-up heapify: sift each interior node down. The '>' comparisons make
// this a min-heap on weight (smallest weight at the root, slot 0).
void VertexCollapse::BuildHeap ()
{
    int iLast = m_iHQuantity - 1;
    for (int iLeft = iLast/2; iLeft >= 0; iLeft--)
    {
        Record* pkRecord = m_apkHeap[iLeft];
        int iPa = iLeft, iCh = 2*iLeft + 1;
        while (iCh <= iLast)
        {
            // Of the (up to) two children, pick the smaller-weight one.
            if (iCh < iLast)
            {
                if (m_apkHeap[iCh]->m_fWeight > m_apkHeap[iCh+1]->m_fWeight)
                {
                    iCh++;
                }
            }

            // Stop once the sifted record is no larger than both children.
            if (m_apkHeap[iCh]->m_fWeight >= pkRecord->m_fWeight)
            {
                break;
            }

            // Move the child up, keeping its back-pointer in sync.
            m_apkHeap[iCh]->m_iHIndex = iPa;
            m_apkHeap[iPa] = m_apkHeap[iCh];
            iPa = iCh;
            iCh = 2*iCh + 1;
        }
        pkRecord->m_iHIndex = iPa;
        m_apkHeap[iPa] = pkRecord;
    }
}
//----------------------------------------------------------------------------
// Pop the minimum-weight record: move the last heap element into the root
// position and sift it down, unlink the removed vertex from the neighbor
// list, then recompute the weights of its two neighbors (endpoint records
// with MAX_REAL are left untouched). Returns the removed vertex index.
int VertexCollapse::RemoveRoot (Vector3f* akVertex)
{
    Record* pkRoot = m_apkHeap[0];

    // Sift the last element down from the root slot.
    int iLast = m_iHQuantity - 1;
    Record* pkRecord = m_apkHeap[iLast];
    int iPa = 0, iCh = 1;
    while (iCh <= iLast)
    {
        // Pick the smaller-weight child.
        if (iCh < iLast)
        {
            if (m_apkHeap[iCh]->m_fWeight > m_apkHeap[iCh+1]->m_fWeight)
            {
                iCh++;
            }
        }

        if (m_apkHeap[iCh]->m_fWeight >= pkRecord->m_fWeight)
        {
            break;
        }

        m_apkHeap[iCh]->m_iHIndex = iPa;
        m_apkHeap[iPa] = m_apkHeap[iCh];
        iPa = iCh;
        iCh = 2*iCh + 1;
    }
    pkRecord->m_iHIndex = iPa;
    m_apkHeap[iPa] = pkRecord;
    m_iHQuantity--;

    // remove root from the doubly-linked list
    Record* pkLAdj = pkRoot->m_pkLAdj;
    Record* pkRAdj = pkRoot->m_pkRAdj;
    pkLAdj->m_pkRAdj = pkRAdj;
    pkRAdj->m_pkLAdj = pkLAdj;

    // update the weights of the vertices affected by the removal
    int iM, iZ, iP;
    float fWeight;
    if (pkLAdj->m_fWeight != Mathf::MAX_REAL)
    {
        iZ = pkLAdj->m_iVIndex;
        iM = pkLAdj->m_pkLAdj->m_iVIndex;
        iP = pkLAdj->m_pkRAdj->m_iVIndex;
        fWeight = GetWeight(iM,iZ,iP,akVertex);
        Update(pkLAdj->m_iHIndex,fWeight);
        assert(IsValid());
    }

    if (pkRAdj->m_fWeight != Mathf::MAX_REAL)
    {
        iZ = pkRAdj->m_iVIndex;
        iM = pkRAdj->m_pkLAdj->m_iVIndex;
        iP = pkRAdj->m_pkRAdj->m_iVIndex;
        fWeight = GetWeight(iM,iZ,iP,akVertex);
        Update(pkRAdj->m_iHIndex,fWeight);
        assert(IsValid());
    }

    return pkRoot->m_iVIndex;
}
//----------------------------------------------------------------------------
// Change the weight of the record at heap slot iHIndex and restore the heap
// invariant: a larger weight sifts toward the leaves, a smaller one toward
// the root. Equal weights require no movement.
void VertexCollapse::Update (int iHIndex, float fWeight)
{
    Record* pkRecord = m_apkHeap[iHIndex];
    int iPa, iCh, iChP, iMaxCh;

    if (fWeight > pkRecord->m_fWeight)
    {
        pkRecord->m_fWeight = fWeight;

        // new weight larger than old, propagate it towards the leaves
        iPa = iHIndex;
        iCh = 2*iPa+1;
        while (iCh < m_iHQuantity)
        {
            // at least one child exists
            if (iCh < m_iHQuantity-1)
            {
                // two children exist
                iChP = iCh+1;
                if (m_apkHeap[iCh]->m_fWeight <= m_apkHeap[iChP]->m_fWeight)
                {
                    iMaxCh = iCh;
                }
                else
                {
                    iMaxCh = iChP;
                }
            }
            else
            {
                // one child exists
                iMaxCh = iCh;
            }

            if (m_apkHeap[iMaxCh]->m_fWeight >= fWeight)
            {
                break;
            }

            // swap the record with its smaller-weight child
            m_apkHeap[iMaxCh]->m_iHIndex = iPa;
            m_apkHeap[iPa] = m_apkHeap[iMaxCh];
            pkRecord->m_iHIndex = iMaxCh;
            m_apkHeap[iMaxCh] = pkRecord;
            iPa = iMaxCh;
            iCh = 2*iPa+1;
        }
    }
    else if (fWeight < pkRecord->m_fWeight)
    {
        pkRecord->m_fWeight = fWeight;

        // new weight smaller than old, propagate it towards the root
        iCh = iHIndex;
        while (iCh > 0)
        {
            // a parent exists
            iPa = (iCh-1)/2;
            if (m_apkHeap[iPa]->m_fWeight <= fWeight)
            {
                break;
            }

            // move the parent down one level
            m_apkHeap[iPa]->m_iHIndex = iCh;
            m_apkHeap[iCh] = m_apkHeap[iPa];
            pkRecord->m_iHIndex = iPa;
            pkRecord->m_fWeight = fWeight;
            m_apkHeap[iPa] = pkRecord;
            iCh = iPa;
        }
    }
}
//----------------------------------------------------------------------------
// Repeatedly pop the min-weight vertex. The array is filled from the back so
// the FIRST vertex collapsed ends up LAST in aiCollapse.
void VertexCollapse::CollapseVertices (int iVQuantity, Vector3f* akVertex,
    int* aiCollapse)
{
    int i = iVQuantity;
    while (--i >= 0)
    {
        aiCollapse[i] = RemoveRoot(akVertex);
    }
}
//----------------------------------------------------------------------------
// Emit the full-detail edge list from the collapse order, then record in
// aiMap, for each collapsible vertex, the raiEdge slot it occupies so the
// caller can splice edges in and out when changing level of detail.
void VertexCollapse::ComputeEdges (int iVQuantity, bool bClosed,
    int* aiCollapse, int* aiMap, int iEQuantity, int* aiEdge)
{
    // Compute the edges (first to collapse is last in array).  Do not
    // collapse last line segment of open polyline.  Do not collapse last
    // triangle of closed polyline.
    int i, iVIndex, iEIndex = 2*iEQuantity-1;
    if (bClosed)
    {
        for (i = iVQuantity-1; i >= 0; i--)
        {
            iVIndex = aiCollapse[i];
            aiEdge[iEIndex--] = (iVIndex+1) % iVQuantity;
            aiEdge[iEIndex--] = iVIndex;
        }
    }
    else
    {
        for (i = iVQuantity-1; i >= 2; i--)
        {
            iVIndex = aiCollapse[i];
            aiEdge[iEIndex--] = iVIndex+1;
            aiEdge[iEIndex--] = iVIndex;
        }

        // The final segment of an open polyline is written directly.
        iVIndex = aiCollapse[0];
        aiEdge[0] = iVIndex;
        aiEdge[1] = iVIndex+1;
    }

    // In the given edge order, find the index in the edge array that
    // corresponds to a collapse vertex and save the index for the dynamic
    // change in level of detail.  This relies on the assumption that a
    // vertex is shared by at most two edges.
    iEIndex = 2*iEQuantity-1;
    for (i = iVQuantity-1; i >= 0; i--)
    {
        iVIndex = aiCollapse[i];
        for (int iE = 0; iE < 2*iEQuantity; iE++)
        {
            if (iVIndex == aiEdge[iE])
            {
                // Temporarily splice the collapsed vertex out of the edge.
                aiMap[i] = iE;
                aiEdge[iE] = aiEdge[iEIndex];
                break;
            }
        }
        iEIndex -= 2;

        // Stop at the non-collapsible core: a triangle (3 edges, indices
        // 0..5) for closed polylines, one segment (indices 0..1) for open.
        if (bClosed)
        {
            if (iEIndex == 5)
            {
                break;
            }
        }
        else
        {
            if (iEIndex == 1)
            {
                break;
            }
        }
    }

    // restore the edge array to full level of detail
    if (bClosed)
    {
        for (i = 3; i < iVQuantity; i++)
        {
            iVIndex = aiCollapse[i];
            aiEdge[aiMap[i]] = iVIndex;
        }
    }
    else
    {
        for (i = 2; i < iVQuantity; i++)
        {
            iVIndex = aiCollapse[i];
            aiEdge[aiMap[i]] = iVIndex;
        }
    }
}
//----------------------------------------------------------------------------
// Permute the vertex array into collapse order and remap the edge indices
// accordingly. The old vertex array is deleted and replaced in place via the
// reference parameter rakVertex.
void VertexCollapse::ReorderVertices (int iVQuantity, Vector3f*& rakVertex,
    int* aiCollapse, int iEQuantity, int* aiEdge)
{
    int* aiPermute = WM4_NEW int[iVQuantity];
    Vector3f* akPVertex = WM4_NEW Vector3f[iVQuantity];

    // aiPermute maps an ORIGINAL vertex index to its NEW position.
    int i;
    for (i = 0; i < iVQuantity; i++)
    {
        int iVIndex = aiCollapse[i];
        aiPermute[iVIndex] = i;
        akPVertex[i] = rakVertex[iVIndex];
    }

    for (i = 0; i < 2*iEQuantity; i++)
    {
        aiEdge[i] = aiPermute[aiEdge[i]];
    }

    WM4_DELETE[] rakVertex;
    rakVertex = akPVertex;
    WM4_DELETE[] aiPermute;
}
//----------------------------------------------------------------------------
// Debug check over heap slots [iStart,iFinal]: every parent weight must be
// <= its child's, and each parent must store its own slot index.
// NOTE(review): the 'iP > iStart' guard skips parents at or before iStart —
// presumably because the root has no parent; confirm the intent for calls
// with iStart > 0.
bool VertexCollapse::IsValid (int iStart, int iFinal)
{
    for (int iC = iStart; iC <= iFinal; iC++)
    {
        int iP = (iC-1)/2;
        if (iP > iStart)
        {
            if (m_apkHeap[iP]->m_fWeight > m_apkHeap[iC]->m_fWeight)
            {
                return false;
            }

            if (m_apkHeap[iP]->m_iHIndex != iP)
            {
                return false;
            }
        }
    }
    return true;
}
//----------------------------------------------------------------------------
bool VertexCollapse::IsValid ()
{
return IsValid(0,m_iHQuantity-1);
}
//----------------------------------------------------------------------------
|
/*
* Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
*/
#include <boost/uuid/uuid_io.hpp>
#include <vnc_cfg_types.h>
#include <cmn/agent_cmn.h>
#include <ifmap/ifmap_node.h>
#include <cfg/cfg_init.h>
#include <cfg/cfg_listener.h>
#include <oper/agent_sandesh.h>
#include <oper/ifmap_dependency_manager.h>
#include <oper/interface_common.h>
#include <oper/physical_device.h>
#include <oper/nexthop.h>
#include <oper/config_manager.h>
#include <vector>
#include <string>
using std::string;
/////////////////////////////////////////////////////////////////////////////
// PhysicalInterface routines
/////////////////////////////////////////////////////////////////////////////
// Physical ports carry a nil UUID and are keyed purely by name; subtype, VRF
// and owning device are filled in later via OnChange()/AllocEntry().
PhysicalInterface::PhysicalInterface(const std::string &name) :
    Interface(Interface::PHYSICAL, nil_uuid(), name, NULL), persistent_(false),
    subtype_(INVALID), physical_device_(NULL) {
}
// Nothing to release beyond the base class; smart refs clean up themselves.
PhysicalInterface::~PhysicalInterface() {
}
// Raw (non-owning) pointer to the physical device this port belongs to, or
// NULL when no device is associated.
PhysicalDevice *PhysicalInterface::physical_device() const {
    return physical_device_.get();
}
// Human-readable identifier used in logs and introspection output.
string PhysicalInterface::ToString() const {
    string repr = "PORT <";
    repr += name();
    repr += ">";
    return repr;
}
// DB ordering: physical interfaces sort by name alone.
bool PhysicalInterface::CmpInterface(const DBEntry &rhs) const {
    const PhysicalInterface &a = static_cast<const PhysicalInterface &>(rhs);
    return name_ < a.name_;
}
// Build the DB key identifying this interface (keyed by name).
DBEntryBase::KeyPtr PhysicalInterface::GetDBRequestKey() const {
    return DBEntryBase::KeyPtr(new PhysicalInterfaceKey(name_));
}
// Re-evaluate mutable config: VRF membership and the owning physical device.
// Returns true when anything changed so the DB entry gets notified.
bool PhysicalInterface::OnChange(const InterfaceTable *table,
                                 const PhysicalInterfaceData *data) {
    bool ret = false;

    // Handle VRF Change
    VrfKey key(data->vrf_name_);
    VrfEntry *new_vrf = static_cast<VrfEntry *>
        (table->agent()->vrf_table()->FindActiveEntry(&key));
    if (new_vrf != vrf_.get()) {
        vrf_.reset(new_vrf);
        ret = true;
    }

    // Re-resolve the physical device from its UUID; may be NULL.
    PhysicalDevice *dev =
        table->agent()->physical_device_table()->Find(data->device_uuid_);
    if (dev != physical_device_.get()) {
        physical_device_.reset(dev);
        ret = true;
    }

    return ret;
}
// DB delete hook: tear down the next-hop entries created in PostAdd().
bool PhysicalInterface::Delete(const DBRequest *req) {
    InterfaceNH::DeletePhysicalInterfaceNh(name_);
    return true;
}
// Called after the interface is committed to the DB table: creates the
// next-hop entries for the port and, for VMWARE-subtype ports, puts the
// kernel interface into promiscuous mode via SIOCGIFFLAGS/SIOCSIFFLAGS.
// Skipped entirely in test mode (no real kernel interface to configure).
void PhysicalInterface::PostAdd() {
    InterfaceNH::CreatePhysicalInterfaceNh(name_, mac_);
    InterfaceTable *table = static_cast<InterfaceTable *>(get_table());
    if (table->agent()->test_mode()) {
        return;
    }

    // Interfaces in VMWARE must be put into promiscous mode
    if (subtype_ != VMWARE) {
        return;
    }

    int fd = socket(AF_LOCAL, SOCK_STREAM, 0);
    assert(fd >= 0);

    struct ifreq ifr;
    memset(&ifr, 0, sizeof(ifr));
    // strncpy() does not NUL-terminate when the source fills the buffer;
    // copy one byte less and terminate explicitly so the ioctl never sees
    // an unterminated name.
    strncpy(ifr.ifr_name, name_.c_str(), IF_NAMESIZE - 1);
    ifr.ifr_name[IF_NAMESIZE - 1] = '\0';
    if (ioctl(fd, SIOCGIFFLAGS, (void *)&ifr) < 0) {
        // This ioctl READS the flags; log accordingly (was mislabelled as
        // "setting promiscuous flag").
        LOG(ERROR, "Error <" << errno << ": " << strerror(errno) <<
            "> getting flags for interface <" << name_ << ">");
        close(fd);
        return;
    }

    ifr.ifr_flags |= IFF_PROMISC;
    if (ioctl(fd, SIOCSIFFLAGS, (void *)&ifr) < 0) {
        LOG(ERROR, "Error <" << errno << ": " << strerror(errno) <<
            "> setting promiscuous flag for interface <" << name_ << ">");
        close(fd);
        return;
    }

    close(fd);
}
/////////////////////////////////////////////////////////////////////////////
// PhysicalInterfaceKey routines
/////////////////////////////////////////////////////////////////////////////
// Key for physical interfaces: name-based, nil UUID, add/delete/change ops.
PhysicalInterfaceKey::PhysicalInterfaceKey(const std::string &name) :
    InterfaceKey(AgentKey::ADD_DEL_CHANGE, Interface::PHYSICAL, nil_uuid(),
                 name, false) {
}
// No resources beyond the base key.
PhysicalInterfaceKey::~PhysicalInterfaceKey() {
}
// Allocate a bare entry (no config data) for DB-table key lookups.
Interface *PhysicalInterfaceKey::AllocEntry(const InterfaceTable *table) const {
    return new PhysicalInterface(name_);
}
// Allocate and initialize an entry from config data: encap, ARP handling,
// subtype and display name, then resolve VRF/device through OnChange().
// VMWARE and CONFIG ports are marked persistent so they survive resyncs.
Interface *PhysicalInterfaceKey::AllocEntry(const InterfaceTable *table,
                                            const InterfaceData *data) const {
    PhysicalInterface *intf = new PhysicalInterface(name_);

    const PhysicalInterfaceData *phy_data =
        static_cast<const PhysicalInterfaceData *>(data);

    intf->encap_type_ = phy_data->encap_type_;
    intf->no_arp_ = phy_data->no_arp_;
    intf->subtype_ = phy_data->subtype_;
    intf->display_name_ = phy_data->display_name_;
    if (intf->subtype_ == PhysicalInterface::VMWARE ||
        intf->subtype_ == PhysicalInterface::CONFIG) {
        intf->persistent_ = true;
    }

    intf->OnChange(table, phy_data);
    return intf;
}
// Deep copy of this key (caller owns the result).
InterfaceKey *PhysicalInterfaceKey::Clone() const {
    return new PhysicalInterfaceKey(name_);
}
/////////////////////////////////////////////////////////////////////////////
// PhysicalInterfaceData routines
/////////////////////////////////////////////////////////////////////////////
// Bundle of config attributes carried in a DBRequest for a physical port.
// EthInit() stamps the data as an ethernet-type interface in vrf_name.
PhysicalInterfaceData::PhysicalInterfaceData(Agent *agent, IFMapNode *node,
                                             const string &vrf_name,
                                             PhysicalInterface::SubType subtype,
                                             PhysicalInterface::EncapType encap,
                                             bool no_arp,
                                             const uuid &device_uuid,
                                             const string &display_name,
                                             const Ip4Address &ip,
                                             Interface::Transport transport) :
    InterfaceData(agent, node, transport), subtype_(subtype), encap_type_(encap),
    no_arp_(no_arp), device_uuid_(device_uuid), display_name_(display_name),
    ip_(ip) {
    EthInit(vrf_name);
}
/////////////////////////////////////////////////////////////////////////////
// Config handling routines
/////////////////////////////////////////////////////////////////////////////
// Helper: heap-allocate the name-based key used by the config handlers below.
static PhysicalInterfaceKey *BuildKey(const std::string &name) {
    return new PhysicalInterfaceKey(name);
}
// IFMap callback for physical-interface nodes. Adds/changes are deferred to
// the config-manager queue; only deletes are turned into a DBRequest here.
// Interfaces whose FQDN names a different physical-router are handed to the
// remote-interface path instead. Returns true when req should be enqueued.
bool InterfaceTable::PhysicalInterfaceIFNodeToReq(IFMapNode *node,
                                                  DBRequest &req,
                                                  const boost::uuids::uuid &u) {
    // Enqueue request to config-manager if add/change
    if ((req.oper != DBRequest::DB_ENTRY_DELETE) &&
        (node->IsDeleted() == false)) {
        agent()->config_manager()->AddPhysicalInterfaceNode(node);
        return false;
    }

    autogen::PhysicalInterface *port =
        static_cast <autogen::PhysicalInterface *>(node->GetObject());
    assert(port);

    // Get the physical-router from FQDN <domain>:<router>:<port>
    string device = "";
    vector<string> elements;
    split(elements, node->name(), boost::is_any_of(":"), boost::token_compress_on);
    if (elements.size() == 3) {
        device = elements[1];
    }

    // Interface on another router: treat as a remote physical interface.
    if (elements.size() == 3 && device != agent()->agent_name()) {
        if (RemotePhysicalInterfaceIFNodeToReq(node, req, u)) {
            Enqueue(&req);
        }
        return false;
    }

    req.key.reset(BuildKey(node->name()));
    req.oper = DBRequest::DB_ENTRY_DELETE;
    return true;
}
// Deferred config processing (invoked by config-manager for nodes queued in
// PhysicalInterfaceIFNodeToReq). Resolves the owning physical-router UUID via
// the IFMap adjacency, builds an ADD_CHANGE request, and enqueues it itself —
// hence always returns false. Remote-router interfaces are delegated.
bool InterfaceTable::PhysicalInterfaceProcessConfig(IFMapNode *node,
        DBRequest &req, const boost::uuids::uuid &u) {
    if (node->IsDeleted()) {
        return false;
    }

    autogen::PhysicalInterface *port =
        static_cast <autogen::PhysicalInterface *>(node->GetObject());
    assert(port);

    // Get the physical-router from FQDN <domain>:<router>:<port>
    string device = "";
    vector<string> elements;
    split(elements, node->name(), boost::is_any_of(":"), boost::token_compress_on);
    if (elements.size() == 3) {
        device = elements[1];
    }

    // If physical-router does not match agent_name, treat as remote interface
    if (elements.size() == 3 && device != agent()->agent_name()) {
        return RemotePhysicalInterfaceIFNodeToReq(node, req, u);
    }

    req.key.reset(BuildKey(node->name()));

    boost::uuids::uuid dev_uuid = nil_uuid();
    // Find link with physical-router adjacency
    IFMapNode *adj_node = NULL;
    adj_node = agent()->config_manager()->FindAdjacentIFMapNode(node,
                                                                "physical-router");
    if (adj_node) {
        autogen::PhysicalRouter *router =
            static_cast<autogen::PhysicalRouter *>(adj_node->GetObject());
        autogen::IdPermsType id_perms = router->id_perms();
        CfgUuidSet(id_perms.uuid.uuid_mslong, id_perms.uuid.uuid_lslong,
                   dev_uuid);
    }

    req.oper = DBRequest::DB_ENTRY_ADD_CHANGE;
    req.data.reset(new PhysicalInterfaceData(agent(), node,
                                             agent()->fabric_vrf_name(),
                                             PhysicalInterface::CONFIG,
                                             PhysicalInterface::ETHERNET,
                                             false, dev_uuid,
                                             port->display_name(),
                                             Ip4Address(0),
                                             Interface::TRANSPORT_ETHERNET));
    pi_ifnode_to_req_++;  // introspection counter
    Enqueue(&req);
    return false;
}
/////////////////////////////////////////////////////////////////////////////
// Utility methods
/////////////////////////////////////////////////////////////////////////////
// Enqueue DBRequest to create a Host Interface
// Enqueue DBRequest to create a Host Interface (asynchronous; compare with
// Create() below which processes the request inline). Note the interface
// name doubles as the display name here.
void PhysicalInterface::CreateReq(InterfaceTable *table, const string &ifname,
                                  const string &vrf_name, SubType subtype,
                                  EncapType encap, bool no_arp,
                                  const uuid &device_uuid,
                                  const Ip4Address &ip,
                                  Interface::Transport transport) {
    DBRequest req(DBRequest::DB_ENTRY_ADD_CHANGE);
    req.key.reset(new PhysicalInterfaceKey(ifname));
    req.data.reset(new PhysicalInterfaceData(NULL, NULL, vrf_name, subtype,
                                             encap, no_arp, device_uuid,
                                             ifname, ip, transport));
    table->Enqueue(&req);
}
// Synchronous variant of CreateReq(): the request is processed inline via
// table->Process() instead of being queued.
void PhysicalInterface::Create(InterfaceTable *table, const string &ifname,
                               const string &vrf_name, SubType subtype,
                               EncapType encap, bool no_arp,
                               const uuid &device_uuid,
                               const Ip4Address &ip,
                               Interface::Transport transport) {
    DBRequest req(DBRequest::DB_ENTRY_ADD_CHANGE);
    req.key.reset(new PhysicalInterfaceKey(ifname));
    req.data.reset(new PhysicalInterfaceData(NULL, NULL, vrf_name, subtype,
                                             encap, no_arp, device_uuid,
                                             ifname, ip, transport));
    table->Process(req);
}
// Enqueue DBRequest to delete a Host Interface
// Enqueue DBRequest to delete a Host Interface (asynchronous).
void PhysicalInterface::DeleteReq(InterfaceTable *table, const string &ifname) {
    DBRequest req(DBRequest::DB_ENTRY_DELETE);
    req.key.reset(new PhysicalInterfaceKey(ifname));
    req.data.reset(NULL);
    table->Enqueue(&req);
}
// Synchronous variant of DeleteReq(): processed inline.
void PhysicalInterface::Delete(InterfaceTable *table, const string &ifname) {
    DBRequest req(DBRequest::DB_ENTRY_DELETE);
    req.key.reset(new PhysicalInterfaceKey(ifname));
    req.data.reset(NULL);
    table->Process(req);
}
|
// YASL scripts that are each expected to raise a value error when executed;
// consumed by the error-path test driver. Paths are relative to the repo
// root, grouped by module (io, list, str).
static const char *value_errors[] = {
    "test/errors/value/io/open1.yasl",
    "test/errors/value/io/open2.yasl",
    "test/errors/value/io/open3.yasl",
    "test/errors/value/io/read1.yasl",
    "test/errors/value/io/read2.yasl",
    "test/errors/value/io/seek.yasl",
    "test/errors/value/list/__get.yasl",
    "test/errors/value/list/pop.yasl",
    "test/errors/value/list/__set.yasl",
    "test/errors/value/list/sort.yasl",
    "test/errors/value/str/__get.yasl",
    "test/errors/value/str/replace.yasl",
    "test/errors/value/str/rep.yasl",
    "test/errors/value/str/split.yasl",
};
|
/*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "SkAndroidCodec.h"
#include "SkBitmap.h"
#include "SkCodec.h"
#include "SkCommonFlags.h"
#include "SkImageEncoder.h"
#include "SkOSPath.h"
#include "SkStream.h"
#include "Resources.h"
#include "Test.h"
#include "sk_tool_utils.h"
#include <initializer_list>
#include <vector>
// Debug helper: dump a bitmap as <writePath>/<name>.png when the test runner
// was given --writePath; silently does nothing otherwise.
static void write_bm(const char* name, const SkBitmap& bm) {
    if (FLAGS_writePath.isEmpty()) {
        return;
    }

    SkString filename = SkOSPath::Join(FLAGS_writePath[0], name);
    filename.appendf(".png");
    SkFILEWStream file(filename.c_str());
    if (!SkEncodeImage(&file, bm, SkEncodedImageFormat::kPNG, 100)) {
        SkDebugf("failed to write '%s'\n", filename.c_str());
    }
}
// Asking for frame info on a GIF truncated after 23 bytes must not crash.
// NOTE(review): assumes MakeFromData never returns null for this truncated
// prefix — confirm; a null here would dereference.
DEF_TEST(Codec_trunc, r) {
    sk_sp<SkData> data(GetResourceAsData("images/box.gif"));
    if (!data) {
        // Test is a no-op when the resource directory is unavailable.
        return;
    }
    SkCodec::MakeFromData(SkData::MakeSubset(data.get(), 0, 23))->getFrameInfo();
}
// 565 does not support alpha, but there is no reason for it not to support an
// animated image with a frame that has alpha but then blends onto an opaque
// frame making the result opaque. Test that we can decode such a frame.
DEF_TEST(Codec_565, r) {
    sk_sp<SkData> data(GetResourceAsData("images/blendBG.webp"));
    if (!data) {
        // No resources available; skip quietly.
        return;
    }

    std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(std::move(data)));
    auto info = codec->getInfo().makeColorType(kRGB_565_SkColorType);
    SkBitmap bm;
    bm.allocPixels(info);

    // Decode frame 1 standalone (no prior frame supplied).
    SkCodec::Options options;
    options.fFrameIndex = 1;
    options.fPriorFrame = SkCodec::kNone;

    const auto result = codec->getPixels(info, bm.getPixels(), bm.rowBytes(),
                                         &options);
    REPORTER_ASSERT(r, result == SkCodec::kSuccess);
}
// True when the frame's disposal method restores the canvas to the previous
// frame's contents before the next frame is drawn.
static bool restore_previous(const SkCodec::FrameInfo& info) {
    return info.fDisposalMethod == SkCodecAnimation::DisposalMethod::kRestorePrevious;
}
DEF_TEST(Codec_frames, r) {
#define kOpaque kOpaque_SkAlphaType
#define kUnpremul kUnpremul_SkAlphaType
#define kKeep SkCodecAnimation::DisposalMethod::kKeep
#define kRestoreBG SkCodecAnimation::DisposalMethod::kRestoreBGColor
#define kRestorePrev SkCodecAnimation::DisposalMethod::kRestorePrevious
static const struct {
const char* fName;
int fFrameCount;
// One less than fFramecount, since the first frame is always
// independent.
std::vector<int> fRequiredFrames;
// Same, since the first frame should match getInfo
std::vector<SkAlphaType> fAlphas;
// The size of this one should match fFrameCount for animated, empty
// otherwise.
std::vector<int> fDurations;
int fRepetitionCount;
std::vector<SkCodecAnimation::DisposalMethod> fDisposalMethods;
} gRecs[] = {
{ "images/required.gif", 7,
{ 0, 1, 2, 3, 4, 5 },
{ kOpaque, kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul },
{ 100, 100, 100, 100, 100, 100, 100 },
0,
{ kKeep, kRestoreBG, kKeep, kKeep, kKeep, kRestoreBG, kKeep } },
{ "images/alphabetAnim.gif", 13,
{ SkCodec::kNone, 0, 0, 0, 0, 5, 6, SkCodec::kNone,
SkCodec::kNone, 9, 10, 11 },
{ kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul,
kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul },
{ 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100 },
0,
{ kKeep, kRestorePrev, kRestorePrev, kRestorePrev, kRestorePrev,
kRestoreBG, kKeep, kRestoreBG, kRestoreBG, kKeep, kKeep,
kRestoreBG, kKeep } },
{ "images/randPixelsAnim2.gif", 4,
// required frames
{ 0, 0, 1 },
// alphas
{ kOpaque, kOpaque, kOpaque },
// durations
{ 0, 1000, 170, 40 },
// repetition count
0,
{ kKeep, kKeep, kRestorePrev, kKeep } },
{ "images/randPixelsAnim.gif", 13,
// required frames
{ 0, 1, 2, 3, 4, 3, 6, 7, 7, 7, 9, 9 },
{ kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul,
kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul, kUnpremul },
// durations
{ 0, 1000, 170, 40, 220, 7770, 90, 90, 90, 90, 90, 90, 90 },
// repetition count
0,
{ kKeep, kKeep, kKeep, kKeep, kRestoreBG, kRestoreBG, kRestoreBG,
kRestoreBG, kRestorePrev, kRestoreBG, kRestorePrev, kRestorePrev,
kRestorePrev, } },
{ "images/box.gif", 1, {}, {}, {}, 0, { kKeep } },
{ "images/color_wheel.gif", 1, {}, {}, {}, 0, { kKeep } },
{ "images/test640x479.gif", 4, { 0, 1, 2 },
{ kOpaque, kOpaque, kOpaque },
{ 200, 200, 200, 200 },
SkCodec::kRepetitionCountInfinite,
{ kKeep, kKeep, kKeep, kKeep } },
{ "images/colorTables.gif", 2, { 0 }, { kOpaque }, { 1000, 1000 }, 5,
{ kKeep, kKeep } },
{ "images/arrow.png", 1, {}, {}, {}, 0, {} },
{ "images/google_chrome.ico", 1, {}, {}, {}, 0, {} },
{ "images/brickwork-texture.jpg", 1, {}, {}, {}, 0, {} },
#if defined(SK_CODEC_DECODES_RAW) && (!defined(_WIN32))
{ "images/dng_with_preview.dng", 1, {}, {}, {}, 0, {} },
#endif
{ "images/mandrill.wbmp", 1, {}, {}, {}, 0, {} },
{ "images/randPixels.bmp", 1, {}, {}, {}, 0, {} },
{ "images/yellow_rose.webp", 1, {}, {}, {}, 0, {} },
{ "images/webp-animated.webp", 3, { 0, 1 }, { kOpaque, kOpaque },
{ 1000, 500, 1000 }, SkCodec::kRepetitionCountInfinite,
{ kKeep, kKeep, kKeep } },
{ "images/blendBG.webp", 7, { 0, SkCodec::kNone, SkCodec::kNone, SkCodec::kNone,
4, 4 },
{ kOpaque, kOpaque, kUnpremul, kOpaque, kUnpremul, kUnpremul },
{ 525, 500, 525, 437, 609, 729, 444 }, 7,
{ kKeep, kKeep, kKeep, kKeep, kKeep, kKeep, kKeep } },
{ "images/required.webp", 7,
{ 0, 1, 1, SkCodec::kNone, 4, 4 },
{ kOpaque, kUnpremul, kUnpremul, kOpaque, kOpaque, kOpaque },
{ 100, 100, 100, 100, 100, 100, 100 },
1,
{ kKeep, kRestoreBG, kKeep, kKeep, kKeep, kRestoreBG, kKeep } },
};
#undef kOpaque
#undef kUnpremul
#undef kKeep
#undef kRestorePrev
#undef kRestoreBG
for (const auto& rec : gRecs) {
sk_sp<SkData> data(GetResourceAsData(rec.fName));
if (!data) {
// Useful error statement, but sometimes people run tests without
// resources, and they do not want to see these messages.
//ERRORF(r, "Missing resources? Could not find '%s'", rec.fName);
continue;
}
std::unique_ptr<SkCodec> codec(SkCodec::MakeFromData(data));
if (!codec) {
ERRORF(r, "Failed to create an SkCodec from '%s'", rec.fName);
continue;
}
{
SkCodec::FrameInfo frameInfo;
REPORTER_ASSERT(r, !codec->getFrameInfo(0, &frameInfo));
}
const int repetitionCount = codec->getRepetitionCount();
if (repetitionCount != rec.fRepetitionCount) {
ERRORF(r, "%s repetition count does not match! expected: %i\tactual: %i",
rec.fName, rec.fRepetitionCount, repetitionCount);
}
const int expected = rec.fFrameCount;
if (rec.fRequiredFrames.size() + 1 != static_cast<size_t>(expected)) {
ERRORF(r, "'%s' has wrong number entries in fRequiredFrames; expected: %i\tactual: %i",
rec.fName, expected - 1, rec.fRequiredFrames.size());
continue;
}
if (expected > 1) {
if (rec.fDurations.size() != static_cast<size_t>(expected)) {
ERRORF(r, "'%s' has wrong number entries in fDurations; expected: %i\tactual: %i",
rec.fName, expected, rec.fDurations.size());
continue;
}
if (rec.fAlphas.size() + 1 != static_cast<size_t>(expected)) {
ERRORF(r, "'%s' has wrong number entries in fAlphas; expected: %i\tactual: %i",
rec.fName, expected - 1, rec.fAlphas.size());
continue;
}
if (rec.fDisposalMethods.size() != static_cast<size_t>(expected)) {
ERRORF(r, "'%s' has wrong number entries in fDisposalMethods; "
"expected %i\tactual: %i",
rec.fName, expected, rec.fDisposalMethods.size());
continue;
}
}
enum class TestMode {
kVector,
kIndividual,
};
for (auto mode : { TestMode::kVector, TestMode::kIndividual }) {
// Re-create the codec to reset state and test parsing.
codec = SkCodec::MakeFromData(data);
int frameCount;
std::vector<SkCodec::FrameInfo> frameInfos;
switch (mode) {
case TestMode::kVector:
frameInfos = codec->getFrameInfo();
// getFrameInfo returns empty set for non-animated.
frameCount = frameInfos.empty() ? 1 : frameInfos.size();
break;
case TestMode::kIndividual:
frameCount = codec->getFrameCount();
break;
}
if (frameCount != expected) {
ERRORF(r, "'%s' expected frame count: %i\tactual: %i",
rec.fName, expected, frameCount);
continue;
}
// From here on, we are only concerned with animated images.
if (1 == frameCount) {
continue;
}
for (int i = 0; i < frameCount; i++) {
SkCodec::FrameInfo frameInfo;
switch (mode) {
case TestMode::kVector:
frameInfo = frameInfos[i];
break;
case TestMode::kIndividual:
REPORTER_ASSERT(r, codec->getFrameInfo(i, nullptr));
REPORTER_ASSERT(r, codec->getFrameInfo(i, &frameInfo));
break;
}
if (rec.fDurations[i] != frameInfo.fDuration) {
ERRORF(r, "%s frame %i's durations do not match! expected: %i\tactual: %i",
rec.fName, i, rec.fDurations[i], frameInfo.fDuration);
}
auto to_string = [](SkAlphaType alpha) {
switch (alpha) {
case kUnpremul_SkAlphaType:
return "unpremul";
case kOpaque_SkAlphaType:
return "opaque";
default:
SkASSERT(false);
return "unknown";
}
};
auto expectedAlpha = 0 == i ? codec->getInfo().alphaType() : rec.fAlphas[i-1];
auto alpha = frameInfo.fAlphaType;
if (expectedAlpha != alpha) {
ERRORF(r, "%s's frame %i has wrong alpha type! expected: %s\tactual: %s",
rec.fName, i, to_string(expectedAlpha), to_string(alpha));
}
if (0 == i) {
REPORTER_ASSERT(r, frameInfo.fRequiredFrame == SkCodec::kNone);
} else if (rec.fRequiredFrames[i-1] != frameInfo.fRequiredFrame) {
ERRORF(r, "%s's frame %i has wrong dependency! expected: %i\tactual: %i",
rec.fName, i, rec.fRequiredFrames[i-1], frameInfo.fRequiredFrame);
}
REPORTER_ASSERT(r, frameInfo.fDisposalMethod == rec.fDisposalMethods[i]);
}
if (TestMode::kIndividual == mode) {
// No need to test decoding twice.
continue;
}
// Compare decoding in multiple ways:
// - Start from scratch for each frame. |codec| will have to decode the required frame
// (and any it depends on) to decode. This is stored in |cachedFrames|.
// - Provide the frame that a frame depends on, so |codec| just has to blend.
// - Provide a frame after the required frame, which will be covered up by the newest
// frame.
// All should look the same.
std::vector<SkBitmap> cachedFrames(frameCount);
const auto info = codec->getInfo().makeColorType(kN32_SkColorType);
auto decode = [&](SkBitmap* bm, int index, int cachedIndex) {
auto decodeInfo = info;
if (index > 0) {
decodeInfo = info.makeAlphaType(frameInfos[index].fAlphaType);
}
bm->allocPixels(decodeInfo);
if (cachedIndex != SkCodec::kNone) {
// First copy the pixels from the cached frame
const bool success = sk_tool_utils::copy_to(bm, kN32_SkColorType,
cachedFrames[cachedIndex]);
REPORTER_ASSERT(r, success);
}
SkCodec::Options opts;
opts.fFrameIndex = index;
opts.fPriorFrame = cachedIndex;
const auto result = codec->getPixels(decodeInfo, bm->getPixels(), bm->rowBytes(),
&opts);
if (cachedIndex != SkCodec::kNone && restore_previous(frameInfos[cachedIndex])) {
if (result == SkCodec::kInvalidParameters) {
return true;
}
ERRORF(r, "Using a kRestorePrevious frame as fPriorFrame should fail");
return false;
}
if (result != SkCodec::kSuccess) {
ERRORF(r, "Failed to decode frame %i from %s when providing prior frame %i, "
"error %i", index, rec.fName, cachedIndex, result);
}
return result == SkCodec::kSuccess;
};
for (int i = 0; i < frameCount; i++) {
SkBitmap& cachedFrame = cachedFrames[i];
if (!decode(&cachedFrame, i, SkCodec::kNone)) {
continue;
}
const auto reqFrame = frameInfos[i].fRequiredFrame;
if (reqFrame == SkCodec::kNone) {
// Nothing to compare against.
continue;
}
for (int j = reqFrame; j < i; j++) {
SkBitmap frame;
if (restore_previous(frameInfos[j])) {
(void) decode(&frame, i, j);
continue;
}
if (!decode(&frame, i, j)) {
continue;
}
// Now verify they're equal.
const size_t rowLen = info.bytesPerPixel() * info.width();
for (int y = 0; y < info.height(); y++) {
const void* cachedAddr = cachedFrame.getAddr(0, y);
SkASSERT(cachedAddr != nullptr);
const void* addr = frame.getAddr(0, y);
SkASSERT(addr != nullptr);
const bool lineMatches = memcmp(cachedAddr, addr, rowLen) == 0;
if (!lineMatches) {
SkString name = SkStringPrintf("cached_%i", i);
write_bm(name.c_str(), cachedFrame);
name = SkStringPrintf("frame_%i", i);
write_bm(name.c_str(), frame);
ERRORF(r, "%s's frame %i is different (starting from line %i) when "
"providing prior frame %i!", rec.fName, i, y, j);
break;
}
}
}
}
}
}
}
// Verify that a webp image can be animated scaled down. This image has a
// kRestoreBG frame, so it is an interesting image to test. After decoding that
// frame, we have to erase its rectangle. The rectangle has to be adjusted
// based on the scaled size.
DEF_TEST(AndroidCodec_animated, r) {
    if (GetResourcePath().isEmpty()) {
        return;
    }

    const char* file = "images/required.webp";
    sk_sp<SkData> data(GetResourceAsData(file));
    if (!data) {
        ERRORF(r, "Missing %s", file);
        return;
    }

    auto codec = SkAndroidCodec::MakeFromCodec(SkCodec::MakeFromData(std::move(data)));
    if (!codec) {
        ERRORF(r, "Failed to decode %s", file);
        return;
    }

    auto info = codec->getInfo().makeAlphaType(kPremul_SkAlphaType);
    for (int sampleSize : { 8, 32, 100 }) {
        // Decode every frame at this scaled-down size.
        const auto scaledDims = codec->codec()->getScaledDimensions(1.0f / sampleSize);
        info = info.makeWH(scaledDims.width(), scaledDims.height());
        SkBitmap bitmap;
        bitmap.allocPixels(info);

        SkCodec::Options opts;
        for (int frame = 0; frame < codec->codec()->getFrameCount(); ++frame) {
            SkCodec::FrameInfo frameInfo;
            REPORTER_ASSERT(r, codec->codec()->getFrameInfo(frame, &frameInfo));
            if (frame == 5) {
                // This frame disposes to the background color; decoding it at
                // a reduced size exercises the scaled erase rectangle.
                REPORTER_ASSERT(r, frameInfo.fDisposalMethod
                        == SkCodecAnimation::DisposalMethod::kRestoreBGColor);
            }
            opts.fFrameIndex = frame;
            opts.fPriorFrame = frame - 1;
            info = info.makeAlphaType(frameInfo.fAlphaType);
            const auto result = codec->codec()->getPixels(info, bitmap.getPixels(),
                                                          bitmap.rowBytes(), &opts);
            REPORTER_ASSERT(r, result == SkCodec::kSuccess);
        }
    }
}
|
/*
* Author: Crownstone Team
* Copyright: Crownstone (https://crownstone.rocks)
* Date: Apr 15, 2020
* License: LGPLv3+, Apache License 2.0, and/or MIT (triple-licensed)
*/
#include <ble/cs_Nordic.h>
#include <drivers/cs_GpRegRet.h>
#include <logging/cs_Logger.h>
#include <util/cs_BleError.h>
// Set true to enable debug logs.
#define CS_GPREGRET_DEBUG false
#if CS_GPREGRET_DEBUG == true
#define LOGGpRegRetDebug LOGd
#else
#define LOGGpRegRetDebug LOGnone
#endif
/**
 * Read the raw value of the given general purpose retention register.
 *
 * @param[in] registerId   Which GPREGRET register to read.
 * @return The register value, or 0 when the SoftDevice call fails.
 */
uint32_t GpRegRet::getValue(GpRegRetId registerId) {
	// Zero-initialize: if sd_power_gpregret_get() fails, the original code
	// returned an uninitialized stack variable (garbage). The return code is
	// intentionally not APP_ERROR_CHECKed here, so a failure now yields 0.
	uint32_t gpregret = 0;
	sd_power_gpregret_get(registerId, &gpregret);
	return gpregret;
}
/** Clear the whole GPREGRET register: counter bits and flag bits alike. */
void GpRegRet::clearAll() {
	LOGGpRegRetDebug("clearAll");
	const uint32_t allBits = 0xFFFFFFFF;
	const uint32_t retCode = sd_power_gpregret_clr(GpRegRetId::GPREGRET, allBits);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/** Clear only the counter bits of GPREGRET, leaving the flag bits intact. */
void GpRegRet::clearCounter() {
	LOGGpRegRetDebug("clearCounter");
	const uint32_t retCode =
			sd_power_gpregret_clr(GpRegRetId::GPREGRET, CS_GPREGRET_COUNTER_MASK);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/**
 * Store a new counter value in GPREGRET.
 *
 * Values above CS_GPREGRET_COUNTER_MAX are clamped (with an error log).
 */
void GpRegRet::setCounter(uint32_t value) {
	LOGGpRegRetDebug("setCounter %u", value);
	if (value > CS_GPREGRET_COUNTER_MAX) {
		LOGe("Value too high: %u", value);
		value = CS_GPREGRET_COUNTER_MAX;
	}
	// Wipe the old counter bits first, so stale bits cannot survive the set.
	clearCounter();
	const uint32_t retCode = sd_power_gpregret_set(GpRegRetId::GPREGRET, value);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/** Clear every flag bit of GPREGRET (everything outside the counter mask). */
void GpRegRet::clearFlags() {
	LOGGpRegRetDebug("clearFlags");
	const uint32_t counterBits = CS_GPREGRET_COUNTER_MASK;
	const uint32_t flagBits    = ~counterBits;
	const uint32_t retCode     = sd_power_gpregret_clr(GpRegRetId::GPREGRET, flagBits);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/** Set a single flag bit in GPREGRET. */
void GpRegRet::setFlag(GpRegRetFlag flag) {
	LOGGpRegRetDebug("setFlag %u", flag);
	const uint32_t retCode = sd_power_gpregret_set(GpRegRetId::GPREGRET, flag);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/** Clear a single flag bit in GPREGRET. */
void GpRegRet::clearFlag(GpRegRetFlag flag) {
	LOGGpRegRetDebug("clearFlag %u", flag);
	const uint32_t retCode = sd_power_gpregret_clr(GpRegRetId::GPREGRET, flag);
	APP_ERROR_CHECK(retCode);
	printRegRet();
}
/** @return true when the given flag bit is currently set in GPREGRET. */
bool GpRegRet::isFlagSet(GpRegRetFlag flag) {
	LOGGpRegRetDebug("isFlagSet %u", flag);
	return (getValue(GpRegRetId::GPREGRET) & flag) != 0;
}
// Dump both retention registers for debugging.
// Compiles to an empty function unless CS_GPREGRET_DEBUG is true.
void GpRegRet::printRegRet() {
#if CS_GPREGRET_DEBUG == true
	LOGGpRegRetDebug("GPRegRet: %u %u", getValue(GPREGRET), getValue(GPREGRET2));
#endif
}
|
#include <halley/concurrency/concurrent.h>
#include <halley/concurrency/executor.h>
#include <halley/support/exception.h>
#include "halley/text/string_converter.h"
#include "halley/support/logger.h"
using namespace Halley;
Executors* Executors::instance = nullptr;
// A freshly-constructed queue is alive (not aborted) and holds no tasks.
ExecutionQueue::ExecutionQueue()
	: aborted(false)
{
	hasTasks = false;
}
// Blocks until a task is available and returns it.
//
// If abort() is called while (or before) waiting, pending tasks are dropped
// and a no-op task is returned, so a caller such as Executor::runForever()
// can re-check its own `running` flag and exit its loop.
TaskBase ExecutionQueue::getNext()
{
	std::unique_lock<std::mutex> lock(mutex);
	while (queue.empty()) {
		// wait() releases the mutex and re-acquires it on wake-up; the
		// enclosing while-loop guards against spurious wake-ups.
		if (!aborted) {
			condition.wait(lock);
		}
		if (aborted) {
			queue.clear();
			// Hand back a do-nothing task instead of blocking forever.
			return TaskBase([] () {});
		}
	}
	TaskBase value = queue.front();
	queue.pop_front();
	return value;
}
// Drains the queue atomically, returning every pending task at once.
std::vector<TaskBase> ExecutionQueue::getAll()
{
	std::unique_lock<std::mutex> lock(mutex);
	std::vector<TaskBase> drained(queue.begin(), queue.end());
	queue.clear();
	hasTasks.store(false);
	return drained;
}
// Enqueues a task and wakes one waiting executor.
// Without thread support the task is simply executed inline.
void ExecutionQueue::addToQueue(TaskBase task)
{
#if HAS_THREADS
	{
		std::unique_lock<std::mutex> lock(mutex);
		queue.emplace_back(task);
		hasTasks.store(true);
	}
	// Notify after releasing the lock, so the woken thread does not
	// immediately block on a still-held mutex.
	condition.notify_one();
#else
	task();
#endif
}
// Returns the process-wide Executors instance; throws if set() was never called.
Executors& Executors::get()
{
	if (instance == nullptr) {
		throw Exception("Executors instance not defined");
	}
	return *instance;
}
void Executors::set(Executors& e)
{
instance = &e;
}
// Number of executors currently attached to (servicing) this queue.
size_t ExecutionQueue::threadCount() const
{
	return attachedCount.load();
}
// Bookkeeping: an executor started servicing this queue.
void ExecutionQueue::onAttached()
{
	attachedCount.fetch_add(1);
}
// Bookkeeping: an executor stopped servicing this queue.
void ExecutionQueue::onDetached()
{
	attachedCount.fetch_sub(1);
}
// Marks the queue as aborted and wakes every waiting executor so that
// getNext() returns its no-op task. Idempotent: later calls return early.
void ExecutionQueue::abort()
{
	{
		// Scoped so the mutex is released before notify_all(), avoiding
		// waking threads that would immediately block on the held lock.
		std::unique_lock<std::mutex> lock(mutex);
		if (aborted) {
			return;
		}
		aborted = true;
	}
	condition.notify_all();
}
// The CPU queue acts as the default execution queue.
ExecutionQueue& ExecutionQueue::getDefault()
{
	auto& executors = Executors::get();
	return executors.getCPU();
}
// Binds this executor to a queue and, when threads are available, registers
// it in the queue's attached-thread count.
Executor::Executor(ExecutionQueue& queue)
	: queue(queue)
	, running(true)
{
#if HAS_THREADS
	queue.onAttached();
#endif
}
// Unregisters this executor from the queue's attached-thread count.
Executor::~Executor()
{
#if HAS_THREADS
	queue.onDetached();
#endif
}
// Runs every task that is currently queued, without blocking for more.
// Always returns false.
bool Executor::runPending()
{
#if HAS_THREADS
	auto pending = queue.getAll();
	for (auto& task : pending) {
		task();
	}
#endif
	return false;
}
// Services the queue until stop() is called. An exception escaping a task is
// logged and terminates this executor's loop.
void Executor::runForever()
{
#if HAS_THREADS
	try {
		while (running) {
			auto task = queue.getNext();
			// stop() may have fired while we were blocked in getNext(), in
			// which case the (no-op) task must not be run.
			if (running) {
				task();
			}
		}
	} catch (std::exception& e) {
		Logger::logError("Executor aborting due to exception.");
		Logger::logException(e);
	} catch (...) {
		Logger::logError("Executor aborting due to unknown exception.");
	}
#endif
}
// Requests loop exit. `running` is cleared before aborting the queue, so an
// executor woken out of getNext() observes running == false in runForever().
void Executor::stop()
{
#if HAS_THREADS
	running = false;
	queue.abort();
#endif
}
// Spins up `n` worker threads, each draining `queue` through its own Executor.
ThreadPool::ThreadPool(const String& name, ExecutionQueue& queue, size_t n, MakeThread makeThread)
	: name(name)
{
#if HAS_THREADS
	// Create every executor before any thread starts, so the worker lambdas
	// can index `executors` safely.
	for (size_t idx = 0; idx < n; idx++) {
		executors.emplace_back(std::make_unique<Executor>(queue));
	}
	threads.resize(n);
	for (size_t idx = 0; idx < n; idx++) {
		threads[idx] = makeThread(name + " Pool " + toString(idx), [this, idx]()
		{
			try {
				executors[idx]->runForever();
			} catch (std::exception& e) {
				Logger::logException(e);
			} catch (...) {
				Logger::logError("Unknown exception in thread pool");
			}
		});
	}
#endif
}
// Stops every executor first, then joins all worker threads.
ThreadPool::~ThreadPool()
{
#if HAS_THREADS
	for (auto& executor : executors) {
		executor->stop();
	}
	for (auto& worker : threads) {
		worker.join();
	}
#endif
}
|
// Leetcode 146. LRU Cache
// Doubly-linked list of nodes (most-recently-used right after `head`,
// least-recently-used right before `tail`) plus a hash map from key to node
// gives O(1) get and put.
class LRUCache {
public:
    int capacity, cnt;  // maximum number of entries / current number of entries
    struct Node {
        Node* l = nullptr;   // neighbor toward head (more recently used)
        Node* r = nullptr;   // neighbor toward tail (less recently used)
        int key = 0;
        int value = 0;
    };
    Node *head, *tail;                         // sentinels; entries live between them
    std::unordered_map<int, Node*> cache_map;  // key -> node, for O(1) lookup

    LRUCache(int capacity) : capacity(capacity) {
        head = new Node();
        tail = new Node();
        head->r = tail;
        tail->l = head;
        cnt = 0;
    }

    // Free every node, including the sentinels (the original leaked them all).
    ~LRUCache() {
        Node* current = head;
        while (current != nullptr) {
            Node* next = current->r;
            delete current;
            current = next;
        }
    }

    // The list owns raw nodes; a shallow copy would double-delete. Forbid it.
    LRUCache(const LRUCache&) = delete;
    LRUCache& operator=(const LRUCache&) = delete;

    // Returns the value for `key`, marking it most recently used,
    // or -1 when the key is absent.
    int get(int key) {
        auto it = cache_map.find(key);
        if (it == cache_map.end()) {
            return -1;
        }
        Node* current = it->second;
        unlink(current);
        pushFront(current);
        return current->value;
    }

    // Inserts or updates `key`, marking it most recently used. When the cache
    // grows past its capacity, the least recently used entry is evicted.
    void put(int key, int value) {
        auto it = cache_map.find(key);
        Node* current;
        if (it != cache_map.end()) {
            current = it->second;
            current->value = value;
            unlink(current);
        }
        else {
            current = new Node();
            current->key = key;
            current->value = value;
            cnt++;
            cache_map[key] = current;
        }
        pushFront(current);
        if (cnt > capacity) {
            // Evict the LRU entry: the node just before the tail sentinel.
            current = tail->l;
            unlink(current);
            cache_map.erase(current->key);
            delete current;
            cnt--;
        }
    }

private:
    // Detach a node from its current position in the list.
    void unlink(Node* node) {
        node->l->r = node->r;
        node->r->l = node->l;
    }

    // Insert a node right after the head sentinel (most-recently-used slot).
    void pushFront(Node* node) {
        node->r = head->r;
        node->l = head;
        node->l->r = node;
        node->r->l = node;
    }
};
/**
* Your LRUCache object will be instantiated and called as such:
* LRUCache obj = new LRUCache(capacity);
* int param_1 = obj.get(key);
* obj.put(key,value);
*/
|
/*=========================================================================
*
* Copyright Insight Software Consortium
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*=========================================================================*/
// Software Guide : BeginLatex
//
// This example illustrates the use of the \doxygen{RegularStepGradientDescentOptimizer}
// in the context of a deformable registration problem. The code of this example is almost
// identical to the one in Section~\ref{sec:DeformableRegistration8}.
//
// \index{itk::BSplineTransform}
// \index{itk::BSplineTransform!DeformableRegistration}
// \index{itk::RegularStepGradientDescentOptimizer}
//
//
// Software Guide : EndLatex
#include "itkImageRegistrationMethod.h"
#include "itkMattesMutualInformationImageToImageMetric.h"
#include "itkTimeProbesCollectorBase.h"
#include "itkMemoryProbesCollectorBase.h"
// Software Guide : BeginLatex
//
// The following are the most relevant headers to this example.
//
// \index{itk::BSplineTransform!header}
// \index{itk::RegularStepGradientDescentOptimizer!header}
//
// Software Guide : EndLatex
// Software Guide : BeginCodeSnippet
#include "itkBSplineTransform.h"
#include "itkRegularStepGradientDescentOptimizer.h"
// Software Guide : EndCodeSnippet
#include "itkImageFileReader.h"
#include "itkImageFileWriter.h"
#include "itkResampleImageFilter.h"
#include "itkCastImageFilter.h"
#include "itkSquaredDifferenceImageFilter.h"
#include "itkTransformFileReader.h"
// The following section of code implements a Command observer
// used to monitor the evolution of the registration process.
//
#include "itkCommand.h"
// Observer that prints the optimizer's current iteration number and metric
// value every time the optimizer fires an IterationEvent.
class CommandIterationUpdate : public itk::Command
{
public:
  typedef CommandIterationUpdate   Self;
  typedef itk::Command             Superclass;
  typedef itk::SmartPointer<Self>  Pointer;
  itkNewMacro( Self );

protected:
  CommandIterationUpdate() {}

public:
  typedef itk::RegularStepGradientDescentOptimizer  OptimizerType;
  typedef const OptimizerType *                     OptimizerPointer;

  // Non-const overload simply forwards to the const overload.
  void Execute(itk::Object *caller, const itk::EventObject & event)
    {
    Execute( (const itk::Object *)caller, event);
    }

  void Execute(const itk::Object * object, const itk::EventObject & event)
    {
    if( !(itk::IterationEvent().CheckEvent( &event )) )
      {
      return;
      }
    OptimizerPointer optimizer =
      dynamic_cast< OptimizerPointer >( object );
    // Guard against being attached to an object of the wrong type: the
    // original dereferenced the dynamic_cast result without a null check.
    if( !optimizer )
      {
      return;
      }
    std::cout << optimizer->GetCurrentIteration() << " ";
    std::cout << optimizer->GetValue() << " ";
    std::cout << std::endl;
    }
};
// Deformable (BSpline) registration driver: reads a fixed and a moving image,
// optimizes a BSpline transform with RegularStepGradientDescentOptimizer and
// Mattes mutual information, resamples the moving image, and optionally
// writes difference images, the deformation field, and the final parameters.
int main( int argc, char *argv[] )
{
  if( argc < 4 )
    {
    std::cerr << "Missing Parameters " << std::endl;
    std::cerr << "Usage: " << argv[0];
    std::cerr << " fixedImageFile movingImageFile outputImagefile ";
    std::cerr << " [differenceOutputfile] [differenceBeforeRegistration] ";
    std::cerr << " [deformationField] ";
    std::cerr << " [useExplicitPDFderivatives ] [useCachingBSplineWeights ] ";
    std::cerr << " [filenameForFinalTransformParameters] ";
    std::cerr << " [maximumStepLength] [maximumNumberOfIterations]";
    std::cerr << std::endl;
    return EXIT_FAILURE;
    }
  const unsigned int ImageDimension = 3;
  typedef signed short PixelType;
  typedef itk::Image< PixelType, ImageDimension > FixedImageType;
  typedef itk::Image< PixelType, ImageDimension > MovingImageType;
  const unsigned int SpaceDimension = ImageDimension;
  const unsigned int SplineOrder = 3;
  typedef double CoordinateRepType;
  typedef itk::BSplineTransform<
    CoordinateRepType,
    SpaceDimension,
    SplineOrder > TransformType;
  typedef itk::RegularStepGradientDescentOptimizer OptimizerType;
  typedef itk::MattesMutualInformationImageToImageMetric<
    FixedImageType,
    MovingImageType > MetricType;
  typedef itk:: LinearInterpolateImageFunction<
    MovingImageType,
    double > InterpolatorType;
  typedef itk::ImageRegistrationMethod<
    FixedImageType,
    MovingImageType > RegistrationType;
  MetricType::Pointer metric = MetricType::New();
  OptimizerType::Pointer optimizer = OptimizerType::New();
  InterpolatorType::Pointer interpolator = InterpolatorType::New();
  RegistrationType::Pointer registration = RegistrationType::New();
  registration->SetMetric( metric );
  registration->SetOptimizer( optimizer );
  registration->SetInterpolator( interpolator );
  TransformType::Pointer transform = TransformType::New();
  registration->SetTransform( transform );
  // NOTE(review): the original also constructed an itk::TransformFileReader
  // here that was never used; it has been removed.
  typedef itk::ImageFileReader< FixedImageType > FixedImageReaderType;
  typedef itk::ImageFileReader< MovingImageType > MovingImageReaderType;
  FixedImageReaderType::Pointer fixedImageReader = FixedImageReaderType::New();
  MovingImageReaderType::Pointer movingImageReader = MovingImageReaderType::New();
  fixedImageReader->SetFileName( argv[1] );
  movingImageReader->SetFileName( argv[2] );
  FixedImageType::ConstPointer fixedImage = fixedImageReader->GetOutput();
  registration->SetFixedImage( fixedImage );
  registration->SetMovingImage( movingImageReader->GetOutput() );
  fixedImageReader->Update();
  FixedImageType::RegionType fixedRegion = fixedImage->GetBufferedRegion();
  registration->SetFixedImageRegion( fixedRegion );
  unsigned int numberOfGridNodesInOneDimension = 5;
  // Software Guide : BeginCodeSnippet
  TransformType::PhysicalDimensionsType fixedPhysicalDimensions;
  TransformType::MeshSizeType meshSize;
  TransformType::OriginType fixedOrigin;
  for( unsigned int i=0; i< SpaceDimension; i++ )
    {
    // BUGFIX: copy the origin component-wise. The original assigned the
    // scalar GetOrigin()[i] to the whole point ("fixedOrigin = ..."),
    // clobbering the other components on every iteration.
    fixedOrigin[i] = fixedImage->GetOrigin()[i];
    fixedPhysicalDimensions[i] = fixedImage->GetSpacing()[i] *
      static_cast<double>(
      fixedImage->GetLargestPossibleRegion().GetSize()[i] - 1 );
    }
  meshSize.Fill( numberOfGridNodesInOneDimension - SplineOrder );
  transform->SetTransformDomainOrigin( fixedOrigin );
  transform->SetTransformDomainPhysicalDimensions(
    fixedPhysicalDimensions );
  transform->SetTransformDomainMeshSize( meshSize );
  transform->SetTransformDomainDirection( fixedImage->GetDirection() );
  typedef TransformType::ParametersType ParametersType;
  const unsigned int numberOfParameters =
    transform->GetNumberOfParameters();
  // Start from the identity transform (all BSpline coefficients zero).
  // The original repeated this initialization block twice; once suffices.
  ParametersType parameters( numberOfParameters );
  parameters.Fill( 0.0 );
  transform->SetParameters( parameters );
  registration->SetInitialTransformParameters( transform->GetParameters() );
  // Software Guide : EndCodeSnippet
  // Software Guide : BeginLatex
  //
  // Next we set the parameters of the RegularStepGradientDescentOptimizer object.
  //
  // Software Guide : EndLatex
  // Software Guide : BeginCodeSnippet
  optimizer->SetMaximumStepLength( 10.0 );
  optimizer->SetMinimumStepLength( 0.01 );
  optimizer->SetRelaxationFactor( 0.7 );
  optimizer->SetNumberOfIterations( 50 );
  // Software Guide : EndCodeSnippet
  // Optionally, get the step length from the command line arguments
  if( argc > 12 )
    {
    optimizer->SetMaximumStepLength( atof( argv[12] ) );
    }
  // Optionally, get the number of iterations from the command line arguments
  if( argc > 13 )
    {
    optimizer->SetNumberOfIterations( atoi( argv[13] ) );
    }
  // Create the Command observer and register it with the optimizer.
  //
  CommandIterationUpdate::Pointer observer = CommandIterationUpdate::New();
  optimizer->AddObserver( itk::IterationEvent(), observer );
  metric->SetNumberOfHistogramBins( 50 );
  // Sample 20% of the fixed image pixels when evaluating the metric.
  const unsigned int numberOfSamples =
    static_cast<unsigned int>( fixedRegion.GetNumberOfPixels() * 20.0 / 100.0 );
  metric->SetNumberOfSpatialSamples( numberOfSamples );
  // Fixed seed keeps the sampling (and hence the result) reproducible.
  metric->ReinitializeSeed( 76926294 );
  if( argc > 7 )
    {
    // Define whether to calculate the metric derivative by explicitly
    // computing the derivatives of the joint PDF with respect to the Transform
    // parameters, or doing it by progressively accumulating contributions from
    // each bin in the joint PDF.
    metric->SetUseExplicitPDFDerivatives( atoi( argv[7] ) );
    }
  if( argc > 8 )
    {
    // Define whether to cache the BSpline weights and indexes corresponding to
    // each one of the samples used to compute the metric. Enabling caching will
    // make the algorithm run faster but it will have a cost on the amount of memory
    // that needs to be allocated. This option is only relevant when using the
    // BSplineTransform.
    metric->SetUseCachingOfBSplineWeights( atoi( argv[8] ) );
    }
  // Add time and memory probes
  itk::TimeProbesCollectorBase chronometer;
  itk::MemoryProbesCollectorBase memorymeter;
  std::cout << std::endl << "Starting Registration" << std::endl;
  try
    {
    memorymeter.Start( "Registration" );
    chronometer.Start( "Registration" );
    registration->StartRegistration();
    chronometer.Stop( "Registration" );
    memorymeter.Stop( "Registration" );
    std::cout << "Optimizer stop condition = "
              << registration->GetOptimizer()->GetStopConditionDescription()
              << std::endl;
    }
  catch( itk::ExceptionObject & err )
    {
    std::cerr << "ExceptionObject caught !" << std::endl;
    std::cerr << err << std::endl;
    return EXIT_FAILURE;
    }
  OptimizerType::ParametersType finalParameters =
    registration->GetLastTransformParameters();
  // Report the time and memory taken by the registration
  chronometer.Report( std::cout );
  memorymeter.Report( std::cout );
  transform->SetParameters( finalParameters );
  // Resample the moving image through the optimized transform, onto the
  // fixed image grid.
  typedef itk::ResampleImageFilter<
    MovingImageType,
    FixedImageType > ResampleFilterType;
  ResampleFilterType::Pointer resample = ResampleFilterType::New();
  resample->SetTransform( transform );
  resample->SetInput( movingImageReader->GetOutput() );
  resample->SetSize( fixedImage->GetLargestPossibleRegion().GetSize() );
  resample->SetOutputOrigin( fixedImage->GetOrigin() );
  resample->SetOutputSpacing( fixedImage->GetSpacing() );
  resample->SetOutputDirection( fixedImage->GetDirection() );
  // This value is set to zero in order to make easier to perform
  // regression testing in this example. However, for didactic
  // exercise it will be better to set it to a medium gray value
  // such as 100 or 128.
  resample->SetDefaultPixelValue( 0 );
  typedef signed short OutputPixelType;
  typedef itk::Image< OutputPixelType, ImageDimension > OutputImageType;
  typedef itk::CastImageFilter<
    FixedImageType,
    OutputImageType > CastFilterType;
  typedef itk::ImageFileWriter< OutputImageType > WriterType;
  WriterType::Pointer writer = WriterType::New();
  CastFilterType::Pointer caster = CastFilterType::New();
  writer->SetFileName( argv[3] );
  caster->SetInput( resample->GetOutput() );
  writer->SetInput( caster->GetOutput() );
  try
    {
    writer->Update();
    }
  catch( itk::ExceptionObject & err )
    {
    std::cerr << "ExceptionObject caught !" << std::endl;
    std::cerr << err << std::endl;
    return EXIT_FAILURE;
    }
  typedef itk::SquaredDifferenceImageFilter<
    FixedImageType,
    FixedImageType,
    OutputImageType > DifferenceFilterType;
  DifferenceFilterType::Pointer difference = DifferenceFilterType::New();
  WriterType::Pointer writer2 = WriterType::New();
  writer2->SetInput( difference->GetOutput() );
  // Compute the difference image between the
  // fixed and resampled moving image.
  if( argc > 4 )
    {
    difference->SetInput1( fixedImageReader->GetOutput() );
    difference->SetInput2( resample->GetOutput() );
    writer2->SetFileName( argv[4] );
    try
      {
      writer2->Update();
      }
    catch( itk::ExceptionObject & err )
      {
      std::cerr << "ExceptionObject caught !" << std::endl;
      std::cerr << err << std::endl;
      return EXIT_FAILURE;
      }
    }
  // Compute the difference image between the
  // fixed and moving image before registration.
  if( argc > 5 )
    {
    writer2->SetFileName( argv[5] );
    difference->SetInput1( fixedImageReader->GetOutput() );
    difference->SetInput2( movingImageReader->GetOutput() );
    try
      {
      writer2->Update();
      }
    catch( itk::ExceptionObject & err )
      {
      std::cerr << "ExceptionObject caught !" << std::endl;
      std::cerr << err << std::endl;
      return EXIT_FAILURE;
      }
    }
  // Generate the explicit deformation field resulting from
  // the registration.
  if( argc > 6 )
    {
    typedef itk::Vector< float, ImageDimension > VectorType;
    typedef itk::Image< VectorType, ImageDimension > DisplacementFieldType;
    DisplacementFieldType::Pointer field = DisplacementFieldType::New();
    field->SetRegions( fixedRegion );
    field->SetOrigin( fixedImage->GetOrigin() );
    field->SetSpacing( fixedImage->GetSpacing() );
    field->SetDirection( fixedImage->GetDirection() );
    field->Allocate();
    typedef itk::ImageRegionIterator< DisplacementFieldType > FieldIterator;
    FieldIterator fi( field, fixedRegion );
    fi.GoToBegin();
    TransformType::InputPointType fixedPoint;
    TransformType::OutputPointType movingPoint;
    DisplacementFieldType::IndexType index;
    VectorType displacement;
    // Each voxel stores how far its physical point moves under the transform.
    while( ! fi.IsAtEnd() )
      {
      index = fi.GetIndex();
      field->TransformIndexToPhysicalPoint( index, fixedPoint );
      movingPoint = transform->TransformPoint( fixedPoint );
      displacement = movingPoint - fixedPoint;
      fi.Set( displacement );
      ++fi;
      }
    typedef itk::ImageFileWriter< DisplacementFieldType > FieldWriterType;
    FieldWriterType::Pointer fieldWriter = FieldWriterType::New();
    fieldWriter->SetInput( field );
    fieldWriter->SetFileName( argv[6] );
    try
      {
      fieldWriter->Update();
      }
    catch( itk::ExceptionObject & excp )
      {
      std::cerr << "Exception thrown " << std::endl;
      std::cerr << excp << std::endl;
      return EXIT_FAILURE;
      }
    }
  // Optionally, save the transform parameters in a file
  if( argc > 9 )
    {
    std::ofstream parametersFile;
    parametersFile.open( argv[9] );
    parametersFile << finalParameters << std::endl;
    parametersFile.close();
    }
  return EXIT_SUCCESS;
}
|
// t0533.cc
// double-typed expr
void f(double,double); // line 4: this is the right one
void f(int,int);
// Exercises overload resolution and the usual arithmetic/pointer conversion
// rules: int operands mixed with a double must yield a double expression
// (selecting the f(double,double) overload on line 4), and pointer +/-
// integer must keep the pointer type, while pointer - pointer is integral.
void foo()
{
  double d;
  f(2*d+4, 3.0); // calls 'f' on line 4
  f(4+2*d, 3.0); // calls 'f' on line 4
  __checkType(2*d+4, (double)0); // int 4 promotes: whole expr is double
  __checkType(4+2*d, (double)0); // same with the operands commuted
  char *p;
  int i;
  __checkType(p+i, (char*)0); // pointer + int -> same pointer type
  __checkType(p-i, (char*)0); // pointer - int -> same pointer type
  __checkType(p-p, (int)0);   // pointer - pointer -> integral difference
}
|
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/chromeos/net/network_portal_detector_strategy.h"
#include "base/logging.h"
#include "chromeos/network/network_handler.h"
#include "chromeos/network/network_state.h"
#include "chromeos/network/network_state_handler.h"
namespace chromeos {
namespace {
// Convenience accessor for the currently-default network, if any.
const NetworkState* DefaultNetwork() {
  NetworkStateHandler* handler = NetworkHandler::Get()->network_state_handler();
  return handler->DefaultNetwork();
}
// TODO (ygorshenin@): reuse net::BackoffEntry for strategies.
// Strategy used while the login screen is shown: a small fixed number of
// attempts, with a per-attempt timeout that grows with the attempt count
// while a default network is present.
class LoginScreenStrategy : public PortalDetectorStrategy {
 public:
  static const int kMaxAttempts = 3;
  static const int kDelayBetweenAttemptsSec = 3;
  static const int kBaseAttemptTimeoutSec = 5;

  LoginScreenStrategy() {}
  virtual ~LoginScreenStrategy() {}

 protected:
  // PortalDetectorStrategy overrides:
  virtual StrategyId Id() const OVERRIDE { return STRATEGY_ID_LOGIN_SCREEN; }
  virtual bool CanPerformAttemptImpl() OVERRIDE {
    return delegate_->AttemptCount() < kMaxAttempts;
  }
  virtual base::TimeDelta GetDelayTillNextAttemptImpl() OVERRIDE {
    return AdjustDelay(base::TimeDelta::FromSeconds(kDelayBetweenAttemptsSec));
  }
  virtual base::TimeDelta GetNextAttemptTimeoutImpl() OVERRIDE {
    int timeout_sec = kBaseAttemptTimeoutSec;
    if (DefaultNetwork())
      timeout_sec = (delegate_->AttemptCount() + 1) * kBaseAttemptTimeoutSec;
    return base::TimeDelta::FromSeconds(timeout_sec);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(LoginScreenStrategy);
};
// Strategy used on the error screen: unlimited attempts, fixed delay between
// them, and a generous fixed per-attempt timeout.
class ErrorScreenStrategy : public PortalDetectorStrategy {
 public:
  static const int kDelayBetweenAttemptsSec = 3;
  static const int kAttemptTimeoutSec = 15;

  ErrorScreenStrategy() {}
  virtual ~ErrorScreenStrategy() {}

 protected:
  // PortalDetectorStrategy overrides:
  virtual StrategyId Id() const OVERRIDE { return STRATEGY_ID_ERROR_SCREEN; }
  virtual bool CanPerformAttemptImpl() OVERRIDE { return true; }
  virtual bool CanPerformAttemptAfterDetectionImpl() OVERRIDE { return true; }
  virtual base::TimeDelta GetDelayTillNextAttemptImpl() OVERRIDE {
    const base::TimeDelta delay =
        base::TimeDelta::FromSeconds(kDelayBetweenAttemptsSec);
    return AdjustDelay(delay);
  }
  virtual base::TimeDelta GetNextAttemptTimeoutImpl() OVERRIDE {
    return base::TimeDelta::FromSeconds(kAttemptTimeoutSec);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ErrorScreenStrategy);
};
// Detection strategy used during the user session. Probing slows down
// over time: the first kMaxFastAttempts probes are "fast", the next
// kMaxNormalAttempts are "normal", everything after that is "slow".
class SessionStrategy : public PortalDetectorStrategy {
 public:
  static const int kFastDelayBetweenAttemptsSec = 1;
  static const int kFastAttemptTimeoutSec = 3;
  static const int kMaxFastAttempts = 3;
  static const int kNormalDelayBetweenAttemptsSec = 10;
  static const int kNormalAttemptTimeoutSec = 5;
  static const int kMaxNormalAttempts = 3;
  static const int kSlowDelayBetweenAttemptsSec = 2 * 60;
  static const int kSlowAttemptTimeoutSec = 5;
  SessionStrategy() {}
  virtual ~SessionStrategy() {}
 protected:
  virtual StrategyId Id() const OVERRIDE { return STRATEGY_ID_SESSION; }
  virtual bool CanPerformAttemptImpl() OVERRIDE { return true; }
  virtual bool CanPerformAttemptAfterDetectionImpl() OVERRIDE { return true; }
  virtual base::TimeDelta GetDelayTillNextAttemptImpl() OVERRIDE {
    const int delay_sec =
        IsFastAttempt() ? kFastDelayBetweenAttemptsSec
                        : IsNormalAttempt() ? kNormalDelayBetweenAttemptsSec
                                            : kSlowDelayBetweenAttemptsSec;
    return AdjustDelay(base::TimeDelta::FromSeconds(delay_sec));
  }
  virtual base::TimeDelta GetNextAttemptTimeoutImpl() OVERRIDE {
    const int timeout_sec =
        IsFastAttempt() ? kFastAttemptTimeoutSec
                        : IsNormalAttempt() ? kNormalAttemptTimeoutSec
                                            : kSlowAttemptTimeoutSec;
    return base::TimeDelta::FromSeconds(timeout_sec);
  }
 private:
  // True while still within the initial burst of fast probes.
  bool IsFastAttempt() {
    return delegate_->AttemptCount() < kMaxFastAttempts;
  }
  // True while within the combined fast + normal probe budget.
  bool IsNormalAttempt() {
    return delegate_->AttemptCount() < kMaxFastAttempts + kMaxNormalAttempts;
  }
  DISALLOW_COPY_AND_ASSIGN(SessionStrategy);
};
} // namespace
// PortalDetectorStrategy -----------------------------------------------------
// Test-only overrides: when the matching *_initialized_ flag is true the
// stored value is returned instead of the strategy-computed one (see
// GetDelayTillNextAttempt() / GetNextAttemptTimeout()).
// static
base::TimeDelta PortalDetectorStrategy::delay_till_next_attempt_for_testing_;
// static
bool PortalDetectorStrategy::delay_till_next_attempt_for_testing_initialized_ =
    false;
// static
base::TimeDelta PortalDetectorStrategy::next_attempt_timeout_for_testing_;
// static
bool PortalDetectorStrategy::next_attempt_timeout_for_testing_initialized_ =
    false;
// The delegate is attached by the owner after construction.
PortalDetectorStrategy::PortalDetectorStrategy() : delegate_(NULL) {}
PortalDetectorStrategy::~PortalDetectorStrategy() {}
// static
// Factory: maps a StrategyId to a freshly allocated strategy instance.
// Unknown ids hit NOTREACHED() and yield a NULL scoped_ptr.
scoped_ptr<PortalDetectorStrategy> PortalDetectorStrategy::CreateById(
    StrategyId id) {
  switch (id) {
    case STRATEGY_ID_LOGIN_SCREEN:
      return scoped_ptr<PortalDetectorStrategy>(new LoginScreenStrategy());
    case STRATEGY_ID_ERROR_SCREEN:
      return scoped_ptr<PortalDetectorStrategy>(new ErrorScreenStrategy());
    case STRATEGY_ID_SESSION:
      return scoped_ptr<PortalDetectorStrategy>(new SessionStrategy());
    default:
      NOTREACHED();
      return scoped_ptr<PortalDetectorStrategy>(
          static_cast<PortalDetectorStrategy*>(NULL));
  }
}
// Public entry points simply defer to the per-strategy *Impl() hooks.
bool PortalDetectorStrategy::CanPerformAttempt() {
  return CanPerformAttemptImpl();
}
bool PortalDetectorStrategy::CanPerformAttemptAfterDetection() {
  return CanPerformAttemptAfterDetectionImpl();
}
base::TimeDelta PortalDetectorStrategy::GetDelayTillNextAttempt() {
  // Tests may pin the delay; otherwise ask the concrete strategy.
  if (delay_till_next_attempt_for_testing_initialized_)
    return delay_till_next_attempt_for_testing_;
  return GetDelayTillNextAttemptImpl();
}
base::TimeDelta PortalDetectorStrategy::GetNextAttemptTimeout() {
  // Tests may pin the timeout; otherwise ask the concrete strategy.
  if (next_attempt_timeout_for_testing_initialized_)
    return next_attempt_timeout_for_testing_;
  return GetNextAttemptTimeoutImpl();
}
// Conservative base-class defaults: no attempts allowed, zero delay and
// zero timeout; concrete strategies override the relevant hooks.
bool PortalDetectorStrategy::CanPerformAttemptImpl() { return false; }
bool PortalDetectorStrategy::CanPerformAttemptAfterDetectionImpl() {
  return false;
}
base::TimeDelta PortalDetectorStrategy::GetDelayTillNextAttemptImpl() {
  return base::TimeDelta();
}
base::TimeDelta PortalDetectorStrategy::GetNextAttemptTimeoutImpl() {
  return base::TimeDelta();
}
// Reduces |delay| by the time already spent since the current attempt
// started, so pacing is measured from the attempt's start. The very
// first attempt (AttemptCount() == 0) is never delayed.
base::TimeDelta PortalDetectorStrategy::AdjustDelay(
    const base::TimeDelta& delay) {
  if (!delegate_->AttemptCount())
    return base::TimeDelta();
  const base::TimeTicks current_ticks = delegate_->GetCurrentTimeTicks();
  base::TimeDelta elapsed;
  if (current_ticks > delegate_->AttemptStartTime())
    elapsed = current_ticks - delegate_->AttemptStartTime();
  return delay > elapsed ? delay - elapsed : base::TimeDelta();
}
} // namespace chromeos
|
/*===================================================================
The Medical Imaging Interaction Toolkit (MITK)
Copyright (c) German Cancer Research Center,
Division of Medical and Biological Informatics.
All rights reserved.
This software is distributed WITHOUT ANY WARRANTY; without
even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE.
See LICENSE.txt or http://www.mitk.org for details.
===================================================================*/
#include <vnl/vnl_cross.h>
#include <vnl/vnl_quaternion.h>
#include <mitkChiSquareNoiseModel.h>
using namespace mitk;
template< class ScalarType >
ChiSquareNoiseModel< ScalarType >::ChiSquareNoiseModel()
{
    // Seed the random generator with its default (non-fixed) seed.
    m_RandGen.seed();
}
template< class ScalarType >
ChiSquareNoiseModel< ScalarType >::~ChiSquareNoiseModel()
{
}
// Seeds the generator: a non-negative |seed| gives reproducible noise,
// a negative value restores default (non-fixed) seeding.
template< class ScalarType >
void ChiSquareNoiseModel< ScalarType >::SetSeed(int seed)
{
    if (seed<0)
        m_RandGen.seed();
    else
        m_RandGen.seed(seed);
}
// Adds a chi-square-distributed sample, shifted by m_Distribution.n(),
// to every channel of |pixel|.
template< class ScalarType >
void ChiSquareNoiseModel< ScalarType >::AddNoise(PixelType& pixel)
{
    const unsigned int numChannels = pixel.Size();
    for (unsigned int ch = 0; ch < numChannels; ++ch)
    {
        pixel[ch] += (ScalarType)(m_Distribution(m_RandGen)-m_Distribution.n());
    }
}
|
// Autogenerated from CppHeaderCreator
// Created by Sc2ad
// =========================================================================
#pragma once
// Begin includes
#include "extern/beatsaber-hook/shared/utils/typedefs.h"
#include "extern/beatsaber-hook/shared/utils/byref.hpp"
// Including type: LiteNetLib.NetManager
#include "LiteNetLib/NetManager.hpp"
// Including type: System.ValueType
#include "System/ValueType.hpp"
// Including type: System.Collections.Generic.IEnumerator`1
#include "System/Collections/Generic/IEnumerator_1.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-properties.hpp"
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-fields.hpp"
#include "extern/beatsaber-hook/shared/utils/utils.h"
// Completed includes
// Begin forward declares
// Forward declaring namespace: LiteNetLib
namespace LiteNetLib {
// Forward declaring type: NetPeer
class NetPeer;
}
// Completed forward declares
// Type namespace: LiteNetLib
namespace LiteNetLib {
// Size: 0x10
#pragma pack(push, 1)
// WARNING Layout: Sequential may not be correctly taken into account!
// Autogenerated type: LiteNetLib.NetManager/LiteNetLib.NetPeerEnumerator
// [TokenAttribute] Offset: FFFFFFFF
// Value-type enumerator over a NetManager's NetPeer list. The layout and
// offsets below are autogenerated from il2cpp metadata; method bodies live
// in the game binary and are invoked through il2cpp-utils.
struct NetManager::NetPeerEnumerator/*, public System::ValueType, public System::Collections::Generic::IEnumerator_1<LiteNetLib::NetPeer*>*/ {
  public:
    // private readonly LiteNetLib.NetPeer _initialPeer
    // Size: 0x8
    // Offset: 0x0
    LiteNetLib::NetPeer* initialPeer;
    // Field size check
    static_assert(sizeof(LiteNetLib::NetPeer*) == 0x8);
    // private LiteNetLib.NetPeer _p
    // Size: 0x8
    // Offset: 0x8
    LiteNetLib::NetPeer* p;
    // Field size check
    static_assert(sizeof(LiteNetLib::NetPeer*) == 0x8);
    // Creating value type constructor for type: NetPeerEnumerator
    constexpr NetPeerEnumerator(LiteNetLib::NetPeer* initialPeer_ = {}, LiteNetLib::NetPeer* p_ = {}) noexcept : initialPeer{initialPeer_}, p{p_} {}
    // Creating interface conversion operator: operator System::ValueType
    operator System::ValueType() noexcept {
      return *reinterpret_cast<System::ValueType*>(this);
    }
    // Creating interface conversion operator: operator System::Collections::Generic::IEnumerator_1<LiteNetLib::NetPeer*>
    operator System::Collections::Generic::IEnumerator_1<LiteNetLib::NetPeer*>() noexcept {
      return *reinterpret_cast<System::Collections::Generic::IEnumerator_1<LiteNetLib::NetPeer*>*>(this);
    }
    // Get instance field: private readonly LiteNetLib.NetPeer _initialPeer
    LiteNetLib::NetPeer* _get__initialPeer();
    // Set instance field: private readonly LiteNetLib.NetPeer _initialPeer
    void _set__initialPeer(LiteNetLib::NetPeer* value);
    // Get instance field: private LiteNetLib.NetPeer _p
    LiteNetLib::NetPeer* _get__p();
    // Set instance field: private LiteNetLib.NetPeer _p
    void _set__p(LiteNetLib::NetPeer* value);
    // public LiteNetLib.NetPeer get_Current()
    // Offset: 0xD8210C
    LiteNetLib::NetPeer* get_Current();
    // private System.Object System.Collections.IEnumerator.get_Current()
    // Offset: 0xD82114
    ::Il2CppObject* System_Collections_IEnumerator_get_Current();
    // public System.Void .ctor(LiteNetLib.NetPeer p)
    // Offset: 0xD820B4
    // Runtime constructor: resolves the managed .ctor via FindMethod and
    // invokes it on this instance.
    template<::il2cpp_utils::CreationType creationType = ::il2cpp_utils::CreationType::Temporary>
    NetPeerEnumerator(LiteNetLib::NetPeer* p) {
      static auto ___internal__logger = ::Logger::get().WithContext("LiteNetLib::NetManager::NetPeerEnumerator::.ctor");
      static auto* ___internal__method = THROW_UNLESS((::il2cpp_utils::FindMethod(*this, ".ctor", std::vector<Il2CppClass*>{}, ::std::vector<const Il2CppType*>{::il2cpp_utils::ExtractType(p)})));
      ::il2cpp_utils::RunMethodThrow<void, false>(*this, ___internal__method, p);
    }
    // public System.Void Dispose()
    // Offset: 0xD820BC
    void Dispose();
    // public System.Boolean MoveNext()
    // Offset: 0xD820C0
    bool MoveNext();
    // public System.Void Reset()
    // Offset: 0xD82100
    void Reset();
}; // LiteNetLib.NetManager/LiteNetLib.NetPeerEnumerator
#pragma pack(pop)
// Compile-time guarantees that the mirrored layout matches the 0x10-byte
// managed struct, plus registration of the il2cpp argument-type mapping.
static check_size<sizeof(NetManager::NetPeerEnumerator), 8 + sizeof(LiteNetLib::NetPeer*)> __LiteNetLib_NetManager_NetPeerEnumeratorSizeCheck;
static_assert(sizeof(NetManager::NetPeerEnumerator) == 0x10);
}
DEFINE_IL2CPP_ARG_TYPE(LiteNetLib::NetManager::NetPeerEnumerator, "LiteNetLib", "NetManager/NetPeerEnumerator");
#include "extern/beatsaber-hook/shared/utils/il2cpp-utils-methods.hpp"
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::get_Current
// Il2CppName: get_Current
// MetadataGetter specializations: map each member-function pointer to its
// il2cpp MethodInfo so il2cpp-utils can resolve and invoke the managed
// method by name at runtime.
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<LiteNetLib::NetPeer* (LiteNetLib::NetManager::NetPeerEnumerator::*)()>(&LiteNetLib::NetManager::NetPeerEnumerator::get_Current)> {
  static const MethodInfo* get() {
    return ::il2cpp_utils::FindMethod(classof(LiteNetLib::NetManager::NetPeerEnumerator), "get_Current", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
  }
};
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::System_Collections_IEnumerator_get_Current
// Il2CppName: System.Collections.IEnumerator.get_Current
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<::Il2CppObject* (LiteNetLib::NetManager::NetPeerEnumerator::*)()>(&LiteNetLib::NetManager::NetPeerEnumerator::System_Collections_IEnumerator_get_Current)> {
  static const MethodInfo* get() {
    return ::il2cpp_utils::FindMethod(classof(LiteNetLib::NetManager::NetPeerEnumerator), "System.Collections.IEnumerator.get_Current", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
  }
};
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::NetPeerEnumerator
// Il2CppName: .ctor
// Cannot get method pointer of value based method overload from template for constructor!
// Try using FindMethod instead!
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::Dispose
// Il2CppName: Dispose
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (LiteNetLib::NetManager::NetPeerEnumerator::*)()>(&LiteNetLib::NetManager::NetPeerEnumerator::Dispose)> {
  static const MethodInfo* get() {
    return ::il2cpp_utils::FindMethod(classof(LiteNetLib::NetManager::NetPeerEnumerator), "Dispose", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
  }
};
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::MoveNext
// Il2CppName: MoveNext
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<bool (LiteNetLib::NetManager::NetPeerEnumerator::*)()>(&LiteNetLib::NetManager::NetPeerEnumerator::MoveNext)> {
  static const MethodInfo* get() {
    return ::il2cpp_utils::FindMethod(classof(LiteNetLib::NetManager::NetPeerEnumerator), "MoveNext", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
  }
};
// Writing MetadataGetter for method: LiteNetLib::NetManager::NetPeerEnumerator::Reset
// Il2CppName: Reset
template<>
struct ::il2cpp_utils::il2cpp_type_check::MetadataGetter<static_cast<void (LiteNetLib::NetManager::NetPeerEnumerator::*)()>(&LiteNetLib::NetManager::NetPeerEnumerator::Reset)> {
  static const MethodInfo* get() {
    return ::il2cpp_utils::FindMethod(classof(LiteNetLib::NetManager::NetPeerEnumerator), "Reset", std::vector<Il2CppClass*>(), ::std::vector<const Il2CppType*>{});
  }
};
|
/*
* Copyright (c) 2017-2021 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "arm_compute/runtime/NEON/functions/NEPermute.h"
#include "arm_compute/core/Validate.h"
#include "src/runtime/cpu/operators/CpuPermute.h"
namespace arm_compute
{
// Pimpl state for NEPermute: non-owning src/dst tensor pointers plus the
// backing cpu::CpuPermute operator that performs the permutation.
struct NEPermute::Impl
{
    const ITensor *src{ nullptr };
    ITensor *dst{ nullptr };
    std::unique_ptr<cpu::CpuPermute> op{ nullptr };
};
// Allocates the pimpl state; the operator itself is created in configure().
NEPermute::NEPermute()
    : _impl(std::make_unique<Impl>())
{
}
// Defined out-of-line so unique_ptr<Impl> is destroyed with a complete type.
NEPermute::~NEPermute() = default;
// Configures the permutation of |input| into |output| according to |perm|.
// Creates and configures the backing cpu operator, then remembers the
// tensors so run() can build its tensor pack.
void NEPermute::configure(const ITensor *input, ITensor *output, const PermutationVector &perm)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
    _impl->op = std::make_unique<cpu::CpuPermute>();
    _impl->op->configure(input->info(), output->info(), perm);
    _impl->src = input;
    _impl->dst = output;
}
// Static argument validation; delegates to cpu::CpuPermute::validate and
// returns an empty (OK) Status on success.
Status NEPermute::validate(const ITensorInfo *input, const ITensorInfo *output, const PermutationVector &perm)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
    ARM_COMPUTE_RETURN_ON_ERROR(cpu::CpuPermute::validate(input, output, perm));
    return Status{};
}
void NEPermute::run()
{
ITensorPack pack;
pack.add_tensor(TensorType::ACL_SRC, _impl->src);
pack.add_tensor(TensorType::ACL_DST, _impl->dst);
_impl->op->run(pack);
}
} // namespace arm_compute
|
#include "ini_file.h"
#include <fstream>
#include <cstring>
#include <algorithm>
#include <iostream>
namespace ini_file
{
// Default comment delimiter is "#"; Load() must be called separately.
IniFile::IniFile() : comment_delimiter_("#") { }
// Convenience constructor: immediately loads |file_name|. Load errors are
// not reported here; check err_msg_ or call Load() explicitly to get a code.
IniFile::IniFile(string& file_name): comment_delimiter_("#") { Load(file_name); }
// Parses |file_name| into sections_vec_. Accumulated leading comments are
// attached to the next section/key; text after the comment delimiter on a
// content line becomes that entry's right_comment. Keys that appear before
// the first [section] header land in an implicit section with empty name.
// Returns kRetOk on success or a kError* code on failure.
// FIX: removed per-line debug output to stdout (library noise), added the
// missing space in the open-failure message, and initialized error_no.
int IniFile::Load(const string& file_name)
{
    string line;
    string clean_line;
    string comment;
    string right_comment;
    int error_no = 0;
    Release();
    this->ini_filepath_ = file_name;
    std::ifstream ifs(ini_filepath_);
    if (!ifs.is_open()) {
        err_msg_ = string("open ") + ini_filepath_ + string(" file failed.");
        return kErrorOpenFileFailed;
    }
    // Implicit top-level section for keys before the first [section].
    auto cur_section = new IniSection();
    cur_section->name = "";
    sections_vec_.push_back(cur_section);
    while (std::getline(ifs, line)) {
        Trim(line);
        // Blank lines only contribute a newline to the pending comment.
        if (line.length() <= 0) {
            comment += kDelimit;
            continue;
        }
        // Whole-line comments are accumulated for the next section/key.
        if (IsCommentLine(line)) {
            comment += line + kDelimit;
            continue;
        }
        // Split a trailing inline comment off the content part.
        Split(line, this->comment_delimiter_, &clean_line, &right_comment);
        if (!clean_line.empty() && clean_line[0] == '[') {
            error_no = UpdateSection(clean_line, comment, right_comment, &cur_section);
        }
        else {
            error_no = AddKeyValuePair(clean_line, comment, right_comment, cur_section);
        }
        if (error_no != 0) {
            ifs.close();
            return error_no;
        }
        comment = "";
        right_comment = "";
    }
    ifs.close();
    return kRetOk;
}
void IniFile::Release()
{
this->ini_filepath_ = "";
// ReSharper disable once CppUseAuto
for (IniSectionIter it = sections_vec_.begin(); it != sections_vec_.end(); ++it) {
delete(*it);
}
this->sections_vec_.clear();
}
// Writes the current contents back to the file it was loaded from.
int IniFile::Save()
{
    return SaveAs(this->ini_filepath_);
}
int IniFile::SaveAs(const string& file_name)
{
string data = "";
for (auto sec_it = sections_vec_.begin(); sec_it != sections_vec_.end(); ++sec_it) {
if ((*sec_it)->comment != "") {
data += (*sec_it)->comment;
}
if ((*sec_it)->name != "") {
data += string("[") + (*sec_it)->name + string("]");
data += kDelimit;
}
if ((*sec_it)->right_comment != "") {
data += " " + this->comment_delimiter_ + (*sec_it)->right_comment;
}
for (auto item_it = (*sec_it)->items.begin(); item_it != (*sec_it)->items.end(); ++item_it) {
if (item_it->comment != "") {
data += item_it->comment;
if (data[data.length() - 1] != '\n') data += kDelimit;
}
data += item_it->key + "=" + item_it->value;
if (item_it->right_comment != "") {
data += " " + this->comment_delimiter_ + item_it->right_comment;
}
if (data[data.length() - 1] != '\n') data += kDelimit;
}
}
std::ofstream ofs;
ofs << data;
ofs.close();
return kRetOk;
}
// Fetches the raw string value of |key| in |section|.
int IniFile::GetStringValue(const string& section, const string& key, string* value)
{
    return GetValue(section, key, value);
}
int IniFile::GetIntValue(const string& section, const string& key, int* value)
{
string str_value;
const auto error_no = GetValue(section, key, &str_value);
*value = atoi(str_value.c_str());
return error_no;
}
int IniFile::GetDoubleValue(const string& section, const string& key, double* value)
{
string str_value;
const auto error_no = GetValue(section, key, &str_value);
*value = atof(str_value.c_str());
return error_no;
}
// Fetches |key| and interprets "true"/"1" as true, "false"/"0" as false
// (case-insensitive). Any other text leaves |*value| unchanged.
int IniFile::GetBoolValue(const string& section, const string& key, bool* value)
{
    string str_value;
    const auto err = GetValue(section, key, &str_value);
    if (StrCmpIgnoreCase(str_value, "true") || StrCmpIgnoreCase(str_value, "1")) *value = true;
    if (StrCmpIgnoreCase(str_value, "false") || StrCmpIgnoreCase(str_value, "0")) *value = false;
    return err;
}
// Fetches the leading comment attached to |key| in |section|; an empty
// |key| selects the section's own comment.
// Returns kRetOk, kErrorNotFoundSection or kErrorNotFoundKey.
int IniFile::GetComment(const string& section, const string& key, string* comment)
{
    auto sect = GetSection(section);
    if (sect == nullptr) {
        err_msg_ = string("not found the section. ") + section;
        return kErrorNotFoundSection;
    }
    if (key == "") {
        *comment = sect->comment;
        return kRetOk;
    }
    for (IniSection::IniItemIter it = sect->begin(); it != sect->end(); ++it) {
        if (it->key == key) {
            *comment = it->comment;
            return kRetOk;
        }
    }
    // BUG FIX: the error message previously reported the section name
    // instead of the missing key.
    this->err_msg_ = string("not found the key. ") + key;
    return kErrorNotFoundKey;
}
// Fetches the trailing (right-hand) comment of |key| in |section|; an
// empty |key| selects the section's own trailing comment.
int IniFile::GetRightComment(const string& section, const string& key, string* right_comment)
{
    auto sect = GetSection(section);
    if (sect == nullptr) {
        err_msg_ = string("not found the section.") + section;
        return kErrorNotFoundSection;
    }
    if (key == "") {
        *right_comment = sect->right_comment;
        return kRetOk;
    }
    for (auto& item : sect->items) {
        if (item.key == key) {
            *right_comment = item.right_comment;
            return kRetOk;
        }
    }
    this->err_msg_ = string("not found the key.") + key;
    return kErrorNotFoundKey;
}
// Appends every section name (including the implicit unnamed first
// section) to |sections| and returns the resulting count.
int IniFile::GetSections(vector<string>* sections)
{
    for (const auto sec : this->sections_vec_) {
        sections->push_back(sec->name);
    }
    return sections->size();
}
// Number of sections, counting the implicit unnamed first section.
int IniFile::GetSectionCount() const
{
    return this->sections_vec_.size();
}
// True when a section with this exact name exists.
bool IniFile::HasSection(const string& section)
{
    return (GetSection(section) != nullptr);
}
// True when |section| exists and contains an item whose key equals |key|.
bool IniFile::HasKey(const string& section, const string& key)
{
    const auto sect = GetSection(section);
    if (sect == nullptr) {
        return false;
    }
    for (auto it = sect->begin(); it != sect->end(); ++it) {
        if (it->key == key) {
            return true;
        }
    }
    return false;
}
// Returns the section named |section|, or nullptr when absent.
IniSection* IniFile::GetSection(const string& section)
{
    for (const auto sec : this->sections_vec_) {
        if (sec->name == section) {
            return sec;
        }
    }
    return nullptr;
}
// Debug helper: dumps the parsed structure to stdout via Print().
void IniFile::test_output()
{
    Print();
}
// Strips leading and trailing whitespace in place using string::erase.
// Generalized from trimming only ' ' to also trimming tabs and CR/LF so
// tab-indented values and files with Windows line endings parse cleanly.
void IniFile::Trim(string& str)
{
    str.erase(0, str.find_first_not_of(" \t\r\n"));
    str.erase(str.find_last_not_of(" \t\r\n") + 1);
}
// True when the (already trimmed) line starts with the comment delimiter.
bool IniFile::IsCommentLine(const string& str) const
{
    return StartWith(str, comment_delimiter_);
}
/// Returns true if |str| begins with |prefix| (the leading character
/// classifies a line: the comment delimiter marks a comment, "[" a
/// section header).
bool IniFile::StartWith(const string& str, const string& prefix)
{
    return strncmp(str.c_str(), prefix.c_str(), prefix.size()) == 0;
}
bool IniFile::Split(const string& str, const string& sep, string* left_content, string* right_comment)
{
const auto pos = str.find(sep);
string left, right;
if (pos != string::npos) {
left = string(str, 0, pos);
right = string(str, pos + 1);
Trim(left);
Trim(right);
*left_content = left;
*right_comment = right;
return true;
}
else {
left = str;
right = "";
Trim(left);
*left_content = left;
*right_comment = right;
return false;
}
}
// Case-insensitive equality comparison of two strings.
// FIX: lower-case through a lambda that casts to unsigned char — passing
// tolower directly to std::transform on plain char is undefined behavior
// for negative character values.
bool IniFile::StrCmpIgnoreCase(const string& str1, const string& str2)
{
    auto first_str{str1};
    auto last_str{str2};
    const auto to_lower = [](unsigned char c) { return static_cast<char>(tolower(c)); };
    std::transform(first_str.begin(), first_str.end(), first_str.begin(), to_lower);
    std::transform(last_str.begin(), last_str.end(), last_str.begin(), to_lower);
    return (first_str == last_str);
}
// Parses a "[name]" header line, creates the new section (attaching the
// accumulated |comment| and |right_comment|) and makes it current via
// |section|. Returns 0 or a kError* code.
int IniFile::UpdateSection(const string& clean_line, const string& comment, const string& right_comment,
    IniSection** section)
{
    const auto pos = clean_line.find_first_of(']');
    if (pos == string::npos) {
        err_msg_ = string("No matched ] found.");
        return kErrorUnmatchedBrackets;
    }
    // NOTE(review): |len| is unsigned, so "len <= 0" only matches len == 0.
    // That is safe here because the caller (Load) only calls this when
    // clean_line[0] == '[', hence pos >= 1.
    const auto len = pos - 1;
    if (len <= 0) {
        err_msg_ = string("section name is empty.");
        return kErrorSectionEmpty;
    }
    // Section name is the trimmed text between the brackets.
    string s(clean_line, 1, len);
    Trim(s);
    if (GetSection(s) != nullptr) {
        err_msg_ = string("section ") + s + string(" already exist");
        return kErrorSectionAlreadyExists;
    }
    auto new_sec = new IniSection();
    new_sec->name = s;
    new_sec->comment = comment;
    new_sec->right_comment = right_comment;
    sections_vec_.push_back(new_sec);
    *section = new_sec;
    return 0;
}
// Parses "key=value" from |clean_line| and appends it, together with the
// accumulated comments, to |section|'s item list.
// Returns 0 or kErrorParseKeyValueFailed.
int IniFile::AddKeyValuePair(const string& clean_line, const string& comment, const string& right_comment,
    IniSection* section)
{
    string key, value;
    if (!Parse(clean_line, &key, &value)) {
        err_msg_ = string("parse line failed: ") + clean_line;
        return kErrorParseKeyValueFailed;
    }
    IniItem item;
    item.key = key;
    item.value = value;
    item.comment = comment;
    item.right_comment = right_comment;
    section->items.push_back(item);
    return 0;
}
// Splits |content| at the first '=' into key and value; returns false
// when no '=' is present (the whole trimmed line then ends up in |key|).
bool IniFile::Parse(const string& content, string* key, string* value)
{
    return Split(content, "=", key, value);
}
// Collects every value (and its leading comment) stored under |key| in
// |section| — INI files may repeat a key. Output vectors are cleared
// first. Returns kRetOk, kErrorNotFoundSection or kErrorNotFoundKey.
int IniFile::GetValues(const string& section, const string& key, vector<string>* values, vector<string>* comments)
{
    values->clear();
    comments->clear();
    auto sect = GetSection(section);
    if (sect == nullptr) {
        err_msg_ = string("Not found the section. ") + section;
        return kErrorNotFoundSection;
    }
    for (auto it = sect->begin(); it != sect->end(); ++it) {
        if (it->key == key) {
            values->push_back(it->value);
            comments->push_back(it->comment);
        }
    }
    if (values->empty()) {
        err_msg_ = string("Not found the key ") + key;
        return kErrorNotFoundKey;
    }
    return kRetOk;
}
// Overload that discards the per-value comments.
int IniFile::GetValues(const string& section, const string& key, vector<string>* values)
{
    vector<string> comments;
    return GetValues(section, key, values, &comments);
}
// Looks up |key| in |section|, returning its value and leading comment.
// Returns kRetOk, kErrorNotFoundSection or kErrorNotFoundKey.
int IniFile::GetValue(const string& section, const string& key, string* value, string* comment)
{
    auto sect = GetSection(section);
    if (sect == nullptr) {
        err_msg_ = string("not found the section. ") + section;
        return kErrorNotFoundSection;
    }
    for (auto it = sect->begin(); it != sect->end(); ++it) {
        if (it->key == key) {
            *value = it->value;
            *comment = it->comment;
            return kRetOk;
        }
    }
    // BUG FIX: the error message read "not fond the key." (typo).
    this->err_msg_ = string("not found the key. ") + key;
    return kErrorNotFoundKey;
}
// Overload that discards the comment.
int IniFile::GetValue(const string& section, const string& key, string* value)
{
    string comment;
    return GetValue(section, key, value, &comment);
}
// Debug dump of the whole parsed structure (path, delimiter, sections,
// items and their comments) to stdout.
void IniFile::Print()
{
    printf("\n################# print start ################\n");
    printf("file path: [%s] \n", this->ini_filepath_.c_str());
    printf("comment delimiter: [%s]\n", this->comment_delimiter_.c_str());
    for (auto it = sections_vec_.begin(); it != sections_vec_.end(); ++it) {
        printf("comment: [%s]\n", (*it)->comment.c_str());
        printf("section: [%s]\n", (*it)->name.c_str());
        if ((*it)->right_comment != "") {
            printf("right comment: [%s]\n", (*it)->right_comment.c_str());
        }
        for (auto item_it = (*it)->items.begin(); item_it != (*it)->items.end(); ++item_it) {
            printf(" comment : [%s]\n", item_it->comment.c_str());
            printf(" param : [%s=%s]\n", item_it->key.c_str(), item_it->value.c_str());
            if (item_it->right_comment != "") {
                printf(" right comment: [%s]\n", item_it->right_comment.c_str());
            }
        }
    }
    printf("\n################# print end ################\n");
}
// Alternative trim implementation: scan from the front with isspace until
// the first non-space character and rebuild the string from there, then
// scan from the back and rebuild again (two copies in total).
// FIXES: the scan indices were plain int, causing signed/unsigned
// comparison against length(); and isspace() was called on raw char,
// which is undefined behavior for negative values — now cast through
// unsigned char.
void TrimComplex(std::string& str)
{
    const std::string::size_type len = str.length();
    std::string::size_type i = 0;
    while (i < len && isspace(static_cast<unsigned char>(str[i]))) {
        i++;
    }
    if (i != 0) {
        str = std::string(str, i, len - i);
    }
    // Walk back from the end to the last non-space character.
    std::string::size_type end = str.length();
    while (end > 0 && isspace(static_cast<unsigned char>(str[end - 1]))) {
        --end;
    }
    str = std::string(str, 0, end);
}
}
// Manual test driver: loads a fixed INI file, dumps its contents, then
// prints every item of the [HotKey] section.
// FIXES: GetSection() returns nullptr when the section is missing and the
// old code dereferenced it unconditionally (crash); also removed the
// unused |config_ini| local.
int main(int argc, char* argv[])
{
    const std::string config_file = "d:\\temp\\Config.ini";
    auto ini_config = ini_file::IniFile();
    ini_config.Load(config_file);
    std::cout << "Section_count: " << ini_config.GetSectionCount() << "\n";
    ini_config.test_output();
    auto sect = ini_config.GetSection("HotKey");
    if (sect == nullptr) {
        std::cout << "section [HotKey] not found\n";
        return 1;
    }
    for (auto it = sect->begin(); it != sect->end(); ++it) {
        printf("item: [%s = %s]\n", it->key.c_str(), it->value.c_str());
    }
    return 0;
}
|
// Copyright (c) 2020 Microsoft Corporation.
// Licensed under the MIT License.
#include "Behaviors/UxtHandConstraintComponent.h"
#include "Engine/World.h"
#include "HandTracking/UxtHandTrackingFunctionLibrary.h"
#include "Utils/UxtFunctionLibrary.h"
namespace
{
// Like FMath::LineBoxIntersection but from the inside and returns the result
inline bool LineBoxIntersectionInternal(const FBox& Box, const FVector& Start, const FVector& End, FVector& HitLocation)
{
	const FVector StartToEnd = End - Start;
	const FVector OneOverStartToEnd = StartToEnd.Reciprocal();
	// Per-axis parametric time along Start->End at which the segment
	// crosses a box face; 2.0 (past t = 1) marks "end stays inside slab".
	FVector Time;
	// X axis: Start must be inside the slab for an inside-out hit.
	if (Start.X >= Box.Min.X && Start.X <= Box.Max.X)
	{
		if (End.X < Box.Min.X)
		{
			Time.X = (Box.Min.X - Start.X) * OneOverStartToEnd.X;
		}
		else if (End.X > Box.Max.X)
		{
			Time.X = (Box.Max.X - Start.X) * OneOverStartToEnd.X;
		}
		else
		{
			Time.X = 2.0f;
		}
	}
	else
	{
		return false;
	}
	// Y axis: same slab test as X.
	if (Start.Y >= Box.Min.Y && Start.Y <= Box.Max.Y)
	{
		if (End.Y < Box.Min.Y)
		{
			Time.Y = (Box.Min.Y - Start.Y) * OneOverStartToEnd.Y;
		}
		else if (End.Y > Box.Max.Y)
		{
			Time.Y = (Box.Max.Y - Start.Y) * OneOverStartToEnd.Y;
		}
		else
		{
			Time.Y = 2.0f;
		}
	}
	else
	{
		return false;
	}
	// Z axis: same slab test as X.
	if (Start.Z >= Box.Min.Z && Start.Z <= Box.Max.Z)
	{
		if (End.Z < Box.Min.Z)
		{
			Time.Z = (Box.Min.Z - Start.Z) * OneOverStartToEnd.Z;
		}
		else if (End.Z > Box.Max.Z)
		{
			Time.Z = (Box.Max.Z - Start.Z) * OneOverStartToEnd.Z;
		}
		else
		{
			Time.Z = 2.0f;
		}
	}
	else
	{
		return false;
	}
	// Earliest face crossing across the three axes is the candidate hit.
	const float MinTime = FMath::Min3(Time.X, Time.Y, Time.Z);
	if (MinTime >= 0.0f && MinTime <= 1.0f)
	{
		HitLocation = Start + StartToEnd * MinTime;
		// Tolerance band so hits sitting exactly on a face are not
		// rejected due to floating point error.
		const float BOX_SIDE_THRESHOLD = 0.1f;
		if (HitLocation.X > Box.Min.X - BOX_SIDE_THRESHOLD && HitLocation.X < Box.Max.X + BOX_SIDE_THRESHOLD &&
			HitLocation.Y > Box.Min.Y - BOX_SIDE_THRESHOLD && HitLocation.Y < Box.Max.Y + BOX_SIDE_THRESHOLD &&
			HitLocation.Z > Box.Min.Z - BOX_SIDE_THRESHOLD && HitLocation.Z < Box.Max.Z + BOX_SIDE_THRESHOLD)
		{
			return true;
		}
	}
	return false;
}
} // namespace
UUxtHandConstraintComponent::UUxtHandConstraintComponent()
{
	// Ticking is supported but starts disabled; enabled externally when
	// the constraint should update every frame.
	PrimaryComponentTick.bCanEverTick = true;
	PrimaryComponentTick.bStartWithTickEnabled = false;
	bAutoActivate = true;
}
// Simple read accessors for the constraint's current state.
EControllerHand UUxtHandConstraintComponent::GetTrackedHand() const
{
	return TrackedHand;
}
const FBox& UUxtHandConstraintComponent::GetHandBounds() const
{
	return HandBounds;
}
bool UUxtHandConstraintComponent::IsConstraintActive() const
{
	return bIsConstraintActive;
}
const FVector& UUxtHandConstraintComponent::GetGoalLocation() const
{
	return GoalLocation;
}
const FQuat& UUxtHandConstraintComponent::GetGoalRotation() const
{
	return GoalRotation;
}
// Hook consulted by UpdateTrackedHand() before using a hand's palm pose.
bool UUxtHandConstraintComponent::IsHandUsableForConstraint(EControllerHand NewHand)
{
	// Accept by default
	return true;
}
void UUxtHandConstraintComponent::BeginPlay()
{
	Super::BeginPlay();
	// Initialize tracked hand from user setting.
	// If Hand is 'Any Hand' the left is arbitrarily chosen, will switch to right if not tracked.
	TrackedHand = (Hand == EControllerHand::Left || Hand == EControllerHand::Right) ? Hand : EControllerHand::Left;
	// Run one update immediately so state is valid before the first tick.
	bIsConstraintActive = false;
	UpdateConstraint();
}
void UUxtHandConstraintComponent::TickComponent(float DeltaTime, ELevelTick TickType, FActorComponentTickFunction* ThisTickFunction)
{
	Super::TickComponent(DeltaTime, TickType, ThisTickFunction);
	// Re-evaluate tracking, bounds and goal, then move the owning actor
	// toward the goal when requested and tracking succeeded.
	UpdateConstraint();
	if (bIsConstraintActive && bMoveOwningActor)
	{
		AddMovement(DeltaTime);
	}
}
// Computes the world-space direction from the palm toward the configured
// constraint zone, based on the selected offset mode.
FVector UUxtHandConstraintComponent::GetZoneDirection(const FVector& HandLocation, const FQuat& HandRotation) const
{
	// Directions are for the left hand case
	FVector DirectionUlnar;
	FVector DirectionUp;
	switch (OffsetMode)
	{
	case EUxtHandConstraintOffsetMode::LookAtCamera:
	{
		// Derive directions from the head-to-hand view vector so the zone
		// stays camera-aligned.
		FTransform HeadPose = UUxtFunctionLibrary::GetHeadPose(GetWorld());
		FVector LookAtVector = HandLocation - HeadPose.GetLocation();
		bool IsPalmFacingCamera = FVector::DotProduct(LookAtVector, HandRotation.GetUpVector()) > 0.0f;
		// Fall back to a fixed axis when hand and head coincide.
		DirectionUlnar =
			!LookAtVector.IsNearlyZero() ? FVector::CrossProduct(LookAtVector, FVector::UpVector).GetSafeNormal() : -FVector::RightVector;
		if (IsPalmFacingCamera)
		{
			DirectionUlnar = -DirectionUlnar;
		}
		DirectionUp = HeadPose.GetRotation().GetUpVector();
		break;
	}
	case EUxtHandConstraintOffsetMode::HandRotation:
	{
		// Derive directions directly from the hand's own rotation.
		DirectionUlnar = -HandRotation.GetRightVector();
		DirectionUp = HandRotation.GetForwardVector();
		break;
	}
	}
	// Flip for the right hand case
	if (TrackedHand == EControllerHand::Right)
	{
		DirectionUlnar = -DirectionUlnar;
	}
	// Select the direction matching the configured zone.
	switch (Zone)
	{
	case EUxtHandConstraintZone::UlnarSide:
		return DirectionUlnar;
	case EUxtHandConstraintZone::RadialSide:
		return -DirectionUlnar;
	case EUxtHandConstraintZone::AboveFingerTips:
		return DirectionUp;
	case EUxtHandConstraintZone::BelowWrist:
		return -DirectionUp;
	}
	return FVector::ZeroVector;
}
// Refreshes tracking, hand bounds and the goal pose, then broadcasts
// activation/deactivation/hand-change events based on the state change.
void UUxtHandConstraintComponent::UpdateConstraint()
{
	const bool bWasConstraintActive = bIsConstraintActive;
	const EControllerHand OldTrackedHand = TrackedHand;
	// The constraint only becomes active again if all three updates succeed.
	bIsConstraintActive = false;
	FQuat PalmRotation;
	FVector PalmLocation;
	if (UpdateTrackedHand(PalmLocation, PalmRotation))
	{
		if (UpdateHandBounds(PalmLocation, PalmRotation))
		{
			if (UpdateGoal(PalmLocation, PalmRotation))
			{
				// Activate constraint
				bIsConstraintActive = true;
			}
		}
	}
	else
	{
		HandBounds = FBox(EForceInit::ForceInitToZero);
	}
	if (!bWasConstraintActive && bIsConstraintActive)
	{
		if (bMoveOwningActor)
		{
			// When activating snap to the goal
			GetOwner()->SetActorLocation(GoalLocation);
			GetOwner()->SetActorRotation(GoalRotation);
		}
		// Activate constraint
		OnConstraintActivated.Broadcast();
		OnBeginTracking.Broadcast(TrackedHand);
	}
	else if (bWasConstraintActive && !bIsConstraintActive)
	{
		// Deactivate constraint
		OnEndTracking.Broadcast(TrackedHand);
		OnConstraintDeactivated.Broadcast();
	}
	else if (bIsConstraintActive && OldTrackedHand != TrackedHand)
	{
		// Tracked hand changed while constraint is active
		OnEndTracking.Broadcast(OldTrackedHand);
		OnBeginTracking.Broadcast(TrackedHand);
	}
}
// Resolves which hand to track (honoring the 'Any Hand' fallback) and
// fetches its palm pose. Returns false when no usable hand is tracked.
bool UUxtHandConstraintComponent::UpdateTrackedHand(FVector& OutPalmLocation, FQuat& OutPalmRotation)
{
	// Utility lambda for getting palm location and rotation of the TrackedHand, returns false if rejected.
	auto GetValidTransformFromTrackedHand = [this, &OutPalmLocation, &OutPalmRotation]() -> bool {
		if (IsHandUsableForConstraint(TrackedHand))
		{
			float PalmRadius;
			return UUxtHandTrackingFunctionLibrary::GetHandJointState(
				TrackedHand, EUxtHandJoint::Palm, OutPalmRotation, OutPalmLocation, PalmRadius);
		}
		return false;
	};
	// Update the tracked hand
	if (Hand == EControllerHand::Left || Hand == EControllerHand::Right)
	{
		// Fixed hand selection: no fallback.
		TrackedHand = Hand;
		return GetValidTransformFromTrackedHand();
	}
	else if (Hand == EControllerHand::AnyHand)
	{
		// Try to use current tracked hand
		if (GetValidTransformFromTrackedHand())
		{
			return true;
		}
		// Tracking lost, select opposite hand
		TrackedHand = (TrackedHand == EControllerHand::Left ? EControllerHand::Right : EControllerHand::Left);
		return GetValidTransformFromTrackedHand();
	}
	else
	{
		// Unspecified hand type
		return false;
	}
}
bool UUxtHandConstraintComponent::UpdateHandBounds(const FVector& PalmLocation, const FQuat& PalmRotation)
{
FTransform WorldFromPalm = FTransform(PalmRotation, PalmLocation);
FTransform PalmFromWorld = WorldFromPalm.Inverse();
HandBounds = FBox(EForceInit::ForceInitToZero);
for (int i = 0; i < (uint8)EUxtHandJoint::Count; ++i)
{
EUxtHandJoint Joint = (EUxtHandJoint)i;
FQuat JointRotation;
FVector JointLocation;
float JointRadius;
if (!UUxtHandTrackingFunctionLibrary::GetHandJointState(TrackedHand, Joint, JointRotation, JointLocation, JointRadius))
{
continue;
}
// Joint position in palm coordinates
FVector LocalLoc = PalmFromWorld.TransformPosition(JointLocation);
// Union with box around the joint, using radius for padding
HandBounds += FBox(LocalLoc - FVector::OneVector * JointRadius, LocalLoc + FVector::OneVector * JointRadius);
}
return (bool)HandBounds.IsValid;
}
// Compute the goal location and rotation of the constraint.
//
// Casts a ray from the palm along the zone direction against the
// margin-enlarged hand bounds; the intersection point (in world space)
// becomes the goal location. The goal rotation depends on RotationMode.
//
// Returns false if the hand bounds are invalid or the ray cast fails
// (degenerate bounds).
bool UUxtHandConstraintComponent::UpdateGoal(const FVector& PalmLocation, const FQuat& PalmRotation)
{
	if (!HandBounds.IsValid)
	{
		return false;
	}

	FVector ZoneDirection = GetZoneDirection(PalmLocation, PalmRotation);
	// Inverse transform ray origin and direction into hand bounds space
	FVector LocalZoneDirection = PalmRotation.UnrotateVector(ZoneDirection);
	// Enlarge bounds by margin
	FBox ZoneBox = HandBounds.ExpandBy(GoalMargin);
	// Extent vector is half size, multiply by 3 to ensure the ray reaches outside the box
	float RayLength = 3.0f * ZoneBox.GetExtent().Size();
	FVector LocalHitLocation;
	// Should only fail in degenerate cases, e.g. empty bounding box.
	if (!LineBoxIntersectionInternal(ZoneBox, FVector::ZeroVector, RayLength * LocalZoneDirection, LocalHitLocation))
	{
		return false;
	}

	// Transform the hit point back to world space.
	GoalLocation = PalmRotation.RotateVector(LocalHitLocation) + PalmLocation;

	const FTransform& CurrentTransform = GetOwner()->GetActorTransform();
	switch (RotationMode)
	{
	case EUxtHandConstraintRotationMode::None:
		// Keep the actor's current rotation.
		GoalRotation = CurrentTransform.GetRotation();
		break;
	case EUxtHandConstraintRotationMode::LookAtCamera:
	{
		// Face the user's head position, keeping world up as the Z axis.
		FTransform HeadPose = UUxtFunctionLibrary::GetHeadPose(GetWorld());
		GoalRotation = FRotationMatrix::MakeFromXZ(HeadPose.GetLocation() - GoalLocation, FVector::UpVector).ToQuat();
		break;
	}
	case EUxtHandConstraintRotationMode::HandRotation:
		// Palm rotation has X facing up, rotate about Y by 90 degrees so that Z is up for consistency
		GoalRotation = PalmRotation * FRotator(-90, 0, 0).Quaternion();
		break;
	}

	return true;
}
void UUxtHandConstraintComponent::AddMovement(float DeltaTime)
{
FVector Location = GetOwner()->GetActorLocation();
FQuat Rotation = GetOwner()->GetActorQuat();
FVector SmoothLoc;
if (LocationLerpTime <= KINDA_SMALL_NUMBER)
{
SmoothLoc = GoalLocation;
}
else
{
float Weight = FMath::Clamp(1.0f - FMath::Exp(-DeltaTime / LocationLerpTime), 0.0f, 1.0f);
SmoothLoc = FMath::Lerp(Location, GoalLocation, Weight);
}
FQuat SmoothRot;
if (RotationLerpTime <= KINDA_SMALL_NUMBER)
{
SmoothRot = GoalRotation;
}
else
{
float Weight = FMath::Clamp(1.0f - FMath::Exp(-DeltaTime / RotationLerpTime), 0.0f, 1.0f);
SmoothRot = FMath::Lerp(Rotation, GoalRotation, Weight);
}
GetOwner()->SetActorLocation(SmoothLoc);
GetOwner()->SetActorRotation(SmoothRot);
}
|
#ifndef SAGA_IMPL_PACKAGES_CPR_CPR_JOB_CPI_HPP
#define SAGA_IMPL_PACKAGES_CPR_CPR_JOB_CPI_HPP
#if defined(__WAVE__) && defined(SAGA_CREATE_PREPROCESSED_FILES)
#pragma wave option(preserve: 2, line: 0, output: "preprocessed/cpr_job_cpi.hpp")
#endif
// Copyright (c) 2005-2009 Hartmut Kaiser
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#if defined(__WAVE__) && defined(SAGA_CREATE_PREPROCESSED_FILES)
#pragma wave option(output: null)
#endif
#include <string>
#include <saga/saga/util.hpp>
#include <saga/saga/types.hpp>
#include <saga/saga/exception.hpp>
#include <saga/saga/cpr.hpp>
#include <saga/saga/adaptors/instance_data.hpp>
#include <saga/saga/adaptors/packages/cpr_job_cpi_instance_data.hpp>
#include <saga/saga/adaptors/attribute_cache_cpi.hpp>
#include <saga/saga/adaptors/attribute_cpi.hpp>
#include <saga/impl/call.hpp>
#include <saga/impl/config.hpp>
#include <saga/impl/engine/register_members.hpp>
#include <saga/impl/packages/job/job_cpi.hpp>
#ifdef SAGA_DEBUG
#include <saga/saga/packages/cpr/preprocessed/cpr_job_cpi.hpp>
#else
#if defined(__WAVE__) && defined(SAGA_CREATE_PREPROCESSED_FILES)
#pragma wave option(output: "preprocessed/cpr_job_cpi.hpp")
#endif
///////////////////////////////////////////////////////////////////////////////
namespace saga { namespace impl { namespace v1_0 {
/**
* This class declares all functions of "service" defined in the SAGA API.
* It extends the base class saga::cpi
*
* @see For more information, read the GGF SAGA API Specification
*/
class cpr_job_cpi
    : public job_cpi
{
public:
    // Constructs the CPI instance; forwards proxy, adaptor info and flags
    // to the job_cpi base class.
    cpr_job_cpi (proxy * p, cpi_info const& info,
        TR1::shared_ptr<saga::adaptor> adaptor, cpi::flags flags)
        : job_cpi (p, info, adaptor, flags)
    {
    }

    virtual ~cpr_job_cpi (void)
    {
    }

    // Identifies this CPI as the checkpoint/recovery (CPR) job CPI.
    cpi::type get_type() const { return cpi::CPRJob; }

    // cpr management methods
    // Each macro declares the synchronous and asynchronous virtual entry
    // points for one API call (see the SAGA_CALL_CPI_DECL_* macros).
    SAGA_CALL_CPI_DECL_VIRT_1 (cpr_job_cpi, saga::impl::void_t, checkpoint, saga::url)
    SAGA_CALL_CPI_DECL_VIRT_1 (cpr_job_cpi, saga::impl::void_t, recover, saga::url)
    SAGA_CALL_CPI_DECL_VIRT_1 (cpr_job_cpi, saga::impl::void_t, cpr_stage_in, saga::url)
    SAGA_CALL_CPI_DECL_VIRT_1 (cpr_job_cpi, saga::impl::void_t, cpr_stage_out, saga::url)
    SAGA_CALL_CPI_DECL_VIRT_0 (cpr_job_cpi, saga::url, cpr_last)
    SAGA_CALL_CPI_DECL_VIRT_0 (cpr_job_cpi, std::vector <saga::url>, cpr_list)
}; // class cpr_job_cpi
///////////////////////////////////////////////////////////////////////////////
}}} // namespace saga::impl::v1_0
///////////////////////////////////////////////////////////////////////////////
namespace saga { namespace adaptors { namespace v1_0 {
///////////////////////////////////////////////////////////////////////////////
// register cpr CPI functions
// Register the CPR job CPI functions implemented by the adaptor class
// 'Derived' with the SAGA engine.
//
// @param infos        list of CPI descriptions; the new cpi_info is appended
// @param maker        factory function creating CPI instances
// @param prefs        adaptor preferences used during adaptor selection
// @param cpi_uuid     uuid of this CPI
// @param adaptor_uuid uuid of the implementing adaptor
// @param cpi_name     name under which the CPI is registered
// @return true if at least one function got registered
template <typename Derived>
inline bool
register_cpr_job_functions(
    std::vector<saga::impl::v1_0::cpi_info>& infos,
    saga::impl::v1_0::cpi::maker_type maker,
    saga::impl::v1_0::preference_type const& prefs,
    saga::uuid const& cpi_uuid, saga::uuid const& adaptor_uuid,
    std::string const& cpi_name)
{
    bool retval = false;
    saga::impl::v1_0::cpi_info info(saga::adaptors::cpr_job_cpi, cpi_name,
        maker, prefs, cpi_uuid, adaptor_uuid);

    SAGA_LOG(SAGA_VERBOSE_LEVEL_BLURB)
        << "begin register_cpr_job_functions (" << cpi_name << "): "
        << adaptor_uuid.string() << ":";

    typedef saga::impl::v1_0::cpr_job_cpi base_cpi;

    // 'checkpoint' is registered via the _EX macro variant, which requires
    // the sync/async member function pointer types spelled out explicitly.
    typedef void (base_cpi::*base_sync_checkpoint_type)(saga::impl::void_t&, saga::url);
    typedef saga::task (base_cpi::*base_async_checkpoint_type)(saga::url);
    typedef void (Derived::*sync_checkpoint_type)(saga::impl::void_t&, saga::url);
    typedef saga::task (Derived::*async_checkpoint_type)(saga::url);

    SAGA_REGISTER_MEMBER_EX(retval, info, base_cpi, Derived, checkpoint, prefs,
        base_sync_checkpoint_type, base_async_checkpoint_type,
        sync_checkpoint_type, async_checkpoint_type);
    SAGA_REGISTER_MEMBER(retval, info, base_cpi, Derived, recover, prefs)
    SAGA_REGISTER_MEMBER(retval, info, base_cpi, Derived, cpr_stage_in, prefs)
    SAGA_REGISTER_MEMBER(retval, info, base_cpi, Derived, cpr_stage_out, prefs)
    SAGA_REGISTER_MEMBER(retval, info, base_cpi, Derived, cpr_last, prefs)
    SAGA_REGISTER_MEMBER(retval, info, base_cpi, Derived, cpr_list, prefs)

    infos.push_back(info);

    SAGA_LOG(SAGA_VERBOSE_LEVEL_BLURB)
        << "end register_cpr_job_functions";

    return retval; // is true if at least one function got registered
}
///////////////////////////////////////////////////////////////////////////
// Adaptor-side base class for CPR job CPI implementations.
//
// Adaptors derive their cpr_job implementation ('Derived', CRTP-style) from
// this class. It provides the shared instance data type, a generic factory
// function, and the registration entry point called by the engine.
template <typename Derived, typename Mutex = boost::recursive_mutex>
class cpr_job_cpi
    : public attribute_cache_cpi<Derived, saga::impl::v1_0::cpr_job_cpi>
{
protected:
    typedef attribute_cache_cpi<Derived, saga::impl::v1_0::cpr_job_cpi>
        base_type;
    typedef saga::impl::proxy proxy;
    typedef saga::impl::v1_0::cpi cpi;
    typedef saga::impl::v1_0::cpi_info cpi_info;
    typedef saga::impl::v1_0::preference_type preference_type;
    typedef Mutex mutex_type;

    mutex_type mtx_; // secure access to cpi_instance_data

public:
    // Constructs the CPI instance; forwards all arguments to the base class.
    cpr_job_cpi (proxy * p, cpi_info const& info,
        TR1::shared_ptr<saga::adaptor> adaptor, cpi::flags flags)
        : base_type(p, info, adaptor, flags)
    {
    }

    virtual ~cpr_job_cpi (void)
    {
    }

    ///////////////////////////////////////////////////////////////////////
    // instance data
    // data associated with the API object instance, i.e. visible to all
    // CPI object instances associated with the API object instance
    // regardless of the shared library (adaptor) they are implemented in.
    typedef saga::adaptors::v1_0::cpr_job_cpi_instance_data cpr_job_cpi_instance_data;
    friend class saga::adaptors::instance_data<cpr_job_cpi_instance_data>;
    typedef saga::adaptors::instance_data<cpr_job_cpi_instance_data> instance_data;

    ///////////////////////////////////////////////////////////////////////
    // generic factory function
    // creates a new instance of the derived adaptor class
    static cpi* cpi_maker (proxy * p,
        cpi_info const & info,
        saga::ini::ini const & glob_ini,
        saga::ini::ini const & adap_ini,
        TR1::shared_ptr<saga::adaptor> adaptor)
    {
        return new Derived (p, info, glob_ini, adap_ini, adaptor);
    }

    ///////////////////////////////////////////////////////////////////////
    // register implemented functions
    // Registers the CPR job functions, the inherited job functions, and the
    // attribute functions with the engine.
    static void
    register_cpi(std::vector<cpi_info>& infos, preference_type prefs,
        saga::uuid adaptor_uuid)
    {
        // register cpr job CPI functions
        saga::uuid cpi_uuid;
        register_cpr_job_functions<Derived>(infos,
            &saga::adaptors::v1_0::cpr_job_cpi<Derived>::cpi_maker,
            prefs, cpi_uuid, adaptor_uuid, saga::adaptors::cpr_job_cpi);
        // NOTE(review): the inherited job functions are registered under the
        // cpr_job_cpi name as well - confirm this is the intended cpi_name.
        register_job_functions<Derived>(infos,
            &saga::adaptors::v1_0::cpr_job_cpi<Derived>::cpi_maker,
            prefs, cpi_uuid, adaptor_uuid, saga::adaptors::cpr_job_cpi);

        // register attribute CPI functions
        if (!register_attribute_functions<Derived>(infos,
                &saga::adaptors::v1_0::cpr_job_cpi<Derived>::cpi_maker,
                prefs, cpi_uuid, adaptor_uuid, saga::adaptors::cpr_job_cpi))
        {
            // register the default (cache based) implementation of the
            // attribute CPI if the derived class does not implement the
            // attribute interface by itself
            register_attribute_functions<base_type>(infos,
                &saga::adaptors::v1_0::cpr_job_cpi<Derived>::cpi_maker,
                prefs, cpi_uuid, adaptor_uuid, saga::adaptors::cpr_job_cpi);
        }
    }

    ///////////////////////////////////////////////////////////////////////
    // generic up-cast of the impl pointer
    TR1::shared_ptr<Derived> shared_from_this()
    {
        return TR1::static_pointer_cast<Derived>(
            this->base_type::shared_from_this());
    }
};
///////////////////////////////////////////////////////////////////////////////
}}} // namespace saga::adaptors::v1_0
#if defined(__WAVE__) && defined(SAGA_CREATE_PREPROCESSED_FILES)
#pragma wave option(output: null)
#endif
#endif // !defined(SAGA_DEBUG)
#endif // SAGA_IMPL_PACKAGES_CPR_CPR_JOB_CPI_HPP
|
// --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2018.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Eugen Netz $
// $Authors: Eugen Netz $
// --------------------------------------------------------------------------
#include <OpenMS/APPLICATIONS/TOPPBase.h>
#include <OpenMS/ANALYSIS/XLMS/OpenPepXLLFAlgorithm.h>
#include <OpenMS/FORMAT/XQuestResultXMLFile.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/FORMAT/MzMLFile.h>
#include <OpenMS/FORMAT/ConsensusXMLFile.h>
#include <OpenMS/FORMAT/IdXMLFile.h>
#include <OpenMS/FORMAT/MzIdentMLFile.h>
#include <OpenMS/CHEMISTRY/ModificationsDB.h>
#include <OpenMS/SYSTEM/File.h>
using namespace std;
using namespace OpenMS;
//-------------------------------------------------------------
//Doxygen docu
//-------------------------------------------------------------
/**
@page TOPP_OpenPepXLLF OpenPepXLLF
@brief Search for cross-linked peptide pairs in tandem MS spectra
This tool performs a search for cross-links in the given mass spectra.
It executes the following steps in order:
<ul>
<li>Reading of MS2 spectra from the given mzML file, MS1 spectra are ignored for now</li>
<li>Processing of spectra: deisotoping and filtering</li>
<li>Digesting and preprocessing the protein database, building a peptide pair index dependent on the precursor masses of the MS2 spectra</li>
<li>Generating theoretical spectra of cross-linked peptides and aligning the experimental spectra against those</li>
<li>Scoring of cross-link spectrum matches</li>
<li>Using PeptideIndexer to map the peptides to all possible source proteins</li>
<li>Writing out the results in idXML, mzid according to mzIdentML 1.2 specifications and/or in the xQuest output format</li>
</ul>
See below or have a look at the INI file (via "OpenPepXLLF -write_ini myini.ini") for available parameters and more functionality.
<h3>Input: MS2 spectra and fasta database of proteins expected to be cross-linked in the sample</h3>
The spectra should be provided as one mzML file. If you have multiple files, e.g. for multiple fractions, you should run this tool on each
file separately.
The database can either be provided as one merged file containing targets and decoys or as two separate files.
<h3>Parameters</h3>
The parameters for fixed and variable modifications refer to additional modifications beside the cross-linker.
The linker used in the experiment has to be described using the cross-linker specific parameters.
Only one mass is allowed for a cross-linker that links two peptides, while multiple masses are possible for mono-links of the same cross-linking reagent.
Mono-links are cross-linkers, that are linked to one peptide by one of their two reactive groups.
To search for isotopically labeled pairs of cross-linkers see the tool OpenPepXL.
The parameters -cross_linker:residue1 and -cross_linker:residue2 are used to enumerate the amino acids,
that each end of the linker can react with. This way any heterobifunctional cross-linker can be defined.
To define a homobifunctional cross-linker, these two parameters should have the same value.
The parameter -cross_linker:name is used to solve ambiguities caused by different cross-linkers with the same mass
after the linking reaction (see section on output for clarification).
<h3>Output: XL-MS Identifications with scores and linked positions in the proteins</h3>
There are three file formats for output of data possible. idXML is the internal format of OpenMS, and is recommended for post-processing using other TOPP tools like XFDR or TOPPView.
The second format is the output format of xQuest, which is a popular XL-MS ID tool.
This format is compatible with a number of post-processing and visualization tools,
like xProphet for FDR estimation (Leitner, A. et al., 2014, Nature protocols)
and through the xQuest Results Viewer also the XlinkAnalyzer for visualization and analysis using protein structures (Kosinski, J. et al., 2015, Journal of structural biology).
The third format is mzIdentML according to the specifications for XL-MS ID data in version 1.2 (Vizcaíno, J. A. et al., 2017, Mol Cell Proteomics).
This is a standardized format and will be compatible with complete submissions to the PRIDE database, which is part of the ProteomeXchange consortium.
The specification includes the XLMOD database of cross-linking reagents, and if the provided cross-link mass matches one from the
database, its accession and name are used. If the name is provided with the -cross_linker:name parameter, it is used
to solve ambiguities arising from different cross-linkers having the same mass after the linking reaction (e.g. DSS and BS3).
It is also used as the name of the linker, if no matching masses are found in the database.
<CENTER>
<table>
<tr>
<td ALIGN = "center" BGCOLOR="#EBEBEB"> pot. predecessor tools </td>
<td VALIGN="middle" ROWSPAN=2> \f$ \longrightarrow \f$ OpenPepXLLF \f$ \longrightarrow \f$</td>
<td ALIGN = "center" BGCOLOR="#EBEBEB"> pot. successor tools </td>
</tr>
<tr>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> - </td>
<td VALIGN="middle" ALIGN = "center" ROWSPAN=1> - </td>
</tr>
</table>
</CENTER>
<B>The command line parameters of this tool are:</B>
@verbinclude UTILS_OpenPepXLLF.cli
<B>INI file documentation of this tool:</B>
@htmlinclude UTILS_OpenPepXLLF.html
*/
// TOPP tool wrapper around OpenPepXLLFAlgorithm: reads spectra and a protein
// database, runs the label-free cross-link search, and writes results in
// idXML, mzIdentML and/or xQuest formats.
class TOPPOpenPepXLLF :
  public TOPPBase
{
public:
  TOPPOpenPepXLLF() :
    TOPPBase("OpenPepXLLF", "Tool for protein-protein cross linking with label-free linkers.", false)
  {
  }

protected:
  // Declares all command line parameters: input spectra/database files, the
  // search algorithm's own parameters, and the optional output files (at
  // least one output parameter should be set to obtain any results).
  void registerOptionsAndFlags_() override
  {
    // input files
    registerInputFile_("in", "<file>", "", "Input file containing the spectra.", true, false);
    setValidFormats_("in", ListUtils::create<String>("mzML"));
    registerInputFile_("database", "<file>", "", "Input file containing the protein database.", true, false);
    setValidFormats_("database", ListUtils::create<String>("fasta"));
    registerInputFile_("decoy_database", "<file>", "", "Input file containing the decoy protein database. Decoys can also be included in the normal database file instead (or additionally).", false, true);
    setValidFormats_("decoy_database", ListUtils::create<String>("fasta"));
    // expose all algorithm parameters on the tool's command line
    registerFullParam_(OpenPepXLLFAlgorithm().getDefaults());
    // output file
    registerOutputFile_("out_xquestxml", "<file>", "", "Results in the xquest.xml format (at least one of these output parameters should be set, otherwise you will not have any results).", false);
    setValidFormats_("out_xquestxml", ListUtils::create<String>("xml,xquest.xml"));
    registerOutputFile_("out_xquest_specxml", "<file>", "", "Matched spectra in the xQuest .spec.xml format for spectra visualization in the xQuest results manager.", false, false);
    setValidFormats_("out_xquest_specxml", ListUtils::create<String>("xml,spec.xml"));
    registerOutputFile_("out_idXML", "<file>", "", "Results in idXML format (at least one of these output parameters should be set, otherwise you will not have any results).", false);
    setValidFormats_("out_idXML", ListUtils::create<String>("idXML"));
    registerOutputFile_("out_mzIdentML", "<file>","", "Results in mzIdentML (.mzid) format (at least one of these output parameters should be set, otherwise you will not have any results)", false);
    setValidFormats_("out_mzIdentML", ListUtils::create<String>("mzid"));
  }

  // Runs the search: load spectra and database, execute the algorithm, and
  // write the requested output files.
  ExitCodes main_(int, const char**) override
  {
    ProgressLogger progresslogger;
    progresslogger.setLogType(log_type_);

    const string in_mzml(getStringOption_("in"));
    const string in_fasta(getStringOption_("database"));
    const string in_decoy_fasta(getStringOption_("decoy_database"));
    const string out_idXML(getStringOption_("out_idXML"));
    const string out_xquest = getStringOption_("out_xquestxml");
    const string out_xquest_specxml = getStringOption_("out_xquest_specxml");
    const string out_mzIdentML = getStringOption_("out_mzIdentML");

    OPENMS_LOG_INFO << "Analyzing file: " << endl;
    OPENMS_LOG_INFO << in_mzml << endl;

    // load spectra
    // NOTE(review): the original comment said "load MS2 map" but level 1 is
    // loaded as well - confirm whether MS1 spectra are really needed here.
    PeakMap unprocessed_spectra;
    MzMLFile f;
    f.setLogType(log_type_);
    PeakFileOptions options;
    options.clearMSLevels();
    options.addMSLevel(2);
    options.addMSLevel(1);
    f.getOptions() = options;
    f.load(in_mzml, unprocessed_spectra);

    // load fasta database (targets, plus optional separate decoy database)
    progresslogger.startProgress(0, 1, "Load database from FASTA file...");
    FASTAFile fastaFile;
    vector<FASTAFile::FASTAEntry> fasta_db;
    fastaFile.load(in_fasta, fasta_db);
    if (!in_decoy_fasta.empty())
    {
      vector<FASTAFile::FASTAEntry> fasta_decoys;
      fastaFile.load(in_decoy_fasta, fasta_decoys);
      fasta_db.reserve(fasta_db.size() + fasta_decoys.size());
      fasta_db.insert(fasta_db.end(), fasta_decoys.begin(), fasta_decoys.end());
    }
    progresslogger.endProgress();

    // initialize solution vectors
    vector<ProteinIdentification> protein_ids(1);
    vector<PeptideIdentification> peptide_ids;
    // these are mainly necessary for writing out xQuest type spectrum files
    vector< vector< OPXLDataStructs::CrossLinkSpectrumMatch > > all_top_csms;
    PeakMap spectra;

    OpenPepXLLFAlgorithm search_algorithm;
    Param this_param = getParam_().copy("", true);
    Param algo_param = search_algorithm.getParameters();
    algo_param.update(this_param, false, OpenMS_Log_debug); // suppress param. update message
    search_algorithm.setParameters(algo_param);
    search_algorithm.setLogType(this->log_type_);

    // record the search settings in the protein identification run
    ProteinIdentification::SearchParameters search_params;
    search_params.db = in_fasta;
    search_params.setMetaValue("input_mzML", in_mzml);
    search_params.setMetaValue("input_decoys", in_decoy_fasta);
    search_params.setMetaValue("out_xquest_specxml", out_xquest_specxml);
    protein_ids[0].setSearchParameters(search_params);
    protein_ids[0].setDateTime(DateTime::now());
    protein_ids[0].setSearchEngine("OpenPepXL");
    protein_ids[0].setSearchEngineVersion(VersionInfo::getVersion());
    protein_ids[0].setMetaValue("SpectrumIdentificationProtocol", DataValue("MS:1002494")); // cross-linking search = MS:1002494

    // run algorithm
    OpenPepXLLFAlgorithm::ExitCodes exit_code = search_algorithm.run(unprocessed_spectra, fasta_db, protein_ids, peptide_ids, all_top_csms, spectra);

    // Only ILLEGAL_PARAMETERS aborts the tool; any other non-OK code falls
    // through and output is still written (matches previous behavior).
    if (exit_code == OpenPepXLLFAlgorithm::ILLEGAL_PARAMETERS)
    {
      return ILLEGAL_PARAMETERS;
    }

    // MS path already set in algorithm. Overwrite here so we get something testable
    if (getFlag_("test"))
    {
      // if test mode set, add file without path so we can compare it
      protein_ids[0].setPrimaryMSRunPath({"file://" + File::basename(in_mzml)});
    }

    // write output
    progresslogger.startProgress(0, 1, "Writing output...");
    if (!out_idXML.empty())
    {
      IdXMLFile().store(out_idXML, protein_ids, peptide_ids);
    }
    if (!out_mzIdentML.empty())
    {
      MzIdentMLFile().store(out_mzIdentML, protein_ids, peptide_ids);
    }
    if (!out_xquest.empty() || !out_xquest_specxml.empty())
    {
      // Base name of the input file (filename up to the first dot), used as
      // spectrum reference inside the xQuest output files. File::basename
      // handles platform path separators; the previous manual split on '/'
      // broke on Windows paths.
      String base_name = File::basename(in_mzml);
      vector<String> base_name_split;
      if (base_name.split('.', base_name_split) && !base_name_split.empty())
      {
        base_name = base_name_split[0];
      }

      if (!out_xquest.empty())
      {
        XQuestResultXMLFile().store(out_xquest, protein_ids, peptide_ids);
      }
      if (!out_xquest_specxml.empty())
      {
        XQuestResultXMLFile::writeXQuestXMLSpec(out_xquest_specxml, base_name, all_top_csms, spectra);
      }
    }
    progresslogger.endProgress();

    return EXECUTION_OK;
  }
};
// Tool entry point: construct the tool and hand control to TOPPBase.
int main(int argc, const char** argv)
{
  return TOPPOpenPepXLLF().main(argc, argv);
}
|
//===--- LoadableByAddress.cpp - Lower SIL address-only types. ------------===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
// This pass lowers loadable SILTypes. On completion, the SILType of every
// function argument is an address instead of the type itself.
// This reduces the code size.
// Consequently, this pass is required for IRGen.
// It is a mandatory IRGen preparation pass (not a diagnostic pass).
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loadable-address"
#include "FixedTypeInfo.h"
#include "IRGenMangler.h"
#include "IRGenModule.h"
#include "NativeConventionSchema.h"
#include "swift/AST/GenericEnvironment.h"
#include "swift/IRGen/IRGenSILPasses.h"
#include "swift/SIL/DebugUtils.h"
#include "swift/SIL/SILArgument.h"
#include "swift/SIL/SILBuilder.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
#include "swift/SILOptimizer/Utils/Local.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
using namespace swift;
using namespace swift::irgen;
/// Return the generic environment of the lowered function type's generic
/// signature.
static GenericEnvironment *getGenericEnvironment(CanSILFunctionType loweredTy) {
  auto signature = loweredTy->getGenericSignature();
  return signature.getGenericEnvironment();
}
/// Utility to determine if this is a large loadable type
/// Utility to determine if this is a large loadable type: a non-address,
/// non-class object type whose native parameter schema requires indirect
/// passing.
static bool isLargeLoadableType(GenericEnvironment *GenericEnv, SILType t,
                                irgen::IRGenModule &Mod) {
  // Addresses and class (metatype) references are never "large loadable".
  if (t.isAddress() || t.isClassOrClassMetatype())
    return false;

  CanType canType = t.getSwiftRValueType();
  if (canType->hasTypeParameter()) {
    // Map interface types into their archetype context before querying TI.
    assert(GenericEnv && "Expected a GenericEnv");
    canType = GenericEnv->mapTypeIntoContext(canType)->getCanonicalType();
  }

  if (!canType.getAnyGeneric())
    return false;

  assert(t.isObject() && "Expected only two categories: address and object");
  assert(!canType->hasTypeParameter());
  const TypeInfo &TI = Mod.getTypeInfoForLowered(canType);
  // Large types are those that must be passed indirectly natively.
  return TI.nativeParameterValueSchema(Mod).requiresIndirect();
}
/// C-convention functions keep the old ABI and are never rewritten by this
/// pass; everything else is modifiable.
static bool modifiableFunction(CanSILFunctionType funcType) {
  return funcType->getLanguage() != SILFunctionLanguage::C;
}
static bool shouldTransformResults(GenericEnvironment *env,
CanSILFunctionType fnType,
irgen::IRGenModule &IGM);
static bool shouldTransformFunctionType(GenericEnvironment *env,
CanSILFunctionType fnType,
irgen::IRGenModule &IGM);
/// Decide whether a single parameter needs rewriting: function-typed
/// parameters are checked recursively, already-indirect parameters never
/// change, and remaining direct parameters change iff they are large
/// loadable types.
static bool shouldTransformParameter(GenericEnvironment *env,
                                     SILParameterInfo param,
                                     irgen::IRGenModule &IGM) {
  SILType storageType = param.getSILStorageType();

  // FIXME: only function types and not recursively-transformable types?
  if (auto fnType = storageType.getAs<SILFunctionType>())
    return shouldTransformFunctionType(env, fnType, IGM);

  switch (param.getConvention()) {
  case ParameterConvention::Indirect_In_Guaranteed:
  case ParameterConvention::Indirect_Inout:
  case ParameterConvention::Indirect_InoutAliasable:
  case ParameterConvention::Indirect_In:
    // Already passed indirectly; nothing to do.
    return false;
  default:
    break;
  }
  return isLargeLoadableType(env, storageType, IGM);
}
/// A function type is rewritten if its results, any parameter, or any yield
/// needs rewriting.
static bool shouldTransformFunctionType(GenericEnvironment *env,
                                        CanSILFunctionType fnType,
                                        irgen::IRGenModule &IGM) {
  if (shouldTransformResults(env, fnType, IGM))
    return true;

  auto needsRewrite = [&](SILParameterInfo info) {
    return shouldTransformParameter(env, info, IGM);
  };

  for (SILParameterInfo param : fnType->getParameters()) {
    if (needsRewrite(param))
      return true;
  }
  for (SILYieldInfo yield : fnType->getYields()) {
    if (needsRewrite(yield))
      return true;
  }
  return false;
}
// Forward declarations - functions depend on each other
static SmallVector<SILParameterInfo, 4>
getNewParameters(GenericEnvironment *env, CanSILFunctionType fnType,
irgen::IRGenModule &IGM);
static SmallVector<SILYieldInfo, 2>
getNewYields(GenericEnvironment *env, CanSILFunctionType fnType,
irgen::IRGenModule &IGM);
static SILType getNewSILType(GenericEnvironment *GenericEnv,
SILType storageType, irgen::IRGenModule &Mod);
/// Return true if rewriting any result type would change the function's
/// result signature (excluding large loadable results, which are handled
/// separately as indirect results).
///
/// Fix: removed the unused local `SmallVector<SILResultInfo, 2> newResults`
/// that was never read or written.
static bool newResultsDiffer(GenericEnvironment *GenericEnv,
                             ArrayRef<SILResultInfo> origResults,
                             irgen::IRGenModule &Mod) {
  for (auto result : origResults) {
    SILType currResultTy = result.getSILStorageType();
    SILType newSILType = getNewSILType(GenericEnv, currResultTy, Mod);
    // We (currently) only care about function signatures
    if (!isLargeLoadableType(GenericEnv, currResultTy, Mod) &&
        (newSILType != currResultTy)) {
      return true;
    }
  }
  return false;
}
/// Compute the rewritten result list of a function type.
///
/// Results are kept unchanged UNLESS:
///  1) the result is a (non-large) type whose rewritten SIL type differs,
///     e.g. a function type with a changed signature - convention is kept; or
///  2) the result is a large loadable type, which becomes an indirect
///     (@out-style) result.
static SmallVector<SILResultInfo, 2>
getNewResults(GenericEnvironment *GenericEnv,
              CanSILFunctionType fnType, irgen::IRGenModule &Mod) {
  // Get new SIL Function results - same as old results UNLESS:
  // 1) Function type results might have a different signature
  // 2) Large loadables are replaced by @out version
  auto origResults = fnType->getResults();
  SmallVector<SILResultInfo, 2> newResults;
  for (auto result : origResults) {
    SILType currResultTy = result.getSILStorageType();
    SILType newSILType = getNewSILType(GenericEnv, currResultTy, Mod);
    // We (currently) only care about function signatures
    if (!isLargeLoadableType(GenericEnv, currResultTy, Mod) &&
        (newSILType != currResultTy)) {
      // Case (1) Above: keep the original result convention.
      SILResultInfo newResult(newSILType.getSwiftRValueType(),
                              result.getConvention());
      newResults.push_back(newResult);
    } else if ((newSILType != currResultTy) &&
               shouldTransformResults(GenericEnv, fnType, Mod)) {
      // Case (2) Above: large loadable result becomes indirect.
      SILResultInfo newSILResultInfo(newSILType.getSwiftRValueType(),
                                     ResultConvention::Indirect);
      newResults.push_back(newSILResultInfo);
    } else {
      // Result unchanged.
      newResults.push_back(result);
    }
  }
  return newResults;
}
/// Build the rewritten function type: same extended info, coroutine kind,
/// callee convention, error result and conformance, with rewritten
/// parameters, yields and results. C-convention types are returned
/// unmodified (old ABI).
static CanSILFunctionType
getNewSILFunctionType(GenericEnvironment *env,
                      CanSILFunctionType fnType,
                      irgen::IRGenModule &IGM) {
  if (!modifiableFunction(fnType))
    return fnType;

  // Rewrite each component of the signature in turn.
  auto rewrittenParams = getNewParameters(env, fnType, IGM);
  auto rewrittenYields = getNewYields(env, fnType, IGM);
  auto rewrittenResults = getNewResults(env, fnType, IGM);

  return SILFunctionType::get(fnType->getGenericSignature(),
                              fnType->getExtInfo(),
                              fnType->getCoroutineKind(),
                              fnType->getCalleeConvention(),
                              rewrittenParams,
                              rewrittenYields,
                              rewrittenResults,
                              fnType->getOptionalErrorResult(),
                              fnType->getASTContext(),
                              fnType->getWitnessMethodConformanceOrNone());
}
// Get the function type or the optional function type.
// Returns a null CanSILFunctionType if the storage type is neither a
// function type nor an Optional wrapping one.
static CanSILFunctionType getInnerFunctionType(SILType storageType) {
  if (auto directFnTy = storageType.getAs<SILFunctionType>())
    return directFnTy;

  if (auto optObjectTy = storageType.getAnyOptionalObjectType()) {
    if (auto wrappedFnTy = optObjectTy.getAs<SILFunctionType>())
      return wrappedFnTy;
  }
  return CanSILFunctionType();
}
/// If `storageType` is Optional<function type> and that function type needs
/// rewriting, return Optional<rewritten function type> (same SIL category);
/// otherwise return `storageType` unchanged.
static SILType getNewOptionalFunctionType(GenericEnvironment *GenericEnv,
                                          SILType storageType,
                                          irgen::IRGenModule &Mod) {
  auto objectType = storageType.getAnyOptionalObjectType();
  if (!objectType)
    return storageType;

  auto fnType = objectType.getAs<SILFunctionType>();
  if (!fnType || !shouldTransformFunctionType(GenericEnv, fnType, Mod))
    return storageType;

  auto newFnType = getNewSILFunctionType(GenericEnv, fnType, Mod);
  SILType rewritten =
      SILType::getPrimitiveType(newFnType, storageType.getCategory());
  return SILType::getOptionalType(rewritten);
}
// True when \p loweredTy has exactly one result whose storage type would be
// rewritten by this pass (and the function is modifiable at all).
static bool shouldTransformResults(GenericEnvironment *genEnv,
                                   CanSILFunctionType loweredTy,
                                   irgen::IRGenModule &Mod) {
  if (!modifiableFunction(loweredTy) || loweredTy->getNumResults() != 1)
    return false;
  auto resultTy = loweredTy->getSingleResult().getSILStorageType();
  return getNewSILType(genEnv, resultTy, Mod) != resultTy;
}
// Convenience overload: does \p F's own lowered signature need its result
// transformed?
static bool modResultType(SILFunction *F, irgen::IRGenModule &Mod) {
  return shouldTransformResults(F->getGenericEnvironment(),
                                F->getLoweredFunctionType(), Mod);
}
// Rewrite a single parameter for the new function signature:
//  - Optional<FnTy> parameters get the rewritten optional function type;
//  - function-typed parameters get the rewritten function type;
//  - large loadable parameters become indirect (guaranteed stays guaranteed,
//    everything else becomes @in_constant);
//  - all other parameters are returned unchanged.
static SILParameterInfo
getNewParameter(GenericEnvironment *env, SILParameterInfo param,
                irgen::IRGenModule &IGM) {
  SILType storageType = param.getSILStorageType();
  SILType newOptFuncType =
      getNewOptionalFunctionType(env, storageType, IGM);
  if (newOptFuncType != storageType) {
    // Optional function type was rewritten; keep the original convention.
    return param.getWithType(newOptFuncType.getSwiftRValueType());
  }
  if (auto paramFnType = storageType.getAs<SILFunctionType>()) {
    if (shouldTransformFunctionType(env, paramFnType, IGM)) {
      auto newFnType = getNewSILFunctionType(env, paramFnType, IGM);
      return param.getWithType(newFnType);
    } else {
      return param;
    }
  } else if (isLargeLoadableType(env, storageType, IGM)) {
    // Large loadable values are passed by address instead of by value.
    if (param.getConvention() == ParameterConvention::Direct_Guaranteed)
      return SILParameterInfo(storageType.getSwiftRValueType(),
                              ParameterConvention::Indirect_In_Guaranteed);
    else
      return SILParameterInfo(storageType.getSwiftRValueType(),
                              ParameterConvention::Indirect_In_Constant);
  } else {
    return param;
  }
}
// Rewrite every parameter of \p fnType via getNewParameter.
static SmallVector<SILParameterInfo, 4>
getNewParameters(GenericEnvironment *env, CanSILFunctionType fnType,
                 irgen::IRGenModule &IGM) {
  SmallVector<SILParameterInfo, 4> rewritten;
  for (const SILParameterInfo &origParam : fnType->getParameters())
    rewritten.push_back(getNewParameter(env, origParam, IGM));
  return rewritten;
}
// Rewrite every yield of \p fnType; yields are rewritten exactly like
// parameters and then re-packaged as SILYieldInfo.
static SmallVector<SILYieldInfo, 2>
getNewYields(GenericEnvironment *env, CanSILFunctionType fnType,
             irgen::IRGenModule &IGM) {
  SmallVector<SILYieldInfo, 2> rewritten;
  for (const SILYieldInfo &origYield : fnType->getYields()) {
    auto asParam = getNewParameter(env, origYield, IGM);
    rewritten.push_back(
        SILYieldInfo(asParam.getType(), asParam.getConvention()));
  }
  return rewritten;
}
// Compute the rewritten SIL type for \p storageType:
//  1. Optional function types are handled first (and returned immediately);
//  2. plain function types are rewritten when needed;
//  3. large loadable types become their address type;
//  4. anything else is unchanged.
static SILType getNewSILType(GenericEnvironment *GenericEnv,
                             SILType storageType, irgen::IRGenModule &Mod) {
  SILType newSILType = getNewOptionalFunctionType(GenericEnv, storageType, Mod);
  if (newSILType != storageType) {
    return newSILType;
  }
  if (auto fnType = storageType.getAs<SILFunctionType>()) {
    if (shouldTransformFunctionType(GenericEnv, fnType, Mod)) {
      auto newFnType = getNewSILFunctionType(GenericEnv, fnType, Mod);
      // Keep the original value category (object vs. address).
      newSILType = SILType::getPrimitiveType(newFnType,
                                             storageType.getCategory());
    }
  } else if (isLargeLoadableType(GenericEnv, storageType, Mod)) {
    newSILType = storageType.getAddressType();
  }
  return newSILType;
}
//===----------------------------------------------------------------------===//
// StructLoweringState: shared state for the pass's analysis and transforms.
//===----------------------------------------------------------------------===//
namespace {
// Shared mutable state for one function's analysis + rewriting: worklists of
// instructions that must be modified, plus maps connecting the values the
// pass creates (loads, alloc_stacks) to the values they replace.
struct StructLoweringState {
  // The function being rewritten.
  SILFunction *F;
  irgen::IRGenModule &Mod;
  // All large loadable function arguments that we modified
  SmallVector<SILValue, 16> largeLoadableArgs;
  // All modified function signature function arguments
  SmallVector<SILValue, 16> funcSigArgs;
  // All args for which we did a load
  llvm::MapVector<SILValue, SILValue> argsToLoadedValueMap;
  // All applies for which we did an alloc
  llvm::MapVector<SILInstruction *, SILValue> applyRetToAllocMap;
  // Reverse map of the one above
  llvm::MapVector<SILInstruction *, SILInstruction *> allocToApplyRetMap;
  // All call sites with SILArgument that needs to be re-written
  // Calls are removed from the set when rewritten.
  SmallVector<SILInstruction *, 16> applies;
  // All MethodInst that use the large struct
  SmallVector<MethodInst *, 16> methodInstsToMod;
  // Large loadable store instrs should call the outlined copy
  SmallVector<StoreInst *, 16> storeInstsToMod;
  // All switch_enum instrs that should be converted to switch_enum_addr
  SmallVector<SwitchEnumInst *, 16> switchEnumInstsToMod;
  // All struct_extract instrs that should be converted to struct_element_addr
  SmallVector<StructExtractInst *, 16> structExtractInstsToMod;
  // All tuple instructions for which the return type is a function type
  SmallVector<SingleValueInstruction *, 8> tupleInstsToMod;
  // All alloc_stack instructions to modify
  SmallVector<AllocStackInst *, 8> allocStackInstsToMod;
  // All pointer to address instructions to modify
  SmallVector<PointerToAddressInst *, 8> pointerToAddrkInstsToMod;
  // All Retain and release instrs should be replaced with _addr version
  SmallVector<RetainValueInst *, 16> retainInstsToMod;
  SmallVector<ReleaseValueInst *, 16> releaseInstsToMod;
  // All result types instrs for which we need to convert the ResultTy
  llvm::SetVector<SingleValueInstruction *> resultTyInstsToMod;
  // All instructions that use the large struct that are not covered above
  SmallVector<SILInstruction *, 16> instsToMod;
  // All function-exiting terminators (return or throw instructions).
  SmallVector<TermInst *, 8> returnInsts;
  // All (large type) return instructions that are modified
  SmallVector<ReturnInst *, 8> modReturnInsts;
  // All destroy_value instrs should be replaced with _addr version
  SmallVector<SILInstruction *, 16> destroyValueInstsToMod;
  // All debug instructions.
  // to be modified *only if* the operands are used in "real" instructions
  SmallVector<SILInstruction *, 16> debugInstsToMod;
  StructLoweringState(SILFunction *F, irgen::IRGenModule &Mod)
      : F(F), Mod(Mod) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// LargeValueVisitor: Map large loadable values to ValueStorage.
//===----------------------------------------------------------------------===//
namespace {
// Walks a function (in reverse post order) and populates the
// StructLoweringState worklists with every instruction that must be
// rewritten because it touches a large loadable value or a rewritten
// function type. Purely an analysis: no IR is changed here.
class LargeValueVisitor {
  StructLoweringState &pass;
  PostOrderFunctionInfo postorderInfo;
public:
  explicit LargeValueVisitor(StructLoweringState &pass)
      : pass(pass), postorderInfo(pass.F) {}
  // Record all function-exiting terminators in pass.returnInsts.
  void mapReturnInstrs();
  // Classify every instruction into the pass worklists.
  void mapValueStorage();
protected:
  // One visit* handler per instruction family dispatched by mapValueStorage.
  void visitApply(ApplySite applySite);
  void visitMethodInst(MethodInst *instr);
  void visitStoreInst(StoreInst *instr);
  void visitSwitchEnumInst(SwitchEnumInst *instr);
  void visitStructExtractInst(StructExtractInst *instr);
  void visitRetainInst(RetainValueInst *instr);
  void visitReleaseInst(ReleaseValueInst *instr);
  void visitResultTyInst(SingleValueInstruction *instr);
  void visitDebugValueInst(DebugValueInst *instr);
  void visitDestroyValueInst(DestroyValueInst *instr);
  void visitTupleInst(SingleValueInstruction *instr);
  void visitAllocStackInst(AllocStackInst *instr);
  void visitPointerToAddressInst(PointerToAddressInst *instr);
  void visitReturnInst(ReturnInst *instr);
  void visitDeallocInst(DeallocStackInst *instr);
  // Fallback for any other instruction that uses a large loadable value.
  void visitInstr(SILInstruction *instr);
};
} // end anonymous namespace
void LargeValueVisitor::mapReturnInstrs() {
for (auto *BB : postorderInfo.getReversePostOrder()) {
if (BB->getTerminator()->isFunctionExiting())
pass.returnInsts.push_back(BB->getTerminator());
}
}
void LargeValueVisitor::mapValueStorage() {
for (auto *BB : postorderInfo.getReversePostOrder()) {
for (auto &II : *BB) {
SILInstruction *currIns = &II;
switch (currIns->getKind()) {
case SILInstructionKind::ApplyInst:
case SILInstructionKind::TryApplyInst:
case SILInstructionKind::PartialApplyInst: {
visitApply(ApplySite(currIns));
break;
}
case SILInstructionKind::ClassMethodInst:
case SILInstructionKind::SuperMethodInst:
case SILInstructionKind::ObjCMethodInst:
case SILInstructionKind::ObjCSuperMethodInst:
case SILInstructionKind::WitnessMethodInst: {
// TODO Any more instructions to add here?
auto *MI = dyn_cast<MethodInst>(currIns);
visitMethodInst(MI);
break;
}
case SILInstructionKind::StructExtractInst:
case SILInstructionKind::StructElementAddrInst:
case SILInstructionKind::RefTailAddrInst:
case SILInstructionKind::RefElementAddrInst:
case SILInstructionKind::BeginAccessInst:
case SILInstructionKind::EnumInst: {
// TODO Any more instructions to add here?
visitResultTyInst(cast<SingleValueInstruction>(currIns));
break;
}
case SILInstructionKind::StoreInst: {
auto *SI = dyn_cast<StoreInst>(currIns);
visitStoreInst(SI);
break;
}
case SILInstructionKind::RetainValueInst: {
auto *RETI = dyn_cast<RetainValueInst>(currIns);
visitRetainInst(RETI);
break;
}
case SILInstructionKind::ReleaseValueInst: {
auto *RELI = dyn_cast<ReleaseValueInst>(currIns);
visitReleaseInst(RELI);
break;
}
case SILInstructionKind::DebugValueInst: {
auto *DI = dyn_cast<DebugValueInst>(currIns);
visitDebugValueInst(DI);
break;
}
case SILInstructionKind::DestroyValueInst: {
auto *DI = dyn_cast<DestroyValueInst>(currIns);
visitDestroyValueInst(DI);
break;
}
case SILInstructionKind::SwitchEnumInst: {
auto *SEI = dyn_cast<SwitchEnumInst>(currIns);
visitSwitchEnumInst(SEI);
break;
}
case SILInstructionKind::TupleElementAddrInst:
case SILInstructionKind::TupleExtractInst: {
visitTupleInst(cast<SingleValueInstruction>(currIns));
break;
}
case SILInstructionKind::AllocStackInst: {
auto *ASI = dyn_cast<AllocStackInst>(currIns);
visitAllocStackInst(ASI);
break;
}
case SILInstructionKind::PointerToAddressInst: {
auto *PTA = dyn_cast<PointerToAddressInst>(currIns);
visitPointerToAddressInst(PTA);
break;
}
case SILInstructionKind::ReturnInst: {
auto *RI = dyn_cast<ReturnInst>(currIns);
visitReturnInst(RI);
break;
}
case SILInstructionKind::DeallocStackInst: {
auto *DI = dyn_cast<DeallocStackInst>(currIns);
visitDeallocInst(DI);
break;
}
default: {
assert(!ApplySite::isa(currIns) && "Did not expect an ApplySite");
assert(!dyn_cast<MethodInst>(currIns) && "Unhandled Method Inst");
visitInstr(currIns);
break;
}
}
}
}
}
// An apply site can be rewritten unless its callee uses the C convention,
// in which case the old ABI must be preserved.
static bool modifiableApply(ApplySite applySite, irgen::IRGenModule &Mod) {
  return applySite.getSubstCalleeType()->getLanguage() !=
         SILFunctionLanguage::C;
}
// Record an apply site in pass.applies when any of its arguments, its result
// type, or its callee's function type will be rewritten by this pass.
// Unmodifiable (C-convention) applies are treated as ordinary instructions.
void LargeValueVisitor::visitApply(ApplySite applySite) {
  if (!modifiableApply(applySite, pass.Mod)) {
    return visitInstr(applySite.getInstruction());
  }
  GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
  for (Operand &operand : applySite.getArgumentOperands()) {
    SILValue currOperand = operand.get();
    SILType silType = currOperand->getType();
    SILType newSilType = getNewSILType(genEnv, silType, pass.Mod);
    // The apply needs rewriting if an argument's type changes, or if the
    // argument is one of the values this pass already rewrote.
    if (silType != newSilType ||
        std::find(pass.largeLoadableArgs.begin(), pass.largeLoadableArgs.end(),
                  currOperand) != pass.largeLoadableArgs.end() ||
        std::find(pass.funcSigArgs.begin(), pass.funcSigArgs.end(),
                  currOperand) != pass.funcSigArgs.end()) {
      pass.applies.push_back(applySite.getInstruction());
      return;
    }
  }
  SILType currType = applySite.getType();
  SILType newType = getNewSILType(genEnv, currType, pass.Mod);
  // We only care about function type results
  if (!isLargeLoadableType(genEnv, currType, pass.Mod) &&
      (currType != newType)) {
    pass.applies.push_back(applySite.getInstruction());
    return;
  }
  // Check callee - need new generic env:
  CanSILFunctionType origSILFunctionType = applySite.getSubstCalleeType();
  GenericEnvironment *genEnvCallee = nullptr;
  auto newSILFunctionType =
      getNewSILFunctionType(genEnvCallee, origSILFunctionType, pass.Mod);
  if (origSILFunctionType != newSILFunctionType) {
    pass.applies.push_back(applySite.getInstruction());
  }
}
// A method instruction must not be rewritten if any of its users is an
// apply with a C-convention callee (the old ABI must be kept for those).
static bool isMethodInstUnmodifiable(MethodInst *instr) {
  for (auto *use : instr->getUses()) {
    SILInstruction *user = use->getUser();
    if (!ApplySite::isa(user))
      continue;
    if (ApplySite(user).getSubstCalleeType()->getLanguage() ==
        SILFunctionLanguage::C)
      return true;
  }
  return false;
}
// Record a method instruction whose produced function type (or the lowering
// of its results) would change under this pass. Methods pinned to the old
// ABI (see isMethodInstUnmodifiable) are treated as ordinary instructions.
void LargeValueVisitor::visitMethodInst(MethodInst *instr) {
  if (isMethodInstUnmodifiable(instr)) {
    // Do not change the method!
    visitInstr(instr);
    return;
  }
  SILType currSILType = instr->getType();
  auto fnType = currSILType.castTo<SILFunctionType>();
  GenericEnvironment *genEnv = nullptr;
  // Polymorphic method types carry their own generic environment.
  if (fnType->isPolymorphic()) {
    genEnv = getGenericEnvironment(fnType);
  }
  if (shouldTransformFunctionType(genEnv, fnType, pass.Mod)) {
    pass.methodInstsToMod.push_back(instr);
    return;
  }
  // Even if the function type itself is unchanged, a difference in result
  // lowering still forces the method instruction to be rewritten.
  if (newResultsDiffer(genEnv, fnType->getResults(), pass.Mod)) {
    pass.methodInstsToMod.push_back(instr);
  }
}
// A store whose source is one of the rewritten large-loadable values must be
// turned into an outlined copy later.
void LargeValueVisitor::visitStoreInst(StoreInst *instr) {
  SILValue storedValue = instr->getSrc();
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  if (std::find(first, last, storedValue) != last)
    pass.storeInstsToMod.push_back(instr);
}
// True when a basic-block argument's type will be rewritten by this pass.
// We (currently) only care about function signatures, so large loadable
// argument types themselves are excluded here.
static bool shouldConvertBBArg(SILArgument *arg, irgen::IRGenModule &Mod) {
  auto *containingFn = arg->getFunction();
  SILType argTy = arg->getType();
  GenericEnvironment *genEnv = containingFn->getGenericEnvironment();
  // A polymorphic function-typed argument carries its own generic
  // environment; prefer it over the enclosing function's.
  CanType argCanTy = argTy.getSwiftRValueType();
  if (auto argFnTy = dyn_cast<SILFunctionType>(argCanTy))
    if (argFnTy->isPolymorphic())
      genEnv = getGenericEnvironment(argFnTy);
  SILType rewrittenTy = getNewSILType(genEnv, argTy, Mod);
  return !isLargeLoadableType(genEnv, argTy, Mod) && rewrittenTy != argTy;
}
// Record a switch_enum that must become switch_enum_addr: either its operand
// is one of the rewritten large-loadable values, or one of its destination
// blocks has an argument whose type this pass converts.
//
// Fix: removed the local `caseBBs` SmallVector, which was declared but never
// used.
void LargeValueVisitor::visitSwitchEnumInst(SwitchEnumInst *instr) {
  SILValue operand = instr->getOperand();
  if (std::find(pass.largeLoadableArgs.begin(), pass.largeLoadableArgs.end(),
                operand) != pass.largeLoadableArgs.end()) {
    pass.switchEnumInstsToMod.push_back(instr);
    return;
  }
  // In case we converted the target BB type of this enum - need to modify!
  unsigned numOfCases = instr->getNumCases();
  for (unsigned i = 0; i < numOfCases; ++i) {
    auto currCase = instr->getCase(i);
    auto *currBB = currCase.second;
    for (SILArgument *arg : currBB->getArguments()) {
      if (shouldConvertBBArg(arg, pass.Mod)) {
        pass.switchEnumInstsToMod.push_back(instr);
        return;
      }
    }
  }
}
// A struct_extract on a rewritten large-loadable value must later become a
// struct_element_addr.
void LargeValueVisitor::visitStructExtractInst(StructExtractInst *instr) {
  SILValue extracted = instr->getOperand();
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  if (std::find(first, last, extracted) != last)
    pass.structExtractInstsToMod.push_back(instr);
}
// A retain_value touching a rewritten large-loadable value must later become
// the _addr variant. One match is enough; record the instruction once.
void LargeValueVisitor::visitRetainInst(RetainValueInst *instr) {
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  for (Operand &use : instr->getAllOperands()) {
    if (std::find(first, last, use.get()) != last) {
      pass.retainInstsToMod.push_back(instr);
      return;
    }
  }
}
// A release_value touching a rewritten large-loadable value must later
// become the _addr variant. One match is enough; record the instruction once.
void LargeValueVisitor::visitReleaseInst(ReleaseValueInst *instr) {
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  for (Operand &use : instr->getAllOperands()) {
    if (std::find(first, last, use.get()) != last) {
      pass.releaseInstsToMod.push_back(instr);
      return;
    }
  }
}
// Record debug_value instructions whose operand is a rewritten
// large-loadable value. Note: intentionally no early return — the original
// scans every operand (matching the original behavior exactly).
void LargeValueVisitor::visitDebugValueInst(DebugValueInst *instr) {
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  for (Operand &use : instr->getAllOperands()) {
    if (std::find(first, last, use.get()) != last)
      pass.debugInstsToMod.push_back(instr);
  }
}
// Record destroy_value instructions whose operand is a rewritten
// large-loadable value; they will be replaced with the _addr variant.
void LargeValueVisitor::visitDestroyValueInst(DestroyValueInst *instr) {
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  for (Operand &use : instr->getAllOperands()) {
    if (std::find(first, last, use.get()) != last)
      pass.destroyValueInstsToMod.push_back(instr);
  }
}
// Record instructions whose result type will be rewritten, then fall through
// to the operand-based classification (struct_extract has its own handler).
void LargeValueVisitor::visitResultTyInst(SingleValueInstruction *instr) {
  GenericEnvironment *genEnv = instr->getFunction()->getGenericEnvironment();
  SILType resultTy = instr->getType().getObjectType();
  if (getNewSILType(genEnv, resultTy, pass.Mod) != resultTy)
    pass.resultTyInstsToMod.insert(instr);
  if (auto *structExtract = dyn_cast<StructExtractInst>(instr))
    visitStructExtractInst(structExtract);
  else
    visitInstr(instr);
}
// Record tuple projections that produce a function type which this pass
// rewrites; then run the generic operand classification as well.
void LargeValueVisitor::visitTupleInst(SingleValueInstruction *instr) {
  SILType resultTy = instr->getType().getObjectType();
  if (auto tupleFnTy = resultTy.getAs<SILFunctionType>()) {
    GenericEnvironment *genEnv = instr->getFunction()->getGenericEnvironment();
    // Fall back to the function type's own generic environment when the
    // enclosing function has none and the type is polymorphic.
    if (!genEnv && tupleFnTy->isPolymorphic())
      genEnv = getGenericEnvironment(tupleFnTy);
    if (getNewSILFunctionType(genEnv, tupleFnTy, pass.Mod) != tupleFnTy)
      pass.tupleInstsToMod.push_back(instr);
  }
  visitInstr(instr);
}
// alloc_stack of a (possibly optional) function type may need its type
// rewritten.
void LargeValueVisitor::visitAllocStackInst(AllocStackInst *instr) {
  SILType allocTy = instr->getType().getObjectType();
  if (getInnerFunctionType(allocTy))
    pass.allocStackInstsToMod.push_back(instr);
}
// pointer_to_address producing a (possibly optional) function type may need
// its type rewritten.
void LargeValueVisitor::visitPointerToAddressInst(PointerToAddressInst *instr) {
  SILType pointeeTy = instr->getType().getObjectType();
  if (getInnerFunctionType(pointeeTy))
    pass.pointerToAddrkInstsToMod.push_back(instr);
}
// True when \p loweredTy has exactly one result and that result's storage
// type is a large loadable type (as opposed to a rewritten function type).
static bool modNonFuncTypeResultType(GenericEnvironment *genEnv,
                                     CanSILFunctionType loweredTy,
                                     irgen::IRGenModule &Mod) {
  if (!modifiableFunction(loweredTy) || loweredTy->getNumResults() != 1)
    return false;
  auto resultTy = loweredTy->getSingleResult().getSILStorageType();
  return isLargeLoadableType(genEnv, resultTy, Mod);
}
// Convenience overload: query \p F's own lowered signature.
static bool modNonFuncTypeResultType(SILFunction *F, irgen::IRGenModule &Mod) {
  return modNonFuncTypeResultType(F->getGenericEnvironment(),
                                  F->getLoweredFunctionType(), Mod);
}
// Classify a return: unchanged result types are treated like any other use;
// large non-function result types force the return itself to be rewritten;
// rewritten function-type results leave the return instruction as-is.
void LargeValueVisitor::visitReturnInst(ReturnInst *instr) {
  if (!modResultType(pass.F, pass.Mod)) {
    visitInstr(instr);
    return;
  }
  if (modNonFuncTypeResultType(pass.F, pass.Mod))
    pass.modReturnInsts.push_back(instr);
}
// Sanity-check only (no worklist updates): a dealloc_stack whose operand is
// one of the rewritten large-loadable values must deallocate an alloc_stack
// that was created for an apply result (recorded in allocToApplyRetMap).
// NOTE(review): opAsInstr is referenced only by asserts and is unused in
// release builds.
void LargeValueVisitor::visitDeallocInst(DeallocStackInst *instr) {
  auto opInstr = instr->getOperand();
  if (std::find(pass.largeLoadableArgs.begin(), pass.largeLoadableArgs.end(),
                opInstr) != pass.largeLoadableArgs.end()) {
    auto *opAsInstr = dyn_cast<AllocStackInst>(opInstr);
    assert(opAsInstr && "Expected an alloc stack instruction");
    assert(pass.allocToApplyRetMap.find(opAsInstr) !=
               pass.allocToApplyRetMap.end() &&
           "Unexpected dealloc instr!");
  }
}
// Fallback classification: any other instruction using a rewritten
// large-loadable value goes on the generic worklist, and the value is
// identity-mapped for now (replaced later by the load / alloc_stack).
void LargeValueVisitor::visitInstr(SILInstruction *instr) {
  auto first = pass.largeLoadableArgs.begin();
  auto last = pass.largeLoadableArgs.end();
  for (Operand &use : instr->getAllOperands()) {
    SILValue usedValue = use.get();
    if (std::find(first, last, usedValue) == last)
      continue;
    pass.instsToMod.push_back(instr);
    pass.argsToLoadedValueMap[usedValue] = usedValue;
  }
}
//===----------------------------------------------------------------------===//
// LoadableStorageAllocation: Generate alloc_stack and address projections
// for all loadable types we pass around.
//===----------------------------------------------------------------------===//
namespace {
// Generates alloc_stack slots and address projections for all large
// loadable values the function passes around, rewriting function/BB
// arguments and apply results to address form. Consumes and updates the
// shared StructLoweringState.
class LoadableStorageAllocation {
  StructLoweringState &pass;
public:
  explicit LoadableStorageAllocation(StructLoweringState &pass) : pass(pass) {}
  // Entry point: run all conversions below.
  void allocateLoadableStorage();
  // Replace a load with alloc_stack + outlined copy_addr.
  void replaceLoadWithCopyAddr(LoadInst *optimizableLoad);
  // Same, but only when the load feeds a modifiable apply.
  void replaceLoadWithCopyAddrForModifiable(LoadInst *unoptimizableLoad);
protected:
  void convertIndirectFunctionArgs();
  void insertIndirectReturnArgs();
  void convertIndirectFunctionPointerArgsForUnmodifiable();
  void convertIndirectBasicBlockArgs();
  void convertApplyResults();
  void allocateForArg(SILValue value);
  AllocStackInst *allocateForApply(SILInstruction *apply, SILType type);
  SILArgument *replaceArgType(SILBuilder &argBuilder, SILArgument *arg,
                              SILType newSILType);
};
} // end anonymous namespace
// Emit a [take] [initialization] copy_addr from \p src to \p tgt at the
// builder's insertion point, using \p loc when provided and otherwise the
// location of the instruction being inserted before.
static SILInstruction *createOutlinedCopyCall(SILBuilder &copyBuilder,
                                              SILValue src, SILValue tgt,
                                              StructLoweringState &pass,
                                              SILLocation *loc = nullptr) {
  SILLocation copyLoc =
      loc ? *loc : copyBuilder.getInsertionPoint()->getLoc();
  return copyBuilder.createCopyAddr(copyLoc, src, tgt, IsTake,
                                    IsInitialization);
}
// Replace a load of a large loadable value with an entry-block alloc_stack
// plus an outlined copy_addr: re-route every user of the load to the new
// slot (recording each user in the matching pass worklist for later
// rewriting), insert dealloc_stacks before every function exit, and erase
// the load.
void LoadableStorageAllocation::replaceLoadWithCopyAddr(
    LoadInst *optimizableLoad) {
  SILValue value = optimizableLoad->getOperand();
  // The slot lives for the whole function: allocate at the entry block top.
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  AllocStackInst *allocInstr =
      allocBuilder.createAllocStack(value.getLoc(), value->getType());
  SILBuilderWithScope outlinedBuilder(optimizableLoad);
  createOutlinedCopyCall(outlinedBuilder, value, allocInstr, pass);
  // Insert stack deallocations.
  for (TermInst *termInst : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(termInst);
    deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
  }
  // Classify every user so the later rewriting step knows how to update it.
  for (auto *user : optimizableLoad->getUses()) {
    SILInstruction *userIns = user->getUser();
    switch (userIns->getKind()) {
    case SILInstructionKind::CopyAddrInst:
    case SILInstructionKind::DeallocStackInst:
      // Already address-based; nothing to record.
      break;
    case SILInstructionKind::ApplyInst:
    case SILInstructionKind::TryApplyInst:
    case SILInstructionKind::PartialApplyInst: {
      // Record each apply only once.
      if (std::find(pass.applies.begin(), pass.applies.end(), userIns) ==
          pass.applies.end()) {
        pass.applies.push_back(userIns);
      }
      break;
    }
    case SILInstructionKind::RetainValueInst: {
      auto *insToInsert = dyn_cast<RetainValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.retainInstsToMod.push_back(insToInsert);
      break;
    }
    case SILInstructionKind::ReleaseValueInst: {
      auto *insToInsert = dyn_cast<ReleaseValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.releaseInstsToMod.push_back(insToInsert);
      break;
    }
    case SILInstructionKind::StoreInst: {
      auto *insToInsert = dyn_cast<StoreInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.storeInstsToMod.push_back(insToInsert);
      break;
    }
    case SILInstructionKind::DebugValueInst: {
      auto *insToInsert = dyn_cast<DebugValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.debugInstsToMod.push_back(insToInsert);
      break;
    }
    case SILInstructionKind::DestroyValueInst: {
      auto *insToInsert = dyn_cast<DestroyValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.destroyValueInstsToMod.push_back(insToInsert);
      break;
    }
    case SILInstructionKind::StructExtractInst: {
      auto *instToInsert = dyn_cast<StructExtractInst>(userIns);
      // Deduplicate: the analysis may have recorded this one already.
      if (std::find(pass.structExtractInstsToMod.begin(),
                    pass.structExtractInstsToMod.end(),
                    instToInsert) == pass.structExtractInstsToMod.end()) {
        pass.structExtractInstsToMod.push_back(instToInsert);
      }
      break;
    }
    case SILInstructionKind::SwitchEnumInst: {
      auto *instToInsert = dyn_cast<SwitchEnumInst>(userIns);
      // Deduplicate: the analysis may have recorded this one already.
      if (std::find(pass.switchEnumInstsToMod.begin(),
                    pass.switchEnumInstsToMod.end(),
                    instToInsert) == pass.switchEnumInstsToMod.end()) {
        pass.switchEnumInstsToMod.push_back(instToInsert);
      }
      break;
    }
    default:
      llvm_unreachable("Unexpected instruction");
    }
  }
  optimizableLoad->replaceAllUsesWith(allocInstr);
  optimizableLoad->getParent()->erase(optimizableLoad);
}
// True when \p unoptimizableLoad feeds at least one apply as a regular
// argument (not as the callee) and the loaded type would be rewritten by
// this pass.
static bool usesContainApplies(LoadInst *unoptimizableLoad,
                               irgen::IRGenModule &Mod) {
  for (auto *user : unoptimizableLoad->getUses()) {
    SILInstruction *userIns = user->getUser();
    switch (userIns->getKind()) {
    case SILInstructionKind::ApplyInst:
    case SILInstructionKind::TryApplyInst:
    case SILInstructionKind::PartialApplyInst: {
      ApplySite site(userIns);
      SILValue callee = site.getCallee();
      // Loads used only as the callee don't count.
      if (callee == unoptimizableLoad) {
        break;
      }
      SILType currType = unoptimizableLoad->getType().getObjectType();
      GenericEnvironment *genEnv =
          unoptimizableLoad->getFunction()->getGenericEnvironment();
      SILType newSILType = getNewSILType(genEnv, currType, Mod);
      // Only types this pass rewrites are relevant.
      if (currType == newSILType) {
        break;
      }
      return true;
    }
    default:
      break;
    }
  }
  return false;
}
// Like replaceLoadWithCopyAddr, but for loads that also have users we can't
// rewrite: the load is kept, and only the rewritable users (collected in
// usersToMod) are re-routed to the new alloc_stack slot. Bail out entirely
// when no modifiable apply uses the load.
void LoadableStorageAllocation::replaceLoadWithCopyAddrForModifiable(
    LoadInst *unoptimizableLoad) {
  if (!usesContainApplies(unoptimizableLoad, pass.Mod)) {
    return;
  }
  SILValue value = unoptimizableLoad->getOperand();
  // Allocate a function-lifetime slot at the entry block top.
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  AllocStackInst *allocInstr =
      allocBuilder.createAllocStack(value.getLoc(), value->getType());
  SILBuilderWithScope outlinedBuilder(unoptimizableLoad);
  createOutlinedCopyCall(outlinedBuilder, value, allocInstr, pass);
  // Insert stack deallocations.
  for (TermInst *termInst : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(termInst);
    deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
  }
  // Collect the operands that will be re-pointed at the slot; everything
  // else keeps using the original load.
  SmallVector<Operand *, 8> usersToMod;
  for (auto *user : unoptimizableLoad->getUses()) {
    SILInstruction *userIns = user->getUser();
    switch (userIns->getKind()) {
    case SILInstructionKind::CopyAddrInst:
    case SILInstructionKind::DeallocStackInst:
      break;
    case SILInstructionKind::ApplyInst:
    case SILInstructionKind::TryApplyInst:
    case SILInstructionKind::PartialApplyInst: {
      ApplySite site(userIns);
      // C-convention applies keep the old ABI.
      if (!modifiableApply(site, pass.Mod)) {
        break;
      }
      SILValue callee = site.getCallee();
      // Uses as the callee itself are not rewritten.
      if (callee == unoptimizableLoad) {
        break;
      }
      SILType currType = unoptimizableLoad->getType().getObjectType();
      GenericEnvironment *genEnv =
          userIns->getFunction()->getGenericEnvironment();
      SILType newSILType = getNewSILType(genEnv, currType, pass.Mod);
      if (currType == newSILType) {
        break;
      }
      if (std::find(pass.applies.begin(), pass.applies.end(), userIns) ==
          pass.applies.end()) {
        pass.applies.push_back(userIns);
      }
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::RetainValueInst: {
      auto *insToInsert = dyn_cast<RetainValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.retainInstsToMod.push_back(insToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::ReleaseValueInst: {
      auto *insToInsert = dyn_cast<ReleaseValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.releaseInstsToMod.push_back(insToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::StoreInst: {
      auto *insToInsert = dyn_cast<StoreInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.storeInstsToMod.push_back(insToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::DebugValueInst: {
      auto *insToInsert = dyn_cast<DebugValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.debugInstsToMod.push_back(insToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::DestroyValueInst: {
      auto *insToInsert = dyn_cast<DestroyValueInst>(userIns);
      assert(insToInsert && "Unexpected cast failure");
      pass.destroyValueInstsToMod.push_back(insToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::StructExtractInst: {
      auto *instToInsert = dyn_cast<StructExtractInst>(userIns);
      pass.structExtractInstsToMod.push_back(instToInsert);
      usersToMod.push_back(user);
      break;
    }
    case SILInstructionKind::SwitchEnumInst: {
      auto *instToInsert = dyn_cast<SwitchEnumInst>(userIns);
      pass.switchEnumInstsToMod.push_back(instToInsert);
      usersToMod.push_back(user);
      break;
    }
    default:
      break;
    }
  }
  // Re-point the collected operands at the new slot.
  while (!usersToMod.empty()) {
    auto *currUser = usersToMod.pop_back_val();
    currUser->set(allocInstr);
  }
}
// Driver: rewrite the function so large loadable values live in storage.
void LoadableStorageAllocation::allocateLoadableStorage() {
  // Map all function exits first: the allocations created for apply results
  // must get a matching dealloc before every exit, otherwise SIL
  // verification fails later with
  // "stack dealloc does not match most recent stack alloc".
  LargeValueVisitor(pass).mapReturnInstrs();
  if (modifiableFunction(pass.F->getLoweredFunctionType())) {
    // Turn by-value function args to by-address ones
    convertIndirectFunctionArgs();
  } else {
    convertIndirectFunctionPointerArgsForUnmodifiable();
  }
  convertApplyResults();
  // Populate the pass' data structs
  LargeValueVisitor(pass).mapValueStorage();
  // Turn by-value BB args to by-address ones
  convertIndirectBasicBlockArgs();
  // Create an AllocStack for every used large loadable type in the function.
  for (auto &argToAlloc : pass.argsToLoadedValueMap) {
    // Entries start out identity-mapped (see visitInstr).
    assert(argToAlloc.first == argToAlloc.second);
    allocateForArg(argToAlloc.first);
  }
}
// Replace \p arg's function-argument type with \p newSILType and return the
// new argument. A temporary copy_value of an undef bridges the gap: users
// are moved to it, the argument is replaced, then users are moved to the new
// argument and the temporary is erased.
SILArgument *LoadableStorageAllocation::replaceArgType(SILBuilder &argBuilder,
                                                       SILArgument *arg,
                                                       SILType newSILType) {
  CopyValueInst *copyArg = argBuilder.createCopyValue(
      RegularLocation(const_cast<ValueDecl *>(arg->getDecl())),
      SILUndef::get(newSILType, pass.F->getModule()));
  arg->replaceAllUsesWith(copyArg);
  // This argument must not have been rewritten before.
  assert(std::find(pass.largeLoadableArgs.begin(), pass.largeLoadableArgs.end(),
                   arg) == pass.largeLoadableArgs.end());
  arg = arg->getParent()->replaceFunctionArgument(
      arg->getIndex(), newSILType, ValueOwnershipKind::Trivial, arg->getDecl());
  copyArg->replaceAllUsesWith(arg);
  copyArg->eraseFromParent();
  return arg;
}
// Prepend an indirect @inout "$return_value" argument to the entry block for
// a function whose single large result is being converted to an indirect
// return.
void LoadableStorageAllocation::insertIndirectReturnArgs() {
  GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
  auto loweredTy = pass.F->getLoweredFunctionType();
  auto singleResult = loweredTy->getSingleResult();
  SILType resultStorageType = singleResult.getSILStorageType();
  auto canType = resultStorageType.getSwiftRValueType();
  // Map interface types into the function's generic context first.
  if (canType->hasTypeParameter()) {
    assert(genEnv && "Expected a GenericEnv");
    canType = genEnv->mapTypeIntoContext(canType)->getCanonicalType();
  }
  resultStorageType = SILType::getPrimitiveObjectType(canType);
  // Synthesize a ParamDecl so the new argument has a declaration to point at.
  auto &ctx = pass.F->getModule().getASTContext();
  auto var = new (ctx) ParamDecl(
      VarDecl::Specifier::InOut, SourceLoc(), SourceLoc(),
      ctx.getIdentifier("$return_value"), SourceLoc(),
      ctx.getIdentifier("$return_value"),
      resultStorageType.getSwiftRValueType(), pass.F->getDeclContext());
  pass.F->begin()->insertFunctionArgument(0, resultStorageType.getAddressType(),
                                          ValueOwnershipKind::Trivial, var);
}
// Rewrite every entry-block argument whose type this pass converts, and add
// the indirect return argument when the result type is also converted.
void LoadableStorageAllocation::convertIndirectFunctionArgs() {
  SILBasicBlock *entry = pass.F->getEntryBlock();
  SILBuilderWithScope argBuilder(entry->begin());
  GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
  for (SILArgument *arg : entry->getArguments()) {
    SILType storageType = arg->getType();
    SILType newSILType = getNewSILType(genEnv, storageType, pass.Mod);
    if (newSILType != storageType) {
      ValueOwnershipKind ownership = arg->getOwnershipKind();
      arg = replaceArgType(argBuilder, arg, newSILType);
      if (isLargeLoadableType(genEnv, storageType, pass.Mod)) {
        // Add to largeLoadableArgs if and only if it wasn't a modified function
        // signature arg
        pass.largeLoadableArgs.push_back(arg);
      } else {
        // Function-signature rewrite: keep the original ownership.
        arg->setOwnershipKind(ownership);
        pass.funcSigArgs.push_back(arg);
      }
    }
  }
  // Convert the result type to indirect if necessary:
  if (modNonFuncTypeResultType(pass.F, pass.Mod)) {
    insertIndirectReturnArgs();
  }
}
// Replace a basic-block (phi) argument's type with \p newSILType, using the
// same undef/copy_value bridging trick as replaceArgType to keep users
// attached across the replacement.
static void convertBBArgType(SILBuilder &argBuilder, SILType newSILType,
                             SILArgument *arg) {
  CopyValueInst *copyArg = argBuilder.createCopyValue(
      RegularLocation(const_cast<ValueDecl *>(arg->getDecl())),
      SILUndef::get(newSILType, arg->getFunction()->getModule()));
  arg->replaceAllUsesWith(copyArg);
  arg = arg->getParent()->replacePHIArgument(arg->getIndex(), newSILType,
                                             arg->getOwnershipKind());
  copyArg->replaceAllUsesWith(arg);
  copyArg->eraseFromParent();
}
// For every modifiable apply/try_apply whose single result is a large
// loadable type, allocate a stack slot for the result and re-route all users
// of the (try_apply: normal-BB argument's) result to that slot.
void LoadableStorageAllocation::convertApplyResults() {
  for (auto &BB : *pass.F) {
    for (auto &II : BB) {
      auto *currIns = &II;
      if (!ApplySite::isa(currIns)) {
        continue;
      }
      // partial_apply results are closures, not large values; skip them.
      if (isa<PartialApplyInst>(currIns)) {
        continue;
      }
      auto applySite = ApplySite(currIns);
      if (!modifiableApply(applySite, pass.Mod)) {
        continue;
      }
      CanSILFunctionType origSILFunctionType = applySite.getSubstCalleeType();
      GenericEnvironment *genEnv = nullptr;
      if (!shouldTransformResults(genEnv, origSILFunctionType, pass.Mod)) {
        continue;
      }
      auto singleResult = origSILFunctionType->getSingleResult();
      auto resultStorageType = singleResult.getSILStorageType();
      if (!isLargeLoadableType(genEnv, resultStorageType, pass.Mod)) {
        // Make sure it is a function type
        if (!resultStorageType.is<SILFunctionType>()) {
          // Check if it is an optional function type
          auto optionalType = resultStorageType.getAnyOptionalObjectType();
          assert(optionalType &&
                 "Expected SILFunctionType or Optional for the result type");
          assert(optionalType.is<SILFunctionType>() &&
                 "Expected a SILFunctionType inside the optional Type");
        }
        continue;
      }
      auto newSILType = getNewSILType(genEnv, resultStorageType, pass.Mod);
      auto *newVal = allocateForApply(currIns, newSILType.getObjectType());
      if (auto apply = dyn_cast<ApplyInst>(currIns)) {
        apply->replaceAllUsesWith(newVal);
      } else {
        // try_apply: the result arrives as the normal BB's single argument.
        auto tryApplyIns = cast<TryApplyInst>(currIns);
        auto *normalBB = tryApplyIns->getNormalBB();
        SILBuilderWithScope argBuilder(normalBB->begin());
        assert(normalBB->getNumArguments() == 1 &&
               "Expected only one arg for try_apply normal BB");
        auto arg = normalBB->getArgument(0);
        arg->replaceAllUsesWith(newVal);
        // The BB argument becomes an empty tuple placeholder.
        auto emptyTy = SILType::getPrimitiveObjectType(
            TupleType::getEmpty(argBuilder.getModule().getASTContext()));
        convertBBArgType(argBuilder, emptyTy, arg);
      }
    }
  }
}
void LoadableStorageAllocation::
convertIndirectFunctionPointerArgsForUnmodifiable() {
SILBasicBlock *entry = pass.F->getEntryBlock();
SILBuilderWithScope argBuilder(entry->begin());
for (SILArgument *arg : entry->getArguments()) {
SILType storageType = arg->getType();
GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
SILType newSILType = getNewSILType(genEnv, storageType, pass.Mod);
if (!isLargeLoadableType(genEnv, storageType, pass.Mod) &&
(newSILType != storageType)) {
auto *castInstr = argBuilder.createUncheckedBitCast(
RegularLocation(const_cast<ValueDecl *>(arg->getDecl())), arg,
newSILType);
arg->replaceAllUsesWith(castInstr);
castInstr->setOperand(0, arg);
}
}
}
void LoadableStorageAllocation::convertIndirectBasicBlockArgs() {
SILBasicBlock *entry = pass.F->getEntryBlock();
GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
for (SILBasicBlock &BB : *pass.F) {
if (&BB == entry) {
// Already took care of function args
continue;
}
SILBuilderWithScope argBuilder(BB.begin());
for (SILArgument *arg : BB.getArguments()) {
if (!shouldConvertBBArg(arg, pass.Mod)) {
continue;
}
SILType storageType = arg->getType();
SILType newSILType = getNewSILType(genEnv, storageType, pass.Mod);
convertBBArgType(argBuilder, newSILType, arg);
}
}
}
/// Make a loaded SSA value available for the remaining uses of the given
/// (address-backed) value, recording it in pass.argsToLoadedValueMap.
void LoadableStorageAllocation::allocateForArg(SILValue value) {
  if (auto *allocInstr = dyn_cast<AllocStackInst>(value)) {
    // Special case: the value was already an Alloc
    // This happens in case of values from apply results (for example)
    // we *should* add a load for the current uses.
    // Said load should happen before the first use
    // As such add it right after the apply()
    LoadInst *load = nullptr;
    assert(pass.allocToApplyRetMap.find(allocInstr) !=
               pass.allocToApplyRetMap.end() &&
           "Alloc is not for apply results");
    auto *applyInst = pass.allocToApplyRetMap[allocInstr];
    assert(applyInst && "Value is not an apply");
    auto II = applyInst->getIterator();
    SILBuilderWithScope loadBuilder(II);
    if (auto *tryApply = dyn_cast<TryApplyInst>(applyInst)) {
      // try_apply is a terminator, so the load goes at the top of the
      // normal destination block instead of after the apply.
      auto *tgtBB = tryApply->getNormalBB();
      assert(tgtBB && "Could not find try apply's target BB");
      loadBuilder.setInsertionPoint(tgtBB->begin());
    } else {
      // Plain apply: load immediately after the apply itself.
      ++II;
      loadBuilder.setInsertionPoint(II);
    }
    if (!pass.F->hasQualifiedOwnership()) {
      load = loadBuilder.createLoad(applyInst->getLoc(), value,
                                    LoadOwnershipQualifier::Unqualified);
    } else {
      load = loadBuilder.createLoad(applyInst->getLoc(), value,
                                    LoadOwnershipQualifier::Take);
    }
    pass.argsToLoadedValueMap[value] = load;
    return;
  }
  // Generic case: spill the value into a fresh entry-block stack slot via an
  // outlined copy, then load it back for the SSA uses.
  assert(!ApplySite::isa(value) && "Unexpected instruction");
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  AllocStackInst *allocInstr =
      allocBuilder.createAllocStack(value.getLoc(), value->getType());
  LoadInst *loadCopy = nullptr;
  auto *applyOutlinedCopy =
      createOutlinedCopyCall(allocBuilder, value, allocInstr, pass);
  if (!pass.F->hasQualifiedOwnership()) {
    loadCopy = allocBuilder.createLoad(applyOutlinedCopy->getLoc(), allocInstr,
                                       LoadOwnershipQualifier::Unqualified);
  } else {
    loadCopy = allocBuilder.createLoad(applyOutlinedCopy->getLoc(), allocInstr,
                                       LoadOwnershipQualifier::Take);
  }
  pass.argsToLoadedValueMap[value] = loadCopy;
  // Insert stack deallocations.
  for (TermInst *termInst : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(termInst);
    deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
  }
}
/// Allocate entry-block stack storage of the given type for an apply's
/// indirect result, record the apply <-> storage association, and balance
/// the allocation with a dealloc_stack before every function exit.
AllocStackInst *
LoadableStorageAllocation::allocateForApply(SILInstruction *apply,
                                            SILType type) {
  // Allocate at the very top of the entry block so the slot dominates all
  // uses of the apply result.
  SILBuilderWithScope builder(pass.F->begin()->begin());
  AllocStackInst *storage = builder.createAllocStack(apply->getLoc(), type);
  pass.largeLoadableArgs.push_back(storage);
  pass.allocToApplyRetMap[storage] = apply;
  pass.applyRetToAllocMap[apply] = storage;
  for (TermInst *exit : pass.returnInsts) {
    SILBuilderWithScope exitBuilder(exit);
    exitBuilder.createDeallocStack(storage->getLoc(), storage);
  }
  return storage;
}
//===----------------------------------------------------------------------===//
// LoadableByAddress: Top-Level Function Transform.
//===----------------------------------------------------------------------===//
namespace {
/// Module transform that rewrites "large" loadable types so they are passed
/// around by address at the SIL level.
class LoadableByAddress : public SILModuleTransform {
  /// The entry point to this function transformation.
  void run() override;

  /// Per-function work: rewrite args/instructions and collect items for the
  /// module-wide re-creation phases below.
  void runOnFunction(SILFunction *F);

private:
  /// Rewrite the lowered type of \p F after its signature changed.
  void updateLoweredTypes(SILFunction *F);
  /// Re-create every apply site recorded in modApplies.
  void recreateApplies();
  /// Re-create a single apply/try_apply/partial_apply with lowered types.
  void recreateSingleApply(SILInstruction *applyInst);
  /// Re-create the conversion instructions in conversionInstrs.
  void recreateConvInstrs();
  /// Re-create the builtin instructions in builtinInstrs.
  void recreateBuiltinInstrs();
  /// Re-create the loads in loadInstrsOfFunc.
  void recreateLoadInstrs();
  /// Re-create the unchecked_enum_data instructions collected per function.
  void recreateUncheckedEnumDataInstrs();
  /// Re-create the unchecked_take_enum_data_addr instructions collected.
  void recreateUncheckedTakeEnumDataAddrInst();
  /// Fix up the stores in storeToBlockStorageInstrs.
  void fixStoreToBlockStorageInstrs();

private:
  // Worklists populated by runOnFunction and drained by the re-creation
  // phases; SetVector keeps insertion order deterministic.
  llvm::SetVector<SILFunction *> modFuncs;
  llvm::SetVector<SingleValueInstruction *> conversionInstrs;
  llvm::SetVector<BuiltinInst *> builtinInstrs;
  llvm::SetVector<LoadInst *> loadInstrsOfFunc;
  llvm::SetVector<UncheckedEnumDataInst *> uncheckedEnumDataOfFunc;
  llvm::SetVector<UncheckedTakeEnumDataAddrInst *>
      uncheckedTakeEnumDataAddrOfFunc;
  llvm::SetVector<StoreInst *> storeToBlockStorageInstrs;
  llvm::SetVector<SILInstruction *> modApplies;
  // Maps an apply to the stack allocation holding its indirect result,
  // aggregated across all processed functions.
  llvm::MapVector<SILInstruction *, SILValue> allApplyRetToAllocMap;
};
} // end anonymous namespace
/// Redirect the uses of \p instrOperand that we know how to rewrite onto the
/// stack slot \p allocInstr; \p store (the spill of the value into the slot)
/// is skipped so it keeps reading the original value.
static void setInstrUsers(StructLoweringState &pass, AllocStackInst *allocInstr,
                          SILValue instrOperand, StoreInst *store) {
  // Snapshot the use list first: the loop below mutates it (set/erase).
  SmallVector<Operand *, 8> uses(instrOperand->getUses());
  for (Operand *userOp : uses) {
    SILInstruction *user = userOp->getUser();
    if (user == store) {
      continue;
    }
    if (ApplySite::isa(user)) {
      // Pass the address of the slot instead of the SSA value, but only to
      // apply sites this pass rewrites.
      ApplySite site(user);
      if (modifiableApply(site, pass.Mod)) {
        userOp->set(allocInstr);
      }
    } else if (auto *storeUser = dyn_cast<StoreInst>(user)) {
      // Optimization: replace with copy_addr to reduce code size
      assert(std::find(pass.storeInstsToMod.begin(), pass.storeInstsToMod.end(),
                       storeUser) == pass.storeInstsToMod.end() &&
             "Did not expect this instr in storeInstsToMod");
      SILBuilderWithScope copyBuilder(storeUser);
      SILValue tgt = storeUser->getDest();
      createOutlinedCopyCall(copyBuilder, allocInstr, tgt, pass);
      storeUser->eraseFromParent();
    } else if (auto *dbgInst = dyn_cast<DebugValueInst>(user)) {
      SILBuilderWithScope dbgBuilder(dbgInst);
      // Rewrite the debug_value to point to the variable in the alloca.
      dbgBuilder.createDebugValueAddr(dbgInst->getLoc(), allocInstr);
      dbgInst->eraseFromParent();
    }
  }
}
/// Spill the result of \p instrOperand into a dedicated stack slot and
/// redirect its rewritable uses to that slot.
static void allocateAndSetForInstrOperand(StructLoweringState &pass,
                                          SingleValueInstruction *instrOperand){
  assert(instrOperand->getType().isObject());
  // Slot lives at the very top of the entry block.
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  AllocStackInst *slot = allocBuilder.createAllocStack(
      instrOperand->getLoc(), instrOperand->getType());
  // Spill the value right after the instruction that produces it.
  auto insertPt = instrOperand->getIterator();
  ++insertPt;
  SILBuilderWithScope storeBuilder(insertPt);
  auto qualifier = pass.F->hasQualifiedOwnership()
                       ? StoreOwnershipQualifier::Init
                       : StoreOwnershipQualifier::Unqualified;
  StoreInst *spill = storeBuilder.createStore(instrOperand->getLoc(),
                                              instrOperand, slot, qualifier);
  // Balance the allocation before every function exit.
  for (TermInst *exit : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(exit);
    deallocBuilder.createDeallocStack(slot->getLoc(), slot);
  }
  // Redirect the uses we know how to rewrite onto the slot.
  setInstrUsers(pass, slot, instrOperand, spill);
}
/// Spill a function/block *argument* that is used by \p applyInst into a
/// stack slot and redirect its rewritable uses to that slot.
static void allocateAndSetForArgumentOperand(StructLoweringState &pass,
                                             SILValue value,
                                             SILInstruction *applyInst) {
  assert(value->getType().isObject());
  auto *arg = dyn_cast<SILArgument>(value);
  assert(arg && "non-instr operand must be an argument");
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  AllocStackInst *allocInstr =
      allocBuilder.createAllocStack(applyInst->getLoc(), value->getType());
  // The spill goes at the top of the argument's block...
  auto storeIt = arg->getParent()->begin();
  if (storeIt == pass.F->begin()->begin()) {
    // Store should happen *after* allocInstr
    ++storeIt;
  }
  SILBuilderWithScope storeBuilder(storeIt);
  SILLocation Loc = applyInst->getLoc();
  Loc.markAutoGenerated();
  StoreInst *store = nullptr;
  if (pass.F->hasQualifiedOwnership()) {
    store = storeBuilder.createStore(Loc, value, allocInstr,
                                     StoreOwnershipQualifier::Init);
  } else {
    store = storeBuilder.createStore(Loc, value, allocInstr,
                                     StoreOwnershipQualifier::Unqualified);
  }
  // Insert stack deallocations.
  for (TermInst *termInst : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(termInst);
    deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
  }
  // Traverse all the uses of instrOperand - see if we can replace
  setInstrUsers(pass, allocInstr, value, store);
}
/// Returns true if every use of \p instr can be redirected to address-based
/// storage instead of the SSA value itself.
static bool allUsesAreReplaceable(SingleValueInstruction *instr,
                                  irgen::IRGenModule &Mod) {
  for (auto *use : instr->getUses()) {
    SILInstruction *user = use->getUser();
    switch (user->getKind()) {
    case SILInstructionKind::RetainValueInst:
    case SILInstructionKind::ReleaseValueInst:
    case SILInstructionKind::StoreInst:
    case SILInstructionKind::DebugValueInst:
    case SILInstructionKind::DestroyValueInst:
      // These users have address-based counterparts.
      break;
    case SILInstructionKind::ApplyInst:
    case SILInstructionKind::TryApplyInst:
    case SILInstructionKind::PartialApplyInst: {
      // Replaceable only if it is not the function pointer
      ApplySite site(user);
      if (!modifiableApply(site, Mod))
        return false;
      if (site.getCallee() == instr)
        return false;
      // The value's type must actually be lowered by this pass.
      SILType currType = instr->getType().getObjectType();
      GenericEnvironment *genEnv =
          instr->getFunction()->getGenericEnvironment();
      if (getNewSILType(genEnv, currType, Mod) == currType)
        return false;
      break;
    }
    case SILInstructionKind::StructExtractInst:
    case SILInstructionKind::SwitchEnumInst:
      break;
    default:
      return false;
    }
  }
  return true;
}
/// Insert a cast after a tuple_extract / tuple_element_addr whose
/// function-typed result must be lowered, and forward all uses of the
/// original result through the cast.
static void castTupleInstr(SingleValueInstruction *instr, IRGenModule &Mod) {
  SILType currSILType = instr->getType();
  auto funcType = currSILType.castTo<SILFunctionType>();
  GenericEnvironment *genEnv = instr->getFunction()->getGenericEnvironment();
  if (!genEnv && funcType->isPolymorphic()) {
    genEnv = getGenericEnvironment(funcType);
  }
  auto newFnType = getNewSILFunctionType(genEnv, funcType, Mod);
  SILType newSILType =
      SILType::getPrimitiveType(newFnType, currSILType.getCategory());
  // Insert the cast immediately after the tuple instruction.
  auto II = instr->getIterator();
  ++II;
  SILBuilderWithScope castBuilder(II);
  SingleValueInstruction *castInstr = nullptr;
  switch (instr->getKind()) {
  // Add cast to the new sil function type:
  case SILInstructionKind::TupleExtractInst: {
    castInstr = castBuilder.createUncheckedBitCast(instr->getLoc(), instr,
                                                   newSILType.getObjectType());
    break;
  }
  case SILInstructionKind::TupleElementAddrInst: {
    castInstr = castBuilder.createUncheckedAddrCast(
        instr->getLoc(), instr, newSILType.getAddressType());
    break;
  }
  default:
    llvm_unreachable("Unexpected instruction inside tupleInstsToMod");
  }
  instr->replaceAllUsesWith(castInstr);
  // The RAUW above rewrote the cast's own operand too; restore it.
  castInstr->setOperand(0, instr);
}
/// Create a stack copy of the enum operand of \p orig and return its address.
/// unchecked_take_enum_data_addr can be destructive, so later rewriting works
/// on this copy rather than the original value.
static SILValue createCopyOfEnum(StructLoweringState &pass,
                                 SwitchEnumInst *orig) {
  auto value = orig->getOperand();
  auto type = value->getType();
  if (type.isObject()) {
    // The operand is an SSA value: spill it to a stack slot first so we have
    // an address to copy from.
    SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
    // support for non-address operands / enums
    auto *allocInstr = allocBuilder.createAllocStack(orig->getLoc(), type);
    SILBuilderWithScope storeBuilder(orig);
    StoreInst *store = nullptr;
    if (pass.F->hasQualifiedOwnership()) {
      store = storeBuilder.createStore(orig->getLoc(), value, allocInstr,
                                       StoreOwnershipQualifier::Init);
    } else {
      store = storeBuilder.createStore(orig->getLoc(), value, allocInstr,
                                       StoreOwnershipQualifier::Unqualified);
    }
    // Insert stack deallocations.
    for (TermInst *termInst : pass.returnInsts) {
      SILBuilderWithScope deallocBuilder(termInst);
      deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
    }
    value = allocInstr;
  }
  // Copy the (address-backed) enum into a second slot that the caller may
  // destructively consume.
  SILBuilderWithScope allocBuilder(pass.F->begin()->begin());
  auto *allocInstr = allocBuilder.createAllocStack(value.getLoc(), type);
  SILBuilderWithScope copyBuilder(orig);
  createOutlinedCopyCall(copyBuilder, value, allocInstr, pass);
  for (TermInst *termInst : pass.returnInsts) {
    SILBuilderWithScope deallocBuilder(termInst);
    deallocBuilder.createDeallocStack(allocInstr->getLoc(), allocInstr);
  }
  return allocInstr;
}
/// Core rewriting loop for a single function: drains the worklists collected
/// in \p pass, turning value-based operations on large loadable types into
/// their address-based counterparts. The outer do/while repeats because
/// rewriting switch_enum / struct_extract can discover new work.
static void rewriteFunction(StructLoweringState &pass,
                            LoadableStorageAllocation &allocator) {
  GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
  bool repeat = false;
  llvm::SetVector<SILInstruction *> currentModApplies;
  do {
    // Phase 1: switch_enum -> switch_enum_addr on a copied enum.
    while (!pass.switchEnumInstsToMod.empty()) {
      auto *instr = pass.switchEnumInstsToMod.pop_back_val();
      /* unchecked_take_enum_data_addr can be destructive.
       * work on a copy instead of the original enum */
      auto copiedValue = createCopyOfEnum(pass, instr);
      SILBuilderWithScope enumBuilder(instr);
      unsigned numOfCases = instr->getNumCases();
      SmallVector<std::pair<EnumElementDecl *, SILBasicBlock *>, 16> caseBBs;
      for (unsigned i = 0; i < numOfCases; ++i) {
        auto currCase = instr->getCase(i);
        auto *currBB = currCase.second;
        SILBuilderWithScope argBuilder(currBB->begin());
        assert(currBB->getNumArguments() <= 1 && "Unhandled BB Type");
        EnumElementDecl *decl = currCase.first;
        for (SILArgument *arg : currBB->getArguments()) {
          SILType storageType = arg->getType();
          SILType newSILType = getNewSILType(genEnv, storageType, pass.Mod);
          if (storageType == newSILType) {
            newSILType = newSILType.getAddressType();
          }
          // Replace the payload block argument with a take from the copy.
          auto *newArg = argBuilder.createUncheckedTakeEnumDataAddr(
              instr->getLoc(), copiedValue, decl, newSILType.getAddressType());
          arg->replaceAllUsesWith(newArg);
          currBB->eraseArgument(0);
          // Load the enum addr then see if we can get rid of the load:
          LoadInst *loadArg = nullptr;
          if (!pass.F->hasQualifiedOwnership()) {
            loadArg = argBuilder.createLoad(
                newArg->getLoc(), newArg, LoadOwnershipQualifier::Unqualified);
          } else {
            loadArg = argBuilder.createLoad(newArg->getLoc(), newArg,
                                            LoadOwnershipQualifier::Take);
          }
          newArg->replaceAllUsesWith(loadArg);
          loadArg->setOperand(newArg);
          // If the load is of a function type - do not replace it.
          if (loadArg->getType().is<SILFunctionType>()) {
            continue;
          }
          if (allUsesAreReplaceable(loadArg, pass.Mod)) {
            allocator.replaceLoadWithCopyAddr(loadArg);
          } else {
            allocator.replaceLoadWithCopyAddrForModifiable(loadArg);
          }
        }
        caseBBs.push_back(std::make_pair(decl, currBB));
      }
      SILBasicBlock *defaultBB =
          instr->hasDefault() ? instr->getDefaultBB() : nullptr;
      enumBuilder.createSwitchEnumAddr(
          instr->getLoc(), copiedValue, defaultBB, caseBBs);
      instr->getParent()->erase(instr);
    }
    // Phase 2: struct_extract -> struct_element_addr + load.
    while (!pass.structExtractInstsToMod.empty()) {
      auto *instr = pass.structExtractInstsToMod.pop_back_val();
      bool updateResultTy = pass.resultTyInstsToMod.count(instr) != 0;
      if (updateResultTy) {
        pass.resultTyInstsToMod.remove(instr);
      }
      SILBuilderWithScope structBuilder(instr);
      auto *newInstr = structBuilder.createStructElementAddr(
          instr->getLoc(), instr->getOperand(), instr->getField(),
          instr->getType().getAddressType());
      // Load the struct element then see if we can get rid of the load:
      LoadInst *loadArg = nullptr;
      if (!pass.F->hasQualifiedOwnership()) {
        loadArg = structBuilder.createLoad(newInstr->getLoc(), newInstr,
                                           LoadOwnershipQualifier::Unqualified);
      } else {
        loadArg = structBuilder.createLoad(newInstr->getLoc(), newInstr,
                                           LoadOwnershipQualifier::Take);
      }
      instr->replaceAllUsesWith(loadArg);
      instr->getParent()->erase(instr);
      // If the load is of a function type - do not replace it.
      if (loadArg->getType().is<SILFunctionType>()) {
        continue;
      }
      if (allUsesAreReplaceable(loadArg, pass.Mod)) {
        allocator.replaceLoadWithCopyAddr(loadArg);
      } else {
        allocator.replaceLoadWithCopyAddrForModifiable(loadArg);
      }
      if (updateResultTy) {
        pass.resultTyInstsToMod.insert(newInstr);
      }
    }
    // Phase 3: give every large-loadable apply operand address storage.
    while (!pass.applies.empty()) {
      auto *applyInst = pass.applies.pop_back_val();
      if (currentModApplies.count(applyInst) == 0) {
        currentModApplies.insert(applyInst);
      }
      ApplySite applySite = ApplySite(applyInst);
      for (Operand &operand : applySite.getArgumentOperands()) {
        SILValue currOperand = operand.get();
        SILType silType = currOperand->getType();
        if (isLargeLoadableType(genEnv, silType, pass.Mod)) {
          auto currOperandInstr = dyn_cast<SingleValueInstruction>(currOperand);
          // Get its storage location as a new operand
          if (!currOperandInstr) {
            allocateAndSetForArgumentOperand(pass, currOperand, applyInst);
          } else if (auto *load = dyn_cast<LoadInst>(currOperandInstr)) {
            // If the load is of a function type - do not replace it.
            if (load->getType().is<SILFunctionType>()) {
              continue;
            }
            if (allUsesAreReplaceable(load, pass.Mod)) {
              allocator.replaceLoadWithCopyAddr(load);
            } else {
              allocator.replaceLoadWithCopyAddrForModifiable(load);
            }
          } else {
            // TODO: peephole: special handling of known cases:
            // ApplyInst, TupleExtractInst
            allocateAndSetForInstrOperand(pass, currOperandInstr);
          }
        }
      }
    }
    // Rewriting above may have queued fresh switch_enum/struct_extract work.
    repeat = !pass.switchEnumInstsToMod.empty() ||
             !pass.structExtractInstsToMod.empty();
    assert(pass.applies.empty());
    pass.applies.append(currentModApplies.begin(), currentModApplies.end());
  } while (repeat);
  // Replace operands that now have loaded values available.
  for (SILInstruction *instr : pass.instsToMod) {
    for (Operand &operand : instr->getAllOperands()) {
      auto currOperand = operand.get();
      if (std::find(pass.largeLoadableArgs.begin(),
                    pass.largeLoadableArgs.end(),
                    currOperand) != pass.largeLoadableArgs.end()) {
        SILValue newOperand = pass.argsToLoadedValueMap[currOperand];
        assert(newOperand != currOperand &&
               "Did not allocate storage and convert operand");
        operand.set(newOperand);
      }
    }
  }
  // Cast tuple projections whose function type changed.
  for (SingleValueInstruction *instr : pass.tupleInstsToMod) {
    castTupleInstr(instr, pass.Mod);
  }
  // Re-create alloc_stacks with the lowered type.
  while (!pass.allocStackInstsToMod.empty()) {
    auto *instr = pass.allocStackInstsToMod.pop_back_val();
    SILBuilderWithScope allocBuilder(instr);
    SILType currSILType = instr->getType();
    SILType newSILType = getNewSILType(genEnv, currSILType, pass.Mod);
    auto *newInstr = allocBuilder.createAllocStack(instr->getLoc(), newSILType);
    instr->replaceAllUsesWith(newInstr);
    instr->getParent()->erase(instr);
  }
  // Re-create pointer_to_address with the lowered address type.
  while (!pass.pointerToAddrkInstsToMod.empty()) {
    auto *instr = pass.pointerToAddrkInstsToMod.pop_back_val();
    SILBuilderWithScope pointerBuilder(instr);
    SILType currSILType = instr->getType();
    SILType newSILType = getNewSILType(genEnv, currSILType, pass.Mod);
    auto *newInstr = pointerBuilder.createPointerToAddress(
        instr->getLoc(), instr->getOperand(), newSILType.getAddressType(),
        instr->isStrict());
    instr->replaceAllUsesWith(newInstr);
    instr->getParent()->erase(instr);
  }
  // debug_value -> debug_value_addr where the operand became an address.
  for (SILInstruction *instr : pass.debugInstsToMod) {
    assert(instr->getAllOperands().size() == 1 &&
           "Debug instructions have one operand");
    for (Operand &operand : instr->getAllOperands()) {
      auto currOperand = operand.get();
      if (pass.argsToLoadedValueMap.find(currOperand) !=
          pass.argsToLoadedValueMap.end()) {
        SILValue newOperand = pass.argsToLoadedValueMap[currOperand];
        assert(newOperand != currOperand &&
               "Did not allocate storage and convert operand");
        operand.set(newOperand);
      } else {
        assert(currOperand->getType().isAddress() &&
               "Expected an address type");
        SILBuilderWithScope debugBuilder(instr);
        debugBuilder.createDebugValueAddr(instr->getLoc(), currOperand);
        instr->getParent()->erase(instr);
      }
    }
  }
  // destroy_value -> destroy_addr.
  for (SILInstruction *instr : pass.destroyValueInstsToMod) {
    assert(instr->getAllOperands().size() == 1 &&
           "destroy_value instructions have one operand");
    for (Operand &operand : instr->getAllOperands()) {
      auto currOperand = operand.get();
      assert(currOperand->getType().isAddress() && "Expected an address type");
      SILBuilderWithScope destroyBuilder(instr);
      destroyBuilder.createDestroyAddr(instr->getLoc(), currOperand);
      instr->getParent()->erase(instr);
    }
  }
  // store of a large value -> outlined copy_addr.
  for (StoreInst *instr : pass.storeInstsToMod) {
    SILValue src = instr->getSrc();
    SILValue tgt = instr->getDest();
    SILType srcType = src->getType();
    SILType tgtType = tgt->getType();
    assert(srcType && "Expected an address-type source");
    assert(tgtType.isAddress() && "Expected an address-type target");
    assert(srcType == tgtType && "Source and target type do not match");
    SILBuilderWithScope copyBuilder(instr);
    createOutlinedCopyCall(copyBuilder, src, tgt, pass);
    instr->getParent()->erase(instr);
  }
  // retain_value / release_value -> their *_addr forms.
  for (RetainValueInst *instr : pass.retainInstsToMod) {
    SILBuilderWithScope retainBuilder(instr);
    retainBuilder.createRetainValueAddr(
        instr->getLoc(), instr->getOperand(), instr->getAtomicity());
    instr->getParent()->erase(instr);
  }
  for (ReleaseValueInst *instr : pass.releaseInstsToMod) {
    SILBuilderWithScope releaseBuilder(instr);
    releaseBuilder.createReleaseValueAddr(
        instr->getLoc(), instr->getOperand(), instr->getAtomicity());
    instr->getParent()->erase(instr);
  }
  for (SingleValueInstruction *instr : pass.resultTyInstsToMod) {
    // Update the return type of these instrs
    // Note: The operand was already updated!
    SILType currSILType = instr->getType().getObjectType();
    SILType newSILType = getNewSILType(genEnv, currSILType, pass.Mod);
    SILBuilderWithScope resultTyBuilder(instr);
    SILLocation Loc = instr->getLoc();
    SingleValueInstruction *newInstr = nullptr;
    switch (instr->getKind()) {
    case SILInstructionKind::StructExtractInst: {
      auto *convInstr = cast<StructExtractInst>(instr);
      newInstr = resultTyBuilder.createStructExtract(
          Loc, convInstr->getOperand(), convInstr->getField(),
          newSILType.getObjectType());
      break;
    }
    case SILInstructionKind::StructElementAddrInst: {
      auto *convInstr = cast<StructElementAddrInst>(instr);
      newInstr = resultTyBuilder.createStructElementAddr(
          Loc, convInstr->getOperand(), convInstr->getField(),
          newSILType.getAddressType());
      break;
    }
    case SILInstructionKind::RefTailAddrInst: {
      auto *convInstr = cast<RefTailAddrInst>(instr);
      newInstr = resultTyBuilder.createRefTailAddr(Loc, convInstr->getOperand(),
                                                   newSILType.getAddressType());
      break;
    }
    case SILInstructionKind::RefElementAddrInst: {
      auto *convInstr = cast<RefElementAddrInst>(instr);
      newInstr = resultTyBuilder.createRefElementAddr(
          Loc, convInstr->getOperand(), convInstr->getField(),
          newSILType.getAddressType());
      break;
    }
    case SILInstructionKind::BeginAccessInst: {
      auto *convInstr = cast<BeginAccessInst>(instr);
      newInstr = resultTyBuilder.createBeginAccess(Loc, convInstr->getOperand(),
                                                   convInstr->getAccessKind(),
                                                   convInstr->getEnforcement());
      break;
    }
    case SILInstructionKind::EnumInst: {
      auto *convInstr = cast<EnumInst>(instr);
      SILValue operand =
          convInstr->hasOperand() ? convInstr->getOperand() : SILValue();
      newInstr = resultTyBuilder.createEnum(
          Loc, operand, convInstr->getElement(), newSILType.getObjectType());
      break;
    }
    default:
      llvm_unreachable("Unhandled aggrTy instr");
    }
    instr->replaceAllUsesWith(newInstr);
    instr->eraseFromParent();
  }
  // Re-create method lookups whose function type was lowered.
  for (MethodInst *instr : pass.methodInstsToMod) {
    SILType currSILType = instr->getType();
    auto currSILFunctionType = currSILType.castTo<SILFunctionType>();
    GenericEnvironment *genEnvForMethod = nullptr;
    if (currSILFunctionType->isPolymorphic()) {
      genEnvForMethod = getGenericEnvironment(currSILFunctionType);
    }
    SILType newSILType = SILType::getPrimitiveObjectType(
        getNewSILFunctionType(genEnvForMethod, currSILFunctionType, pass.Mod));
    auto member = instr->getMember();
    auto loc = instr->getLoc();
    SILBuilderWithScope methodBuilder(instr);
    MethodInst *newInstr = nullptr;
    switch (instr->getKind()) {
    case SILInstructionKind::ClassMethodInst: {
      SILValue selfValue = instr->getOperand(0);
      newInstr = methodBuilder.createClassMethod(loc, selfValue, member,
                                                 newSILType);
      break;
    }
    case SILInstructionKind::SuperMethodInst: {
      SILValue selfValue = instr->getOperand(0);
      newInstr = methodBuilder.createSuperMethod(loc, selfValue, member,
                                                 newSILType);
      break;
    }
    case SILInstructionKind::WitnessMethodInst: {
      auto *WMI = dyn_cast<WitnessMethodInst>(instr);
      assert(WMI && "ValueKind is Witness Method but dyn_cast failed");
      newInstr = methodBuilder.createWitnessMethod(
          loc, WMI->getLookupType(), WMI->getConformance(), member, newSILType);
      break;
    }
    default:
      llvm_unreachable("Expected known MethodInst ValueKind");
    }
    instr->replaceAllUsesWith(newInstr);
    instr->getParent()->erase(instr);
  }
  // Rewrite returns of a now-indirect result: store into the indirect return
  // argument and return the empty tuple instead.
  while (!pass.modReturnInsts.empty()) {
    auto *instr = pass.modReturnInsts.pop_back_val();
    auto loc = instr->getLoc(); // SILLocation::RegularKind
    auto regLoc = RegularLocation(loc.getSourceLoc());
    SILBuilderWithScope retBuilder(instr);
    assert(modNonFuncTypeResultType(pass.F, pass.Mod) &&
           "Expected a regular type");
    // Before we return an empty tuple, init return arg:
    auto *entry = pass.F->getEntryBlock();
    auto *retArg = entry->getArgument(0);
    auto retOp = instr->getOperand();
    auto storageType = retOp->getType();
    if (storageType.isAddress()) {
      // There *might* be a dealloc_stack that already released this value
      // we should create the copy *before* the epilogue's deallocations
      auto IIR = instr->getReverseIterator();
      for (++IIR; IIR != instr->getParent()->rend(); ++IIR) {
        auto *currIIInstr = &(*IIR);
        if (currIIInstr->getKind() != SILInstructionKind::DeallocStackInst) {
          // got the right location - stop.
          --IIR;
          break;
        }
      }
      auto II = (IIR != instr->getParent()->rend())
                    ? IIR->getIterator()
                    : instr->getParent()->begin();
      SILBuilderWithScope retCopyBuilder(II);
      createOutlinedCopyCall(retCopyBuilder, retOp, retArg, pass, &regLoc);
    } else {
      if (pass.F->hasQualifiedOwnership()) {
        retBuilder.createStore(regLoc, retOp, retArg,
                               StoreOwnershipQualifier::Init);
      } else {
        retBuilder.createStore(regLoc, retOp, retArg,
                               StoreOwnershipQualifier::Unqualified);
      }
    }
    auto emptyTy = retBuilder.getModule().Types.getLoweredType(
        TupleType::getEmpty(retBuilder.getModule().getASTContext()));
    auto newRetTuple = retBuilder.createTuple(regLoc, emptyTy, {});
    retBuilder.createReturn(newRetTuple->getLoc(), newRetTuple);
    instr->eraseFromParent();
  }
}
// Rewrite function return argument if it is a "function pointer"
// If it is a large type also return true - will be re-written later
// Returns true if the return argument needed re-writing
/// See the comment above: rewrites the function's result type when it is a
/// lowered function-pointer type, and reports whether any return rewriting
/// is (or will be) needed.
static bool rewriteFunctionReturn(StructLoweringState &pass) {
  GenericEnvironment *genEnv = pass.F->getGenericEnvironment();
  auto loweredTy = pass.F->getLoweredFunctionType();
  SILFunction *F = pass.F;
  SILType resultTy = loweredTy->getAllResultsType();
  SILType newSILType = getNewSILType(genEnv, resultTy, pass.Mod);
  // We (currently) only care about function signatures
  if (!isLargeLoadableType(genEnv, resultTy, pass.Mod) &&
      (newSILType != resultTy)) {
    assert(loweredTy->getNumResults() == 1 && "Expected a single result");
    SILResultInfo origResultInfo = loweredTy->getSingleResult();
    // Keep the original convention, only swap in the lowered result type.
    SILResultInfo newSILResultInfo(newSILType.getSwiftRValueType(),
                                   origResultInfo.getConvention());
    auto NewTy = SILFunctionType::get(
        loweredTy->getGenericSignature(), loweredTy->getExtInfo(),
        loweredTy->getCoroutineKind(),
        loweredTy->getCalleeConvention(), loweredTy->getParameters(),
        loweredTy->getYields(),
        newSILResultInfo, loweredTy->getOptionalErrorResult(),
        F->getModule().getASTContext(),
        loweredTy->getWitnessMethodConformanceOrNone());
    F->rewriteLoweredTypeUnsafe(NewTy);
    return true;
  } else if (isLargeLoadableType(genEnv, resultTy, pass.Mod)) {
    // Large results are re-written later (indirect return argument).
    return true;
  }
  return false;
}
/// Per-function driver: lowers storage for large loadable values in \p F and
/// records the function / its applies for the module-wide re-creation phases.
void LoadableByAddress::runOnFunction(SILFunction *F) {
  CanSILFunctionType funcType = F->getLoweredFunctionType();
  IRGenModule *currIRMod = getIRGenModule()->IRGen.getGenModule(F);
  if (F->isExternalDeclaration()) {
    if (!modifiableFunction(funcType)) {
      return;
    }
    // External function - re-write external declaration - this is ABI!
    GenericEnvironment *genEnv = F->getGenericEnvironment();
    auto loweredTy = F->getLoweredFunctionType();
    if (!genEnv && loweredTy->isPolymorphic()) {
      genEnv = getGenericEnvironment(loweredTy);
    }
    if (shouldTransformFunctionType(genEnv, loweredTy,
                                    *currIRMod)) {
      modFuncs.insert(F);
    }
    return;
  }
  StructLoweringState pass(F, *currIRMod);
  // Rewrite function args and insert allocs.
  LoadableStorageAllocation allocator(pass);
  allocator.allocateLoadableStorage();
  bool rewrittenReturn = false;
  if (modifiableFunction(funcType)) {
    rewrittenReturn = rewriteFunctionReturn(pass);
  }
  DEBUG(llvm::dbgs() << "\nREWRITING: " << F->getName(); F->dump());
  // Rewrite instructions relating to the loadable struct.
  rewriteFunction(pass, allocator);
  invalidateAnalysis(F, SILAnalysis::InvalidationKind::Instructions);
  // If we modified the function arguments - add to list of functions to clone
  if (modifiableFunction(funcType) &&
      (rewrittenReturn || !pass.largeLoadableArgs.empty() ||
       !pass.funcSigArgs.empty())) {
    modFuncs.insert(F);
  }
  // If we modified any applies - add them to the global list for recreation
  if (!pass.applies.empty()) {
    modApplies.insert(pass.applies.begin(), pass.applies.end());
  }
  // Merge this function's apply -> alloc map into the module-wide map used
  // by recreateSingleApply.
  if (!pass.applyRetToAllocMap.empty()) {
    for (auto elm : pass.applyRetToAllocMap) {
      allApplyRetToAllocMap.insert(elm);
    }
  }
}
/// If \p op has a SIL function type that this pass lowers, insert a cast of
/// \p op to the lowered type (addr-cast for addresses, bit-cast for objects)
/// and return the cast; otherwise return \p op unchanged.
static SILValue
getOperandTypeWithCastIfNecessary(SILInstruction *containingInstr, SILValue op,
                                  IRGenModule &Mod, SILBuilder &builder) {
  SILType currSILType = op->getType();
  auto funcType = currSILType.getAs<SILFunctionType>();
  if (!funcType)
    return op;
  GenericEnvironment *genEnv =
      containingInstr->getFunction()->getGenericEnvironment();
  if (!genEnv && funcType->isPolymorphic())
    genEnv = getGenericEnvironment(funcType);
  auto newFnType = getNewSILFunctionType(genEnv, funcType, Mod);
  SILType newSILType = SILType::getPrimitiveObjectType(newFnType);
  if (currSILType.isAddress()) {
    newSILType = newSILType.getAddressType(); // we need address for loads
    if (newSILType == currSILType)
      return op;
    return builder.createUncheckedAddrCast(containingInstr->getLoc(), op,
                                           newSILType);
  }
  assert(currSILType.isObject() && "Expected an object type");
  if (newSILType == currSILType)
    return op;
  return builder.createUncheckedBitCast(containingInstr->getLoc(), op,
                                        newSILType);
}
/// Re-create one apply/try_apply/partial_apply so its callee and argument
/// types match the lowered function types, passing the pre-allocated stack
/// slot first when a direct result became an indirect parameter.
void LoadableByAddress::recreateSingleApply(SILInstruction *applyInst) {
  auto *F = applyInst->getFunction();
  IRGenModule *currIRMod = getIRGenModule()->IRGen.getGenModule(F);
  // Collect common info
  ApplySite applySite = ApplySite(applyInst);
  SILValue callee = applySite.getCallee();
  if (auto site = ApplySite::isa(callee)) {
    // We need to re-create the callee's apply before recreating this one
    // else verification will fail with wrong SubstCalleeType
    auto calleInstr = site.getInstruction();
    if (modApplies.remove(calleInstr)) {
      recreateSingleApply(calleInstr);
      // The recursive call replaced the callee; re-fetch it.
      callee = applySite.getCallee();
    }
  }
  CanSILFunctionType origSILFunctionType = applySite.getSubstCalleeType();
  GenericEnvironment *genEnv = nullptr;
  CanSILFunctionType newSILFunctionType =
      getNewSILFunctionType(genEnv, origSILFunctionType, *currIRMod);
  SILFunctionConventions newSILFunctionConventions(newSILFunctionType,
                                                   *getModule());
  SmallVector<SILValue, 8> callArgs;
  SILBuilderWithScope applyBuilder(applyInst);
  // If we turned a direct result into an indirect parameter
  // Find the new alloc we created earlier.
  // and pass it as first parameter:
  if (applyInst->getKind() != SILInstructionKind::PartialApplyInst &&
      modNonFuncTypeResultType(genEnv, origSILFunctionType, *currIRMod) &&
      modifiableApply(applySite, *getIRGenModule())) {
    assert(allApplyRetToAllocMap.find(applyInst) !=
           allApplyRetToAllocMap.end());
    auto newAlloc = allApplyRetToAllocMap.find(applyInst)->second;
    callArgs.push_back(newAlloc);
  }
  // Collect arg operands
  for (Operand &operand : applySite.getArgumentOperands()) {
    SILValue currOperand = operand.get();
    // Cast function-typed operands to their lowered type if needed.
    currOperand = getOperandTypeWithCastIfNecessary(applyInst, currOperand,
                                                    *currIRMod, applyBuilder);
    callArgs.push_back(currOperand);
  }
  // Recreate apply with new operands due to substitution-type cache
  switch (applyInst->getKind()) {
  case SILInstructionKind::ApplyInst: {
    auto *castedApply = cast<ApplyInst>(applyInst);
    SILValue newApply =
        applyBuilder.createApply(castedApply->getLoc(), callee,
                                 applySite.getSubstitutions(),
                                 callArgs, castedApply->isNonThrowing());
    castedApply->replaceAllUsesWith(newApply);
    break;
  }
  case SILInstructionKind::TryApplyInst: {
    // try_apply produces no direct result; only the successors carry over.
    auto *castedApply = cast<TryApplyInst>(applyInst);
    applyBuilder.createTryApply(
        castedApply->getLoc(), callee,
        applySite.getSubstitutions(), callArgs,
        castedApply->getNormalBB(), castedApply->getErrorBB());
    break;
  }
  case SILInstructionKind::PartialApplyInst: {
    auto *castedApply = cast<PartialApplyInst>(applyInst);
    // Change the type of the Closure
    auto partialApplyConvention = castedApply->getType()
                                      .getAs<SILFunctionType>()
                                      ->getCalleeConvention();
    auto newApply =
        applyBuilder.createPartialApply(castedApply->getLoc(), callee,
                                        applySite.getSubstitutions(), callArgs,
                                        partialApplyConvention);
    castedApply->replaceAllUsesWith(newApply);
    break;
  }
  default:
    llvm_unreachable("Unexpected instr: unknown apply type");
  }
  applyInst->getParent()->erase(applyInst);
}
/// Drain the worklist of applies whose callee signatures changed.
/// Each entry is rebuilt by recreateSingleApply, which may itself
/// recursively remove and rebuild callee entries from the worklist.
void LoadableByAddress::recreateApplies() {
for (; !modApplies.empty();) {
auto *pendingApply = modApplies.pop_back_val();
recreateSingleApply(pendingApply);
}
}
/// Re-create the recorded loads of function-typed values so their result
/// type matches the rewritten function type, inserting an address cast on
/// the source address when needed.
void LoadableByAddress::recreateLoadInstrs() {
for (auto *loadInstr : loadInstrsOfFunc) {
SILBuilderWithScope loadBuilder(loadInstr);
// If this is a load of a function for which we changed the return type:
// add UncheckedBitCast before the load
auto loadOp = loadInstr->getOperand();
loadOp = getOperandTypeWithCastIfNecessary(loadInstr, loadOp,
*getIRGenModule(), loadBuilder);
// Preserve the original ownership qualifier on the new load.
auto *newInstr = loadBuilder.createLoad(loadInstr->getLoc(), loadOp,
loadInstr->getOwnershipQualifier());
loadInstr->replaceAllUsesWith(newInstr);
loadInstr->getParent()->erase(loadInstr);
}
}
/// Re-create unchecked_enum_data instructions whose payload carries a
/// function type that was rewritten. If the enum case's payload type
/// differs from the new lowered type, extract with the case type and
/// bitcast the result; otherwise extract directly at the new type.
void LoadableByAddress::recreateUncheckedEnumDataInstrs() {
for (auto *enumInstr : uncheckedEnumDataOfFunc) {
SILBuilderWithScope enumBuilder(enumInstr);
SILFunction *F = enumInstr->getFunction();
IRGenModule *currIRMod = getIRGenModule()->IRGen.getGenModule(F);
SILType origType = enumInstr->getType();
GenericEnvironment *genEnv = F->getGenericEnvironment();
SILType newType = getNewSILType(genEnv, origType, *currIRMod);
// The payload type as declared on the enum case itself.
auto caseTy = enumInstr->getOperand()->getType().getEnumElementType(
enumInstr->getElement(), F->getModule());
SingleValueInstruction *newInstr = nullptr;
if (caseTy != newType) {
auto *takeEnum = enumBuilder.createUncheckedEnumData(
enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(),
caseTy);
newInstr = enumBuilder.createUncheckedBitCast(enumInstr->getLoc(),
takeEnum, newType);
} else {
newInstr = enumBuilder.createUncheckedEnumData(
enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(),
newType);
}
enumInstr->replaceAllUsesWith(newInstr);
enumInstr->getParent()->erase(enumInstr);
}
}
/// Address variant of the above: re-create unchecked_take_enum_data_addr
/// instructions, inserting an unchecked_addr_cast when the case payload
/// type no longer matches the (object form of the) original result type.
void LoadableByAddress::recreateUncheckedTakeEnumDataAddrInst() {
for (auto *enumInstr : uncheckedTakeEnumDataAddrOfFunc) {
SILBuilderWithScope enumBuilder(enumInstr);
SILFunction *F = enumInstr->getFunction();
IRGenModule *currIRMod = getIRGenModule()->IRGen.getGenModule(F);
SILType origType = enumInstr->getType();
GenericEnvironment *genEnv = F->getGenericEnvironment();
SILType newType = getNewSILType(genEnv, origType, *currIRMod);
// The payload type as declared on the enum case itself.
auto caseTy = enumInstr->getOperand()->getType().getEnumElementType(
enumInstr->getElement(), F->getModule());
SingleValueInstruction *newInstr = nullptr;
if (caseTy != origType.getObjectType()) {
// Take at the case's own (address) type, then addr-cast to the new type.
auto *takeEnum = enumBuilder.createUncheckedTakeEnumDataAddr(
enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(),
caseTy.getAddressType());
newInstr = enumBuilder.createUncheckedAddrCast(
enumInstr->getLoc(), takeEnum, newType.getAddressType());
} else {
newInstr = enumBuilder.createUncheckedTakeEnumDataAddr(
enumInstr->getLoc(), enumInstr->getOperand(), enumInstr->getElement(),
newType.getAddressType());
}
enumInstr->replaceAllUsesWith(newInstr);
enumInstr->getParent()->erase(enumInstr);
}
}
void LoadableByAddress::fixStoreToBlockStorageInstrs() {
for (auto *instr : storeToBlockStorageInstrs) {
auto dest = instr->getDest();
auto destBlock = cast<ProjectBlockStorageInst>(dest);
SILType destType = destBlock->getType();
auto src = instr->getSrc();
SILType srcType = src->getType();
if (destType.getObjectType() != srcType) {
// Add cast to destType
SILBuilderWithScope castBuilder(instr);
auto *castInstr = castBuilder.createUncheckedBitCast(
instr->getLoc(), src, destType.getObjectType());
instr->setOperand(StoreInst::Src, castInstr);
}
}
}
/// Re-create recorded function-value conversion instructions
/// (thin_to_thick_function, thin_function_to_pointer, convert_function)
/// so their result carries the rewritten function type.
void LoadableByAddress::recreateConvInstrs() {
for (auto *convInstr : conversionInstrs) {
IRGenModule *currIRMod =
getIRGenModule()->IRGen.getGenModule(convInstr->getFunction());
SILType currSILType = convInstr->getType();
// For thin_function_to_pointer the function type lives on the operand,
// not on the (pointer-typed) result.
if (auto *thinToPointer = dyn_cast<ThinFunctionToPointerInst>(convInstr)) {
currSILType = thinToPointer->getOperand()->getType();
}
auto currSILFunctionType = currSILType.castTo<SILFunctionType>();
GenericEnvironment *genEnv =
convInstr->getFunction()->getGenericEnvironment();
CanSILFunctionType newFnType =
getNewSILFunctionType(genEnv, currSILFunctionType, *currIRMod);
SILType newType = SILType::getPrimitiveObjectType(newFnType);
SILBuilderWithScope convBuilder(convInstr);
SingleValueInstruction *newInstr = nullptr;
switch (convInstr->getKind()) {
case SILInstructionKind::ThinToThickFunctionInst: {
auto instr = cast<ThinToThickFunctionInst>(convInstr);
newInstr = convBuilder.createThinToThickFunction(
instr->getLoc(), instr->getOperand(), newType);
break;
}
case SILInstructionKind::ThinFunctionToPointerInst: {
auto instr = cast<ThinFunctionToPointerInst>(convInstr);
// Result is a pointer type: recompute it from the original result type.
newType = getNewSILType(genEnv, instr->getType(), *getIRGenModule());
newInstr = convBuilder.createThinFunctionToPointer(
instr->getLoc(), instr->getOperand(), newType);
break;
}
case SILInstructionKind::ConvertFunctionInst: {
auto instr = cast<ConvertFunctionInst>(convInstr);
newInstr = convBuilder.createConvertFunction(
instr->getLoc(), instr->getOperand(), newType);
break;
}
default:
llvm_unreachable("Unexpected conversion instruction");
}
convInstr->replaceAllUsesWith(newInstr);
convInstr->getParent()->erase(convInstr);
}
}
/// Re-create recorded builtin instructions so their result type reflects
/// the rewritten (loadable-by-address) lowering. Operands are unchanged
/// and carried over wholesale.
void LoadableByAddress::recreateBuiltinInstrs() {
for (auto *builtinInstr : builtinInstrs) {
auto *currIRMod =
getIRGenModule()->IRGen.getGenModule(builtinInstr->getFunction());
auto *F = builtinInstr->getFunction();
GenericEnvironment *genEnv = F->getGenericEnvironment();
auto resultTy = builtinInstr->getType();
auto newResultTy = getNewSILType(genEnv, resultTy, *currIRMod);
// Copy the operand list in one shot instead of a manual push_back loop.
auto oldArgs = builtinInstr->getArguments();
llvm::SmallVector<SILValue, 5> newArgs(oldArgs.begin(), oldArgs.end());
SILBuilderWithScope builtinBuilder(builtinInstr);
auto *newInstr = builtinBuilder.createBuiltin(
builtinInstr->getLoc(), builtinInstr->getName(), newResultTy,
builtinInstr->getSubstitutions(),
newArgs);
builtinInstr->replaceAllUsesWith(newInstr);
builtinInstr->getParent()->erase(builtinInstr);
}
}
/// Rewrite F's lowered function type to the loadable-by-address form.
/// For polymorphic functions lacking a generic environment, one is
/// materialized from the function type first.
void LoadableByAddress::updateLoweredTypes(SILFunction *F) {
IRGenModule *currIRMod = getIRGenModule()->IRGen.getGenModule(F);
CanSILFunctionType funcType = F->getLoweredFunctionType();
GenericEnvironment *genEnv = F->getGenericEnvironment();
if (!genEnv && funcType->isPolymorphic()) {
genEnv = getGenericEnvironment(funcType);
}
auto newFuncTy = getNewSILFunctionType(genEnv, funcType, *currIRMod);
// "Unsafe": callers are responsible for fixing up all references.
F->rewriteLoweredTypeUnsafe(newFuncTy);
}
/// The entry point to this function transformation.
void LoadableByAddress::run() {
// Set the SIL state before the PassManager has a chance to run
// verification.
getModule()->setStage(SILStage::Lowered);
for (auto &F : *getModule())
runOnFunction(&F);
if (modFuncs.empty()) {
return;
}
// Scan the module for all references of the modified functions:
llvm::SetVector<FunctionRefInst *> funcRefs;
for (SILFunction &CurrF : *getModule()) {
for (SILBasicBlock &BB : CurrF) {
for (SILInstruction &I : BB) {
if (auto *FRI = dyn_cast<FunctionRefInst>(&I)) {
SILFunction *RefF = FRI->getReferencedFunction();
if (modFuncs.count(RefF) != 0) {
// Go over the uses and add them to lists to modify
//
// FIXME: Why aren't function_ref uses processed transitively? And
// why is it necessary to visit uses at all if they will be visited
// later in this loop?
for (auto *user : FRI->getUses()) {
SILInstruction *currInstr = user->getUser();
switch (currInstr->getKind()) {
case SILInstructionKind::ApplyInst:
case SILInstructionKind::TryApplyInst:
case SILInstructionKind::PartialApplyInst: {
if (modApplies.count(currInstr) == 0) {
modApplies.insert(currInstr);
}
break;
}
case SILInstructionKind::ConvertFunctionInst:
case SILInstructionKind::ThinFunctionToPointerInst:
case SILInstructionKind::ThinToThickFunctionInst: {
conversionInstrs.insert(
cast<SingleValueInstruction>(currInstr));
break;
}
case SILInstructionKind::BuiltinInst: {
auto *instr = cast<BuiltinInst>(currInstr);
builtinInstrs.insert(instr);
break;
}
case SILInstructionKind::DebugValueAddrInst:
case SILInstructionKind::DebugValueInst: {
break;
}
default:
llvm_unreachable("Unhandled use of FunctionRefInst");
}
}
funcRefs.insert(FRI);
}
} else if (auto *CFI = dyn_cast<ConvertFunctionInst>(&I)) {
SILValue val = CFI->getConverted();
SILType currType = val->getType();
auto fType = currType.getAs<SILFunctionType>();
assert(fType && "Expected SILFunctionType");
if (modifiableFunction(fType)) {
conversionInstrs.insert(CFI);
}
} else if (auto *TTI = dyn_cast<ThinToThickFunctionInst>(&I)) {
auto canType = TTI->getCallee()->getType();
auto fType = canType.castTo<SILFunctionType>();
if (modifiableFunction(fType))
conversionInstrs.insert(TTI);
} else if (auto *LI = dyn_cast<LoadInst>(&I)) {
SILType currType = LI->getType();
if (auto fType = getInnerFunctionType(currType)) {
if (modifiableFunction(fType)) {
// need to re-create these loads: re-write type cache
loadInstrsOfFunc.insert(LI);
}
}
} else if (auto *UED = dyn_cast<UncheckedEnumDataInst>(&I)) {
SILType currType = UED->getType();
if (auto fType = getInnerFunctionType(currType)) {
if (modifiableFunction(fType)) {
// need to re-create these loads: re-write type cache
uncheckedEnumDataOfFunc.insert(UED);
}
}
} else if (auto *UED = dyn_cast<UncheckedTakeEnumDataAddrInst>(&I)) {
SILType currType = UED->getType();
if (auto fType = getInnerFunctionType(currType)) {
if (modifiableFunction(fType)) {
// need to re-create these loads: re-write type cache
uncheckedTakeEnumDataAddrOfFunc.insert(UED);
}
}
} else if (auto *SI = dyn_cast<StoreInst>(&I)) {
auto dest = SI->getDest();
if (isa<ProjectBlockStorageInst>(dest)) {
storeToBlockStorageInstrs.insert(SI);
}
}
}
}
}
for (auto *F : modFuncs) {
// Update the lowered type of the Function
updateLoweredTypes(F);
}
// Update all references:
// Note: We don't need to update the witness tables and vtables
// They just contain a pointer to the function
// The pointer does not change
for (FunctionRefInst *instr : funcRefs) {
SILFunction *F = instr->getReferencedFunction();
SILBuilderWithScope refBuilder(instr);
FunctionRefInst *newInstr =
refBuilder.createFunctionRef(instr->getLoc(), F);
instr->replaceAllUsesWith(newInstr);
instr->getParent()->erase(instr);
}
// Re-create all conversions for which we modified the FunctionRef
recreateConvInstrs();
// Re-create all builtins for which we modified the FunctionRef
recreateBuiltinInstrs();
// Re-create all unchecked enum data instrs of function pointers
recreateUncheckedEnumDataInstrs();
// Same for data addr
recreateUncheckedTakeEnumDataAddrInst();
// Re-create all load instrs of function pointers
recreateLoadInstrs();
// Re-create all applies that we modified in the module
recreateApplies();
// Fix all instructions that rely on block storage type
fixStoreToBlockStorageInstrs();
// Clean up the data structs:
modFuncs.clear();
conversionInstrs.clear();
loadInstrsOfFunc.clear();
uncheckedEnumDataOfFunc.clear();
modApplies.clear();
storeToBlockStorageInstrs.clear();
}
/// Factory for the LoadableByAddress pass; ownership of the returned
/// transform is taken by the pass manager.
SILTransform *irgen::createLoadableByAddress() {
auto *transform = new LoadableByAddress();
return transform;
}
|
#include "AST/Type.h"
#include "frontend/lexer.h"
#include "LLVM/context.h"
#include "AST/declarations/class-decl.h"
// Canonical shared instances for every built-in (basic) type, keyed by
// the lexer's K_* token kinds.
std::shared_ptr<AST::Type>
AST::BasicType::String = std::make_shared<BasicType>(K_string),
AST::BasicType::Int = std::make_shared<BasicType>(K_int),
AST::BasicType::Long = std::make_shared<BasicType>(K_long),
AST::BasicType::Float=std::make_shared<BasicType>(K_float),
AST::BasicType::Double = std::make_shared<BasicType>(K_double),
AST::BasicType::Void = std::make_shared<AST::BasicType>(K_void),
AST::BasicType::Void_Ptr = std::make_shared<AST::BasicType>(K_string), // NOTE(review): Void_Ptr reuses K_string (which lowers to i8* in ToLLVM) — looks like a copy-paste; confirm a dedicated void-pointer kind was intended
AST::BasicType::Boolean = std::make_shared<AST::BasicType>(K_bool);
// Parse one type at the current lexer position.
// Dispatches on the leading token: a basic-type keyword, '(' for a tuple,
// or otherwise a (possibly qualified) custom type name; a trailing '['
// wraps the parsed element type into a tensor type.
std::shared_ptr<AST::Type> AST::Type::Match() {
std::shared_ptr<AST::Type> parsed;
if (frontend::Lexer::IsBasicType()) {
parsed = BasicType::Match();
} else if (frontend::Lexer::Check('(')) {
parsed = Tuple::Match();
} else {
parsed = CustomType::Match();
}
if (frontend::Lexer::Check('['))
parsed = Tensor::Match(parsed);
return parsed;
}
// Consume the current basic-type keyword token and wrap its kind in a
// BasicType node.
std::shared_ptr<AST::BasicType> AST::BasicType::Match() {
const auto kind = frontend::Lexer::token->type;
frontend::Lexer::Next();
return std::make_shared<BasicType>(kind);
}
// Map this basic type's token kind to the corresponding LLVM type.
// Returns nullptr for any kind not listed below (no default case).
llvm::Type* AST::BasicType::ToLLVM(std::shared_ptr<DFContext>context) {
llvm::Type* llvm_type = nullptr;
switch (detail) {
case K_void:
case 1: llvm_type = llvm::Type::getVoidTy(context->context); break; // NOTE(review): magic value 1 also maps to void — confirm which token this is meant to cover
case K_byte: llvm_type = llvm::Type::getInt8Ty(context->context); break;
case K_short: llvm_type = llvm::Type::getInt16Ty(context->context); break;
case K_int: llvm_type = llvm::Type::getInt32Ty(context->context); break;
case K_long: llvm_type = llvm::Type::getInt64Ty(context->context); break;
case K_float: llvm_type = llvm::Type::getFloatTy(context->context); break;
case K_double: llvm_type = llvm::Type::getDoubleTy(context->context); break;
case K_bool: llvm_type = llvm::Type::getInt8Ty(context->context); break; // bool lowered as i8, not i1
case K_string: llvm_type = llvm::Type::getInt8PtrTy(context->context); break; // string lowered as i8*
}
return llvm_type;
}
// Human-readable name of this basic type, derived from its token kind.
std::string AST::BasicType::ToString() {
return frontend::Lexer::Token::Name(detail);
}
// Construct a resolved custom type directly from a known class declaration.
// Initializer list reordered base-first: bases are always initialized before
// members regardless of listing order, so listing Type(Custom) last was
// misleading and triggers -Wreorder.
AST::CustomType::CustomType(const std::shared_ptr<decl::ClassDecl> decl) : Type(Custom), decl(decl), str(decl->GetFullname()) {}
// Parse a (possibly dot-qualified, possibly generic) custom type name,
// e.g. "A.B.C" or "Map<K,V>", accumulating the spelled-out name in str.
std::shared_ptr<AST::CustomType> AST::CustomType::Match() {
auto type = std::make_shared<AST::CustomType>();
type->str = frontend::Lexer::string_val;
frontend::Lexer::Match(Id);
// Qualified name segments: ".Id" repeated.
while (frontend::Lexer::Check('.')) {
frontend::Lexer::Next();
type->str += "." + frontend::Lexer::string_val;
frontend::Lexer::Match(Id);
}
// Optional generic argument list: "<Id, Id, ...>".
if (frontend::Lexer::Check('<')) {
frontend::Lexer::Next();
type->str += "<";
if (frontend::Lexer::Check(Id)) {
frontend::Lexer::Next();
// NOTE(review): string_val is read AFTER Next() here, while the
// qualified-name loop above reads it BEFORE consuming the token —
// confirm which token's text is meant to be appended.
type->str += frontend::Lexer::string_val;
while (frontend::Lexer::Check(',')) {
frontend::Lexer::Next();
frontend::Lexer::Match(Id);
type->str += "," + frontend::Lexer::string_val;
}
}
frontend::Lexer::Match('>');
type->str += ">";
}
return type;
}
// Resolve this custom type to its LLVM type via the module's named types.
// Classes are lowered to a pointer to their struct; value categories use
// the struct type directly. Returns nullptr (after reporting an error)
// when the type cannot be found.
llvm::Type* AST::CustomType::ToLLVM(std::shared_ptr<DFContext> ctx) {
llvm::Type* llvm_type = nullptr;
if (ctx->ast->IsCustomType(str)) {
const auto ty = ctx->module->getTypeByName(str);
// kClass instances are heap references → pointer-to-struct.
llvm_type = ctx->ast->GetClassDecl(str)->category == AST::decl::ClassDecl::kClass ? ty->getPointerTo() : static_cast<llvm::Type*>(ty);
}
else {
// Lazily resolve the declaration, then look the type up by full name.
// NOTE(review): GetClassDecl's result is dereferenced without a null
// check — confirm it cannot return nullptr for an unknown name.
if (decl == nullptr)decl = ctx->ast->GetClassDecl(str);
llvm_type = ctx->module->getTypeByName(decl->GetFullname());
if (llvm_type == nullptr) {
frontend::Debugger::ErrorV(-1, -1,"Unknown Type: {}", str );
return nullptr;
}
}
return llvm_type;
}
// std::shared_ptr<AST::Type> AST::CustomType::Copy() {
// auto cp= std::make_shared<AST::CustomType>();
// cp->str = str;
// return cp;
// }
// The fully spelled-out name accumulated during parsing (e.g. "A.B<K,V>").
std::string AST::CustomType::ToString() { return str; }
// Parse "(T)" or "(T1, T2, ...)". A single-element tuple collapses to the
// element type itself.
std::shared_ptr<AST::Type> AST::Tuple::Match() {
auto type = std::make_shared<AST::Tuple>();
frontend::Lexer::Match('(');
type->types.push_back(AST::Type::Match());
if (frontend::Lexer::Check(')')) {
// BUGFIX: consume the ')' before returning the collapsed element type;
// Check() only peeks, so the caller would otherwise see a stray ')'.
frontend::Lexer::Match(')');
return type->types[0];
}
while (frontend::Lexer::Check(',')) {
// BUGFIX: consume the ',' separator; it was previously only peeked,
// making the next Type::Match() run on the ',' token itself.
frontend::Lexer::Match(',');
type->types.push_back(AST::Type::Match());
}
frontend::Lexer::Match(')');
return std::static_pointer_cast<AST::Type>(type);
}
// Tuple lowering and printing are not implemented yet.
llvm::Type* AST::Tuple::ToLLVM(std::shared_ptr<DFContext>) {return nullptr;}
std::string AST::Tuple::ToString() { return "NOT_IMPLMENTED"; }
// Parse the "[N]" or "[]" suffix that turns an element type into a tensor
// type. A present size literal must be an integer; on a non-integer size
// an error is reported and nullptr returned.
std::shared_ptr<AST::Tensor> AST::Tensor::Match(std::shared_ptr<AST::Type> base) {
auto type = std::make_shared<AST::Tensor>(base);
frontend::Lexer::Match('[');
if (frontend::Lexer::Check(Num)) {
if (frontend::Lexer::token->value != K_int) {
// BUGFIX: corrected the garbled user-facing diagnostic
// ("Value in side a array operator shoule be integer.").
frontend::Debugger::Error("Value inside an array operator should be an integer.");
return nullptr;
}
// type->array = frontend::Lexer::number_val;
frontend::Lexer::Next();
}
else {
// Dynamic / unsized dimension.
// type->array = -2;
}
frontend::Lexer::Match(']');
return type;
}
// Tensor and function-type lowering/printing are not implemented yet.
llvm::Type* AST::Tensor::ToLLVM(std::shared_ptr<DFContext>) {
return nullptr;
}
std::string AST::Tensor::ToString() { return "NOT_IMPLMENTED"; }
llvm::Type* AST::FunctionType::ToLLVM(std::shared_ptr<DFContext>) { return nullptr; }
std::string AST::FunctionType::ToString() { return "NOT_IMPLMENTED"; }
|
// Copyright Carl Philipp Reh 2006 - 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef SGE_OPENCL_KERNEL_LOCAL_BUFFER_HPP_INCLUDED
#define SGE_OPENCL_KERNEL_LOCAL_BUFFER_HPP_INCLUDED
#include <sge/opencl/detail/symbol.hpp>
#include <sge/opencl/memory_object/byte_size.hpp>
namespace sge::opencl::kernel
{
/// Value type describing an OpenCL kernel argument that is a local
/// (work-group) memory buffer of a given byte size; it carries no data,
/// only the requested allocation size.
class local_buffer
{
public:
/// Construct from the desired local-memory size in bytes.
SGE_OPENCL_DETAIL_SYMBOL explicit local_buffer(memory_object::byte_size const &);
/// The requested size in bytes.
[[nodiscard]] SGE_OPENCL_DETAIL_SYMBOL memory_object::byte_size::value_type byte_size() const;
private:
memory_object::byte_size byte_size_;
};
}
#endif
|
//
// Created by vitor on 25-08-2018.
//
#ifndef CIRCUITSIM_BASIC_RESISTOR_HH
#define CIRCUITSIM_BASIC_RESISTOR_HH
#include <string_view>
#include "basic_component.hh"
#include "component_with_value.hh"
#include "../dc_context_view.hpp"
namespace circuitsim {
// CRTP base for resistor-like components: a two-terminal basic_component
// that additionally carries a value (the resistance). Inherits all
// constructors from the value-carrying base.
template<class Derived>
class basic_resistor : public component_with_value<Derived, basic_component<Derived, 2>> {
public:
using base = component_with_value<Derived, basic_component<Derived, 2>>;
using base::base;
};
// Trait bundle consumed by the simulator for any resistor component:
// schematic symbol, default value, and how to stamp it into the DC matrix.
template<class Resistor>
struct resistor_traits {
// Schematic symbol prefix ("R1", "R2", ...).
static constexpr std::string_view symbol() {
return "R";
}
// Resistance assigned when none is specified (ohms).
static constexpr double default_value() {
return 100;
}
// Stamp the resistance between the component's two ports into the
// DC analysis context. value(x) is found via ADL.
static void stamp(const Resistor &x, dc_context_view &ctx) {
ctx.stamp_resistance(x.port(0), x.port(1), value(x));
}
};
}
#endif //CIRCUITSIM_BASIC_RESISTOR_HH
|
/*
* ***** BEGIN LICENSE BLOCK *****
* Version: MIT
*
* Copyright (c) 2010-2013 Alan Antonuk
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* ***** END LICENSE BLOCK *****
*/
#include <gtest/gtest.h>
#include "SimpleAmqpClient/SimpleAmqpClient.h"
#include "connected_test.h"
using namespace AmqpClient;
// Connecting with all defaults to the test broker must succeed.
TEST(connecting_test, connect_default) {
Channel::ptr_t channel = Channel::Create(connected_test::GetBrokerHost());
}
// An unresolvable hostname must surface as std::runtime_error.
TEST(connecting_test, connect_badhost) {
EXPECT_THROW(Channel::ptr_t channel = Channel::Create("HostDoesntExist"),
std::runtime_error);
}
// Wrong credentials must be rejected with AccessRefusedException.
TEST(connecting_test, connect_badauth) {
EXPECT_THROW(Channel::ptr_t channel = Channel::Create(
connected_test::GetBrokerHost(), 5672, "baduser", "badpass"),
AccessRefusedException);
}
TEST(connecting_test, connect_badframesize) {
// AMQP Spec says we have a minimum frame size of 4096
EXPECT_THROW(
Channel::ptr_t channel = Channel::Create(
connected_test::GetBrokerHost(), 5672, "guest", "guest", "/", 400),
AmqpResponseLibraryException);
}
// A vhost that does not exist on the broker must raise NotAllowedException.
TEST(connecting_test, connect_badvhost) {
EXPECT_THROW(Channel::ptr_t channel =
Channel::Create(connected_test::GetBrokerHost(), 5672,
"guest", "guest", "nonexitant_vhost"),
NotAllowedException);
}
// Connecting via an amqp:// URI must succeed.
TEST(connecting_test, connect_using_uri) {
std::string host_uri = "amqp://" + connected_test::GetBrokerHost();
Channel::ptr_t channel = Channel::CreateFromUri(host_uri);
}
|
#pragma once
#include <wls/app/plugin.hpp>
#include <wls/chain/database.hpp>
#include <wls/chain/comment_object.hpp>
#include <boost/multi_index/composite_key.hpp>
#include <fc/thread/future.hpp>
#include <fc/api.hpp>
namespace wls { namespace tags {
using namespace wls::chain;
using namespace boost::multi_index;
using wls::app::application;
using chainbase::object;
using chainbase::oid;
using chainbase::allocator;
//
// Plugins should #define their SPACE_ID's so plugins with
// conflicting SPACE_ID assignments can be compiled into the
// same binary (by simply re-assigning some of the conflicting #defined
// SPACE_ID's in a build script).
//
// Assignment of SPACE_ID's cannot be done at run-time because
// various template automagic depends on them being known at compile
// time.
//
#ifndef TAG_SPACE_ID
#define TAG_SPACE_ID 5
#endif
#define TAGS_PLUGIN_NAME "tags"
// Tags are stored inline as fixed-capacity 32-byte strings.
typedef protocol::fixed_string_32 tag_name_type;
// Plugins need to define object type IDs such that they do not conflict
// globally. If each plugin uses the upper 8 bits as a space identifier,
// with 0 being for chain, then the lower 8 bits are free for each plugin
// to define as they see fit.
// Object type IDs for this plugin: TAG_SPACE_ID occupies the upper 8 bits
// so these cannot collide with chain or other plugin object types.
enum
{
tag_object_type = ( TAG_SPACE_ID << 8 ),
tag_stats_object_type = ( TAG_SPACE_ID << 8 ) + 1,
peer_stats_object_type = ( TAG_SPACE_ID << 8 ) + 2,
author_tag_stats_object_type = ( TAG_SPACE_ID << 8 ) + 3
};
// Forward declaration of the plugin's private implementation (pimpl).
namespace detail { class tags_plugin_impl; }
/**
* The purpose of the tag object is to allow the generation and listing of
* all top level posts by a string tag. The desired sort orders include:
*
* 1. created - time of creation
* 2. maturing - about to receive a payout
* 3. active - last reply the post or any child of the post
* 4. netvotes - individual accounts voting for post minus accounts voting against it
*
* Whenever a comment is modified, all tag_objects for that comment are updated to match.
*/
class tag_object : public object< tag_object_type, tag_object >
{
public:
// Chainbase construction protocol: the constructor functor fills in the
// fields; the allocator parameter is unused because all members are inline.
template< typename Constructor, typename Allocator >
tag_object( Constructor&& c, allocator< Allocator > a )
{
c( *this );
}
tag_object() {}
id_type id;
tag_name_type tag;
time_point_sec created; // creation time of the tagged comment
time_point_sec active; // time of last activity (reply anywhere in subtree)
time_point_sec cashout; // next payout time
int64_t net_rshares = 0;
int32_t net_votes = 0; // upvotes minus downvotes
int32_t children = 0; // replies at all depths
double hot = 0; // "hot" ranking score
double trending = 0; // "trending" ranking score
account_id_type author;
comment_id_type parent;
comment_id_type comment;
// Top-level posts have a null parent id.
bool is_post()const { return parent == comment_id_type(); }
};
typedef oid< tag_object > tag_id_type;
// Index tags for the multi-index container below.
struct by_cashout; /// all posts regardless of depth
struct by_net_rshares; /// all comments regardless of depth
struct by_parent_created;
struct by_parent_active;
struct by_parent_net_rshares; /// all top level posts by direct pending payout
struct by_parent_net_votes; /// all top level posts by direct votes
struct by_parent_trending;
struct by_parent_children; /// all top level posts with the most discussion (replies at all levels)
struct by_parent_hot;
struct by_author_parent_created; /// all blog posts by author with tag
struct by_author_comment;
struct by_reward_fund_net_rshares;
struct by_comment;
struct by_tag;
// Primary multi-index over tag_object. Every composite key ends with the
// unique object id so that each ordered_unique index has a total order;
// the metric component (time, rshares, votes, ...) is sorted descending
// via std::greater so iteration yields "best first".
typedef multi_index_container<
tag_object,
indexed_by<
ordered_unique< tag< by_id >, member< tag_object, tag_id_type, &tag_object::id > >,
ordered_unique< tag< by_comment >,
composite_key< tag_object,
member< tag_object, comment_id_type, &tag_object::comment >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less< comment_id_type >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_author_comment >,
composite_key< tag_object,
member< tag_object, account_id_type, &tag_object::author >,
member< tag_object, comment_id_type, &tag_object::comment >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less< account_id_type >, std::less< comment_id_type >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_created >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, time_point_sec, &tag_object::created >,
member<tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less< tag_name_type >, std::less<comment_id_type>, std::greater< time_point_sec >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_active >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, time_point_sec, &tag_object::active >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< time_point_sec >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_net_rshares >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, int64_t, &tag_object::net_rshares >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< int64_t >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_net_votes >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, int32_t, &tag_object::net_votes >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< int32_t >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_children >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, int32_t, &tag_object::children >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< int32_t >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_hot >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, double, &tag_object::hot >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< double >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_parent_trending >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, comment_id_type, &tag_object::parent >,
member< tag_object, double, &tag_object::trending >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<comment_id_type>, std::greater< double >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_cashout >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, time_point_sec, &tag_object::cashout >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less< time_point_sec >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_net_rshares >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, int64_t, &tag_object::net_rshares >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::greater< int64_t >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_author_parent_created >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
member< tag_object, account_id_type, &tag_object::author >,
member< tag_object, time_point_sec, &tag_object::created >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less<account_id_type>, std::greater< time_point_sec >, std::less< tag_id_type > >
>,
ordered_unique< tag< by_reward_fund_net_rshares >,
composite_key< tag_object,
member< tag_object, tag_name_type, &tag_object::tag >,
const_mem_fun< tag_object, bool, &tag_object::is_post >,
member< tag_object, int64_t, &tag_object::net_rshares >,
member< tag_object, tag_id_type, &tag_object::id >
>,
composite_key_compare< std::less<tag_name_type>, std::less< bool >,std::greater< int64_t >, std::less< tag_id_type > >
>
>,
allocator< tag_object >
> tag_index;
/**
* The purpose of this index is to quickly identify how popular various tags are by maintaining various sums over
* all posts under a particular tag
*/
class tag_stats_object : public object< tag_stats_object_type, tag_stats_object >
{
public:
// Chainbase construction protocol; allocator unused (all members inline).
template< typename Constructor, typename Allocator >
tag_stats_object( Constructor&& c, allocator< Allocator > )
{
c( *this );
}
tag_stats_object() {}
id_type id;
tag_name_type tag;
asset total_payout = asset( 0, WLS_SYMBOL ); // cumulative payout across posts with this tag
int32_t net_votes = 0;
uint32_t top_posts = 0; // count of top-level posts
uint32_t comments = 0; // count of replies
fc::uint128 total_trending = 0; // sum of trending scores
};
typedef oid< tag_stats_object > tag_stats_id_type;
// Index tags for tag_stats_index (by_comments/by_top_posts currently unused).
struct by_comments;
struct by_top_posts;
struct by_trending;
// Index over per-tag aggregate statistics; by_trending orders tags by
// descending total trending score. The commented-out indexes are kept
// for reference but not compiled.
typedef multi_index_container<
tag_stats_object,
indexed_by<
ordered_unique< tag< by_id >, member< tag_stats_object, tag_stats_id_type, &tag_stats_object::id > >,
ordered_unique< tag< by_tag >, member< tag_stats_object, tag_name_type, &tag_stats_object::tag > >,
/*
ordered_non_unique< tag< by_comments >,
composite_key< tag_stats_object,
member< tag_stats_object, uint32_t, &tag_stats_object::comments >,
member< tag_stats_object, tag_name_type, &tag_stats_object::tag >
>,
composite_key_compare< std::less< tag_name_type >, std::greater< uint32_t > >
>,
ordered_non_unique< tag< by_top_posts >,
composite_key< tag_stats_object,
member< tag_stats_object, uint32_t, &tag_stats_object::top_posts >,
member< tag_stats_object, tag_name_type, &tag_stats_object::tag >
>,
composite_key_compare< std::less< tag_name_type >, std::greater< uint32_t > >
>,
*/
ordered_non_unique< tag< by_trending >,
composite_key< tag_stats_object,
member< tag_stats_object, fc::uint128 , &tag_stats_object::total_trending >,
member< tag_stats_object, tag_name_type, &tag_stats_object::tag >
>,
composite_key_compare< std::greater< fc::uint128 >, std::less< tag_name_type > >
>
>,
allocator< tag_stats_object >
> tag_stats_index;
/**
* The purpose of this object is to track the relationship between accounts based upon how a user votes. Every time
* a user votes on a post, the relationship between voter and author increases direct rshares.
*/
class peer_stats_object : public object< peer_stats_object_type, peer_stats_object >
{
public:
// Chainbase construction protocol; allocator unused (all members inline).
template< typename Constructor, typename Allocator >
peer_stats_object( Constructor&& c, allocator< Allocator > a )
{
c( *this );
}
peer_stats_object() {}
id_type id;
account_id_type voter;
account_id_type peer;
int32_t direct_positive_votes = 0;
// Vote counters start at 1 so the ratios below never divide by zero.
int32_t direct_votes = 1;
int32_t indirect_positive_votes = 0;
int32_t indirect_votes = 1;
float rank = 0;
// Heuristic ranking of the voter→peer relationship: squared approval
// ratios scaled by the log of vote volume, with direct votes weighted
// 10x over indirect ones. If the voter has produced no positive votes
// at all, the volume factors are negated so more votes rank lower.
// NOTE(review): the exact weighting constants appear tuned, not derived
// — confirm before changing.
void update_rank()
{
auto direct = float( direct_positive_votes ) / direct_votes;
auto indirect = float( indirect_positive_votes ) / indirect_votes;
auto direct_order = log( direct_votes );
auto indirect_order = log( indirect_votes );
if( !(direct_positive_votes+indirect_positive_votes) ){
direct_order *= -1;
indirect_order *= -1;
}
direct *= direct;
indirect *= indirect;
direct *= direct_order * 10;
indirect *= indirect_order;
rank = direct + indirect;
}
};
typedef oid< peer_stats_object > peer_stats_id_type;
// Index tags for peer_stats_index.
struct by_rank;
struct by_voter_peer;
// Multi-index over peer_stats_object: by id, by (voter, rank desc, peer) for
// "best-ranked peers of a voter" queries, and by (voter, peer) for direct
// pair lookups.
typedef multi_index_container<
peer_stats_object,
indexed_by<
ordered_unique< tag< by_id >, member< peer_stats_object, peer_stats_id_type, &peer_stats_object::id > >,
ordered_unique< tag< by_rank >,
composite_key< peer_stats_object,
member< peer_stats_object, account_id_type, &peer_stats_object::voter >,
member< peer_stats_object, float, &peer_stats_object::rank >,
member< peer_stats_object, account_id_type, &peer_stats_object::peer >
>,
composite_key_compare< std::less< account_id_type >, std::greater< float >, std::less< account_id_type > >
>,
ordered_unique< tag< by_voter_peer >,
composite_key< peer_stats_object,
member< peer_stats_object, account_id_type, &peer_stats_object::voter >,
member< peer_stats_object, account_id_type, &peer_stats_object::peer >
>,
composite_key_compare< std::less< account_id_type >, std::less< account_id_type > >
>
>,
allocator< peer_stats_object >
> peer_stats_index;
/**
* The purpose of this object is to maintain stats about which tags an author uses, how frequently, and
* how many total earnings of all posts by author in tag. It also allows us to answer the question of which
* authors earn the most in each tag category. This helps users to discover the best bloggers to follow for
* particular tags.
*/
class author_tag_stats_object : public object< author_tag_stats_object_type, author_tag_stats_object >
{
public:
// Chainbase-style constructor: the Constructor functor fills in the fields.
template< typename Constructor, typename Allocator >
author_tag_stats_object( Constructor&& c, allocator< Allocator > )
{
c( *this );
}
id_type id;
account_id_type author;
tag_name_type tag;
asset total_rewards = asset( 0, WLS_SYMBOL ); // author's accumulated rewards in this tag
uint32_t total_posts = 0; // number of the author's posts in this tag
};
typedef oid< author_tag_stats_object > author_tag_stats_id_type;
// Index tags for author_tag_stats_index.
struct by_author_tag_posts;
struct by_author_posts_tag;
struct by_author_tag_rewards;
struct by_tag_rewards_author;
// NOTE(review): these using-declarations sit at namespace scope and inject
// std::less/std::greater into the whole namespace; consider scoping them.
using std::less;
using std::greater;
// Shared-memory multi-index supporting: most-used tags per author (post count
// desc), per-author+tag lookups, and top-earning authors per tag (rewards desc).
typedef chainbase::shared_multi_index_container<
author_tag_stats_object,
indexed_by<
ordered_unique< tag< by_id >,
member< author_tag_stats_object, author_tag_stats_id_type, &author_tag_stats_object::id >
>,
ordered_unique< tag< by_author_posts_tag >,
composite_key< author_tag_stats_object,
member< author_tag_stats_object, account_id_type, &author_tag_stats_object::author >,
member< author_tag_stats_object, uint32_t, &author_tag_stats_object::total_posts >,
member< author_tag_stats_object, tag_name_type, &author_tag_stats_object::tag >
>,
composite_key_compare< less< account_id_type >, greater< uint32_t >, less< tag_name_type > >
>,
ordered_unique< tag< by_author_tag_posts >,
composite_key< author_tag_stats_object,
member< author_tag_stats_object, account_id_type, &author_tag_stats_object::author >,
member< author_tag_stats_object, tag_name_type, &author_tag_stats_object::tag >,
member< author_tag_stats_object, uint32_t, &author_tag_stats_object::total_posts >
>,
composite_key_compare< less< account_id_type >, less< tag_name_type >, greater< uint32_t > >
>,
ordered_unique< tag< by_author_tag_rewards >,
composite_key< author_tag_stats_object,
member< author_tag_stats_object, account_id_type, &author_tag_stats_object::author >,
member< author_tag_stats_object, tag_name_type, &author_tag_stats_object::tag >,
member< author_tag_stats_object, asset, &author_tag_stats_object::total_rewards >
>,
composite_key_compare< less< account_id_type >, less< tag_name_type >, greater< asset > >
>,
ordered_unique< tag< by_tag_rewards_author >,
composite_key< author_tag_stats_object,
member< author_tag_stats_object, tag_name_type, &author_tag_stats_object::tag >,
member< author_tag_stats_object, asset, &author_tag_stats_object::total_rewards >,
member< author_tag_stats_object, account_id_type, &author_tag_stats_object::author >
>,
composite_key_compare< less< tag_name_type >, greater< asset >, less< account_id_type > >
>
>
> author_tag_stats_index;
/**
* Used to parse the metadata from the comment json_meta field.
*/
struct comment_metadata { set<string> tags; };
/**
* This plugin will scan all changes to posts and/or their meta data and
* maintain the tag/statistics indexes declared above.
*/
class tags_plugin : public wls::app::plugin
{
public:
tags_plugin( application* app );
virtual ~tags_plugin();
// Name used to register and configure the plugin.
std::string plugin_name()const override { return TAGS_PLUGIN_NAME; }
virtual void plugin_set_program_options(
boost::program_options::options_description& cli,
boost::program_options::options_description& cfg) override;
virtual void plugin_initialize(const boost::program_options::variables_map& options) override;
virtual void plugin_startup() override;
friend class detail::tags_plugin_impl;
// Pimpl holding the actual implementation (defined elsewhere).
std::unique_ptr<detail::tags_plugin_impl> my;
};
/**
* This API is used to query data maintained by the tags_plugin
*/
class tag_api : public std::enable_shared_from_this<tag_api> {
public:
tag_api(){};
tag_api(const app::api_context& ctx){}//:_app(&ctx.app){}
// Framework hook invoked once the API is registered; nothing to do yet.
void on_api_startup(){
}
// NOTE(review): stub — always returns an empty vector; presumably meant to
// query tag_stats_index eventually.
vector<tag_stats_object> get_tags()const { return vector<tag_stats_object>(); }
private:
//app::application* _app = nullptr;
};
} } //wls::tags
// Expose the tag_api to the RPC layer.
FC_API( wls::tags::tag_api, (get_tags) );
// Reflection and chainbase index registration for the plugin's objects.
FC_REFLECT( wls::tags::tag_object,
(id)(tag)(created)(active)(cashout)(net_rshares)(net_votes)(hot)(trending)(children)(author)(parent)(comment) )
CHAINBASE_SET_INDEX_TYPE( wls::tags::tag_object, wls::tags::tag_index )
FC_REFLECT( wls::tags::tag_stats_object,
(id)(tag)(total_payout)(net_votes)(top_posts)(comments)(total_trending) );
CHAINBASE_SET_INDEX_TYPE( wls::tags::tag_stats_object, wls::tags::tag_stats_index )
FC_REFLECT( wls::tags::peer_stats_object,
(id)(voter)(peer)(direct_positive_votes)(direct_votes)(indirect_positive_votes)(indirect_votes)(rank) );
CHAINBASE_SET_INDEX_TYPE( wls::tags::peer_stats_object, wls::tags::peer_stats_index )
FC_REFLECT( wls::tags::comment_metadata, (tags) );
FC_REFLECT( wls::tags::author_tag_stats_object, (id)(author)(tag)(total_posts)(total_rewards) )
CHAINBASE_SET_INDEX_TYPE( wls::tags::author_tag_stats_object, wls::tags::author_tag_stats_index )
|
/*
* Problema: Lendo Livros
* https://www.urionlinejudge.com.br/judge/pt/problems/view/1542
*/
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <cstdlib>
#include <numeric>
#include <string>
#include <sstream>
#include <iomanip>
#include <locale>
#include <bitset>
#include <map>
#include <vector>
#include <queue>
#include <stack>
#include <algorithm>
#include <cmath>
#define INF 0x3F3F3F3F
#define PI 3.14159265358979323846
#define EPS 1e-10
#define vet_i(tipo, lin, col, inic) vector< vector< tipo > > (lin, vector< tipo > (col, inic))
#define vet_d(tipo) vector< vector< tipo > >
#define lli long long int
#define llu unsigned long long int
#define fore(var, inicio, final) for(int var=inicio; var<final; var++)
#define forec(var, inicio, final, incremento) for(int var=inicio; var<final; incremento)
#define forit(it, var) for( it = var.begin(); it != var.end(); it++ )
using namespace std;
int main(){
    // Per-case inputs; q == 0 terminates the input. The unused locals of the
    // original (a, b, pag_min, d1, d2, df, x) have been removed.
    int q, d, p;
    int pag; // computed number of pages
    cin>>q;
    while(q!=0){
        cin>>d>>p;
        // Formula from the problem statement. Assumes p != q (otherwise the
        // division would be undefined) — guaranteed by the judge input.
        pag = d*(p*q)/(p-q);
        // Singular/plural output exactly as the judge expects
        if(pag != 1) cout<<pag<<" paginas"<<endl;
        else cout<<"1 pagina"<<endl;
        cin>>q;
    }
    return 0;
}
|
/**
* Definition for singly-linked list.
* struct ListNode {
* int val;
* ListNode *next;
* ListNode(int x) : val(x), next(NULL) {}
* };
*/
class Solution {
public:
    // Iteratively reverses the list: peel nodes off the input one at a time
    // and push each onto the front of the already-reversed result.
    ListNode* reverseList(ListNode* head) {
        ListNode* reversed = NULL;   // head of the reversed prefix built so far
        ListNode* node = head;       // next node still to be processed
        while (node) {
            ListNode* following = node->next;
            node->next = reversed;
            reversed = node;
            node = following;
        }
        return reversed;
    }
};
|
#include "voxigen/queueThread.h"
#include "voxigen/fileio/log.h"
namespace voxigen
{
// Constructs the thread pool wrapper. 'completeEvent' is notified whenever a
// worker finishes a request (see process()).
QueueThread::QueueThread(std::condition_variable *completeEvent):
m_completeEvent(completeEvent)
{
    // Default to the member callback. A generic lambda replaces the old
    // std::bind expression and forwards whatever argument type the
    // process::Callback signature carries.
    processRequest=[this](auto request) { return defaultCallback(request); };
}
// Replaces the request-processing callback (defaults to defaultCallback).
void QueueThread::setCallback(process::Callback callback)
{
processRequest=callback;
}
// Starts 'threadCount' worker threads running process().
void QueueThread::start(size_t threadCount)
{
    {
        // Set the running flag under the lock so workers observe it reliably.
        std::unique_lock<std::mutex> lock(m_mutex);
        m_run=true;
    }
    // Spawn the workers. emplace_back constructs each std::thread in place
    // from the member-function pointer, replacing the old bind+move dance;
    // the loop counter is size_t to match threadCount.
    for(size_t i=0; i<threadCount; ++i)
        m_threads.emplace_back(&QueueThread::process, this);
}
// Signals shutdown and blocks until every worker thread has exited.
void QueueThread::stop()
{
    {
        // Clear the running flag under the lock so sleeping workers see it.
        std::unique_lock<std::mutex> lock(m_mutex);
        m_run=false;
    }
    // Wake all workers so each can observe m_run == false and return.
    m_event.notify_all();
    // Join every worker before returning.
    for(std::thread &worker : m_threads)
        worker.join();
}
// Synchronizes the caller's queues with the worker queue under the mutex:
// removes canceled requests, inserts new ones, re-heaps when required, and
// hands completed requests back through 'completedQueue'. Workers are only
// notified when new work was actually added.
void QueueThread::updateQueue(RequestQueue &queue, RequestQueue &cancelQueue, RequestQueue &completedQueue, bool forceResort)
{
bool notify=false;
bool resort=forceResort;
{
std::unique_lock<std::mutex> lock(m_mutex);
//remove canceled request
if(!cancelQueue.empty())
resort=resort || removeRequests(m_queue, cancelQueue, completedQueue);
//insert request to workerQueue
if(!queue.empty())
{
if(resort)//we are going to resort so just insert them
{
#ifdef DEBUG_THREAD
for(process::Request *request:queue)
Log::debug("ProcessThread bulk inserting chunk %llx", request->data.chunk.handle);
#endif//DEBUG_RENDERERS
m_queue.insert(m_queue.end(), queue.begin(), queue.end());
queue.clear();
}
else
insertRequests(m_queue, queue);
notify=true;
}
//rebuild the heap if cancellation or a bulk insert broke the heap property
if(resort)
resortQueue(m_queue);
//drain the requests the workers finished since the last call
if(!m_completedQueue.empty())
{
#ifdef DEBUG_THREAD
for(process::Request *request:m_completedQueue)
Log::debug("ProcessThread completed request %llx", request);
#endif//DEBUG_THREAD
completedQueue.insert(completedQueue.end(), m_completedQueue.begin(), m_completedQueue.end());
m_completedQueue.clear();
}
}
//notify outside the lock so a woken worker can acquire it immediately
if(notify)
m_event.notify_all();
}
// Worker loop: pops the highest-priority request off the heap, runs the
// callback outside the lock, and reports the completion on the next pass.
void QueueThread::process()
{
bool run=true;
process::Request *request=nullptr;
while(run)
{
{
std::unique_lock<std::mutex> lock(m_mutex);
run=m_run;
if(!run)
break;
//report the request finished during the previous iteration
if(request)
{
#ifdef DEBUG_THREAD
Log::debug("ProcessThread request complete %llx", request);
#endif//DEBUG_THREAD
m_completedQueue.push_back(request);
m_completeEvent->notify_all();
request=nullptr;
}
if(!m_queue.empty())
{
//take the highest-priority request from the heap
std::pop_heap(m_queue.begin(), m_queue.end(), process::Compare());
request=m_queue.back();
m_queue.pop_back();
}
else
{
//nothing queued: sleep until updateQueue()/stop() notifies
m_event.wait(lock);
continue;
}
}
#ifdef DEBUG_THREAD
Log::debug("ProcessThread processing request %llx", request);
#endif//DEBUG_THREAD
//run the callback without holding the mutex
processRequest(request);
}
}
// Pushes every request onto 'queue' while preserving its heap property,
// then empties 'requests'.
void QueueThread::insertRequests(RequestQueue &queue, RequestQueue &requests)
{
for(process::Request *request:requests)
{
#ifdef DEBUG_THREAD
Log::debug("ProcessThread inserting chunk %llx", request->data.chunk.handle);
#endif//DEBUG_RENDERERS
queue.push_back(request);
std::push_heap(queue.begin(), queue.end(), process::Compare());
}
requests.clear();
}
// Rebuilds the heap from scratch; used after bulk inserts or removals.
void QueueThread::resortQueue(RequestQueue &queue)
{
std::make_heap(queue.begin(), queue.end(), process::Compare());
}
// Removes every request listed in 'cancelQueue' from 'queue', marking it
// Canceled and moving it to 'completed'. Cancels that were not found in
// 'queue' are also returned through 'completed'. Returns true when elements
// were removed, i.e. the heap property of 'queue' is broken and the caller
// must resort.
bool QueueThread::removeRequests(RequestQueue &queue, RequestQueue &cancelQueue, RequestQueue &completed)
{
bool resort=false;
size_t removed=0;
for(size_t i=0; i<queue.size(); )
{
bool increment=true;
process::Request *request=queue[i];
for(size_t j=0; j<cancelQueue.size(); ++j)
{
if(cancelQueue[j]==request)
{
//set request as canceled and add to completed
queue[i]->result=process::Result::Canceled;
completed.push_back(queue[i]);
//remove current from queue by swapping the back
queue[i]=queue.back();
queue.pop_back();
//remove request from cancelQueue by swapping the back
cancelQueue[j]=cancelQueue.back();
cancelQueue.pop_back();
removed++;
increment=false;
break;
}
}
//only increment if nothing removed
if(increment)
++i;
}
if(removed>0)
resort=true;//we removed some so going to need to resort
//return all cancels that were not found
for(size_t j=0; j<cancelQueue.size(); ++j)
completed.push_back(cancelQueue[j]);
cancelQueue.clear();
return resort;
}
}//namespace voxigen
|
/****************************************************************************
* ==> PSS_UserEntity ------------------------------------------------------*
****************************************************************************
* Description : Provides an user entity *
* Developer : Processsoft *
****************************************************************************/
#include "stdafx.h"
#include "PSS_UserEntity.h"
// processsoft
#include "zBaseLib\PSS_BaseDocument.h"
#include "zBaseLib\PSS_GUID.h"
#ifdef _DEBUG
#undef THIS_FILE
static char THIS_FILE[]=__FILE__;
#define new DEBUG_NEW
#endif
//---------------------------------------------------------------------------
// Serialization
//---------------------------------------------------------------------------
// MFC serialization support for PSS_UserEntity, versioned via g_DefVersion.
IMPLEMENT_SERIAL(PSS_UserEntity, CObject, g_DefVersion)
//---------------------------------------------------------------------------
// PSS_UserEntity
//---------------------------------------------------------------------------
// Main constructor: stores the identification data and generates a new GUID.
PSS_UserEntity::PSS_UserEntity(const CString& name,
const CString& description,
const float cost,
PSS_UserEntity* pParent) :
CObject(),
m_pParent(pParent),
m_EntityName(name),
m_EntityDescription(description),
m_EntityCost(cost)
{
CreateGUID();
}
//---------------------------------------------------------------------------
// Copy constructor: delegates to operator=, so the copy shares the source's
// GUID and has no parent (m_pParent stays NULL).
PSS_UserEntity::PSS_UserEntity(const PSS_UserEntity& other) :
CObject(),
m_pParent(NULL),
m_EntityCost(0.0f)
{
*this = other;
}
//---------------------------------------------------------------------------
PSS_UserEntity::~PSS_UserEntity()
{}
//---------------------------------------------------------------------------
// Copy assignment. NOTE(review): copies the GUID (copies are therefore not
// uniquely identified) and leaves m_pParent untouched — confirm both
// behaviors are intended.
PSS_UserEntity& PSS_UserEntity::operator = (const PSS_UserEntity& other)
{
m_GUID = other.m_GUID;
m_EntityName = other.m_EntityName;
m_EntityDescription = other.m_EntityDescription;
m_EntityCost = other.m_EntityCost;
return *this;
}
//---------------------------------------------------------------------------
// Polymorphic copy; the clone shares this entity's GUID (see operator=).
PSS_UserEntity* PSS_UserEntity::Clone() const
{
return new PSS_UserEntity(*this);
}
//---------------------------------------------------------------------------
#ifdef _DEBUG
// Debug-build validity check (standard MFC diagnostics hook).
void PSS_UserEntity::AssertValid() const
{
CObject::AssertValid();
}
#endif
//---------------------------------------------------------------------------
#ifdef _DEBUG
// Debug-build dump (standard MFC diagnostics hook).
void PSS_UserEntity::Dump(CDumpContext& dc) const
{
CObject::Dump(dc);
}
#endif
//---------------------------------------------------------------------------
// Reads/writes the entity. Storing always writes GUID, name, description and
// cost. Loading is version-aware: documents with internal version >= 19 carry
// the GUID as the first string; older documents start directly with the name,
// so a fresh GUID is generated for them.
void PSS_UserEntity::Serialize(CArchive& ar)
{
if (ar.IsStoring())
{
// write the elements
ar << m_GUID;
ar << m_EntityName;
ar << m_EntityDescription;
ar << m_EntityCost;
}
else
{
// read the elements
CString s;
ar >> s;
// the owning document's stamp tells which on-disk layout is being read
PSS_BaseDocument* pBaseDoc = dynamic_cast<PSS_BaseDocument*>(ar.m_pDocument);
if (pBaseDoc && pBaseDoc->GetDocumentStamp().GetInternalVersion() >= 19)
{
// modern layout: first string is the GUID
m_GUID = s;
ar >> m_EntityName;
}
else
{
// legacy layout: first string is the name; the GUID did not exist yet
CreateGUID();
m_EntityName = s;
}
ar >> m_EntityDescription;
ar >> m_EntityCost;
}
}
//---------------------------------------------------------------------------
// Assigns a brand new unique identifier to this entity.
void PSS_UserEntity::CreateGUID()
{
m_GUID = PSS_GUID::CreateNewGUID();
}
//---------------------------------------------------------------------------
|
#include "p2Defs.h"
#include "p2Log.h"
#include "j1App.h"
#include "j1Render.h"
#include "j1Textures.h"
#include "j1Fonts.h"
#include "j1Input.h"
#include "j1Gui.h"
#include <iostream>
#include <sstream>
#define LAYER 10
// Class Gui -------------------------
// -----------------------------------
// Module constructor: only sets the module name used by the config loader.
j1Gui::j1Gui() : j1Module()
{
name = "gui";
}
// Destructor
j1Gui::~j1Gui()
{}
// Called before render is available
// Reads the atlas texture path from the "atlas" node of the module config.
bool j1Gui::Awake(pugi::xml_node& conf)
{
LOG("Loading GUI atlas");
bool ret = true;
atlas_file_name = conf.child("atlas").attribute("file").as_string("");
return ret;
}
// Called before the first frame
bool j1Gui::Start()
{
    LOG("Start module gui");
    // Load the UI atlas texture once
    if (atlas == nullptr)
        atlas = App->tex->Load(atlas_file_name.c_str());
    // Remember the current camera position so UI elements can follow it
    camera_x = App->render->camera.x;
    camera_y = App->render->camera.y;
    // The module only starts successfully if the atlas is available
    return atlas != nullptr;
}
// ---------------------------------------------------------------------
// Update all UI_Elements
// ---------------------------------------------------------------------
bool j1Gui::Update(float dt)
{
// Start -------------------------------------------------
// One-shot pass: propagate window-level flags down to every child
if (start)
{
// Set variables that inherit from window to childs
for (p2PQueue_item<UI_Element*>* elements = App->gui->elements_list.start; elements != nullptr; elements = elements->next)
{
if (elements->data->type == ui_element::ui_window)
{
list<UI_Element*> childs;
App->gui->GetChilds(elements->data, childs);
for (list<UI_Element*>::iterator it = childs.begin(); it != childs.end(); it++)
{
(*it)->blit_layer = elements->data->blit_layer;
(*it)->is_ui = elements->data->is_ui;
(*it)->is_gameplay = elements->data->is_gameplay;
}
}
}
start = false;
}
// Update
// -------------------------------------------------------
// Update all elements in order
list<UI_Element*> to_top; // NOTE(review): unused
p2PQueue<UI_Element*> to_update;
for (p2PQueue_item<UI_Element*>* elements = App->gui->elements_list.start; elements != nullptr; elements = elements->next)
{
// Move elements if the camera is moving (is_ui elements are screen-anchored)
if (elements->data->is_ui && (camera_x != App->render->camera.x || camera_y != App->render->camera.y))
{
elements->data->rect.x += camera_x - App->render->camera.x;
elements->data->rect.y += camera_y - App->render->camera.y;
}
// To update if enabled
if (elements->data->enabled)
{
to_update.Push(elements->data, elements->data->blit_layer);
// Debug lines: connect each element's center to its enabled childs
if (debug)
{
for (list<UI_Element*>::iterator it = elements->data->childs.begin(); it != elements->data->childs.end(); it++)
{
if ((*it)->enabled)
{
App->render->DrawLine(elements->data->rect.x + elements->data->rect.w * 0.5f,
elements->data->rect.y + elements->data->rect.h * 0.5f,
(*it)->rect.x + (*it)->rect.w * 0.5f,
(*it)->rect.y + (*it)->rect.h * 0.5,
255, 255, 255);
}
}
}
// -------------------------------------------------
}
//Take higher layer (queue is ordered, so the last item has the highest priority)
if (elements->next == nullptr)
higher_layer = elements->priority;
}
// Update every enabled element in blit-layer order
for (p2PQueue_item<UI_Element*>* up = to_update.start; up != nullptr; up = up->next)
up->data->update();
// Move clicked elements
Move_Elements();
// Update intern camera position
camera_x = App->render->camera.x;
camera_y = App->render->camera.y;
return true;
}
// Update all guis
// Nothing to do before Update(); kept for the module interface.
bool j1Gui::PreUpdate()
{
return true;
}
// Called after all Updates
// Nothing to do after Update(); kept for the module interface.
bool j1Gui::PostUpdate()
{
return true;
}
// Called before quitting
bool j1Gui::CleanUp()
{
LOG("Freeing GUI");
App->tex->UnLoad(atlas);
// Delete every element. DeleteElement also removes the element (and its
// childs) from elements_list, so the queue shrinks each iteration.
while (elements_list.Count() > 0)
{
p2PQueue_item<UI_Element*>* elements = App->gui->elements_list.start;
DeleteElement(elements->data);
}
return true;
}
// NOTE(review): despite the getter name this returns nothing ('const void')
// and (re)loads the atlas texture as a side effect; the previously loaded
// atlas is not unloaded first — confirm callers expect this behavior.
const void j1Gui::GetAtlas() const
{
App->gui->atlas = App->tex->Load(atlas_file_name.c_str());
}
// ---------------------------------------------------------------------
// Create a new Window
// ---------------------------------------------------------------------
// Allocates a UI_Window and registers it in the element queue and the window
// list. 'blit' is the draw layer, '_dinamic' makes it draggable, '_is_ui'
// anchors it to the screen (it is shifted along with the camera in Update).
UI_Window* j1Gui::UI_CreateWin(iPoint pos, int w, int h, int blit, bool _is_gameplay, bool _dinamic, bool _is_ui)
{
UI_Window* ret = nullptr;
ret = new UI_Window();
if (ret != nullptr)
{
ret->Set(pos, w, h);
ret->dinamic = _dinamic;
ret->started_dinamic = _dinamic;
ret->is_ui = _is_ui;
ret->is_gameplay = _is_gameplay;
// Layer: windows stack in creation order
ret->layer = elements_list.Count();
ret->blit_layer = blit;
// -----
ret->type = ui_window;
ret->parent = ret; // a window is its own window-parent
elements_list.Push(ret, ret->layer);
windows.push_back(ret);
}
return ret;
}
// ---------------------------------------------------------------------
// Gets all the childs of a UI_Element.
// ---------------------------------------------------------------------
// Breadth-first walk over the child hierarchy: 'visited' receives the element
// itself plus every transitive child exactly once.
void j1Gui::GetChilds(UI_Element * element, list<UI_Element*>& visited)
{
    list<UI_Element*> frontier;
    visited.push_back(element);
    // Seed the frontier with the direct childs
    for (list<UI_Element*>::iterator it = element->childs.begin(); it != element->childs.end(); it++)
        frontier.push_back(*it);
    // Expand until no unvisited element remains.
    // Fixed: the previous version called frontier.erase(fr) inside a for loop
    // and then incremented the now-invalidated iterator — undefined behaviour
    // for std::list. Popping from the front avoids iterator invalidation.
    while (!frontier.empty())
    {
        UI_Element* current = frontier.front();
        frontier.pop_front();
        // Skip anything already collected (guards against the parent/child
        // cycles that AddChildBoth creates)
        if (std::find(visited.begin(), visited.end(), current) == visited.end() && current != element)
        {
            visited.push_back(current);
            for (list<UI_Element*>::iterator ch = current->childs.begin(); ch != current->childs.end(); ch++)
                frontier.push_back(*ch);
        }
    }
}
// ---------------------------------------------------------------------
// Gets all the parents of a UI_Element.
// ---------------------------------------------------------------------
// Walks the parent_element chain, adding the element itself and every
// ancestor to 'visited'.
void j1Gui::GetParentElements(UI_Element * element, list<UI_Element*>& visited)
{
    // The loop condition already guarantees 'curr' is non-null, so the
    // redundant inner null check of the original is gone.
    for (UI_Element* curr = element; curr != nullptr; curr = curr->parent_element)
        visited.push_back(curr);
}
// ---------------------------------------------------------------------
// Updates the PQ elements order.
// ---------------------------------------------------------------------
// Rebuilds elements_list so its internal order matches each element's current
// 'layer' value (used after PutWindowToTop rewrites the layers).
void j1Gui::ReorderElements()
{
list<UI_Element*> copy;
// Copy all elements of PQ and clean it
while (App->gui->elements_list.Count() != 0)
{
UI_Element* tmp;
App->gui->elements_list.Pop(tmp);
copy.push_back(tmp);
}
// queue is already empty at this point; Clear() is belt-and-braces
App->gui->elements_list.Clear();
// Place again the elements on the PQ (now they are on the correct order)
for (list<UI_Element*>::iterator it = copy.begin(); it != copy.end(); it++)
App->gui->elements_list.Push(*it, (*it)->layer);
}
// ---------------------------------------------------------------------
// Moves the clicked UI_Element, and it's childs, with the mouse.
// ---------------------------------------------------------------------
// Returns true on the frame a draggable element is picked up.
bool j1Gui::Move_Elements()
{
    // Fixed: the result is the function's bool return value; it was
    // previously declared as int.
    bool ret = false;
    // Click: pick the topmost movable element under the cursor
    if((App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_DOWN) && !moving)
    {
        App->input->GetMousePosition(mouse_x, mouse_y);
        mouse_x -= App->render->camera.x;
        mouse_y -= App->render->camera.y;
        // Get the object with the higher layer
        to_move = CheckClickMove(mouse_x, mouse_y);
        if (to_move != nullptr)
        {
            // Put window and childs to top
            to_move->PutWindowToTop();
            moving = true;
            ret = true;
        }
    }
    // Move: drag the element and all of its childs by the mouse delta ------
    if (moving)
    {
        // Current mouse position in world coordinates
        int curr_x; int curr_y;
        App->input->GetMousePosition(curr_x, curr_y);
        curr_x -= App->render->camera.x;
        curr_y -= App->render->camera.y;
        // Get childs (includes to_move itself)
        list<UI_Element*> visited;
        App->gui->GetChilds(to_move, visited);
        // Shift every collected element by the delta since last frame
        for (list<UI_Element*>::iterator it = visited.begin(); it != visited.end(); it++)
        {
            if (curr_x != mouse_x)
                (*it)->rect.x -= mouse_x - curr_x;
            if (curr_y != mouse_y)
                (*it)->rect.y -= mouse_y - curr_y;
        }
        // Update mouse stored in childs
        for (list<UI_Element*>::iterator it = visited.begin(); it != visited.end(); it++)
        {
            (*it)->mouse_x = curr_x;
            (*it)->mouse_y = curr_y;
        }
        // Update mouse stored in this element
        mouse_x = curr_x;
        mouse_y = curr_y;
    }
    // Release click: stop dragging
    if ((App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_UP))
    {
        to_move = nullptr;
        moving = false;
    }
    return ret;
}
// ---------------------------------------------------------------------
// Chooses the element that has to be moved.
// ---------------------------------------------------------------------
// Returns the topmost (by layer, then blit layer) clickable element under
// (x, y); if that element is not 'dinamic', falls back to its nearest
// dinamic ancestor. Returns nullptr when nothing movable was clicked.
UI_Element* j1Gui::CheckClickMove(int x, int y)
{
list<UI_Element*> elements_clicked;
// Check the UI_Elements that are in the point
for (p2PQueue_item<UI_Element*>* elements = App->gui->elements_list.start; elements != nullptr; elements = elements->next)
{
if (x > elements->data->rect.x && x < elements->data->rect.x + elements->data->rect.w)
{
if (y > elements->data->rect.y && y < elements->data->rect.y + elements->data->rect.h)
{
// Check if you can click through it and if it's enabled
if (!elements->data->click_through && elements->data->enabled)
{
elements_clicked.push_back(elements->data);
}
}
}
}
// Get the higher element
int higher_layer = -1;
int higher_blit_layer = -1;
UI_Element* higher_element = nullptr;
if (!elements_clicked.empty())
{
for (list<UI_Element*>::iterator it = elements_clicked.begin(); it != elements_clicked.end(); it++)
{
if ((*it)->layer > higher_layer && (*it)->blit_layer >= higher_blit_layer)
{
higher_layer = (*it)->layer;
higher_blit_layer = (*it)->blit_layer;
higher_element = *it;
}
}
// If the current it's not dynamic, check if there is dinamic parents
if (!higher_element->dinamic)
{
list<UI_Element*> parents_list;
App->gui->GetParentElements(higher_element, parents_list);
higher_element = nullptr;
for (list<UI_Element*>::iterator it = parents_list.begin(); it != parents_list.end(); it++)
{
if ((*it)->dinamic)
{
higher_element = *it;
break;
}
}
}
}
return higher_element;
}
// ---------------------------------------------------------------------
// Deletes and frees an UI_Element.
// ---------------------------------------------------------------------
// Unlinks the element and all of its (transitive) childs from every tracking
// structure (parent child lists, window list, priority queue) and frees them.
void j1Gui::DeleteElement(UI_Element* element)
{
    // (the original tested both nullptr and NULL; one check suffices)
    if (element == nullptr)
        return;
    // Collect the element plus all of its childs into a local list
    list<UI_Element*> childs;
    App->gui->GetChilds(element, childs);
    // Delete element and it's childs
    for (list<UI_Element*>::iterator ch = childs.begin(); ch != childs.end(); ch++)
    {
        // Unlink from the window parent's child list.
        // Fixed: this condition used '*ch == nullptr', which either
        // dereferenced a null pointer or (for valid pointers) silently
        // skipped the unlink; '!=' matches the parent_element check below.
        // Every element created through the UI_Window/j1Gui factories has
        // 'parent' set, so the dereference is safe for those elements.
        if (*ch != nullptr && (*ch)->parent->childs.size() > 0)
            (*ch)->parent->childs.remove(*ch);
        // Unlink from the direct parent element's child list
        if ((*ch)->parent_element != nullptr && (*ch)->parent_element->childs.size() > 0)
            (*ch)->parent_element->childs.remove(*ch);
        // Windows are also tracked in the dedicated window list
        if ((*ch)->type == ui_window && windows.size() > 0)
            windows.remove((UI_Window*)*ch);
        // Remove it from the priority queue by rebuilding the queue without it
        list<UI_Element*> to_add;
        while (App->gui->elements_list.Count() > 0)
        {
            UI_Element* current = nullptr;
            App->gui->elements_list.Pop(current);
            if (current != *ch)
                to_add.push_back(current);
        }
        for (list<UI_Element*>::iterator ta = to_add.begin(); ta != to_add.end(); ta++)
            App->gui->elements_list.Push((*ta), (*ta)->layer);
        // Let the element release its own resources, then free it
        (*ch)->cleanup();
        RELEASE((*ch));
    }
}
// -----------------------------------
// ------------------------- Class Gui
// -----------------------------------
// Element ---------------------------
// Base UI element: default construction/destruction, no per-frame work.
UI_Element::UI_Element()
{
}
UI_Element::~UI_Element()
{
}
// Per-frame hook; subclasses (e.g. UI_Window) override this.
bool UI_Element::update()
{
return true;
}
// Cleanup hook called by j1Gui::DeleteElement before the element is freed.
bool UI_Element::cleanup()
{
return true;
}
// Enables/disables only this element (childs are untouched).
void UI_Element::SetEnabled(bool set)
{
enabled = set;
}
// ---------------------------------------------------------------------
// Enables or disables all the childs of an UI_Element.
// ---------------------------------------------------------------------
// Applies 'set' to this element and every transitive child (GetChilds
// includes the element itself).
void UI_Element::SetEnabledAndChilds(bool set)
{
list<UI_Element*> visited;
App->gui->GetChilds(this, visited);
for (list<UI_Element*>::iterator it = visited.begin(); it != visited.end(); it++)
(*it)->enabled = set;
}
// ---------------------------------------------------------------------
// Put all elements of a window to the top of the PQ.
// ---------------------------------------------------------------------
// Re-layers the window parent and all of its childs just above the current
// highest layer, then reorders the element queue to match. Always returns true.
bool UI_Element::PutWindowToTop()
{
    bool ret = true;
    // The window parent plus all of its childs.
    // (The unused 'copy' list of the original has been removed.)
    list<UI_Element*> visited;
    // Get childs from the window parent
    App->gui->GetChilds(parent, visited);
    // Assign consecutive layers starting just above the current maximum
    int i = 0;
    for (list<UI_Element*>::iterator it = visited.begin(); it != visited.end(); it++, i++)
        (*it)->layer = App->gui->higher_layer + i + 1;
    // Reorder the elements of the PQ to match the new layers
    App->gui->ReorderElements();
    return ret;
}
// Top-left position of the element's rectangle.
iPoint UI_Element::GetPos()
{
return iPoint(rect.x, rect.y);
}
// Moves the element to 'newpos', dragging every transitive child by the same
// delta (GetChilds includes 'this', so the element itself moves too).
void UI_Element::SetPos(iPoint newpos)
{
list<UI_Element*> childs;
App->gui->GetChilds(this, childs);
iPoint distance(newpos.x - GetPos().x, newpos.y - GetPos().y);
for (list<UI_Element*>::iterator it = childs.begin(); it != childs.end(); it++)
{
(*it)->rect.x += distance.x;
(*it)->rect.y += distance.y;
}
}
// ---------------------------------------------------------------------
// Detects the highest layer of a clicked point.
// ---------------------------------------------------------------------
// Returns the highest 'layer' value among the enabled, non-click-through
// elements containing (x, y); -1 when nothing is under the point.
int UI_Element::CheckClickOverlap(int x, int y)
{
list<UI_Element*> contactors;
// Check the UI_Elements that are in the point
for (p2PQueue_item<UI_Element*>* elements = App->gui->elements_list.start; elements != nullptr; elements = elements->next)
{
if (x > elements->data->rect.x && x < elements->data->rect.x + elements->data->rect.w)
{
if (y > elements->data->rect.y && y < elements->data->rect.y + elements->data->rect.h)
{
// Check if is dinamic
if (!elements->data->click_through && elements->data->enabled)
contactors.push_back(elements->data);
}
}
}
// Get the higher layer
int higher_layer = -1;
int higher_blit_layer = -1;
if (!contactors.empty())
{
for (list<UI_Element*>::iterator it = contactors.begin(); it != contactors.end(); it++)
{
if ((*it)->layer > higher_layer && (*it)->blit_layer >= higher_blit_layer)
{
higher_layer = (*it)->layer;
higher_blit_layer = (*it)->blit_layer;
}
}
}
return higher_layer;
}
// True when (x, y) lies strictly inside this element's rectangle.
bool UI_Element::CheckClickRect(int x, int y)
{
    const bool inside_x = (x > rect.x) && (x < rect.x + rect.w);
    const bool inside_y = (y > rect.y) && (y < rect.y + rect.h);
    return inside_x && inside_y;
}
// ---------------------------------------------------------------------
// Adds a child to an UI_Element.
// ---------------------------------------------------------------------
void UI_Element::AddChild(UI_Element * _child)
{
childs.push_back(_child);
_child->parent_element = this;
}
// ---------------------------------------------------------------------
// Adds both childs one to the other to avoid the overlaping check. (Deprecated?)
// ---------------------------------------------------------------------
// NOTE(review): this creates a parent/child cycle; GetChilds tolerates it
// (visited check) but the mutual parent_element link is easy to misuse.
void UI_Element::AddChildBoth(UI_Element * _child)
{
childs.push_back(_child);
_child->parent_element = this;
_child->childs.push_back(this);
this->parent_element = _child;
}
// ---------------------------------------------------------------------
// Mouse check functions.
// ---------------------------------------------------------------------
// Returns true (and latches 'clicked') when the left button goes down with
// the cursor inside this element's rectangle (world coordinates).
bool UI_Element::MouseClickEnterLeftIntern()
{
if (App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_DOWN)
{
int mouse_x, mouse_y;
App->input->GetMousePosition(mouse_x, mouse_y);
// convert screen coordinates to world coordinates
mouse_x -= App->render->camera.x;
mouse_y -= App->render->camera.y;
if (mouse_x > rect.x && mouse_x < rect.x + rect.w)
{
if (mouse_y > rect.y && mouse_y < rect.y + rect.h)
{
clicked = true;
return true;
}
}
}
return false;
}
// Fires once when the left button is released after this element was clicked
// (clears the 'clicked' latch set by MouseClickEnterLeftIntern).
bool UI_Element::MouseClickOutLeftIntern()
{
    const bool released = (App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_UP);
    if (released && clicked)
    {
        clicked = false;
        return true;
    }
    return false;
}
// Sets the colour used when drawing this element's debug outline.
void UI_Element::SetDebugColor(SDL_Color _color)
{
    color.r = _color.r;
    color.g = _color.g;
    color.b = _color.b;
    color.a = _color.a;
}
// -----------------------------------
// --------------------------- Element
// -----------------------------------
// Window ----------------------------
UI_Window::UI_Window()
{
}
UI_Window::~UI_Window()
{
}
// Draws the window outline when GUI debug mode is on; a window has no visual
// of its own otherwise.
bool UI_Window::update()
{
if (App->gui->debug)
App->render->DrawQuad(rect, color.r, color.g, color.b, color.a, false);
return true;
}
// Places and sizes the window, and resets the debug colour to opaque white.
void UI_Window::Set(iPoint pos, int w, int h)
{
rect.x = pos.x;
rect.y = pos.y;
rect.w = w;
rect.h = h;
color.r = color.g = color.b = color.a = 255;
}
// ---------------------------------------------------------------------
// Create a button linked to the current window
// ---------------------------------------------------------------------
UI_Button* UI_Window::CreateButton(iPoint pos, int w, int h, bool _dinamic)
{
// Force the one-shot window-flag propagation in j1Gui::Update
App->gui->start = true;
UI_Button* ret = nullptr;
ret = new UI_Button();
if (ret != nullptr)
{
ret->type = ui_button;
ret->Set(pos, w, h);
ret->parent = this;
ret->parent_element = this;
ret->dinamic = _dinamic;
ret->started_dinamic = _dinamic;
ret->is_gameplay = is_gameplay;
// Layers: one above the window's last child
ret->layer = childs.size() + layer + 1;
// ---------
App->gui->elements_list.Push(ret, ret->layer);
childs.push_back((UI_Element*)ret);
}
return ret;
}
// ---------------------------------------------------------------------
// Create text linked to the current window
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Create text linked to the current window
// ---------------------------------------------------------------------
UI_Text* UI_Window::CreateText(iPoint pos, _TTF_Font * font, int spacing, bool _dinamic, uint r, uint g, uint b)
{
    App->gui->start = true;

    UI_Text* txt = new UI_Text();
    if (txt == nullptr)
        return nullptr;

    txt->type = ui_text;
    txt->Set(pos, font, spacing, r, g, b);
    txt->parent = this;
    txt->parent_element = this;
    txt->dinamic = _dinamic;
    txt->started_dinamic = _dinamic;
    txt->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    txt->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(txt, txt->layer);
    childs.push_back((UI_Element*)txt);
    return txt;
}
// ---------------------------------------------------------------------
// Create an image linked to the current window
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Create an image linked to the current window
// ---------------------------------------------------------------------
UI_Image* UI_Window::CreateImage(iPoint pos, SDL_Rect image, bool _dinamic)
{
    App->gui->start = true;

    UI_Image* img = new UI_Image();
    if (img == nullptr)
        return nullptr;

    img->type = ui_image;
    img->Set(pos, image);
    img->parent = this;
    img->parent_element = this;
    img->dinamic = _dinamic;
    img->started_dinamic = _dinamic;
    img->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    img->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(img, img->layer);
    childs.push_back((UI_Element*)img);
    return img;
}
// ---------------------------------------------------------------------
// Create a text input box to the current window
// ---------------------------------------------------------------------
// ---------------------------------------------------------------------
// Create a text input box to the current window
// ---------------------------------------------------------------------
UI_Text_Input* UI_Window::CreateTextInput(iPoint pos, int w, _TTF_Font* font, bool _dinamic, uint r, uint g, uint b)
{
    App->gui->start = true;

    UI_Text_Input* input = new UI_Text_Input();
    if (input == nullptr)
        return nullptr;

    input->type = ui_text_input;
    input->Set(pos, w, font, r, g, b);
    input->parent = this;
    input->parent_element = this;
    input->dinamic = _dinamic;
    input->started_dinamic = _dinamic;

    // The embedded UI_Text must render in the same space as the input box.
    input->is_gameplay = is_gameplay;
    input->text->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    input->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(input, input->layer);
    childs.push_back((UI_Element*)input);
    return input;
}
// Create a scrollable view (with vertical/horizontal bars) inside this window.
UI_Scroll_Bar * UI_Window::CreateScrollBar(iPoint pos, int view_w, int view_h, int button_size, bool _dinamic)
{
    App->gui->start = true;

    UI_Scroll_Bar* scroll = new UI_Scroll_Bar();
    if (scroll == nullptr)
        return nullptr;

    scroll->type = ui_scroll_bar;
    scroll->Set(pos, view_w, view_h, button_size);
    scroll->parent = this;
    scroll->parent_element = this;
    scroll->dinamic = _dinamic;
    scroll->started_dinamic = _dinamic;
    scroll->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    scroll->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(scroll, scroll->layer);
    childs.push_back((UI_Element*)scroll);
    return scroll;
}
// Create a solid/outlined colored rectangle inside this window.
UI_ColoredRect * UI_Window::CreateColoredRect(iPoint pos, int w, int h, SDL_Color color, bool filled, bool _dinamic)
{
    App->gui->start = true;

    UI_ColoredRect* colored = new UI_ColoredRect();
    if (colored == nullptr)
        return nullptr;

    colored->type = ui_colored_rect;
    colored->Set(pos, w, h, color, filled);
    colored->parent = this;
    colored->parent_element = this;
    colored->dinamic = _dinamic;
    colored->started_dinamic = _dinamic;
    colored->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    colored->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(colored, colored->layer);
    childs.push_back((UI_Element*)colored);
    return colored;
}
// Create a check-box group inside this window; boxes are added via AddBox().
UI_Check_Box * UI_Window::CreateCheckBox(iPoint pos, int w, int h, SDL_Rect pressed, SDL_Rect idle, bool multiple_choices, bool _dinamic)
{
    App->gui->start = true;

    UI_Check_Box* boxes = new UI_Check_Box();
    if (boxes == nullptr)
        return nullptr;

    boxes->type = ui_check_box;
    boxes->Set(pos, w, h, pressed, idle, multiple_choices);
    boxes->parent = this;
    boxes->parent_element = this;
    boxes->dinamic = _dinamic;
    boxes->started_dinamic = _dinamic;
    boxes->is_gameplay = is_gameplay;

    // New child goes one layer above this window's current child count.
    boxes->layer = childs.size() + layer + 1;

    App->gui->elements_list.Push(boxes, boxes->layer);
    childs.push_back((UI_Element*)boxes);
    return boxes;
}
// -----------------------------------
// ---------------------------- Window
// -----------------------------------
// Button ----------------------------
// Members are initialized by UI_Element defaults; Set() does the real setup.
UI_Button::UI_Button()
{
}
// Atlas rects in rect_list are plain values — nothing to release here.
UI_Button::~UI_Button()
{
}
void UI_Button::Set(iPoint _pos, int w, int h)
{
    // Position/size the clickable area; opaque white debug color.
    rect.x = _pos.x;
    rect.y = _pos.y;
    rect.h = h;
    rect.w = w;
    color.a = 255;
    color.b = 255;
    color.g = 255;
    color.r = 255;
}
bool UI_Button::update()
{
    // Draw the button (and debug bounds) and commit this frame's mouse state.
    // Returns false when the element is disabled.
    if (!enabled)
        return false;

    if (App->gui->debug)
        App->render->DrawQuad(rect, color.r, color.g, color.b, -1.0f, color.a, false);

    // Only screen-space blitting is implemented; gameplay-space blitting
    // through App->view was left disabled (WIP) in the original code.
    if (print && !is_gameplay)
        App->render->Blit(App->gui->atlas, rect.x, rect.y, &curr);

    // Latch the deferred enter/click flags computed by the mouse checks.
    ChangeButtonStats();
    return true;
}
void UI_Button::ChangeButtonStats()
{
    // Latch the deferred flags set by the Mouse*() checks this frame.
    // (Each original if/if-not pair is exactly a direct assignment.)
    enter = to_enter;
    clicked_left = to_clicked_left;
    clicked_right = to_clicked_right;
}
bool UI_Button::MouseEnter()
{
    // True only on the frame the cursor first enters this button's rect
    // while no higher-layer element covers it.
    if (!enabled)
        return false;

    int cursor_x = 0, cursor_y = 0;
    App->input->GetMousePosition(cursor_x, cursor_y);
    cursor_x -= App->render->camera.x;
    cursor_y -= App->render->camera.y;

    // Another element is on top of us at this point: no enter event.
    if (CheckClickOverlap(cursor_x, cursor_y) != layer)
        return false;

    if (CheckClickRect(cursor_x, cursor_y) && !enter)
    {
        to_enter = true;
        return true;
    }
    return false;
}
bool UI_Button::MouseOut()
{
    // True on the frame the cursor leaves the button (or when a higher-layer
    // element covers it while we were not flagged as inside).
    if (!enabled)
        return false;

    int cursor_x = 0, cursor_y = 0;
    App->input->GetMousePosition(cursor_x, cursor_y);
    cursor_x -= App->render->camera.x;
    cursor_y -= App->render->camera.y;

    if (CheckClickOverlap(cursor_x, cursor_y) != layer && !enter)
        return true;

    // Still inside the rect: not an out event.
    if (CheckClickRect(cursor_x, cursor_y))
        return false;

    if (!enter)
        return false;

    to_enter = false;
    return true;
}
bool UI_Button::MouseClickEnterLeft()
{
    // True on the frame the left button is pressed on this button while it
    // is the topmost element under the cursor.
    if (!enabled)
        return false;
    if (App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) != KEY_DOWN)
        return false;

    int cursor_x = 0, cursor_y = 0;
    App->input->GetMousePosition(cursor_x, cursor_y);
    cursor_x -= App->render->camera.x;
    cursor_y -= App->render->camera.y;

    if (CheckClickOverlap(cursor_x, cursor_y) != layer)
        return false;

    if (CheckClickRect(cursor_x, cursor_y))
    {
        to_clicked_left = true;
        return true;
    }
    return false;
}
bool UI_Button::MouseClickOutLeft()
{
    // Release event: only fires if a left click was registered earlier.
    if (!enabled)
        return false;

    const bool released = App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_UP;
    if (released && clicked_left)
    {
        to_clicked_left = false;
        return true;
    }
    return false;
}
bool UI_Button::MouseClickEnterRight()
{
if (!enabled)
return false;
if (App->input->GetMouseButtonDown(SDL_BUTTON_RIGHT) == KEY_DOWN)
{
int mouse_x, mouse_y;
App->input->GetMousePosition(mouse_x, mouse_y);
mouse_x -= App->render->camera.x;
mouse_y -= App->render->camera.y;
if (CheckClickOverlap(mouse_x, mouse_y) != layer)
return false;
if (mouse_x > rect.x && mouse_x < rect.x + rect.w)
{
if (mouse_y > rect.y && mouse_y < rect.y + rect.h)
{
to_clicked_right = true;
return true;
}
}
}
return false;
}
bool UI_Button::MouseClickOutRight()
{
    // Release event: only fires if a right click was registered earlier.
    if (!enabled)
        return false;

    const bool released = App->input->GetMouseButtonDown(SDL_BUTTON_RIGHT) == KEY_UP;
    if (released && clicked_right)
    {
        to_clicked_right = false;
        return true;
    }
    return false;
}
void UI_Button::AddImage(char* name, SDL_Rect rect)
{
    // Register a named atlas rect this button can later switch to.
    rect_list.push_back(rect_text(name, rect));
}
// Make the atlas rect registered under `name` the current image.
// NOTE(review): the name lookup is unimplemented (WIP) — the loop body is
// empty, so this function currently has no effect.
void UI_Button::SetImage(char* name)
{
for (list<rect_text>::iterator it = rect_list.begin(); it != rect_list.end(); it++)
{
//WIP
}
}
// -----------------------------------
// ---------------------------- Button
// -----------------------------------
// Text ------------------------------
// Members are initialized by UI_Element defaults; Set()/SetText() do setup.
UI_Text::UI_Text()
{
}
// Line textures are released in cleanup(), not here.
UI_Text::~UI_Text()
{
}
void UI_Text::Set(iPoint _pos, _TTF_Font* _font, int _spacing, uint r, uint g, uint b)
{
    // Fully opaque text color.
    color.r = r;
    color.g = g;
    color.b = b;
    color.a = 255;

    font = _font;
    spacing = _spacing;

    // Width/height are recomputed each update() from the rendered lines;
    // -1 marks them as not-yet-measured.
    rect.x = _pos.x;
    rect.y = _pos.y;
    rect.w = -1;
    rect.h = -1;
}
void UI_Text::SetText(string _text)
{
    // Replace the content: release the textures of the previous lines first.
    for (list<tex_str>::iterator it = tex_str_list.begin(); it != tex_str_list.end(); it++)
        App->tex->UnLoad((*it).texture);
    tex_str_list.clear();

    // Split on '\n' and render one texture per line using std::string's own
    // find/substr instead of the old hand-rolled character loop (which also
    // made a pointless copy of _text and wrote to comp[comp.size()]).
    // Semantics preserved: "" yields no lines, a trailing '\n' does not
    // produce an extra empty line, "\n" yields a single empty line.
    size_t start = 0;
    while (start < _text.size())
    {
        size_t end = _text.find('\n', start);
        if (end == string::npos)
            end = _text.size();
        const string line = _text.substr(start, end - start);
        tex_str_list.push_back(tex_str(line.c_str(), App->font->Print(line.c_str(), color, font)));
        start = end + 1;
    }
}
string UI_Text::GetText()
{
    // Rebuild the full text from the stored lines, re-inserting the '\n'
    // separators that SetText() split on.
    // Bug fix: the old code inserted each line at an offset one past the end
    // of the accumulated string — std::string::insert throws
    // std::out_of_range when pos > size(), so any text with more than one
    // line blew up (and the final `ret[acumulated]` wrote out of bounds).
    string ret;
    for (list<tex_str>::iterator it = tex_str_list.begin(); it != tex_str_list.end(); it++)
    {
        if (!ret.empty())
            ret += '\n';
        ret += (*it).text;
    }
    return ret;
}
bool UI_Text::update()
{
    // Recompute the text bounds and (in debug) draw them.
    // Returns false when the element is disabled.
    if (!enabled)
        return false;

    // Bounding box: width of the widest line, sum of all line heights.
    // CalcSize writes into rect.w/rect.h, which we then overwrite with the
    // aggregated values.
    int w = 0, h = 0;
    for (list<tex_str>::iterator it = tex_str_list.begin(); it != tex_str_list.end(); it++)
    {
        App->font->CalcSize((*it).text.c_str(), rect.w, rect.h, font);
        h += rect.h;
        if (rect.w > w)
            w = rect.w;
    }
    rect.w = w;
    rect.h = h;

    if (App->gui->debug)
        App->render->DrawQuad(rect, color.r, color.g, color.b, -1.0f, color.a, false);

    // Blitting of the rendered lines is still unimplemented: the old code
    // held an empty WIP loop (with an unused `space` counter) under
    // `if (print)`, removed here as dead code.
    return true;
}
bool UI_Text::cleanup()
{
for (list<tex_str>::iterator it = tex_str_list.begin(); it != tex_str_list.end(); it++)
App->tex->UnLoad((*it).texture);
return true;
}
// -----------------------------------
// ------------------------------ Text
// -----------------------------------
// Image -----------------------------
// Members are initialized by UI_Element defaults; Set() does the real setup.
UI_Image::UI_Image()
{
}
// The atlas texture is shared (App->gui->atlas) — nothing to release here.
UI_Image::~UI_Image()
{
}
void UI_Image::Set(iPoint _pos, SDL_Rect _image)
{
    // Source rect in the atlas.
    image.x = _image.x;
    image.y = _image.y;
    image.w = _image.w;
    image.h = _image.h;

    // Element bounds take the atlas rect's dimensions.
    rect.x = _pos.x;
    rect.y = _pos.y;
    rect.w = _image.w;
    rect.h = _image.h;

    // Opaque white debug color.
    color.a = 255;
    color.b = 255;
    color.g = 255;
    color.r = 255;
}
void UI_Image::ChangeImage(SDL_Rect _rect)
{
    // Swap the atlas source rect and resize the element to match it.
    image.x = _rect.x;
    image.y = _rect.y;
    image.w = _rect.w;
    image.h = _rect.h;
    rect.w = _rect.w;
    rect.h = _rect.h;
}
bool UI_Image::update()
{
    // Blit the atlas rect (and debug bounds). Returns false when disabled.
    if (!enabled)
        return false;

    if (App->gui->debug)
        App->render->DrawQuad(rect, color.r, color.g, color.b, -1.0f, color.a, false);

    // Only screen-space blitting is implemented; gameplay space is WIP.
    if (print && !is_gameplay)
        App->render->Blit(App->gui->atlas, rect.x, rect.y, &image);

    return true;
}
// -----------------------------------
// ----------------------------- Image
// -----------------------------------
// Text Input ------------------------
// Members are initialized by UI_Element defaults; Set() does the real setup.
UI_Text_Input::UI_Text_Input()
{
}
// The child UI_Text is handed to the gui module in cleanup(), not freed here.
UI_Text_Input::~UI_Text_Input()
{
}
// Configure the input box at `pos`, `w` pixels wide, rendering with `font`
// in the given color. The box height is derived from the font's glyph height.
void UI_Text_Input::Set(iPoint pos, int w, _TTF_Font* font, uint r, uint g, uint b)
{
rect.x = pos.x;
rect.y = pos.y;
rect.w = w;
// Child element that actually renders the typed string.
text = new UI_Text();
text->Set(iPoint(0, 0), font, 0, r, g, b);
// Set bar size
// Caret height = rendered height of "@". NOTE(review): the width output of
// CalcSize lands in bar.x, which DrawBar() overwrites every frame — looks
// like a scratch output (bar.w was probably intended); confirm.
App->font->CalcSize("@", bar.x, bar.h, font);
bar.w = 1;
rect.h = bar.h;
// Remember the camera so update() can compensate for camera movement.
camera_before.x = App->render->camera.x;
camera_before.y = App->render->camera.y;
color.r = color.g = color.b = color.a = 255;
// No horizontal scroll yet.
text_offset = 0;
}
bool UI_Text_Input::update()
{
    // Per-frame logic: debug bounds, deferred text changes, keyboard input
    // while active, caret drawing, and rendering of the child text clipped
    // to this element's viewport. Returns false when disabled.
    if (!enabled)
        return false;

    if (App->gui->debug)
        App->render->DrawQuad(rect, color.r, color.g, color.b, -1.0f, color.a, false);

    // (Removed: unused debugging local `string test = intern_text;`.)

    if (print)
    {
        SetIsActive();

        if (intern_text.size() == 0 && active)
            text->SetText("");

        // Apply a pending SetTextInput() request, if any.
        ChangeTextInput();

        if (active)
        {
            // Typing, deleting or moving the caret all require the caret
            // offset (and the password mask) to be rebuilt.
            if (TakeInput() || Delete() || MoveCursor())
            {
                if (!pasword)
                    SetBarPos(intern_text.substr(0, bar_pos));
                else
                    SetPasword();
            }
            DrawBar();
        }

        // Render the child text clipped to the input box.
        App->render->SetViewPort({ rect.x + App->render->camera.x, rect.y + App->render->camera.y, rect.w, rect.h });
        text->update();
        App->render->ResetViewPort();

        // Compensate the child text position for camera movement so the
        // text stays glued to the box.
        if (camera_before.x != App->render->camera.x)
        {
            text->rect.x += camera_before.x - App->render->camera.x;
            camera_before.x = App->render->camera.x;
        }
        if (camera_before.y != App->render->camera.y)
        {
            text->rect.y += camera_before.y - App->render->camera.y;
            camera_before.y = App->render->camera.y;
        }
    }
    return true;
}
bool UI_Text_Input::cleanup()
{
    // The child text element is registered with the gui module, so ask the
    // module to destroy it rather than deleting it directly.
    App->gui->DeleteElement(text);
    return true;
}
void UI_Text_Input::SetTextInput(string text)
{
    // Defer the change; ChangeTextInput() applies it on the next update().
    change = true;
    text_change = text;
}
bool UI_Text_Input::TakeInput()
{
    // Consume a single typed character from the input module, if present.
    // (size() > 0 && size() < 2 in the original is exactly size() == 1.)
    if (App->input->input_text.size() != 1)
        return false;

    // Insert at the caret and re-render.
    intern_text.insert(bar_pos, App->input->input_text.c_str());
    text->SetText(intern_text);

    // The character has been consumed.
    App->input->input_text.clear();

    // Caret advances past the inserted character.
    bar_pos++;
    return true;
}
// Handle BACKSPACE (erase the char before the caret) and DELETE (erase the
// char under it). When the view is scrolled left (text_offset < 0) the
// removed glyph's width is given back to the offset so the text slides
// right. Returns true if the text changed.
bool UI_Text_Input::Delete()
{
bool ret = false;
if (App->input->GetKey(SDL_SCANCODE_BACKSPACE) == KEY_DOWN)
{
if (intern_text.size() > 0 && bar_pos > 0)
{
// Dynamic movement
if (text_offset < 0)
{
// Width of the glyph being removed (left of the caret).
int to_erase = GetTextSize(intern_text.substr(bar_pos - 1, 1));
if (text_offset + to_erase > 0)
{
// Giving the width back would over-shoot: snap to unscrolled.
text_offset = 0;
text->rect.x = 0;
}
else
{
text_offset += to_erase;
text->rect.x += to_erase;
}
}
// ---------------
intern_text.erase(bar_pos-1, 1);
bar_pos--;
text->SetText(intern_text);
ret = true;
}
}
else if (App->input->GetKey(SDL_SCANCODE_DELETE) == KEY_DOWN)
{
if (intern_text.size() > 0 && bar_pos < intern_text.size())
{
// Dynamic movement
if (text_offset < 0)
{
// Width of the glyph being removed (under the caret).
int to_erase = GetTextSize(intern_text.substr(bar_pos, 1));
if (text_offset + to_erase > 0)
{
text_offset = 0;
text->rect.x = 0;
}
else
{
text_offset += to_erase;
text->rect.x += to_erase;
}
}
// ---------------
// Forward delete: the caret does not move.
intern_text.erase(bar_pos, 1);
text->SetText(intern_text);
ret = true;
}
}
return ret;
}
bool UI_Text_Input::MoveCursor()
{
    // Arrow keys move the caret one character, clamped to the text bounds.
    // Returns true if the caret actually moved.
    bool moved = false;

    if (App->input->GetKey(SDL_SCANCODE_LEFT) == KEY_DOWN && bar_pos > 0)
    {
        bar_pos--;
        moved = true;
    }
    if (App->input->GetKey(SDL_SCANCODE_RIGHT) == KEY_DOWN && bar_pos < intern_text.size())
    {
        bar_pos++;
        moved = true;
    }
    return moved;
}
// Recompute the caret's x offset from the rendered width of `_text` (the
// part of the content left of the caret). Measured twice on purpose:
// DinamicViewport() may scroll the view, changing text_offset, which shifts
// the caret position again.
void UI_Text_Input::SetBarPos(string _text)
{
int width, height;
App->font->CalcSize(_text.c_str(), width, height, text->font);
bar_x = width + text_offset;
DinamicViewport();
bar_x = width + text_offset;
}
int UI_Text_Input::GetTextSize(string _text)
{
    // Rendered pixel width of _text in the input's font; height is unused.
    int text_w = 0;
    int text_h = 0;
    App->font->CalcSize(_text.c_str(), text_w, text_h, text->font);
    return text_w;
}
void UI_Text_Input::DrawBar()
{
    // Draw the caret as a filled 1px-wide quad at its current offset.
    bar.y = rect.y;
    bar.x = rect.x + bar_x;
    App->render->DrawQuad(bar, color.r, color.g, color.b, -1.0f, color.a, true);
}
// Scroll the visible window of the text when the caret would leave it.
// text_offset is always <= 0: the number of pixels the text is shifted left.
void UI_Text_Input::DinamicViewport()
{
// Right
// Caret past the right edge: shift the text left so the caret lands on it.
if (bar_x > rect.w)
{
text_offset -= bar_x - rect.w;
text->rect.x -= bar_x - rect.w;
}
// Left
// Caret at/before the left edge while scrolled: shift the text back right.
if (bar_pos >= 0 && text_offset < 0 && bar_x <= 0)
{
text_offset -= bar_x;
text->rect.x -= bar_x;
}
}
void UI_Text_Input::SetPasword()
{
string tmp;
for (int i = 0; i < intern_text.size(); i++)
tmp.insert(i, 1, '*');
text->SetText(tmp);
SetBarPos(tmp);
}
void UI_Text_Input::ChangeTextInput()
{
    // Apply a pending SetTextInput() request: swap in the new text and move
    // the caret to its end.
    if (change)
    {
        text->SetText(text_change);
        // Direct string assignment; the old `text_change.c_str()` round-trip
        // was a redundant copy through a C string.
        intern_text = text_change;
        SetBarPos(intern_text);
        bar_pos = intern_text.size();
        change = false;
    }
}
// Reset the logical content and caret of the input box.
// NOTE(review): the on-screen UI_Text and text_offset are NOT reset here —
// update() re-renders the (now empty) text while the box is active, but
// confirm callers don't rely on an immediate visual clear.
void UI_Text_Input::Clear()
{
intern_text.clear();
bar_pos = 0;
bar_x = 0;
}
void UI_Text_Input::SetIsActive()
{
    // A left click (de)activates the box depending on whether this element
    // is the topmost one under the cursor.
    if (App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) != KEY_DOWN)
        return;

    int cursor_x = 0, cursor_y = 0;
    App->input->GetMousePosition(cursor_x, cursor_y);
    cursor_x -= App->render->camera.x;
    cursor_y -= App->render->camera.y;

    active = (CheckClickOverlap(cursor_x, cursor_y) == layer);
}
bool UI_Text_Input::MouseClick()
{
    // True on the frame the left button is pressed inside this element while
    // it is the topmost one under the cursor.
    if (App->input->GetMouseButtonDown(SDL_BUTTON_LEFT) == KEY_DOWN)
    {
        int mouse_x, mouse_y;
        App->input->GetMousePosition(mouse_x, mouse_y);
        // Consistency fix: convert to world space BEFORE the overlap test.
        // The old code passed raw screen coordinates to CheckClickOverlap(),
        // unlike SetIsActive() and every UI_Button mouse check.
        mouse_x -= App->render->camera.x;
        mouse_y -= App->render->camera.y;
        if (CheckClickOverlap(mouse_x, mouse_y) != layer)
            return false;
        if (mouse_x > rect.x && mouse_x < rect.x + rect.w)
        {
            if (mouse_y > rect.y && mouse_y < rect.y + rect.h)
            {
                return true;
            }
        }
    }
    return false;
}
// -----------------------------------
// ------------------------ Text Input
// -----------------------------------
// Scroll Bar ------------------------
// Members are initialized by UI_Element defaults; Set() builds the bars.
UI_Scroll_Bar::UI_Scroll_Bar()
{
}
// Attached elements are destroyed in cleanup(), not here.
UI_Scroll_Bar::~UI_Scroll_Bar()
{
}
// Build the scroll view at `pos` with a visible area of view_w x view_h and
// two draggable bar buttons (vertical and horizontal) of `button_size`
// thickness placed just outside the view.
void UI_Scroll_Bar::Set(iPoint pos, int view_w, int view_h, int button_size)
{
// Viewport
rect.x = pos.x;
rect.y = pos.y;
rect.w = view_w;
rect.h = view_h;
// Initial view size; the bar lengths are later derived from the ratio of
// view size to content size.
starting_v = view_h;
starting_h = view_w;
// moving_rect tracks the full (scrollable) content area; starts at view size.
moving_rect.x = pos.x;
moving_rect.y = pos.y;
moving_rect.w = view_w;
moving_rect.h = view_h;
// Button vertical ---
button_v = new UI_Button();
button_v->Set(iPoint(view_w + button_size, pos.y), button_size, view_h);
// Bars sit above everything registered so far so they win the overlap test.
button_v->layer = App->gui->elements_list.Count() + 1;
AddChild(button_v);
button_starting_v = button_v->rect.h;
App->gui->elements_list.Push(button_v, button_v->layer);
// ----------
// Button horizontal ---
button_h = new UI_Button();
button_h->Set(iPoint(pos.x, pos.y + view_h), view_w, button_size);
button_h->layer = App->gui->elements_list.Count() + 2;
AddChild(button_h);
button_starting_h = button_h->rect.w;
App->gui->elements_list.Push(button_h, button_h->layer);
// ----------
// Min and max bar movement allowed
min_bar_v = pos.y;
max_bar_v = min_bar_v + view_h;
min_bar_h = pos.x;
max_bar_h = min_bar_h + view_w;
color.r = color.g = color.b = color.a = 255;
}
// Per-frame: debug drawing, rendering of attached elements clipped to the
// view, bar resizing/dragging, and locking a draggable parent while a bar
// is being dragged. Returns false when disabled.
bool UI_Scroll_Bar::update()
{
if (!enabled)
return false;
if (App->gui->debug)
{
// Content area, view area, and the travel lines of both bar buttons.
App->render->DrawQuad(moving_rect, color.r, color.g, color.b, -1.0f, color.a, false);
App->render->DrawQuad(rect, 255, 0, 0, -1.0f, 255, false);
App->render->DrawLine(button_v->rect.x + (button_v->rect.w / 2), min_bar_v, button_v->rect.x + (button_v->rect.w / 2), max_bar_v, color.r, color.g, color.b, -1.0f, color.a);
App->render->DrawLine(min_bar_h, button_h->rect.y + (button_h->rect.h/2), max_bar_h, button_h->rect.y + (button_h->rect.h / 2), color.r, color.g, color.b, -1.0f, color.a);
}
// Viewport -----------
// Attached elements render clipped to the visible area.
App->render->SetViewPort({ rect.x + App->render->camera.x, rect.y + App->render->camera.y, rect.w, rect.h});
// rect.x + rect.w + App->render->camera.x
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
(*it).element->update();
App->render->ResetViewPort();
// --------------------
ChangeHeightMovingRect();
ChangeWidthMovingRect();
MoveBarV();
MoveBarH();
// Lock element when moving scroll bars
// While dragging either bar, a draggable parent window must not move along.
if (parent->started_dinamic)
{
if (button_v->MouseClickEnterLeft() || button_h->MouseClickEnterLeft())
parent->dinamic = false;
if (button_v->MouseClickOutLeft() || button_h->MouseClickOutLeft())
parent->dinamic = true;
}
return true;
}
bool UI_Scroll_Bar::cleanup()
{
    // Destroy every element attached to this scroll view.
    ClearElements();
    return true;
}
void UI_Scroll_Bar::AddElement(UI_Element * element)
{
    // Track the element together with its original position; the scroll
    // offset is applied relative to that position every frame.
    scroll_element entry;
    entry.element = element;
    entry.element->parent = parent;
    entry.starting_pos_x = element->rect.x;
    entry.starting_pos_y = element->rect.y;
    elements.push_back(entry);
}
void UI_Scroll_Bar::DeleteScrollElement(UI_Element * element)
{
    // Find the wrapper entry for this element, unlink it from the scroll
    // list, and hand the element to the gui module for destruction.
    for (list<scroll_element>::iterator entry = elements.begin(); entry != elements.end(); ++entry)
    {
        if ((*entry).element != element)
            continue;
        elements.remove(*entry);
        App->gui->DeleteElement(element);
        break;
    }
}
void UI_Scroll_Bar::ClearElements()
{
    // Destroy every attached element and empty the list.
    // The old version re-entered a fresh for-loop from begin() after every
    // single removal (an O(n^2) while/for/remove/break dance) and finished
    // with a redundant clear(); popping the front entry is equivalent and
    // linear.
    while (!elements.empty())
    {
        App->gui->DeleteElement(elements.front().element);
        elements.pop_front();
    }
}
// Grow/shrink the content rect's height to reach the lowest attached
// element, then resize the vertical bar proportionally to the view/content
// ratio.
void UI_Scroll_Bar::ChangeHeightMovingRect()
{
// Taking lowest element vertical --
// Element positions are converted back into content space by undoing the
// current scroll (min_bar_v - moving_rect.y) and the camera offset.
int lowest = 0;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
if (((min_bar_v - moving_rect.y) + (*it).element->rect.y + (*it).element->rect.h + App->render->camera.y) > lowest)
lowest = ((min_bar_v - moving_rect.y) + (*it).element->rect.y + (*it).element->rect.h) + App->render->camera.y;
}
// ----------------------------------
moving_rect.h = lowest;
// Inverse rule of thirds
// bar_h = full_bar_h * view_h / content_h, clamped to [20, full size].
if (moving_rect.h > 0)
{
button_v->rect.h = (button_starting_v * starting_v) / moving_rect.h;
if (button_v->rect.h < 20)
button_v->rect.h = 20;
if (button_v->rect.h > button_starting_v)
button_v->rect.h = button_starting_v;
}
else
{
// No content: park the bar at the top at full size.
button_v->rect.y = min_bar_v;
button_v->rect.h = button_starting_v;
}
// Update min and max bar positions
min_bar_v = rect.y;
max_bar_v = rect.y + rect.h;
}
// Horizontal counterpart of ChangeHeightMovingRect(): widen the content rect
// to the right-most attached element and resize the horizontal bar.
void UI_Scroll_Bar::ChangeWidthMovingRect()
{
// Take higher element horizontal --
// Element positions converted back into content space (undo scroll+camera).
int higher = 0;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
if (((min_bar_h - moving_rect.x) + (*it).element->rect.x + (*it).element->rect.w + App->render->camera.x) > higher)
higher = ((min_bar_h - moving_rect.x) + (*it).element->rect.x + (*it).element->rect.w) + App->render->camera.x;
}
// ----------------------------------
if (starting_h < higher)
{
moving_rect.w = higher;
// Inverse rule of thirds
// bar_w = full_bar_w * view_w / content_w, clamped to [20, full size].
button_h->rect.w = (button_starting_h * starting_h) / moving_rect.w;
if (button_h->rect.w < 20)
button_h->rect.w = 20;
if (button_h->rect.w > button_starting_h)
button_h->rect.w = button_starting_h;
}
else
{
// Content fits: park the bar at the left at full size.
button_h->rect.x = min_bar_h;
button_h->rect.w = button_starting_h;
}
// Update min and max bar positions
min_bar_h = rect.x;
max_bar_h = min_bar_h + rect.w;
}
// Drag handling for the vertical bar, then mapping the bar position onto the
// content range and offsetting every attached element accordingly.
void UI_Scroll_Bar::MoveBarV()
{
if (button_v->MouseClickEnterLeft())
{
// Drag start: remember the grab y (mouse_y is a member used as the anchor).
int mouse_x_tmp;
App->input->GetMousePosition(mouse_x_tmp, mouse_y);
is_scrolling_v = true;
}
if (is_scrolling_v)
{
int curr_x; int curr_y;
App->input->GetMousePosition(curr_x, curr_y);
// ----------------------
// Move buttons
// Apply the mouse delta, clamping the bar inside [min_bar_v, max_bar_v].
if (curr_y != mouse_y)
{
if (((button_v->rect.y + button_v->rect.h) - (mouse_y - curr_y)) <= max_bar_v && (button_v->rect.y - (mouse_y - curr_y)) >= min_bar_v)
{
button_v->rect.y -= mouse_y - curr_y;
}
else if(((button_v->rect.y + button_v->rect.h) - (mouse_y - curr_y)) > max_bar_v)
{
// Would pass the bottom: snap the bar's bottom edge to the track end.
button_v->rect.y += max_bar_v - (button_v->rect.y + button_v->rect.h);
}
else if ((button_v->rect.y - (mouse_y - curr_y)) < min_bar_v)
{
// Would pass the top: snap the bar's top edge to the track start.
button_v->rect.y -= button_v->rect.y - min_bar_v;
}
mouse_y = curr_y;
}
}
if (button_v->MouseClickOutLeft())
{
is_scrolling_v = false;
}
// Move elements inside the scroll
// bar_distance/moving_distance are the free travel of the bar and of the
// content (both negative when there is room to scroll).
int bar_distance = (min_bar_v + button_v->rect.h) - max_bar_v;
int moving_distance = (min_bar_v + moving_rect.h) - max_bar_v;
int position_bar = button_v->rect.y - min_bar_v;
if (bar_distance < 0)
{
// Rule of thirds
// Proportionally convert the bar offset into a content scroll offset.
scroll_v = -floor((float)(position_bar * moving_distance) / bar_distance);
moving_rect.y = min_bar_v - scroll_v;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
(*it).element->rect.y = (*it).starting_pos_y - scroll_v - App->render->camera.y;
}
}
else
{
// Bar fills the track: keep applying the last scroll offset unchanged.
moving_rect.y = min_bar_v - scroll_v;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
(*it).element->rect.y = (*it).starting_pos_y - scroll_v - App->render->camera.y;
}
}
}
// Horizontal counterpart of MoveBarV(): drag the bar, clamp it to its track,
// then offset every attached element by the mapped scroll amount.
void UI_Scroll_Bar::MoveBarH()
{
if (button_h->MouseClickEnterLeft())
{
// Drag start: remember the grab x (mouse_x is a member used as the anchor).
int mouse_y_tmp;
App->input->GetMousePosition(mouse_x, mouse_y_tmp);
is_scrolling_h = true;
}
if (is_scrolling_h)
{
int curr_x; int curr_y;
App->input->GetMousePosition(curr_x, curr_y);
// ----------------------
// Apply the mouse delta, clamping the bar inside [min_bar_h, max_bar_h].
if (curr_x != mouse_x)
{
if (((button_h->rect.x + button_h->rect.w) - (mouse_x - curr_x)) <= max_bar_h && (button_h->rect.x - (mouse_x - curr_x)) >= min_bar_h)
{
button_h->rect.x -= mouse_x - curr_x;
}
else if (((button_h->rect.x + button_h->rect.w) - (mouse_x - curr_x)) > max_bar_h)
{
// Would pass the right end: snap the bar's right edge to the track end.
button_h->rect.x += max_bar_h - (button_h->rect.x + button_h->rect.w);
}
else if ((button_h->rect.x - (mouse_x - curr_x)) < min_bar_h)
{
// Would pass the left end: snap the bar's left edge to the track start.
button_h->rect.x -= button_h->rect.x - min_bar_h;
}
mouse_x = curr_x;
}
}
if (button_h->MouseClickOutLeft())
{
is_scrolling_h = false;
}
// Free travel of the bar and of the content (negative when scrollable).
int bar_distance = (min_bar_h + button_h->rect.w) - max_bar_h;
int moving_distance = (min_bar_h + moving_rect.w) - max_bar_h;
int position_bar = button_h->rect.x - min_bar_h;
if (bar_distance < 0)
{
// Proportionally convert the bar offset into a content scroll offset.
scroll_h = -floor((float)(position_bar * moving_distance) / bar_distance);
moving_rect.x = min_bar_h - scroll_h;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
(*it).element->rect.x = (*it).starting_pos_x - scroll_h - App->render->camera.x;
}
}
else
{
// Bar fills the track: keep applying the last scroll offset unchanged.
moving_rect.x = min_bar_h - scroll_h;
for (list<scroll_element>::iterator it = elements.begin(); it != elements.end(); it++)
{
(*it).element->rect.x = (*it).starting_pos_x - scroll_h - App->render->camera.x;
}
}
}
// -----------------------------------
// ------------------------ Scroll Bar
// -----------------------------------
// Colored Rect ----------------------
// Members are initialized by UI_Element defaults; Set() does the real setup.
UI_ColoredRect::UI_ColoredRect()
{
}
// Nothing owned directly — nothing to release here.
UI_ColoredRect::~UI_ColoredRect()
{
}
void UI_ColoredRect::Set(iPoint pos, int w, int h, SDL_Color _color, bool filled)
{
    // Position/size the rect and store its draw settings.
    rect.x = pos.x;
    rect.y = pos.y;
    rect.w = w;
    rect.h = h;
    color = _color;
    // Bug fix: the `filled` argument was previously ignored (the member of
    // the same name — used by update() — was never assigned), so callers of
    // CreateColoredRect() could not control filled vs outlined drawing.
    this->filled = filled;
}
bool UI_ColoredRect::update()
{
    // Draw the rect (filled or outlined per the `filled` member) every
    // frame; returns false when the element is disabled.
    if (!enabled)
        return false;

    App->render->DrawQuad(rect, color.r, color.g, color.b, -1.0f, color.a, filled);
    return true;
}
void UI_ColoredRect::SetColor(SDL_Color _color)
{
    // Replace the draw color component by component.
    color.r = _color.r;
    color.g = _color.g;
    color.b = _color.b;
    color.a = _color.a;
}
// -----------------------------------
// ---------------------- Colored Rect
// -----------------------------------
// Check Box -------------------------
// Members are initialized by UI_Element defaults; Set()/AddBox() do setup.
UI_Check_Box::UI_Check_Box()
{
}
// check_box wrappers are released in cleanup(), not here.
UI_Check_Box::~UI_Check_Box()
{
}
void UI_Check_Box::Set(iPoint pos, int w, int h, SDL_Rect _pressed, SDL_Rect _idle, bool _multiple_choice)
{
    // Group bounds plus the two atlas rects for the box states.
    rect.x = pos.x;
    rect.y = pos.y;
    rect.w = w;
    rect.h = h;
    pressed = _pressed;
    idle = _idle;
    multiple_choice = _multiple_choice;
}
bool UI_Check_Box::update()
{
if (!enabled)
return false;
// Debug
if (App->gui->debug)
{
App->render->DrawQuad(rect, 255, 255, 255, -1.0f, 255, false);
for (int i = 0; i < check_box_list.size(); i++)
{
if (check_box_list.at(i)->checked)
{
App->render->DrawQuad({ check_box_list.at(i)->button->rect}, 255, 255, 255, -1.0f, 255, true);
}
}
}
// Print
for (int i = 0; i < check_box_list.size(); i++)
{
SDL_Rect button = NULLRECT;
if (check_box_list.at(i)->checked)
button = pressed;
else
button = idle;
if (!is_gameplay)
App->render->Blit(App->gui->atlas, check_box_list.at(i)->button->rect.x, check_box_list.at(i)->button->rect.y, &button);
//WIP
}
CheckControl();
return true;
}
bool UI_Check_Box::cleanup()
{
    // Free every check_box wrapper (their buttons are registered with the
    // gui module as children of this element).
    for (auto& box : check_box_list)
        RELEASE(box);
    return true;
}
// Append a new box at `pos`; its button is registered with the gui module
// one layer above this group so it wins the overlap test.
// NOTE(review): the `name` parameter is unused — the GetBox/SetBox(name)
// lookups are WIP; confirm before relying on named boxes.
void UI_Check_Box::AddBox(iPoint pos, int size_w, int size_h, char * name)
{
check_box* cb = new check_box(pos, size_w, size_h);
cb->button->layer = layer + 1;
cb->button->blit_layer = blit_layer;
App->gui->elements_list.Push((UI_Element*)cb->button, cb->button->layer);
childs.push_back(cb->button);
check_box_list.push_back(cb);
}
// Return whether the box named `name` is checked.
// NOTE(review): the lookup is unimplemented (WIP) — always returns false.
bool UI_Check_Box::GetBox(char * name)
{
for (int i = 0; i < check_box_list.size(); i++)
{
//WIP
}
return false;
}
// Check/uncheck the box named `name`; in single-choice mode checking one
// box should clear all the others.
// NOTE(review): the name lookup is unimplemented (WIP), so `curr` stays
// null and `change` stays false — the exclusivity pass below is only saved
// from a null deref by &&'s short-circuit on `change`. Finish the loop
// (assigning `curr`) before using this.
void UI_Check_Box::SetBox(bool set, char * name)
{
check_box* curr = nullptr;
bool change = false;
for (int i = 0; i < check_box_list.size(); i++)
{
//WIP
}
if (!multiple_choice && change && curr->checked)
{
for (int i = 0; i < check_box_list.size(); i++)
{
if (check_box_list.at(i) != curr)
check_box_list.at(i)->checked = false;
}
}
}
void UI_Check_Box::SetBox(bool set, int _i)
{
    // Programmatically check/uncheck the box at index _i. In single-choice
    // mode, checking one box clears every other one.
    check_box* curr = nullptr;
    bool change = false;
    for (int i = 0; i < check_box_list.size(); i++)
    {
        if (i == _i)
        {
            // Bug fix: `curr` was never assigned in the old code, so the
            // exclusivity pass below dereferenced nullptr whenever a box
            // was set in single-choice mode.
            curr = check_box_list.at(i);
            curr->checked = set;
            change = true;
            break;
        }
    }
    if (!multiple_choice && change && curr->checked)
    {
        for (int i = 0; i < check_box_list.size(); i++)
        {
            if (check_box_list.at(i) != curr)
                check_box_list.at(i)->checked = false;
        }
    }
}
void UI_Check_Box::CheckControl()
{
check_box* curr = nullptr;
bool change = false;
for (int i = 0; i < check_box_list.size(); i++)
{
curr = check_box_list.at(i);
if (curr->button->MouseClickEnterLeft())
{
curr->checked = !curr->checked;
change = true;
break;
}
}
if (!multiple_choice && change && curr->checked)
{
for (int i = 0; i < check_box_list.size(); i++)
{
if (check_box_list.at(i) != curr)
check_box_list.at(i)->checked = false;
}
}
}
|
#include "stdafx.h"
#include "FallingItem.hpp"
#include "Shadow.hpp"
#include "ScreenManager.hpp"
#include "Game.hpp"
#include "stdlib.hpp"
#include "Function.hpp"
#include "Events.hpp"
#include "Sfx.hpp"
#include "SwitchStates.hpp"
#include "Sound/Midi.hpp"
#include "ParticleBurst.hpp"
#include "Particle.hpp"
#include "ScreenShake.hpp"
#include "Abe.hpp"
// Per-level animation pair for the spawned falling item, indexed by the
// current level id; column 0 is used at construction (see the ctors below).
// Most levels drop rocks; indices 8 and 14 drop crates.
// NOTE(review): column 1's usage is outside this chunk — confirm it is the
// alternate/landed animation.
const AnimId sFallingItemData_544DC0[15][2] = {
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Crate_A, AnimId::Falling_Crate_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Rock_A, AnimId::Falling_Rock_B},
{AnimId::Falling_Crate_A, AnimId::Falling_Crate_B}};
// First-created FallingItem; set in the ctors, cleared in dtor_427EB0()
// when that instance is destroyed. Mirrors original game memory at 0x5BC208.
ALIVE_VAR(1, 0x5BC208, FallingItem*, pPrimaryFallingItem_5BC208, nullptr);
// Constructor used when the spawner comes from a path TLV: reads position,
// scale and timing from the TLV, loads the per-level animation, clamps the
// spawn y to the camera and creates a shadow. (Decompiled — keep byte-exact.)
EXPORT FallingItem* FallingItem::ctor_4272C0(Path_FallingItem* pTlv, s32 tlvInfo)
{
ctor_408240(0);
SetVTable(this, 0x544E98);
field_4_typeId = AETypes::eRockSpawner_48;
field_6_flags.Set(BaseGameObject::eCanExplode_Bit7);
field_118_tlvInfo = tlvInfo;
// Preload the explosion animation used when the item lands/breaks.
Add_Resource_4DC130(ResourceManager::Resource_Animation, ResourceID::kExplo2ResID);
// Pick the rock/crate art for the current level (sFallingItemData_544DC0).
const s32 lvlIdx = static_cast<s32>(gMap_5C3030.field_0_current_level);
const AnimRecord& rec = AnimRec(sFallingItemData_544DC0[lvlIdx][0]);
u8** ppRes = Add_Resource_4DC130(ResourceManager::Resource_Animation, rec.mResourceId);
Animation_Init_424E10(rec.mFrameTableOffset, rec.mMaxW, rec.mMaxH, ppRes, 1, 1);
field_11E_id = pTlv->field_10_id;
if (pTlv->field_12_scale == Scale_short::eHalf_1)
{
// Half-scale (background) items render on a different layer.
field_CC_sprite_scale = FP_FromDouble(0.5);
field_D6_scale = 0;
field_20_animation.field_C_render_layer = Layer::eLayer_12;
}
else
{
field_CC_sprite_scale = FP_FromInteger(1);
field_D6_scale = 1;
field_20_animation.field_C_render_layer = Layer::eLayer_31;
}
field_124_fall_delay = pTlv->field_14_fall_delay;
field_120_max_falling_items = pTlv->field_16_max_falling_items;
field_122_remaining_falling_items = pTlv->field_16_max_falling_items;
field_134_bHitGrinderOrMineCar = FALSE;
field_12C_reset_id = pTlv->field_18_reset_id;
field_12E_do_sound_in_state_falling = TRUE;
field_B8_xpos = FP_FromInteger(pTlv->field_8_top_left.field_0_x);
field_BC_ypos = FP_FromInteger(pTlv->field_8_top_left.field_2_y);
// Clamp the spawn y to the camera position's y — presumably so the item
// drops in from the top of the visible screen; confirm.
if (field_BC_ypos > pScreenManager_5BB5F4->field_20_pCamPos->field_4_y)
{
field_BC_ypos = pScreenManager_5BB5F4->field_20_pCamPos->field_4_y;
}
// Impact/target position: TLV horizontal centre, TLV bottom edge.
field_138_xpos = FP_FromInteger((pTlv->field_8_top_left.field_0_x + pTlv->field_C_bottom_right.field_0_x) / 2);
field_13C_ypos = FP_FromInteger(pTlv->field_C_bottom_right.field_2_y);
field_130_yPosStart = field_BC_ypos;
field_11C_state = State::eState_0_WaitForIdEnable;
field_140_sound_channels = 0;
// First instance created becomes the primary one.
if (!pPrimaryFallingItem_5BC208)
{
pPrimaryFallingItem_5BC208 = this;
field_144_created_gnFrame = sGnFrame_5C1B84;
}
field_E0_pShadow = ae_new<Shadow>();
if (field_E0_pShadow)
{
field_E0_pShadow->ctor_4AC990();
}
return this;
}
// Constructor used when the spawner is created directly from parameters
// (no TLV, field_118_tlvInfo = -1). Unlike the TLV ctor, no explosion
// resource is preloaded here and the render layer is always eLayer_31.
// (Decompiled — keep byte-exact.)
FallingItem* FallingItem::ctor_427560(s16 xpos, s16 ypos, s16 scale, s16 id, s16 delayTime, s16 numItems, s16 resetId)
{
ctor_408240(0);
SetVTable(this, 0x544E98);
field_4_typeId = AETypes::eRockSpawner_48;
field_6_flags.Set(BaseGameObject::eCanExplode_Bit7);
field_118_tlvInfo = -1;
// Per-level rock/crate art (sFallingItemData_544DC0).
const s32 lvlIdx = static_cast<s32>(gMap_5C3030.field_0_current_level);
const AnimRecord& rec = AnimRec(sFallingItemData_544DC0[lvlIdx][0]);
u8** ppRes = Add_Resource_4DC130(ResourceManager::Resource_Animation, rec.mResourceId);
Animation_Init_424E10(rec.mFrameTableOffset, rec.mMaxW, rec.mMaxH, ppRes, 1, 1);
field_20_animation.field_C_render_layer = Layer::eLayer_31;
// Id 0 is reserved; fall back to 1.
if (id)
{
field_11E_id = id;
}
else
{
field_11E_id = 1;
}
// NOTE(review): non-zero `scale` means half scale here (inverted vs the
// TLV ctor's Scale_short::eHalf_1 check) — matches original binary.
if (scale)
{
field_CC_sprite_scale = FP_FromDouble(0.5);
field_D6_scale = 0;
}
else
{
field_CC_sprite_scale = FP_FromInteger(1);
field_D6_scale = 1;
}
field_124_fall_delay = delayTime;
field_120_max_falling_items = numItems;
field_122_remaining_falling_items = numItems;
const FP xFixed = FP_FromInteger(xpos);
const FP yFixed = FP_FromInteger(ypos);
field_12C_reset_id = resetId;
field_134_bHitGrinderOrMineCar = FALSE;
field_12E_do_sound_in_state_falling = TRUE;
// Spawn, target and start positions all coincide for the direct ctor.
field_B8_xpos = xFixed;
field_BC_ypos = yFixed;
field_138_xpos = xFixed;
field_13C_ypos = yFixed;
field_130_yPosStart = yFixed;
field_11C_state = State::eState_0_WaitForIdEnable;
field_140_sound_channels = 0;
// First instance created becomes the primary one.
if (!pPrimaryFallingItem_5BC208)
{
pPrimaryFallingItem_5BC208 = this;
field_144_created_gnFrame = sGnFrame_5C1B84;
}
field_E0_pShadow = ae_new<Shadow>();
if (field_E0_pShadow)
{
field_E0_pShadow->ctor_4AC990();
}
return this;
}
// Virtual-dispatch shims: forward the generic BaseGameObject virtuals to the
// address-named implementations below.
BaseGameObject* FallingItem::VDestructor(s32 flags)
{
    return vdtor_427530(flags);
}

void FallingItem::VUpdate()
{
    vUpdate_427780();
}

void FallingItem::VScreenChanged()
{
    vScreenChanged_428180();
}
// Real destructor body. Order matters: restore this class' vtable first,
// clear the global primary-item pointer if it refers to us, reset the owning
// TLV record (field_118_tlvInfo is -1 when constructed without one), then run
// the base class destructor.
void FallingItem::dtor_427EB0()
{
    SetVTable(this, 0x544E98);
    if (pPrimaryFallingItem_5BC208 == this)
    {
        pPrimaryFallingItem_5BC208 = nullptr;
    }
    Path::TLV_Reset_4DB8E0(field_118_tlvInfo, -1, 0, 0);
    dtor_4080B0();
}
// Scalar deleting destructor: always runs the real destructor, and frees the
// object's memory when bit 0 of flags is set (mirrors the MSVC dtor ABI).
FallingItem* FallingItem::vdtor_427530(s32 flags)
{
    dtor_427EB0();

    const bool bFreeSelf = (flags & 1) != 0;
    if (bFreeSelf)
    {
        ae_delete_free_495540(this);
    }
    return this;
}
// Called when the camera/screen changes. The item only survives the change
// if it is mid-fall within the same level and path; otherwise it is killed.
void FallingItem::vScreenChanged_428180()
{
    const bool bSameLevel = gMap_5C3030.field_0_current_level == gMap_5C3030.field_A_level;
    const bool bSamePath = gMap_5C3030.field_2_current_path == gMap_5C3030.field_C_path;
    const bool bFalling = field_11C_state == State::eState_3_Falling;

    if (!(bSameLevel && bSamePath && bFalling))
    {
        field_6_flags.Set(BaseGameObject::eDead_Bit3);
    }
}
// Per-frame state machine:
//   eState_0 -> wait for our switch id to become enabled
//   eState_1 -> externally forced variant of the eState_0 transition
//   eState_2 -> wait for the configured fall delay to expire
//   eState_3 -> fall under gravity until a collision line (or grinder/
//               mine car) is hit
//   eState_4 -> smash effects, then either die or reset back to eState_0
EXPORT void FallingItem::vUpdate_427780()
{
    if (Event_Get_422C00(kEventDeathReset))
    {
        field_6_flags.Set(BaseGameObject::eDead_Bit3);
    }
    // The primary item controls the main sound effects, otherwise there would be a crazy amount of smashing sounds
    if (pPrimaryFallingItem_5BC208 == this)
    {
        // Two ambient loops on different periods; quieter at half scale.
        if (!((sGnFrame_5C1B84 - field_144_created_gnFrame) % 87))
        {
            if (field_D6_scale == 1)
            {
                SFX_Play_46FA90(SoundEffect::FallingItemPresence1_74, 45);
            }
            else
            {
                SFX_Play_46FA90(SoundEffect::FallingItemPresence1_74, 20);
            }
        }
        if (!((sGnFrame_5C1B84 - field_144_created_gnFrame) % 25))
        {
            if (field_D6_scale == 1)
            {
                SFX_Play_46FA90(SoundEffect::FallingItemPresence2_75, 45);
            }
            else
            {
                SFX_Play_46FA90(SoundEffect::FallingItemPresence2_75, 20);
            }
        }
    }
    switch (field_11C_state)
    {
        case State::eState_0_WaitForIdEnable:
            if (field_11E_id && SwitchStates_Get_466020(field_11E_id))
            {
                // Switch fired: arm the fall-delay timer and swap to the
                // per-level "about to fall" animation (table index 1).
                field_6_flags.Clear(BaseGameObject::eCanExplode_Bit7);
                field_11C_state = State::eState_2_WaitForFallDelay;
                field_C4_velx = FP_FromInteger(0);
                field_C8_vely = FP_FromInteger(0);
                const AnimRecord& animRec = AnimRec(sFallingItemData_544DC0[static_cast<s32>(gMap_5C3030.field_0_current_level)][1]);
                field_20_animation.Set_Animation_Data_409C80(animRec.mFrameTableOffset, nullptr);
                field_128_delay_timer = sGnFrame_5C1B84 + field_124_fall_delay;
            }
            break;
        // TODO: Must only be set outside of the object
        case State::eState_1_GoWaitForDelay:
        {
            // Same arming sequence as eState_0, but unconditional.
            field_6_flags.Clear(BaseGameObject::eCanExplode_Bit7);
            field_11C_state = State::eState_2_WaitForFallDelay;
            field_C4_velx = FP_FromInteger(0);
            field_C8_vely = FP_FromInteger(0);
            const AnimRecord& animRec = AnimRec(sFallingItemData_544DC0[static_cast<s32>(gMap_5C3030.field_0_current_level)][1]);
            field_20_animation.Set_Animation_Data_409C80(animRec.mFrameTableOffset, nullptr);
            field_128_delay_timer = sGnFrame_5C1B84 + field_124_fall_delay;
            break;
        }
        case State::eState_2_WaitForFallDelay:
            if (static_cast<s32>(sGnFrame_5C1B84) >= field_128_delay_timer)
            {
                // Delay over: start falling and kick off the air-stream loop.
                field_11C_state = State::eState_3_Falling;
                field_12E_do_sound_in_state_falling = TRUE;
                if (field_D6_scale == 1)
                {
                    field_140_sound_channels = SFX_Play_46FBA0(SoundEffect::AirStream_23, 50, -2600);
                }
                else
                {
                    field_140_sound_channels = SFX_Play_46FBA0(SoundEffect::AirStream_23, 25, -2600);
                }
            }
            break;
        case State::eState_3_Falling:
        {
            // One-shot louder whoosh once we get within half a screen
            // (240/2 px) above the hero.
            if (field_12E_do_sound_in_state_falling)
            {
                if (field_BC_ypos >= sActiveHero_5C1B68->field_BC_ypos - FP_FromInteger(240 / 2))
                {
                    field_12E_do_sound_in_state_falling = FALSE;
                    if (field_D6_scale == 1)
                    {
                        SFX_Play_46FBA0(SoundEffect::AirStream_23, 127, -1300);
                    }
                    else
                    {
                        SFX_Play_46FBA0(SoundEffect::AirStream_23, 64, -1300);
                    }
                }
            }
            DamageHitItems_427F40();
            // Gravity, capped at a terminal velocity of 20, scaled by sprite scale.
            if (field_C8_vely < FP_FromInteger(20))
            {
                field_C8_vely += field_CC_sprite_scale * FP_FromDouble(1.8);
            }
            PathLine* pathLine = nullptr;
            FP hitX = {};
            FP hitY = {};
            // Ray straight down by this frame's velocity; the line mask
            // differs per scale (1 = full scale lines, 16 = half scale).
            if (sCollisions_DArray_5C1128->Raycast_417A60(
                    field_B8_xpos,
                    field_BC_ypos,
                    field_B8_xpos,
                    field_C8_vely + field_BC_ypos,
                    &pathLine,
                    &hitX,
                    &hitY,
                    field_D6_scale != 0 ? 1 : 16)
                == 1)
            {
                if (!field_134_bHitGrinderOrMineCar)
                {
                    field_BC_ypos = hitY;
                }
            }
            else if (!field_134_bHitGrinderOrMineCar)
            {
                // Nothing hit and nothing flagged: keep falling this frame.
                field_BC_ypos += field_C8_vely;
                return;
            }
            // Landed (on a line, grinder or mine car): smash.
            field_134_bHitGrinderOrMineCar = FALSE;
            field_11C_state = State::eState_4_Smashed;
            auto pShake = ae_new<ScreenShake>();
            if (pShake)
            {
                pShake->ctor_4ACF70(0, field_CC_sprite_scale == FP_FromDouble(0.5));
            }
            // Bonewerkz uses stick debris plus an explosion sprite; everywhere
            // else gets a plain falling-rocks burst.
            if (gMap_5C3030.field_0_current_level == LevelIds::eBonewerkz_8)
            {
                auto pPart = ae_new<ParticleBurst>();
                if (pPart)
                {
                    pPart->ctor_41CF50(
                        field_B8_xpos,
                        field_BC_ypos,
                        0x14u,
                        field_CC_sprite_scale,
                        BurstType::eSticks_1,
                        13);
                }
                auto pParticle = ae_new<Particle>();
                if (pParticle)
                {
                    u8** ppRes = ResourceManager::GetLoadedResource_49C2A0(ResourceManager::Resource_Animation, ResourceID::kExplo2ResID, 0, 0);
                    pParticle->ctor_4CC4C0(
                        field_B8_xpos,
                        field_BC_ypos - (FP_FromInteger(15) * field_CC_sprite_scale),
                        51156,
                        202,
                        91,
                        ppRes);
                    pParticle->field_20_animation.field_B_render_mode = TPageAbr::eBlend_1;
                    pParticle->field_CC_sprite_scale = field_CC_sprite_scale * FP_FromDouble(0.75);
                }
            }
            else
            {
                auto pPartBurst = ae_new<ParticleBurst>();
                if (pPartBurst)
                {
                    pPartBurst->ctor_41CF50(
                        field_B8_xpos,
                        field_BC_ypos,
                        25,
                        field_CC_sprite_scale,
                        BurstType::eFallingRocks_0,
                        13);
                }
            }
        }
        break;
        case State::eState_4_Smashed:
            // Stop the looping air-stream channels from eState_2.
            if (field_140_sound_channels)
            {
                SND_Stop_Channels_Mask_4CA810(field_140_sound_channels);
                field_140_sound_channels = 0;
            }
            Event_Broadcast_422BC0(kEventLoudNoise, this);
            SFX_Play_46FA90(SoundEffect::FallingItemLand_62, 0, field_CC_sprite_scale);
            if (field_D6_scale == 1)
            {
                SFX_Play_46FBA0(SoundEffect::FallingItemHit_47, 110, -1536);
            }
            else
            {
                SFX_Play_46FBA0(SoundEffect::FallingItemHit_47, 55, -1536);
            }
            // Optionally turn our switch back off so the sequence can rearm.
            if (field_11E_id)
            {
                if (field_12C_reset_id)
                {
                    SwitchStates_Do_Operation_465F00(field_11E_id, SwitchOp::eSetFalse_1);
                }
            }
            --field_122_remaining_falling_items;
            // Die when the item budget is spent, or when the spawn point has
            // scrolled out of the current camera; otherwise reset to the top.
            if ((field_120_max_falling_items > 0 && field_122_remaining_falling_items <= 0) || !gMap_5C3030.Is_Point_In_Current_Camera_4810D0(field_C2_lvl_number, field_C0_path_number, field_138_xpos, field_13C_ypos, 0))
            {
                field_6_flags.Set(BaseGameObject::eDead_Bit3);
            }
            else
            {
                const AnimRecord& animRec = AnimRec(sFallingItemData_544DC0[static_cast<s32>(gMap_5C3030.field_0_current_level)][0]);
                field_20_animation.Set_Animation_Data_409C80(animRec.mFrameTableOffset, nullptr);
                field_6_flags.Set(BaseGameObject::eCanExplode_Bit7);
                field_C8_vely = FP_FromInteger(0);
                field_C4_velx = FP_FromInteger(0);
                field_BC_ypos = field_130_yPosStart;
                field_11C_state = State::eState_0_WaitForIdEnable;
            }
            break;
        default:
            return;
    }
}
void FallingItem::DamageHitItems_427F40()
{
for (s32 idx = 0; idx < gBaseGameObject_list_BB47C4->Size(); idx++)
{
BaseGameObject* pObj = gBaseGameObject_list_BB47C4->ItemAt(idx);
if (!pObj)
{
break;
}
if (pObj != this)
{
if (pObj->field_6_flags.Get(BaseGameObject::eIsBaseAliveGameObject_Bit6) || pObj->field_4_typeId == AETypes::eGrinder_30)
{
BaseAnimatedWithPhysicsGameObject* pAliveObj = static_cast<BaseAnimatedWithPhysicsGameObject*>(pObj);
PSX_RECT fallingItemRect = {};
vGetBoundingRect_424FD0(&fallingItemRect, 1);
PSX_RECT objRect = {};
pAliveObj->vGetBoundingRect_424FD0(&objRect, 1);
if (pAliveObj->field_CC_sprite_scale == field_CC_sprite_scale)
{
if (pAliveObj->field_4_typeId == AETypes::eGrinder_30 || pAliveObj->field_4_typeId == AETypes::eMineCar_89)
{
objRect.x += pAliveObj->field_DA_xOffset;
objRect.y += pAliveObj->field_D8_yOffset;
objRect.w += pAliveObj->field_DA_xOffset;
objRect.h += pAliveObj->field_D8_yOffset;
}
if (PSX_Rects_overlap_no_adjustment(&fallingItemRect, &objRect))
{
if (pAliveObj->field_4_typeId == AETypes::eGrinder_30)
{
// Grinder is not a type that implements VTakeDamage
field_134_bHitGrinderOrMineCar = TRUE;
}
else if (pAliveObj->field_4_typeId == AETypes::eMineCar_89)
{
// ?? Could still call VTakeDamage here but OG doesn't ??
field_134_bHitGrinderOrMineCar = TRUE;
}
else
{
bool doDamage = true;
if (pAliveObj->field_4_typeId == AETypes::eParamite_96)
{
// Some strange edge case for paramites - prevents them getting smashed by
// falling items when stood on an edge by their huge heads peeking over a bit.
if (pAliveObj->field_B8_xpos < FP_FromInteger(fallingItemRect.x) || pAliveObj->field_B8_xpos > FP_FromInteger(fallingItemRect.w))
{
doDamage = false;
}
}
if (doDamage)
{
static_cast<BaseAliveGameObject*>(pAliveObj)->VTakeDamage_408730(this);
}
}
}
}
}
}
}
}
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/vasm-x64.h"

#include "hphp/runtime/vm/jit/back-end-x64.h"
#include "hphp/runtime/vm/jit/block.h"
#include "hphp/runtime/vm/jit/code-gen.h"
#include "hphp/runtime/vm/jit/code-gen-helpers-x64.h"
#include "hphp/runtime/vm/jit/mc-generator.h"
#include "hphp/runtime/vm/jit/print.h"
#include "hphp/runtime/vm/jit/prof-data.h"
#include "hphp/runtime/vm/jit/service-requests-inline.h"
#include "hphp/runtime/vm/jit/timer.h"
#include "hphp/runtime/vm/jit/vasm-print.h"

#include <cstdint>
#include <cstring>
TRACE_SET_MOD(vasm);
namespace HPHP { namespace jit {
using namespace reg;
using namespace x64;
// Printable name for each vasm opcode, generated from the X64_OPCODES x-macro
// so the table stays in sync with the Vinstr opcode enum order.
const char* vinst_names[] = {
#define O(name, imms, uses, defs) #name,
  X64_OPCODES
#undef O
};
// Returns true if inst transfers control away from the current block, i.e.
// it may only legally appear as a block's last instruction (Vgen::check
// asserts exactly this invariant).
bool isBlockEnd(Vinstr& inst) {
  switch (inst.op) {
    case Vinstr::bindaddr:
    case Vinstr::bindjcc1:
    case Vinstr::bindjmp:
    case Vinstr::end:
    case Vinstr::fallback:
    case Vinstr::jcc:
    case Vinstr::jmp:
    case Vinstr::jmpr:
    case Vinstr::jmpm:
    case Vinstr::phijmp:
    case Vinstr::resume:
    case Vinstr::ud2:
    case Vinstr::unwind:
    case Vinstr::retransopt:
    case Vinstr::ret:
      return true;
    default:
      return false;
  }
}
// Append a new, empty block for the given code area and return its label
// (labels are simply indices into blocks).
Vlabel Vunit::makeBlock(AreaIndex area) {
  auto const index = blocks.size();
  blocks.emplace_back(area);
  return Vlabel{index};
}
// Return the virtual register holding constant v, creating and memoizing a
// fresh one in the constant pool on first use.
Vreg Vunit::makeConst(uint64_t v) {
  auto const cached = cpool.find(v);
  if (cached != cpool.end()) {
    return cached->second;
  }
  auto const r = makeReg();
  cpool[v] = r;
  return r;
}
// Return the virtual register holding the bit pattern of d, sharing the
// integer constant pool. The bits are extracted with memcpy: reading the
// inactive member of a union is undefined behavior in C++, whereas memcpy
// between same-sized trivially-copyable objects is well defined and compiles
// to the same code.
Vreg Vunit::makeConst(double d) {
  uint64_t bits;
  static_assert(sizeof(bits) == sizeof(d), "double must be 64 bits");
  std::memcpy(&bits, &d, sizeof(bits));
  return makeConst(bits);
}
// Store a register list and return its tuple handle (an index into tuples).
Vtuple Vunit::makeTuple(const VregList& regs) {
  auto const index = tuples.size();
  tuples.emplace_back(regs);
  return Vtuple{index};
}
// Append an instruction to the current block, tagging it with the IR
// instruction being translated (used for debug printing and bytecode maps).
// Appending to a block that already ends in a terminal is a bug.
Vout& Vout::operator<<(Vinstr inst) {
  assert(!closed());
  inst.origin = m_origin;
  m_unit.blocks[m_block].code.push_back(inst);
  return *this;
}

// Create a new empty block in the same area and return a Vout writing to it.
Vout Vout::makeBlock() {
  return {m_meta, m_unit, m_unit.makeBlock(m_area), m_area, m_origin};
}

// Create a new block and register it as an entry point (root) of the unit.
Vout Vout::makeEntry() {
  auto label = m_unit.makeBlock(m_area);
  m_unit.roots.push_back(label); // save entry label
  return {m_meta, m_unit, label, m_area, m_origin};
}

// implicit cast to label for initializing branch instructions; only valid
// while the block is still empty.
Vout::operator Vlabel() const {
  assert(empty());
  return m_block;
}

// True if nothing has been appended to the current block yet.
bool Vout::empty() const {
  return m_unit.blocks[m_block].code.empty();
}

// True once the current block ends with a terminal instruction; operator<<
// asserts against appending past this point.
bool Vout::closed() const {
  return !empty() && isBlockEnd(m_unit.blocks[m_block].code.back());
}
namespace {
// Lowers a Vunit's virtual instructions to x64 machine code, one block at a
// time in layout order. Forward references (jumps, calls, catch blocks, lea
// targets) are recorded in patch lists and resolved after all block start
// addresses are known.
struct Vgen {
  Vgen(Vunit& u, Vasm::AreaList& areas, Vmeta* meta, AsmInfo* asmInfo)
    : unit(u)
    , backend(mcg->backEnd())
    , areas(areas)
    , meta(meta)
    , m_asmInfo(asmInfo)
    , addrs(u.blocks.size(), nullptr)
  {}
  void emit(jit::vector<Vlabel>&);

private:
  // intrinsics
  void emit(bindaddr& i);
  void emit(bindcall& i);
  void emit(bindexit& i);
  void emit(bindjcc1& i);
  void emit(bindjcc2& i);
  void emit(bindjmp& i);
  void emit(callstub& i);
  void emit(contenter& i);
  void emit(copy i);
  void emit(copy2& i);
  void emit(copyargs& i) { always_assert(false); } // lowered before emission
  void emit(end& i) {}                             // marker only; no code
  void emit(ldimm& i);
  void emit(fallback& i);
  void emit(fallbackcc i);
  void emit(incstat& i) { emitIncStat(a->code(), i.stat, i.n, i.force); }
  void emit(kpcall& i);
  void emit(ldpoint& i);
  void emit(load& i);
  void emit(mccall& i);
  void emit(mcprep& i);
  void emit(nocatch& i);
  void emit(nop& i) { a->nop(); }
  void emit(phidef& i) { always_assert(false); }   // lowered before emission
  void emit(phijmp& i) { always_assert(false); }   // lowered before emission
  void emit(point& i) { meta->points[i.p] = a->frontier(); }
  void emit(resume& i) { emitServiceReq(a->code(), REQ_RESUME); }
  void emit(retransopt& i);
  void emit(store& i);
  void emit(syncpoint i);
  void emit(unwind& i);

  // instructions - mostly 1:1 with X64Assembler; binary()/commute()/unary()
  // first move a source into the destination to model x64's two-operand form.
  void emit(andb& i) { commute(i); a->andb(i.s0, i.d); }
  void emit(andbi& i) { binary(i); a->andb(i.s0, i.d); }
  void emit(andbim& i) { a->andb(i.s, i.m); }
  void emit(andl& i) { commute(i); a->andl(i.s0, i.d); }
  void emit(andli& i) { binary(i); a->andl(i.s0, i.d); }
  void emit(andq& i) { commute(i); a->andq(i.s0, i.d); }
  void emit(andqi& i) { binary(i); a->andq(i.s0, i.d); }
  void emit(addlm& i) { a->addl(i.s0, i.m); }
  void emit(addq& i) { commute(i); a->addq(i.s0, i.d); }
  void emit(addqi& i) { binary(i); a->addq(i.s0, i.d); }
  void emit(addsd& i) { commute(i); a->addsd(i.s0, i.d); }
  void emit(call i);
  void emit(callm& i) { a->call(i.target); }
  void emit(callr& i) { a->call(i.target); }
  void emit(cloadq& i);
  void emit(cmovq i);
  void emit(cmpb& i) { a->cmpb(i.s0, i.s1); }
  void emit(cmpbi& i) { a->cmpb(i.s0, i.s1); }
  void emit(cmpbim& i) { a->cmpb(i.s0, i.s1); }
  void emit(cmpl& i) { a->cmpl(i.s0, i.s1); }
  void emit(cmpli& i) { a->cmpl(i.s0, i.s1); }
  void emit(cmplim& i) { a->cmpl(i.s0, i.s1); }
  void emit(cmplm& i) { a->cmpl(i.s0, i.s1); }
  void emit(cmpq& i) { a->cmpq(i.s0, i.s1); }
  void emit(cmpqi& i) { a->cmpq(i.s0, i.s1); }
  void emit(cmpqim& i) { a->cmpq(i.s0, i.s1); }
  void emit(cmpqm& i) { a->cmpq(i.s0, i.s1); }
  void emit(cmpsd& i) { noncommute(i); a->cmpsd(i.s0, i.d, i.pred); }
  void emit(cqo& i) { a->cqo(); }
  void emit(cvttsd2siq& i) { a->cvttsd2siq(i.s, i.d); }
  void emit(cvtsi2sd& i);
  void emit(cvtsi2sdm& i);
  void emit(decl& i) { unary(i); a->decl(i.d); }
  void emit(declm& i) { a->decl(i.m); }
  void emit(decq& i) { unary(i); a->decq(i.d); }
  void emit(decqm& i) { a->decq(i.m); }
  void emit(divsd& i) { noncommute(i); a->divsd(i.s0, i.d); }
  void emit(imul& i) { commute(i); a->imul(i.s0, i.d); }
  void emit(incwm& i) { a->incw(i.m); }
  void emit(idiv& i) { a->idiv(i.s); }
  void emit(incl& i) { unary(i); a->incl(i.d); }
  void emit(inclm& i) { a->incl(i.m); }
  void emit(incq& i) { unary(i); a->incq(i.d); }
  void emit(incqm& i) { a->incq(i.m); }
  void emit(incqmlock& i) { a->lock(); a->incq(i.m); }
  void emit(jcc& i);
  void emit(jmp i);
  void emit(jmpr& i) { a->jmp(i.target); }
  void emit(jmpm& i) { a->jmp(i.target); }
  void emit(lea& i);
  void emit(leap& i) { a->lea(i.s, i.d); }
  void emit(loaddqu& i) { a->movdqu(i.s, i.d); }
  void emit(loadl& i) { a->loadl(i.s, i.d); }
  void emit(loadq& i);
  void emit(loadqp& i) { a->loadq(i.s, i.d); }
  void emit(loadsd& i) { a->movsd(i.s, i.d); }
  void emit(loadzbl& i) { a->loadzbl(i.s, i.d); }
  void emit(movb& i) { a->movb(i.s, i.d); }
  void emit(movbi& i) { a->movb(i.s, i.d); }
  void emit(movdqa& i) { a->movdqa(i.s, i.d); }
  void emit(movl& i) { a->movl(i.s, i.d); }
  void emit(movq& i) { a->movq(i.s, i.d); }
  void emit(movqrx& i) { a->movq_rx(i.s, i.d); }
  void emit(movqxr& i) { a->movq_xr(i.s, i.d); }
  void emit(movzbl& i) { a->movzbl(i.s, i.d); }
  void emit(movsbl& i) { a->movsbl(i.s, i.d); }
  void emit(mulsd& i) { commute(i); a->mulsd(i.s0, i.d); }
  void emit(neg& i) { unary(i); a->neg(i.d); }
  void emit(not& i) { unary(i); a->not(i.d); }
  void emit(orq& i) { commute(i); a->orq(i.s0, i.d); }
  void emit(orqi& i) { binary(i); a->orq(i.s0, i.d); }
  void emit(orqim& i) { a->orq(i.s0, i.m); }
  void emit(pop& i) { a->pop(i.d); }
  void emit(popm& i) { a->pop(i.m); }
  void emit(psllq& i) { binary(i); a->psllq(i.s0, i.d); }
  void emit(psrlq& i) { binary(i); a->psrlq(i.s0, i.d); }
  void emit(push& i) { a->push(i.s); }
  void emit(pushl& i) { a->pushl(i.s); }
  void emit(pushm& i) { a->push(i.s); }
  void emit(roundsd& i) { a->roundsd(i.dir, i.s, i.d); }
  void emit(ret& i) { a->ret(); }
  void emit(rorqi& i) { binary(i); a->rorq(i.s0, i.d); }
  void emit(sarq& i) { unary(i); a->sarq(i.d); }
  void emit(sarqi& i) { binary(i); a->sarq(i.s0, i.d); }
  void emit(sbbl& i) { noncommute(i); a->sbbl(i.s0, i.d); }
  void emit(setcc& i) { a->setcc(i.cc, i.d); }
  void emit(shlli& i) { binary(i); a->shll(i.s0, i.d); }
  void emit(shlq& i) { unary(i); a->shlq(i.d); }
  void emit(shlqi& i) { binary(i); a->shlq(i.s0, i.d); }
  void emit(shrli& i) { binary(i); a->shrl(i.s0, i.d); }
  void emit(shrqi& i) { binary(i); a->shrq(i.s0, i.d); }
  void emit(sqrtsd& i) { a->sqrtsd(i.s, i.d); }
  void emit(storedqu& i) { a->movdqu(i.s, i.m); }
  void emit(storeb& i) { a->storeb(i.s, i.m); }
  void emit(storebim& i) { a->storeb(i.s, i.m); }
  void emit(storel& i) { a->storel(i.s, i.m); }
  void emit(storelim& i) { a->storel(i.s, i.m); }
  void emit(storeq& i) { a->storeq(i.s, i.m); }
  void emit(storeqim& i) { a->storeq(i.s, i.m); }
  void emit(storesd& i) { a->movsd(i.s, i.m); }
  void emit(storew& i) { a->storew(i.s, i.m); }
  void emit(storewim& i) { a->storew(i.s, i.m); }
  void emit(subl& i) { noncommute(i); a->subl(i.s0, i.d); }
  void emit(subli& i) { binary(i); a->subl(i.s0, i.d); }
  void emit(subq& i) { noncommute(i); a->subq(i.s0, i.d); }
  void emit(subqi& i) { binary(i); a->subq(i.s0, i.d); }
  void emit(subsd& i) { noncommute(i); a->subsd(i.s0, i.d); }
  void emit(testb& i) { a->testb(i.s0, i.s1); }
  void emit(testbi& i) { a->testb(i.s0, i.s1); }
  void emit(testbim& i) { a->testb(i.s0, i.s1); }
  void emit(testl& i) { a->testl(i.s0, i.s1); }
  void emit(testli& i) { a->testl(i.s0, i.s1); }
  void emit(testlim& i) { a->testl(i.s0, i.s1); }
  void emit(testq& i) { a->testq(i.s0, i.s1); }
  void emit(testqm& i) { a->testq(i.s0, i.s1); }
  void emit(testqim& i) { a->testq(i.s0, i.s1); }
  void emit(ucomisd& i) { a->ucomisd(i.s0, i.s1); }
  void emit(ud2& i) { a->ud2(); }
  void emit(unpcklpd& i) { noncommute(i); a->unpcklpd(i.s0, i.d); }
  void emit(xorb& i) { commute(i); a->xorb(i.s0, i.d); }
  void emit(xorbi& i) { binary(i); a->xorb(i.s0, i.d); }
  void emit(xorq& i) { commute(i); a->xorq(i.s0, i.d); }
  void emit(xorqi& i) { binary(i); a->xorq(i.s0, i.d); }

  // helpers
  // prep: copy s into d (if different) using the width-appropriate move.
  void prep(Reg8 s, Reg8 d) { if (s != d) a->movb(s, d); }
  void prep(Reg32 s, Reg32 d) { if (s != d) a->movl(s, d); }
  void prep(Reg64 s, Reg64 d) { if (s != d) a->movq(s, d); }
  void prep(RegXMM s, RegXMM d) { if (s != d) a->movdqa(s, d); }
  // First byte of the code area a block will be emitted into.
  CodeAddress start(Vlabel b) {
    auto area = unit.blocks[b].area;
    return areas[(int)area].start;
  }
  bool check(Vblock& block);
  CodeBlock& main() { return area(AreaIndex::Main).code; }
  CodeBlock& cold() { return area(AreaIndex::Cold).code; }
  CodeBlock& frozen() { return area(AreaIndex::Frozen).code; }
  template<class Inst> void unary(Inst& i) { prep(i.s, i.d); }
  template<class Inst> void binary(Inst& i) { prep(i.s1, i.d); }
  template<class Inst> void commute(Inst&);
  template<class Inst> void noncommute(Inst&);

private:
  Vasm::Area& area(AreaIndex i) {
    assert((unsigned)i < areas.size());
    return areas[(unsigned)i];
  }

private:
  // Patch records: instr is the address of the emitted instruction whose
  // operand must be fixed up once the target's address (or point) is known.
  struct LabelPatch { CodeAddress instr; Vlabel target; };
  struct PointPatch { CodeAddress instr; Vpoint pos; };
  Vunit& unit;
  BackEnd& backend;
  Vasm::AreaList& areas;
  Vmeta* meta;
  AsmInfo* m_asmInfo;
  X64Assembler* a;       // assembler for the block currently being emitted
  Vlabel next{0}; // in linear order
  jit::vector<CodeAddress> addrs;                   // start address per block
  jit::vector<LabelPatch> jccs, jmps, calls, catches;
  jit::vector<PointPatch> ldpoints;
  jit::hash_map<uint64_t,uint64_t*> cpool;
};
// prepare a binary op that is not commutative. s0 must be a different
// register than s1 so we don't clobber it: after binary() moves s1 into d,
// the op computes d op= s0, which would be wrong if d aliased s0.
template<class Inst> void Vgen::noncommute(Inst& i) {
  assert(i.s1 == i.d || i.s0 != i.d); // do not clobber s0
  binary(i);
}

// prepare a binary op that is commutative. Swap operands if the dest is s0,
// which avoids the clobber entirely instead of asserting on it.
template<class Inst> void Vgen::commute(Inst& i) {
  if (i.s1 != i.d && i.s0 == i.d) {
    i = Inst{i.s1, i.s0, i.d};
  } else {
    binary(i);
  }
}
// Call a fixed address: a near call when the displacement fits, otherwise an
// indirect call through an address stored in the data section.
void Vgen::emit(call i) {
  if (a->jmpDeltaFits(i.target) && !Stats::enabled()) {
    a->call(i.target);
  } else {
    // can't do a near call; store address in data section.
    // call by loading the address using rip-relative addressing. This
    // assumes the data section is near the current code section. Since
    // this sequence is directly in-line, rip-relative like this is
    // more compact than loading a 64-bit immediate.
    auto addr = mcg->allocLiteral((uint64_t)i.target);
    a->call(rip[(intptr_t)addr]);
    assert(((int32_t*)a->frontier())[-1] + a->frontier() == (TCA)addr);
  }
}

// Conditional load: d starts as f, then a conditional load from memory t
// overwrites it when cc holds.
void Vgen::emit(cloadq& i) {
  auto m = i.t;
  always_assert(!m.index.isValid()); // not supported, but could be later.
  if (i.f != i.d) {
    always_assert(i.d != m.base); // don't clobber base
    a->movq(i.f, i.d);
  }
  a->cload_reg64_disp_reg64(i.cc, m.base, m.disp, i.d);
}

// cmov cc t f d => d = cc ? t : f. f is moved into d first, then cmov
// conditionally overwrites it with t.
void Vgen::emit(cmovq i) {
  if (i.f != i.d && i.t == i.d) {
    // negate the condition and swap t/f operands so we dont clobber i.t
    i = {ccNegate(i.cc), i.t, i.f, i.d};
  } else {
    prep(i.f, i.d);
  }
  a->cmov_reg64_reg64(i.cc, i.t, i.d);
}

// Emits a small out-of-line stub that pops the return address into
// fp[m_savedRip] and jumps to target; the main path jumps over the stub and
// calls it, so the pushed return address points just after the call.
void Vgen::emit(contenter& i) {
  Label Stub, End;
  Reg64 fp = i.fp, target = i.target;
  a->jmp8(End);
  asm_label(*a, Stub);
  a->pop(fp[AROFF(m_savedRip)]);
  a->jmp(target);
  asm_label(*a, End);
  a->call(Stub);
  // m_savedRip will point here.
}
// Register-to-register copy between any combination of GP and XMM registers,
// selecting the appropriate x64 move for each pairing. A self-copy emits
// nothing.
void Vgen::emit(copy i) {
  if (i.s == i.d) return;

  auto const srcIsGP = i.s.isGP();
  auto const dstIsGP = i.d.isGP();

  if (srcIsGP && dstIsGP) {
    // GP => GP
    a->movq(i.s, i.d);
  } else if (srcIsGP) {
    // GP => XMM. This generates a movq x86 instruction, which zero extends
    // the 64-bit value in srcReg into a 128-bit XMM register
    a->movq_rx(i.s, i.d);
  } else if (dstIsGP) {
    // XMM => GP
    a->movq_xr(i.s, i.d);
  } else {
    // XMM => XMM. This copies all 128 bits in XMM,
    // thus avoiding partial register stalls
    a->movdqa(i.s, i.d);
  }
}
// Parallel two-register copy (s0,s1) -> (d0,d1). The only hazard with
// distinct destinations is d0 == s1: moving s0 into d0 first would clobber
// s1, so either xchg (full swap) or the reversed move order is used.
void Vgen::emit(copy2& i) {
  assert(i.s0.isValid() && i.s1.isValid() && i.d0.isValid() && i.d1.isValid());
  auto s0 = i.s0, s1 = i.s1, d0 = i.d0, d1 = i.d1;
  assert(d0 != d1);
  if (d0 == s1) {
    if (d1 == s0) {
      // full swap
      a->xchgq(d0, d1);
    } else {
      // could do this in a simplify pass
      if (s1 != d1) a->movq(s1, d1); // save s1 first; d1 != s0
      if (s0 != d0) a->movq(s0, d0);
    }
  } else {
    // could do this in a simplify pass
    if (s0 != d0) a->movq(s0, d0);
    if (s1 != d1) a->movq(s1, d1);
  }
}
// Fill *i.dest with the address of a service-request stub that will bind the
// address to the real translation of i.sk on first use.
void Vgen::emit(bindaddr& i) {
  mcg->setJmpTransID((TCA)i.dest);

  *i.dest = emitEphemeralServiceReq(a->code(),
                                    mcg->getFreeStub(a->code(), &mcg->cgFixups()),
                                    REQ_BIND_ADDR,
                                    i.dest,
                                    i.sk.toAtomicInt(),
                                    TransFlags{}.packed);
  mcg->cgFixups().m_codePointers.insert(i.dest);
}

// Smashable call that binds to the callee's prologue on first use.
void Vgen::emit(bindcall& i) {
  emitBindCall(a->code(), cold(), frozen(), i.sk, i.callee, i.argc);
}

// Conditional side exit that binds its target on first use.
void Vgen::emit(bindexit& i) {
  emitBindSideExit(a->code(), frozen(), i.cc, i.target, i.trflags);
}

// First-time conditional branch: both the jcc and the following jmp go to a
// shared stub that will bind/smash them to their real targets.
void Vgen::emit(bindjcc1& i) {
  backend.prepareForTestAndSmash(a->code(), 0,
                                 TestAndSmashFlags::kAlignJccAndJmp);
  auto const patchAddr = a->frontier();
  auto const jccStub =
    emitEphemeralServiceReq(frozen(),
                            mcg->getFreeStub(frozen(), &mcg->cgFixups()),
                            REQ_BIND_JMPCC_FIRST,
                            RipRelative(patchAddr),
                            i.targets[1],
                            i.targets[0],
                            i.cc,
                            ccServiceReqArgInfo(i.cc));

  mcg->setJmpTransID(a->frontier());
  a->jcc(i.cc, jccStub);

  mcg->setJmpTransID(a->frontier());
  a->jmp(jccStub);
}

// Second conditional branch of a pair: binds just the jcc side.
void Vgen::emit(bindjcc2& i) {
  backend.prepareForSmash(a->code(), kJmpccLen);
  auto def = emitEphemeralServiceReq(frozen(),
                                     mcg->getFreeStub(frozen(),
                                                      &mcg->cgFixups()),
                                     REQ_BIND_JMPCC_SECOND,
                                     RipRelative(a->frontier()),
                                     i.target, i.cc);
  mcg->setJmpTransID(a->frontier());
  a->jcc(i.cc, def);
}

// Unconditional jump bound to its target translation on first use.
void Vgen::emit(bindjmp& i) {
  emitBindJmp(a->code(), frozen(), i.target, i.trflags);
}

// Call a helper stub and record the fixup for the runtime to recover VM
// state if the stub reenters.
void Vgen::emit(callstub& i) {
  emit(call{i.target, i.args});
  emit(syncpoint{i.fix});
}

// Unconditional fallback is just a conditional one with no condition.
void Vgen::emit(fallback& i) {
  emit(fallbackcc{CC_None, i.dest, i.trflags});
}

// Jump back to the source translation's fallback path (optionally with
// custom translation flags, which requires a frozen-area stub).
void Vgen::emit(fallbackcc i) {
  auto const destSR = mcg->tx().getSrcRec(i.dest);
  if (!i.trflags.packed) {
    destSR->emitFallbackJump(a->code(), i.cc);
  } else {
    destSR->emitFallbackJumpCustom(a->code(), frozen(), i.dest, i.trflags);
  }
}

// Smashable call to a func prologue, registered with the profiler so it can
// be retargeted when the prologue is retranslated.
void Vgen::emit(kpcall& i) {
  backend.prepareForSmash(a->code(), kCallLen);
  mcg->tx().profData()->addPrologueMainCaller(i.callee, i.prologIndex,
                                              a->frontier());
  always_assert(backend.isSmashable(a->frontier(), kCallLen));
  a->call(i.target);
}
// Load an immediate into a register. Zero uses the short xorl encoding (or
// movl when the caller needs the flags preserved); XMM immediates are
// materialized via a rip-relative load from the data section.
void Vgen::emit(ldimm& i) {
  auto val = i.s.q();
  if (i.d.isGP()) {
    if (val == 0) {
      Reg64 d = i.d;
      if (i.saveflags) {
        a->movl(0, r32(d)); // movl, unlike xorl, leaves the flags alone
      } else {
        a->xorl(r32(d), r32(d));
      }
    } else {
      a->emitImmReg(i.s, i.d);
    }
  } else if (i.s.q() == 0) {
    a->pxor(i.d, i.d); // does not modify flags
  } else {
    auto addr = mcg->allocLiteral(i.s.q());
    a->movsd(rip[(intptr_t)addr], i.d);
  }
}

// Load the address of point i.s into i.d. The lea is emitted with a zero
// displacement and recorded in ldpoints; the real displacement is patched in
// at the end of emit() once the point's address is known.
void Vgen::emit(ldpoint& i) {
  ldpoints.push_back({a->frontier(), i.s});
  a->lea(rip[0], i.d);
}

// Generic load: GP destinations get loadq, XMM destinations movsd. An FS
// segment override prefix is emitted when the Vptr requests it.
void Vgen::emit(load& i) {
  if (i.s.seg == Vptr::FS) a->fs();
  auto mref = i.s.mr();
  if (i.d.isGP()) {
    a->loadq(mref, i.d);
  } else {
    a->movsd(mref, i.d);
  }
}

// Smashable direct call (padded/aligned so it can be repointed later).
void Vgen::emit(mccall& i) {
  backend.prepareForSmash(a->code(), kCallLen);
  a->call(i.target);
}
// emit smashable mov as part of method cache callsite
void Vgen::emit(mcprep& i) {
  /*
   * For the first time through, set the cache to hold the address
   * of the movq (*2 + 1), so we can find the movq from the handler.
   *
   * We set the low bit for two reasons: the Class* will never be a valid
   * Class*, so we'll always miss the inline check before it's smashed, and
   * handlePrimeCacheMiss can tell it's not been smashed yet
   */
  backend.prepareForSmash(a->code(), MethodCache::kMovLen);
  auto movAddr = a->frontier();
  auto movAddrUInt = reinterpret_cast<uintptr_t>(movAddr);
  a->movq(0x8000000000000000u, i.d);
  auto after = reinterpret_cast<uintptr_t*>(a->frontier());
  after[-1] = (movAddrUInt << 1) | 1;
  mcg->cgFixups().m_addressImmediates.insert(
    reinterpret_cast<TCA>(~movAddrUInt));
}

// Service request asking the runtime to retranslate i.sk with optimizations.
void Vgen::emit(retransopt& i) {
  emitServiceReq(a->code(), REQ_RETRANSLATE_OPT,
                 i.sk.toAtomicInt(), i.id);
}

// Generic store: storeq from GP sources, movsd from XMM sources.
void Vgen::emit(store& i) {
  if (i.s.isGP()) {
    a->storeq(i.s, i.d);
  } else {
    a->movsd(i.s, i.d);
  }
}

// Record a fixup at the current frontier so the unwinder/runtime can
// reconstruct the VM state (pc/sp offsets) at this machine address.
void Vgen::emit(syncpoint i) {
  FTRACE(5, "IR recordSyncPoint: {} {} {}\n", a->frontier(),
         i.fix.pcOffset, i.fix.spOffset);
  mcg->recordSyncPoint(a->frontier(), i.fix.pcOffset,
                       i.fix.spOffset);
}

void Vgen::emit(nocatch& i) {
  // register a null catch trace at this position.
  mcg->registerCatchBlock(a->frontier(), nullptr);
}

void Vgen::emit(unwind& i) {
  // Unwind instructions terminate blocks with calls that can throw, and have
  // the edges to catch (unwinder) blocks and fall-through blocks.
  catches.push_back({a->frontier(), i.targets[1]});
  emit(jmp{i.targets[0]});
}
// overall emitter: lower every block (in the given layout order) to machine
// code, then resolve the recorded jcc/jmp/call/catch/ldpoint patch points,
// and finally publish per-instruction ranges to AsmInfo when enabled.
// Loop indices are size_t to avoid the signed/unsigned comparisons against
// labels.size()/areas.size() (and the int narrowing) the old code had.
void Vgen::emit(jit::vector<Vlabel>& labels) {
  // Some structures here track where we put things just for debug printing.
  struct Snippet {
    const IRInstruction* origin;
    TcaRange range;
  };
  struct BlockInfo {
    jit::vector<Snippet> snippets;
  };

  // This is under the printir tracemod because it mostly shows you IR and
  // machine code, not vasm and machine code (not implemented).
  bool shouldUpdateAsmInfo = !!m_asmInfo
    && Trace::moduleEnabledRelease(HPHP::Trace::printir, kCodeGenLevel);

  std::vector<TransBCMapping>* bcmap = nullptr;
  if (mcg->tx().isTransDBEnabled() || RuntimeOption::EvalJitUseVtuneAPI) {
    bcmap = &mcg->cgFixups().m_bcMap;
  }

  jit::vector<jit::vector<BlockInfo>> areaToBlockInfos;
  if (shouldUpdateAsmInfo) {
    areaToBlockInfos.resize(areas.size());
    for (auto& r : areaToBlockInfos) {
      r.resize(unit.blocks.size());
    }
  }

  for (size_t i = 0, n = labels.size(); i < n; ++i) {
    assert(check(unit.blocks[labels[i]]));
    auto b = labels[i];
    auto& block = unit.blocks[b];
    X64Assembler as { area(block.area).code };
    a = &as;
    auto blockStart = a->frontier();
    addrs[b] = blockStart;

    {
      // Compute the next block we will emit into the current area.
      auto cur_start = start(labels[i]);
      auto j = i + 1;
      while (j < labels.size() && cur_start != start(labels[j])) {
        j++;
      }
      next = j < labels.size() ? labels[j] : Vlabel(unit.blocks.size());
    }

    const IRInstruction* currentOrigin = nullptr;
    auto blockInfo = shouldUpdateAsmInfo
      ? &areaToBlockInfos[unsigned(block.area)][b]
      : nullptr;
    // Open a new snippet (debug range) starting at the current frontier.
    auto start_snippet = [&](Vinstr& inst) {
      if (!shouldUpdateAsmInfo) return;
      blockInfo->snippets.push_back(
        Snippet { inst.origin, TcaRange { a->code().frontier(), nullptr } }
      );
    };
    // Close the open snippet at the current frontier.
    auto finish_snippet = [&] {
      if (!shouldUpdateAsmInfo) return;
      if (!blockInfo->snippets.empty()) {
        auto& snip = blockInfo->snippets.back();
        snip.range = TcaRange { snip.range.start(), a->code().frontier() };
      }
    };

    for (auto& inst : block.code) {
      if (currentOrigin != inst.origin) {
        finish_snippet();
        start_snippet(inst);
        currentOrigin = inst.origin;
      }

      // Record bytecode <-> machine-code mapping at each new source key.
      if (bcmap && inst.origin) {
        auto sk = inst.origin->marker().sk();
        if (bcmap->empty() ||
            bcmap->back().md5 != sk.unit()->md5() ||
            bcmap->back().bcStart != sk.offset()) {
          bcmap->push_back(TransBCMapping{sk.unit()->md5(), sk.offset(),
                                          main().frontier(), cold().frontier(),
                                          frozen().frontier()});
        }
      }

      // Dispatch to the per-opcode emit() overload via the x-macro.
      switch (inst.op) {
#define O(name, imms, uses, defs) \
        case Vinstr::name: emit(inst.name##_); break;
        X64_OPCODES
#undef O
      }
    }

    finish_snippet();
  }

  // All block addresses are known now; resolve the deferred references.
  for (auto& p : jccs) {
    assert(addrs[p.target]);
    X64Assembler::patchJcc(p.instr, addrs[p.target]);
  }
  for (auto& p : jmps) {
    assert(addrs[p.target]);
    X64Assembler::patchJmp(p.instr, addrs[p.target]);
  }
  for (auto& p : calls) {
    assert(addrs[p.target]);
    X64Assembler::patchCall(p.instr, addrs[p.target]);
  }
  for (auto& p : catches) {
    mcg->registerCatchBlock(p.instr, addrs[p.target]);
  }
  for (auto& p : ldpoints) {
    // Patch the 32-bit displacement of the 7-byte rip-relative lea emitted
    // by emit(ldpoint&).
    auto after_lea = p.instr + 7;
    auto d = meta->points[p.pos] - after_lea;
    assert(deltaFits(d, sz::dword));
    ((int32_t*)after_lea)[-1] = d;
  }

  if (!shouldUpdateAsmInfo) {
    return;
  }

  // Publish the collected snippet ranges to AsmInfo, per area, in layout
  // order.
  for (size_t i = 0; i < areas.size(); ++i) {
    auto& blockInfos = areaToBlockInfos[i];
    for (auto const blockID : labels) {
      auto const& blockInfo = blockInfos[static_cast<size_t>(blockID)];
      if (blockInfo.snippets.empty()) continue;

      const IRInstruction* currentOrigin = nullptr;
      for (auto const& snip : blockInfo.snippets) {
        if (currentOrigin != snip.origin && snip.origin) {
          currentOrigin = snip.origin;
        }

        m_asmInfo->updateForInstruction(
          currentOrigin,
          static_cast<AreaIndex>(i),
          snip.range.start(),
          snip.range.end());
      }
    }
  }
}
// Integer -> double conversion. cvtsi2sd only writes the low lane of the
// destination; the pxor zeroes it first, presumably to avoid a partial
// dependency on d's old value (standard practice - confirm against uarch
// docs if this path is ever tuned).
void Vgen::emit(cvtsi2sd& i) {
  a->pxor(i.d, i.d);
  a->cvtsi2sd(i.s, i.d);
}

// Memory-operand variant of the above.
void Vgen::emit(cvtsi2sdm& i) {
  a->pxor(i.d, i.d);
  a->cvtsi2sd(i.s, i.d);
}

// Conditional branch with two targets. If the taken target is the next block
// in layout order, negate the condition so the jcc can fall through instead.
// The jcc is emitted with a placeholder target and patched at the end of
// emit(); the fall-through edge becomes a (possibly elided) jmp.
void Vgen::emit(jcc& i) {
  if (i.targets[1] != i.targets[0]) {
    if (next == i.targets[1]) {
      i = jcc{ccNegate(i.cc), {i.targets[1], i.targets[0]}};
    }
    auto taken = i.targets[1];
    jccs.push_back({a->frontier(), taken});
    a->jcc(i.cc, a->frontier()); // placeholder; patched later
  }
  emit(jmp{i.targets[0]});
}

// Unconditional jump; elided when the target is the next block in layout
// order, otherwise emitted with a placeholder and patched later.
void Vgen::emit(jmp i) {
  if (next == i.target) return;
  jmps.push_back({a->frontier(), i.target});
  a->jmp(a->frontier()); // placeholder; patched later
}

void Vgen::emit(lea& i) {
  // could do this in a simplify pass
  if (i.s.disp == 0 && i.s.base.isValid() && !i.s.index.isValid()) {
    // lea with no displacement or index is just a register copy.
    emit(copy{i.s.base, i.d});
  } else {
    a->lea(i.s, i.d);
  }
}

// 64-bit load, honoring an FS segment override when requested.
void Vgen::emit(loadq& i) {
  if (i.s.seg == Vptr::FS) a->fs();
  a->loadq(i.s.mr(), i.d);
}
// Sanity check: a block must contain exactly one block-ending instruction,
// and it must be the last one. Always returns true so it can live inside
// an assert() at the call site.
bool Vgen::check(Vblock& block) {
  assert(!block.code.empty());
  auto const len = block.code.size();
  // No instruction before the last may terminate the block...
  for (size_t idx = 0; idx + 1 < len; ++idx) {
    assert(!isBlockEnd(block.code[idx]));
  }
  // ...and the final instruction must.
  assert(isBlockEnd(block.code[len - 1]));
  return true;
}
}
// Register a new code area and create its first Vblock, returning the Vout
// stream used to append instructions to it.
Vout& Vasm::add(CodeBlock& cb, AreaIndex area) {
// Areas must be added in AreaIndex order: the next area's index equals the
// number of areas registered so far.
assert(size_t(area) == m_areas.size());
auto b = m_unit.makeBlock(area);
// The very first block created becomes a root of the unit.
if (size_t(b) == 0) m_unit.roots.push_back(b);
Vout v{m_meta, m_unit, b, area};
// Capture cb's current frontier as this area's starting address.
m_areas.push_back(Area{v, cb, cb.frontier()});
return m_areas.back().out;
}
// copy of layoutBlocks in layout.cpp
jit::vector<Vlabel> layoutBlocks(const Vunit& unit) {
  auto blocks = sortBlocks(unit);
  // True when block b belongs to `area` and is not the trailing end{} block.
  auto inAreaNotEnd = [&](Vlabel b, AreaIndex area) {
    auto const& blk = unit.blocks[b];
    return blk.area == area && blk.code.back().op != Vinstr::end;
  };
  // Partition into main / cold / frozen without disturbing relative order;
  // the end{} block always sinks to the very end.
  auto const coldBegin = std::stable_partition(
    blocks.begin(), blocks.end(),
    [&](Vlabel b) { return inAreaNotEnd(b, AreaIndex::Main); });
  std::stable_partition(
    coldBegin, blocks.end(),
    [&](Vlabel b) { return inAreaNotEnd(b, AreaIndex::Cold); });
  return blocks;
}
// Finalize the unit into x64 machine code: fold immediates, allocate
// registers if virtual registers remain, optimize jumps, then lay out the
// blocks and emit.
void Vasm::finishX64(const Abi& abi, AsmInfo* asmInfo) {
// Fold constant-pool immediates back into instructions when a pool exists.
if (!m_unit.cpool.empty()) {
foldImms(m_unit);
}
// Virtual registers still live: run DCE and register allocation.
if (m_unit.hasVrs()) {
Timer _t(Timer::vasm_xls);
removeDeadCode(m_unit);
allocateRegisters(m_unit, abi);
}
if (m_unit.blocks.size() > 1) {
Timer _t(Timer::vasm_jumps);
optimizeJmps(m_unit);
}
Timer _t(Timer::vasm_gen);
auto blocks = layoutBlocks(m_unit);
Vgen(m_unit, m_areas, m_meta, asmInfo).emit(blocks);
}
// Register budget for Vauto-generated stubs: rAsm/r11 as GP scratch and
// xmm5-7 as SIMD scratch. Every other register in the x64 ABI is marked
// reserved (i.e. must be preserved by generated code).
auto const vauto_gp = RegSet(rAsm).add(r11);
auto const vauto_simd = RegSet(xmm5).add(xmm6).add(xmm7);
UNUSED const Abi vauto_abi {
.gpUnreserved = vauto_gp,
.gpReserved = x64::abi.gp() - vauto_gp,
.simdUnreserved = vauto_simd,
.simdReserved = x64::abi.simd() - vauto_simd,
.calleeSaved = x64::abi.calleeSaved
};
// On destruction, emit the collected code if any block is nonempty;
// otherwise do nothing (a Vauto that emitted nothing is a no-op).
Vauto::~Vauto() {
UNUSED auto& areas = this->areas();
for (auto& b : unit().blocks) {
if (!b.code.empty()) {
// found at least one nonempty block. finish up.
// Main is closed with end{} if the author didn't; cold/frozen (when
// present) must already be empty or explicitly closed.
if (!main().closed()) main() << end{};
assert(areas.size() < 2 || cold().empty() || cold().closed());
assert(areas.size() < 3 || frozen().empty() || frozen().closed());
Trace::Bump bumper{Trace::printir, 10}; // prevent spurious printir
finishX64(vauto_abi, nullptr);
return;
}
}
}
// Render a Vreg for debug output: physical registers print their
// architectural name, virtual registers print as %N.
std::string format(Vreg r) {
  if (!r.isPhys()) {
    std::ostringstream out;
    out << "%" << size_t(r);
    return out.str();
  }
  if (r.isGP()) {
    Reg64 gp = r;
    return regname(gp);
  }
  RegXMM simd = r;
  return regname(simd);
}
// Return the tuple of values defined by the phidef instruction that must be
// the first instruction of block b.
Vtuple findDefs(const Vunit& unit, Vlabel b) {
assert(!unit.blocks[b].code.empty() &&
unit.blocks[b].code.front().op == Vinstr::phidef);
return unit.blocks[b].code.front().phidef_.defs;
}
}}
|
#include <gpd_ros/grasp_detection_server_prl.h>
// Construct the grasp-detection service node: builds the GPD detector from
// the "config_file" ROS parameter, reads the camera view point from that
// config, optionally creates an rviz plotter, and subscribes to the
// point-cloud topic.
GraspDetectionServerPRL::GraspDetectionServerPRL(ros::NodeHandle& node){
std::string cfg_file;
node.param("config_file", cfg_file, std::string(""));
// NOTE(review): raw new with no delete visible in this file for
// grasp_detector_/rviz_plotter_ — presumably released in the destructor
// (not shown); confirm, otherwise these leak on shutdown.
grasp_detector_ = new gpd::GraspDetector(cfg_file);
// Read parameters from configuration file.
gpd::util::ConfigFile config_file(cfg_file);
config_file.ExtractKeys();
std::vector<double> camera_position =
config_file.getValueOfKeyAsStdVectorDouble("camera_position",
"0.0 0.0 0.0");
view_point_ << camera_position[0], camera_position[1], camera_position[2];
// rviz visualization is enabled only when a topic name is configured.
std::string rviz_topic;
node.param("rviz_topic", rviz_topic, std::string(""));
if (!rviz_topic.empty()) {
rviz_plotter_ = new GraspPlotter(node, grasp_detector_->getHandSearchParameters().hand_geometry_);
use_rviz_ = true;
}
else {
use_rviz_ = false;
}
std::string cloud_topic;
node.param("cloud_topic", cloud_topic, std::string("/camera/depth_registered/points"));
cloud_sub_ = node.subscribe(cloud_topic, 1, &GraspDetectionServerPRL::cloud_callback, this);
node.getParam("workspace", workspace_);
}
// Service handler: run grasp detection on the most recently received cloud.
// Returns true (with grasps in res) only when a cloud was available AND at
// least one grasp was found; consumes the pending cloud either way.
bool GraspDetectionServerPRL::detectGrasps(gpd_ros::detect_grasps_prl::Request& req, gpd_ros::detect_grasps_prl::Response& res){
ROS_INFO("Received service request ...");
if (has_cloud_){
// Detect grasps in point cloud.
std::vector<std::unique_ptr<gpd::candidate::Hand>> grasps = detectGraspPoses();
// Reset the system.
has_cloud_ = false;
ROS_INFO("Waiting for point cloud to arrive ...");
if (grasps.size() > 0){
ROS_INFO("VISUALIZING");
// Visualize the detected grasps in rviz.
if (use_rviz_){
rviz_plotter_->drawGrasps(grasps, frame_);
}
// Publish the detected grasps.
gpd_ros::GraspConfigList selected_grasps_msg = GraspMessages::createGraspListMsg(grasps, cloud_camera_header_);
res.grasp_configs = selected_grasps_msg;
ROS_INFO_STREAM("Detected " << selected_grasps_msg.grasps.size() << " highest-scoring grasps.");
return true;
}
}
ROS_WARN("No grasps detected!");
return false;
}
// Point-cloud subscriber callback: wraps the incoming message in a
// gpd::util::Cloud (preserving normals when the message carries them) and
// marks it pending for the next detectGrasps service call.
//
// Fix: the previous code deleted the old cloud only when !has_cloud_, so a
// new message arriving before the pending cloud was consumed leaked the old
// gpd::util::Cloud on reassignment. Delete unconditionally before
// replacing (delete on nullptr is a no-op).
void GraspDetectionServerPRL::cloud_callback(const sensor_msgs::PointCloud2& msg){
  delete cloud_camera_;
  cloud_camera_ = nullptr;
  // One view point per cloud: the camera position read from the config.
  Eigen::Matrix3Xd view_points(3,1);
  view_points.col(0) = view_point_;
  // Six fields (xyz + normal_xyz) means the cloud already carries normals.
  if (msg.fields.size() == 6 && msg.fields[3].name == "normal_x" && msg.fields[4].name == "normal_y"
      && msg.fields[5].name == "normal_z"){
    PointCloudPointNormal::Ptr cloud(new PointCloudPointNormal);
    pcl::fromROSMsg(msg, *cloud);
    cloud_camera_ = new gpd::util::Cloud(cloud, 0, view_points);
    cloud_camera_header_ = msg.header;
    // ROS_INFO_STREAM("Received cloud with " << cloud_camera_->getCloudProcessed()->size() << " points and normals.");
  }
  else{
    PointCloudRGBA::Ptr cloud(new PointCloudRGBA);
    pcl::fromROSMsg(msg, *cloud);
    cloud_camera_ = new gpd::util::Cloud(cloud, 0, view_points);
    cloud_camera_header_ = msg.header;
    ROS_INFO_STREAM("Received cloud with " << cloud_camera_->getCloudProcessed()->size() << " points.");
  }
  has_cloud_ = true;
  frame_ = msg.header.frame_id;
}
// Run the GPD pipeline on the stored cloud: preprocess, then detect, and
// hand the candidate grasps back to the caller.
std::vector<std::unique_ptr<gpd::candidate::Hand>> GraspDetectionServerPRL::detectGraspPoses(){
  grasp_detector_->preprocessPointCloud(*cloud_camera_);
  return grasp_detector_->detectGrasps(*cloud_camera_);
}
// Entry point: set up the ROS node, construct the server (which subscribes
// to the cloud topic), advertise the detect_grasps service, and spin.
int main(int argc, char** argv){
// seed the random number generator
std::srand(std::time(0));
// initialize ROS
ros::init(argc, argv, "detect_grasps_server_prl");
ros::NodeHandle node("~");
GraspDetectionServerPRL grasp_detection_server_prl(node);
// Keep `service` alive for the lifetime of spin(): destroying the handle
// would unadvertise the service.
ros::ServiceServer service = node.advertiseService("detect_grasps", &GraspDetectionServerPRL::detectGrasps,
&grasp_detection_server_prl);
ROS_INFO("Ready to detect Grasps");
ros::spin();
return 0;
}
|
/*
* Copyright (c) 2008, Willow Garage, Inc.
* Copyright (c) 2017, Open Source Robotics Foundation, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef RVIZ_RENDERING__OBJECTS__AXES_HPP_
#define RVIZ_RENDERING__OBJECTS__AXES_HPP_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include <OgreVector3.h>
#include "object.hpp"
#include "shape.hpp"
#include "rviz_rendering/visibility_control.hpp"
namespace Ogre
{
class SceneManager;
class SceneNode;
class Quaternion;
class Any;
class ColourValue;
}
namespace rviz_rendering
{
/**
 * \class Axes
 * \brief An object that displays a set of X/Y/Z axes, with X=Red, Y=Green, Z=Blue
 */
class Axes : public Object
{
public:
/**
 * \brief Constructor
 * @param manager Scene manager this object is a part of
 * @param parent_node A scene node to use as the parent of this object. If NULL, uses the root scene node.
 * @param length Length of the axes
 * @param radius Radius of the axes
 */
RVIZ_RENDERING_PUBLIC
explicit Axes(
Ogre::SceneManager * manager,
Ogre::SceneNode * parent_node = nullptr,
float length = 1.0f,
float radius = 0.1f);
~Axes() override;
/**
 * \brief Set the parameters on this object
 *
 * @param length Length of the axes
 * @param radius Radius of the axes
 */
RVIZ_RENDERING_PUBLIC
void set(float length, float radius);
// Object interface overrides (see object.hpp for the base contract).
RVIZ_RENDERING_PUBLIC
void setOrientation(const Ogre::Quaternion & orientation) override;
RVIZ_RENDERING_PUBLIC
void setPosition(const Ogre::Vector3 & position) override;
RVIZ_RENDERING_PUBLIC
void setScale(const Ogre::Vector3 & scale) override;
RVIZ_RENDERING_PUBLIC
void setColor(float r, float g, float b, float a) override;
RVIZ_RENDERING_PUBLIC
const Ogre::Vector3 & getPosition() override;
RVIZ_RENDERING_PUBLIC
const Ogre::Quaternion & getOrientation() override;
/**
 * \brief Get the scene node associated with this object
 * @return The scene node associated with this object
 */
Ogre::SceneNode * getSceneNode() {return scene_node_;}
/**
 * \brief Sets user data on all ogre objects we own
 */
RVIZ_RENDERING_PUBLIC
void setUserData(const Ogre::Any & data) override;
// Direct access to the per-axis shapes (e.g. for tests or customization).
RVIZ_RENDERING_PUBLIC
Shape & getXShape() {return *x_axis_;}
RVIZ_RENDERING_PUBLIC
Shape & getYShape() {return *y_axis_;}
RVIZ_RENDERING_PUBLIC
Shape & getZShape() {return *z_axis_;}
// Per-axis color setters; setToDefaultColors() restores the standard
// X=Red/Y=Green/Z=Blue scheme exposed by the getDefault*Color() accessors.
RVIZ_RENDERING_PUBLIC
void setXColor(const Ogre::ColourValue & col);
RVIZ_RENDERING_PUBLIC
void setYColor(const Ogre::ColourValue & col);
RVIZ_RENDERING_PUBLIC
void setZColor(const Ogre::ColourValue & col);
RVIZ_RENDERING_PUBLIC
void setToDefaultColors();
RVIZ_RENDERING_PUBLIC
static const Ogre::ColourValue & getDefaultXColor();
RVIZ_RENDERING_PUBLIC
static const Ogre::ColourValue & getDefaultYColor();
RVIZ_RENDERING_PUBLIC
static const Ogre::ColourValue & getDefaultZColor();
private:
// prohibit copying
Axes(const Axes & other)
: Object(nullptr) {(void) other;}
Axes & operator=(const Axes & other) {(void) other; return *this;}
Ogre::SceneNode * scene_node_;
std::unique_ptr<Shape> x_axis_; ///< Cylinder for the X-axis
std::unique_ptr<Shape> y_axis_; ///< Cylinder for the Y-axis
std::unique_ptr<Shape> z_axis_; ///< Cylinder for the Z-axis
static const Ogre::ColourValue default_x_color_;
static const Ogre::ColourValue default_y_color_;
static const Ogre::ColourValue default_z_color_;
};
} // namespace rviz_rendering
#endif // RVIZ_RENDERING__OBJECTS__AXES_HPP_
|
// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "test_utils.h"
#include <intel_gpu/primitives/input_layout.hpp>
#include <intel_gpu/primitives/convolution.hpp>
#include <intel_gpu/primitives/data.hpp>
#include <intel_gpu/primitives/reorder.hpp>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <iomanip>
#include <thread>
#include <type_traits>
#include <fstream>
using namespace cldnn;
using namespace ::tests;
namespace cldnn
{
// Map the test half-precision type onto cldnn's f16 data type so the
// templated tests below can resolve type_to_data_type<FLOAT16>::value.
template<> struct type_to_data_type<FLOAT16> { static const data_types value = data_types::f16; };
}
// Concatenate five 1x1x4x3 inputs of mixed element types (f32, i32, i8,
// f16, i64) along the feature axis into an f32 output and check the result
// element-wise against the expected values.
TEST(concat_gpu, mixed_input_types) {
auto& engine = get_test_engine();
auto input0 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
auto input1 = engine.allocate_memory({ data_types::i32, format::bfyx, { 1, 1, 4, 3 } });
auto input2 = engine.allocate_memory({ data_types::i8, format::bfyx, { 1, 1, 4, 3 } });
auto input3 = engine.allocate_memory({ data_types::f16, format::bfyx, { 1, 1, 4, 3 } });
auto input4 = engine.allocate_memory({ data_types::i64, format::bfyx, { 1, 1, 4, 3 } });
set_values<float>(input0, { 1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f });
set_values<int32_t>(input1, { 11, 12, 13, 14, 12, 12, 13, 14, 13, 13, 13, 15 });
set_values<int8_t>(input2, { 21, 22, 23, 24, 22, 22, 23, 24, 23, 23, 23, 25 });
set_values(input3, { half_t(31.f), half_t(32.f), half_t(33.f),
half_t(34.f), half_t(32.f), half_t(32.f),
half_t(33.f), half_t(34.f), half_t(33.f),
half_t(33.f), half_t(33.f), half_t(35.f) });
set_values<int64_t>(input4, { 41, 42, 43, 44, 42, 42, 43, 44, 43, 43, 43, 45 });
// Expected output: the five inputs stacked along f, all converted to f32.
VF<float> output_vec = {
1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f,
11.0f, 12.0f, 13.0f, 14.0f, 12.0f, 12.0f, 13.0f, 14.0f, 13.0f, 13.0f, 13.0f, 15.0f,
21.0f, 22.0f, 23.0f, 24.0f, 22.0f, 22.0f, 23.0f, 24.0f, 23.0f, 23.0f, 23.0f, 25.0f,
31.0f, 32.0f, 33.0f, 34.0f, 32.0f, 32.0f, 33.0f, 34.0f, 33.0f, 33.0f, 33.0f, 35.0f,
41.0f, 42.0f, 43.0f, 44.0f, 42.0f, 42.0f, 43.0f, 44.0f, 43.0f, 43.0f, 43.0f, 45.0f };
topology topology(
input_layout("input0", input0->get_layout()),
input_layout("input1", input1->get_layout()),
input_layout("input2", input2->get_layout()),
input_layout("input3", input3->get_layout()),
input_layout("input4", input4->get_layout()),
concatenation("concat",
{ "input0", "input1", "input2", "input3", "input4" },
concatenation::concatenation_axis::along_f,
data_types::f32,
"",
padding{ { 0,0,0,0 }, 0 })
);
network network(engine, topology);
network.set_input_data("input0", input0);
network.set_input_data("input1", input1);
network.set_input_data("input2", input2);
network.set_input_data("input3", input3);
network.set_input_data("input4", input4);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "concat");
auto output_memory = outputs.at("concat").get_memory();
auto output_layout = output_memory->get_layout();
cldnn::mem_lock<float> output_ptr(output_memory, get_test_stream());
int y_size = output_layout.size.spatial[1];
int x_size = output_layout.size.spatial[0];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::bfyx);
EXPECT_EQ(y_size, 3);
EXPECT_EQ(x_size, 4);
EXPECT_EQ(f_size, 5);
EXPECT_EQ(b_size, 1);
for (size_t x = 0; x < output_layout.count(); ++x) {
EXPECT_EQ(output_vec[x], output_ptr[x]);
}
}
// Same concat-along-f check as above but with 5D (bfzyx) f16 inputs and an
// f32 output, verifying the 5D path element-wise.
TEST(concat_gpu, mixed_input_types_5d) {
auto& engine = get_test_engine();
auto input0 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } });
auto input1 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } });
auto input2 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } });
auto input3 = engine.allocate_memory({ data_types::f16, format::bfzyx, { 1, 1, 1, 4, 3 } });
set_values(input0, { half_t(1.0f), half_t(2.0f), half_t(3.0f),
half_t(4.0f), half_t(2.0f), half_t(2.0f),
half_t(3.0f), half_t(4.0f), half_t(3.0f),
half_t(3.0f), half_t(3.0f), half_t(5.0f) });
set_values(input1, { half_t(11), half_t(12), half_t(13),
half_t(14), half_t(12), half_t(12),
half_t(13), half_t(14), half_t(13),
half_t(13), half_t(13), half_t(15) });
set_values(input2, { half_t(21), half_t(22), half_t(23),
half_t(24), half_t(22), half_t(22),
half_t(23), half_t(24), half_t(23),
half_t(23), half_t(23), half_t(25) });
set_values(input3, { half_t(31.f), half_t(32.f), half_t(33.f),
half_t(34.f), half_t(32.f), half_t(32.f),
half_t(33.f), half_t(34.f), half_t(33.f),
half_t(33.f), half_t(33.f), half_t(35.f) });
// Expected output: the four inputs stacked along f, converted to f32.
VF<float> output_vec = {
1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f,
11.0f, 12.0f, 13.0f, 14.0f, 12.0f, 12.0f, 13.0f, 14.0f, 13.0f, 13.0f, 13.0f, 15.0f,
21.0f, 22.0f, 23.0f, 24.0f, 22.0f, 22.0f, 23.0f, 24.0f, 23.0f, 23.0f, 23.0f, 25.0f,
31.0f, 32.0f, 33.0f, 34.0f, 32.0f, 32.0f, 33.0f, 34.0f, 33.0f, 33.0f, 33.0f, 35.0f };
topology topology(
input_layout("input0", input0->get_layout()),
input_layout("input1", input1->get_layout()),
input_layout("input2", input2->get_layout()),
input_layout("input3", input3->get_layout()),
concatenation("concat",
{ "input0", "input1", "input2", "input3" },
concatenation::concatenation_axis::along_f,
data_types::f32,
"",
padding{ { 0,0,0,0 }, 0 })
);
network network(engine, topology);
network.set_input_data("input0", input0);
network.set_input_data("input1", input1);
network.set_input_data("input2", input2);
network.set_input_data("input3", input3);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "concat");
auto output_memory = outputs.at("concat").get_memory();
auto output_layout = output_memory->get_layout();
cldnn::mem_lock<float> output_ptr(output_memory, get_test_stream());
int z_size = output_layout.size.spatial[2];
int y_size = output_layout.size.spatial[1];
int x_size = output_layout.size.spatial[0];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::bfzyx);
EXPECT_EQ(z_size, 3);
EXPECT_EQ(y_size, 4);
EXPECT_EQ(x_size, 1);
EXPECT_EQ(f_size, 4);
EXPECT_EQ(b_size, 1);
for (size_t x = 0; x < output_layout.count(); ++x) {
EXPECT_EQ(output_vec[x], output_ptr[x]);
}
}
// i8 path: two inputs each go through a 2x2 max-pool, the pooled results
// are concatenated along f, then reordered to yxfb. Exercises the concat
// in-place optimization with optimize_data(true).
TEST(concat_gpu, i8_optimization_with_pool) {
auto& engine = get_test_engine();
auto input0 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 1, 8, 3}});
auto input1 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 1, 8, 3}});
set_values<int8_t>(input0, { 11, 12, 13,
14, 12, 12,
13, -14, 13,
13, -13, 15,
16, -16, -13,
-14, 12, 11,
16, -14, -13,
18, -13, -15, });
set_values<int8_t>(input1, { 11, 12, 13,
15, 12, 12,
13, 14, 12,
13, 13, 15,
12, 14, 13,
14, 17, 18,
13, 14, 11,
13, 13, 15 });
VF<int8_t> output_vec = {13, 13, 13, 13, 15, 15,
16, 15, 16, 14, 13, 14,
13, 14, 13, 18, 16, 18,
16, 15, 16, 15, 18, 14,
18, 14, -13, 15};
layout reorder_layout(data_types::i8, format::yxfb, {7, 2, 2, 1});
topology topology(input_layout("input0", input0->get_layout()),
input_layout("input1", input1->get_layout()),
pooling("pool0", "input0", pooling_mode::max, {1, 1, 2, 2}, {1, 1, 1, 1}),
pooling("pool1", "input1", pooling_mode::max, {1, 1, 2, 2}, {1, 1, 1, 1}),
concatenation("concat",
{"pool0", "pool1"},
concatenation::concatenation_axis::along_f,
data_types::i8,
"",
padding{{0, 0, 0, 0}, 0}),
reorder("reorder", "concat", reorder_layout));
cldnn::build_options options;
options.set_option(cldnn::build_option::optimize_data(true));
network network(engine, topology, options);
network.set_input_data("input0", input0);
network.set_input_data("input1", input1);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "reorder");
auto output_memory = outputs.at("reorder").get_memory();
auto output_layout = output_memory->get_layout();
cldnn::mem_lock<int8_t> output_ptr(output_memory, get_test_stream());
// NOTE(review): y_size/x_size read spatial[0]/spatial[1] here, the reverse
// of the other tests in this file (which use spatial[1] for y) — confirm
// whether the names are merely swapped or the indices are wrong.
int y_size = output_layout.size.spatial[0];
int x_size = output_layout.size.spatial[1];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::yxfb);
EXPECT_EQ(y_size, 7);
EXPECT_EQ(x_size, 2);
EXPECT_EQ(f_size, 2);
EXPECT_EQ(b_size, 1);
for (size_t x = 0; x < output_layout.count(); ++x) {
EXPECT_EQ(output_vec[x], output_ptr[x]);
}
}
// i8 path: three 1-feature inputs concatenated along f feed a 3-input-
// channel convolution (stride 2x1); verifies the concat optimization
// composes correctly with a consuming convolution.
TEST(concat_gpu, i8_optimization_with_conv) {
// Filter : 3x2x3
// Stride : 2x1
// Input1 : 4x5
// Input2 : 4x5
// Input3 : 4x5
// Concat output : 3x4x5
// Conv input : 3x4x5
// Output : 2x3
//
// Input0:
// 1 2 3 -4 5
// 2 2 3 4 -6
// -3 3 3 5 1
// -1 1 1 1 -1
// Input1:
// 5 5 3 -4 5
// 2 -2 5 4 6
// 6 1 3 5 1
// 1 2 -3 -4 5
// Input2:
// -2 1 3 2 -5
// 1 2 -2 4 2
// 3 5 3 -3 1
// 5 4 3 2 1
//
// Filter:
// 1 2 1 1 2 1 1 2 1
// 2 1 2 2 1 2 2 1 2
//
// Output:
// 53 54 30
// 52 47 37
auto& engine = get_test_engine();
auto input0 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 1, 5, 4}});
auto input1 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 1, 5, 4}});
auto input2 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 1, 5, 4}});
auto weights = engine.allocate_memory({ data_types::i8, format::bfyx, { 1, 3, 3, 2 } });
set_values<int8_t>(weights, { 1, 2, 1,
2, 1, 2, 1, 2, 1,
2, 1, 2, 1, 2, 1,
2, 1, 2 });
set_values<int8_t>(input0, { 1, 2, 3, -4, 5,
2, 2, 3, 4, -6,
-3, 3, 3, 5, 1,
-1, 1, 1, 1, -1 });
set_values<int8_t>(input1, { 5, 5, 3, -4, 5,
2, -2, 5, 4, 6,
6, 1, 3, 5, 1,
1, 2, -3, -4, 5 });
set_values<int8_t>(input2, { -2, 1, 3, 2, -5,
1, 2, -2, 4, 2,
3, 5, 3, -3, 1,
5, 4, 3, 2, 1 });
VF<int8_t> output_vec = { 53, 54, 30, 52, 47, 37 };
layout reorder_layout(data_types::i8, format::bfyx, {1, 1, 2, 3});
topology topology(input_layout("input0", input0->get_layout()),
input_layout("input1", input1->get_layout()),
input_layout("input2", input2->get_layout()),
concatenation("concat",
{"input0", "input1", "input2"},
concatenation::concatenation_axis::along_f,
data_types::i8,
"",
padding{{0, 0, 0, 0}, 0}),
data("weights", weights),
convolution("conv", "concat", { "weights" }, { 1,1,1,2 }),
reorder("output", "conv", reorder_layout));
cldnn::build_options options;
options.set_option(cldnn::build_option::optimize_data(true));
network network(engine, topology, options);
network.set_input_data("input0", input0);
network.set_input_data("input1", input1);
network.set_input_data("input2", input2);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "output");
auto output_memory = outputs.at("output").get_memory();
auto output_layout = output_memory->get_layout();
cldnn::mem_lock<int8_t> output_ptr(output_memory, get_test_stream());
int y_size = output_layout.size.spatial[1];
int x_size = output_layout.size.spatial[0];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::bfyx);
EXPECT_EQ(y_size, 2);
EXPECT_EQ(x_size, 3);
EXPECT_EQ(f_size, 1);
EXPECT_EQ(b_size, 1);
for (size_t x = 0; x < output_layout.count(); ++x) {
EXPECT_EQ(output_vec[x], output_ptr[x]);
}
}
// i8 path: two 16-feature inputs are max-pooled, concatenated along f
// (32 features), then convolved; verifies pool -> concat -> conv fusion
// with optimize_data(true).
TEST(concat_gpu, i8_optimization_with_pool_conv) {
// Filter : 32x2x1
// Input offset : 0x0x-1x0
// Stride : 1x1
// Input0 : 16x3x2
// Input1 : 16x3x2
// Output : 1x1x3
//
// Input0:
// -3 6 0 2 -1 -1 6 0 5 4 1 6 2 4 0 5
// -2 -1 1 0 2 3 3 3 6 2 4 7 3 6 7 -1
// 7 7 5 -3 1 -1 5 4 0 3 -2 6 2 5 2 4
// 5 -1 3 6 2 0 -3 -1 0 3 0 -1 1 6 1 6
// 5 -2 2 -1 5 6 3 4 1 0 6 6 7 2 6 3
// 6 7 -1 5 5 6 -1 0 -1 5 5 2 3 -1 -3 4
//
// Input1:
// 4 -2 0 0 6 2 0 4 6 4 4 4 -3 -1 4 -3
// 1 0 -1 5 -1 1 4 2 7 7 0 2 3 4 -1 3
// 7 7 2 -3 -1 5 -2 2 6 -3 0 7 0 3 3 3
// -1 0 -2 -2 7 -3 -3 -1 5 0 3 4 0 -1 2 5
// 2 -1 2 -3 0 -3 -3 2 4 3 3 5 5 7 5 1
// 2 2 -3 6 6 7 1 -1 -2 5 1 -1 4 5 -3 -2
//
// Filters:
// -1, 2, -2, 2, -2, 1, 1, 0, -1, 1, 2, -2, 2, 1, -2, 0,
// 0, -2, -2, -2, -2, -1, 2, 1, 2, -1, -1, 0, 2, -2, -2, 1,
// 0, -2, 0, 1, -2, -1, -2, 0, -1, -1, -2, 1, -2, 0, 1, 2,
// 2, 2, 2, -2, 0, 2, 1, -2, -1, -1, 0, -2, 2, -1, 2, -1
//
// Output:
// -14, -35, -10
auto& engine = get_test_engine();
auto input0 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 16, 3, 2}});
auto input1 = engine.allocate_memory({data_types::i8, format::bfyx, {1, 16, 3, 2}});
auto weights = engine.allocate_memory({data_types::i8, format::bfyx, {1, 32, 2, 1}});
set_values<int8_t>(weights, {-1, 2, -2, 2, -2, 1, 1, 0, -1, 1, 2, -2, 2, 1, -2, 0, 0, -2, -2, -2, -2, -1, 2, 1, 2, -1, -1, 0, 2, -2, -2, 1,
0, -2, 0, 1, -2, -1, -2, 0, -1, -1, -2, 1, -2, 0, 1, 2, 2, 2, 2, -2, 0, 2, 1, -2, -1, -1, 0, -2, 2, -1, 2, -1});
set_values<int8_t>(input0, {-3, 6, 0, 2, -1, -1, 6, 0, 5, 4, 1, 6, 2, 4, 0, 5,
-2, -1, 1, 0, 2, 3, 3, 3, 6, 2, 4, 7, 3, 6, 7, -1,
7, 7, 5, -3, 1, -1, 5, 4, 0, 3, -2, 6, 2, 5, 2, 4,
5, -1, 3, 6, 2, 0, -3, -1, 0, 3, 0, -1, 1, 6, 1, 6,
5, -2, 2, -1, 5, 6, 3, 4, 1, 0, 6, 6, 7, 2, 6, 3,
6, 7, -1, 5, 5, 6, -1, 0, -1, 5, 5, 2, 3, -1, -3, 4 });
set_values<int8_t>(input1, { 4, -2, 0, 0, 6, 2, 0, 4, 6, 4, 4, 4, -3, -1, 4, -3,
1, 0, -1, 5, -1, 1, 4, 2, 7, 7, 0, 2, 3, 4, -1, 3,
7, 7, 2, -3, -1, 5, -2, 2, 6, -3, 0, 7, 0, 3, 3, 3,
-1, 0, -2, -2, 7, -3, -3, -1, 5, 0, 3, 4, 0, -1, 2, 5,
2, -1, 2, -3, 0, -3, -3, 2, 4, 3, 3, 5, 5, 7, 5, 1,
2, 2, -3, 6, 6, 7, 1, -1, -2, 5, 1, -1, 4, 5, -3, -2});
VF<int8_t> output_vec = { -14, -35, -10 };
layout reorder_layout(data_types::i8, format::bfyx, {1, 1, 3, 1});
topology topology(input_layout("input0", input0->get_layout()),
input_layout("input1", input1->get_layout()),
pooling("pool0", "input0", pooling_mode::max, {1, 1, 2, 2}, {1, 1, 1, 1}),
pooling("pool1", "input1", pooling_mode::max, {1, 1, 2, 2}, {1, 1, 1, 1}),
concatenation("concat",
{"pool0", "pool1"},
concatenation::concatenation_axis::along_f,
data_types::i8,
"",
padding{{0, 0, 0, 0}, 0}),
data("weights", weights),
convolution("conv", "concat", {"weights"}, {1, 1, 1, 1}, tensor{{0, 0, 1, 0}, 0}),
reorder("output", "conv", reorder_layout) );
cldnn::build_options options;
options.set_option(cldnn::build_option::optimize_data(true));
network network(engine, topology, options);
network.set_input_data("input0", input0);
network.set_input_data("input1", input1);
auto outputs = network.execute();
EXPECT_EQ(outputs.size(), size_t(1));
EXPECT_EQ(outputs.begin()->first, "output");
auto output_memory = outputs.at("output").get_memory();
auto output_layout = output_memory->get_layout();
cldnn::mem_lock<int8_t> output_ptr(output_memory, get_test_stream());
// NOTE(review): spatial[0]/spatial[1] are read in the opposite order from
// the bfyx tests above — confirm intended (the EXPECTs below match this
// ordering).
int y_size = output_layout.size.spatial[0];
int x_size = output_layout.size.spatial[1];
int f_size = output_layout.size.feature[0];
int b_size = output_layout.size.batch[0];
EXPECT_EQ(output_layout.format, format::bfyx);
EXPECT_EQ(y_size, 3);
EXPECT_EQ(x_size, 1);
EXPECT_EQ(f_size, 1);
EXPECT_EQ(b_size, 1);
for (size_t x = 0; x < output_layout.count(); ++x) {
EXPECT_EQ(output_vec[x], output_ptr[x]);
}
}
using TestParamType_concat = ::testing::tuple<size_t, // 0 - Input Batch size
std::vector<size_t>, // 1 - Inputs Features Sizes
size_t, // 2 - Input Y Size
size_t>; // 3 - Input X Size
// Parameterized fixture base; builds readable test names of the form
// in<batch>x<f0>_<f1>_...x<y>x<x> from the parameter tuple.
struct concat_gpu : public ::testing::TestWithParam<TestParamType_concat>
{
static std::string
PrintToStringParamName(testing::TestParamInfo<TestParamType_concat> param_info)
{
std::string in;
// Join all feature sizes with '_' (last element appended without a
// trailing separator; the features vector is never empty in practice).
for (size_t i = 0; i < testing::get<1>(param_info.param).size() - 1; i++) {
in += std::to_string(testing::get<1>(param_info.param)[i]) + "_";
}
in += std::to_string(testing::get<1>(param_info.param)[testing::get<1>(param_info.param).size() - 1]);
return "in" + std::to_string(testing::get<0>(param_info.param))
+ "x" + in + "x" + std::to_string(testing::get<2>(param_info.param))
+ 'x' + std::to_string(testing::get<3>(param_info.param));
}
};
// Shared parameter grid for the concat tests: feature counts straddle
// common vectorization boundaries (15/16, 31/32, 63/64, 65) in both input
// positions, plus 3- and 4-input combinations.
static const auto concat_gpu_all_params = ::testing::Values(
// Input Batch, Input Features, Input Y, Input X
TestParamType_concat(2, { 2, 15 }, 2, 1),
TestParamType_concat(2, { 2, 31 }, 2, 1),
TestParamType_concat(2, { 2, 32 }, 2, 1),
TestParamType_concat(2, { 2, 37 }, 2, 1),
TestParamType_concat(2, { 2, 63 }, 2, 1),
TestParamType_concat(2, { 2, 64 }, 2, 1),
TestParamType_concat(2, { 2, 65 }, 2, 1),
TestParamType_concat(2, { 2, 75 }, 2, 1),
TestParamType_concat(2, { 15, 2 }, 2, 1),
TestParamType_concat(2, { 31, 2 }, 2, 1),
TestParamType_concat(2, { 32, 2 }, 2, 1),
TestParamType_concat(2, { 37, 2 }, 2, 1),
TestParamType_concat(2, { 63, 2 }, 2, 1),
TestParamType_concat(2, { 64, 2 }, 2, 1),
TestParamType_concat(2, { 65, 2 }, 2, 1),
TestParamType_concat(2, { 75, 2 }, 2, 1),
TestParamType_concat(2, { 2, 15 }, 1, 2),
TestParamType_concat(2, { 2, 31 }, 1, 2),
TestParamType_concat(2, { 2, 32 }, 1, 2),
TestParamType_concat(2, { 2, 37 }, 1, 2),
TestParamType_concat(2, { 2, 63 }, 1, 2),
TestParamType_concat(2, { 2, 64 }, 1, 2),
TestParamType_concat(2, { 2, 65 }, 1, 2),
TestParamType_concat(2, { 2, 75 }, 1, 2),
TestParamType_concat(2, { 15, 2 }, 1, 2),
TestParamType_concat(2, { 31, 2 }, 1, 2),
TestParamType_concat(2, { 32, 2 }, 1, 2),
TestParamType_concat(2, { 37, 2 }, 1, 2),
TestParamType_concat(2, { 63, 2 }, 1, 2),
TestParamType_concat(2, { 64, 2 }, 1, 2),
TestParamType_concat(2, { 65, 2 }, 1, 2),
TestParamType_concat(2, { 75, 2 }, 1, 2),
TestParamType_concat(2, { 32, 32 }, 1, 1),
TestParamType_concat(2, { 64, 64 }, 1, 1),
TestParamType_concat(2, { 2, 2, 2 }, 1, 1),
TestParamType_concat(2, { 2, 32, 2 }, 1, 1),
TestParamType_concat(2, { 31, 32, 32 }, 1, 1),
TestParamType_concat(2, { 32, 31, 2 }, 1, 1),
TestParamType_concat(2, { 32, 31, 32 }, 1, 1),
TestParamType_concat(2, { 32, 32, 32 }, 1, 1),
TestParamType_concat(2, { 33, 32, 32 }, 1, 1),
TestParamType_concat(2, { 33, 3, 3 }, 1, 1),
TestParamType_concat(2, { 33, 3, 33 }, 1, 1),
TestParamType_concat(2, { 64, 64, 64, 64 }, 1, 1)
);
// Generic 4D concat test: builds N random inputs in the given format,
// concatenates along f, and checks every output element against the
// corresponding input element (via per-coordinate linear offsets, so the
// check is layout-agnostic).
template <typename Type>
struct concat_gpu_4d : public concat_gpu {
public:
void test(format::type fmt) {
auto data_type = type_to_data_type<Type>::value;
auto& engine = get_test_engine();
const size_t batch_num = testing::get<0>(GetParam());
const std::vector<size_t> in_features = testing::get<1>(GetParam());
const size_t input_y = testing::get<2>(GetParam());
const size_t input_x = testing::get<3>(GetParam());
// Total output feature count = sum of all input feature counts.
size_t output_f = 0;
for (auto& f : in_features)
output_f += f;
topology topology;
std::vector<VVVVF<Type>> in_data;
std::vector<memory::ptr> in_memory;
std::vector<primitive_id> input_ids;
for (size_t i = 0; i < in_features.size(); i++) {
auto size = tensor(static_cast<int32_t>(batch_num),
static_cast<int32_t>(in_features[i]),
static_cast<int32_t>(input_x),
static_cast<int32_t>(input_y));
auto data = generate_random_4d<Type>(batch_num, in_features[i], input_y, input_x, -1, 1);
auto in_lay = layout(data_type, fmt, size);
// Flatten the random 4D data into the target layout's linear order.
auto data_flat = std::vector<Type>(in_lay.get_linear_size(), 0);
for (size_t bi = 0; bi < batch_num; ++bi) {
for (size_t fi = 0; fi < in_features[i]; ++fi) {
for (size_t yi = 0; yi < input_y; ++yi) {
for (size_t xi = 0; xi < input_x; ++xi) {
auto coords = tensor(batch(bi), feature(fi), spatial(xi, yi, 0, 0));
auto in_offset = in_lay.get_linear_offset(coords);
data_flat[in_offset] = data[bi][fi][yi][xi];
}
}
}
}
auto in_mem = engine.allocate_memory(in_lay);
set_values(in_mem, data_flat);
in_memory.push_back(in_mem);
topology.add(input_layout("input" + std::to_string(i), in_lay));
in_data.emplace_back(std::move(data));
input_ids.push_back("input" + std::to_string(i));
}
topology.add(concatenation("concat", input_ids, concatenation::concatenation_axis::along_f));
build_options options;
options.set_option(build_option::optimize_data(true));
network network(engine, topology, options);
for (size_t i = 0; i < in_features.size(); i++) {
network.set_input_data(input_ids[i], in_memory[i]);
}
network.execute();
auto out_mem = network.get_output("concat").get_memory();
cldnn::mem_lock<Type> out_ptr(out_mem, get_test_stream());
// Verify: output feature (f_sum + fi) must equal input in_i's feature fi.
for (size_t bi = 0; bi < batch_num; bi++) {
size_t f_sum = 0;
for (size_t in_i = 0; in_i < in_features.size(); in_i++) {
for (size_t fi = 0; fi < in_features[in_i]; fi++) {
for (size_t yi = 0; yi < input_y; yi++) {
for (size_t xi = 0; xi < input_x; xi++) {
auto output_coords = tensor(batch(bi), feature(f_sum + fi), spatial(xi, yi, 0, 0));
auto output_offset = out_mem->get_layout().get_linear_offset(output_coords);
auto ref_val = in_data[in_i][bi][fi][yi][xi];
auto actual_val = out_ptr[output_offset];
EXPECT_EQ(ref_val, actual_val)
<< " b=" << bi << ", f=" << f_sum + fi << "(input " << in_i << "), y=" << yi << ", x=" << xi;
}
}
}
f_sum += in_features[in_i];
}
}
}
};
// Element-type specializations of the feature-axis concatenation fixture.
using concat_gpu_4d_f16 = concat_gpu_4d<FLOAT16>;
using concat_gpu_4d_i8 = concat_gpu_4d<int8_t>;
using concat_gpu_4d_u8 = concat_gpu_4d<uint8_t>;
// fp16 concatenation in the fs_b_yx_fsv32 blocked layout.
TEST_P(concat_gpu_4d_f16, fs_b_yx_fsv32) {
    ASSERT_NO_FATAL_FAILURE(test(format::fs_b_yx_fsv32));
}

INSTANTIATE_TEST_SUITE_P(smoke,
                         concat_gpu_4d_f16,
                         concat_gpu_all_params,
                         concat_gpu::PrintToStringParamName);
// int8 concatenation in the two feature-blocked layouts under test.
TEST_P(concat_gpu_4d_i8, b_fs_yx_fsv32) {
    ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv32));
}

TEST_P(concat_gpu_4d_i8, b_fs_yx_fsv16) {
    ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv16));
}

INSTANTIATE_TEST_SUITE_P(smoke_low_precision,
                         concat_gpu_4d_i8,
                         concat_gpu_all_params,
                         concat_gpu::PrintToStringParamName);
// uint8 concatenation in b_fs_yx_fsv32.
TEST_P(concat_gpu_4d_u8, b_fs_yx_fsv32) {
    ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv32));
}

INSTANTIATE_TEST_SUITE_P(smoke_low_precision,
                         concat_gpu_4d_u8,
                         concat_gpu_all_params,
                         concat_gpu::PrintToStringParamName);
// Fixture that concatenates several random inputs along the feature axis and
// feeds the result through an "identity" convolution (diagonal weights of 1),
// so the convolution output must reproduce the concatenated inputs exactly.
// Type is the input/weight element type; OutputT is the element type the
// output buffer is read back as.
template <typename Type, typename OutputT>
struct concat_id_conv_gpu_4d : public concat_gpu {
public:

    // Builds the topology, forces the convolution into layout `fmt`,
    // executes it and verifies the output element-by-element.
    void test(format::type fmt) {
        auto data_type = type_to_data_type<Type>::value;
        auto& engine = get_test_engine();
        const size_t batch_num = testing::get<0>(GetParam());
        const std::vector<size_t> in_features = testing::get<1>(GetParam());
        const size_t input_y = testing::get<2>(GetParam());
        const size_t input_x = testing::get<3>(GetParam());

        // Total output feature count: sum over all concatenated inputs.
        size_t output_f = 0;
        for (auto& f : in_features)
            output_f += f;

        topology topology;

        std::vector<VVVVF<Type>> in_data;
        std::vector<memory::ptr> in_memory;
        std::vector<primitive_id> input_ids;
        for (size_t i = 0; i < in_features.size(); i++) {
            auto size = tensor(static_cast<int32_t>(batch_num),
                               static_cast<int32_t>(in_features[i]),
                               static_cast<int32_t>(input_x),
                               static_cast<int32_t>(input_y));
            // NOTE(review): for Type == int8_t the upper bound 128 is outside the
            // representable range — assumes generate_random_4d handles this; confirm.
            auto data = generate_random_4d<Type>(batch_num, in_features[i], input_y, input_x, -128, 128);
            auto in_lay = layout(data_type, fmt, size);
            // Scatter the generated bfyx-ordered data into the (possibly blocked)
            // target layout via per-element linear offsets.
            auto data_flat = std::vector<Type>(in_lay.get_linear_size(), 0);

            for (size_t bi = 0; bi < batch_num; ++bi) {
                for (size_t fi = 0; fi < in_features[i]; ++fi) {
                    for (size_t yi = 0; yi < input_y; ++yi) {
                        for (size_t xi = 0; xi < input_x; ++xi) {
                            auto coords = tensor(batch(bi), feature(fi), spatial(xi, yi, 0, 0));
                            auto in_offset = in_lay.get_linear_offset(coords);
                            data_flat[in_offset] = data[bi][fi][yi][xi];
                        }
                    }
                }
            }

            auto in_mem = engine.allocate_memory(in_lay);
            set_values(in_mem, data_flat);
            in_memory.push_back(in_mem);

            topology.add(input_layout("input" + std::to_string(i), in_lay));
            in_data.emplace_back(std::move(data));
            input_ids.push_back("input" + std::to_string(i));
        }

        topology.add(concatenation("concat", input_ids, concatenation::concatenation_axis::along_f));
        // Add identity convolution: weights are output_f x output_f (1x1 spatial);
        // fill() presumably zero-fills the buffer, then the diagonal is set to 1
        // so each output feature copies exactly one input feature.
        auto weights_lay = cldnn::layout(data_type, cldnn::format::bfyx, tensor(batch(output_f), feature(output_f)));
        auto weights_mem = engine.allocate_memory(weights_lay);
        weights_mem->fill(get_test_stream());
        get_test_stream().finish();
        {
            cldnn::mem_lock<Type> weights_ptr(weights_mem, get_test_stream());
            for (size_t fi = 0; fi < output_f; ++fi) {
                auto coords = tensor(batch(fi), feature(fi), spatial(0, 0, 0, 0));
                auto offset = weights_lay.get_linear_offset(coords);
                weights_ptr[offset] = static_cast<Type>(1.f);
            }
        }
        topology.add(data("weights", weights_mem));
        topology.add(convolution("conv", "concat", { "weights" }));

        build_options options;
        options.set_option(build_option::optimize_data(true));
        // Force the convolution implementation to run in the layout under test.
        auto conv_forcing = implementation_desc{ fmt, std::string() };
        options.set_option(build_option::force_implementations({ {primitive_id("conv"), conv_forcing} }));
        network network(engine, topology, options);

        for (size_t i = 0; i < in_features.size(); i++) {
            network.set_input_data(input_ids[i], in_memory[i]);
        }

        network.execute();

        auto out_mem = network.get_output("conv").get_memory();
        cldnn::mem_lock<OutputT> out_ptr(out_mem, get_test_stream());
        ASSERT_EQ(out_mem->get_layout().format, fmt);

        // The identity conv output must equal the concatenated inputs; f_sum
        // tracks the feature offset of each original input in the output.
        for (size_t bi = 0; bi < batch_num; bi++) {
            size_t f_sum = 0;
            for (size_t in_i = 0; in_i < in_features.size(); in_i++) {
                for (size_t fi = 0; fi < in_features[in_i]; fi++) {
                    for (size_t yi = 0; yi < input_y; yi++) {
                        for (size_t xi = 0; xi < input_x; xi++) {
                            auto output_coords = tensor(batch(bi), feature(f_sum + fi), spatial(xi, yi, 0, 0));
                            auto output_offset = out_mem->get_layout().get_linear_offset(output_coords);

                            auto ref_val = in_data[in_i][bi][fi][yi][xi];
                            // Output is read as OutputT, cast back to Type for exact comparison.
                            auto actual_val = static_cast<Type>(out_ptr[output_offset]);
                            ASSERT_EQ(ref_val, actual_val)
                                << " b=" << bi << ", f=" << f_sum + fi << "(input " << in_i << "), y=" << yi << ", x=" << xi;
                        }
                    }
                }
                f_sum += in_features[in_i];
            }
        }
    }
};
// (input element type, output read-back type) pairs under test.
using concat_id_conv_gpu_4d_f16 = concat_id_conv_gpu_4d<FLOAT16, FLOAT16>;
using concat_id_conv_gpu_4d_i8 = concat_id_conv_gpu_4d<int8_t, float>;

// fp16 concat + identity convolution forced into b_fs_yx_fsv16.
TEST_P(concat_id_conv_gpu_4d_f16, input_order_opt_b_fs_yx_fsv16) {
    ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv16));
}

// Feature counts mix tile-aligned (2/16/32/64) and unaligned (15/31) sizes.
INSTANTIATE_TEST_SUITE_P(smoke_low_precision,
                         concat_id_conv_gpu_4d_f16,
                         ::testing::Values(
                             TestParamType_concat(2, { 2, 32 }, 2, 1),
                             TestParamType_concat(2, { 31, 64 }, 2, 2),
                             TestParamType_concat(2, { 15, 15, 16 }, 2, 1),
                             TestParamType_concat(2, { 16, 15, 16 }, 2, 2),
                             TestParamType_concat(2, { 15, 2, 16, 64 }, 1, 2)
                         ),
                         concat_gpu::PrintToStringParamName);
// int8 concat + identity convolution forced into b_fs_yx_fsv16.
TEST_P(concat_id_conv_gpu_4d_i8, input_order_opt_b_fs_yx_fsv16) {
    ASSERT_NO_FATAL_FAILURE(test(format::b_fs_yx_fsv16));
}

// Same aligned/unaligned feature-count mix as the fp16 suite above.
INSTANTIATE_TEST_SUITE_P(smoke_low_precision,
                         concat_id_conv_gpu_4d_i8,
                         ::testing::Values(
                             TestParamType_concat(2, { 2, 32 }, 2, 1),
                             TestParamType_concat(2, { 31, 64 }, 2, 2),
                             TestParamType_concat(2, { 15, 15, 16 }, 2, 1),
                             TestParamType_concat(2, { 16, 15, 16 }, 2, 2),
                             TestParamType_concat(2, { 15, 2, 16, 64 }, 1, 2)
                         ),
                         concat_gpu::PrintToStringParamName);
#ifdef ENABLE_ONEDNN_FOR_GPU
// Concatenates five 1x1x4x3 f32 inputs along the feature axis with the
// concat primitive forced to the oneDNN implementation, then checks the
// bfyx output against the inputs stacked in order (expected shape 1x5x4x3).
TEST(concat_gpu_onednn, basic_input_types) {
    auto& engine = get_onednn_test_engine();

    auto input0 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
    auto input1 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
    auto input2 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
    auto input3 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });
    auto input4 = engine.allocate_memory({ data_types::f32, format::bfyx, { 1, 1, 4, 3 } });

    set_values<float>(input0, { 1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f });
    set_values<float>(input1, { 11.0f, 12.0f, 13.0f, 14.0f, 12.0f, 12.0f, 13.0f, 14.0f, 13.0f, 13.0f, 13.0f, 15.0f });
    set_values<float>(input2, { 21.0f, 22.0f, 23.0f, 24.0f, 22.0f, 22.0f, 23.0f, 24.0f, 23.0f, 23.0f, 23.0f, 25.0f });
    set_values<float>(input3, { 31.0f, 32.0f, 33.0f, 34.0f, 32.0f, 32.0f, 33.0f, 34.0f, 33.0f, 33.0f, 33.0f, 35.0f });
    set_values<float>(input4, { 41.0f, 42.0f, 43.0f, 44.0f, 42.0f, 42.0f, 43.0f, 44.0f, 43.0f, 43.0f, 43.0f, 45.0f });

    // Expected output: the five inputs laid out back-to-back along f.
    VF<float> output_vec = {
        1.0f, 2.0f, 3.0f, 4.0f, 2.0f, 2.0f, 3.0f, 4.0f, 3.0f, 3.0f, 3.0f, 5.0f,
        11.0f, 12.0f, 13.0f, 14.0f, 12.0f, 12.0f, 13.0f, 14.0f, 13.0f, 13.0f, 13.0f, 15.0f,
        21.0f, 22.0f, 23.0f, 24.0f, 22.0f, 22.0f, 23.0f, 24.0f, 23.0f, 23.0f, 23.0f, 25.0f,
        31.0f, 32.0f, 33.0f, 34.0f, 32.0f, 32.0f, 33.0f, 34.0f, 33.0f, 33.0f, 33.0f, 35.0f,
        41.0f, 42.0f, 43.0f, 44.0f, 42.0f, 42.0f, 43.0f, 44.0f, 43.0f, 43.0f, 43.0f, 45.0f };

    topology topology(
        input_layout("input0", input0->get_layout()),
        input_layout("input1", input1->get_layout()),
        input_layout("input2", input2->get_layout()),
        input_layout("input3", input3->get_layout()),
        input_layout("input4", input4->get_layout()),
        concatenation("concat",
                      { "input0", "input1", "input2", "input3", "input4" },
                      concatenation::concatenation_axis::along_f,
                      data_types::f32,
                      "",
                      padding{ { 0,0,0,0 }, 0 })
    );

    // Force the concat node to the oneDNN implementation in plain bfyx.
    build_options options_target;
    options_target.set_option(build_option::outputs({ "concat" }));
    implementation_desc impl = { format::bfyx, std::string(""), impl_types::onednn };
    options_target.set_option(build_option::force_implementations({ {"concat", impl} }));

    network network(engine, topology, options_target);

    network.set_input_data("input0", input0);
    network.set_input_data("input1", input1);
    network.set_input_data("input2", input2);
    network.set_input_data("input3", input3);
    network.set_input_data("input4", input4);

    auto outputs = network.execute();
    EXPECT_EQ(outputs.size(), size_t(1));
    EXPECT_EQ(outputs.begin()->first, "concat");

    auto output_memory = outputs.at("concat").get_memory();
    auto output_layout = output_memory->get_layout();
    cldnn::mem_lock<float> output_ptr(output_memory, get_test_stream());

    int y_size = output_layout.size.spatial[1];
    int x_size = output_layout.size.spatial[0];
    int f_size = output_layout.size.feature[0];
    int b_size = output_layout.size.batch[0];
    EXPECT_EQ(output_layout.format, format::bfyx);
    EXPECT_EQ(y_size, 3);
    EXPECT_EQ(x_size, 4);
    EXPECT_EQ(f_size, 5);
    EXPECT_EQ(b_size, 1);

    // bfyx output is linear, so a flat element-wise comparison suffices.
    for (size_t x = 0; x < output_layout.count(); ++x) {
        EXPECT_EQ(output_vec[x], output_ptr[x]);
    }
}
#endif
|
// Copyright (c) 2011-2015 The Bitcoin Core developers
// Copyright (c) 2014-2017 The Dash Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "rpcconsole.h"
#include "ui_debugwindow.h"
#include "bantablemodel.h"
#include "clientmodel.h"
#include "guiutil.h"
#include "platformstyle.h"
#include "chainparams.h"
#include "netbase.h"
#include "rpc/server.h"
#include "rpc/client.h"
#include "util.h"
#include <openssl/crypto.h>
#include <univalue.h>
#ifdef ENABLE_WALLET
#include <db_cxx.h>
#endif
#include <QDir>
#include <QKeyEvent>
#include <QMenu>
#include <QScrollBar>
#include <QSettings>
#include <QSignalMapper>
#include <QThread>
#include <QTime>
#include <QTimer>
#include <QStringList>
#if QT_VERSION < 0x050000
#include <QUrl>
#endif
// TODO: add a scrollback limit, as there is currently none
// TODO: make it possible to filter out categories (esp debug messages when implemented)
// TODO: receive errors and debug messages through ClientModel

// Maximum number of console commands kept in the navigation history.
const int CONSOLE_HISTORY = 50;
// Allowed console font size range: width() is the minimum, height() the maximum.
const QSize FONT_RANGE(4, 40);
// QSettings key under which the chosen console font size is persisted.
const char fontSizeSettingsKey[] = "consoleFontSize";
// Default time window shown by the network traffic graph.
const TrafficGraphData::GraphRange INITIAL_TRAFFIC_GRAPH_SETTING = TrafficGraphData::Range_30m;

// Repair parameters: command-line switches appended when restarting the
// wallet from the repair tab (see buildParameterlist()).
const QString SALVAGEWALLET("-salvagewallet");
const QString RESCAN("-rescan");
const QString ZAPTXES1("-zapwallettxes=1");
const QString ZAPTXES2("-zapwallettxes=2");
const QString UPGRADEWALLET("-upgradewallet");
const QString REINDEX("-reindex");
// Maps the pseudo-URLs used in the console HTML (e.g. <img src="cmd-request">)
// to icon resource names; the array is NULL-terminated for iteration.
const struct {
    const char *url;
    const char *source;
} ICON_MAPPING[] = {
    {"cmd-request", "send"},
    {"cmd-reply", "receive"},
    {"cmd-error", "quit"},
    {"misc", "misc"},
    {NULL, NULL}
};
/* Object for executing console RPC commands in a separate thread.
*/
class RPCExecutor : public QObject
{
    Q_OBJECT

public Q_SLOTS:
    // Parse and execute one console command line; emits reply() with the result.
    void request(const QString &command);

Q_SIGNALS:
    // Emitted with the command outcome; category is e.g. RPCConsole::CMD_REPLY
    // or RPCConsole::CMD_ERROR (see request()).
    void reply(int category, const QString &command);
};
/** Class for handling RPC timers
 * (used for e.g. re-locking the wallet after a timeout)
 */
class QtRPCTimerBase: public QObject, public RPCTimerBase
{
    Q_OBJECT
public:
    // Schedules func to run once, millis milliseconds after construction.
    QtRPCTimerBase(boost::function<void(void)>& func, int64_t millis):
        func(func)
    {
        timer.setSingleShot(true); // one-shot: deliver the callback exactly once
        connect(&timer, SIGNAL(timeout()), this, SLOT(timeout()));
        timer.start(millis);
    }
    ~QtRPCTimerBase() {}
private Q_SLOTS:
    // Invoked by the QTimer; forwards to the stored callback.
    void timeout() { func(); }
private:
    QTimer timer;
    boost::function<void(void)> func; // copy of the callback to run on expiry
};
// RPCTimerInterface implementation backed by Qt timers, registered with the
// RPC subsystem so timer callbacks run on the Qt event loop.
class QtRPCTimerInterface: public RPCTimerInterface
{
public:
    ~QtRPCTimerInterface() {}
    const char *Name() { return "Qt"; }
    // Caller takes ownership of the returned timer object.
    RPCTimerBase* NewTimer(boost::function<void(void)>& func, int64_t millis)
    {
        return new QtRPCTimerBase(func, millis);
    }
};
#include "rpcconsole.moc"
/**
* Split shell command line into a list of arguments. Aims to emulate \c bash and friends.
*
* - Arguments are delimited with whitespace
* - Extra whitespace at the beginning and end and between arguments will be ignored
* - Text can be "double" or 'single' quoted
* - The backslash \c \ is used as escape character
* - Outside quotes, any character can be escaped
* - Within double quotes, only escape \c " and backslashes before a \c " or another backslash
* - Within single quotes, no escaping is possible and no special interpretation takes place
*
* @param[out] args Parsed arguments will be appended to this list
* @param[in] strCommand Command line to split
*/
bool parseCommandLine(std::vector<std::string> &args, const std::string &strCommand)
{
    // Tokenizer state machine implementing the (subset of) bash quoting rules
    // described in the comment block above.
    enum CmdParseState
    {
        STATE_EATING_SPACES,        // between arguments, skipping whitespace
        STATE_ARGUMENT,             // accumulating an unquoted argument
        STATE_SINGLEQUOTED,         // inside '...': everything is literal
        STATE_DOUBLEQUOTED,         // inside "...": only \" and \\ are special
        STATE_ESCAPE_OUTER,         // after '\' outside quotes
        STATE_ESCAPE_DOUBLEQUOTED   // after '\' inside double quotes
    } state = STATE_EATING_SPACES;
    std::string curarg;
    // Plain C++11 range-for instead of the deprecated Q_FOREACH macro — the
    // input is a std::string, so no Qt machinery is needed here.
    for (char ch : strCommand)
    {
        switch(state)
        {
        case STATE_ARGUMENT: // In or after argument
        case STATE_EATING_SPACES: // Handle runs of whitespace
            switch(ch)
            {
            case '"': state = STATE_DOUBLEQUOTED; break;
            case '\'': state = STATE_SINGLEQUOTED; break;
            case '\\': state = STATE_ESCAPE_OUTER; break;
            case ' ': case '\n': case '\t':
                if(state == STATE_ARGUMENT) // Space ends argument
                {
                    args.push_back(curarg);
                    curarg.clear();
                }
                state = STATE_EATING_SPACES;
                break;
            default: curarg += ch; state = STATE_ARGUMENT;
            }
            break;
        case STATE_SINGLEQUOTED: // Single-quoted string
            switch(ch)
            {
            case '\'': state = STATE_ARGUMENT; break;
            default: curarg += ch;
            }
            break;
        case STATE_DOUBLEQUOTED: // Double-quoted string
            switch(ch)
            {
            case '"': state = STATE_ARGUMENT; break;
            case '\\': state = STATE_ESCAPE_DOUBLEQUOTED; break;
            default: curarg += ch;
            }
            break;
        case STATE_ESCAPE_OUTER: // '\' outside quotes
            curarg += ch; state = STATE_ARGUMENT;
            break;
        case STATE_ESCAPE_DOUBLEQUOTED: // '\' in double-quoted text
            if(ch != '"' && ch != '\\') curarg += '\\'; // keep '\' for everything but the quote and '\' itself
            curarg += ch; state = STATE_DOUBLEQUOTED;
            break;
        }
    }
    switch(state) // final state
    {
    case STATE_EATING_SPACES:
        return true;
    case STATE_ARGUMENT:
        args.push_back(curarg); // flush the trailing argument
        return true;
    default: // ERROR to end in one of the other states (unterminated quote/escape)
        return false;
    }
}
// Parse, dispatch and format one console command; all outcomes are reported
// back via the reply() signal so this can run on a worker thread.
void RPCExecutor::request(const QString &command)
{
    std::vector<std::string> args;
    if(!parseCommandLine(args, command.toStdString()))
    {
        Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Parse error: unbalanced ' or \""));
        return;
    }
    if(args.empty())
        return; // Nothing to do
    try
    {
        std::string strPrint;
        // Convert argument list to JSON objects in method-dependent way,
        // and pass it along with the method name to the dispatcher.
        UniValue result = tableRPC.execute(
            args[0],
            RPCConvertValues(args[0], std::vector<std::string>(args.begin() + 1, args.end())));

        // Format result reply
        if (result.isNull())
            strPrint = "";
        else if (result.isStr())
            strPrint = result.get_str();
        else
            strPrint = result.write(2); // pretty-print JSON, 2-space indent

        Q_EMIT reply(RPCConsole::CMD_REPLY, QString::fromStdString(strPrint));
    }
    // Catch order matters: RPC errors are thrown as UniValue objects and must
    // be handled before the generic std::exception case below.
    catch (UniValue& objError)
    {
        try // Nice formatting for standard-format error
        {
            int code = find_value(objError, "code").get_int();
            std::string message = find_value(objError, "message").get_str();
            Q_EMIT reply(RPCConsole::CMD_ERROR, QString::fromStdString(message) + " (code " + QString::number(code) + ")");
        }
        catch (const std::runtime_error&) // raised when converting to invalid type, i.e. missing code or message
        {   // Show raw JSON object
            Q_EMIT reply(RPCConsole::CMD_ERROR, QString::fromStdString(objError.write()));
        }
    }
    catch (const std::exception& e)
    {
        Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Error: ") + QString::fromStdString(e.what()));
    }
}
// Constructs the debug window: sets up the UI, icons, key-event filters,
// wallet-repair buttons, the RPC timer backend and the persisted font size.
RPCConsole::RPCConsole(const PlatformStyle *platformStyle, QWidget *parent) :
    QWidget(parent),
    ui(new Ui::RPCConsole),
    clientModel(0),
    historyPtr(0),
    platformStyle(platformStyle),
    peersTableContextMenu(0),
    banTableContextMenu(0),
    consoleFontSize(0)
{
    ui->setupUi(this);
    GUIUtil::restoreWindowGeometry("nRPCConsoleWindow", this->size(), this);

    QString theme = GUIUtil::getThemeName();

    if (platformStyle->getImagesOnButtons()) {
        // NOTE(review): this sets an *empty* icon, i.e. clears the button
        // image rather than theming it — confirm this is intentional.
        ui->openDebugLogfileButton->setIcon(QIcon());
    }

    // Needed on Mac also
    ui->clearButton->setIcon(QIcon(":/icons/" + theme + "/clear"));
    ui->fontBiggerButton->setIcon(QIcon(":/icons/" + theme + "/add"));
    ui->fontSmallerButton->setIcon(QIcon(":/icons/" + theme + "/remove"));

    // Install event filter for up and down arrow
    ui->lineEdit->installEventFilter(this);
    ui->messagesWidget->installEventFilter(this);

    connect(ui->clearButton, SIGNAL(clicked()), this, SLOT(clear()));
    connect(ui->fontBiggerButton, SIGNAL(clicked()), this, SLOT(fontBigger()));
    connect(ui->fontSmallerButton, SIGNAL(clicked()), this, SLOT(fontSmaller()));
    connect(ui->btnClearTrafficGraph, SIGNAL(clicked()), ui->trafficGraph, SLOT(clear()));

    // Wallet Repair Buttons
    // connect(ui->btn_salvagewallet, SIGNAL(clicked()), this, SLOT(walletSalvage()));
    // Disable salvage option in GUI, it's way too powerful and can lead to funds loss
    ui->btn_salvagewallet->setEnabled(false);
    connect(ui->btn_rescan, SIGNAL(clicked()), this, SLOT(walletRescan()));
    connect(ui->btn_zapwallettxes1, SIGNAL(clicked()), this, SLOT(walletZaptxes1()));
    connect(ui->btn_zapwallettxes2, SIGNAL(clicked()), this, SLOT(walletZaptxes2()));
    connect(ui->btn_upgradewallet, SIGNAL(clicked()), this, SLOT(walletUpgrade()));
    connect(ui->btn_reindex, SIGNAL(clicked()), this, SLOT(walletReindex()));

    // set library version labels
#ifdef ENABLE_WALLET
    ui->berkeleyDBVersion->setText(DbEnv::version(0, 0, 0));
    std::string walletPath = GetDataDir().string();
    walletPath += QDir::separator().toLatin1() + GetArg("-wallet", "wallet.dat");
    ui->wallet_path->setText(QString::fromStdString(walletPath));
#else
    // No wallet support compiled in: hide the BerkeleyDB version labels.
    ui->label_berkeleyDBVersion->hide();
    ui->berkeleyDBVersion->hide();
#endif
    // Register RPC timer interface
    rpcTimerInterface = new QtRPCTimerInterface();
    RPCRegisterTimerInterface(rpcTimerInterface);

    setTrafficGraphRange(INITIAL_TRAFFIC_GRAPH_SETTING);
    ui->peerHeading->setText(tr("Select a peer to view detailed information."));

    // Restore the persisted console font size (defaults to the system font size).
    QSettings settings;
    consoleFontSize = settings.value(fontSizeSettingsKey, QFontInfo(QFont()).pointSize()).toInt();
    clear();
}
// Persists window geometry, then unregisters and releases the RPC timer
// backend before the UI is destroyed.
RPCConsole::~RPCConsole()
{
    GUIUtil::saveWindowGeometry("nRPCConsoleWindow", this);
    RPCUnregisterTimerInterface(rpcTimerInterface);
    delete rpcTimerInterface;
    delete ui;
}
// Routes keyboard input between the command line edit, the messages view and
// the autocompleter popup. Returns true when the event was consumed here.
bool RPCConsole::eventFilter(QObject* obj, QEvent *event)
{
    if(event->type() == QEvent::KeyPress) // Special key handling
    {
        QKeyEvent *keyevt = static_cast<QKeyEvent*>(event);
        int key = keyevt->key();
        Qt::KeyboardModifiers mod = keyevt->modifiers();
        switch(key)
        {
        // Up/Down in the line edit navigate the command history.
        case Qt::Key_Up: if(obj == ui->lineEdit) { browseHistory(-1); return true; } break;
        case Qt::Key_Down: if(obj == ui->lineEdit) { browseHistory(1); return true; } break;
        case Qt::Key_PageUp: /* pass paging keys to messages widget */
        case Qt::Key_PageDown:
            if(obj == ui->lineEdit)
            {
                QApplication::postEvent(ui->messagesWidget, new QKeyEvent(*keyevt));
                return true;
            }
            break;
        case Qt::Key_Return:
        case Qt::Key_Enter:
            // forward these events to lineEdit
            if(obj == autoCompleter->popup()) {
                QApplication::postEvent(ui->lineEdit, new QKeyEvent(*keyevt));
                return true;
            }
            break;
        default:
            // Typing in messages widget brings focus to line edit, and redirects key there
            // Exclude most combinations and keys that emit no text, except paste shortcuts
            if(obj == ui->messagesWidget && (
                  (!mod && !keyevt->text().isEmpty() && key != Qt::Key_Tab) ||
                  ((mod & Qt::ControlModifier) && key == Qt::Key_V) ||
                  ((mod & Qt::ShiftModifier) && key == Qt::Key_Insert)))
            {
                ui->lineEdit->setFocus();
                QApplication::postEvent(ui->lineEdit, new QKeyEvent(*keyevt));
                return true;
            }
        }
    }
    return QWidget::eventFilter(obj, event);
}
// Attaches (or, with model == 0, detaches) the client model: wires up model
// signals, populates the peer and ban tables with their context menus, sets
// up command autocompletion and starts the RPC executor thread. Called with
// 0 during shutdown, which stops the executor thread again.
void RPCConsole::setClientModel(ClientModel *model)
{
    clientModel = model;
    ui->trafficGraph->setClientModel(model);
    if (model && clientModel->getPeerTableModel() && clientModel->getBanTableModel()) {
        // Keep up to date with client
        setNumConnections(model->getNumConnections());
        connect(model, SIGNAL(numConnectionsChanged(int)), this, SLOT(setNumConnections(int)));

        setNumBlocks(model->getNumBlocks(), model->getLastBlockDate(), model->getVerificationProgress(NULL), false);
        connect(model, SIGNAL(numBlocksChanged(int,QDateTime,double,bool)), this, SLOT(setNumBlocks(int,QDateTime,double,bool)));

        updateNetworkState();
        connect(model, SIGNAL(networkActiveChanged(bool)), this, SLOT(setNetworkActive(bool)));

        setMasternodeCount(model->getMasternodeCountString());
        connect(model, SIGNAL(strMasternodesChanged(QString)), this, SLOT(setMasternodeCount(QString)));

        updateTrafficStats(model->getTotalBytesRecv(), model->getTotalBytesSent());
        connect(model, SIGNAL(bytesChanged(quint64,quint64)), this, SLOT(updateTrafficStats(quint64, quint64)));

        connect(model, SIGNAL(mempoolSizeChanged(long,size_t)), this, SLOT(setMempoolSize(long,size_t)));

        // set up peer table
        ui->peerWidget->setModel(model->getPeerTableModel());
        ui->peerWidget->verticalHeader()->hide();
        ui->peerWidget->setEditTriggers(QAbstractItemView::NoEditTriggers);
        ui->peerWidget->setSelectionBehavior(QAbstractItemView::SelectRows);
        ui->peerWidget->setSelectionMode(QAbstractItemView::ExtendedSelection);
        ui->peerWidget->setContextMenuPolicy(Qt::CustomContextMenu);
        ui->peerWidget->setColumnWidth(PeerTableModel::Address, ADDRESS_COLUMN_WIDTH);
        ui->peerWidget->setColumnWidth(PeerTableModel::Subversion, SUBVERSION_COLUMN_WIDTH);
        ui->peerWidget->setColumnWidth(PeerTableModel::Ping, PING_COLUMN_WIDTH);
        ui->peerWidget->horizontalHeader()->setStretchLastSection(true);

        // create peer table context menu actions
        QAction* disconnectAction = new QAction(tr("&Disconnect"), this);
        QAction* banAction1h      = new QAction(tr("Ban for") + " " + tr("1 &hour"), this);
        QAction* banAction24h     = new QAction(tr("Ban for") + " " + tr("1 &day"), this);
        QAction* banAction7d      = new QAction(tr("Ban for") + " " + tr("1 &week"), this);
        QAction* banAction365d    = new QAction(tr("Ban for") + " " + tr("1 &year"), this);

        // create peer table context menu
        peersTableContextMenu = new QMenu(this);
        peersTableContextMenu->addAction(disconnectAction);
        peersTableContextMenu->addAction(banAction1h);
        peersTableContextMenu->addAction(banAction24h);
        peersTableContextMenu->addAction(banAction7d);
        peersTableContextMenu->addAction(banAction365d);

        // Add a signal mapping to allow dynamic context menu arguments.
        // We need to use int (instead of int64_t), because signal mapper only supports
        // int or objects, which is okay because max bantime (1 year) is < int_max.
        QSignalMapper* signalMapper = new QSignalMapper(this);
        signalMapper->setMapping(banAction1h, 60*60);
        signalMapper->setMapping(banAction24h, 60*60*24);
        signalMapper->setMapping(banAction7d, 60*60*24*7);
        signalMapper->setMapping(banAction365d, 60*60*24*365);
        connect(banAction1h, SIGNAL(triggered()), signalMapper, SLOT(map()));
        connect(banAction24h, SIGNAL(triggered()), signalMapper, SLOT(map()));
        connect(banAction7d, SIGNAL(triggered()), signalMapper, SLOT(map()));
        connect(banAction365d, SIGNAL(triggered()), signalMapper, SLOT(map()));
        connect(signalMapper, SIGNAL(mapped(int)), this, SLOT(banSelectedNode(int)));

        // peer table context menu signals
        connect(ui->peerWidget, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(showPeersTableContextMenu(const QPoint&)));
        connect(disconnectAction, SIGNAL(triggered()), this, SLOT(disconnectSelectedNode()));

        // peer table signal handling - update peer details when selecting new node
        connect(ui->peerWidget->selectionModel(), SIGNAL(selectionChanged(const QItemSelection &, const QItemSelection &)),
            this, SLOT(peerSelected(const QItemSelection &, const QItemSelection &)));
        // peer table signal handling - update peer details when new nodes are added to the model
        connect(model->getPeerTableModel(), SIGNAL(layoutChanged()), this, SLOT(peerLayoutChanged()));
        // peer table signal handling - cache selected node ids
        connect(model->getPeerTableModel(), SIGNAL(layoutAboutToBeChanged()), this, SLOT(peerLayoutAboutToChange()));

        // set up ban table
        ui->banlistWidget->setModel(model->getBanTableModel());
        ui->banlistWidget->verticalHeader()->hide();
        ui->banlistWidget->setEditTriggers(QAbstractItemView::NoEditTriggers);
        ui->banlistWidget->setSelectionBehavior(QAbstractItemView::SelectRows);
        ui->banlistWidget->setSelectionMode(QAbstractItemView::SingleSelection);
        ui->banlistWidget->setContextMenuPolicy(Qt::CustomContextMenu);
        ui->banlistWidget->setColumnWidth(BanTableModel::Address, BANSUBNET_COLUMN_WIDTH);
        ui->banlistWidget->setColumnWidth(BanTableModel::Bantime, BANTIME_COLUMN_WIDTH);
        ui->banlistWidget->horizontalHeader()->setStretchLastSection(true);

        // create ban table context menu action
        QAction* unbanAction = new QAction(tr("&Unban"), this);

        // create ban table context menu
        banTableContextMenu = new QMenu(this);
        banTableContextMenu->addAction(unbanAction);

        // ban table context menu signals
        connect(ui->banlistWidget, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(showBanTableContextMenu(const QPoint&)));
        connect(unbanAction, SIGNAL(triggered()), this, SLOT(unbanSelectedNode()));

        // ban table signal handling - clear peer details when clicking a peer in the ban table
        connect(ui->banlistWidget, SIGNAL(clicked(const QModelIndex&)), this, SLOT(clearSelectedNode()));
        // ban table signal handling - ensure ban table is shown or hidden (if empty)
        connect(model->getBanTableModel(), SIGNAL(layoutChanged()), this, SLOT(showOrHideBanTableIfRequired()));
        showOrHideBanTableIfRequired();

        // Provide initial values
        ui->clientVersion->setText(model->formatFullVersion());
        ui->clientUserAgent->setText(model->formatSubVersion());
        ui->clientName->setText(model->clientName());
        ui->dataDir->setText(model->dataDir());
        ui->startupTime->setText(model->formatClientStartupTime());
        ui->networkName->setText(QString::fromStdString(Params().NetworkIDString()));

        //Setup autocomplete and attach it
        QStringList wordList;
        std::vector<std::string> commandList = tableRPC.listCommands();
        for (size_t i = 0; i < commandList.size(); ++i)
        {
            wordList << commandList[i].c_str();
        }

        autoCompleter = new QCompleter(wordList, this);
        ui->lineEdit->setCompleter(autoCompleter);
        // Filter the popup too so Return/Enter get forwarded (see eventFilter()).
        autoCompleter->popup()->installEventFilter(this);
        // Start thread to execute RPC commands.
        startExecutor();
    }
    if (!model) {
        // Client model is being set to 0, this means shutdown() is about to be called.
        // Make sure we clean up the executor thread
        Q_EMIT stopExecutor();
        thread.wait();
    }
}
/* Map a message category to the CSS class / icon key used in the console document. */
static QString categoryClass(int category)
{
    if (category == RPCConsole::CMD_REQUEST)
        return "cmd-request";
    if (category == RPCConsole::CMD_REPLY)
        return "cmd-reply";
    if (category == RPCConsole::CMD_ERROR)
        return "cmd-error";
    return "misc";
}
// Increase the console font size by one point (range-checked in setFontSize()).
void RPCConsole::fontBigger()
{
    setFontSize(consoleFontSize+1);
}
// Decrease the console font size by one point (range-checked in setFontSize()).
void RPCConsole::fontSmaller()
{
    setFontSize(consoleFontSize-1);
}
// Apply and persist a new console font size, rewriting the existing console
// HTML so previous output is rescaled, and keeping the scroll position.
void RPCConsole::setFontSize(int newSize)
{
    QSettings settings;

    //don't allow a insane font size
    if (newSize < FONT_RANGE.width() || newSize > FONT_RANGE.height())
        return;

    // temp. store the console content
    QString str = ui->messagesWidget->toHtml();

    // replace font tags size in current content
    str.replace(QString("font-size:%1pt").arg(consoleFontSize), QString("font-size:%1pt").arg(newSize));

    // store the new font size
    consoleFontSize = newSize;
    settings.setValue(fontSizeSettingsKey, consoleFontSize);

    // clear console (reset icon sizes, default stylesheet) and re-add the content.
    // Guard against maximum() == 0 (no scroll range yet): the old code computed
    // 1.0 / 0 * 0 == NaN, and converting NaN to int for setValue() is undefined.
    const int oldMax = ui->messagesWidget->verticalScrollBar()->maximum();
    const float oldPosFactor = oldMax > 0
        ? 1.0f / oldMax * ui->messagesWidget->verticalScrollBar()->value()
        : 0.0f;

    clear(false);
    ui->messagesWidget->setHtml(str);
    ui->messagesWidget->verticalScrollBar()->setValue(oldPosFactor * ui->messagesWidget->verticalScrollBar()->maximum());
}
/** Restart wallet with "-salvagewallet" (button is disabled in the ctor —
 *  salvage is considered too dangerous to expose in the GUI). */
void RPCConsole::walletSalvage()
{
    buildParameterlist(SALVAGEWALLET);
}
/** Restart wallet with "-rescan" appended via buildParameterlist(). */
void RPCConsole::walletRescan()
{
    buildParameterlist(RESCAN);
}
/** Restart wallet with "-zapwallettxes=1" appended via buildParameterlist(). */
void RPCConsole::walletZaptxes1()
{
    buildParameterlist(ZAPTXES1);
}
/** Restart wallet with "-zapwallettxes=2" appended via buildParameterlist(). */
void RPCConsole::walletZaptxes2()
{
    buildParameterlist(ZAPTXES2);
}
/** Restart wallet with "-upgradewallet" appended via buildParameterlist(). */
void RPCConsole::walletUpgrade()
{
    buildParameterlist(UPGRADEWALLET);
}
/** Restart wallet with "-reindex" appended via buildParameterlist(). */
void RPCConsole::walletReindex()
{
    buildParameterlist(REINDEX);
}
/** Assemble the command line used to restart the wallet with a single
 *  repair flag, then hand it to BitcoinGUI::handleRestart(). */
void RPCConsole::buildParameterlist(QString arg)
{
    // Start from the current process arguments, minus the executable name.
    QStringList args = QApplication::arguments();
    args.removeFirst();

    // Strip every repair option that might already be present so only the
    // requested one survives.
    const QStringList repairOptions = {SALVAGEWALLET, RESCAN, ZAPTXES1, ZAPTXES2, UPGRADEWALLET, REINDEX};
    for (const QString& option : repairOptions)
        args.removeAll(option);

    // Append the requested repair parameter.
    args.append(arg);

    // Send command-line arguments to BitcoinGUI::handleRestart()
    Q_EMIT handleRestart(args);
}
// Reset the console view (icons, stylesheet, welcome banner); optionally
// wipes the command history as well.
void RPCConsole::clear(bool clearHistory)
{
    ui->messagesWidget->clear();
    if(clearHistory)
    {
        history.clear();
        historyPtr = 0;
    }
    ui->lineEdit->clear();
    ui->lineEdit->setFocus();

    // Add smoothly scaled icon images.
    // (when using width/height on an img, Qt uses nearest instead of linear interpolation)
    QString iconPath = ":/icons/" + GUIUtil::getThemeName() + "/";
    QString iconName = "";
    // Register each category icon as a document resource, scaled to the
    // current font size; the console HTML references them by pseudo-URL.
    for(int i=0; ICON_MAPPING[i].url; ++i)
    {
        iconName = ICON_MAPPING[i].source;
        ui->messagesWidget->document()->addResource(
                    QTextDocument::ImageResource,
                    QUrl(ICON_MAPPING[i].url),
                    QImage(iconPath + iconName).scaled(QSize(consoleFontSize*2, consoleFontSize*2), Qt::IgnoreAspectRatio, Qt::SmoothTransformation));
    }

    // Set default style sheet
    QFontInfo fixedFontInfo(GUIUtil::fixedPitchFont());
    ui->messagesWidget->document()->setDefaultStyleSheet(
        QString(
                "table { }"
                "td.time { color: #4d4d4d; font-size: %2; padding-top: 3px; } "
                "td.message { font-family: %1; font-size: %2; white-space:pre-wrap; } "
                "td.cmd-request { color: #1a8cff; } "
                "td.cmd-error { color: red; } "
                "b { color: #1a8cff; } "
            ).arg(fixedFontInfo.family(), QString("%1pt").arg(consoleFontSize))
        );

    message(CMD_REPLY, (tr("Welcome to the Addmore Core RPC console.") + "<br>" +
                        tr("Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.") + "<br>" +
                        tr("Type <b>help</b> for an overview of available commands.")), true);
}
/** Close the console on Escape, but only when it is a standalone window
 *  (embedded widgets ignore the key). */
void RPCConsole::keyPressEvent(QKeyEvent *event)
{
    const bool isStandaloneWindow = windowType() != Qt::Widget;
    if (isStandaloneWindow && event->key() == Qt::Key_Escape)
        close();
}
// Append one console entry as a table row: timestamp | category icon |
// message body. With html == true the message is inserted verbatim (the
// caller is responsible for escaping); otherwise it is HTML-escaped here.
void RPCConsole::message(int category, const QString &message, bool html)
{
    QTime time = QTime::currentTime();
    QString timeString = time.toString();
    QString out;
    out += "<table><tr><td class=\"time\" width=\"65\">" + timeString + "</td>";
    out += "<td class=\"icon\" width=\"32\"><img src=\"" + categoryClass(category) + "\"></td>";
    out += "<td class=\"message " + categoryClass(category) + "\" valign=\"middle\">";
    if(html)
        out += message;
    else
        out += GUIUtil::HtmlEscape(message, false);
    out += "</td></tr></table>";
    ui->messagesWidget->append(out);
}
// Refresh the connection count label, e.g. "8 (In: 3 / Out: 5)", appending a
// note when network activity has been disabled.
void RPCConsole::updateNetworkState()
{
    QString connections = QString::number(clientModel->getNumConnections()) + " (";
    connections += tr("In:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_IN)) + " / ";
    connections += tr("Out:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_OUT)) + ")";

    if(!clientModel->getNetworkActive()) {
        connections += " (" + tr("Network activity disabled") + ")";
    }

    ui->numberOfConnections->setText(connections);
}
/** Slot for numConnectionsChanged(); ignores updates that arrive after the
 *  client model has been detached. The count itself is re-read from the
 *  model inside updateNetworkState(). */
void RPCConsole::setNumConnections(int count)
{
    if (clientModel)
        updateNetworkState();
}
// Slot for networkActiveChanged(); the flag itself is read back from the
// model inside updateNetworkState(), so the parameter is unused here.
void RPCConsole::setNetworkActive(bool networkActive)
{
    updateNetworkState();
}
// Slot for numBlocksChanged(); header-only updates are ignored — only full
// block updates refresh the block count and timestamp labels.
void RPCConsole::setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers)
{
    if (!headers) {
        ui->numberOfBlocks->setText(QString::number(count));
        ui->lastBlockTime->setText(blockDate.toString());
    }
}
// Display the preformatted masternode count string supplied by the model.
void RPCConsole::setMasternodeCount(const QString &strMasternodes)
{
    ui->masternodeCount->setText(strMasternodes);
}
/** Slot for mempoolSizeChanged(): shows the transaction count and the
 *  dynamic memory usage, scaled to KB below 1 MB and to MB above. */
void RPCConsole::setMempoolSize(long numberOfTxs, size_t dynUsage)
{
    ui->mempoolNumberTxs->setText(QString::number(numberOfTxs));

    const bool useMegabytes = dynUsage >= 1000000;
    const double scaledUsage = useMegabytes ? dynUsage / 1000000.0 : dynUsage / 1000.0;
    ui->mempoolSize->setText(QString::number(scaledUsage, 'f', 2) + (useMegabytes ? " MB" : " KB"));
}
void RPCConsole::on_lineEdit_returnPressed()
{
QString cmd = ui->lineEdit->text();
ui->lineEdit->clear();
if(!cmd.isEmpty())
{
message(CMD_REQUEST, cmd);
Q_EMIT cmdRequest(cmd);
// Remove command, if already in history
history.removeOne(cmd);
// Append command to history
history.append(cmd);
// Enforce maximum history size
while(history.size() > CONSOLE_HISTORY)
history.removeFirst();
// Set pointer to end of history
historyPtr = history.size();
// Scroll console view to end
scrollToEnd();
}
}
void RPCConsole::browseHistory(int offset)
{
    // Move the history cursor and clamp it into [0, history.size()].
    // A cursor equal to size() means "past the newest entry" and yields
    // an empty command line.
    historyPtr += offset;
    if (historyPtr > history.size())
        historyPtr = history.size();
    if (historyPtr < 0)
        historyPtr = 0;
    QString cmd;
    if (historyPtr < history.size())
        cmd = history.at(historyPtr);
    ui->lineEdit->setText(cmd);
}
// Create the RPC executor on a dedicated worker thread and wire up the
// signal/slot plumbing between it and the console, so long-running RPC
// commands do not block the GUI thread.
void RPCConsole::startExecutor()
{
    RPCExecutor *executor = new RPCExecutor();
    // The executor must live on the worker thread so its request() slot
    // runs there (queued-connection delivery follows object affinity).
    executor->moveToThread(&thread);
    // Replies from executor object must go to this object
    connect(executor, SIGNAL(reply(int,QString)), this, SLOT(message(int,QString)));
    // Requests from this object must go to executor
    connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString)));
    // On stopExecutor signal
    // - quit the Qt event loop in the execution thread
    connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit()));
    // - queue executor for deletion (in execution thread)
    connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection);
    // Default implementation of QThread::run() simply spins up an event loop in the thread,
    // which is what we want.
    thread.start();
}
void RPCConsole::on_tabWidget_currentChanged(int index)
{
if (ui->tabWidget->widget(index) == ui->tab_console)
ui->lineEdit->setFocus();
else if (ui->tabWidget->widget(index) != ui->tab_peers)
clearSelectedNode();
}
// Open the debug log file with the operating system's default handler.
void RPCConsole::on_openDebugLogfileButton_clicked()
{
    GUIUtil::openDebugLogfile();
}
void RPCConsole::scrollToEnd()
{
QScrollBar *scrollbar = ui->messagesWidget->verticalScrollBar();
scrollbar->setValue(scrollbar->maximum());
}
// Slider moved: map the slider position onto a traffic-graph time range.
// Assumes slider values align with the GraphRange enum — TODO confirm in the .ui file.
void RPCConsole::on_sldGraphRange_valueChanged(int value)
{
    setTrafficGraphRange(static_cast<TrafficGraphData::GraphRange>(value));
}
QString RPCConsole::FormatBytes(quint64 bytes)
{
    // Binary units; integer division, so values are truncated not rounded.
    const quint64 KB = 1024;
    const quint64 MB = 1024 * KB;
    const quint64 GB = 1024 * MB;
    if (bytes < KB)
        return QString(tr("%1 B")).arg(bytes);
    if (bytes < MB)
        return QString(tr("%1 KB")).arg(bytes / KB);
    if (bytes < GB)
        return QString(tr("%1 MB")).arg(bytes / MB);
    return QString(tr("%1 GB")).arg(bytes / GB);
}
// Apply a new time range to the traffic graph and update its label.
void RPCConsole::setTrafficGraphRange(TrafficGraphData::GraphRange range)
{
    ui->trafficGraph->setGraphRangeMins(range);
    // RangeMinutes holds minutes; formatDurationStr() takes seconds.
    ui->lblGraphRange->setText(GUIUtil::formatDurationStr(TrafficGraphData::RangeMinutes[range] * 60));
}
// Refresh the cumulative sent/received byte counters on the network tab.
void RPCConsole::updateTrafficStats(quint64 totalBytesIn, quint64 totalBytesOut)
{
    ui->lblBytesIn->setText(FormatBytes(totalBytesIn));
    ui->lblBytesOut->setText(FormatBytes(totalBytesOut));
}
void RPCConsole::peerSelected(const QItemSelection &selected, const QItemSelection &deselected)
{
Q_UNUSED(deselected);
if (!clientModel || !clientModel->getPeerTableModel() || selected.indexes().isEmpty())
return;
const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.indexes().first().row());
if (stats)
updateNodeDetail(stats);
}
void RPCConsole::peerLayoutAboutToChange()
{
QModelIndexList selected = ui->peerWidget->selectionModel()->selectedIndexes();
cachedNodeids.clear();
for(int i = 0; i < selected.size(); i++)
{
const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.at(i).row());
cachedNodeids.append(stats->nodeStats.nodeid);
}
}
// After the peer table's layout changed, re-resolve the peer shown in the
// detail pane: its row may have moved or vanished. Uses the node ids cached
// by peerLayoutAboutToChange() to unselect/reselect as needed and refresh
// the detail view with fresh stats.
void RPCConsole::peerLayoutChanged()
{
    if (!clientModel || !clientModel->getPeerTableModel())
        return;
    const CNodeCombinedStats *stats = NULL;
    bool fUnselect = false;
    bool fReselect = false;
    if (cachedNodeids.empty()) // no node selected yet
        return;
    // find the currently selected row
    int selectedRow = -1;
    QModelIndexList selectedModelIndex = ui->peerWidget->selectionModel()->selectedIndexes();
    if (!selectedModelIndex.isEmpty()) {
        selectedRow = selectedModelIndex.first().row();
    }
    // check if our detail node has a row in the table (it may not necessarily
    // be at selectedRow since its position can change after a layout change)
    int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.first());
    if (detailNodeRow < 0)
    {
        // detail node disappeared from table (node disconnected)
        fUnselect = true;
    }
    else
    {
        if (detailNodeRow != selectedRow)
        {
            // detail node moved position
            fUnselect = true;
            fReselect = true;
        }
        // get fresh stats on the detail node.
        stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
    }
    if (fUnselect && selectedRow >= 0) {
        clearSelectedNode();
    }
    if (fReselect)
    {
        // re-select each previously selected node at its new row position
        for(int i = 0; i < cachedNodeids.size(); i++)
        {
            ui->peerWidget->selectRow(clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.at(i)));
        }
    }
    if (stats)
        updateNodeDetail(stats);
}
// Populate the peer-detail pane from a stats snapshot. `stats` must be
// non-NULL; all callers in this file check before calling.
void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
{
    // update the detail ui with latest node information
    QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " ");
    peerAddrDetails += tr("(node id: %1)").arg(QString::number(stats->nodeStats.nodeid));
    if (!stats->nodeStats.addrLocal.empty())
        peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));
    ui->peerHeading->setText(peerAddrDetails);
    ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices));
    // nLastSend/nLastRecv of 0 mean "never"; otherwise show elapsed time.
    ui->peerLastSend->setText(stats->nodeStats.nLastSend ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastSend) : tr("never"));
    ui->peerLastRecv->setText(stats->nodeStats.nLastRecv ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastRecv) : tr("never"));
    ui->peerBytesSent->setText(FormatBytes(stats->nodeStats.nSendBytes));
    ui->peerBytesRecv->setText(FormatBytes(stats->nodeStats.nRecvBytes));
    ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected));
    ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingTime));
    ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingWait));
    ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.dMinPing));
    ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset));
    ui->peerVersion->setText(QString("%1").arg(QString::number(stats->nodeStats.nVersion)));
    ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
    ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound"));
    ui->peerHeight->setText(QString("%1").arg(QString::number(stats->nodeStats.nStartingHeight)));
    ui->peerWhitelisted->setText(stats->nodeStats.fWhitelisted ? tr("Yes") : tr("No"));
    // This check fails for example if the lock was busy and
    // nodeStateStats couldn't be fetched.
    if (stats->fNodeStateStatsAvailable) {
        // Ban score is init to 0
        ui->peerBanScore->setText(QString("%1").arg(stats->nodeStateStats.nMisbehavior));
        // Sync height is init to -1
        if (stats->nodeStateStats.nSyncHeight > -1)
            ui->peerSyncHeight->setText(QString("%1").arg(stats->nodeStateStats.nSyncHeight));
        else
            ui->peerSyncHeight->setText(tr("Unknown"));
        // Common height is init to -1
        if (stats->nodeStateStats.nCommonHeight > -1)
            ui->peerCommonHeight->setText(QString("%1").arg(stats->nodeStateStats.nCommonHeight));
        else
            ui->peerCommonHeight->setText(tr("Unknown"));
    }
    ui->detailWidget->show();
}
// Standard QWidget resize handling; no console-specific work is needed.
void RPCConsole::resizeEvent(QResizeEvent *event)
{
    QWidget::resizeEvent(event);
}
void RPCConsole::showEvent(QShowEvent *event)
{
    QWidget::showEvent(event);
    // Only poll peer data while the window is actually visible.
    if (clientModel && clientModel->getPeerTableModel())
        clientModel->getPeerTableModel()->startAutoRefresh();
}
void RPCConsole::hideEvent(QHideEvent *event)
{
    QWidget::hideEvent(event);
    // Pause peer polling while hidden to save work.
    if (clientModel && clientModel->getPeerTableModel())
        clientModel->getPeerTableModel()->stopAutoRefresh();
}
void RPCConsole::showPeersTableContextMenu(const QPoint& point)
{
QModelIndex index = ui->peerWidget->indexAt(point);
if (index.isValid())
peersTableContextMenu->exec(QCursor::pos());
}
void RPCConsole::showBanTableContextMenu(const QPoint& point)
{
QModelIndex index = ui->banlistWidget->indexAt(point);
if (index.isValid())
banTableContextMenu->exec(QCursor::pos());
}
void RPCConsole::disconnectSelectedNode()
{
if(!g_connman)
return;
// Get selected peer addresses
QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
for(int i = 0; i < nodes.count(); i++)
{
// Get currently selected peer address
NodeId id = nodes.at(i).data().toInt();
// Find the node, disconnect it and clear the selected node
if(g_connman->DisconnectNode(id))
clearSelectedNode();
}
}
void RPCConsole::banSelectedNode(int bantime)
{
    if (!clientModel || !g_connman)
        return;
    // Ban every selected peer for the requested duration.
    QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
    for(int i = 0; i < nodes.count(); i++)
    {
        // Get currently selected peer address
        NodeId id = nodes.at(i).data().toInt();
        // Look the peer up in the table; it may have disconnected meanwhile.
        int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(id);
        // BUGFIX: this used to `return`, which aborted the whole slot when one
        // selected peer had vanished — skipping the remaining peers AND the
        // clearSelectedNode()/refresh() below for peers already banned.
        if(detailNodeRow < 0)
            continue;
        // Find possible nodes, ban it and clear the selected node
        const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
        if(stats) {
            g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime);
        }
    }
    clearSelectedNode();
    clientModel->getBanTableModel()->refresh();
}
void RPCConsole::unbanSelectedNode()
{
    if (!clientModel)
        return;
    // Lift the ban for every selected subnet in the ban list.
    QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, BanTableModel::Address);
    for(int i = 0; i < nodes.count(); i++)
    {
        // Parse the displayed address string back into a subnet.
        QString strNode = nodes.at(i).data().toString();
        CSubNet possibleSubnet;
        LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
        if (!possibleSubnet.IsValid() || !g_connman)
            continue;
        g_connman->Unban(possibleSubnet);
        clientModel->getBanTableModel()->refresh();
    }
}
// Reset peer-selection state: clear the table selection and the cached node
// ids, hide the detail pane, and restore the placeholder heading.
void RPCConsole::clearSelectedNode()
{
    ui->peerWidget->selectionModel()->clearSelection();
    cachedNodeids.clear();
    ui->detailWidget->hide();
    ui->peerHeading->setText(tr("Select a peer to view detailed information."));
}
void RPCConsole::showOrHideBanTableIfRequired()
{
    if (!clientModel)
        return;
    // Show the ban list (and its heading) only when the model says so.
    const bool show = clientModel->getBanTableModel()->shouldShow();
    ui->banlistWidget->setVisible(show);
    ui->banHeading->setVisible(show);
}
// Programmatically switch the dialog to the given tab.
// Assumes TabTypes values match the tab widget's index order — TODO confirm.
void RPCConsole::setTabFocus(enum TabTypes tabType)
{
    ui->tabWidget->setCurrentIndex(tabType);
}
|
// Copyright (c) 2019 The Refnet Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <test/util.h>
#include <chainparams.h>
#include <consensus/merkle.h>
#include <consensus/validation.h>
#include <key_io.h>
#include <miner.h>
#include <outputtype.h>
#include <pow.h>
#include <scheduler.h>
#include <script/standard.h>
#include <txdb.h>
#include <validation.h>
#include <validationinterface.h>
#ifdef ENABLE_WALLET
#include <wallet/wallet.h>
#endif
#include <boost/thread.hpp>
// Regtest (bcrt1) bech32 address whose witness program is all zeros —
// presumably unspendable by anyone, used as a burn/placeholder destination
// in tests. TODO confirm against the test callers.
const std::string ADDRESS_BCRT1_UNSPENDABLE = "bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj";
#ifdef ENABLE_WALLET
// Draw a fresh key from the wallet's keypool, register it as a BECH32
// receive address in the address book, and return its encoded form.
// Aborts (assert) if the keypool is exhausted.
std::string getnewaddress(CWallet& w)
{
    constexpr auto output_type = OutputType::BECH32;
    CPubKey new_key;
    if (!w.GetKeyFromPool(new_key)) assert(false);
    // Make the wallet aware of the scripts related to this key/output type
    // before deriving the destination from it.
    w.LearnRelatedScripts(new_key, output_type);
    const auto dest = GetDestinationForKey(new_key, output_type);
    w.SetAddressBook(dest, /* label */ "", "receive");
    return EncodeDestination(dest);
}
// Import `address` into the wallet as a watch-only script and label it as a
// receive address. Asserts that the address is valid and not already watched.
void importaddress(CWallet& wallet, const std::string& address)
{
    // All wallet mutations below require cs_wallet.
    LOCK(wallet.cs_wallet);
    const auto dest = DecodeDestination(address);
    assert(IsValidDestination(dest));
    const auto script = GetScriptForDestination(dest);
    wallet.MarkDirty();
    assert(!wallet.HaveWatchOnly(script));
    // nCreateTime of 0 means no birth time is recorded for rescans.
    if (!wallet.AddWatchOnly(script, 0 /* nCreateTime */)) assert(false);
    wallet.SetAddressBook(dest, /* label */ "", "receive");
}
#endif // ENABLE_WALLET
// Mine a single block paying the given address; returns an input that
// spends the new block's coinbase output.
CTxIn generatetoaddress(const std::string& address)
{
    const auto dest = DecodeDestination(address);
    assert(IsValidDestination(dest));
    return MineBlock(GetScriptForDestination(dest));
}
// Grind nonces until the prepared block satisfies proof-of-work, submit it,
// and return an input spending output 0 of its coinbase transaction.
CTxIn MineBlock(const CScript& coinbase_scriptPubKey)
{
    auto block = PrepareBlock(coinbase_scriptPubKey);
    while (!CheckProofOfWork(block->GetHash(), block->nBits, Params().GetConsensus())) {
        ++block->nNonce;
        // A wrap-around to 0 would mean the nonce space was exhausted.
        assert(block->nNonce);
    }
    bool processed{ProcessNewBlock(Params(), block, true, nullptr)};
    assert(processed);
    return CTxIn{block->vtx[0]->GetHash(), 0};
}
// Assemble a new block template paying `coinbase_scriptPubKey`, with its
// timestamp and merkle root fixed up, ready for nonce grinding in MineBlock().
std::shared_ptr<CBlock> PrepareBlock(const CScript& coinbase_scriptPubKey)
{
    auto block = std::make_shared<CBlock>(
        BlockAssembler{Params()}
            .CreateNewBlock(coinbase_scriptPubKey)
            ->block);
    LOCK(cs_main);
    // Set the earliest timestamp after the tip's median-time-past.
    block->nTime = ::ChainActive().Tip()->GetMedianTimePast() + 1;
    // The merkle root must be recomputed after any transaction/header change.
    block->hashMerkleRoot = BlockMerkleRoot(*block);
    return block;
}
|
#include "act_map/voxblox_utils.h"
#include "act_map/voxblox/integrator/occupancy_integrator.h"
#include "act_map/sampler.h"
#include "act_map/optim_orient.h"
namespace act_map
{
namespace utils
{
// Integrate the given world-frame points into an occupancy layer from an
// identity sensor pose. The same cloud is integrated several times so the
// occupancy evidence of the hit voxels accumulates.
void setPointsInOccupancyLayer(const Eigen::Matrix3Xd& points_w,
                               act_map::OccupancyLayer* occ_layer)
{
  voxblox::OccupancyIntegrator::Config occ_inte_cfg;
  voxblox::OccupancyIntegrator occ_integrator(occ_inte_cfg, occ_layer);
  // Number of repeated integrations of the identical cloud (previously a
  // bare magic constant inside the loop header).
  constexpr int kNumIntegrations = 10;
  // The pose and the converted point list are loop-invariant; build them
  // once instead of re-constructing and re-converting on every iteration.
  rpg::Pose T_w_c;
  T_w_c.setIdentity();
  Vec3dVec points_c_vec;
  eigenKXToVecKVec(points_w, &points_c_vec);
  for (int i = 0; i < kNumIntegrations; i++)
  {
    occ_integrator.integratePointCloud(T_w_c, points_c_vec);
  }
}
void getViewDirsOfOccupiedVoxels(const OccupancyLayer& layer,
const float occ_thresh, Vec3dVec* view_dirs)
{
getFromAllVoxels<OccupancyVoxel, Eigen::Vector3d>(
layer, getViewDirOccVoxel,
std::bind(isOccupancyVoxelOccupied, std::placeholders::_1, occ_thresh),
view_dirs);
}
// Per-block variant: for every allocated block that contains at least one
// occupied voxel, emit the block center, the centers of its occupied voxels,
// and (optionally, when blk_view_dirs is non-null) their average view
// directions. Output vectors are cleared first; entries at the same index
// across the three outputs belong to the same block.
void getCentersOfOccupiedVoxels(const OccupancyLayer& occ_layer,
                                const float occ_thresh, Vec3dVec* blk_cs,
                                V3dVecVec* blk_points_w,
                                V3dVecVec* blk_view_dirs)
{
  CHECK_NOTNULL(blk_cs);
  CHECK_NOTNULL(blk_points_w);
  blk_cs->clear();
  blk_points_w->clear();
  if (blk_view_dirs)
  {
    blk_view_dirs->clear();
  }
  voxblox::BlockIndexList occ_blks;
  occ_layer.getAllAllocatedBlocks(&occ_blks);
  for (const voxblox::BlockIndex& bidx : occ_blks)
  {
    const OccupancyBlock& blk_i = occ_layer.getBlockByIndex(bidx);
    Vec3dVec points_i;
    Vec3dVec views_i;
    for (size_t vidx = 0; vidx < blk_i.num_voxels(); vidx++)
    {
      const OccupancyVoxel& vox_i = blk_i.getVoxelByLinearIndex(vidx);
      if (isOccupancyVoxelOccupied(vox_i, occ_thresh))
      {
        Eigen::Vector3d vox_c = blk_i.computeCoordinatesFromLinearIndex(vidx);
        points_i.emplace_back(vox_c);
        if (blk_view_dirs)
        {
          views_i.emplace_back(
              blk_i.getVoxelByLinearIndex(vidx).aver_view_from_pt.cast<double>());
        }
      }
    }
    // Blocks without any occupied voxel are skipped entirely, keeping the
    // three output vectors index-aligned.
    if (!points_i.empty())
    {
      Eigen::Vector3d blk_c;
      getBlockCenterFromBlk(blk_i, &blk_c);
      blk_cs->emplace_back(blk_c);
      blk_points_w->emplace_back(points_i);
      if (blk_view_dirs)
      {
        blk_view_dirs->emplace_back(views_i);
      }
    }
  }
}
void getCentersOfOccupiedVoxels(const OccupancyLayer& layer,
const float occ_thresh, Vec3dVec* points_w)
{
getFromAllVoxels<OccupancyVoxel, Eigen::Vector3d>(
layer, getVoxelCenter<OccupancyVoxel>,
std::bind(isOccupancyVoxelOccupied, std::placeholders::_1, occ_thresh),
points_w);
}
// Single-block variant: collect the center of every occupied voxel in `blk`.
void getCentersOfOccupiedVoxels(const voxblox::Block<OccupancyVoxel>& blk,
                                const float occ_thresh, Vec3dVec* points_w)
{
  // Lambda form of the occupancy predicate (equivalent to std::bind).
  auto is_occupied = [occ_thresh](const OccupancyVoxel& vox) {
    return isOccupancyVoxelOccupied(vox, occ_thresh);
  };
  getFromAllVoxels<OccupancyVoxel, Eigen::Vector3d>(
      blk, getVoxelCenter<OccupancyVoxel>, is_occupied, points_w);
}
// Count how many voxels in the layer pass the occupancy threshold.
size_t countNumOccupiedVoxels(const OccupancyLayer& layer,
                              const float occ_thresh)
{
  // Lambda form of the occupancy predicate (equivalent to std::bind).
  auto is_occupied = [occ_thresh](const OccupancyVoxel& vox) {
    return isOccupancyVoxelOccupied(vox, occ_thresh);
  };
  return countAllVoxelsIf<OccupancyVoxel>(layer, is_occupied);
}
} // namespace utils
} // namespace act_map
|
#include <bits/stdc++.h>
using namespace std;
char M[16][16];            // board: '.' = open square, '*' = blocked square
int C[17][17], Row[16], n; // C[i][j] = number of '.' in row i at columns >= j
bool Col[16];              // Col[i] = true if row i already holds a queen

// Diagonal-conflict test: may a queen be placed at (row i, column j) given
// the queens already placed in columns 0..j-1? Row reuse is excluded by the
// caller via Col[], and columns are distinct by construction.
bool check(int i, int j) {
    for (int k=0; k<j; ++k)
        if (abs(i-Row[k]) == abs(j-k))
            return 0;
    return 1;
}

// Count placements of n mutually non-attacking queens on open squares,
// filling the board column by column starting at column j. Each of the n
// rows must end up with exactly one queen.
int backtrack(int j) {
    if (j == n) return 1;
    // Prune: if some still-unused row has no open square left in columns
    // j..n-1 (suffix count C[i][j] == 0), it can never receive its queen,
    // so this branch contributes nothing.
    // BUGFIX: this previously read `!C[i]`, which tests the row's array
    // address (never null), so the prune was dead code.
    for (int i=0; i<n; ++i)
        if (!Col[i] && !C[i][j])
            return 0;
    int result = 0;
    for (int i=0; i<n; ++i)
        if (!Col[i] && M[i][j]!='*' && check(i, j)) {
            Row[j] = i;
            Col[i] = 1;
            result += backtrack(j+1);
            Col[i] = 0;
        }
    return result;
}
// Read test cases until n == 0: each case is an n x n board of '.' (open)
// and '*' (blocked); print the number of non-attacking queen placements.
int main() {
    for (int cse=1; cin >> n && n; ++cse) {
        memset(C, 0, sizeof(C));
        for (int i=0; i<n; ++i) {
            cin >> M[i];
            // Build suffix counts of open squares per row, used for pruning.
            for (int j=n-1; j>=0; --j)
                C[i][j] = C[i][j+1] + (M[i][j] == '.');
        }
        cout << "Case " << cse << ": " << backtrack(0) << '\n';
    }
}
|
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: ericv@google.com (Eric Veach)
#include "s2/encoded_s2cell_id_vector.h"
#include <vector>
#include <gtest/gtest.h>
#include "s2/third_party/absl/memory/memory.h"
#include "s2/s2loop.h"
#include "s2/s2pointutil.h"
#include "s2/s2shape_index.h"
#include "s2/s2testing.h"
#include "s2/s2text_format.h"
using s2_absl::make_unique;
using s2textformat::MakeCellIdOrDie;
using std::vector;
namespace s2coding {
// Encodes the given vector and returns the corresponding
// EncodedS2CellIdVector (which points into the Encoder's data buffer).
// Encodes the given vector and returns the corresponding
// EncodedS2CellIdVector (which points into the Encoder's data buffer).
// NOTE: the returned object borrows `encoder`'s buffer — the encoder must
// outlive it.
EncodedS2CellIdVector MakeEncodedS2CellIdVector(const vector<S2CellId>& input,
                                                Encoder* encoder) {
  EncodeS2CellIdVector(input, encoder);
  Decoder decoder(encoder->base(), encoder->length());
  EncodedS2CellIdVector cell_ids;
  EXPECT_TRUE(cell_ids.Init(&decoder));
  return cell_ids;
}
// Encodes the given vector and checks that it has the expected size and
// contents.
// Encodes the given vector and checks that it has the expected size and
// contents: the encoding must occupy exactly `expected_bytes` and decode
// back to the original sequence.
void TestEncodedS2CellIdVector(const vector<S2CellId>& expected,
                               size_t expected_bytes) {
  Encoder encoder;
  EncodedS2CellIdVector actual = MakeEncodedS2CellIdVector(expected, &encoder);
  EXPECT_EQ(expected_bytes, encoder.length());
  EXPECT_EQ(actual.Decode(), expected);
}
// Like the above, but accepts a vector<uint64> rather than a vector<S2CellId>.
void TestEncodedS2CellIdVector(const vector<uint64>& raw_expected,
size_t expected_bytes) {
vector<S2CellId> expected;
for (uint64 raw_id : raw_expected) {
expected.push_back(S2CellId(raw_id));
}
TestEncodedS2CellIdVector(expected, expected_bytes);
}
// An empty vector costs only the fixed header bytes.
TEST(EncodedS2CellIdVector, Empty) {
  TestEncodedS2CellIdVector(vector<S2CellId>{}, 2);
}
// S2CellId::None() (id 0) adds a single byte over the empty encoding.
TEST(EncodedS2CellIdVector, None) {
  TestEncodedS2CellIdVector({S2CellId::None()}, 3);
}
// Each additional None() costs one more byte.
TEST(EncodedS2CellIdVector, NoneNone) {
  TestEncodedS2CellIdVector({S2CellId::None(), S2CellId::None()}, 4);
}
// Sentinel() (all bits set) cannot use a small shift/base, so it is large.
TEST(EncodedS2CellIdVector, Sentinel) {
  TestEncodedS2CellIdVector({S2CellId::Sentinel()}, 10);
}
TEST(EncodedS2CellIdVector, MaximumShiftCell) {
  // Tests the encoding of a single cell at level 2, which corresponds the
  // maximum encodable shift value (56).
  TestEncodedS2CellIdVector({MakeCellIdOrDie("0/00")}, 3);
}
// A second Sentinel() costs only one extra byte on top of the first.
TEST(EncodedS2CellIdVector, SentinelSentinel) {
  TestEncodedS2CellIdVector({S2CellId::Sentinel(), S2CellId::Sentinel()}, 11);
}
// Mixing the extreme values forces wide deltas.
TEST(EncodedS2CellIdVector, NoneSentinelNone) {
  TestEncodedS2CellIdVector(
      {S2CellId::None(), S2CellId::Sentinel(), S2CellId::None()}, 26);
}
TEST(EncodedS2CellIdVector, InvalidCells) {
  // Tests that cells with an invalid LSB can be encoded.
  TestEncodedS2CellIdVector({0x6, 0xe, 0x7e}, 5);
}
TEST(EncodedS2CellIdVector, OneByteLeafCells) {
  // Tests that (1) if all cells are leaf cells, the low bit is not encoded,
  // and (2) this can be indicated using the standard 1-byte header.
  TestEncodedS2CellIdVector({0x3, 0x7, 0x177}, 5);
}
TEST(EncodedS2CellIdVector, OneByteLevel29Cells) {
  // Tests that (1) if all cells are at level 29, the low bit is not encoded,
  // and (2) this can be indicated using the standard 1-byte header.
  TestEncodedS2CellIdVector({0xc, 0x1c, 0x47c}, 5);
}
TEST(EncodedS2CellIdVector, OneByteLevel28Cells) {
  // Tests that (1) if all cells are at level 28, the low bit is not encoded,
  // and (2) this can be indicated using the extended 2-byte header.
  TestEncodedS2CellIdVector({0x30, 0x70, 0x1770}, 6);
}
TEST(EncodedS2CellIdVector, OneByteMixedCellLevels) {
  // Tests that cells at mixed levels can be encoded in one byte.
  TestEncodedS2CellIdVector({0x300, 0x1c00, 0x7000, 0xff00}, 6);
}
TEST(EncodedS2CellIdVector, OneByteMixedCellLevelsWithPrefix) {
  // Tests that cells at mixed levels can be encoded in one byte even when
  // they share a multi-byte prefix.
  TestEncodedS2CellIdVector({
      0x1234567800000300, 0x1234567800001c00,
      0x1234567800007000, 0x123456780000ff00}, 10);
}
TEST(EncodedS2CellIdVector, OneByteRangeWithBaseValue) {
  // Tests that cells can be encoded in one byte by choosing a base value
  // whose bit range overlaps the delta values.
  // 1 byte header, 3 bytes base, 1 byte size, 4 bytes deltas
  TestEncodedS2CellIdVector({
      0x00ffff0000000000, 0x0100fc0000000000,
      0x0100500000000000, 0x0100330000000000}, 9);
}
// The six top-level face cells encode compactly.
TEST(EncodedS2CellIdVector, SixFaceCells) {
  vector<S2CellId> ids;
  for (int face = 0; face < 6; ++face) {
    ids.push_back(S2CellId::FromFace(face));
  }
  TestEncodedS2CellIdVector(ids, 8);
}
// The four children of a level-10 cell share a long prefix, so the whole
// sequence still fits in 8 bytes.
TEST(EncodedS2CellIdVector, FourLevel10Children) {
  vector<S2CellId> ids;
  S2CellId parent = MakeCellIdOrDie("3/012301230");
  for (S2CellId id = parent.child_begin();
       id != parent.child_end(); id = id.next()) {
    ids.push_back(id);
  }
  TestEncodedS2CellIdVector(ids, 8);
}
}
// Realistic workload: the index cells of a fractal loop (~1000 ids) should
// encode at roughly 3 bytes per cell.
TEST(EncodedS2CellIdVector, FractalS2ShapeIndexCells) {
  S2Testing::Fractal fractal;
  fractal.SetLevelForApproxMaxEdges(3 * 1024);
  S2Point center = s2textformat::MakePointOrDie("47.677:-122.206");
  MutableS2ShapeIndex index;
  index.Add(make_unique<S2Loop::OwningShape>(
      fractal.MakeLoop(S2::GetFrame(center), S1Angle::Degrees(1))));
  vector<S2CellId> ids;
  for (MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN);
       !it.done(); it.Next()) {
    ids.push_back(it.id());
  }
  EXPECT_EQ(966, ids.size());
  TestEncodedS2CellIdVector(ids, 2902);
}
// Realistic workload: a fixed 97-cell covering taken from production data;
// verifies the exact encoded size (about 5 bytes per cell).
TEST(EncodedS2CellIdVector, CoveringCells) {
  vector<uint64> ids {
    0x414a617f00000000, 0x414a61c000000000, 0x414a624000000000,
    0x414a63c000000000, 0x414a647000000000, 0x414a64c000000000,
    0x414a653000000000, 0x414a704000000000, 0x414a70c000000000,
    0x414a714000000000, 0x414a71b000000000, 0x414a7a7c00000000,
    0x414a7ac000000000, 0x414a8a4000000000, 0x414a8bc000000000,
    0x414a8c4000000000, 0x414a8d7000000000, 0x414a8dc000000000,
    0x414a914000000000, 0x414a91c000000000, 0x414a924000000000,
    0x414a942c00000000, 0x414a95c000000000, 0x414a96c000000000,
    0x414ab0c000000000, 0x414ab14000000000, 0x414ab34000000000,
    0x414ab3c000000000, 0x414ab44000000000, 0x414ab4c000000000,
    0x414ab6c000000000, 0x414ab74000000000, 0x414ab8c000000000,
    0x414ab94000000000, 0x414aba1000000000, 0x414aba3000000000,
    0x414abbc000000000, 0x414abe4000000000, 0x414abec000000000,
    0x414abf4000000000, 0x46b5454000000000, 0x46b545c000000000,
    0x46b5464000000000, 0x46b547c000000000, 0x46b5487000000000,
    0x46b548c000000000, 0x46b5494000000000, 0x46b54a5400000000,
    0x46b54ac000000000, 0x46b54b4000000000, 0x46b54bc000000000,
    0x46b54c7000000000, 0x46b54c8004000000, 0x46b54ec000000000,
    0x46b55ad400000000, 0x46b55b4000000000, 0x46b55bc000000000,
    0x46b55c4000000000, 0x46b55c8100000000, 0x46b55dc000000000,
    0x46b55e4000000000, 0x46b5604000000000, 0x46b560c000000000,
    0x46b561c000000000, 0x46ca424000000000, 0x46ca42c000000000,
    0x46ca43c000000000, 0x46ca444000000000, 0x46ca45c000000000,
    0x46ca467000000000, 0x46ca469000000000, 0x46ca5fc000000000,
    0x46ca604000000000, 0x46ca60c000000000, 0x46ca674000000000,
    0x46ca679000000000, 0x46ca67f000000000, 0x46ca684000000000,
    0x46ca855000000000, 0x46ca8c4000000000, 0x46ca8cc000000000,
    0x46ca8e5400000000, 0x46ca8ec000000000, 0x46ca8f0100000000,
    0x46ca8fc000000000, 0x46ca900400000000, 0x46ca98c000000000,
    0x46ca994000000000, 0x46ca99c000000000, 0x46ca9a4000000000,
    0x46ca9ac000000000, 0x46ca9bd500000000, 0x46ca9e4000000000,
    0x46ca9ec000000000, 0x46caf34000000000, 0x46caf4c000000000,
    0x46caf54000000000
  };
  EXPECT_EQ(97, ids.size());
  TestEncodedS2CellIdVector(ids, 488);
}
TEST(EncodedS2CellIdVector, LowerBoundLimits) {
  // Test seeking before the beginning and past the end of the vector.
  S2CellId first = S2CellId::Begin(S2CellId::kMaxLevel);
  S2CellId last = S2CellId::End(S2CellId::kMaxLevel).prev();
  Encoder encoder;
  EncodedS2CellIdVector cell_ids = MakeEncodedS2CellIdVector(
      {first, last}, &encoder);
  // lower_bound returns the index of the first element >= the target,
  // or size() (here 2) when the target exceeds every element.
  EXPECT_EQ(0, cell_ids.lower_bound(S2CellId::None()));
  EXPECT_EQ(0, cell_ids.lower_bound(first));
  EXPECT_EQ(1, cell_ids.lower_bound(first.next()));
  EXPECT_EQ(1, cell_ids.lower_bound(last.prev()));
  EXPECT_EQ(1, cell_ids.lower_bound(last));
  EXPECT_EQ(2, cell_ids.lower_bound(last.next()));
  EXPECT_EQ(2, cell_ids.lower_bound(S2CellId::Sentinel()));
}
} // namespace s2coding
|
// Copyright (c) 2014-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2018 The GeekCash developers
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "chainparams.h"
#include "validation.h"
#include "net.h"
#include "test/test_geekcash.h"
#include <boost/signals2/signal.hpp>
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(main_tests, TestingSetup)
// Intentionally disabled: the subsidy-halving logic for this chain is
// covered elsewhere (see geekcash_tests.cpp); the original Bitcoin test body
// is kept commented out for reference.
static void TestBlockSubsidyHalvings(const Consensus::Params& consensusParams)
{
    // tested in geekcash_tests.cpp
    //int maxHalvings = 64;
    //CAmount nInitialSubsidy = 50 * COIN;
    //CAmount nPreviousSubsidy = nInitialSubsidy * 2; // for height == 0
    //BOOST_CHECK_EQUAL(nPreviousSubsidy, nInitialSubsidy * 2);
    //for (int nHalvings = 0; nHalvings < maxHalvings; nHalvings++) {
    //    int nHeight = nHalvings * consensusParams.nSubsidyHalvingInterval;
    //    CAmount nSubsidy = GetBlockSubsidy(0, nHeight, consensusParams);
    //    BOOST_CHECK(nSubsidy <= nInitialSubsidy);
    //    BOOST_CHECK_EQUAL(nSubsidy, nPreviousSubsidy / 2);
    //    nPreviousSubsidy = nSubsidy;
    //}
    //BOOST_CHECK_EQUAL(GetBlockSubsidy(0, maxHalvings * consensusParams.nSubsidyHalvingInterval, consensusParams), 0);
}
// Intentionally disabled overload taking just the halving interval;
// kept commented out for reference (tested in geekcash_tests.cpp).
static void TestBlockSubsidyHalvings(int nSubsidyHalvingInterval)
{
    // tested in geekcash_tests.cpp
    //Consensus::Params consensusParams;
    //consensusParams.nSubsidyHalvingInterval = nSubsidyHalvingInterval;
    //TestBlockSubsidyHalvings(consensusParams);
}
// Placeholder test case: the subsidy checks were moved to geekcash_tests.cpp,
// so this case is intentionally empty.
BOOST_AUTO_TEST_CASE(block_subsidy_test)
{
    // tested in geekcash_tests.cpp
    //TestBlockSubsidyHalvings(Params(CBaseChainParams::MAIN).GetConsensus()); // As in main
    //TestBlockSubsidyHalvings(150); // As in regtest
    //TestBlockSubsidyHalvings(1000); // Just another interval
}
// Placeholder test case: the total-supply check was moved to
// geekcash_tests.cpp, so this case is intentionally empty.
BOOST_AUTO_TEST_CASE(subsidy_limit_test)
{
    // tested in geekcash_tests.cpp
    //const Consensus::Params& consensusParams = Params(CBaseChainParams::MAIN).GetConsensus();
    //CAmount nSum = 0;
    //for (int nHeight = 0; nHeight < 14000000; nHeight += 1000) {
    //    /* @TODO fix subsidity, add nBits */
    //    CAmount nSubsidy = GetBlockSubsidy(0, nHeight, consensusParams);
    //    BOOST_CHECK(nSubsidy <= 25 * COIN);
    //    nSum += nSubsidy * 1000;
    //    BOOST_CHECK(MoneyRange(nSum));
    //}
    //BOOST_CHECK_EQUAL(nSum, 1350824726649000ULL);
}
// Trivial slots used below to exercise the CombinerAll boolean combiner.
bool ReturnFalse() { return false; }
bool ReturnTrue() { return true; }
// Verify CombinerAll semantics on a boost::signals2 signal: the combined
// result is true only if every connected slot returns true (and true when
// no slot is connected at all).
BOOST_AUTO_TEST_CASE(test_combiner_all)
{
    boost::signals2::signal<bool (), CombinerAll> Test;
    // No slots connected: combiner yields true.
    BOOST_CHECK(Test());
    Test.connect(&ReturnFalse);
    BOOST_CHECK(!Test());
    // One false slot poisons the result even with a true slot attached.
    Test.connect(&ReturnTrue);
    BOOST_CHECK(!Test());
    Test.disconnect(&ReturnFalse);
    BOOST_CHECK(Test());
    Test.disconnect(&ReturnTrue);
    BOOST_CHECK(Test());
}
BOOST_AUTO_TEST_SUITE_END()
|
// Copyright (c) 2014 Dropbox, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "codegen/opt/inliner.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include "codegen/codegen.h"
#include "core/options.h"
#include "core/stats.h"
#include "core/util.h"
//#undef VERBOSITY
//#define VERBOSITY(x) 2
namespace pyston {
// ValueMaterializer used when cloning functions into `new_module`: it maps
// global values from the source module onto equivalent declarations in the
// destination module so the cloned body links against local symbols.
class MyMaterializer : public llvm::ValueMaterializer {
private:
    llvm::Module* new_module;

public:
    MyMaterializer(llvm::Module* new_module) : new_module(new_module) {}
    // Returns the replacement for `v` in new_module, or NULL to let the
    // value mapper handle it (constants), or `v` itself when no mapping
    // is needed.
    virtual llvm::Value* materializeValueFor(llvm::Value* v) {
        // llvm::errs() << "materializing\n";
        // v->dump();
        llvm::Value* r = NULL;
        if (llvm::Function* f = llvm::dyn_cast<llvm::Function>(v)) {
            // llvm::errs() << "is function\n";
            // Declare (or reuse) a function with the same name and type.
            r = new_module->getOrInsertFunction(f->getName(), f->getFunctionType());
        } else if (llvm::GlobalVariable* gv = llvm::dyn_cast<llvm::GlobalVariable>(v)) {
            // llvm::errs() << " is gv\n";
            // Private-linkage globals cannot be referenced across modules.
            assert(gv->getLinkage() != llvm::GlobalVariable::PrivateLinkage);
            llvm::GlobalVariable* new_gv = llvm::cast<llvm::GlobalVariable>(
                new_module->getOrInsertGlobal(gv->getName(), gv->getType()->getElementType()));
            RELEASE_ASSERT(!gv->isThreadLocal(), "I don't think MCJIT supports thread-local variables yet");
            new_gv->setThreadLocalMode(gv->getThreadLocalMode());
            r = new_gv;
        } else if (llvm::GlobalAlias* alias = llvm::dyn_cast<llvm::GlobalAlias>(v)) {
// The aliasee accessor was renamed around LLVM r209040.
#if LLVMREV < 209040
            llvm::Value* addressee = llvm::cast<llvm::Constant>(materializeValueFor(alias->getAliasedGlobal()));
#else
            llvm::Value* addressee = llvm::cast<llvm::Constant>(materializeValueFor(alias->getAliasee()));
#endif
            assert(addressee);
            assert(alias->getType() == addressee->getType());
            // Aliases are resolved directly to their (materialized) target.
            r = addressee;
            // r = new llvm::GlobalAlias(alias->getType(), alias->getLinkage(), alias->getName(), addressee,
            // new_module);
        } else if (llvm::isa<llvm::Constant>(v)) {
            // llvm::errs() << " is a constant\n";
            // NULL tells the value mapper to remap the constant itself.
            r = NULL;
        } else {
            r = v;
        }
        // if (r)
        // r->dump();
        // llvm::errs() << "---\n";
        return r;
    }
};
// Function-level inlining pass for Pyston-generated IR.
// Unlike LLVM's CallGraph/SCC inliner, this runs per-function and resolves
// call targets through constant addresses embedded in the IR (calls through
// inttoptr-cast integer constants), looking them up in func_addr_registry.
class MyInliningPass : public llvm::FunctionPass {
public:
    static char ID;                   // LLVM pass identification (address is the unique id)
    static bool initialized;          // guards one-time LLVM pass registration
    static llvm::Module* fake_module; // dummy module used to initialize InlineCostAnalysis state

    int threshold; // inline-cost threshold (same units as LLVM's -inline-threshold)

    MyInliningPass(int threshold = 275) : FunctionPass(ID), threshold(threshold) {}

    // One-time registration of the LLVM analyses this pass depends on.
    // NOTE(review): not thread-safe if two threads compile concurrently — confirm
    // compilation is single-threaded here.
    static void initialize() {
        if (!initialized) {
            llvm::initializeInlineCostAnalysisPass(*llvm::PassRegistry::getPassRegistry());
            llvm::initializeSimpleInlinerPass(*llvm::PassRegistry::getPassRegistry());
            llvm::initializeTargetTransformInfoAnalysisGroup(*llvm::PassRegistry::getPassRegistry());
            fake_module = new llvm::Module("fake", g.context);
            initialized = true;
        }
    }

    virtual const char* getPassName() const { return "Pyston inlining pass"; }

    // Does the real work; returns true iff at least one call was inlined.
    bool _runOnFunction(llvm::Function& f) {
        Timer _t2("(sum)");
        Timer _t("initializing");
        initialize();
        _t.split("overhead");

        // f.dump();

        llvm::Module* cur_module = f.getParent();

        // Run an (otherwise empty) PassManager over the dummy module so that
        // cost_analysis gets properly initialized before we query it below.
        llvm::PassManager fake_pm;
        llvm::InlineCostAnalysis* cost_analysis = new llvm::InlineCostAnalysis();
        fake_pm.add(cost_analysis);
        // llvm::errs() << "doing fake run\n";
        fake_pm.run(*fake_module);
        // llvm::errs() << "done with fake run\n";

        bool did_any_inlining = false;

        // TODO I haven't gotten the callgraph-updating part of the inliner to work,
        // so it's not easy to tell what callsites have been inlined into (ie added to)
        // the function.
        // One simple-but-not-great way to handle it is to just iterate over the entire function
        // multiple times and re-inline things until we don't want to inline any more;
        // NPASSES controls the maximum number of times to attempt that.
        // Right now we actually don't need that, since we only inline fully-optimized
        // functions (from the stdlib), and those will already have had inlining
        // applied recursively.
        const int NPASSES = 1;
        for (int passnum = 0; passnum < NPASSES; passnum++) {
            _t.split("collecting calls");

            // Pass 1: collect every call whose callee is a constant address we
            // can resolve to a materializable LLVM function.
            std::vector<llvm::CallSite> calls;
            for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
                llvm::CallInst* call = llvm::dyn_cast<llvm::CallInst>(&(*I));
                // From Inliner.cpp:
                if (!call || llvm::isa<llvm::IntrinsicInst>(call))
                    continue;
                // I->dump();
                llvm::CallSite CS(call);

                // Only calls through a cast of a constant integer address are candidates.
                llvm::Value* v = CS.getCalledValue();
                llvm::ConstantExpr* ce = llvm::dyn_cast<llvm::ConstantExpr>(v);
                if (!ce)
                    continue;

                assert(ce->isCast());
                llvm::ConstantInt* l_addr = llvm::cast<llvm::ConstantInt>(ce->getOperand(0));
                int64_t addr = l_addr->getSExtValue();

                if (addr == (int64_t)printf)
                    continue;
                llvm::Function* f = g.func_addr_registry.getLLVMFuncAtAddress((void*)addr);
                if (f == NULL) {
                    if (VERBOSITY()) {
                        printf("Giving up on inlining %s:\n",
                               g.func_addr_registry.getFuncNameAtAddress((void*)addr, true).c_str());
                        call->dump();
                    }
                    continue;
                }

                // We load the bitcode lazily, so check if we haven't yet fully loaded the function:
                if (f->isMaterializable()) {
#if LLVMREV < 220600
                    f->Materialize();
#else
                    f->materialize();
#endif
                }

                // It could still be a declaration, though I think the code won't generate this case any more:
                if (f->isDeclaration())
                    continue;

                // Keep this section as a release_assert since the code-to-be-inlined, as well as the inlining
                // decisions, can be different in release mode:
                int op_idx = -1;
                for (llvm::Argument& arg : f->args()) {
                    ++op_idx;
                    llvm::Type* op_type = call->getOperand(op_idx)->getType();
                    if (arg.getType() != op_type) {
                        llvm::errs() << f->getName() << " has arg " << op_idx << " mismatched!\n";
                        llvm::errs() << "Given ";
                        op_type->dump();
                        llvm::errs() << " but underlying function expected ";
                        arg.getType()->dump();
                        llvm::errs() << '\n';
                    }
                    RELEASE_ASSERT(arg.getType() == call->getOperand(op_idx)->getType(), "");
                }

                assert(!f->isDeclaration());
                // Rewrite the indirect constant-address call into a direct call
                // so InlineFunction can see the callee.
                CS.setCalledFunction(f);
                calls.push_back(CS);
            }

            // assert(0 && "TODO");
            // printf("%ld\n", calls.size());

            // Pass 2: query the cost model for each collected callsite and inline.
            bool did_inline = false;
            _t.split("doing inlining");
            while (calls.size()) {
                llvm::CallSite cs = calls.back();
                calls.pop_back();

                // if (VERBOSITY("irgen.inlining") >= 1) {
                // llvm::errs() << "Evaluating callsite ";
                // cs->dump();
                //}
                llvm::InlineCost IC = cost_analysis->getInlineCost(cs, threshold);
                bool do_inline = false;
                if (IC.isAlways()) {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "always inline\n";
                    do_inline = true;
                } else if (IC.isNever()) {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "never inline\n";
                    do_inline = false;
                } else {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "Inline cost: " << IC.getCost() << '\n';
                    // Variable-cost result: InlineCost's boolean conversion
                    // compares the computed cost against the threshold.
                    do_inline = (bool)IC;
                }

                if (VERBOSITY("irgen.inlining") >= 1) {
                    if (!do_inline)
                        llvm::outs() << "not ";
                    llvm::outs() << "inlining ";
                    cs->dump();
                }

                if (do_inline) {
                    static StatCounter num_inlines("num_inlines");
                    num_inlines.log();

                    // llvm::CallGraph cg(*f.getParent());
                    ////cg.addToCallGraph(cs->getCalledFunction());
                    // llvm::InlineFunctionInfo InlineInfo(&cg);
                    llvm::InlineFunctionInfo InlineInfo;
                    bool inlined = llvm::InlineFunction(cs, InlineInfo, false);
                    did_inline = did_inline || inlined;
                    did_any_inlining = did_any_inlining || inlined;

                    // if (inlined)
                    // f.dump();
                }
            }

            if (!did_inline) {
                if (passnum >= NPASSES - 1 && VERBOSITY("irgen.inlining"))
                    printf("quitting after %d passes\n", passnum + 1);
                break;
            }
        }

        // TODO would be nice to break out here and not have to rematerialize the function;
        // I think I have to do that even if no inlining happened from the "setCalledFunction" call above.
        // I thought that'd just change the CS object, but maybe it changes the underlying instruction as well?
        // if (!did_any_inlining)
        // return false;

        // Remap every instruction through MyMaterializer so references pulled in
        // by inlining resolve to definitions in the current module.
        _t.split("remapping");

        llvm::ValueToValueMapTy VMap;
        for (llvm::Function::iterator I = f.begin(), E = f.end(); I != E; ++I) {
            VMap[I] = I;
        }
        MyMaterializer materializer(cur_module);
        for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
            RemapInstruction(&(*I), VMap, llvm::RF_None, NULL, &materializer);
        }

        _t.split("cleaning up");

        // Erase globals that ended up unreferenced after remapping.
        std::vector<llvm::GlobalValue*> to_remove;
        for (llvm::Module::global_iterator I = cur_module->global_begin(), E = cur_module->global_end(); I != E; ++I) {
            if (I->use_empty()) {
                to_remove.push_back(I);
                continue;
            }
        }

        for (int i = 0; i < to_remove.size(); i++) {
            to_remove[i]->eraseFromParent();
        }

        // Also erase unused function declarations (erase-while-iterating safe form).
        for (llvm::Module::iterator I = cur_module->begin(), E = cur_module->end(); I != E;) {
            if (!I->isDeclaration()) {
                ++I;
                continue;
            }
            if (I->use_empty()) {
                I = cur_module->getFunctionList().erase(I);
            } else {
                ++I;
            }
        }

        return did_any_inlining;
    }

    // FunctionPass entry point: wraps _runOnFunction with timing statistics.
    virtual bool runOnFunction(llvm::Function& f) {
        Timer _t("inlining");

        bool rtn = _runOnFunction(f);

        static StatCounter us_inlining("us_compiling_optimizing_inlining");
        long us = _t.end();
        us_inlining.log(us);

        return rtn;
    }
};
// Static pass state definitions.
char MyInliningPass::ID = 0;
bool MyInliningPass::initialized = false;
llvm::Module* MyInliningPass::fake_module = 0;

// Make the pass available by name ("myinliner") to opt-style pass drivers.
static llvm::RegisterPass<MyInliningPass> X("myinliner", "Function-level inliner", false, false);

// Factory used by the rest of the JIT to construct the pass with a custom
// inline-cost threshold. The caller (typically a PassManager) takes ownership.
llvm::FunctionPass* makeFPInliner(int threshold) {
    return new MyInliningPass(threshold);
}
}
|
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
//=============================================================================//
#include "cbase.h"
#include "movevars_shared.h"
#if defined( TF_CLIENT_DLL ) || defined( TF_DLL )
#include "tf_gamerules.h"
#endif
// memdbgon must be the last include file in a .cpp file!!!
#include "tier0/memdbgon.h"
// some cvars used by player movement system
// Default world gravity in units/s^2 (as a cvar default string):
// HL2 ships with 600, every other mod uses 800.
#if defined( HL2_DLL ) || defined( HL2_CLIENT_DLL )
#define DEFAULT_GRAVITY_STRING "600"
#else
#define DEFAULT_GRAVITY_STRING "800"
#endif
// Returns the effective world gravity: the sv_gravity cvar value, scaled by
// the TF gamerules gravity multiplier when a TF build has active gamerules.
float GetCurrentGravity( void )
{
	float flGravity = sv_gravity.GetFloat();
#if defined( TF_CLIENT_DLL ) || defined( TF_DLL )
	if ( TFGameRules() )
	{
		flGravity *= TFGameRules()->GetGravityMultiplier();
	}
#endif
	return flGravity;
}
// Replicated movement cvars. Several are FCVAR_DEVELOPMENTONLY outside of the
// mods (CSTRIKE/DOD/HL1MP) that historically exposed them to server operators.
// NOTE(review): sv_gravity is referenced by GetCurrentGravity() above this
// definition — presumably declared extern in movevars_shared.h; confirm.
ConVar sv_gravity ( "sv_gravity", DEFAULT_GRAVITY_STRING, FCVAR_NOTIFY | FCVAR_REPLICATED, "World gravity." );

#if defined( DOD_DLL ) || defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_stopspeed ( "sv_stopspeed","100", FCVAR_NOTIFY | FCVAR_REPLICATED, "Minimum stopping speed when on ground." );
#else
ConVar sv_stopspeed ( "sv_stopspeed","100", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Minimum stopping speed when on ground." );
#endif // DOD_DLL || CSTRIKE_DLL

// Noclip / spectator movement tuning.
ConVar sv_noclipaccelerate( "sv_noclipaccelerate", "5", FCVAR_NOTIFY | FCVAR_ARCHIVE | FCVAR_REPLICATED);
ConVar sv_noclipspeed ( "sv_noclipspeed", "5", FCVAR_ARCHIVE | FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_specaccelerate( "sv_specaccelerate", "5", FCVAR_NOTIFY | FCVAR_ARCHIVE | FCVAR_REPLICATED);
ConVar sv_specspeed ( "sv_specspeed", "3", FCVAR_ARCHIVE | FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_specnoclip ( "sv_specnoclip", "1", FCVAR_ARCHIVE | FCVAR_NOTIFY | FCVAR_REPLICATED);

#if defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_maxspeed ( "sv_maxspeed", "320", FCVAR_NOTIFY | FCVAR_REPLICATED);
#else
ConVar sv_maxspeed ( "sv_maxspeed", "450", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY);
#endif // CSTRIKE_DLL

#ifdef _XBOX
ConVar sv_accelerate ( "sv_accelerate", "7", FCVAR_NOTIFY | FCVAR_REPLICATED);
#else
#if defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_accelerate ( "sv_accelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED);
#else
ConVar sv_accelerate ( "sv_accelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY);
#endif // CSTRIKE_DLL
#endif//_XBOX

// Air/water movement and view-roll tuning.
#if defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_airaccelerate( "sv_airaccelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_wateraccelerate( "sv_wateraccelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_waterfriction( "sv_waterfriction", "1", FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_footsteps ( "sv_footsteps", "1", FCVAR_NOTIFY | FCVAR_REPLICATED, "Play footstep sound for players" );
ConVar sv_rollspeed ( "sv_rollspeed", "200", FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar sv_rollangle ( "sv_rollangle", "0", FCVAR_NOTIFY | FCVAR_REPLICATED, "Max view roll angle");
#else
ConVar sv_airaccelerate( "sv_airaccelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY );
ConVar sv_wateraccelerate( "sv_wateraccelerate", "10", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY );
ConVar sv_waterfriction( "sv_waterfriction", "1", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY );
ConVar sv_footsteps ( "sv_footsteps", "1", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Play footstep sound for players" );
ConVar sv_rollspeed ( "sv_rollspeed", "200", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY);
ConVar sv_rollangle ( "sv_rollangle", "0", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Max view roll angle");
#endif // CSTRIKE_DLL

#if defined( DOD_DLL ) || defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_friction ( "sv_friction","4", FCVAR_NOTIFY | FCVAR_REPLICATED, "World friction." );
#else
ConVar sv_friction ( "sv_friction","4", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "World friction." );
#endif // DOD_DLL || CSTRIKE_DLL

#if defined( CSTRIKE_DLL ) || defined( HL1MP_DLL )
ConVar sv_bounce ( "sv_bounce","0", FCVAR_NOTIFY | FCVAR_REPLICATED, "Bounce multiplier for when physically simulated objects collide with other objects." );
ConVar sv_maxvelocity ( "sv_maxvelocity","3500", FCVAR_REPLICATED, "Maximum speed any ballistically moving object is allowed to attain per axis." );
ConVar sv_stepsize ( "sv_stepsize","18", FCVAR_NOTIFY | FCVAR_REPLICATED );
ConVar sv_backspeed ( "sv_backspeed", "0.6", FCVAR_ARCHIVE | FCVAR_REPLICATED, "How much to slow down backwards motion" );
ConVar sv_waterdist ( "sv_waterdist","12", FCVAR_REPLICATED, "Vertical view fixup when eyes are near water plane." );
#else
ConVar sv_bounce ( "sv_bounce","0", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Bounce multiplier for when physically simulated objects collide with other objects." );
ConVar sv_maxvelocity ( "sv_maxvelocity","3500", FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Maximum speed any ballistically moving object is allowed to attain per axis." );
ConVar sv_stepsize ( "sv_stepsize","18", FCVAR_NOTIFY | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY );
ConVar sv_backspeed ( "sv_backspeed", "0.6", FCVAR_ARCHIVE | FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "How much to slow down backwards motion" );
ConVar sv_waterdist ( "sv_waterdist","12", FCVAR_REPLICATED | FCVAR_DEVELOPMENTONLY, "Vertical view fixup when eyes are near water plane." );
#endif // CSTRIKE_DLL

ConVar sv_skyname ( "sv_skyname", "sky_urb01", FCVAR_ARCHIVE | FCVAR_REPLICATED, "Current name of the skybox texture" );

// Vehicle convars
ConVar r_VehicleViewDampen( "r_VehicleViewDampen", "1", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED );

// Jeep convars
ConVar r_JeepViewDampenFreq( "r_JeepViewDampenFreq", "7.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED );
ConVar r_JeepViewDampenDamp( "r_JeepViewDampenDamp", "1.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar r_JeepViewZHeight( "r_JeepViewZHeight", "10.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED );

// Airboat convars
ConVar r_AirboatViewDampenFreq( "r_AirboatViewDampenFreq", "7.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED );
ConVar r_AirboatViewDampenDamp( "r_AirboatViewDampenDamp", "1.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED);
ConVar r_AirboatViewZHeight( "r_AirboatViewZHeight", "0.0", FCVAR_CHEAT | FCVAR_NOTIFY | FCVAR_REPLICATED );
|
#include<bits/stdc++.h>
using namespace std;

// Visit counter for each board square (40 used; sized with headroom).
int cnt[50];

// Mersenne Twister engine seeded once from a nondeterministic source.
random_device rd;
mt19937 ran(rd());

// Roll one fair n-sided die: a uniformly distributed integer in [1, n].
int dice(int n)
{
    std::uniform_int_distribution<int> roll(1, n);
    return roll(ran);
}
// Monte-Carlo simulation of Monopoly board movement (Project Euler 84):
// play many turns with two 4-sided dice and print the indices of the three
// most-visited squares, concatenated as zero-padded two-digit numbers.
int main()
{
    int dice_size=4;      // 4-sided dice (the Euler 84 variant)
    int show_fields=3;    // NOTE(review): unused — the top-3 count is hard-coded below
    int rolls=1e6;        // number of simulated turns
    srand(time(0));       // seeds rand(), presumably used by random_shuffle — verify on this stdlib
    int num_fields=40;
    // Special squares, 0-based.
    int GO =0;
    int JAIL=10;
    int G2J=30;           // "Go To Jail"
    int CC[3]={2,17,33};  // Community Chest squares
    int CH[3]={7,22,36};  // Chance squares
    // R[i]/U[i]: the nearest railway / utility reached when drawing the
    // corresponding card while standing on chance square CH[i].
    int R[3]={15,25,5};
    int U[3]={12,28,12};
    // Each deck holds 16 cards, identified 0..15; only the low-numbered
    // cards move the player, the rest are no-ops for movement purposes.
    vector<int> chance,community;
    for(int i=0;i<16;i++)
    {
        chance.push_back(i);
        community.push_back(i);
    }
    // Shuffle each deck once; cards are drawn from the front and rotated
    // to the back, like a real deck.
    random_shuffle(chance.begin(),chance.end());
    random_shuffle(community.begin(),community.end());
    //Monte-Carlo simulation
    int cur=GO;
    int doubles=0;
    for(int ro=1;ro<=rolls;ro++)
    {
        int dice1=dice(dice_size);
        int dice2=dice(dice_size);
        int nxt=(cur+dice1+dice2)%num_fields;
        // Three consecutive doubles send the player straight to jail.
        if(dice1==dice2)
            doubles++;
        else
            doubles=0;
        if(doubles==3)
        {
            nxt=JAIL;
            doubles=0;
        }
        // Landed on a Chance square? Resolve the top card.
        for(int i=0;i<3;i++)
        {
            if(CH[i]==nxt)
            {
                switch(chance[0])
                {
                    case 0:nxt=GO;break;
                    case 1:nxt=JAIL;break;
                    case 2:nxt=11;break;    // advance to C1
                    case 3:nxt=24;break;    // advance to E3
                    case 4:nxt=39;break;    // advance to H2
                    case 5:nxt=5;break;     // advance to R1
                    case 6:                 // two "next railway" cards
                    case 7:nxt=R[i];break;
                    case 8:nxt=U[i];break;  // next utility
                    case 9:nxt-=3;break;    // go back three squares
                }
                rotate(chance.begin(),chance.begin()+1,chance.end());
            }
        }
        // Community Chest check runs after Chance, so "back three" from
        // CH3 (36 -> 33) correctly lands on CC3 and draws a card.
        for(int i=0;i<3;i++)
        {
            if(nxt==CC[i])
            {
                switch(community[0])
                {
                    case 0:nxt=GO;break;
                    case 1:nxt=JAIL;break;
                }
                rotate(community.begin(),community.begin()+1,community.end());
            }
        }
        if(nxt==G2J)
            nxt=JAIL;
        cnt[nxt]++;
        cur=nxt;
    }
    // Rank squares by visit frequency: the multimap sorts ascending, so the
    // reverse iterator yields the most-visited squares first.
    multimap<double,int> m;
    for(int i=0;i<40;i++)
    {
        m.insert(make_pair(1.0*cnt[i]/rolls,i));
    }
    auto i=m.rbegin();
    for(int j=1;j<=3;j++)
    {
        cout<<setw(2)<<setfill('0')<<(*i++).second;
    }
}
|
#pragma once
#include <Register/Utility.hpp>
namespace Kvasir {
//Serial peripheral interface
// Machine-generated memory-mapped register description (Kvasir style) for an
// STM32 SPI/I2S peripheral at base address 0x40013000. Each Register::Address
// is parameterized as <address, clear-mask, set-mask, value type>, and each
// FieldLocation names one bit-field via maskFromRange(msb, lsb).
// NOTE(review): on most STM32 parts 0x40013000 is the SPI1 base while SPI2
// lives on APB1 — confirm these "Spi2" names against this device's SVD file.
namespace Spi2Cr1{ ///<control register 1
using Addr = Register::Address<0x40013000,0xffff0000,0x00000000,unsigned>;
///Bidirectional data mode enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,15),Register::ReadWriteAccess,unsigned> bidimode{};
///Output enable in bidirectional mode
constexpr Register::FieldLocation<Addr,Register::maskFromRange(14,14),Register::ReadWriteAccess,unsigned> bidioe{};
///Hardware CRC calculation enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(13,13),Register::ReadWriteAccess,unsigned> crcen{};
///CRC transfer next
constexpr Register::FieldLocation<Addr,Register::maskFromRange(12,12),Register::ReadWriteAccess,unsigned> crcnext{};
///Data frame format
constexpr Register::FieldLocation<Addr,Register::maskFromRange(11,11),Register::ReadWriteAccess,unsigned> dff{};
///Receive only
constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,10),Register::ReadWriteAccess,unsigned> rxonly{};
///Software slave management
constexpr Register::FieldLocation<Addr,Register::maskFromRange(9,9),Register::ReadWriteAccess,unsigned> ssm{};
///Internal slave select
constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> ssi{};
///Frame format
constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> lsbfirst{};
///SPI enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,6),Register::ReadWriteAccess,unsigned> spe{};
///Baud rate control
constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,3),Register::ReadWriteAccess,unsigned> br{};
///Master selection
constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,unsigned> mstr{};
///Clock polarity
constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> cpol{};
///Clock phase
constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> cpha{};
}
namespace Spi2Cr2{ ///<control register 2
using Addr = Register::Address<0x40013004,0xffffff08,0x00000000,unsigned>;
///Tx buffer empty interrupt enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> txeie{};
///RX buffer not empty interrupt enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,6),Register::ReadWriteAccess,unsigned> rxneie{};
///Error interrupt enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,5),Register::ReadWriteAccess,unsigned> errie{};
///Frame format
constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,4),Register::ReadWriteAccess,unsigned> frf{};
///SS output enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::ReadWriteAccess,unsigned> ssoe{};
///Tx buffer DMA enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::ReadWriteAccess,unsigned> txdmaen{};
///Rx buffer DMA enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> rxdmaen{};
}
namespace Spi2Sr{ ///<status register
using Addr = Register::Address<0x40013008,0xfffffe00,0x00000000,unsigned>;
///TI frame format error
constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> tifrfe{};
///Busy flag
constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> bsy{};
///Overrun flag
constexpr Register::FieldLocation<Addr,Register::maskFromRange(6,6),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> ovr{};
///Mode fault
constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,5),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> modf{};
///CRC error flag
constexpr Register::FieldLocation<Addr,Register::maskFromRange(4,4),Register::ReadWriteAccess,unsigned> crcerr{};
///Underrun flag
constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> udr{};
///Channel side
constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,2),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> chside{};
///Transmit buffer empty
constexpr Register::FieldLocation<Addr,Register::maskFromRange(1,1),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> txe{};
///Receive buffer not empty
constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::Access<Register::AccessType::readOnly,Register::ReadActionType::normal,Register::ModifiedWriteValueType::normal>,unsigned> rxne{};
}
namespace Spi2Dr{ ///<data register
using Addr = Register::Address<0x4001300c,0xffff0000,0x00000000,unsigned>;
///Data register
constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,0),Register::ReadWriteAccess,unsigned> dr{};
}
namespace Spi2Crcpr{ ///<CRC polynomial register
using Addr = Register::Address<0x40013010,0xffff0000,0x00000000,unsigned>;
///CRC polynomial register
constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,0),Register::ReadWriteAccess,unsigned> crcpoly{};
}
namespace Spi2Rxcrcr{ ///<RX CRC register
using Addr = Register::Address<0x40013014,0xffff0000,0x00000000,unsigned>;
///Rx CRC register
constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,0),Register::ReadWriteAccess,unsigned> rxcrc{};
}
namespace Spi2Txcrcr{ ///<TX CRC register
using Addr = Register::Address<0x40013018,0xffff0000,0x00000000,unsigned>;
///Tx CRC register
constexpr Register::FieldLocation<Addr,Register::maskFromRange(15,0),Register::ReadWriteAccess,unsigned> txcrc{};
}
namespace Spi2I2scfgr{ ///<I2S configuration register
using Addr = Register::Address<0x4001301c,0xfffff040,0x00000000,unsigned>;
///I2S mode selection
constexpr Register::FieldLocation<Addr,Register::maskFromRange(11,11),Register::ReadWriteAccess,unsigned> i2smod{};
///I2S Enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(10,10),Register::ReadWriteAccess,unsigned> i2se{};
///I2S configuration mode
constexpr Register::FieldLocation<Addr,Register::maskFromRange(9,8),Register::ReadWriteAccess,unsigned> i2scfg{};
///PCM frame synchronization
constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,7),Register::ReadWriteAccess,unsigned> pcmsync{};
///I2S standard selection
constexpr Register::FieldLocation<Addr,Register::maskFromRange(5,4),Register::ReadWriteAccess,unsigned> i2sstd{};
///Steady state clock polarity
constexpr Register::FieldLocation<Addr,Register::maskFromRange(3,3),Register::ReadWriteAccess,unsigned> ckpol{};
///Data length to be transferred
constexpr Register::FieldLocation<Addr,Register::maskFromRange(2,1),Register::ReadWriteAccess,unsigned> datlen{};
///Channel length (number of bits per audio channel)
constexpr Register::FieldLocation<Addr,Register::maskFromRange(0,0),Register::ReadWriteAccess,unsigned> chlen{};
}
namespace Spi2I2spr{ ///<I2S prescaler register
using Addr = Register::Address<0x40013020,0xfffffc00,0x00000000,unsigned>;
///Master clock output enable
constexpr Register::FieldLocation<Addr,Register::maskFromRange(9,9),Register::ReadWriteAccess,unsigned> mckoe{};
///Odd factor for the prescaler
constexpr Register::FieldLocation<Addr,Register::maskFromRange(8,8),Register::ReadWriteAccess,unsigned> odd{};
///I2S Linear prescaler
constexpr Register::FieldLocation<Addr,Register::maskFromRange(7,0),Register::ReadWriteAccess,unsigned> i2sdiv{};
}
}
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "script/standard.h"
#include "pubkey.h"
#include "script/script.h"
#include "util.h"
#include "utilstrencodings.h"
#include <boost/foreach.hpp>
using namespace std;
typedef vector<unsigned char> valtype;

// Relay-policy defaults for OP_RETURN data-carrier outputs.
bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER;
unsigned nMaxDatacarrierBytes = MAX_OP_RETURN_RELAY;

// A script's ID is the 160-bit Hash160 of its serialized bytes.
CScriptID::CScriptID(const CScript& in) : uint160(Hash160(in.begin(), in.end())) {}
/**
 * Map a txnouttype value to its canonical human-readable name.
 * Returns NULL for any unrecognized value.
 */
const char* GetTxnOutputType(txnouttype t)
{
    if (t == TX_NONSTANDARD)           return "nonstandard";
    if (t == TX_PUBKEY)                return "pubkey";
    if (t == TX_PUBKEYHASH)            return "pubkeyhash";
    if (t == TX_SCRIPTHASH)            return "scripthash";
    if (t == TX_MULTISIG)              return "multisig";
    if (t == TX_NULL_DATA)             return "nulldata";
    if (t == TX_WITNESS_V0_KEYHASH)    return "witness_v0_keyhash";
    if (t == TX_WITNESS_V0_SCRIPTHASH) return "witness_v0_scripthash";
    return NULL;
}
/**
 * Return public keys or hashes from scriptPubKey, for 'standard' transaction types.
 *
 * Classifies scriptPubKey into one of the standard txnouttype categories and
 * fills vSolutionsRet with the extracted data (hashes, pubkeys, or m/n counts).
 * Returns false and sets typeRet = TX_NONSTANDARD when nothing matches.
 */
bool Solver(const CScript& scriptPubKey, txnouttype& typeRet, vector<vector<unsigned char> >& vSolutionsRet)
{
    // Templates
    // NOTE(review): lazily-initialized function-local static; before C++11 this
    // pattern is not thread-safe — confirm the build's language mode/threading.
    static multimap<txnouttype, CScript> mTemplates;
    if (mTemplates.empty())
    {
        // Standard tx, sender provides pubkey, receiver adds signature
        mTemplates.insert(make_pair(TX_PUBKEY, CScript() << OP_PUBKEY << OP_CHECKSIG));

        // Infinitum address tx, sender provides hash of pubkey, receiver provides signature and pubkey
        mTemplates.insert(make_pair(TX_PUBKEYHASH, CScript() << OP_DUP << OP_HASH160 << OP_PUBKEYHASH << OP_EQUALVERIFY << OP_CHECKSIG));

        // Sender provides N pubkeys, receivers provides M signatures
        mTemplates.insert(make_pair(TX_MULTISIG, CScript() << OP_SMALLINTEGER << OP_PUBKEYS << OP_SMALLINTEGER << OP_CHECKMULTISIG));
    }

    vSolutionsRet.clear();

    // Shortcut for pay-to-script-hash, which are more constrained than the other types:
    // it is always OP_HASH160 20 [20 byte hash] OP_EQUAL
    if (scriptPubKey.IsPayToScriptHash())
    {
        typeRet = TX_SCRIPTHASH;
        // Skip OP_HASH160 and the push opcode; copy the 20-byte script hash.
        vector<unsigned char> hashBytes(scriptPubKey.begin()+2, scriptPubKey.begin()+22);
        vSolutionsRet.push_back(hashBytes);
        return true;
    }

    // Segwit v0 programs: 20 bytes = P2WPKH, 32 bytes = P2WSH.
    int witnessversion;
    std::vector<unsigned char> witnessprogram;
    if (scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) {
        if (witnessversion == 0 && witnessprogram.size() == 20) {
            typeRet = TX_WITNESS_V0_KEYHASH;
            vSolutionsRet.push_back(witnessprogram);
            return true;
        }
        if (witnessversion == 0 && witnessprogram.size() == 32) {
            typeRet = TX_WITNESS_V0_SCRIPTHASH;
            vSolutionsRet.push_back(witnessprogram);
            return true;
        }
        return false;
    }

    // Provably prunable, data-carrying output
    //
    // So long as script passes the IsUnspendable() test and all but the first
    // byte passes the IsPushOnly() test we don't care what exactly is in the
    // script.
    if (scriptPubKey.size() >= 1 && scriptPubKey[0] == OP_RETURN && scriptPubKey.IsPushOnly(scriptPubKey.begin()+1)) {
        typeRet = TX_NULL_DATA;
        return true;
    }

    // Scan templates
    const CScript& script1 = scriptPubKey;
    BOOST_FOREACH(const PAIRTYPE(txnouttype, CScript)& tplate, mTemplates)
    {
        const CScript& script2 = tplate.second;
        vSolutionsRet.clear();

        opcodetype opcode1, opcode2;
        vector<unsigned char> vch1, vch2;

        // Compare
        CScript::const_iterator pc1 = script1.begin();
        CScript::const_iterator pc2 = script2.begin();
        while (true)
        {
            if (pc1 == script1.end() && pc2 == script2.end())
            {
                // Found a match
                typeRet = tplate.first;
                if (typeRet == TX_MULTISIG)
                {
                    // Additional checks for TX_MULTISIG:
                    // solutions are [m, pubkey..., n]; sanity-check the counts.
                    unsigned char m = vSolutionsRet.front()[0];
                    unsigned char n = vSolutionsRet.back()[0];
                    if (m < 1 || n < 1 || m > n || vSolutionsRet.size()-2 != n)
                        return false;
                }
                return true;
            }
            if (!script1.GetOp(pc1, opcode1, vch1))
                break;
            if (!script2.GetOp(pc2, opcode2, vch2))
                break;

            // Template matching opcodes:
            if (opcode2 == OP_PUBKEYS)
            {
                // Greedily consume every push that looks like a public key
                // (33 bytes compressed, 65 bytes uncompressed).
                while (vch1.size() >= 33 && vch1.size() <= 65)
                {
                    vSolutionsRet.push_back(vch1);
                    if (!script1.GetOp(pc1, opcode1, vch1))
                        break;
                }
                if (!script2.GetOp(pc2, opcode2, vch2))
                    break;
                // Normal situation is to fall through
                // to other if/else statements
            }

            if (opcode2 == OP_PUBKEY)
            {
                if (vch1.size() < 33 || vch1.size() > 65)
                    break;
                vSolutionsRet.push_back(vch1);
            }
            else if (opcode2 == OP_PUBKEYHASH)
            {
                if (vch1.size() != sizeof(uint160))
                    break;
                vSolutionsRet.push_back(vch1);
            }
            else if (opcode2 == OP_SMALLINTEGER)
            {   // Single-byte small integer pushed onto vSolutions
                if (opcode1 == OP_0 ||
                    (opcode1 >= OP_1 && opcode1 <= OP_16))
                {
                    char n = (char)CScript::DecodeOP_N(opcode1);
                    vSolutionsRet.push_back(valtype(1, n));
                }
                else
                    break;
            }
            else if (opcode1 != opcode2 || vch1 != vch2)
            {
                // Others must match exactly
                break;
            }
        }
    }

    vSolutionsRet.clear();
    typeRet = TX_NONSTANDARD;
    return false;
}
/**
 * Derive the single destination encoded by scriptPubKey, if any.
 * Only TX_PUBKEY, TX_PUBKEYHASH and TX_SCRIPTHASH outputs have one;
 * multisig and all other types return false.
 */
bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet)
{
    vector<valtype> vSolutions;
    txnouttype whichType;
    if (!Solver(scriptPubKey, whichType, vSolutions))
        return false;

    switch (whichType) {
    case TX_PUBKEY: {
        CPubKey pubKey(vSolutions[0]);
        if (!pubKey.IsValid())
            return false;
        addressRet = pubKey.GetID();
        return true;
    }
    case TX_PUBKEYHASH:
        addressRet = CKeyID(uint160(vSolutions[0]));
        return true;
    case TX_SCRIPTHASH:
        addressRet = CScriptID(uint160(vSolutions[0]));
        return true;
    default:
        // Multisig txns have more than one address...
        return false;
    }
}
/**
 * Extract every destination from scriptPubKey along with its type and the
 * number of required signers. For non-multisig outputs this delegates to
 * ExtractDestination (nRequiredRet = 1); for multisig it collects each
 * valid pubkey's key ID and sets nRequiredRet = m.
 */
bool ExtractDestinations(const CScript& scriptPubKey, txnouttype& typeRet, vector<CTxDestination>& addressRet, int& nRequiredRet)
{
    addressRet.clear();
    typeRet = TX_NONSTANDARD;

    vector<valtype> vSolutions;
    if (!Solver(scriptPubKey, typeRet, vSolutions))
        return false;
    if (typeRet == TX_NULL_DATA)
        return false; // data carrier: no addresses by construction

    if (typeRet != TX_MULTISIG)
    {
        nRequiredRet = 1;
        CTxDestination address;
        if (!ExtractDestination(scriptPubKey, address))
            return false;
        addressRet.push_back(address);
        return true;
    }

    // Multisig layout in vSolutions: [m, pubkey_1 ... pubkey_n, n].
    nRequiredRet = vSolutions.front()[0];
    for (unsigned int i = 1; i + 1 < vSolutions.size(); i++)
    {
        CPubKey pubKey(vSolutions[i]);
        if (!pubKey.IsValid())
            continue;
        addressRet.push_back(CTxDestination(pubKey.GetID()));
    }
    return !addressRet.empty();
}
namespace
{
/**
 * Boost.Variant visitor that writes the canonical scriptPubKey for a
 * CTxDestination into the supplied script. Returns true when a script
 * was produced, false for CNoDestination. The target script is always
 * cleared first, even on failure.
 */
class CScriptVisitor : public boost::static_visitor<bool>
{
private:
    CScript *script;
public:
    // explicit: prevent accidental implicit conversion from CScript* to a visitor.
    explicit CScriptVisitor(CScript *scriptin) : script(scriptin) {}

    bool operator()(const CNoDestination &dest) const {
        // No destination: leave an empty script.
        script->clear();
        return false;
    }

    bool operator()(const CKeyID &keyID) const {
        // Standard pay-to-pubkey-hash script.
        script->clear();
        *script << OP_DUP << OP_HASH160 << ToByteVector(keyID) << OP_EQUALVERIFY << OP_CHECKSIG;
        return true;
    }

    bool operator()(const CScriptID &scriptID) const {
        // Standard pay-to-script-hash script.
        script->clear();
        *script << OP_HASH160 << ToByteVector(scriptID) << OP_EQUAL;
        return true;
    }
};
}
/** Build the canonical scriptPubKey for dest (empty for CNoDestination). */
CScript GetScriptForDestination(const CTxDestination& dest)
{
    CScript result;
    CScriptVisitor visitor(&result);
    boost::apply_visitor(visitor, dest);
    return result;
}
/** Build a raw pay-to-pubkey script: <pubkey> OP_CHECKSIG. */
CScript GetScriptForRawPubKey(const CPubKey& pubKey)
{
    std::vector<unsigned char> vchPubKey(pubKey.begin(), pubKey.end());
    CScript result;
    result << vchPubKey << OP_CHECKSIG;
    return result;
}
/** Build a bare m-of-n multisig script: m <pubkey_1...pubkey_n> n OP_CHECKMULTISIG. */
CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys)
{
    CScript result;
    result << CScript::EncodeOP_N(nRequired);
    BOOST_FOREACH(const CPubKey& key, keys) {
        result << ToByteVector(key);
    }
    result << CScript::EncodeOP_N(keys.size()) << OP_CHECKMULTISIG;
    return result;
}
// Build the segwit v0 scriptPubKey for redeemscript: pay-to-pubkey(-hash)
// scripts become OP_0 <20-byte key hash> (P2WPKH); everything else becomes
// OP_0 <SHA256(redeemscript)> (P2WSH).
CScript GetScriptForWitness(const CScript& redeemscript)
{
    CScript ret;

    txnouttype typ;
    std::vector<std::vector<unsigned char> > vSolutions;
    if (Solver(redeemscript, typ, vSolutions)) {
        if (typ == TX_PUBKEY) {
            // Hash the raw pubkey to form the v0 keyhash program.
            unsigned char h160[20];
            CHash160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(h160);
            ret << OP_0 << std::vector<unsigned char>(&h160[0], &h160[20]);
            return ret;
        } else if (typ == TX_PUBKEYHASH) {
            // Solution already holds the 20-byte key hash.
            ret << OP_0 << vSolutions[0];
            return ret;
        }
    }
    // Fallback: v0 script-hash program over the whole redeem script.
    uint256 hash;
    CSHA256().Write(&redeemscript[0], redeemscript.size()).Finalize(hash.begin());
    ret << OP_0 << ToByteVector(hash);
    return ret;
}
|
/*=========================================================================
Program: Visualization Toolkit
Module: vtkEmptyCell.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkEmptyCell.h"
#include "vtkCellArray.h"
#include "vtkMath.h"
#include "vtkObjectFactory.h"
#include "vtkPoints.h"
vtkStandardNewMacro(vtkEmptyCell);
//----------------------------------------------------------------------------
// An empty cell has no geometry, so there is nothing to evaluate against:
// report "outside" (return 0) with sentinel parametric coordinates.
int vtkEmptyCell::EvaluatePosition(double vtkNotUsed(x)[3],
                                   double closestPoint[3],
                                   int& subId,
                                   double pcoords[3],
                                   double& dist2,
                                   double *vtkNotUsed(weights))
{
  subId = 0;
  for (int i = 0; i < 3; i++)
    {
    pcoords[i] = -1.0;
    }
  if (closestPoint != NULL)
    {
    dist2 = -1.0;
    for (int i = 0; i < 3; i++)
      {
      closestPoint[i] = 0.0;
      }
    }
  return 0;
}
//----------------------------------------------------------------------------
// An empty cell has no interior; map every parametric location to the origin.
void vtkEmptyCell::EvaluateLocation(int& vtkNotUsed(subId),
                                    double vtkNotUsed(pcoords)[3],
                                    double x[3],
                                    double *vtkNotUsed(weights))
{
  for (int i = 0; i < 3; i++)
    {
    x[i] = 0.0;
    }
}
//----------------------------------------------------------------------------
// An empty cell has no boundary entities: clear the output id list and
// report the point as outside the cell (return 0).
int vtkEmptyCell::CellBoundary(int vtkNotUsed(subId),
                               double vtkNotUsed(pcoords)[3],
                               vtkIdList* pts)
{
  pts->Reset();
  return 0;
}
//----------------------------------------------------------------------------
// Contouring an empty cell produces no output primitives; intentional no-op.
void vtkEmptyCell::Contour(double vtkNotUsed(value),
                           vtkDataArray *vtkNotUsed(cellScalars),
                           vtkIncrementalPointLocator *vtkNotUsed(locator),
                           vtkCellArray *vtkNotUsed(verts),
                           vtkCellArray *vtkNotUsed(lines),
                           vtkCellArray *vtkNotUsed(polys),
                           vtkPointData *vtkNotUsed(inPd),
                           vtkPointData *vtkNotUsed(outPd),
                           vtkCellData *vtkNotUsed(inCd),
                           vtkIdType vtkNotUsed(cellId),
                           vtkCellData *vtkNotUsed(outCd))
{
}
//----------------------------------------------------------------------------
// An empty cell has no geometry, so intersection is never detected;
// this method always returns 0.
// No geometry to intersect: always reports "no intersection" (0).
int vtkEmptyCell::IntersectWithLine(double vtkNotUsed(p1)[3],
                                    double vtkNotUsed(p2)[3],
                                    double vtkNotUsed(tol),
                                    double& vtkNotUsed(t),
                                    double vtkNotUsed(x)[3],
                                    double vtkNotUsed(pcoords)[3],
                                    int& vtkNotUsed(subId))
{
  return 0;
}
//----------------------------------------------------------------------------
// Triangulation of an empty cell yields no points/ids; the outputs are
// cleared and 1 (success) is returned.
int vtkEmptyCell::Triangulate(int vtkNotUsed(index),
                              vtkIdList *ptIds, vtkPoints *pts)
{
  pts->Reset();
  ptIds->Reset();
  return 1;
}
//----------------------------------------------------------------------------
// No data to differentiate; intentionally a no-op (derivs is left
// untouched).
void vtkEmptyCell::Derivatives(int vtkNotUsed(subId),
                               double vtkNotUsed(pcoords)[3],
                               double *vtkNotUsed(values),
                               int vtkNotUsed(dim),
                               double *vtkNotUsed(derivs))
{
}
//----------------------------------------------------------------------------
// Clipping an empty cell produces no output; intentionally a no-op.
void vtkEmptyCell::Clip(double vtkNotUsed(value),
                        vtkDataArray *vtkNotUsed(cellScalars),
                        vtkIncrementalPointLocator *vtkNotUsed(locator),
                        vtkCellArray *vtkNotUsed(verts),
                        vtkPointData *vtkNotUsed(inPD),
                        vtkPointData *vtkNotUsed(outPD),
                        vtkCellData *vtkNotUsed(inCD),
                        vtkIdType vtkNotUsed(cellId),
                        vtkCellData *vtkNotUsed(outCD),
                        int vtkNotUsed(insideOut))
{
}
//----------------------------------------------------------------------------
// Standard VTK PrintSelf: nothing to add beyond the superclass output.
void vtkEmptyCell::PrintSelf(ostream& os, vtkIndent indent)
{
  this->Superclass::PrintSelf(os,indent);
}
|
// -*- mode:c++; tab-width:2; indent-tabs-mode:nil; c-basic-offset:2 -*-
/*
* Copyright 2008-2011 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <zxing/common/CharacterSetECI.h>
#include <zxing/common/IllegalArgumentException.h>
#include <zxing/FormatException.h>
using std::string;
using zxing::common::CharacterSetECI;
using zxing::IllegalArgumentException;
// Global registries mapping ECI values and charset names to their
// CharacterSetECI instance; populated once by init_tables() below.
std::map<int, zxing::Ref<CharacterSetECI> > CharacterSetECI::VALUE_TO_ECI;
std::map<std::string, zxing::Ref<CharacterSetECI> > CharacterSetECI::NAME_TO_ECI;
// Forces table population during static initialization of this TU.
const bool CharacterSetECI::inited = CharacterSetECI::init_tables();
// Registers one charset.  VALUES is a list of ECI values and STRINGS a
// list of recognized names; XC stands in for the comma so multi-element
// lists survive macro argument parsing.  Both arrays get a sentinel
// (-1 / null) appended.
#define ADD_CHARACTER_SET(VALUES, STRINGS) \
  { static int values[] = {VALUES, -1}; \
    static char const* strings[] = {STRINGS, 0}; \
    addCharacterSet(values, strings); }
#define XC ,
// Populates VALUE_TO_ECI / NAME_TO_ECI with the standard ECI charset
// assignments.  Values 14 and 19 are deliberately absent (apparently
// unassigned in the ECI specification — worth confirming against the
// AIM ECI table).  Always returns true so it can seed `inited`.
bool CharacterSetECI::init_tables()
{
  ADD_CHARACTER_SET(0 XC 2, "Cp437");
  ADD_CHARACTER_SET(1 XC 3, "ISO8859_1" XC "ISO-8859-1");
  ADD_CHARACTER_SET(4, "ISO8859_2" XC "ISO-8859-2");
  ADD_CHARACTER_SET(5, "ISO8859_3" XC "ISO-8859-3");
  ADD_CHARACTER_SET(6, "ISO8859_4" XC "ISO-8859-4");
  ADD_CHARACTER_SET(7, "ISO8859_5" XC "ISO-8859-5");
  ADD_CHARACTER_SET(8, "ISO8859_6" XC "ISO-8859-6");
  ADD_CHARACTER_SET(9, "ISO8859_7" XC "ISO-8859-7");
  ADD_CHARACTER_SET(10, "ISO8859_8" XC "ISO-8859-8");
  ADD_CHARACTER_SET(11, "ISO8859_9" XC "ISO-8859-9");
  ADD_CHARACTER_SET(12, "ISO8859_10" XC "ISO-8859-10");
  ADD_CHARACTER_SET(13, "ISO8859_11" XC "ISO-8859-11");
  ADD_CHARACTER_SET(15, "ISO8859_13" XC "ISO-8859-13");
  ADD_CHARACTER_SET(16, "ISO8859_14" XC "ISO-8859-14");
  ADD_CHARACTER_SET(17, "ISO8859_15" XC "ISO-8859-15");
  ADD_CHARACTER_SET(18, "ISO8859_16" XC "ISO-8859-16");
  ADD_CHARACTER_SET(20, "SJIS" XC "Shift_JIS");
  ADD_CHARACTER_SET(21, "Cp1250" XC "windows-1250");
  ADD_CHARACTER_SET(22, "Cp1251" XC "windows-1251");
  ADD_CHARACTER_SET(23, "Cp1252" XC "windows-1252");
  ADD_CHARACTER_SET(24, "Cp1256" XC "windows-1256");
  ADD_CHARACTER_SET(25, "UnicodeBigUnmarked" XC "UTF-16BE" XC "UnicodeBig");
  ADD_CHARACTER_SET(26, "UTF8" XC "UTF-8");
  ADD_CHARACTER_SET(27 XC 170, "ASCII" XC "US-ASCII");
  ADD_CHARACTER_SET(28, "Big5");
  ADD_CHARACTER_SET(29, "GB18030" XC "GB2312" XC "EUC_CN" XC "GBK");
  ADD_CHARACTER_SET(30, "EUC_KR" XC "EUC-KR");
  return true;
}
#undef XC
// Constructs a charset entry and registers it under each of its ECI
// values and each of its names.  Both input arrays are
// sentinel-terminated (-1 for values, null for names) and must outlive
// this object; the same Ref is shared by every registry slot.
CharacterSetECI::CharacterSetECI(int const *values,
                                 char const *const *names)
    : values_(values), names_(names)
{
  zxing::Ref<CharacterSetECI> self(this);
  for (int const *v = values_; *v != -1; ++v) {
    VALUE_TO_ECI[*v] = self;
  }
  for (char const *const *n = names_; *n; ++n) {
    NAME_TO_ECI[string(*n)] = self;
  }
}
// Returns the canonical (first-listed) name of this charset.
char const *CharacterSetECI::name() const
{
  return names_[0];
}
// Returns the primary (first-listed) ECI value of this charset.
int CharacterSetECI::getValue() const
{
  return values_[0];
}
// Factory used by init_tables().  The naked `new` is intentional: the
// constructor registers the object in the global maps, whose Refs keep
// it alive for the life of the process.
void CharacterSetECI::addCharacterSet(int const *values, char const *const *names)
{
  new CharacterSetECI(values, names);
}
// Looks up a charset by ECI value; returns NULL for out-of-range or
// unassigned values.  Uses find() rather than operator[] so that missed
// lookups do not insert empty Ref entries into the global registry
// (the original version silently grew VALUE_TO_ECI on every miss).
CharacterSetECI *CharacterSetECI::getCharacterSetECIByValue(int value)
{
  if (value < 0 || value >= 900) {  // 900 = upper bound of the ECI value space
    return NULL;
  }
  std::map<int, zxing::Ref<CharacterSetECI> >::iterator it =
      VALUE_TO_ECI.find(value);
  if (it == VALUE_TO_ECI.end()) {
    return NULL;
  }
  return it->second;
}
// Looks up a charset by name; returns NULL when the name is unknown.
// Uses find() rather than operator[] so unknown names do not insert
// empty Ref entries into the global registry.
CharacterSetECI *CharacterSetECI::getCharacterSetECIByName(string const &name)
{
  std::map<std::string, zxing::Ref<CharacterSetECI> >::iterator it =
      NAME_TO_ECI.find(name);
  if (it == NAME_TO_ECI.end()) {
    return NULL;
  }
  return it->second;
}
|
#include "splash.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
void splash_timeout(void *param) {
((Fl_Double_Window*) param)->hide();
}
// Fade-in state: cnt ramps 0..255 across timer ticks, and the label
// color is linearly interpolated from the blue-ish "from" RGB toward
// the white "to" RGB.
int cnt = 0;
float r_from = 0x33;
float g_from = 0x99;
float b_from = 0xff;
float r_to = 0xff;
float g_to = 0xff;
float b_to = 0xff;
// Timer callback driving the fade-in: advances cnt by 5 per 10ms tick,
// blends each color channel between the from/to globals, applies the
// result to the header label, and reschedules itself until cnt hits 255.
void fadein_timeout(void*) {
  cnt += 5;
  if (cnt > 255) cnt = 255;
  double v = cnt / 255.0;  // blend factor in [0,1]
  // FLTK packs RGB into the top three bytes of Fl_Color (0xRRGGBB00).
  Fl_Color color = (((int) (v * r_to + (1 - v) * r_from)) << 24) |
      (((int) (v * g_to + (1 - v) * g_from)) << 16) |
      (((int) (v * b_to + (1 - v) * b_from)) << 8);
  // printf("color: %08X\n", color);
  header_box->labelcolor(color);
  // footer_box->labelcolor(color);
  if (cnt < 255) {
    Fl::repeat_timeout(0.01, fadein_timeout);
  }
  splash_wnd->redraw();
}
// Shows a centered splash window with a fade-in, then forks and execs
// the sibling binary "splash_target" (same directory as this
// executable) with the same arguments, leaving the splash running in
// the parent.  Fixes vs. the previous version: readlink() and strrchr()
// results are checked (readlink failure used to write fname[-1]), and
// the unused strdup()'d copy of the directory (a leak) is gone.
int main(int argc, char *argv[]) {
  Fl_Double_Window *wnd = make_window();
  wnd->show(argc, argv);
  int n = 0; // Fl::screen_num(wnd->x(), wnd->y(), wnd->w(), wnd->h());
  int x, y, w, h;
  Fl::screen_xywh(x, y, w, h, n);
  // Center the window on screen n.
  wnd->resize(x + w / 2 - wnd->w() / 2,
      y + h / 2 - wnd->h() / 2,
      wnd->w(),
      wnd->h());
  Fl::add_timeout(0.01, fadein_timeout, wnd);
  // Fl::add_timeout(5.0, splash_timeout, wnd);

  // Resolve the directory containing this executable (Linux-specific).
  char fname[1024];
  ssize_t sz = readlink("/proc/self/exe", fname, sizeof(fname) - 1);
  if (sz == -1) {
    perror("Error on readlink(/proc/self/exe)");
    return -1;
  }
  fname[sz] = 0;
  char *end = strrchr(fname, '/');
  if (end == NULL) {
    fprintf(stderr, "Unexpected executable path: %s\n", fname);
    return -1;
  }
  *end = 0;
  const char *target = "/splash_target";
  if (strlen(fname) + strlen(target) + 1 > sizeof(fname)) {
    fprintf(stderr, "Executable path too long\n");
    return -1;
  }
  strcat(fname, target);

  int pid = fork();
  if (pid == -1) {
    perror("Error while executing fork()");
    return -1;
  }
  if (pid == 0) {
    // Child: exec the target with our own argv (argv[0] replaced).
    char **new_argv = (char**) malloc(sizeof(char*) * (argc + 1));
    new_argv[0] = fname;
    new_argv[argc] = 0;
    for (int i = 1; i < argc; i++) new_argv[i] = argv[i];
    if (execve(fname, new_argv, NULL) == -1) {
      perror("Error on execve()");
      return -1;
    }
  }
  return Fl::run();
}
|
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010- Facebook, Inc. (http://www.facebook.com) |
| Copyright (c) 1997-2010 The PHP Group |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include <runtime/ext/ext_debugger.h>
#include <runtime/ext/ext_string.h>
#include <runtime/eval/debugger/cmd/cmd_user.h>
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
using namespace Eval;
// PHP-visible DebuggerClient::AUTO_COMPLETE_* class constants, each
// mirroring the corresponding Eval::DebuggerClient enum value.
const int64 q_DebuggerClient$$AUTO_COMPLETE_FILENAMES =
  DebuggerClient::AutoCompleteFileNames;
const int64 q_DebuggerClient$$AUTO_COMPLETE_VARIABLES =
  DebuggerClient::AutoCompleteVariables;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CONSTANTS =
  DebuggerClient::AutoCompleteConstants;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CLASSES =
  DebuggerClient::AutoCompleteClasses;
const int64 q_DebuggerClient$$AUTO_COMPLETE_FUNCTIONS =
  DebuggerClient::AutoCompleteFunctions;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CLASS_METHODS =
  DebuggerClient::AutoCompleteClassMethods;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CLASS_PROPERTIES =
  DebuggerClient::AutoCompleteClassProperties;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CLASS_CONSTANTS =
  DebuggerClient::AutoCompleteClassConstants;
const int64 q_DebuggerClient$$AUTO_COMPLETE_KEYWORDS =
  DebuggerClient::AutoCompleteKeyword;
const int64 q_DebuggerClient$$AUTO_COMPLETE_CODE =
  DebuggerClient::AutoCompleteCode;
///////////////////////////////////////////////////////////////////////////////
// Registers a PHP class as the handler for a user-defined debugger
// command; returns whether installation succeeded.
bool f_hphpd_install_user_command(CStrRef cmd, CStrRef clsname) {
  return CmdUser::InstallCommand(cmd, clsname);
}
// Returns the currently installed user debugger commands.
Array f_hphpd_get_user_commands() {
  return CmdUser::GetCommands();
}
// Programmatic breakpoint: interrupts into the debugger at the caller's
// frame when the debugger is enabled for this request and `condition`
// holds.  A no-op otherwise.
void f_hphpd_break(bool condition /* = true */) {
  if (RuntimeOption::EnableDebugger && condition) {
    ThreadInfo *ti = ThreadInfo::s_threadInfo.getNoCheck();
    // Frame 1 = the PHP frame that called hphpd_break().
    FrameInjection *frame = FrameInjection::GetStackFrame(1);
    if (frame && ti->m_reqInjectionData.debugger) {
      Eval::InterruptSite site(frame);
      Eval::Debugger::InterruptHard(site);
    }
  }
}
///////////////////////////////////////////////////////////////////////////////
// PHP-visible DebuggerProxy wrapper; each t_* method forwards to the
// underlying proxy object (m_proxy — presumably wired up elsewhere;
// confirm against the class declaration).
c_DebuggerProxy::c_DebuggerProxy() {
}
c_DebuggerProxy::~c_DebuggerProxy() {
}
void c_DebuggerProxy::t___construct() {
}
// Whether the proxied debugger session is local.
bool c_DebuggerProxy::t_islocal() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerProxy, DebuggerProxy::islocal);
  return m_proxy->isLocal();
}
// Wraps a PHP command object in a CmdUser and forwards it to the proxy.
Variant c_DebuggerProxy::t_send(CObjRef cmd) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerProxy, DebuggerProxy::send);
  CmdUser cmdUser(cmd);
  return m_proxy->send(&cmdUser);
}
Variant c_DebuggerProxy::t___destruct() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerProxy, DebuggerProxy::__destruct);
  return null;
}
///////////////////////////////////////////////////////////////////////////////
// PHP-visible DebuggerClient wrapper; each t_* method forwards to the
// underlying Eval::DebuggerClient (m_client).
c_DebuggerClient::c_DebuggerClient() {
}
c_DebuggerClient::~c_DebuggerClient() {
}
void c_DebuggerClient::t___construct() {
}
// Terminates the debugger client session.
void c_DebuggerClient::t_quit() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::quit);
  m_client->quit();
}
// Formats a message with PHP sprintf() semantics.  If formatting does
// not yield a string, reports the failure through the client's error
// channel and returns the empty string.
static String format_string(DebuggerClient *client,
                            int _argc, CStrRef format, CArrRef _argv) {
  Variant ret = f_sprintf(_argc, format, _argv);
  if (ret.isString()) {
    return ret;
  }
  client->error("Debugger extension failed to format string: %s",
                format.data());
  return "";
}
// Output wrappers: each formats its arguments with sprintf() semantics
// (via format_string) and routes the text to the matching client
// channel (print/help/info/output/error).
void c_DebuggerClient::t_print(int _argc, CStrRef format,
                               CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::print);
  m_client->print(format_string(m_client, _argc, format, _argv));
}
void c_DebuggerClient::t_help(int _argc, CStrRef format,
                              CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::help);
  m_client->help(format_string(m_client, _argc, format, _argv));
}
void c_DebuggerClient::t_info(int _argc, CStrRef format,
                              CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::info);
  m_client->info(format_string(m_client, _argc, format, _argv));
}
void c_DebuggerClient::t_output(int _argc, CStrRef format,
                                CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::output);
  m_client->output(format_string(m_client, _argc, format, _argv));
}
void c_DebuggerClient::t_error(int _argc, CStrRef format,
                               CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::error);
  m_client->error(format_string(m_client, _argc, format, _argv));
}
// Displays a source snippet, optionally highlighting one line within
// the given line range.
void c_DebuggerClient::t_code(CStrRef source, int highlight_line /* = 0 */,
                              int start_line_no /* = 0 */,
                              int end_line_no /* = 0 */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::code);
  m_client->code(source, highlight_line, start_line_no, end_line_no);
}
// Prompts the user with a formatted question; the single-character
// answer is returned as a one-character string.
Variant c_DebuggerClient::t_ask(int _argc, CStrRef format,
                                CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::ask);
  String ret = format_string(m_client, _argc, format, _argv);
  return String::FromChar(m_client->ask("%s", ret.data()));
}
// Wraps text to the client's display width.
String c_DebuggerClient::t_wrap(CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::wrap);
  return m_client->wrap(str.data());
}
// Prints a help-screen title.
void c_DebuggerClient::t_helptitle(CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::helptitle);
  m_client->helpTitle(str.data());
}
// Renders a command/description help listing.  The leading cmd/desc
// pair may be followed by further alternating entries in _argv.
// `holders` keeps the temporary String objects alive so the raw data()
// pointers pushed into `cmds` remain valid until helpCmds() returns.
void c_DebuggerClient::t_helpcmds(int _argc, CStrRef cmd, CStrRef desc,
                                  CArrRef _argv /* = null_array */) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::helpcmds);
  std::vector<String> holders;
  std::vector<const char *> cmds;
  cmds.push_back(cmd.data());
  cmds.push_back(desc.data());
  for (int i = 0; i < _argv.size(); i++) {
    String s = _argv[i].toString();
    holders.push_back(s);
    cmds.push_back(s.data());
  }
  m_client->helpCmds(cmds);
}
// Prints a help-screen body paragraph.
void c_DebuggerClient::t_helpbody(CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::helpbody);
  m_client->helpBody(str.data());
}
// Prints a help-screen section heading.
void c_DebuggerClient::t_helpsection(CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::helpsection);
  m_client->helpSection(str.data());
}
// Shows a tutorial message.
void c_DebuggerClient::t_tutorial(CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::tutorial);
  m_client->tutorial(str.data());
}
// Returns the code snippet currently held by the client.
String c_DebuggerClient::t_getcode() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::getcode);
  return m_client->getCode();
}
// Returns the current command string.
String c_DebuggerClient::t_getcommand() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::getcommand);
  return m_client->getCommand();
}
// Argument accessors.  PHP-visible indexes are shifted by one relative
// to the client's internal list (slot 0 internally appears to be the
// command itself — consistent with t_args() skipping element 0 below).
bool c_DebuggerClient::t_arg(int index, CStrRef str) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::arg);
  return m_client->arg(index + 1, str.data());
}
int c_DebuggerClient::t_argcount() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::argcount);
  return m_client->argCount() - 1;
}
String c_DebuggerClient::t_argvalue(int index) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::argvalue);
  return m_client->argValue(index + 1);
}
// Returns the raw remainder of the command line starting at `index`.
String c_DebuggerClient::t_argrest(int index) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::argrest);
  return m_client->argRest(index + 1);
}
// Returns the command's arguments as a PHP array; element 0 of the
// internal vector is skipped (matching the one-based shift used by the
// other arg accessors).
Array c_DebuggerClient::t_args() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::args);
  StringVec *args = m_client->args();
  Array ret(Array::Create());
  for (unsigned int i = 1; i < args->size(); i++) {
    ret.append(String(args->at(i)));
  }
  return ret;
}
// Sends a user command to the server side without waiting for a typed
// reply; always returns true.
Variant c_DebuggerClient::t_send(CObjRef cmd) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::send);
  CmdUser cmdUser(cmd);
  m_client->send(&cmdUser);
  return true;
}
// Sends a user command and waits for the round-trip response, returning
// the PHP command object carried back.
Variant c_DebuggerClient::t_xend(CObjRef cmd) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::xend);
  CmdUser cmdUser(cmd);
  CmdUserPtr ret = m_client->xend<CmdUser>(&cmdUser);
  return ret->getUserCommand();
}
// Returns the client's current source location as an array with keys
// file/line/namespace/class/function/text, or an empty array when no
// location is available.
Variant c_DebuggerClient::t_getcurrentlocation() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::getcurrentlocation);
  BreakPointInfoPtr bpi = m_client->getCurrentLocation();
  Array ret(Array::Create());
  if (bpi) {
    ret.set("file", String(bpi->m_file));
    ret.set("line", (int64)bpi->m_line1);
    ret.set("namespace", String(bpi->getNamespace()));
    ret.set("class", String(bpi->getClass()));
    ret.set("function", String(bpi->getFunction()));
    ret.set("text", String(bpi->site()));
  }
  return ret;
}
// Returns the current stack trace held by the client.
Variant c_DebuggerClient::t_getstacktrace() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::getstacktrace);
  return m_client->getStackTrace();
}
// Returns the index of the currently selected stack frame.
int c_DebuggerClient::t_getframe() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::getframe);
  return m_client->getFrame();
}
// Pretty-prints stack frame `index` from the current stack trace.
// NOTE(review): `index` is not range-checked against the trace here —
// presumably callers pass valid frames; verify upstream.
void c_DebuggerClient::t_printframe(int index) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::printframe);
  m_client->printFrame(index, m_client->getStackTrace()[index]);
}
// Adds auto-complete candidates: an integer selects one of the
// predefined AUTO_COMPLETE_* categories; anything else (string, array,
// iterator) is converted to an array of literal completion strings.
void c_DebuggerClient::t_addcompletion(CVarRef list) {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::addcompletion);
  if (list.isInteger()) {
    m_client->addCompletion((DebuggerClient::AutoComplete)list.toInt64());
  } else {
    Array arr = list.toArray(); // handles string, array and iterators
    std::vector<String> items;
    for (ArrayIter iter(arr); iter; ++iter) {
      items.push_back(iter.second().toString());
    }
    m_client->addCompletion(items);
  }
}
// PHP __destruct hook: nothing to release; returns null.
Variant c_DebuggerClient::t___destruct() {
  INSTANCE_METHOD_INJECTION_BUILTIN(DebuggerClient, DebuggerClient::__destruct);
  return null;
}
///////////////////////////////////////////////////////////////////////////////
}
|
# include "../headers/unixclientstream.hpp"
# include "../headers/exception.hpp"
# include <string>
# include <iostream>
# include <string.h>
// Builds a connection to /tmp/unixsocket and sends/receives data using it.
// Connects to the stream socket at /tmp/unixsocket, sends a greeting,
// reads up to 127 bytes of reply and echoes it to stdout.  Socket
// errors are reported on stderr.  The reply buffer now lives on the
// stack: the previous `new char[128]` was never deleted, leaking on
// every path (including the exception path).
int main(void)
{
    using libsocket::unix_stream_client;
    using std::string;

    string path = "/tmp/unixsocket";
    char answer[128];
    memset(answer, 0, sizeof(answer));  // guarantees NUL termination after rcv

    try {
        unix_stream_client sock(path);
        sock.snd("Hello World!\n", 13);
        sock.rcv(answer, sizeof(answer) - 1);  // leave room for the NUL
        std::cout << answer;
    } catch (const libsocket::socket_exception& exc)
    {
        std::cerr << exc.mesg;
    }
    return 0;
}
|
/**
* Author: soosoo
* Time: 2021-02-20 12:44:44
**/
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
typedef pair<ll, ll> pll;
typedef pair<string, string> pss;
typedef vector<int> vi;
typedef vector<vi> vvi;
typedef vector<pii> vii;
typedef vector<ll> vl;
typedef vector<vl> vvl;
double EPS=1e-9;
int INF=1000000005;
long long INFF=1000000000000000005ll;
double PI=acos(-1);
int dirx[8]={ -1, 0, 0, 1, -1, -1, 1, 1 };
int diry[8]={ 0, 1, -1, 0, -1, 1, -1, 1 };
ll MOD = 1000000007;
#define DEBUG fprintf(stderr, "====TESTING====\n")
#define VALUE(x) cerr << "The value of " << #x << " is " << x << endl
#define OUT(x) cout << x << endl
#define OUTH(x) cout << x << " "
#define debug(...) fprintf(stderr, __VA_ARGS__)
#define READ(x) for(auto &(z):x) cin >> z;
#define FOR(a, b, c) for (int(a)=(b); (a) < (c); ++(a))
#define FORN(a, b, c) for (int(a)=(b); (a) <= (c); ++(a))
#define FORD(a, b, c) for (int(a)=(b); (a) >= (c); --(a))
#define FORSQ(a, b, c) for (int(a)=(b); (a) * (a) <= (c); ++(a))
#define FORC(a, b, c) for (char(a)=(b); (a) <= (c); ++(a))
#define EACH(a, b) for (auto&(a) : (b))
#define REP(i, n) FOR(i, 0, n)
#define REPN(i, n) FORN(i, 1, n)
#define MAX(a, b) a=max(a, b)
#define MIN(a, b) a=min(a, b)
#define SQR(x) ((ll)(x) * (x))
#define RESET(a, b) memset(a, b, sizeof(a))
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define ALL(v) v.begin(), v.end()
#define ALLA(arr, sz) arr, arr + sz
#define SIZE(v) (int)v.size()
#define SORT(v) sort(ALL(v))
#define REVERSE(v) reverse(ALL(v))
#define SORTA(arr, sz) sort(ALLA(arr, sz))
#define REVERSEA(arr, sz) reverse(ALLA(arr, sz))
#define PERMUTE next_permutation
#define TC(t) while (t--)
#define FAST_INP ios_base::sync_with_stdio(false);cin.tie(NULL)
#define what_is(x) cerr << #x << " is " << x << endl
// Reads initial strength s and n activities (requirement, reward).
// Activities are attempted in ascending order of requirement; each one
// demands strictly more strength than its requirement and then adds its
// reward to s.  Prints "YES" if all succeed, "NO" at the first failure.
void solve() {
    int s, n;
    cin >> s >> n;
    vector<pair<int, int>> acts(n);
    for (auto &a : acts) cin >> a.first >> a.second;
    sort(acts.begin(), acts.end());
    for (const auto &a : acts) {
        if (a.first >= s) {
            cout << "NO" << endl;
            return;
        }
        s += a.second;
    }
    cout << "YES" << endl;
}
// Entry point: detach C++ streams from C stdio for fast input (what the
// FAST_INP macro expands to), then run the single test case.
int main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    // Multi-test-case scaffolding intentionally left disabled:
    // int tc; cin >> tc; TC(tc) solve();
    solve();
    return 0;
}
|
// Copyright (c) 2015-present, Qihoo, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include <stdio.h>
#include <unistd.h>
#include "slash/include/xdebug.h"
#include "pink/include/pink_conn.h"
#include "pink/include/pink_thread.h"
#include "pink/src/pink_util.h"
namespace pink {
// Wraps an accepted connection fd: records the peer's "ip:port" string
// and owning server thread, starts with the reply flag cleared (and a
// null SSL handle when SSL is compiled in — see CreateSSL()), and
// stamps last_interaction_ with the current time.
PinkConn::PinkConn(const int fd,
                   const std::string &ip_port,
                   ServerThread *thread)
    : fd_(fd),
      ip_port_(ip_port),
      is_reply_(false),
#ifdef __ENABLE_SSL
      ssl_(nullptr),
#endif
      server_thread_(thread) {
  gettimeofday(&last_interaction_, nullptr);
}
// Releases the SSL handle if one was created; SSL_free(nullptr) is a
// safe no-op, so no null check is needed.
PinkConn::~PinkConn() {
#ifdef __ENABLE_SSL
  SSL_free(ssl_);
  ssl_ = nullptr;
#endif
}
// Switches the connection's fd to non-blocking mode, caching the
// resulting flag state in flags_.  Returns false when the underlying
// fcntl manipulation fails (Setnonblocking returns -1).
bool PinkConn::SetNonblock() {
  flags_ = Setnonblocking(fd());
  return flags_ != -1;
}
#ifdef __ENABLE_SSL
// Creates an SSL session from the given context, binds it to this
// connection's fd and puts it in server (accept) mode.  Returns false
// on any OpenSSL failure; the handle is then freed by the destructor.
bool PinkConn::CreateSSL(SSL_CTX* ssl_ctx) {
  ssl_ = SSL_new(ssl_ctx);
  if (!ssl_) {
    log_warn("SSL_new() failed");
    return false;
  }
  if (SSL_set_fd(ssl_, fd_) == 0) {
    log_warn("SSL_set_fd() failed");
    return false;
  }
  SSL_set_accept_state(ssl_);
  return true;
}
#endif
} // namespace pink
|
#include "xuly.h"
// Identifiers are Vietnamese; translations below are approximate.
// Flying contest: both flying-animal contestants perform bay() ("fly").
void thiBay(ThuBay *dv1, ThuBay *dv2)
{
  dv1->bay();
  dv2->bay();
}
// Swimming contest: both contestants perform boi() ("swim").
void thiBoi(ThuBoi *dv1, ThuBoi *dv2)
{
  dv1->boi();
  dv2->boi();
}
// Domesticates/breeds a live-bearing animal; the offspring object is a
// local and is discarded when the function returns.
void thuanhoaThu(ThuDeCon *dv)
{
  ThuDeCon con = dv->deCon();
}
// Raises a fish: it swims and lays eggs (offspring discarded).
void nuoiCa(LopCa *dv)
{
  dv->boi();
  LopCa con = dv->deTrung();
}
// Raises a reptile: it crawls, eats everything, and lays eggs
// (offspring discarded).
void nuoiBoSat(LopBoSat *dv)
{
  dv->bo();
  dv->anTap();
  LopBoSat con = dv->deTrung();
}
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#include "render_action_move_element.h"
namespace WeexCore {
// Records a "move element" render patch: which page, the node to move
// (ref), its new parent (parentRef) and the insertion index within
// that parent.
RenderActionMoveElement::RenderActionMoveElement(const std::string &pageId, const std::string &ref,
                                                 const std::string &parentRef, int index) {
  this->mPageId = pageId;
  this->mRef = ref;
  this->mParentRef = parentRef;
  this->mIndex = index;
}
// Forwards the stored move parameters to the Android bridge singleton
// as C strings.
void RenderActionMoveElement::ExecuteAction() {
  Bridge_Impl_Android::getInstance()->callMoveElement(mPageId.c_str(), mRef.c_str(), mParentRef.c_str(), mIndex);
}
}
|
// Copyright John McFarlane 2015 - 2017.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file ../LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <cnl/_impl/type_traits/identical.h>
#include <cnl/static_number.h>
#include <gtest/gtest.h>
using cnl::_impl::identical;
namespace {
// Compile-time check that multiplication widens to the sum of digits.
namespace test_multiply {
static_assert(identical(
    cnl::static_number<6>{7}*cnl::static_number<13>{321},
    cnl::static_number<19>{2247}), "");
}
#if !defined(CNL_UNREACHABLE_UB_ENABLED)
TEST(static_number, most_negative_number) {  // NOLINT
    static_assert(cnl::static_number<1>{1}, "in-range boundary test");
    static_assert(cnl::static_number<1>{-1}, "in-range boundary test");
    ASSERT_DEATH(cnl::static_number<1>{-2}, "negative overflow");
}
#endif
// Fix: the pre_decrement / post_increment / post_decrement tests below
// previously reused "pre-increment" in their static_assert and ASSERT
// diagnostics (copy-paste); each message now names the operation the
// test actually exercises.
TEST(static_number, pre_increment) {  // NOLINT
    auto a = cnl::static_number<4, -2>{2.75};
    auto& b = ++a;
    static_assert(
        std::is_same<decltype(b), cnl::static_number<4, -2>&>::value,
        "static_number pre-increment return value");
    ASSERT_EQ(&b, &a) << "static_number pre-increment return address";
    ASSERT_EQ(3.75, b) << "static_number pre-increment";
}
TEST(static_number, pre_decrement) {  // NOLINT
    auto a = cnl::static_number<4, -2>{-2.75};
    auto& b = --a;
    static_assert(
        std::is_same<decltype(b), cnl::static_number<4, -2>&>::value,
        "static_number pre-decrement return value");
    ASSERT_EQ(&b, &a) << "static_number pre-decrement return address";
    ASSERT_EQ(-3.75, b) << "static_number pre-decrement";
}
TEST(static_number, post_increment) {  // NOLINT
    auto a = cnl::static_number<4, -2>{2.75};
    auto const& b = a++;
    static_assert(
        std::is_same<decltype(b), cnl::static_number<4, -2> const&>::value,
        "static_number post-increment return value");
    ASSERT_NE(&b, &a) << "static_number post-increment return address";
    ASSERT_EQ(3.75, a) << "static_number post-increment";
    ASSERT_EQ(2.75, b) << "static_number post-increment";
}
TEST(static_number, post_decrement) {  // NOLINT
    auto a = cnl::static_number<4, -2>{-2.75};
    auto const& b = a--;
    static_assert(
        std::is_same<decltype(b), cnl::static_number<4, -2> const&>::value,
        "static_number post-decrement return value");
    ASSERT_NE(&b, &a) << "static_number post-decrement return address";
    ASSERT_EQ(-3.75, a) << "static_number post-decrement";
    ASSERT_EQ(-2.75, b) << "static_number post-decrement";
}
// Chained arithmetic under nearest-rounding / saturated-overflow
// policies must land on the expected integer.
TEST(static_number, stress) {  // NOLINT
    auto expected = 2809;
    auto s = cnl::make_static_number<cnl::nearest_rounding_tag, cnl::saturated_overflow_tag>(70) / 3;
    auto s2 = s*s;
    auto s2po = s2+1;
    auto s2pooten = s2po/10;
    auto s4pooten = s2pooten*s2pooten;
    auto actual = static_cast<int>(s4pooten);
    ASSERT_EQ(expected, actual);
}
#if !defined(CNL_UNREACHABLE_UB_ENABLED)
TEST(static_number, pre_increment_overflow) {  // NOLINT
    auto a = cnl::static_number<4, -2>{3.0};
    ASSERT_DEATH(++a, "positive overflow");
}
TEST(static_number, pre_decrement_overflow) {  // NOLINT
    auto a = cnl::static_number<4, -2>{-3.0};
    ASSERT_DEATH(--a, "negative overflow");
}
TEST(static_number, post_increment_overflow) {  // NOLINT
    auto a = cnl::static_number<4, -2>{3.0};
    ASSERT_DEATH(a++, "positive overflow");
}
TEST(static_number, post_decrement_overflow) {  // NOLINT
    auto a = cnl::static_number<4, -2>{-3.0};
    ASSERT_DEATH(a--, "negative overflow");
}
#endif
}
|
class Solution {
public:
    // Computes x raised to the integer power nn via binary
    // exponentiation: square x while halving the exponent, folding x
    // into the accumulator whenever the low bit is set — O(log |nn|)
    // multiplications.
    //
    // Fix: the previous version computed abs(nn) on the int before
    // widening, which overflows (undefined behavior) for
    // nn == INT_MIN.  The exponent is now widened to long long first,
    // so negation is always safe.
    double myPow(double x, int nn) {
        double ans(1.0);
        long long n = nn;          // widen BEFORE any negation
        bool negative = n < 0;
        if (negative)
            n = -n;                // safe: 64-bit negation of -2^31
        if (n == 0)
            return 1;
        if (x == 1.0)
            return 1.0;
        while (n) {
            if (n & 1)
                ans = ans * x;
            x = x * x;
            n >>= 1;
        }
        return negative ? 1.0 / ans : ans;
    }
};
|
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include "op_fuzzy.hpp"
#include <cnpy.h>
#include <fstream>
#include "engines_util/test_engines.hpp"
#include "ngraph/ngraph.hpp"
#include "paddle_utils.hpp"
#include "util/test_control.hpp"
using namespace ngraph;
using namespace InferenceEngine;
using namespace ngraph;
using namespace ov::frontend;
using PaddleFuzzyOpTest = FrontEndFuzzyOpTest;
static const std::vector<std::string> models{std::string("argmax"),
std::string("argmax1"),
std::string("assign_value_boolean"),
std::string("assign_value_fp32"),
std::string("assign_value_int32"),
std::string("assign_value_int64"),
std::string("avgAdaptivePool2D_test1"),
std::string("avgPool_test1"),
std::string("avgPool_test10"),
std::string("avgPool_test11"),
std::string("avgPool_test2"),
std::string("avgPool_test3"),
std::string("avgPool_test4"),
std::string("avgPool_test5"),
// avgPool_test6<nchw support is disabled now>,
std::string("avgPool_test7"),
std::string("avgPool_test8"),
std::string("avgPool_test9"),
std::string("batch_norm_nchw"),
std::string("batch_norm_nhwc"),
std::string("bicubic_downsample_false_0"),
std::string("bicubic_downsample_false_1"),
std::string("bicubic_downsample_true_0"),
std::string("bicubic_upsample_false_0"),
std::string("bicubic_upsample_false_1"),
std::string("bicubic_upsample_scales"),
std::string("bicubic_upsample_scales2"),
std::string("bicubic_upsample_true_0"),
std::string("bilinear_downsample_false_0"),
std::string("bilinear_downsample_false_1"),
std::string("bilinear_downsample_true_0"),
std::string("bilinear_upsample_false_0"),
std::string("bilinear_upsample_false_1"),
std::string("bilinear_upsample_scales"),
std::string("bilinear_upsample_scales2"),
std::string("bilinear_upsample_true_0"),
std::string("bmm"),
std::string("clip"),
std::string("conv2d_dilation_assymetric_pads_strides"),
std::string("conv2d_SAME_padding"),
std::string("conv2d_strides_assymetric_padding"),
std::string("conv2d_strides_no_padding"),
std::string("conv2d_strides_padding"),
std::string("conv2d_transpose_dilation_assymetric_pads_strides"),
// conv2d_transpose_SAME_padding(Paddle outputs wrong results),
std::string("conv2d_transpose_strides_assymetric_padding"),
std::string("conv2d_transpose_strides_no_padding"),
std::string("conv2d_transpose_strides_padding"),
std::string("conv2d_transpose_VALID_padding"),
std::string("conv2d_VALID_padding"),
std::string("cumsum"),
std::string("cumsum_i32"),
std::string("cumsum_i64"),
std::string("cumsum_f32"),
std::string("cumsum_f64"),
std::string("depthwise_conv2d_convolution"),
std::string("depthwise_conv2d_transpose_convolution"),
std::string("dropout"),
std::string("dropout_upscale_in_train"),
std::string("elementwise_add1"),
std::string("elementwise_div1"),
std::string("elementwise_max1"),
std::string("elementwise_min1"),
std::string("elementwise_mul1"),
std::string("elementwise_pow1"),
std::string("elementwise_sub1"),
std::string("elementwise_add2"),
std::string("elementwise_div2"),
std::string("elementwise_max2"),
std::string("elementwise_min2"),
std::string("elementwise_mul2"),
std::string("elementwise_pow2"),
std::string("elementwise_sub2"),
std::string("elementwise_add3"),
std::string("elementwise_div3"),
std::string("elementwise_max3"),
std::string("elementwise_min3"),
std::string("elementwise_mul3"),
std::string("elementwise_pow3"),
std::string("elementwise_sub3"),
std::string("elementwise_add4"),
std::string("elementwise_div4"),
std::string("elementwise_max4"),
std::string("elementwise_min4"),
std::string("elementwise_mul4"),
std::string("elementwise_pow4"),
std::string("elementwise_sub4"),
std::string("embedding_0"),
std::string("embedding_sparse"),
std::string("embedding_none_weight"),
std::string("embedding_paddings"),
std::string("embedding_paddings_neg1"),
std::string("embedding_tensorIds"),
std::string("embedding_tensorIds_paddings"),
std::string("equal"),
std::string("expand_v2"),
std::string("expand_v2_tensor"),
std::string("expand_v2_tensor_list"),
std::string("exp_test_float32"),
std::string("fill_any_like"),
std::string("fill_any_like_f16"),
std::string("fill_any_like_f32"),
std::string("fill_any_like_f64"),
std::string("fill_any_like_i32"),
std::string("fill_any_like_i64"),
std::string("fill_constant"),
std::string("fill_constant_batch_size_like"),
std::string("fill_constant_int32"),
std::string("fill_constant_int64"),
std::string("fill_constant_tensor"),
std::string("fill_constant_shape_tensor"),
std::string("fill_constant_shape_tensor_list"),
std::string("flatten_contiguous_range_test1"),
std::string("gelu_erf"),
std::string("gelu_tanh"),
// greater_equal_big_int64(failure due to CPU inference),
std::string("greater_equal_float32"),
std::string("greater_equal_int32"),
std::string("greater_equal_int64"),
std::string("hard_sigmoid"),
std::string("hard_swish"),
std::string("layer_norm"),
std::string("layer_norm_noall"),
std::string("layer_norm_noscale"),
std::string("layer_norm_noshift"),
std::string("leaky_relu"),
std::string("linear_downsample_false_0"),
std::string("linear_downsample_false_1"),
std::string("linear_downsample_true_0"),
std::string("linear_upsample_false_0"),
std::string("linear_upsample_false_1"),
std::string("linear_upsample_scales"),
std::string("linear_upsample_scales2"),
std::string("linear_upsample_true_0"),
std::string("log"),
std::string("logical_not"),
std::string("matmul_xt"),
std::string("matmul_xt_yt"),
std::string("matmul_yt"),
std::string("matmul_v2_1dx1d"),
std::string("matmul_v2_1dx2d"),
std::string("matmul_v2_2dx1d"),
std::string("matmul_v2_ndxmd"),
std::string("matmul_v2_xt"),
std::string("matmul_v2_xt_yt"),
std::string("matmul_v2_yt"),
std::string("maxAdaptivePool2D_test1"),
std::string("maxPool_test1"),
std::string("maxPool_test10"),
std::string("maxPool_test11"),
std::string("maxPool_test2"),
std::string("maxPool_test3"),
std::string("maxPool_test4"),
std::string("maxPool_test5"),
// maxPool_test6(nchw support is disabled now),
std::string("maxPool_test7"),
std::string("maxPool_test8"),
std::string("maxPool_test9"),
std::string("nearest_downsample_false_0"),
std::string("nearest_downsample_false_1"),
std::string("nearest_upsample_false_0"),
std::string("nearest_upsample_false_1"),
std::string("pad3d_test1"),
std::string("pad3d_test2"),
std::string("pad3d_test3"),
// pad3d_test4,
std::string("pow_float32"),
std::string("pow_int32"),
std::string("pow_int64"),
// pow_int64_out_of_range(out of range of OV int64),
std::string("pow_y_tensor"),
std::string("prior_box_attrs_mmar_order_true"),
std::string("prior_box_default"),
std::string("prior_box_flip_clip_false"),
std::string("prior_box_max_sizes_none"),
std::string("range0"),
std::string("range1"),
std::string("range2"),
std::string("relu"),
std::string("relu6"),
std::string("relu6_1"),
std::string("reshape"),
std::string("reshape_tensor"),
std::string("reshape_tensor_list"),
std::string("rnn_lstm_layer_1_bidirectional"),
std::string("rnn_lstm_layer_1_forward"),
std::string("rnn_lstm_layer_2_bidirectional"),
std::string("rnn_lstm_layer_2_forward"),
std::string("rnn_lstm_layer_1_forward_seq_len_4"),
std::string("rnn_lstm_layer_2_bidirectional_seq_len_4"),
std::string("scale_bias_after_float32"),
std::string("scale_bias_after_int32"),
std::string("scale_bias_after_int64"),
std::string("scale_bias_before_float32"),
std::string("scale_bias_before_int32"),
std::string("scale_bias_before_int64"),
std::string("scale_tensor_bias_after"),
std::string("scale_tensor_bias_before"),
std::string("shape"),
std::string("sigmoid"),
std::string("slice"),
std::string("slice_1d"),
std::string("slice_decrease_axis/slice_decrease_axis.pdmodel"),
std::string("slice_decrease_axis_all/slice_decrease_axis_all.pdmodel"),
std::string("slice_reshape/slice_reshape.pdmodel"),
std::string("softmax"),
std::string("softmax_minus"),
std::string("softplus_default_params"),
std::string("split_test1"),
std::string("split_test2"),
std::string("split_test3"),
std::string("split_test4"),
std::string("split_test5"),
std::string("split_test6"),
std::string("split_test_dim_int32"),
std::string("split_test_dim_int64"),
std::string("split_test_list"),
std::string("split_test_list_tensor"),
std::string("squeeze"),
std::string("squeeze_null_axes"),
std::string("stack_test_float32"),
std::string("stack_test_int32"),
std::string("stack_test_neg_axis"),
std::string("stack_test_none_axis"),
std::string("tanh"),
std::string("trilinear_downsample_false_0"),
std::string("trilinear_downsample_false_1"),
std::string("trilinear_downsample_true_0"),
std::string("trilinear_upsample_false_0"),
std::string("trilinear_upsample_false_1"),
std::string("trilinear_upsample_scales"),
std::string("trilinear_upsample_scales2"),
std::string("trilinear_upsample_true_0"),
std::string("unsqueeze"),
// Temporily disable them until root caused to secure CI stable.
// CVS-66703 to track this.
// std::string("yolo_box_clip_box"),
// std::string("yolo_box_default"),
// std::string("yolo_box_scale_xy"),
std::string("yolo_box_uneven_wh")};
// Instantiate the fuzzy-op test suite once per entry of `models`, pairing the
// Paddle frontend identifier with the models directory configured at build
// time (TEST_PADDLE_MODELS_DIRNAME).
INSTANTIATE_TEST_SUITE_P(PaddleFuzzyOpTest,
                         FrontEndFuzzyOpTest,
                         ::testing::Combine(::testing::Values(PADDLE_FE),
                                            ::testing::Values(std::string(TEST_PADDLE_MODELS_DIRNAME)),
                                            ::testing::ValuesIn(models)),
                         PaddleFuzzyOpTest::getTestCaseName);
|
#include "stdafx.h"
#include <regex>
namespace Oobe
{
    namespace internal
    {
        // Value kinds the launcher-file parser knows how to read in a type
        // safe way. To extend the parsing grammar:
        // 1. Add an entry here and a matching case in the switch statement of
        //    `parseExitStatusFile()`.
        // 2. Register the expected key and its value type in the `grammar`
        //    map below.
        enum class SupportedTypes
        {
            UInt,
            Double,
            String
        };

        const std::map<std::string, SupportedTypes> grammar{{"action", SupportedTypes::String},
                                                            {"defaultUid", SupportedTypes::UInt}};

        // Scans `file` line by line collecting `key = value` (or `key: value`)
        // pairs for the keys registered in `grammar`. Comment lines, unknown
        // keys and values that fail numeric conversion are skipped silently,
        // so a partially valid file still yields its good entries. Returns an
        // error only when nothing at all could be parsed.
        nonstd::expected<KeyValuePairs, const wchar_t*> parseExitStatusFile(std::istream& file)
        {
            const std::regex pairPattern(R"(^\s*(\w+)\s*[=:]\s*(\w+).*$)",
                                         std::regex_constants::icase | std::regex_constants::ECMAScript);
            const std::regex commentPattern(R"(^\s*#+.*)", std::regex_constants::icase | std::regex_constants::ECMAScript);
            KeyValuePairs entries;
            std::smatch pieces;
            for (std::string current; std::getline(file, current);) {
                if (std::regex_search(current, pieces, commentPattern)) {
                    // Comment line: nothing to do.
                    continue;
                }
                if (!std::regex_search(current, pieces, pairPattern)) {
                    continue;
                }
                if (pieces.size() != 3) {
                    // Ill-formed: there should be exactly three matches, the
                    // whole line, the key and the value.
                    continue;
                }
                const auto key = pieces[1].str();
                const auto value = pieces[2].str();
                const auto typeIt = grammar.find(key);
                if (typeIt == grammar.end()) {
                    // Unsupported key: fail silently.
                    continue;
                }
                try {
                    switch (typeIt->second) {
                    case SupportedTypes::String:
                        entries[key] = value;
                        break;
                    case SupportedTypes::UInt:
                        entries[key] = std::stoul(value);
                        break;
                    case SupportedTypes::Double:
                        entries[key] = std::stod(value);
                        break;
                    default:
                        // Unsupported types are just ignored.
                        break;
                    }
                    // Silent-failure strategy: std::map promises that if an
                    // exception is thrown by any operation, the insertion has
                    // no effect.
                } catch ([[maybe_unused]] const std::invalid_argument& ex) {
                    // It would be nice to log it, but no other action is required.
                } catch ([[maybe_unused]] const std::out_of_range& ex) {
                    // It would be nice to log it, but no other action is required.
                }
            }
            // By now the map holds every relevant entry found in the file.
            if (entries.empty()) {
                return nonstd::make_unexpected(L"Failed parsing the launcher command file.");
            }
            return entries;
        }
    }
}
|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/core/kernels/data/single_threaded_executor.h"
#include "tensorflow/core/common_runtime/executor.h"
#include "tensorflow/core/common_runtime/executor_factory.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace data {
namespace {
// Inlined vectors sized for the common case of at most 4 inputs per kernel,
// avoiding heap allocation on the hot path. `using` aliases are the modern
// equivalent of the original `typedef`s and read left-to-right.
using TensorValueVec = gtl::InlinedVector<TensorValue, 4>;
using AllocatorAttributeVec = gtl::InlinedVector<AllocatorAttributes, 4>;
// An Executor that runs every kernel inline on the calling thread, in a
// topological order computed once in Initialize(). It rejects graph features
// that would require asynchronous scheduling: reference-typed edges, v1
// control flow, send/recv (partitioned graphs) and collective ops.
class SingleThreadedExecutorImpl : public Executor {
 public:
  explicit SingleThreadedExecutorImpl(const LocalExecutorParams& params)
      : params_(params) {}
  ~SingleThreadedExecutorImpl() override {
    // Kernels were created through params_.create_kernel() in Initialize(),
    // so release them through the matching delete_kernel() callback.
    for (const KernelState& kernel_state : kernels_) {
      params_.delete_kernel(kernel_state.kernel);
    }
  }
  // Validates `graph`, builds one KernelState per non-Arg node in topological
  // order, and precomputes where each node/argument output must be copied in
  // the flat `inputs` vector used by Run().
  Status Initialize(const Graph& graph) {
    // Topologically sort `graph` to get a sequence of OpKernels.
    std::vector<Node*> ordered_nodes;
    ordered_nodes.reserve(graph.num_nodes());
    GetReversePostOrder(graph, &ordered_nodes);
    if (ordered_nodes.size() != graph.num_nodes()) {
      return errors::InvalidArgument("Graph had ", graph.num_nodes(),
                                     " but reverse post-order had ",
                                     ordered_nodes.size());
    }
    kernels_.reserve(ordered_nodes.size());
    std::vector<Node*> nodes_with_kernels;
    nodes_with_kernels.reserve(ordered_nodes.size());
    // Ordered map so arguments can be walked in index order below.
    std::map<size_t, Node*> arg_index_to_node_map;
    std::unordered_map<Node*, size_t> node_to_index_map;
    // Create the kernel and input-related structures for each node in `graph`.
    for (Node* n : ordered_nodes) {
      for (DataType dt : n->output_types()) {
        if (IsRefType(dt)) {
          return errors::Unimplemented(
              "Single-threaded executor does not support reference-typed "
              "edges. But saw type ",
              DataTypeString(dt), " in outputs of node ", n->name());
        }
      }
      if (n->IsControlFlow()) {
        return errors::FailedPrecondition(
            "Single-threaded executor does not support low level control flow, "
            " but saw control flow node ",
            n->name(),
            ". Perhaps your graph contains old-style control flow primitives? "
            "Try using tf.compat.v1.enable_control_flow_v2().");
      }
      if (n->IsSend() || n->IsHostSend() || n->IsRecv() || n->IsHostRecv()) {
        return errors::Unimplemented(
            "Single-threaded executor does not support partitioned graphs. "
            "But saw send/recv node ",
            n->name());
      }
      if (n->IsCollective()) {
        return errors::Unimplemented(
            "Single-threaded executor does not support collective ops. But "
            "saw collective node ",
            n->name());
      }
      if (n->IsArg()) {
        int32 arg_index;
        TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &arg_index));
        if (arg_index < 0) {
          return errors::InvalidArgument("Invalid argument index ", arg_index,
                                         " in node ", n->name());
        }
        arg_index_to_node_map[arg_index] = n;
        // We do not create a kernel for Arg nodes, and instead inline the
        // argument handling directly in the executor code.
        continue;
      }
      const size_t kernel_index = kernels_.size();
      kernels_.push_back({});
      nodes_with_kernels.push_back(n);
      KernelState& kernel_state = kernels_[kernel_index];
      node_to_index_map[n] = kernel_index;
      TF_RETURN_IF_ERROR(
          params_.create_kernel(n->properties(), &kernel_state.kernel));
      kernel_state.num_inputs = n->num_inputs();
      kernel_state.num_outputs = n->num_outputs();
      // Each kernel's inputs occupy a contiguous slice of the flat `inputs`
      // vector, immediately after the previous kernel's slice.
      if (kernel_index == 0) {
        kernel_state.input_start_index = 0;
      } else {
        const KernelState& previous_kernel_state = kernels_[kernel_index - 1];
        kernel_state.input_start_index =
            previous_kernel_state.input_start_index +
            previous_kernel_state.num_inputs;
      }
    }
    // Build the mapping from each Arg node output to the input slot for the
    // corresponding destination node.
    if (!arg_index_to_node_map.empty()) {
      // `arg_index_to_node_map` is ordered, so rbegin() holds the max index.
      const size_t num_args = arg_index_to_node_map.rbegin()->first + 1;
      arg_output_locations_.resize(num_args);
      for (const auto& arg_index_node_pair : arg_index_to_node_map) {
        const size_t arg_index = arg_index_node_pair.first;
        const Node* arg_node = arg_index_node_pair.second;
        arg_output_locations_[arg_index].reserve(arg_node->out_edges().size());
        for (const Edge* e : arg_node->out_edges()) {
          if (e->src_output() == Graph::kControlSlot) {
            continue;
          } else if (e->src_output() != 0) {
            // Arg nodes have exactly one data output (slot 0).
            return errors::Internal("Invalid output index ", e->src_output(),
                                    " from argument node ", arg_index);
          }
          arg_output_locations_[arg_index].push_back(
              kernels_[node_to_index_map[e->dst()]].input_start_index +
              e->dst_input());
        }
      }
    }
    // Build the mapping from each node output to the input slot for the
    // corresponding destination node.
    for (size_t i = 0; i < kernels_.size(); ++i) {
      Node* n = nodes_with_kernels[i];
      KernelState& kernel_state = kernels_[i];
      kernel_state.output_locations.resize(kernel_state.num_outputs);
      for (const Edge* e : n->out_edges()) {
        if (!e->IsControlEdge()) {
          kernel_state.output_locations[e->src_output()].push_back(
              kernels_[node_to_index_map[e->dst()]].input_start_index +
              e->dst_input());
        }
      }
      // Compute allocator attributes for each node output, and corresponding
      // node input.
      kernel_state.output_alloc_attrs.resize(kernel_state.num_outputs);
      AllocatorAttributes* attrs = kernel_state.output_alloc_attrs.data();
      OpKernel* op_kernel = kernel_state.kernel;
      for (int out = 0; out < n->num_outputs(); out++) {
        DCHECK_LT(out, op_kernel->output_memory_types().size());
        bool on_host = op_kernel->output_memory_types()[out] == HOST_MEMORY;
        if (on_host) {
          AllocatorAttributes h;
          h.set_on_host(on_host);
          attrs[out].Merge(h);
        }
      }
    }
    // Propagate each output's allocator attributes to the matching input
    // slots, and compute the total size of the flat `inputs` vector.
    if (!kernels_.empty()) {
      const KernelState& last_kernel_state = kernels_.back();
      total_num_inputs_ =
          last_kernel_state.input_start_index + last_kernel_state.num_inputs;
      input_alloc_attrs_.resize(total_num_inputs_);
      for (size_t i = 0; i < kernels_.size(); ++i) {
        for (size_t j = 0; j < kernels_[i].output_locations.size(); ++j) {
          for (size_t output_location : kernels_[i].output_locations[j]) {
            input_alloc_attrs_[output_location] =
                kernels_[i].output_alloc_attrs[j];
          }
        }
      }
    } else {
      total_num_inputs_ = 0;
    }
    return Status::OK();
  }
  // Executes the kernels one at a time, on the calling thread, in the order
  // computed by Initialize(). Returns the first kernel error encountered.
  Status Run(const Args& args) override {
    // The inputs to each kernel are stored contiguously in `inputs`.
    //
    // We use `kernels_[i].input_start_index` and `kernels_[i].num_inputs` to
    // determine the range of elements in this vector that correspond to
    // the inputs of `kernels_[i]`.
    //
    // This vector has the following layout:
    //
    // * Kernel 0, input 0.
    // * Kernel 0, input 1.
    // * ...
    // * Kernel 0, input `kernels_[0].num_inputs - 1`.
    // * Kernel 1, input 0.
    // * ...
    // * Kernel 1, input `kernels_[1].num_inputs - 1`.
    // * ...
    // * Kernel `kernels_.size() - 1`, input 0.
    // * ...
    // * Kernel `kernels_.size() - 1`, input `kernels_.back().num_inputs - 1`.
    //
    // Note that kernels with zero inputs do not correspond to any elements in
    // this vector.
    //
    // We use `ManualConstructor<Tensor>` to avoid the overhead of
    // default-constructing an invalid `Tensor` for each slot at the beginning
    // of execution:
    // * Elements are initialized when the outputs of a kernel execution are
    //   propagated to the inputs of kernels that depend on them.
    // * The elements corresponding to the inputs for kernel `i` are destroyed
    //   after kernel `i` executes.
    // * In an error case (see below), we use the connectivity information in
    //   `KernelState::output_locations` to determine which locations have been
    //   initialized, and manually destroy them.
    std::vector<ManualConstructor<Tensor>> inputs(total_num_inputs_);
    // TODO(mrry): Can we avoid copying into these vectors? Consider modifying
    // OpKernelContext to take the TensorValueVec as a pointer into `inputs`.
    TensorValueVec node_inputs;
    AllocatorAttributeVec input_alloc_attrs;
    // Prepare the parameters that will be the same for all kernels.
    OpKernelContext::Params params;
    params.step_id = args.step_id;
    Device* device = params_.device;
    params.device = device;
    params.log_memory = false;             // TODO(mrry): Too severe?
    params.record_tensor_accesses = false; // TODO(mrry): Too severe?
    params.rendezvous = args.rendezvous;
    params.session_state = args.session_state;
    params.tensor_store = args.tensor_store;
    params.cancellation_manager = args.cancellation_manager;
    params.call_frame = args.call_frame;
    params.function_library = params_.function_library;
    params.resource_manager = device->resource_manager();
    params.step_container = args.step_container;
    params.slice_reader_cache = nullptr; // TODO(mrry): Too severe?
    params.inputs = &node_inputs;
    params.input_alloc_attrs = &input_alloc_attrs;
    Args::Runner runner_copy = args.runner;
    params.runner = &runner_copy;
    params.run_all_kernels_inline = args.run_all_kernels_inline;
    params.stats_collector = args.stats_collector;
    // NOTE(mrry): We are assuming that the graph is loopless and condless.
    params.frame_iter = FrameAndIter(0, 0);
    params.is_input_dead = false;
    // TODO(mrry): Add non-default device context inference.
    params.op_device_context = nullptr;
    // TODO(mrry): Consider implementing forwarding.
    params.forward_from_array = nullptr;
    const size_t received_args =
        args.call_frame ? args.call_frame->num_args() : 0;
    if (arg_output_locations_.size() > received_args) {
      return errors::InvalidArgument("Expected ", arg_output_locations_.size(),
                                     " arguments, but only received ",
                                     received_args, ".");
    }
    // ArgOp is a relatively expensive OpKernel due to the Tensor
    // allocations that it performs. Therefore we specialize its implementation
    // and forward arguments directly to the inputs of kernels that consume
    // them.
    for (size_t i = 0; i < arg_output_locations_.size(); ++i) {
      const size_t num_destinations = arg_output_locations_[i].size();
      if (num_destinations > 0) {
        Tensor arg;
        TF_CHECK_OK(args.call_frame->GetArg(i, &arg));
        for (size_t j = 0; j < num_destinations - 1; ++j) {
          inputs[arg_output_locations_[i][j]].Init(arg);
        }
        // Move `arg` to the last consumer to avoid the cost of copying it.
        inputs[arg_output_locations_[i][num_destinations - 1]].Init(
            std::move(arg));
      }
    }
    // Execute the kernels one-at-a-time in topological order.
    for (size_t i = 0; i < kernels_.size(); ++i) {
      const KernelState& kernel_state = kernels_[i];
      // Prepare the per-kernel parameters.
      const size_t input_start_index = kernel_state.input_start_index;
      const size_t num_inputs = kernel_state.num_inputs;
      const size_t num_outputs = kernel_state.num_outputs;
      node_inputs.clear();
      node_inputs.resize(num_inputs);
      input_alloc_attrs.clear();
      input_alloc_attrs.resize(num_inputs);
      for (size_t j = 0; j < num_inputs; ++j) {
        auto t = inputs[input_start_index + j].get();
        node_inputs[j].tensor = t;
        input_alloc_attrs[j] = input_alloc_attrs_[input_start_index + j];
      }
      params.op_kernel = kernel_state.kernel;
      params.output_attr_array = kernel_state.output_alloc_attrs.data();
      OpKernelContext ctx(&params, num_outputs);
      // Actually execute the kernel.
      device->Compute(kernel_state.kernel, &ctx);
      if (!ctx.status().ok()) {
        // On failure, we must manually free all intermediate tensors. We have
        // already freed all the inputs for kernels up to (but not including)
        // the `i`th kernel. We scan through the previously executed kernels and
        // destroy any tensors that were destined to be the input for a kernel
        // that has not yet executed.
        for (size_t j = 0; j < arg_output_locations_.size(); ++j) {
          for (size_t output_location : arg_output_locations_[j]) {
            if (output_location >= input_start_index) {
              // Only destroy an output location if it is an input to an
              // operation that has not yet executed.
              inputs[output_location].Destroy();
            }
          }
        }
        for (size_t j = 0; j < i; ++j) {
          const KernelState& executed_kernel_state = kernels_[j];
          for (size_t k = 0; k < executed_kernel_state.num_outputs; ++k) {
            for (size_t output_location :
                 executed_kernel_state.output_locations[k]) {
              if (output_location >= input_start_index) {
                // Only destroy an output location if it is an input to an
                // operation that has not yet executed.
                inputs[output_location].Destroy();
              }
            }
          }
        }
        return ctx.status();
      }
      // Free the inputs to the current kernel.
      for (size_t j = 0; j < num_inputs; ++j) {
        inputs[input_start_index + j].Destroy();
      }
      // Forward the outputs of the kernel to the inputs of subsequent kernels.
      for (size_t j = 0; j < num_outputs; ++j) {
        TensorValue val = ctx.release_output(j);
        const size_t num_destinations = kernel_state.output_locations[j].size();
        if (num_destinations > 0) {
          // TODO(mrry): Consider flattening the `output_locations` vector
          // to improve the cache-friendliness of this loop.
          for (size_t k = 0; k < num_destinations - 1; ++k) {
            // TODO(mrry): Validate that the types match the expected values or
            // ensure that the necessary validation has already happened.
            inputs[kernel_state.output_locations[j][k]].Init(*val.tensor);
          }
          // Move `arg` to the last consumer to avoid the cost of copying it.
          inputs[kernel_state.output_locations[j][num_destinations - 1]].Init(
              std::move(*val.tensor));
        }
        // `release_output()` transfers ownership of the heap-allocated tensor
        // to us; the payload was moved/copied above, so free the shell here.
        delete val.tensor;
      }
    }
    return Status::OK();
  }
  // "Async" execution is just synchronous execution followed by the callback,
  // since this executor never leaves the calling thread.
  void RunAsync(const Args& args, DoneCallback done) override {
    done(Run(args));
  }
 private:
  const LocalExecutorParams params_;
  // All following members are read-only after Initialize().
  // The sum of the number of inputs for each node in the graph. This determines
  // the length of the flat `inputs` vector. See comment at the beginning of
  // `RunAsync()` for details.
  size_t total_num_inputs_;
  // Represents cached graph structure state for each kernel.
  struct KernelState {
    // The kernel object. Not owned.
    //
    // This pointer is managed by `params_.create_kernel()` and
    // `params_.delete_kernel()`.
    OpKernel* kernel;
    // These fields determine the range of elements in `inputs` that corresponds
    // to the inputs of `kernel`.
    size_t input_start_index;
    size_t num_inputs;
    size_t num_outputs;
    // For the `j`th output of `kernel`, `output_locations[j]` contains the
    // locations in the flat `inputs` vector to which that output must be
    // copied. See comment at the beginning of `RunAsync()` for details.
    std::vector<std::vector<size_t>>
        output_locations; // Length = `num_outputs`.
    // Memory space information for each output of `kernel`.
    std::vector<AllocatorAttributes>
        output_alloc_attrs; // Length = `num_outputs`.
  };
  std::vector<KernelState> kernels_;
  // For the `i`th argument, `arg_output_locations_[i]` contains the locations
  // in the flat `inputs` vector to which that argument must be copied.
  std::vector<std::vector<size_t>>
      arg_output_locations_; // Length = `num_args`.
  // Memory space information for each input. This information is stored in the
  // same order as the flat `inputs` vector. See comment at the beginning of
  // `RunAsync()` for details.
  std::vector<AllocatorAttributes>
      input_alloc_attrs_; // Length = `total_num_inputs_`.
};
// Registers the single-threaded executor with the global ExecutorFactory
// registry under the name "SINGLE_THREADED_EXECUTOR" during static
// initialization of this translation unit.
class SingleThreadedExecutorRegistrar {
 public:
  SingleThreadedExecutorRegistrar() {
    // The factory is intentionally leaked: the registry keeps it for the
    // lifetime of the process.
    ExecutorFactory::Register("SINGLE_THREADED_EXECUTOR", new Factory());
  }

 private:
  class Factory : public ExecutorFactory {
    Status NewExecutor(const LocalExecutorParams& params, const Graph& graph,
                       std::unique_ptr<Executor>* out_executor) override {
      Executor* raw_executor = nullptr;
      TF_RETURN_IF_ERROR(NewSingleThreadedExecutor(params, graph, &raw_executor));
      out_executor->reset(raw_executor);
      return Status::OK();
    }
  };
};
static SingleThreadedExecutorRegistrar registrar;
} // namespace
// Creates and initializes a SingleThreadedExecutorImpl for `graph`, handing
// ownership of the executor to the caller on success.
Status NewSingleThreadedExecutor(const LocalExecutorParams& params,
                                 const Graph& graph, Executor** executor) {
  auto executor_impl = absl::make_unique<SingleThreadedExecutorImpl>(params);
  TF_RETURN_IF_ERROR(executor_impl->Initialize(graph));
  // Initialization succeeded; release ownership to the caller.
  *executor = executor_impl.release();
  return Status::OK();
}
} // namespace data
} // namespace tensorflow
|
#ifndef FACTORY_DECODER_LDPC_HPP
#define FACTORY_DECODER_LDPC_HPP
#include <vector>
#include <string>
#include <memory>
#include <map>
#include "Tools/Arguments/Argument_tools.hpp"
#include "Tools/Algo/Matrix/Sparse_matrix/Sparse_matrix.hpp"
#include "Module/Encoder/Encoder.hpp"
#include "Module/Decoder/Decoder_SIHO.hpp"
#include "Module/Decoder/Decoder_SISO_SIHO.hpp"
#include "Factory/Module/Decoder/Decoder.hpp"
namespace aff3ct
{
namespace factory
{
extern const std::string Decoder_LDPC_name;
extern const std::string Decoder_LDPC_prefix;
// Factory for LDPC decoders: holds the command-line/configuration parameters
// and builds hard-output (SIHO) or soft-output (SISO/SIHO) decoder modules.
struct Decoder_LDPC : public Decoder
{
	class parameters : public Decoder::parameters
	{
	public:
		// ------------------------------------------------------------------------------------------------- PARAMETERS
		// required parameters
		// Path to the parity-check matrix (H) file.
		std::string H_path;
		// optional parameters
		// Reordering applied to H ("NONE" keeps the order from the file).
		std::string H_reorder = "NONE";
		// min(*) operator approximation identifier — confirm accepted values
		// in the corresponding .cpp implementation.
		std::string min = "MINL";
		// SIMD strategy identifier; empty string means no SIMD specialization.
		std::string simd_strategy = "";
		// Normalization factor (presumably for normalized min-sum — confirm).
		float norm_factor = 1.f;
		// Offset value (presumably for offset min-sum — confirm).
		float offset = 0.f;
		// Weighting factor (presumably for the MWBF algorithm — confirm).
		float mwbf_factor = 1.f;
		// Stop decoding early based on the syndrome check.
		bool enable_syndrome = true;
		// Number of consecutive valid syndromes required before stopping.
		int syndrome_depth = 1;
		// Maximum number of decoding iterations.
		int n_ite = 10;
		// Per-bit probabilities (presumably for the PPBF algorithm — confirm).
		std::vector<float> ppbf_proba;
		// ---------------------------------------------------------------------------------------------------- METHODS
		explicit parameters(const std::string &p = Decoder_LDPC_prefix);
		virtual ~parameters() = default;
		// Returns a deep copy of this parameter set.
		Decoder_LDPC::parameters* clone() const;
		// parameters construction
		// Describes the supported command-line arguments.
		void get_description(tools::Argument_map_info &args) const;
		// Reads the parsed argument values into this object.
		void store          (const tools::Argument_map_value &vals);
		// Reports the effective parameter values for display/logging.
		void get_headers    (std::map<std::string,header_list>& headers, const bool full = true) const;
		// builder
		// Builds a hard-output decoder from the parity-check matrix `H` and
		// the positions of the information bits.
		template <typename B = int, typename Q = float>
		module::Decoder_SIHO<B,Q>* build(const tools::Sparse_matrix &H,
		                                 const std::vector<unsigned> &info_bits_pos,
		                                 const std::unique_ptr<module::Encoder<B>>& encoder = nullptr) const;
		// Builds a decoder that also exposes soft outputs.
		template <typename B = int, typename Q = float>
		module::Decoder_SISO_SIHO<B,Q>* build_siso(const tools::Sparse_matrix &H,
		                                           const std::vector<unsigned> &info_bits_pos,
		                                           const std::unique_ptr<module::Encoder<B>>& encoder = nullptr) const;
	};
	// Static conveniences that forward to the parameters' builders.
	template <typename B = int, typename Q = float>
	static module::Decoder_SIHO<B,Q>* build(const parameters& params, const tools::Sparse_matrix &H,
	                                        const std::vector<unsigned> &info_bits_pos,
	                                        const std::unique_ptr<module::Encoder<B>>& encoder = nullptr);
	template <typename B = int, typename Q = float>
	static module::Decoder_SISO_SIHO<B,Q>* build_siso(const parameters& params,
	                                                  const tools::Sparse_matrix &H,
	                                                  const std::vector<unsigned> &info_bits_pos,
	                                                  const std::unique_ptr<module::Encoder<B>>& encoder = nullptr);
};
}
}
#endif /* FACTORY_DECODER_LDPC_HPP */
|
#include "space_age.h"
namespace space_age {
// Returns the age exactly as supplied at construction, in seconds.
long long space_age::seconds() const { return _ageInSeconds; }
// Each on_<planet>() converts the age to that planet's years by dividing by
// the planet's orbital period in seconds. The _<planet>SecondsPerYear
// constants are declared in space_age.h; they are presumably floating-point
// so these are floating-point divisions — confirm in the header (an integral
// divisor would silently truncate).
double space_age::on_earth() const {
  return _ageInSeconds / _earthSecondsPerYear;
}
double space_age::on_jupiter() const {
  return _ageInSeconds / _jupiterSecondsPerYear;
}
double space_age::on_mars() const {
  return _ageInSeconds / _marsSecondsPerYear;
}
double space_age::on_mercury() const {
  return _ageInSeconds / _mercurySecondsPerYear;
}
// NOTE(review): `_neptuneSecondsPeryear` (lower-case "y") matches the header's
// declaration but breaks the naming pattern of its siblings; renaming must be
// done in the header and here together.
double space_age::on_neptune() const {
  return _ageInSeconds / _neptuneSecondsPeryear;
}
double space_age::on_saturn() const {
  return _ageInSeconds / _saturnSecondsPerYear;
}
double space_age::on_uranus() const {
  return _ageInSeconds / _uranusSecondsPerYear;
}
double space_age::on_venus() const {
  return _ageInSeconds / _venusSecondsPerYear;
}
} // namespace space_age
|
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/waf-regional/model/UpdateWebACLRequest.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <utility>
using namespace Aws::WAFRegional::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
// Default-constructs the request with every member flagged as unset, so
// SerializePayload() omits all of them from the JSON body until setters run.
UpdateWebACLRequest::UpdateWebACLRequest() : 
    m_webACLIdHasBeenSet(false),
    m_changeTokenHasBeenSet(false),
    m_updatesHasBeenSet(false),
    m_defaultActionHasBeenSet(false)
{
}
// Builds the JSON request body, serializing only the members that have been
// explicitly set on this request.
Aws::String UpdateWebACLRequest::SerializePayload() const
{
  JsonValue body;

  if(m_webACLIdHasBeenSet)
  {
    body.WithString("WebACLId", m_webACLId);
  }

  if(m_changeTokenHasBeenSet)
  {
    body.WithString("ChangeToken", m_changeToken);
  }

  if(m_updatesHasBeenSet)
  {
    // Serialize every update entry into a JSON array.
    Array<JsonValue> updatesArray(m_updates.size());
    for(unsigned i = 0; i < updatesArray.GetLength(); ++i)
    {
      updatesArray[i].AsObject(m_updates[i].Jsonize());
    }
    body.WithArray("Updates", std::move(updatesArray));
  }

  if(m_defaultActionHasBeenSet)
  {
    body.WithObject("DefaultAction", m_defaultAction.Jsonize());
  }

  return body.View().WriteReadable();
}
// Supplies the X-Amz-Target header that routes this call to the WAF Regional
// UpdateWebACL operation.
Aws::Http::HeaderValueCollection UpdateWebACLRequest::GetRequestSpecificHeaders() const
{
  Aws::Http::HeaderValueCollection requestHeaders;
  requestHeaders.insert(Aws::Http::HeaderValuePair("X-Amz-Target", "AWSWAF_Regional_20161128.UpdateWebACL"));
  return requestHeaders;
}
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
// 3-component float vector, padded/aligned to 16 bytes — presumably to match
// the device/SIMD alignment of a 4-float vector; confirm against the target.
typedef struct __attribute__((__aligned__(16)))
{
  float x, y, z;
}
float3;
// 4-component float vector (xyz = normal direction, w = curvature/edge flag
// in this program), aligned to 16 bytes.
typedef struct __attribute__((__aligned__(16)))
{
  float x, y, z, w;
}
float4;
#pragma omp declare target
// Component-wise scaling of vector `v` by scalar `s`.
inline float3 operator*(const float3 &v, const float s)
{
  return {v.x * s, v.y * s, v.z * s};
}
// Component-wise vector subtraction: lhs - rhs.
inline float3 operator-(const float3 &lhs, const float3 &rhs)
{
  return {lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z};
}
inline float dot(const float3 &a, const float3 &b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Returns `v` scaled to unit length (result is undefined for a zero vector,
// since 1/sqrt(0) is infinite).
inline float3 normalize(const float3 &v)
{
  const float inverseLength = 1.f / sqrtf(dot(v, v));
  return v * inverseLength;
}
// Right-handed cross product lhs x rhs.
inline float3 cross(const float3 &lhs, const float3 &rhs)
{
  return {lhs.y * rhs.z - lhs.z * rhs.y,
          lhs.z * rhs.x - lhs.x * rhs.z,
          lhs.x * rhs.y - lhs.y * rhs.x};
}
// Euclidean norm of `vec`.
inline float length(const float3 &vec)
{
  return sqrtf(dot(vec, vec));
}
// Estimates a surface normal (xyz) and an edge flag (w) for the organized
// point cloud `points` (row-major, width x height) at linear index `idx`,
// using central or one-sided differences over the 4-neighborhood.
inline float4 normalEstimate(const float3 *points, int idx, int width, int height)
{
  float3 query_pt = points[idx];
  // Invalid (NaN-depth) query point: no normal can be estimated.
  if (isnan(query_pt.z))
    return {0.f,0.f,0.f,0.f};
  int xIdx = idx % width;
  int yIdx = idx / width;
  // are we at a border? are our neighbor valid points?
  // A neighbor is "valid" when it exists, has finite depth, and its depth is
  // within 200 units of the query point (depth-discontinuity rejection).
  // NOTE(review): `xIdx > 1` / `yIdx > 1` exclude column/row 1 even though
  // idx-1 / idx-width are in range there (east/south use >= bounds) — confirm
  // whether this asymmetric off-by-one is intentional.
  bool west_valid = (xIdx > 1) && !isnan (points[idx-1].z) && fabsf (points[idx-1].z - query_pt.z) < 200.f;
  bool east_valid = (xIdx < width-1) && !isnan (points[idx+1].z) && fabsf (points[idx+1].z - query_pt.z) < 200.f;
  bool north_valid = (yIdx > 1) && !isnan (points[idx-width].z) && fabsf (points[idx-width].z - query_pt.z) < 200.f;
  bool south_valid = (yIdx < height-1) && !isnan (points[idx+width].z) && fabsf (points[idx+width].z - query_pt.z) < 200.f;
  // Pick central differences when both sides are valid, one-sided otherwise.
  // `&` on bools (no short-circuit) keeps the code branch-light — presumably
  // chosen for the offload target; confirm.
  float3 horiz, vert;
  if (west_valid & east_valid)
    horiz = points[idx+1] - points[idx-1];
  if (west_valid & !east_valid)
    horiz = points[idx] - points[idx-1];
  if (!west_valid & east_valid)
    horiz = points[idx+1] - points[idx];
  if (!west_valid & !east_valid)
    return {0.f,0.f,0.f,1.f};
  if (south_valid & north_valid)
    vert = points[idx-width] - points[idx+width];
  if (south_valid & !north_valid)
    vert = points[idx] - points[idx+width];
  if (!south_valid & north_valid)
    vert = points[idx-width] - points[idx];
  if (!south_valid & !north_valid)
    return {0.f,0.f,0.f,1.f};
  float3 normal = cross (horiz, vert);
  float curvature = length (normal);
  // NOTE(review): the length-based curvature computed above is immediately
  // overwritten by this 0/1 edge indicator (a bool converted to float), making
  // the previous assignment a dead store — confirm which value was intended.
  curvature = fabsf(horiz.z) > 0.04f || fabsf(vert.z) > 0.04f ||
    !west_valid || !east_valid || !north_valid || !south_valid;
  float3 mc = normalize (normal);
  // Orient the normal toward the viewpoint (origin): flip if it points away.
  if ( dot (query_pt, mc) > 0.f )
    mc = mc * -1.f;
  return {mc.x, mc.y, mc.z, curvature};
}
#pragma omp end declare target
// Generates a random width x height point cloud, repeatedly estimates per-point
// normals on the offload device, and prints a checksum of the results.
int main(int argc, char* argv[]) {
  if (argc != 3) {
    printf("Usage: %s <width> <height>\n", argv[0]);
    return 1;
  }
  const int width = atoi(argv[1]);
  const int height = atoi(argv[2]);
  // atoi() returns 0 on garbage; reject non-positive dimensions explicitly.
  if (width <= 0 || height <= 0) {
    fprintf(stderr, "Error: width and height must be positive integers\n");
    return 1;
  }
  const int numPts = width * height;
  // Compute byte counts in size_t so large images do not overflow int.
  const size_t size = (size_t)numPts * sizeof(float3);
  const size_t normal_size = (size_t)numPts * sizeof(float4);
  float3 *points = (float3*) malloc (size);
  float4 *normal_points = (float4*) malloc (normal_size);
  // malloc() may fail for large inputs; free(NULL) is a safe no-op.
  if (points == NULL || normal_points == NULL) {
    fprintf(stderr, "Error: out of memory\n");
    free(normal_points);
    free(points);
    return 1;
  }
  // Fixed seed keeps the checksum reproducible across runs.
  srand(123);
  for (int i = 0; i < numPts; i++) {
    points[i].x = rand() % width;
    points[i].y = rand() % height;
    points[i].z = rand() % 256;
  }
  #pragma omp target data map (to: points[0:numPts]) \
                          map (from: normal_points[0:numPts])
  {
    // Repeat the kernel to get a measurable amount of device work.
    for (int i = 0; i < 100; i++) {
      #pragma omp target teams distribute parallel for thread_limit(256)
      for (int idx = 0; idx < numPts; idx++)
        normal_points[idx] = normalEstimate(points, idx, width, height);
    }
  }
  // Reduce all normals into a simple component-wise checksum.
  float sx, sy, sz, sw;
  sx = sy = sz = sw = 0.f;
  for (int i = 0; i < numPts; i++) {
    sx += normal_points[i].x;
    sy += normal_points[i].y;
    sz += normal_points[i].z;
    sw += normal_points[i].w;
  }
  printf("Checksum: x=%f y=%f z=%f w=%f\n", sx, sy, sz, sw);
  free(normal_points);
  free(points);
  return 0;
}
|
// (C) Copyright 2009 Andrew Sutton
//
// Use, modification and distribution are subject to the
// Boost Software License, Version 1.0 (See accompanying file
// LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
#ifndef TEST_DESTRUCTION_HPP
#define TEST_DESTRUCTION_HPP
#include <boost/concept/assert.hpp>
#include <utility>
/** @name Destroy Graph
* Destroy the graph by removing vertices (if possible).
*/
//@{
// This will basically catch adjacency matrices, which don't get torn down.
// Fallback overload selected when neither vertex-mutable nor labeled removal
// applies (e.g. adjacency matrices): such graphs don't get torn down, so this
// is intentionally a no-op.
template < typename Graph, typename VertexSet, typename Remove, typename Label >
void destroy_graph(Graph&, VertexSet const&, Remove, Label)
{
}
// This matches MutableGraph, so just remove a vertex and then clear.
// This matches MutableGraph, so just remove a vertex and then clear.
// Dispatch tags: Remove = true_, Label = false_ (vertex-mutable, unlabeled).
// N is the expected vertex count of the test graph, defined by the including
// test harness — confirm there.
template < typename Graph, typename VertexSet >
void destroy_graph(
    Graph& g, VertexSet const& verts, boost::mpl::true_, boost::mpl::false_)
{
    using namespace boost;
    BOOST_CONCEPT_ASSERT((VertexListGraphConcept< Graph >));
    BOOST_CONCEPT_ASSERT((VertexMutableGraphConcept< Graph >));
    std::cout << "...destroy_normal\n";
    // Remove the roof vertex
    remove_vertex(verts[0], g);
    BOOST_ASSERT(num_vertices(g) == N - 1);
}
// This will match labeled graphs.
// This will match labeled graphs.
// Dispatch tags: Remove = false_, Label = true_; vertices are removed by
// label (0) rather than by descriptor.
template < typename Graph, typename VertexSet >
void destroy_graph(
    Graph& g, VertexSet const&, boost::mpl::false_, boost::mpl::true_)
{
    using namespace boost;
    BOOST_CONCEPT_ASSERT((VertexListGraphConcept< Graph >));
    // BOOST_CONCEPT_ASSERT(( VeretexMutableGraphConcept<Graph> ));
    std::cout << "...destroy_labeled\n";
    // Remove the roof vertex
    remove_vertex(0, g);
    BOOST_ASSERT(num_vertices(g) == N - 1);
}
//@}
/** @name Disconnect Graph
* Disconnect edges in the graph. Note that this doesn't fully disconnect the
* graph. It simply determines if we can disconnect an edge or two and verify
* that the resulting graph is valid. The Labeled type parameter is used to
* dispatch for unlabeled and labeled graphs.
*
* @todo This doesn't quite work for multigraphs...
*/
//@{
// Unlabeled-graph variant (mpl::false_): edges are addressed via vertex
// descriptors. M is the fixture's expected edge count — defined by the
// including test harness; confirm against the caller.
template < typename Graph, typename VertexSet >
void disconnect_graph(Graph& g, VertexSet const& verts, boost::mpl::false_)
{
    using namespace boost;
    BOOST_CONCEPT_ASSERT((EdgeListGraphConcept< Graph >));
    BOOST_CONCEPT_ASSERT((EdgeMutableGraphConcept< Graph >));
    std::cout << "...disconnect_normal\n";
    typedef typename graph_traits< Graph >::edge_descriptor Edge;
    // Disconnect the "lollipop" from the house.
    Edge e = edge(verts[5], verts[3], g).first;
    remove_edge(e, g);
    BOOST_ASSERT(num_edges(g) == M - 1);
    // Remove the "floor" edge from the house.
    remove_edge(verts[3], verts[2], g);
    BOOST_ASSERT(num_edges(g) == M - 2);
    // Fully disconnect the roof vertex (drops two incident edges).
    clear_vertex(verts[0], g);
    BOOST_ASSERT(num_edges(g) == M - 4);
    // What happens if we try to remove an edge that doesn't exist?
    // Expected: a silent no-op — the edge count must not change.
    remove_edge(verts[5], verts[0], g);
    BOOST_ASSERT(num_edges(g) == M - 4);
}
// Labeled-graph variant (mpl::true_): mirrors the unlabeled test above but
// addresses vertices by label via the *_by_label API.
template < typename Graph, typename VertexSet >
void disconnect_graph(Graph& g, VertexSet const&, boost::mpl::true_)
{
    using namespace boost;
    BOOST_CONCEPT_ASSERT((EdgeListGraphConcept< Graph >));
    // BOOST_CONCEPT_ASSERT((EdgeMutableGraphConcept<Graph>));
    std::cout << "...disconnect_labeled\n";
    typedef typename boost::graph_traits< Graph >::edge_descriptor Edge;
    // Disconnect the "lollipop" from the house.
    Edge e = boost::edge_by_label(5, 3, g).first;
    boost::remove_edge(e, g);
    BOOST_ASSERT(boost::num_edges(g) == M - 1);
    // Remove the "floor" edge from the house.
    boost::remove_edge_by_label(3, 2, g);
    BOOST_ASSERT(boost::num_edges(g) == M - 2);
    // Fully disconnect the roof vertex (drops two incident edges).
    clear_vertex_by_label(0, g);
    BOOST_ASSERT(boost::num_edges(g) == M - 4);
    // What happens if we try to remove an edge that doesn't exist?
    // Expected: a silent no-op — the edge count must not change.
    boost::remove_edge_by_label(5, 0, g);
    BOOST_ASSERT(boost::num_edges(g) == M - 4);
}
//@}
#endif
|
/*
Copyright(c) 2015 - 2022 Denis Blank <denis.blank at outlook dot com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions :
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
**/
#include <test-continuable.hpp>
// Stand-in for an asynchronous HTTP request: ignores the URL and returns a
// continuable that is already resolved with an empty body, so the chains in
// the tests below run synchronously.
static cti::continuable<std::string> http_request(std::string) {
  return cti::make_ready_continuable<std::string>("");
}
// Callable with both a result path and an exception path, used to verify that
// continuable's `next` dispatches correctly. Both call operators are
// rvalue-qualified (&&): the object is consumed when invoked.
struct my_callable {
  // Result path: receives the resolved std::string value.
  void operator()(std::string) && {
    // ...
  }
  // Exception path: selected via the exception_arg_t tag.
  void operator()(cti::exception_arg_t, cti::exception_t) && {
    // ...
  }
};
// Compile-coverage test: every chain below only needs to compile and run to
// completion; the continuations resolve immediately (see http_request above).
TEST(simple_compilation_tests, error_compile_tests) {
  // Tagged dispatch through a two-path callable.
  http_request("github.com").next(my_callable{});
  // operator| as then-chaining sugar.
  http_request("github.com") | [](std::string) {
    // ...
    return 0;
  } | [] {
    // ...
  };
  // Plain then/fail chaining.
  http_request("github.com")
      .then([](std::string) {
        // ...
        return 0;
      })
      .then([](int) {
        // ...
      })
      .fail([](cti::exception_t) {
        // ...
      });
  // operator&&: aggregate both results, plus apply() pass-through.
  (http_request("github.com") && http_request("github.com"))
      .then([](std::string, std::string) {
        // ...
      })
      .fail([](cti::exception_t) {
        // ...
      })
      .apply([](auto&& me) {
        // ...
        return std::forward<decltype(me)>(me);
      });
  // operator||: first result wins, single-argument continuation.
  (http_request("github.com") || http_request("github.com"))
      .then([](std::string) {
        // ...
      })
      .fail([](cti::exception_t) {
        // ...
      });
  // operator>>: sequential composition with both results forwarded.
  (http_request("github.com") >> http_request("github.com"))
      .then([](std::string, std::string) {
        // ...
      })
      .fail([](cti::exception_t) {
        // ...
      });
}
// Compile-coverage test for the when_seq/when_all/when_any connectors with
// mixed argument kinds (continuables, plain values, populated ranges, tuples).
TEST(simple_compilation_tests, connection_compile_tests) {
  // Mixed aggregation: plain values stay in place, a populated pair becomes a
  // std::vector<int>, and nested tuples keep their structure.
  cti::when_seq(
      cti::make_ready_continuable(0, 1), 2, //< See this plain value
      cti::populate(cti::make_ready_continuable(3),
                    cti::make_ready_continuable(4)),
      std::make_tuple(std::make_tuple(cti::make_ready_continuable(5))))
      .then([](int r0, int r1, int r2, std::vector<int> r34,
               std::tuple<std::tuple<int>> r5) {
        // ...
        unused(r0, r1, r2, r34, r5);
      });
  // Iterator-range overload over a populated container.
  auto v = cti::populate(cti::make_ready_continuable(8),
                         cti::make_ready_continuable(9));
  cti::when_seq(v.begin(), v.end()).then([](auto) {
    // ...
  });
  // Void continuable and the zero-argument edge case.
  cti::when_seq(cti::make_ready_continuable()) // ...
      .then([] {
        // ...
      });
  cti::when_seq() // ...
      .then([] {
        // ...
      });
  // Exceptional continuable must route to fail().
  cti::when_seq(cti::make_exceptional_continuable<void>(cti::exception_t{}))
      .fail([](auto) {
        // ...
      });
  // Same mixed aggregation through when_all.
  cti::when_all(
      cti::make_ready_continuable(0, 1), 2, //< See this plain value
      cti::populate(cti::make_ready_continuable(3),
                    cti::make_ready_continuable(4)),
      std::make_tuple(std::make_tuple(cti::make_ready_continuable(5))))
      .then([](int r0, int r1, int r2, std::vector<int> r34,
               std::tuple<std::tuple<int>> r5) {
        // ...
        unused(r0, r1, r2, r34, r5);
      });
  // when_any resolves with a single value of the common type.
  cti::when_any(cti::make_ready_continuable(22),
                cti::make_ready_continuable(44))
      .then([](int) {
        // ...
      });
}
|
// /*
// Copyright 2016 AROGAN Group
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// */
#pragma once
#include "defs.hpp"
#include <vector>
// A renderable axis-aligned cube. Per-instance state is a model matrix and a
// color; the cube geometry (vertices, normals, indices) and its GL buffer
// objects are shared across all instances as static members.
class Voxel
{
public:
    // Leaves mModelMatrix/mColor value-uninitialized; call SetIdentity() /
    // SetColor() before use.
    Voxel(){}
    // Builds the model matrix from position and scale and stores the color
    // (definition lives in the .cpp — confirm exact composition there).
    Voxel(glm::vec3 position, glm::vec3 scale, glm::vec3 color);
    void SetPosition(glm::vec3 position);
    void SetScale(glm::vec3 scale);
    // Resets the model matrix to identity.
    void SetIdentity();
    void SetColor(glm::vec3 color);
    // Positions and scales the voxel in one call.
    void Plot(glm::vec3 position, glm::vec3 scale);
    // Issues the draw call using the shared static buffers.
    static void RenderVoxel();
    // NOTE(review): the trailing `const` makes the RETURN VALUE const, which
    // inhibits moves at call sites; consider plain `-> mat4` (must be changed
    // together with the out-of-line definition).
    auto GetModelMatrix() const -> mat4 const;
    auto GetColor() const -> vec3 const;
    // Shared GL handles for the unit-cube mesh.
    static GLuint mVAO, mVertexBuffer, mNormalBuffer, mElementBuffer;
    static std::vector<GLfloat> voxelData;
    static std::vector<GLfloat> voxelDataNormal;
    static std::vector<unsigned int> voxelIndices;
    // Fills the static geometry arrays / buffers; call once before rendering.
    static void BuildVoxelData();
private:
    mat4 mModelMatrix;
    vec3 mColor;
    //static const unsigned int Voxel::voxelIndices[];
    //static const GLfloat voxelData[];
};
|
//
//
//
#ifndef AEYON3D_ENTITYID_HPP
#define AEYON3D_ENTITYID_HPP
#include "UUID.hpp"
namespace aeyon
{
    // Entities are identified by a universally unique identifier rather than
    // a recycled integer handle.
    using EntityID = UUID;
}
#endif
|
/*
* LPDegreeOrdered.cpp
*
* Created on: 24.09.2013
* Author: cls
*/
#include <unordered_map>
#include "LPDegreeOrdered.h"
#include "../auxiliary/Log.h"
#include "../auxiliary/Parallel.h"
namespace NetworKit {
// Binds the algorithm to the input graph; no work happens until run().
LPDegreeOrdered::LPDegreeOrdered(const Graph& G) : CommunityDetectionAlgorithm(G) {}
// Label propagation with a degree-ordered (ascending) node schedule: each
// sweep assigns every active node the most frequent label among its
// neighbors, until fewer than theta nodes change in a sweep.
void LPDegreeOrdered::run() {
    count n = G.numberOfNodes();
    // Convergence threshold. Note: n / 1e5 is evaluated in double and
    // truncated on conversion to count, so theta == 0 for n < 1e5 — i.e.
    // small graphs iterate until a sweep changes no label at all.
    count theta = n / 1e5;
    DEBUG("theta: " , theta);
    index z = G.upperNodeIdBound();
    Partition labels(z);
    // initialize all labels to singletons
    labels.allToSingletons();
    // initialize all nodes as active
    std::vector<int> active(z + 1, 1); // not a boolean vector because there might be problems with parallel access
    count nUpdated; // number of nodes which have been updated in last iteration
    nUpdated = n; // all nodes have new labels -> first loop iteration runs
    nIterations = 0; // number of iterations
    // Relabels one node; reactivates its neighbors on change, deactivates the
    // node itself when its label is already dominant.
    auto propagateLabels = [&](node v){
        if ((active[v]) && (G.degree(v) > 0)) {
            std::unordered_map<label, count> labelCounts; // neighborLabelCounts maps label -> frequency in the neighbors
            // count the labels in the neighborhood of v
            G.forNeighborsOf(v, [&](node w) {
                label lw = labels.subsetOf(w);//labels[w];
                labelCounts[lw] += 1; // add weight of edge {v, w}
            });
            // get dominant label (ties resolved by max_element's first-wins rule)
            label dominant = std::max_element(labelCounts.begin(),
                            labelCounts.end(),
                            [](const std::pair<label, count>& p1, const std::pair<label, count>& p2) {
                                return p1.second < p2.second;})->first;
            if (labels[v] != dominant) { // UPDATE
                labels.moveToSubset(dominant,v);//labels[v] = dominant;
                nUpdated += 1; // TODO: atomic update?
                G.forNeighborsOf(v, [&](node u) {
                    active[u] = 1;
                });
            } else {
                active[v] = 0;
            }
        } // else node is isolated or inactive
    };
    // sort nodes by degree
    std::vector<node> nodes;
    G.forNodes([&](node v) {
        nodes.push_back(v);
    });
    Aux::Parallel::sort(nodes.begin(), nodes.end(), [&](node u, node v) {
        return G.degree(u) < G.degree(v); // lower degree before higher degree
    });
    // propagate labels
    while (nUpdated > theta) { // as long as a label has changed...
        nUpdated = 0; // reset update counter
        for (node v : nodes) {
            propagateLabels(v);
        }
        INFO("updated labels: " , nUpdated);
        nIterations += 1;
    }
    result = std::move(labels);
    hasRun = true;
}
// Number of label-propagation sweeps performed by the most recent run().
count LPDegreeOrdered::numberOfIterations() {
    return nIterations;
}
// Human-readable identifier of this algorithm instance.
std::string LPDegreeOrdered::toString() {
    return std::string{"LPDegreeOrdered()"};
}
} /* namespace NetworKit */
|
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "net/quic/congestion_control/prr_sender.h"
#include <algorithm>
#include "base/logging.h"
#include "net/quic/crypto/crypto_protocol.h"
#include "net/quic/quic_bandwidth.h"
#include "net/quic/quic_protocol.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace net {
namespace test {
namespace {
// Constant based on TCP defaults.
const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
} // namespace
// Empty fixture: each test below drives a local PrrSender instance directly.
class PrrSenderTest : public ::testing::Test {};
// After a single loss, PRR (RFC 6937) should pace retransmissions at half the
// ack rate: one packet may be sent for every two acks until bytes_in_flight
// reaches the new congestion window, then one per ack (packet conservation).
TEST_F(PrrSenderTest, SingleLossResultsInSendOnEveryOtherAck) {
  PrrSender prr;
  QuicPacketCount num_packets_in_flight = 50;
  QuicByteCount bytes_in_flight = num_packets_in_flight * kMaxSegmentSize;
  const QuicPacketCount ssthresh_after_loss = num_packets_in_flight / 2;
  const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
  prr.OnPacketLost(bytes_in_flight);
  // Ack a packet. PRR allows one packet to leave immediately.
  prr.OnPacketAcked(kMaxSegmentSize);
  bytes_in_flight -= kMaxSegmentSize;
  EXPECT_EQ(QuicTime::Delta::Zero(),
            prr.TimeUntilSend(congestion_window, bytes_in_flight,
                              ssthresh_after_loss * kMaxSegmentSize));
  // Send retransmission.
  prr.OnPacketSent(kMaxSegmentSize);
  // PRR shouldn't allow sending any more packets.
  EXPECT_EQ(QuicTime::Delta::Infinite(),
            prr.TimeUntilSend(congestion_window, bytes_in_flight,
                              ssthresh_after_loss * kMaxSegmentSize));
  // One packet is lost, and one ack was consumed above. PRR now paces
  // transmissions through the remaining 48 acks. PRR will alternatively
  // disallow and allow a packet to be sent in response to an ack.
  for (uint64_t i = 0; i < ssthresh_after_loss - 1; ++i) {
    // Ack a packet. PRR shouldn't allow sending a packet in response.
    prr.OnPacketAcked(kMaxSegmentSize);
    bytes_in_flight -= kMaxSegmentSize;
    EXPECT_EQ(QuicTime::Delta::Infinite(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
    // Ack another packet. PRR should now allow sending a packet in response.
    prr.OnPacketAcked(kMaxSegmentSize);
    bytes_in_flight -= kMaxSegmentSize;
    EXPECT_EQ(QuicTime::Delta::Zero(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
    // Send a packet in response.
    prr.OnPacketSent(kMaxSegmentSize);
    bytes_in_flight += kMaxSegmentSize;
  }
  // Since bytes_in_flight is now equal to congestion_window, PRR now maintains
  // packet conservation, allowing one packet to be sent in response to an ack.
  EXPECT_EQ(congestion_window, bytes_in_flight);
  for (int i = 0; i < 10; ++i) {
    // Ack a packet.
    prr.OnPacketAcked(kMaxSegmentSize);
    bytes_in_flight -= kMaxSegmentSize;
    EXPECT_EQ(QuicTime::Delta::Zero(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
    // Send a packet in response, since PRR allows it.
    prr.OnPacketSent(kMaxSegmentSize);
    bytes_in_flight += kMaxSegmentSize;
    // Since bytes_in_flight is equal to the congestion_window,
    // PRR disallows sending.
    EXPECT_EQ(congestion_window, bytes_in_flight);
    EXPECT_EQ(QuicTime::Delta::Infinite(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
  }
}
// A burst loss that drops bytes_in_flight below ssthresh should trigger
// PRR-SSRB (slow-start reduction bound): up to two packets may be sent per
// ack until flight catches up, then one per ack as usual.
TEST_F(PrrSenderTest, BurstLossResultsInSlowStart) {
  PrrSender prr;
  QuicByteCount bytes_in_flight = 20 * kMaxSegmentSize;
  const QuicPacketCount num_packets_lost = 13;
  const QuicPacketCount ssthresh_after_loss = 10;
  const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
  // Lose 13 packets.
  bytes_in_flight -= num_packets_lost * kMaxSegmentSize;
  prr.OnPacketLost(bytes_in_flight);
  // PRR-SSRB will allow the following 3 acks to send up to 2 packets.
  for (int i = 0; i < 3; ++i) {
    prr.OnPacketAcked(kMaxSegmentSize);
    bytes_in_flight -= kMaxSegmentSize;
    // PRR-SSRB should allow two packets to be sent.
    for (int j = 0; j < 2; ++j) {
      EXPECT_EQ(QuicTime::Delta::Zero(),
                prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                  ssthresh_after_loss * kMaxSegmentSize));
      // Send a packet in response.
      prr.OnPacketSent(kMaxSegmentSize);
      bytes_in_flight += kMaxSegmentSize;
    }
    // PRR should allow no more than 2 packets in response to an ack.
    EXPECT_EQ(QuicTime::Delta::Infinite(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
  }
  // Out of SSRB mode, PRR allows one send in response to each ack.
  for (int i = 0; i < 10; ++i) {
    prr.OnPacketAcked(kMaxSegmentSize);
    bytes_in_flight -= kMaxSegmentSize;
    EXPECT_EQ(QuicTime::Delta::Zero(),
              prr.TimeUntilSend(congestion_window, bytes_in_flight,
                                ssthresh_after_loss * kMaxSegmentSize));
    // Send a packet in response.
    prr.OnPacketSent(kMaxSegmentSize);
    bytes_in_flight += kMaxSegmentSize;
  }
}
} // namespace test
} // namespace net
|
/*
Copyright 2019 Ilia S. Kovalev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "pch.h"
#include <tuple>
// Compile-time test: Typelist::at<I> must yield the I-th type, including
// duplicates at distinct positions.
TEST(Typelist, type)
{
    using testlist = Typelist<int, char, bool, int, float, float>;
    static_assert(std::is_same_v<
        testlist::at<0>,
        int
    >);
    static_assert(std::is_same_v<
        testlist::at<1>,
        char
    >);
    static_assert(std::is_same_v<
        testlist::at<2>,
        bool
    >);
    static_assert(std::is_same_v<
        testlist::at<3>,
        int
    >);
    static_assert(std::is_same_v<
        testlist::at<4>,
        float
    >);
    static_assert(std::is_same_v<
        testlist::at<5>,
        float
    >);
}
// Compile-time test: size counts the type parameters; the empty list is 0.
TEST(Typelist, size)
{
    using testlist = Typelist<int, char, bool, int, float, float>;
    static_assert(testlist::size == 6);
    static_assert(Typelist<>::size == 0);
}
// Compile-time test: values_container rewraps the list into another template
// (here std::tuple), recursing into nested Typelists.
TEST(Typelist, tuple)
{
    static_assert(std::is_same_v<Typelist<>::values_container<std::tuple>, std::tuple<>>);
    using case1 = Typelist<int, char, bool, int, float, float>::values_container<std::tuple>;
    using correct1 = std::tuple<int, char, bool, int, float, float>;
    static_assert(std::is_same_v<case1, correct1>);
    // Nested lists become nested tuples.
    using case2 = Typelist<int, char, Typelist<bool, bool>, int, float, float>::values_container<std::tuple>;
    using correct2 = std::tuple<int, char, std::tuple<bool, bool>, int, float, float>;
    static_assert(std::is_same_v<case2, correct2>);
}
// Compile-time test of the structural operations: head/tail/back accessors,
// variadic push_back/push_front (including empty edge cases), and flat, which
// splices nested Typelists one level into their parent.
TEST(Typelist, modification)
{
    using testlist = Typelist<int, char, bool, int, float, float>;
    // Accessors.
    static_assert(std::is_same_v<
        testlist::head,
        int
    >);
    static_assert(std::is_same_v<
        testlist::tail,
        Typelist<char, bool, int, float, float>
    >);
    static_assert(std::is_same_v<
        testlist::back,
        float
    >);
    // push_back: empty list/empty pack edge cases, then general appends.
    static_assert(std::is_same_v<
        Typelist<>::push_back<>,
        Typelist<>
    >);
    static_assert(std::is_same_v<
        Typelist<int>::push_back<>,
        Typelist<int>
    >);
    static_assert(std::is_same_v<
        Typelist<>::push_back<int>,
        Typelist<int>
    >);
    static_assert(std::is_same_v<
        Typelist<>::push_back<int, bool>,
        Typelist<int, bool>
    >);
    static_assert(std::is_same_v<
        Typelist<float>::push_back<int>,
        Typelist<float, int>
    >);
    static_assert(std::is_same_v<
        Typelist<bool>::push_back<int, bool>,
        Typelist<bool, int, bool>
    >);
    // push_front: mirrors push_back but prepends.
    static_assert(std::is_same_v<
        Typelist<>::push_front<>,
        Typelist<>
    >);
    static_assert(std::is_same_v<
        Typelist<int>::push_front<>,
        Typelist<int>
    >);
    static_assert(std::is_same_v<
        Typelist<>::push_front<int>,
        Typelist<int>
    >);
    static_assert(std::is_same_v<
        Typelist<>::push_front<int, bool>,
        Typelist<int, bool>
    >);
    static_assert(std::is_same_v<
        Typelist<float>::push_front<int>,
        Typelist<int, float>
    >);
    static_assert(std::is_same_v<
        Typelist<bool>::push_front<int, bool>,
        Typelist<int, bool, bool>
    >);
    // flat: one-level splice of nested lists; empty sublists vanish.
    static_assert(std::is_same_v<
        Typelist<>::flat,
        Typelist<>
    >);
    static_assert(std::is_same_v<
        Typelist<int>::flat,
        Typelist<int>
    >);
    static_assert(std::is_same_v<
        Typelist<int, bool>::flat,
        Typelist<int, bool>
    >);
    static_assert(std::is_same_v<
        Typelist<Typelist<>, bool>::flat,
        Typelist<bool>
    >);
    static_assert(std::is_same_v<
        Typelist<Typelist<>, bool, Typelist<>, Typelist<>>::flat,
        Typelist<bool>
    >);
    static_assert(std::is_same_v<
        Typelist<Typelist<int, double>, bool, Typelist<>, Typelist<char>>::flat,
        Typelist<int, double, bool, char>
    >);
}
// Compile-time test of the boolean queries: is_type_list_v, is_empty, and
// contains. Note contains is not recursive — a type inside a nested Typelist
// does not count.
TEST(Typelist, predicates)
{
    static_assert(!is_type_list_v<int>);
    static_assert(is_type_list_v<Typelist<>>);
    static_assert(is_type_list_v<Typelist<int>>);
    static_assert(is_type_list_v<Typelist<int, char>>);
    static_assert(Typelist<>::is_empty);
    static_assert(!Typelist<bool>::is_empty);
    static_assert(!Typelist<>::contains<int>);
    static_assert(!Typelist<bool, float>::contains<int>);
    static_assert(Typelist<bool, float>::contains<bool>);
    // Top-level search only: int inside the nested list is not found.
    static_assert(!Typelist<bool, Typelist<int>, float>::contains<int>);
}
|
/* AUTHOR: julianferres, dom 16 ago 2020 11:56:03 -03 */
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef vector<ll> vi;
typedef pair<ll,ll> ii;
typedef vector<ii> vii; typedef vector<bool> vb;
#define FIN ios::sync_with_stdio(0);cout.tie(0)
#define forr(i, a, b) for(int i = (a); i < (int) (b); i++)
#define forn(i, n) forr(i, 0, n)
#define pb push_back
#define mp make_pair
#define all(c) (c).begin(),(c).end()
#define DBG(x) cerr << #x << " = " << (x) << endl
#define show(v,n) cerr << #v << " = "; forn(i,n) cerr << v[i] << " "; cerr << endl;
#define esta(x,c) ((c).find(x) != (c).end())
#define RAYA cerr << "===============================" << endl
const ll INF = 1LL<<60;
const int MOD = 1e9+7; // 998244353
const int MAXN = 2e5+5;
// Reflect every element across the maximum of the vector:
//   v[i] <- max(v) - v[i]
// Afterwards all values are non-negative and the minimum becomes 0. Applying
// the operation twice yields max(v) - min(v) shifted values, so it alternates
// with period 2 (which main() relies on).
// An empty vector is left unchanged (the original's manual-max loop was also
// a no-op in that case; std::max_element needs the explicit guard).
void apply(vi &v) {
    if (v.empty()) return;
    const ll maxi = *max_element(v.begin(), v.end());
    for (ll &x : v) x = maxi - x;
}
// Reads t test cases of (n, k, array). The operation v[i] <- max(v) - v[i]
// alternates with period 2 after the first application, so instead of
// performing k operations we apply it once and then use the parity of the
// remaining k-1 to decide whether a second application is needed.
int main()
{
    FIN;  // fast I/O: unsync C streams, untie cout
    int t;
    cin >> t;
    while(t--) {
        int n; ll k;
        cin >> n >> k;
        vector <ll> v(n);
        forn(i,n) cin >> v[i];
        apply(v); k--;  // one operation done; k-1 remain
        if(k%2 == 0) {
            // Remaining operations cancel pairwise: state after 1 application.
            for(ll u : v) cout << u << " "; cout << "\n";
        }
        else {
            // Odd remainder: equivalent to one more application.
            apply(v);
            for(ll u : v) cout << u << " "; cout << "\n";
        }
    }
    return 0;
}
|
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#include "vm/heap/scavenger.h"
#include "vm/dart.h"
#include "vm/dart_api_state.h"
#include "vm/flag_list.h"
#include "vm/heap/become.h"
#include "vm/heap/pointer_block.h"
#include "vm/heap/safepoint.h"
#include "vm/heap/verifier.h"
#include "vm/heap/weak_table.h"
#include "vm/isolate.h"
#include "vm/lockers.h"
#include "vm/object.h"
#include "vm/object_id_ring.h"
#include "vm/object_set.h"
#include "vm/stack_frame.h"
#include "vm/thread_barrier.h"
#include "vm/thread_registry.h"
#include "vm/timeline.h"
#include "vm/visitor.h"
namespace dart {
// Tunables controlling when the scavenger promotes survivors and how the new
// generation grows; each flag's help string documents its meaning.
DEFINE_FLAG(int,
            early_tenuring_threshold,
            66,
            "When more than this percentage of promotion candidates survive, "
            "promote all survivors of next scavenge.");
DEFINE_FLAG(int,
            new_gen_garbage_threshold,
            90,
            "Grow new gen when less than this percentage is garbage.");
DEFINE_FLAG(int, new_gen_growth_factor, 2, "Grow new gen by this factor.");
// Scavenger uses ObjectLayout::kMarkBit to distinguish forwarded and
// non-forwarded objects. The kMarkBit does not intersect with the target
// address because of object alignment.
enum {
  // Single tag bit reused from the mark bit; it never overlaps a target
  // address because objects are aligned (see comment above).
  kForwardingMask = 1 << ObjectLayout::kOldAndNotMarkedBit,
  kNotForwarded = 0,
  kForwarded = kForwardingMask,
};
// True if 'header' is an installed forwarding pointer rather than an object's
// original header word.
static inline bool IsForwarding(uword header) {
  const uword tag = header & kForwardingMask;
  // The masked bit is all-or-nothing: either no tag or the full tag.
  ASSERT((tag == kNotForwarded) || (tag == kForwarded));
  return tag != kNotForwarded;
}
// Recovers the target address from a forwarding header by stripping the tag
// bit. Only valid once IsForwarding(header) holds.
static inline uword ForwardedAddr(uword header) {
  ASSERT(IsForwarding(header));
  return header & ~kForwardingMask;
}
// Encodes 'target' as a forwarding header: the tagged value written over the
// old object's header word. Inverse of ForwardedAddr.
static inline uword ForwardingHeader(uword target) {
  // Make sure forwarding can be encoded: alignment keeps the tag bit clear.
  ASSERT((target & kForwardingMask) == 0);
  return target | kForwarded;
}
// Races: The first word in the copied region is a header word that may be
// updated by the scavenger worker in another thread, so we might copy either
// the original object header or an installed forwarding pointer. This race is
// harmless because if we copy the installed forwarding pointer, the scavenge
// worker in the current thread will abandon this copy. We do not mark the loads
// here as relaxed so the C++ compiler still has the freedom to reorder them.
NO_SANITIZE_THREAD
static inline void objcpy(void* dst, const void* src, size_t size) {
  // A memcopy specialized for objects. We can assume:
  //  - dst and src do not overlap
  ASSERT(
      (reinterpret_cast<uword>(dst) + size <= reinterpret_cast<uword>(src)) ||
      (reinterpret_cast<uword>(src) + size <= reinterpret_cast<uword>(dst)));
  //  - dst and src are word aligned
  ASSERT(Utils::IsAligned(reinterpret_cast<uword>(dst), sizeof(uword)));
  ASSERT(Utils::IsAligned(reinterpret_cast<uword>(src), sizeof(uword)));
  //  - size is strictly positive
  ASSERT(size > 0);
  //  - size is a multiple of double words
  ASSERT(Utils::IsAligned(size, 2 * sizeof(uword)));
  uword* __restrict dst_cursor = reinterpret_cast<uword*>(dst);
  const uword* __restrict src_cursor = reinterpret_cast<const uword*>(src);
  // Copy two words per iteration; the double-word-multiple precondition above
  // guarantees the loop terminates exactly at 'size'.
  do {
    uword a = *src_cursor++;
    uword b = *src_cursor++;
    *dst_cursor++ = a;
    *dst_cursor++ = b;
    size -= (2 * sizeof(uword));
  } while (size > 0);
}
// Copies/promotes objects reachable from visited pointers during a scavenge.
// Instantiated as SerialScavengerVisitor (parallel=false) and
// ParallelScavengerVisitor (parallel=true); the template parameter selects
// whether forwarding pointers are installed with a CAS or a plain store.
template <bool parallel>
class ScavengerVisitorBase : public ObjectPointerVisitor {
 public:
  explicit ScavengerVisitorBase(IsolateGroup* isolate_group,
                                Scavenger* scavenger,
                                SemiSpace* from,
                                FreeList* freelist,
                                PromotionStack* promotion_stack)
      : ObjectPointerVisitor(isolate_group),
        thread_(nullptr),
        scavenger_(scavenger),
        from_(from),
        page_space_(scavenger->heap_->old_space()),
        freelist_(freelist),
        bytes_promoted_(0),
        visiting_old_object_(nullptr),
        promoted_list_(promotion_stack),
        labs_(8) {
    ASSERT(labs_.length() == 0);
    // Sentinel LAB at index 0 keeps labs_[producer_index_] valid before the
    // first AddNewTLAB call.
    labs_.Add({0, 0, 0});
    ASSERT(labs_.length() == 1);
  }

  virtual void VisitTypedDataViewPointers(TypedDataViewPtr view,
                                          ObjectPtr* first,
                                          ObjectPtr* last) {
    // First we forward all fields of the typed data view.
    VisitPointers(first, last);

    // A null data_ marks an uninitialized view; offset/length must agree.
    if (view->ptr()->data_ == nullptr) {
      ASSERT(RawSmiValue(view->ptr()->offset_in_bytes_) == 0 &&
             RawSmiValue(view->ptr()->length_) == 0);
      return;
    }

    // Validate 'this' is a typed data view.
    const uword view_header =
        *reinterpret_cast<uword*>(ObjectLayout::ToAddr(view));
    ASSERT(!IsForwarding(view_header) || view->IsOldObject());
    ASSERT(IsTypedDataViewClassId(view->GetClassIdMayBeSmi()));

    // Validate that the backing store is not a forwarding word.
    TypedDataBasePtr td = view->ptr()->typed_data_;
    ASSERT(td->IsHeapObject());
    const uword td_header = *reinterpret_cast<uword*>(ObjectLayout::ToAddr(td));
    ASSERT(!IsForwarding(td_header) || td->IsOldObject());

    // We can always obtain the class id from the forwarded backing store.
    const classid_t cid = td->GetClassId();

    // If we have external typed data we can simply return since the backing
    // store lives in C-heap and will not move.
    if (IsExternalTypedDataClassId(cid)) {
      return;
    }

    // Now we update the inner pointer: the backing store may have moved, so
    // the view's cached data pointer must be recomputed.
    ASSERT(IsTypedDataClassId(cid));
    view->ptr()->RecomputeDataFieldForInternalTypedData();
  }

  virtual void VisitPointers(ObjectPtr* first, ObjectPtr* last) {
    ASSERT(Utils::IsAligned(first, sizeof(*first)));
    ASSERT(Utils::IsAligned(last, sizeof(*last)));
    // Note: 'last' is inclusive.
    for (ObjectPtr* current = first; current <= last; current++) {
      ScavengePointer(current);
    }
  }

  // Sets the old-space object whose slots are currently being visited (or
  // nullptr); used by UpdateStoreBuffer to maintain the remembered set.
  void VisitingOldObject(ObjectPtr obj) {
    ASSERT((obj == nullptr) || obj->IsOldObject());
    visiting_old_object_ = obj;
    if (obj != nullptr) {
      // Card update happens in HeapPage::VisitRememberedCards.
      ASSERT(!obj->ptr()->IsCardRemembered());
    }
  }

  intptr_t bytes_promoted() const { return bytes_promoted_; }

  // Registers a fresh to-space allocation buffer and makes it the producer.
  void AddNewTLAB(uword top, uword end) {
    producer_index_++;
    ScavengerLAB lab;
    lab.top = top;
    lab.end = end;
    lab.resolved_top = top;
    labs_.Add(lab);
  }

  void ProcessRoots() {
    thread_ = Thread::Current();
    page_space_->AcquireLock(freelist_);
    scavenger_->IterateRoots(this);
  }

  void ProcessSurvivors() {
    // Iterate until all work has been drained.
    do {
      ProcessToSpace();
      ProcessPromotedList();
    } while (HasWork());
  }

  void ProcessAll() {
    do {
      ProcessSurvivors();
      ProcessWeakProperties();
    } while (HasWork());
  }

  inline void ProcessWeakProperties();

  bool HasWork() {
    // N.B.: Normally if any TLABs have things left to resolve, then the
    // TLAB we are allocating from (producer_index_) will too because we
    // always immediately allocate when we switch to a new TLAB. However,
    // this first allocation may be undone if we lose the race to install
    // the forwarding pointer, so we must also check that there aren't
    // any TLABs after the resolution cursor.
    return (consumer_index_ < producer_index_) ||
           (labs_[producer_index_].top !=
            labs_[producer_index_].resolved_top) ||
           !promoted_list_.IsEmpty();
  }

  void Finalize() {
    ASSERT(!HasWork());
    // All TLABs must be fully resolved before the scavenge can complete.
    for (intptr_t i = 0; i <= producer_index_; i++) {
      ASSERT(labs_[i].top <= labs_[i].end);
      ASSERT(labs_[i].resolved_top == labs_[i].top);
    }
    MakeProducerTLABIterable();
    promoted_list_.Finalize();
    MournWeakProperties();
    page_space_->ReleaseLock(freelist_);
    thread_ = nullptr;
  }

  void DonateTLABs() {
    MutexLocker ml(&scavenger_->space_lock_);
    // NOTE: We could make all [labs_] re-usable after a scavenge if we remember
    // the promotion pointer of each TLAB.
    const auto& lab = labs_[producer_index_];
    // Only the bump pointer's current frontier can be returned to the space.
    if (lab.end == scavenger_->top_) {
      scavenger_->top_ = lab.top;
    }
  }

 private:
  // Records visiting_old_object_ in the store buffer when a new-space pointer
  // was just written into it.
  void UpdateStoreBuffer(ObjectPtr* p, ObjectPtr obj) {
    ASSERT(obj->IsHeapObject());
    // If the newly written object is not a new object, drop it immediately.
    if (!obj->IsNewObject() || visiting_old_object_->ptr()->IsRemembered()) {
      return;
    }
    visiting_old_object_->ptr()->SetRememberedBit();
    thread_->StoreBufferAddObjectGC(visiting_old_object_);
  }

  DART_FORCE_INLINE
  void ScavengePointer(ObjectPtr* p) {
    // ScavengePointer cannot be called recursively.
    ObjectPtr raw_obj = *p;

    // Smis and old objects are never moved by a scavenge.
    if (raw_obj->IsSmiOrOldObject()) {
      return;
    }

    uword raw_addr = ObjectLayout::ToAddr(raw_obj);
    // The scavenger is only expects objects located in the from space.
    ASSERT(from_->Contains(raw_addr));
    // Read the header word of the object and determine if the object has
    // already been copied.
    uword header = reinterpret_cast<std::atomic<uword>*>(raw_addr)->load(
        std::memory_order_relaxed);
    uword new_addr = 0;
    if (IsForwarding(header)) {
      // Get the new location of the object.
      new_addr = ForwardedAddr(header);
    } else {
      intptr_t size = raw_obj->ptr()->HeapSize(header);
      // Check whether object should be promoted.
      if (raw_addr >= scavenger_->survivor_end_) {
        // Not a survivor of a previous scavenge. Just copy the object into the
        // to space.
        new_addr = TryAllocateCopy(size);
      }
      if (new_addr == 0) {
        // This object is a survivor of a previous scavenge. Attempt to promote
        // the object. (Or, unlikely, to-space was exhausted by fragmentation.)
        new_addr = page_space_->TryAllocatePromoLocked(freelist_, size);
        if (LIKELY(new_addr != 0)) {
          // If promotion succeeded then we need to remember it so that it can
          // be traversed later.
          promoted_list_.Push(ObjectLayout::FromAddr(new_addr));
          bytes_promoted_ += size;
        } else {
          // Promotion did not succeed. Copy into the to space instead.
          scavenger_->failed_to_promote_ = true;
          new_addr = TryAllocateCopy(size);
          // To-space was exhausted by fragmentation and old-space could not
          // grow.
          if (UNLIKELY(new_addr == 0)) {
            FATAL("Failed to allocate during scavenge");
          }
        }
      }
      ASSERT(new_addr != 0);
      // Copy the object to the new location.
      objcpy(reinterpret_cast<void*>(new_addr),
             reinterpret_cast<void*>(raw_addr), size);

      ObjectPtr new_obj = ObjectLayout::FromAddr(new_addr);
      if (new_obj->IsOldObject()) {
        // Promoted: update age/barrier tags.
        uint32_t tags = static_cast<uint32_t>(header);
        tags = ObjectLayout::OldBit::update(true, tags);
        tags = ObjectLayout::OldAndNotRememberedBit::update(true, tags);
        tags = ObjectLayout::NewBit::update(false, tags);
        // Setting the forwarding pointer below will make this tenured object
        // visible to the concurrent marker, but we haven't visited its slots
        // yet. We mark the object here to prevent the concurrent marker from
        // adding it to the mark stack and visiting its unprocessed slots. We
        // push it to the mark stack after forwarding its slots.
        tags = ObjectLayout::OldAndNotMarkedBit::update(!thread_->is_marking(),
                                                        tags);
        new_obj->ptr()->tags_ = tags;
      } else {
        ASSERT(scavenger_->to_->Contains(new_addr));
      }

      intptr_t cid = ObjectLayout::ClassIdTag::decode(header);
      if (IsTypedDataClassId(cid)) {
        // Internal typed data caches a pointer into its own payload; the copy
        // invalidated it.
        static_cast<TypedDataPtr>(new_obj)->ptr()->RecomputeDataField();
      }

      // Try to install forwarding address.
      uword forwarding_header = ForwardingHeader(new_addr);
      if (!InstallForwardingPointer(raw_addr, &header, forwarding_header)) {
        // Lost the race against another worker: abandon our copy.
        ASSERT(IsForwarding(header));
        if (new_obj->IsOldObject()) {
          // Abandon as a free list element.
          FreeListElement::AsElement(new_addr, size);
          bytes_promoted_ -= size;
        } else {
          // Undo to-space allocation.
          ASSERT(labs_[producer_index_].top == (new_addr + size));
          labs_[producer_index_].top = new_addr;
        }
        // Use the winner's forwarding target.
        new_addr = ForwardedAddr(header);
        if (ObjectLayout::FromAddr(new_addr)->IsNewObject()) {
          ASSERT(scavenger_->to_->Contains(new_addr));
        }
      }
    }

    // Update the reference.
    ObjectPtr new_obj = ObjectLayout::FromAddr(new_addr);
    if (!new_obj->IsNewObject()) {
      // Setting the mark bit above must not be ordered after a publishing store
      // of this object. Note this could be a publishing store even if the
      // object was promoted by an early invocation of ScavengePointer. Compare
      // Object::Allocate.
      reinterpret_cast<std::atomic<ObjectPtr>*>(p)->store(
          new_obj, std::memory_order_release);
    } else {
      ASSERT(scavenger_->to_->Contains(ObjectLayout::ToAddr(new_obj)));
      *p = new_obj;
    }

    // Update the store buffer as needed.
    if (visiting_old_object_ != nullptr) {
      UpdateStoreBuffer(p, new_obj);
    }
  }

  DART_FORCE_INLINE
  bool InstallForwardingPointer(uword addr,
                                uword* old_header,
                                uword new_header) {
    if (parallel) {
      // On failure, compare_exchange_strong stores the winner's header into
      // *old_header, which the caller uses to find the winner's target.
      return reinterpret_cast<std::atomic<uword>*>(addr)
          ->compare_exchange_strong(*old_header, new_header,
                                    std::memory_order_relaxed);
    } else {
      *reinterpret_cast<uword*>(addr) = new_header;
      return true;
    }
  }

  DART_FORCE_INLINE
  uword TryAllocateCopy(intptr_t size) {
    ASSERT(Utils::IsAligned(size, kObjectAlignment));
    // Bump-pointer allocation out of the current producer TLAB; returns 0 on
    // exhaustion (slow path acquires a new TLAB).
    ScavengerLAB& lab = labs_[producer_index_];
    uword result = lab.top;
    uword new_top = result + size;
    if (LIKELY(new_top <= lab.end)) {
      ASSERT(scavenger_->to_->Contains(result));
      ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
      lab.top = new_top;
      ASSERT((scavenger_->to_->Contains(new_top)) ||
             (new_top == scavenger_->to_->end()));
      return result;
    }
    return TryAllocateCopySlow(size);
  }

  DART_NOINLINE inline uword TryAllocateCopySlow(intptr_t size);

  // Turns the unused tail of the producer TLAB into a dummy object so the
  // semispace can be walked linearly.
  void MakeProducerTLABIterable() {
    uword top = labs_[producer_index_].top;
    uword end = labs_[producer_index_].end;
    intptr_t size = end - top;
    if (size != 0) {
      ASSERT(Utils::IsAligned(size, kObjectAlignment));
      ForwardingCorpse::AsForwarder(top, size);
      ASSERT(ObjectLayout::FromAddr(top)->ptr()->HeapSize() == size);
    }
  }

  inline void ProcessToSpace();
  DART_FORCE_INLINE intptr_t ProcessCopied(ObjectPtr raw_obj);
  inline void ProcessPromotedList();
  inline void EnqueueWeakProperty(WeakPropertyPtr raw_weak);
  inline void MournWeakProperties();

  Thread* thread_;
  Scavenger* scavenger_;
  SemiSpace* from_;
  PageSpace* page_space_;
  FreeList* freelist_;
  intptr_t bytes_promoted_;
  ObjectPtr visiting_old_object_;
  PromotionWorkList promoted_list_;
  WeakPropertyPtr delayed_weak_properties_ = nullptr;

  // A to-space allocation buffer: [top, end) is free; [resolved_top, top) has
  // been allocated but its objects' slots have not yet been visited.
  struct ScavengerLAB {
    uword top;
    uword end;
    uword resolved_top;
  };
  MallocGrowableArray<ScavengerLAB> labs_;
  // Resolution cursor (consumer) and allocation cursor (producer) into labs_.
  intptr_t consumer_index_ = 1;
  intptr_t producer_index_ = 0;

  DISALLOW_COPY_AND_ASSIGN(ScavengerVisitorBase);
};
// The serial visitor runs on a single thread; the parallel visitor
// coordinates multiple workers via atomic forwarding-pointer installation.
typedef ScavengerVisitorBase<false> SerialScavengerVisitor;
typedef ScavengerVisitorBase<true> ParallelScavengerVisitor;
// Visits weak persistent handles after copying: handles whose targets died
// are finalized, surviving ones have their stored pointer updated (via the
// forwarding side effect of Scavenger::IsUnreachable).
class ScavengerWeakVisitor : public HandleVisitor {
public:
ScavengerWeakVisitor(Thread* thread, Scavenger* scavenger)
    : HandleVisitor(thread),
      scavenger_(scavenger),
      class_table_(thread->isolate_group()->shared_class_table()) {
ASSERT(scavenger->heap_->isolate_group() == thread->isolate_group());
}
// Decides the fate of one handle. IsUnreachable also rewrites the handle's
// slot to the forwarded address when the target survived.
void VisitHandle(uword addr) {
FinalizablePersistentHandle* handle =
    reinterpret_cast<FinalizablePersistentHandle*>(addr);
ObjectPtr* p = handle->raw_addr();
if (scavenger_->IsUnreachable(p)) {
handle->UpdateUnreachable(thread()->isolate_group());
} else {
handle->UpdateRelocated(thread()->isolate_group());
}
}
private:
Scavenger* scavenger_;
// NOTE(review): class_table_ is initialized but never read within this
// class as visible here -- confirm whether it is still needed.
SharedClassTable* class_table_;
DISALLOW_COPY_AND_ASSIGN(ScavengerWeakVisitor);
};
// A thread-pool task running one worker of a parallel scavenge. Workers
// rendezvous on |barrier_| and use the shared |num_busy_| counter to detect
// global termination of the copying phase (no worker has work left).
class ParallelScavengerTask : public ThreadPool::Task {
public:
ParallelScavengerTask(IsolateGroup* isolate_group,
                      ThreadBarrier* barrier,
                      ParallelScavengerVisitor* visitor,
                      RelaxedAtomic<uintptr_t>* num_busy)
    : isolate_group_(isolate_group),
      barrier_(barrier),
      visitor_(visitor),
      num_busy_(num_busy) {}
// Enters the isolate group as a helper thread, runs the scavenge, and
// signals the barrier on exit.
virtual void Run() {
bool result = Thread::EnterIsolateGroupAsHelper(
    isolate_group_, Thread::kScavengerTask, /*bypass_safepoint=*/true);
ASSERT(result);
RunEnteredIsolateGroup();
Thread::ExitIsolateGroupAsHelper(/*bypass_safepoint=*/true);
// This task is done. Notify the original thread.
barrier_->Exit();
}
void RunEnteredIsolateGroup() {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ParallelScavenge");
visitor_->ProcessRoots();
// Phase 1: Copying.
bool more_to_scavenge = false;
do {
do {
visitor_->ProcessSurvivors();
// I can't find more work right now. If no other task is busy,
// then there will never be more work (NB: 1 is *before* decrement).
if (num_busy_->fetch_sub(1u) == 1) break;
// Wait for some work to appear.
// TODO(iposva): Replace busy-waiting with a solution using Monitor,
// and redraw the boundaries between stack/visitor/task as needed.
while (!visitor_->HasWork() && num_busy_->load() > 0) {
}
// If no tasks are busy, there will never be more work.
if (num_busy_->load() == 0) break;
// I saw some work; get busy and compete for it.
num_busy_->fetch_add(1u);
} while (true);
// Wait for all scavengers to stop.
barrier_->Sync();
#if defined(DEBUG)
ASSERT(num_busy_->load() == 0);
// Caveat: must not allow any scavenger to continue past the barrier
// before we checked num_busy, otherwise one of them might rush
// ahead and increment it.
barrier_->Sync();
#endif
// Check if we have any pending weak properties whose keys were copied.
// Those might have been forwarded by another scavenger.
visitor_->ProcessWeakProperties();
more_to_scavenge = visitor_->HasWork();
if (more_to_scavenge) {
// We have more work to do. Notify others.
num_busy_->fetch_add(1u);
}
// Wait for all other scavengers to finish processing their pending
// weak properties and decide if they need to continue scavenging.
// Caveat: we need two barriers here to make this decision in lock step
// between all scavengers and the main thread.
barrier_->Sync();
if (!more_to_scavenge && (num_busy_->load() > 0)) {
// All scavengers continue as long as any single worker has some work
// to do.
num_busy_->fetch_add(1u);
more_to_scavenge = true;
}
barrier_->Sync();
} while (more_to_scavenge);
// Phase 2: Weak processing, statistics.
visitor_->Finalize();
barrier_->Sync();
}
private:
IsolateGroup* isolate_group_;
ThreadBarrier* barrier_;
ParallelScavengerVisitor* visitor_;
RelaxedAtomic<uintptr_t>* num_busy_;
DISALLOW_COPY_AND_ASSIGN(ParallelScavengerTask);
};
// Wraps |reserved| as a semispace. A null reservation produces an empty
// semispace (used for the zero-size case).
SemiSpace::SemiSpace(VirtualMemory* reserved)
    : reserved_(reserved), region_(nullptr, 0) {
  if (reserved_ != nullptr) {
    region_ = MemoryRegion(reserved_->address(), reserved_->size());
  }
}
// Releases the underlying reservation (if any).
SemiSpace::~SemiSpace() {
delete reserved_;
}
// Guards cache_. Created once in Init and never destroyed.
Mutex* SemiSpace::mutex_ = NULL;
// A single recently-retired semispace kept for reuse across scavenges.
SemiSpace* SemiSpace::cache_ = NULL;
// Lazily creates the cache mutex. Safe to call repeatedly during startup.
void SemiSpace::Init() {
  if (mutex_ == nullptr) {
    mutex_ = new Mutex();
  }
  ASSERT(mutex_ != nullptr);
}
void SemiSpace::Cleanup() {
MutexLocker locker(mutex_);
delete cache_;
cache_ = NULL;
}
// Returns a semispace of |size_in_words|, reusing the cached semispace when
// its size matches exactly. A request for 0 words yields an empty
// (reservation-less) semispace. Returns nullptr if the OS allocation fails.
SemiSpace* SemiSpace::New(intptr_t size_in_words, const char* name) {
SemiSpace* result = nullptr;
{
MutexLocker locker(mutex_);
// TODO(koda): Cache one entry per size.
if (cache_ != nullptr && cache_->size_in_words() == size_in_words) {
result = cache_;
cache_ = nullptr;
}
}
if (result != nullptr) {
#ifdef DEBUG
// Delete() protected the cached space; make it writable again.
result->reserved_->Protect(VirtualMemory::kReadWrite);
#endif
// Initialized by generated code.
MSAN_UNPOISON(result->reserved_->address(), size_in_words << kWordSizeLog2);
return result;
}
if (size_in_words == 0) {
return new SemiSpace(nullptr);
} else {
intptr_t size_in_bytes = size_in_words << kWordSizeLog2;
const bool kExecutable = false;
VirtualMemory* memory =
    VirtualMemory::Allocate(size_in_bytes, kExecutable, name);
if (memory == nullptr) {
// TODO(koda): If cache_ is not empty, we could try to delete it.
return nullptr;
}
#if defined(DEBUG)
memset(memory->address(), Heap::kZapByte, size_in_bytes);
#endif  // defined(DEBUG)
// Initialized by generated code.
MSAN_UNPOISON(memory->address(), size_in_bytes);
return new SemiSpace(memory);
}
}
// Retires this semispace into the one-entry cache, freeing whatever was
// cached before. The memory is zapped/protected/poisoned so stale use is
// caught in debug and MSAN builds.
void SemiSpace::Delete() {
if (reserved_ != nullptr) {
const intptr_t size_in_bytes = size_in_words() << kWordSizeLog2;
#ifdef DEBUG
memset(reserved_->address(), Heap::kZapByte, size_in_bytes);
reserved_->Protect(VirtualMemory::kNoAccess);
#endif
MSAN_POISON(reserved_->address(), size_in_bytes);
}
SemiSpace* old_cache = nullptr;
{
MutexLocker locker(mutex_);
old_cache = cache_;
cache_ = this;
}
// TODO(rmacnak): This can take an order of magnitude longer than the rest
// of a scavenge. Consider moving it to another thread, perhaps the idle
// notifier.
delete old_cache;
}
// Toggles memory protection on the reservation; a no-op for an empty space.
void SemiSpace::WriteProtect(bool read_only) {
  if (reserved_ == nullptr) {
    return;
  }
  const auto protection =
      read_only ? VirtualMemory::kReadOnly : VirtualMemory::kReadWrite;
  reserved_->Protect(protection);
}
// The initial estimate of how many words we can scavenge per microsecond (usage
// before / scavenge time). This is a conservative value observed running
// Flutter on a Nexus 4. After the first scavenge, we instead use a value based
// on the device's actual speed.
static const intptr_t kConservativeInitialScavengeSpeed = 40;
// Creates the scavenger with an initial to-space of
// min(max_semi_capacity, FLAG_new_gen_semi_initial_size). Aborts with OOM if
// the initial semispace cannot be reserved.
Scavenger::Scavenger(Heap* heap, intptr_t max_semi_capacity_in_words)
    : heap_(heap),
      max_semi_capacity_in_words_(max_semi_capacity_in_words),
      scavenging_(false),
      gc_time_micros_(0),
      collections_(0),
      scavenge_words_per_micro_(kConservativeInitialScavengeSpeed),
      idle_scavenge_threshold_in_words_(0),
      external_size_(0),
      failed_to_promote_(false) {
// Verify assumptions about the first word in objects which the scavenger is
// going to use for forwarding pointers.
ASSERT(Object::tags_offset() == 0);
// Set initial semi space size in words.
const intptr_t initial_semi_capacity_in_words = Utils::Minimum(
    max_semi_capacity_in_words, FLAG_new_gen_semi_initial_size * MBInWords);
const char* name = Heap::RegionName(Heap::kNew);
to_ = SemiSpace::New(initial_semi_capacity_in_words, name);
if (to_ == NULL) {
OUT_OF_MEMORY();
}
// Setup local fields.
top_ = FirstObjectStart();
resolved_top_ = top_;
end_ = to_->end();
survivor_end_ = FirstObjectStart();
idle_scavenge_threshold_in_words_ = initial_semi_capacity_in_words;
UpdateMaxHeapCapacity();
UpdateMaxHeapUsage();
}
// Must not run while a scavenge is in progress; returns the active to-space
// to the SemiSpace cache (or frees it).
Scavenger::~Scavenger() {
ASSERT(!scavenging_);
to_->Delete();
}
// Chooses the size of the next to-space. The space grows (by
// FLAG_new_gen_growth_factor, capped at the configured maximum) only when
// the last scavenge expects little garbage; otherwise the size is kept.
intptr_t Scavenger::NewSizeInWords(intptr_t old_size_in_words) const {
  if (stats_history_.Size() == 0) {
    return old_size_in_words;  // No history yet; keep the current size.
  }
  const double expected_garbage =
      stats_history_.Get(0).ExpectedGarbageFraction();
  if (expected_garbage >= (FLAG_new_gen_garbage_threshold / 100.0)) {
    return old_size_in_words;  // Enough garbage; no need to grow.
  }
  return Utils::Minimum(max_semi_capacity_in_words_,
                        old_size_in_words * FLAG_new_gen_growth_factor);
}
class CollectStoreBufferVisitor : public ObjectPointerVisitor {
public:
explicit CollectStoreBufferVisitor(ObjectSet* in_store_buffer)
: ObjectPointerVisitor(IsolateGroup::Current()),
in_store_buffer_(in_store_buffer) {}
void VisitPointers(ObjectPtr* from, ObjectPtr* to) {
for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
ObjectPtr raw_obj = *ptr;
RELEASE_ASSERT(!raw_obj->ptr()->IsCardRemembered());
RELEASE_ASSERT(raw_obj->ptr()->IsRemembered());
RELEASE_ASSERT(raw_obj->IsOldObject());
in_store_buffer_->Add(raw_obj);
}
}
private:
ObjectSet* const in_store_buffer_;
};
// Walks every old-space object and checks the remembered-set invariant:
// a non-card-remembered old object carries the remembered bit iff it is in
// the store buffer, and any old->new pointer must come from a remembered
// object. Used only under --verify_store_buffer.
class CheckStoreBufferVisitor : public ObjectVisitor,
                                public ObjectPointerVisitor {
 public:
  CheckStoreBufferVisitor(ObjectSet* in_store_buffer, const SemiSpace* to)
      : ObjectVisitor(),
        ObjectPointerVisitor(IsolateGroup::Current()),
        in_store_buffer_(in_store_buffer),
        to_(to) {}

  void VisitObject(ObjectPtr raw_obj) {
    if (raw_obj->IsPseudoObject()) return;
    RELEASE_ASSERT(raw_obj->IsOldObject());
    if (raw_obj->ptr()->IsCardRemembered()) {
      // Card-remembered objects are tracked by the card table, not the
      // store buffer, and must not also carry the remembered bit.
      RELEASE_ASSERT(!raw_obj->ptr()->IsRemembered());
      // TODO(rmacnak): Verify card tables.
      return;
    }
    RELEASE_ASSERT(raw_obj->ptr()->IsRemembered() ==
                   in_store_buffer_->Contains(raw_obj));
    visiting_ = raw_obj;
    is_remembered_ = raw_obj->ptr()->IsRemembered();
    raw_obj->ptr()->VisitPointers(this);
  }

  void VisitPointers(ObjectPtr* from, ObjectPtr* to) {
    for (ObjectPtr* ptr = from; ptr <= to; ptr++) {
      ObjectPtr raw_obj = *ptr;
      if (raw_obj->IsHeapObject() && raw_obj->IsNewObject()) {
        if (!is_remembered_) {
          // Fixed: a space is required after the first address so the two
          // values do not run together in the error output.
          FATAL3(
              "Old object %#" Px " references new object %#" Px
              ", but it is not"
              " in any store buffer. Consider using rr to watch the slot %p and"
              " reverse-continue to find the store with a missing barrier.\n",
              static_cast<uword>(visiting_), static_cast<uword>(raw_obj), ptr);
        }
        RELEASE_ASSERT(to_->Contains(ObjectLayout::ToAddr(raw_obj)));
      }
    }
  }

 private:
  const ObjectSet* const in_store_buffer_;
  const SemiSpace* const to_;
  ObjectPtr visiting_;      // Old-space object currently being scanned.
  bool is_remembered_;      // Whether visiting_ is in the store buffer.
};
void Scavenger::VerifyStoreBuffers() {
Thread* thread = Thread::Current();
StackZone stack_zone(thread);
Zone* zone = stack_zone.GetZone();
ObjectSet* in_store_buffer = new (zone) ObjectSet(zone);
heap_->AddRegionsToObjectSet(in_store_buffer);
{
CollectStoreBufferVisitor visitor(in_store_buffer);
heap_->isolate_group()->store_buffer()->VisitObjectPointers(&visitor);
}
{
CheckStoreBufferVisitor visitor(in_store_buffer, to_);
heap_->old_space()->VisitObjects(&visitor);
}
}
// Starts a scavenge: stashes the consolidated store-buffer blocks (before
// any worker can add to the new remembered set), flips the semispaces, and
// allocates a fresh to-space. Returns the retired space to evacuate from.
SemiSpace* Scavenger::Prologue() {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Prologue");
heap_->isolate_group()->ReleaseStoreBuffers();
if (FLAG_verify_store_buffer) {
OS::PrintErr("Verifying remembered set before Scavenge...");
heap_->WaitForSweeperTasksAtSafepoint(Thread::Current());
VerifyStoreBuffers();
OS::PrintErr(" done.\n");
}
// Need to stash the old remembered set before any worker begins adding to the
// new remembered set.
blocks_ = heap_->isolate_group()->store_buffer()->TakeBlocks();
// Flip the two semi-spaces so that to_ is always the space for allocating
// objects.
SemiSpace* from = to_;
const char* name = Heap::RegionName(Heap::kNew);
to_ = SemiSpace::New(NewSizeInWords(from->size_in_words()), name);
if (to_ == NULL) {
// TODO(koda): We could try to recover (collect old space, wait for another
// isolate to finish scavenge, etc.).
OUT_OF_MEMORY();
}
UpdateMaxHeapCapacity();
{
// Reset the allocation pointers into the new to-space under the lock that
// guards TLAB handout.
MutexLocker ml(&space_lock_);
top_ = FirstObjectStart();
resolved_top_ = top_;
end_ = to_->end();
}
return from;
}
// Finishes a scavenge: decides the early-tenuring policy for the next cycle,
// re-estimates scavenge speed and the idle-scavenge threshold, optionally
// re-verifies the store buffer, and frees the evacuated from-space.
void Scavenger::Epilogue(SemiSpace* from) {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "Epilogue");
// All objects in the to space have been copied from the from space at this
// moment.
// Ensure every mutator thread's TLAB was abandoned, so the next allocation
// will fail and force the mutator to request a new TLAB.
heap_->isolate_group()->ForEachIsolate(
    [&](Isolate* isolate) {
      Thread* mutator_thread = isolate->mutator_thread();
      ASSERT(mutator_thread == nullptr ||
             mutator_thread->tlab().IsAbandoned());
    },
    /*at_safepoint=*/true);
double avg_frac = stats_history_.Get(0).PromoCandidatesSuccessFraction();
if (stats_history_.Size() >= 2) {
// Previous scavenge is only given half as much weight.
avg_frac += 0.5 * stats_history_.Get(1).PromoCandidatesSuccessFraction();
avg_frac /= 1.0 + 0.5;  // Normalize.
}
if (avg_frac < (FLAG_early_tenuring_threshold / 100.0)) {
// Remember the limit to which objects have been copied.
survivor_end_ = top_;
} else {
// Move survivor end to the end of the to_ space, making all surviving
// objects candidates for promotion next time.
survivor_end_ = end_;
}
// Update estimate of scavenger speed. This statistic assumes survivorship
// rates don't change much.
intptr_t history_used = 0;
intptr_t history_micros = 0;
ASSERT(stats_history_.Size() > 0);
for (intptr_t i = 0; i < stats_history_.Size(); i++) {
history_used += stats_history_.Get(i).UsedBeforeInWords();
history_micros += stats_history_.Get(i).DurationMicros();
}
if (history_micros == 0) {
history_micros = 1;  // Avoid division by zero below.
}
scavenge_words_per_micro_ = history_used / history_micros;
if (scavenge_words_per_micro_ == 0) {
scavenge_words_per_micro_ = 1;  // Keep the speed estimate positive.
}
// Update amount of new-space we must allocate before performing an idle
// scavenge. This is based on the amount of work we expect to be able to
// complete in a typical idle period.
intptr_t average_idle_task_micros = 6000;
idle_scavenge_threshold_in_words_ =
    scavenge_words_per_micro_ * average_idle_task_micros;
// Even if the scavenge speed is slow, make sure we don't scavenge too
// frequently, which just wastes power and falsely increases the promotion
// rate.
intptr_t lower_bound = 512 * KBInWords;
if (idle_scavenge_threshold_in_words_ < lower_bound) {
idle_scavenge_threshold_in_words_ = lower_bound;
}
// Even if the scavenge speed is very high, make sure we start considering
// idle scavenges before new space is full to avoid requiring a scavenge in
// the middle of a frame.
intptr_t upper_bound = 8 * CapacityInWords() / 10;
if (idle_scavenge_threshold_in_words_ > upper_bound) {
idle_scavenge_threshold_in_words_ = upper_bound;
}
if (FLAG_verify_store_buffer) {
// Scavenging will insert into the store buffer block on the current
// thread (later, with parallel scavenge, on the workers' threads). We need
// to flush this thread-local block to the isolate group or we will
// incorrectly report some objects as absent from the store buffer. This
// might cause a program to hit a store buffer overflow a bit sooner than
// it might otherwise, since overflow is measured in blocks. Store buffer
// overflows are very rare.
heap_->isolate_group()->ReleaseStoreBuffers();
OS::PrintErr("Verifying remembered set after Scavenge...");
heap_->WaitForSweeperTasksAtSafepoint(Thread::Current());
VerifyStoreBuffers();
OS::PrintErr(" done.\n");
}
from->Delete();
UpdateMaxHeapUsage();
if (heap_ != NULL) {
heap_->UpdateGlobalMaxUsed();
}
}
// Returns true when an idle scavenge is (a) warranted by the amount of
// new-space usage and (b) estimated to complete before |deadline|.
bool Scavenger::ShouldPerformIdleScavenge(int64_t deadline) {
  // To make a consistent decision, we should not yield for a safepoint in
  // the middle of deciding whether to perform an idle GC.
  NoSafepointScope no_safepoint;

  // TODO(rmacnak): Investigate collecting a history of idle period durations.
  const intptr_t used_in_words = UsedInWords();
  if (used_in_words < idle_scavenge_threshold_in_words_) {
    return false;  // Not enough allocation yet to be worth a scavenge.
  }
  const int64_t estimated_completion =
      OS::GetCurrentMonotonicMicros() +
      used_in_words / scavenge_words_per_micro_;
  return estimated_completion <= deadline;
}
// Visits all object-pointer roots of the isolate group (stacks are not
// re-validated here).
void Scavenger::IterateIsolateRoots(ObjectPointerVisitor* visitor) {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateIsolateRoots");
heap_->isolate_group()->VisitObjectPointers(
    visitor, ValidationPolicy::kDontValidateFrames);
}
// Visits the slots of every old-space object recorded in the store buffer
// (i.e. objects that may hold old->new pointers). The blocks were stashed in
// blocks_ by Prologue; emptied blocks are recycled back to the store buffer.
template <bool parallel>
void Scavenger::IterateStoreBuffers(ScavengerVisitorBase<parallel>* visitor) {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateStoreBuffers");
// Iterating through the store buffers.
// Grab the deduplication sets out of the isolate's consolidated store buffer.
StoreBuffer* store_buffer = heap_->isolate_group()->store_buffer();
StoreBufferBlock* pending = blocks_;
blocks_ = nullptr;
intptr_t total_count = 0;
while (pending != NULL) {
StoreBufferBlock* next = pending->next();
// Generated code appends to store buffers; tell MemorySanitizer.
MSAN_UNPOISON(pending, sizeof(*pending));
intptr_t count = pending->Count();
total_count += count;
while (!pending->IsEmpty()) {
ObjectPtr raw_object = pending->Pop();
ASSERT(!raw_object->IsForwardingCorpse());
ASSERT(raw_object->ptr()->IsRemembered());
raw_object->ptr()->ClearRememberedBit();
visitor->VisitingOldObject(raw_object);
// Note that this treats old-space WeakProperties as strong. A dead key
// won't be reclaimed until after the key is promoted.
raw_object->ptr()->VisitPointersNonvirtual(visitor);
}
pending->Reset();
// Return the emptied block for recycling (no need to check threshold).
store_buffer->PushBlock(pending, StoreBuffer::kIgnoreThreshold);
pending = next;
}
// Done iterating through old objects remembered in the store buffers.
visitor->VisitingOldObject(NULL);
heap_->RecordData(kStoreBufferEntries, total_count);
heap_->RecordData(kDataUnused1, 0);
heap_->RecordData(kDataUnused2, 0);
}
// Visits the slots of old-space objects tracked via card tables (large
// arrays), the card-marking counterpart of IterateStoreBuffers.
template <bool parallel>
void Scavenger::IterateRememberedCards(
    ScavengerVisitorBase<parallel>* visitor) {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateRememberedCards");
heap_->old_space()->VisitRememberedCards(visitor);
visitor->VisitingOldObject(NULL);
}
// Visits the service-protocol object-id ring of each isolate so those
// entries survive/are forwarded too. Compiled out in product mode.
void Scavenger::IterateObjectIdTable(ObjectPointerVisitor* visitor) {
#ifndef PRODUCT
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "IterateObjectIdTable");
heap_->isolate_group()->ForEachIsolate(
    [&](Isolate* isolate) {
      isolate->object_id_ring()->VisitPointers(visitor);
    },
    /*at_safepoint=*/true);
#endif  // !PRODUCT
}
// Partitions of the root set, claimed one at a time by workers in
// Scavenger::IterateRoots.
enum RootSlices {
kIsolate = 0,
kObjectIdRing,
kCards,
kStoreBuffer,
kNumRootSlices,
};
// Work-claiming loop over the root slices: each fetch_add claims exactly one
// slice, so several workers can partition root iteration without locking.
template <bool parallel>
void Scavenger::IterateRoots(ScavengerVisitorBase<parallel>* visitor) {
  while (true) {
    const intptr_t slice = root_slices_started_.fetch_add(1);
    if (slice >= kNumRootSlices) {
      break;  // Every slice has been claimed.
    }
    switch (slice) {
      case kIsolate:
        IterateIsolateRoots(visitor);
        break;
      case kObjectIdRing:
        IterateObjectIdTable(visitor);
        break;
      case kCards:
        IterateRememberedCards(visitor);
        break;
      case kStoreBuffer:
        IterateStoreBuffers(visitor);
        break;
      default:
        UNREACHABLE();
    }
  }
}
// Returns true if *p refers to a new-space object that did not survive the
// scavenge. As a side effect, rewrites *p to the forwarded location when the
// object did survive.
bool Scavenger::IsUnreachable(ObjectPtr* p) {
  ObjectPtr obj = *p;
  if (!obj->IsHeapObject() || !obj->IsNewObject()) {
    return false;  // Immediates and old-space objects are not scavenged.
  }
  const uword addr = ObjectLayout::ToAddr(obj);
  if (to_->Contains(addr)) {
    return false;  // Already resides in the surviving to-space.
  }
  const uword header = *reinterpret_cast<uword*>(addr);
  if (!IsForwarding(header)) {
    return true;  // Never copied: the object is dead.
  }
  // Survived: redirect the slot to the object's new home.
  *p = ObjectLayout::FromAddr(ForwardedAddr(header));
  return false;
}
// Updates weak persistent handles after copying: surviving targets get their
// pointers forwarded, dead targets trigger finalization.
void Scavenger::MournWeakHandles() {
Thread* thread = Thread::Current();
TIMELINE_FUNCTION_GC_DURATION(thread, "MournWeakHandles");
ScavengerWeakVisitor weak_visitor(thread, this);
heap_->isolate_group()->VisitWeakPersistentHandles(&weak_visitor);
}
// Scans objects copied into the to-space LABs, resuming where the previous
// scan stopped (resolved_top). Scanning can copy more objects into the
// producer LAB, so its top is re-read every iteration and the producer LAB
// is never advanced past.
template <bool parallel>
void ScavengerVisitorBase<parallel>::ProcessToSpace() {
intptr_t i = consumer_index_;
while (i <= producer_index_) {
uword resolved_top = labs_[i].resolved_top;
while (resolved_top < labs_[i].top) {
ObjectPtr raw_obj = ObjectLayout::FromAddr(resolved_top);
resolved_top += ProcessCopied(raw_obj);
}
labs_[i].resolved_top = resolved_top;
if (i == producer_index_) {
return;  // More objects may yet be copied to this TLAB.
}
i++;
consumer_index_ = i;
ASSERT(consumer_index_ < labs_.length());
}
}
// Drains the promotion work list: scans the slots of each object that was
// promoted to old space (which may copy/promote more objects), and hands
// promoted-but-marked objects to the concurrent marker.
template <bool parallel>
void ScavengerVisitorBase<parallel>::ProcessPromotedList() {
ObjectPtr raw_object;
while ((raw_object = promoted_list_.Pop()) != nullptr) {
// Resolve or copy all objects referred to by the current object. This
// can potentially push more objects on this stack as well as add more
// objects to be resolved in the to space.
ASSERT(!raw_object->ptr()->IsRemembered());
VisitingOldObject(raw_object);
raw_object->ptr()->VisitPointersNonvirtual(this);
if (raw_object->ptr()->IsMarked()) {
// Complete our promise from ScavengePointer. Note that marker cannot
// visit this object until it pops a block from the mark stack, which
// involves a memory fence from the mutex, so even on architectures
// with a relaxed memory model, the marker will see the fully
// forwarded contents of this object.
thread_->MarkingStackAddObject(raw_object);
}
}
VisitingOldObject(NULL);
}
// Re-examines deferred weak properties: properties whose key has now been
// forwarded are scanned (treating their value strongly, possibly creating
// more copy work); properties whose key is still unforwarded are re-queued.
template <bool parallel>
void ScavengerVisitorBase<parallel>::ProcessWeakProperties() {
// Finished this round of scavenging. Process the pending weak properties
// for which the keys have become reachable. Potentially this adds more
// objects to the to space.
WeakPropertyPtr cur_weak = delayed_weak_properties_;
delayed_weak_properties_ = nullptr;
while (cur_weak != nullptr) {
uword next_weak = cur_weak->ptr()->next_;
// Promoted weak properties are not enqueued. So we can guarantee that
// we do not need to think about store barriers here.
ASSERT(cur_weak->IsNewObject());
ObjectPtr raw_key = cur_weak->ptr()->key_;
ASSERT(raw_key->IsHeapObject());
// Key still points into from space even if the object has been
// promoted to old space by now. The key will be updated accordingly
// below when VisitPointers is run.
ASSERT(raw_key->IsNewObject());
uword raw_addr = ObjectLayout::ToAddr(raw_key);
ASSERT(from_->Contains(raw_addr));
uword header = *reinterpret_cast<uword*>(raw_addr);
// Reset the next pointer in the weak property.
cur_weak->ptr()->next_ = 0;
if (IsForwarding(header)) {
// Key survived: visit the property's slots normally.
cur_weak->ptr()->VisitPointersNonvirtual(this);
} else {
// Key not (yet) reachable: defer again for the next round.
EnqueueWeakProperty(cur_weak);
}
// Advance to next weak property in the queue.
cur_weak = static_cast<WeakPropertyPtr>(next_weak);
}
}
// Publishes the current to-space capacity to the heap metrics. Compiled out
// in product builds.
void Scavenger::UpdateMaxHeapCapacity() {
#if !defined(PRODUCT)
  if (heap_ == nullptr) {
    return;  // Some unit tests run without a heap.
  }
  ASSERT(to_ != nullptr);
  ASSERT(heap_ != nullptr);
  auto* const isolate_group = heap_->isolate_group();
  ASSERT(isolate_group != nullptr);
  isolate_group->GetHeapNewCapacityMaxMetric()->SetValue(to_->size_in_words() *
                                                         kWordSize);
#endif  // !defined(PRODUCT)
}
// Publishes the current new-space usage to the heap metrics. Compiled out in
// product builds.
void Scavenger::UpdateMaxHeapUsage() {
#if !defined(PRODUCT)
  if (heap_ == nullptr) {
    return;  // Some unit tests run without a heap.
  }
  ASSERT(to_ != nullptr);
  ASSERT(heap_ != nullptr);
  auto* const isolate_group = heap_->isolate_group();
  ASSERT(isolate_group != nullptr);
  isolate_group->GetHeapNewUsedMaxMetric()->SetValue(UsedInWords() * kWordSize);
#endif  // !defined(PRODUCT)
}
// Prepends |raw_weak| to the intrusive delayed-weak-properties list; it will
// be reconsidered once more keys have been forwarded (ProcessWeakProperties)
// or cleared at the end (MournWeakProperties).
template <bool parallel>
void ScavengerVisitorBase<parallel>::EnqueueWeakProperty(
    WeakPropertyPtr raw_weak) {
ASSERT(raw_weak->IsHeapObject());
ASSERT(raw_weak->IsNewObject());
ASSERT(raw_weak->IsWeakProperty());
#if defined(DEBUG)
uword raw_addr = ObjectLayout::ToAddr(raw_weak);
uword header = *reinterpret_cast<uword*>(raw_addr);
ASSERT(!IsForwarding(header));
#endif  // defined(DEBUG)
ASSERT(raw_weak->ptr()->next_ == 0);
raw_weak->ptr()->next_ = static_cast<uword>(delayed_weak_properties_);
delayed_weak_properties_ = raw_weak;
}
// Scans one freshly copied object and returns its heap size (so the caller
// can advance resolved_top). Weak properties whose key has not been
// forwarded yet are deferred instead of scanned.
template <bool parallel>
intptr_t ScavengerVisitorBase<parallel>::ProcessCopied(ObjectPtr raw_obj) {
intptr_t class_id = raw_obj->GetClassId();
if (UNLIKELY(class_id == kWeakPropertyCid)) {
WeakPropertyPtr raw_weak = static_cast<WeakPropertyPtr>(raw_obj);
// The fate of the weak property is determined by its key.
ObjectPtr raw_key = raw_weak->ptr()->key_;
if (raw_key->IsHeapObject() && raw_key->IsNewObject()) {
uword raw_addr = ObjectLayout::ToAddr(raw_key);
uword header = *reinterpret_cast<uword*>(raw_addr);
if (!IsForwarding(header)) {
// Key is white. Enqueue the weak property.
EnqueueWeakProperty(raw_weak);
return raw_weak->ptr()->HeapSize();
}
}
// Key is gray or black. Make the weak property black.
}
return raw_obj->ptr()->VisitPointersNonvirtual(this);
}
// Rebuilds the new-space weak tables after a scavenge: entries for surviving
// objects are carried over to the appropriate (new or old) table under the
// forwarded address; entries for dead objects are dropped with the table.
void Scavenger::MournWeakTables() {
TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "MournWeakTables");
auto rehash_weak_table = [](WeakTable* table, WeakTable* replacement_new,
                            WeakTable* replacement_old) {
intptr_t size = table->size();
for (intptr_t i = 0; i < size; i++) {
if (table->IsValidEntryAtExclusive(i)) {
ObjectPtr raw_obj = table->ObjectAtExclusive(i);
ASSERT(raw_obj->IsHeapObject());
uword raw_addr = ObjectLayout::ToAddr(raw_obj);
uword header = *reinterpret_cast<uword*>(raw_addr);
if (IsForwarding(header)) {
// The object has survived. Preserve its record.
uword new_addr = ForwardedAddr(header);
raw_obj = ObjectLayout::FromAddr(new_addr);
auto replacement =
    raw_obj->IsNewObject() ? replacement_new : replacement_old;
replacement->SetValueExclusive(raw_obj, table->ValueAtExclusive(i));
}
}
}
};
// Rehash the weak tables now that we know which objects survive this cycle.
for (int sel = 0; sel < Heap::kNumWeakSelectors; sel++) {
const auto selector = static_cast<Heap::WeakSelector>(sel);
auto table = heap_->GetWeakTable(Heap::kNew, selector);
auto table_old = heap_->GetWeakTable(Heap::kOld, selector);
// Create a new weak table for the new-space.
auto table_new = WeakTable::NewFrom(table);
rehash_weak_table(table, table_new, table_old);
heap_->SetWeakTable(Heap::kNew, selector, table_new);
// Remove the old table as it has been replaced with the newly allocated
// table above.
delete table;
}
// Each isolate might have a weak table used for fast snapshot writing (i.e.
// isolate communication). Rehash those tables if need be.
heap_->isolate_group()->ForEachIsolate(
    [&](Isolate* isolate) {
      auto table = isolate->forward_table_new();
      if (table != nullptr) {
        auto replacement = WeakTable::NewFrom(table);
        rehash_weak_table(table, replacement, isolate->forward_table_old());
        isolate->set_forward_table_new(replacement);
      }
    },
    /*at_safepoint=*/true);
}
// Called once no more copy work exists: every still-queued weak property has
// an unreachable key, so its key and value fields are cleared.
template <bool parallel>
void ScavengerVisitorBase<parallel>::MournWeakProperties() {
// The queued weak properties at this point do not refer to reachable keys,
// so we clear their key and value fields.
WeakPropertyPtr cur_weak = delayed_weak_properties_;
delayed_weak_properties_ = nullptr;
while (cur_weak != nullptr) {
uword next_weak = cur_weak->ptr()->next_;
// Reset the next pointer in the weak property.
cur_weak->ptr()->next_ = 0;
#if defined(DEBUG)
ObjectPtr raw_key = cur_weak->ptr()->key_;
uword raw_addr = ObjectLayout::ToAddr(raw_key);
uword header = *reinterpret_cast<uword*>(raw_addr);
ASSERT(!IsForwarding(header));
ASSERT(raw_key->IsHeapObject());
ASSERT(raw_key->IsNewObject());  // Key still points into from space.
#endif  // defined(DEBUG)
WeakProperty::Clear(cur_weak);
// Advance to next weak property in the queue.
cur_weak = static_cast<WeakPropertyPtr>(next_weak);
}
}
// Fills the unused portions of all live and cached TLABs with filler
// objects so new space can be walked linearly.
void Scavenger::MakeNewSpaceIterable() {
ASSERT(Thread::Current()->IsAtSafepoint() ||
       (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
       (Thread::Current()->task_kind() == Thread::kCompactorTask));
auto isolate_group = heap_->isolate_group();
MonitorLocker ml(isolate_group->threads_lock(), false);
// Make all scheduled thread's TLABs iterable.
Thread* current = heap_->isolate_group()->thread_registry()->active_list();
while (current != NULL) {
const TLAB tlab = current->tlab();
if (!tlab.IsAbandoned()) {
MakeTLABIterable(tlab);
}
current = current->next();
}
// All unscheduled mutator threads should have already abandoned their TLABs.
isolate_group->ForEachIsolate(
    [&](Isolate* isolate) {
      Thread* mutator_thread = isolate->mutator_thread();
      if (mutator_thread != NULL) {
        if (isolate->scheduled_mutator_thread_ == nullptr) {
          RELEASE_ASSERT(mutator_thread->tlab().IsAbandoned());
        }
      }
    },
    /*at_safepoint=*/true);
// Cached (free) TLABs also contain unallocated gaps that must be filled.
for (intptr_t i = 0; i < free_tlabs_.length(); ++i) {
MakeTLABIterable(free_tlabs_[i]);
}
}
// Detaches every thread's TLAB and empties the free-TLAB cache, accounting
// the unused bytes as abandoned. Used at the start of a scavenge so no
// thread keeps allocating into the old to-space.
void Scavenger::AbandonTLABsLocked() {
ASSERT(Thread::Current()->IsAtSafepoint());
IsolateGroup* isolate_group = heap_->isolate_group();
MonitorLocker ml(isolate_group->threads_lock(), false);
// Abandon TLABs of all scheduled threads.
Thread* current = isolate_group->thread_registry()->active_list();
while (current != NULL) {
const TLAB tlab = current->tlab();
AddAbandonedInBytesLocked(tlab.RemainingSize());
current->set_tlab(TLAB());
current = current->next();
}
// All unscheduled mutator threads should have already abandoned their TLAB.
isolate_group->ForEachIsolate(
    [&](Isolate* isolate) {
      Thread* mutator_thread = isolate->mutator_thread();
      if (mutator_thread != NULL) {
        if (isolate->scheduled_mutator_thread_ == nullptr) {
          RELEASE_ASSERT(mutator_thread->tlab().IsAbandoned());
        }
      }
    },
    /*at_safepoint=*/true);
// Drain and account for the cached free TLABs as well.
while (free_tlabs_.length() > 0) {
const TLAB tlab = free_tlabs_.RemoveLast();
AddAbandonedInBytesLocked(tlab.RemainingSize());
}
}
// Applies |visitor| to every pointer slot of every object in new space.
// Requires new space to be parseable, so TLAB gaps are filled first.
void Scavenger::VisitObjectPointers(ObjectPointerVisitor* visitor) {
  ASSERT(Thread::Current()->IsAtSafepoint() ||
         (Thread::Current()->task_kind() == Thread::kMarkerTask) ||
         (Thread::Current()->task_kind() == Thread::kCompactorTask));
  MakeNewSpaceIterable();
  for (uword cur = FirstObjectStart(); cur < top_;) {
    ObjectPtr obj = ObjectLayout::FromAddr(cur);
    cur += obj->ptr()->VisitPointers(visitor);
  }
}
// Applies |visitor| to every object in new space, in address order.
// Requires new space to be parseable, so TLAB gaps are filled first.
void Scavenger::VisitObjects(ObjectVisitor* visitor) {
  ASSERT(Thread::Current()->IsAtSafepoint() ||
         (Thread::Current()->task_kind() == Thread::kMarkerTask));
  MakeNewSpaceIterable();
  for (uword cur = FirstObjectStart(); cur < top_;) {
    ObjectPtr obj = ObjectLayout::FromAddr(cur);
    visitor->VisitObject(obj);
    cur += obj->ptr()->HeapSize();
  }
}
// Registers the to-space address range so |set| can contain new-space
// objects.
void Scavenger::AddRegionsToObjectSet(ObjectSet* set) const {
set->AddRegion(to_->start(), to_->end());
}
// Linearly searches new space for an object accepted by |visitor|, pruning
// via VisitRange. Returns null if no object matches. Must not run during a
// scavenge.
ObjectPtr Scavenger::FindObject(FindObjectVisitor* visitor) {
ASSERT(!scavenging_);
MakeNewSpaceIterable();
uword cur = FirstObjectStart();
if (visitor->VisitRange(cur, top_)) {
while (cur < top_) {
ObjectPtr raw_obj = ObjectLayout::FromAddr(cur);
uword next = cur + raw_obj->ptr()->HeapSize();
if (visitor->VisitRange(cur, next) &&
    raw_obj->ptr()->FindObject(visitor)) {
return raw_obj;  // Found object, return it.
}
cur = next;
}
ASSERT(cur == top_);
}
return Object::null();
}
// Hands |thread| a fresh TLAB carved from the unused portion of to-space; if
// to-space is exhausted, falls back to a cached TLAB (possibly empty).
void Scavenger::TryAllocateNewTLAB(Thread* thread) {
ASSERT(heap_ != Dart::vm_isolate()->heap());
ASSERT(!scavenging_);
MutexLocker ml(&space_lock_);
// We might need a new TLAB not because the current TLAB is empty but because
// we failed to allocate a large object in new space. So in case the
// remaining TLAB is still big enough to be useful we cache it.
CacheTLABLocked(thread->tlab());
thread->set_tlab(TLAB());
uword result = top_;
intptr_t remaining = end_ - top_;
intptr_t size = kTLABSize;
if (remaining < size) {
// Grab whatever is remaining
size = Utils::RoundDown(remaining, kObjectAlignment);
}
ASSERT(Utils::IsAligned(size, kObjectAlignment));
if (size == 0) {
// To-space is exhausted; reuse a cached TLAB if one is available.
thread->set_tlab(TryAcquireCachedTLABLocked());
return;
}
ASSERT(to_->Contains(result));
ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
top_ += size;
ASSERT(to_->Contains(top_) || (top_ == to_->end()));
ASSERT(result < top_);
thread->set_tlab(TLAB(result, top_));
}
void Scavenger::MakeTLABIterable(const TLAB& tlab) {
ASSERT(tlab.end >= tlab.top);
const intptr_t size = tlab.RemainingSize();
ASSERT(Utils::IsAligned(size, kObjectAlignment));
if (size >= kObjectAlignment) {
// ForwardingCorpse(forwarding to default null) will work as filler.
ForwardingCorpse::AsForwarder(tlab.top, size);
ASSERT(ObjectLayout::FromAddr(tlab.top)->ptr()->HeapSize() == size);
}
}
// Debug-only helper: gives up the rest of |thread|'s TLAB, making it
// iterable and accounting the unused bytes as abandoned.
void Scavenger::AbandonRemainingTLABForDebugging(Thread* thread) {
  MutexLocker ml(&space_lock_);
  const TLAB tlab = thread->tlab();
  MakeTLABIterable(tlab);
  AddAbandonedInBytesLocked(tlab.RemainingSize());
  thread->set_tlab(TLAB());
}
// Slow path for copying an object during scavenge: the current producer
// TLAB is full, so acquire a new one and bump-allocate |size| bytes from
// it. Returns 0 when to-space is exhausted.
template <bool parallel>
uword ScavengerVisitorBase<parallel>::TryAllocateCopySlow(intptr_t size) {
  MakeProducerTLABIterable();
  if (!scavenger_->TryAllocateNewTLAB(this)) {
    return 0;
  }
  const uword result = labs_[producer_index_].top;
  const intptr_t remaining =
      labs_[producer_index_].end - labs_[producer_index_].top;
  ASSERT(size <= remaining);
  ASSERT(scavenger_->to_->Contains(result));
  ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
  labs_[producer_index_].top = result + size;
  return result;
}
// Carves a new TLAB out of to-space for a scavenger visitor. Only valid
// while a scavenge is in progress; takes the space lock. Returns false
// when to-space is exhausted.
template <bool parallel>
bool Scavenger::TryAllocateNewTLAB(ScavengerVisitorBase<parallel>* visitor) {
  intptr_t size = kTLABSize;
  ASSERT(Utils::IsAligned(size, kObjectAlignment));
  ASSERT(heap_ != Dart::vm_isolate()->heap());
  ASSERT(scavenging_);
  MutexLocker ml(&space_lock_);
  const uword result = top_;
  const intptr_t remaining = end_ - top_;
  if (remaining < size) {
    // Grab whatever is remaining
    size = Utils::RoundDown(remaining, kObjectAlignment);
  }
  if (size == 0) {
    return false;
  }
  ASSERT(to_->Contains(result));
  ASSERT((result & kObjectAlignmentMask) == kNewObjectAlignmentOffset);
  top_ += size;
  ASSERT(to_->Contains(top_) || (top_ == to_->end()));
  ASSERT(result < top_);
  visitor->AddNewTLAB(result, top_);
  return true;
}
// Hands out a previously cached TLAB when one is available; otherwise
// returns an empty TLAB. Caller must hold space_lock_.
TLAB Scavenger::TryAcquireCachedTLABLocked() {
  if (free_tlabs_.length() != 0) {
    return free_tlabs_.RemoveLast();
  }
  return TLAB();
}
// Retires |tlab|, either by merging it back into the bump-allocation area,
// caching it for reuse, or abandoning its remaining bytes. Caller must
// hold space_lock_.
void Scavenger::CacheTLABLocked(TLAB tlab) {
  // If the memory following this TLAB is the unused new space, we'll merge the
  // bytes into there.
  if (tlab.end == top_) {
    top_ = tlab.top;
    return;
  }
  MakeTLABIterable(tlab);
  // If this TLAB is large enough to be useful in the future, we'll make it
  // reusable, otherwise we abandon it.
  const uword size = tlab.RemainingSize();
  if (size > (50 * KB)) {
    free_tlabs_.Add(tlab);
    return;
  }
  // Else we discard the memory.
  AddAbandonedInBytesLocked(size);
}
// Performs a full new-space collection: stops all mutator threads at a
// safepoint, copies live objects to the other semispace (promoting
// survivors of a prior scavenge to old space), then records statistics.
void Scavenger::Scavenge() {
  int64_t start = OS::GetCurrentMonotonicMicros();
  // Ensure that all threads for this isolate are at a safepoint (either stopped
  // or in native code). If two threads are racing at this point, the loser
  // will continue with its scavenge after waiting for the winner to complete.
  // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
  // logic to avoid needless collections.
  Thread* thread = Thread::Current();
  SafepointOperationScope safepoint_scope(thread);
  int64_t safe_point = OS::GetCurrentMonotonicMicros();
  heap_->RecordTime(kSafePoint, safe_point - start);
  // Scavenging is not reentrant. Make sure that is the case.
  ASSERT(!scavenging_);
  scavenging_ = true;
  if (FLAG_verify_before_gc) {
    OS::PrintErr("Verifying before Scavenge...");
    heap_->WaitForSweeperTasksAtSafepoint(thread);
    heap_->VerifyGC(thread->is_marking() ? kAllowMarked : kForbidMarked);
    OS::PrintErr(" done.\n");
  }
  // Prepare for a scavenge.
  AbandonTLABsLocked();
  failed_to_promote_ = false;
  root_slices_started_ = 0;
  intptr_t abandoned_bytes = GetAndResetAbandonedInBytes();
  SpaceUsage usage_before = GetCurrentUsage();
  // Objects below survivor_end_ survived the previous scavenge and are
  // candidates for promotion to old space this time around.
  intptr_t promo_candidate_words =
      (survivor_end_ - FirstObjectStart()) / kWordSize;
  SemiSpace* from = Prologue();
  intptr_t bytes_promoted;
  if (FLAG_scavenger_tasks == 0) {
    bytes_promoted = SerialScavenge(from);
  } else {
    bytes_promoted = ParallelScavenge(from);
  }
  MournWeakHandles();
  MournWeakTables();
  // Restore write-barrier assumptions.
  heap_->isolate_group()->RememberLiveTemporaries();
  // Scavenge finished. Run accounting.
  int64_t end = OS::GetCurrentMonotonicMicros();
  stats_history_.Add(ScavengeStats(
      start, end, usage_before, GetCurrentUsage(), promo_candidate_words,
      bytes_promoted >> kWordSizeLog2, abandoned_bytes >> kWordSizeLog2));
  Epilogue(from);
  if (FLAG_verify_after_gc) {
    OS::PrintErr("Verifying after Scavenge...");
    heap_->WaitForSweeperTasksAtSafepoint(thread);
    heap_->VerifyGC(thread->is_marking() ? kAllowMarked : kForbidMarked);
    OS::PrintErr(" done.\n");
  }
  // Done scavenging. Reset the marker.
  ASSERT(scavenging_);
  scavenging_ = false;
}
// Single-threaded scavenge: one visitor processes the roots and then the
// whole to-space. Returns the number of bytes promoted to old space.
intptr_t Scavenger::SerialScavenge(SemiSpace* from) {
  FreeList* freelist = heap_->old_space()->DataFreeList(0);
  SerialScavengerVisitor visitor(heap_->isolate_group(), this, from, freelist,
                                 &promotion_stack_);
  visitor.ProcessRoots();
  {
    TIMELINE_FUNCTION_GC_DURATION(Thread::Current(), "ProcessToSpace");
    visitor.ProcessAll();
  }
  visitor.Finalize();
  visitor.DonateTLABs();
  return visitor.bytes_promoted();
}
// Multi-threaded scavenge: FLAG_scavenger_tasks visitors run concurrently,
// each with its own old-space freelist to avoid contention. The calling
// thread acts as the last worker. Returns total bytes promoted.
intptr_t Scavenger::ParallelScavenge(SemiSpace* from) {
  intptr_t bytes_promoted = 0;
  const intptr_t num_tasks = FLAG_scavenger_tasks;
  ASSERT(num_tasks > 0);
  ThreadBarrier barrier(num_tasks, heap_->barrier(), heap_->barrier_done());
  RelaxedAtomic<uintptr_t> num_busy = num_tasks;
  ParallelScavengerVisitor** visitors =
      new ParallelScavengerVisitor*[num_tasks];
  for (intptr_t i = 0; i < num_tasks; i++) {
    FreeList* freelist = heap_->old_space()->DataFreeList(i);
    visitors[i] = new ParallelScavengerVisitor(
        heap_->isolate_group(), this, from, freelist, &promotion_stack_);
    if (i < (num_tasks - 1)) {
      // Begin scavenging on a helper thread.
      bool result = Dart::thread_pool()->Run<ParallelScavengerTask>(
          heap_->isolate_group(), &barrier, visitors[i], &num_busy);
      ASSERT(result);
    } else {
      // Last worker is the main thread.
      ParallelScavengerTask task(heap_->isolate_group(), &barrier, visitors[i],
                                 &num_busy);
      task.RunEnteredIsolateGroup();
      barrier.Exit();
    }
  }
  // All workers have finished once the main thread passes barrier.Exit();
  // aggregate their results and free the visitors.
  for (intptr_t i = 0; i < num_tasks; i++) {
    bytes_promoted += visitors[i]->bytes_promoted();
    visitors[i]->DonateTLABs();
    delete visitors[i];
  }
  delete[] visitors;
  return bytes_promoted;
}
// Toggles write protection on the active semispace. Must not be called
// while a scavenge is in progress.
void Scavenger::WriteProtect(bool read_only) {
  ASSERT(!scavenging_);
  to_->WriteProtect(read_only);
}
#ifndef PRODUCT
// Emits service-protocol statistics for the new space ("HeapSpace" JSON
// object): usage, capacity, external size, and collection timing.
void Scavenger::PrintToJSONObject(JSONObject* object) const {
  auto isolate_group = IsolateGroup::Current();
  ASSERT(isolate_group != nullptr);
  JSONObject space(object, "new");
  space.AddProperty("type", "HeapSpace");
  space.AddProperty("name", "new");
  space.AddProperty("vmName", "Scavenger");
  space.AddProperty("collections", collections());
  if (collections() > 0) {
    // Average wall-clock time between collections over the group's uptime.
    int64_t run_time = isolate_group->UptimeMicros();
    run_time = Utils::Maximum(run_time, static_cast<int64_t>(0));
    double run_time_millis = MicrosecondsToMilliseconds(run_time);
    double avg_time_between_collections =
        run_time_millis / static_cast<double>(collections());
    space.AddProperty("avgCollectionPeriodMillis",
                      avg_time_between_collections);
  } else {
    space.AddProperty("avgCollectionPeriodMillis", 0.0);
  }
  space.AddProperty64("used", UsedInWords() * kWordSize);
  space.AddProperty64("capacity", CapacityInWords() * kWordSize);
  space.AddProperty64("external", ExternalInWords() * kWordSize);
  space.AddProperty("time", MicrosecondsToSeconds(gc_time_micros()));
}
#endif  // !PRODUCT
// Accounts |size| bytes of external (malloc'd) memory as associated with
// new space. |cid| is unused here; it is part of the shared interface with
// other spaces — TODO confirm against the declaration site.
void Scavenger::AllocateExternal(intptr_t cid, intptr_t size) {
  ASSERT(size >= 0);
  external_size_ += size;
}
// Removes |size| bytes from the external-memory accounting; the running
// total must never go negative.
void Scavenger::FreeExternal(intptr_t size) {
  ASSERT(size >= 0);
  external_size_ -= size;
  ASSERT(external_size_ >= 0);
}
// Empties the new space by forcing a scavenge that treats every object as
// a promotion candidate.
void Scavenger::Evacuate() {
  // We need a safepoint here to prevent allocation right before or right after
  // the scavenge.
  // The former can introduce an object that we might fail to collect.
  // The latter means even if the scavenge promotes every object in the new
  // space, the new allocation means the space is not empty,
  // causing the assertion below to fail.
  SafepointOperationScope scope(Thread::Current());
  // Forces the next scavenge to promote all the objects in the new space.
  survivor_end_ = top_;
  Scavenge();
  // It is possible for objects to stay in the new space
  // if the VM cannot create more pages for these objects.
  ASSERT((UsedInWords() == 0) || failed_to_promote_);
}
} // namespace dart
|
/*
-----------------------------------------------------------------------------
This source file is part of OGRE
(Object-oriented Graphics Rendering Engine)
For the latest info, see http://www.ogre3d.org/
Copyright (c) 2000-2014 Torus Knot Software Ltd
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-----------------------------------------------------------------------------
*/
#include "OgreRoot.h"
#include "OgreException.h"
#include "OgreLogManager.h"
#include "OgreStringConverter.h"
#include "OgreWindowEventUtilities.h"
#include "OgreGLRenderSystemCommon.h"
#include "OgreAndroidEGLSupport.h"
#include "OgreAndroidEGLWindow.h"
#include "OgreViewport.h"
#include <android/native_window.h>
#include <iostream>
#include <algorithm>
#include <climits>
namespace Ogre {
    // Constructs an Android EGL window with default buffer-size limits and
    // anti-aliasing disabled; actual values are supplied later via the
    // miscParams of create().
    AndroidEGLWindow::AndroidEGLWindow(AndroidEGLSupport *glsupport)
        : EGLWindow(glsupport),
          mMaxBufferSize(32),
          mMinBufferSize(16),
          mMaxDepthSize(16),
          mMaxStencilSize(0),
          mMSAA(0),
          mCSAA(0),
          mPreserveContext(false)
    {
    }
void AndroidEGLWindow::getLeftAndTopFromNativeWindow( int & left, int & top, uint width, uint height )
{
// We don't have a native window.... but I think all android windows are origined
left = top = 0;
}
    // No-op on Android: the native window is created and owned by the OS and
    // handed to us through miscParams ("externalWindowHandle").
    void AndroidEGLWindow::initNativeCreatedWindow(const NameValuePairList *miscParams)
    {
    }
    // No-op on Android: windows cannot be created by the application; the OS
    // supplies the ANativeWindow.
    void AndroidEGLWindow::createNativeWindow( int &left, int &top, uint &width, uint &height, String &title )
    {
    }
    // No-op: window repositioning is not supported on Android.
    void AndroidEGLWindow::reposition( int left, int top )
    {
    }
    // No-op: the surface size is controlled by the OS; see
    // windowMovedOrResized() for how dimensions are picked up.
    void AndroidEGLWindow::resize(uint width, uint height)
    {
    }
void AndroidEGLWindow::windowMovedOrResized()
{
if(mActive)
{
// When using GPU rendering for Android UI the os creates a context in the main thread
// Now we have 2 choices create OGRE in its own thread or set our context current before doing
// anything else. I put this code here because this function called before any rendering is done.
// Because the events for screen rotation / resizing did not worked on all devices it is the best way
// to query the correct dimensions.
mContext->setCurrent();
eglQuerySurface(mEglDisplay, mEglSurface, EGL_WIDTH, (EGLint*)&mWidth);
eglQuerySurface(mEglDisplay, mEglSurface, EGL_HEIGHT, (EGLint*)&mHeight);
// Notify viewports of resize
ViewportList::iterator it = mViewportList.begin();
while( it != mViewportList.end() )
(*it++).second->_updateDimensions();
}
}
    // No-op: Android windows are effectively always fullscreen.
    void AndroidEGLWindow::switchFullScreen(bool fullscreen)
    {
    }
    // Creates the render window: parses miscParams (external window handle,
    // buffer-size limits, MSAA/CSAA, context options), resolves an EGLConfig,
    // creates the EGL surface/context, and queries the final surface size.
    void AndroidEGLWindow::create(const String& name, uint width, uint height,
                               bool fullScreen, const NameValuePairList *miscParams)
    {
        mName = name;
        mWidth = width;
        mHeight = height;
        mLeft = 0;
        mTop = 0;
        mIsFullScreen = fullScreen;
        void* eglContext = NULL;
        AConfiguration* config = NULL;
        bool preserveContextOpt = false;
        if (miscParams)
        {
            NameValuePairList::const_iterator opt;
            NameValuePairList::const_iterator end = miscParams->end();
            // Adopt an already-current external GL context if requested.
            if ((opt = miscParams->find("currentGLContext")) != end &&
                StringConverter::parseBool(opt->second))
            {
                eglContext = eglGetCurrentContext();
                if (!eglContext)
                {
                    OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                                "currentGLContext was specified with no current GL context",
                                "EGLWindow::create");
                }
                mEglSurface = eglGetCurrentSurface(EGL_DRAW);
                mEglDisplay = eglGetCurrentDisplay();
            }
            // The ANativeWindow and AConfiguration are passed in as pointer
            // values encoded in strings.
            if((opt = miscParams->find("externalWindowHandle")) != end)
            {
                mWindow = (ANativeWindow*)(Ogre::StringConverter::parseSizeT(opt->second));
            }
            if((opt = miscParams->find("androidConfig")) != end)
            {
                config = (AConfiguration*)(Ogre::StringConverter::parseSizeT(opt->second));
            }
            // Optional buffer-size constraints for EGL config selection.
            if((opt = miscParams->find("maxColourBufferSize")) != end)
            {
                mMaxBufferSize = Ogre::StringConverter::parseInt(opt->second);
            }
            if((opt = miscParams->find("maxDepthBufferSize")) != end)
            {
                mMaxDepthSize = Ogre::StringConverter::parseInt(opt->second);
            }
            if((opt = miscParams->find("maxStencilBufferSize")) != end)
            {
                mMaxStencilSize = Ogre::StringConverter::parseInt(opt->second);
            }
            if((opt = miscParams->find("minColourBufferSize")) != end)
            {
                mMinBufferSize = Ogre::StringConverter::parseInt(opt->second);
                // The minimum may never exceed the maximum.
                if (mMinBufferSize > mMaxBufferSize) mMinBufferSize = mMaxBufferSize;
            }
            if((opt = miscParams->find("MSAA")) != end)
            {
                mMSAA = Ogre::StringConverter::parseInt(opt->second);
            }
            if((opt = miscParams->find("CSAA")) != end)
            {
                mCSAA = Ogre::StringConverter::parseInt(opt->second);
            }
            if ((opt = miscParams->find("preserveContext")) != end &&
                StringConverter::parseBool(opt->second))
            {
                preserveContextOpt = true;
            }
        }
        initNativeCreatedWindow(miscParams);
        // Derive the EGLConfig from an existing surface or context if we
        // adopted one above.
        if (mEglSurface)
        {
            mEglConfig = mGLSupport->getGLConfigFromDrawable (mEglSurface, &width, &height);
        }
        if (!mEglConfig && eglContext)
        {
            mEglConfig = mGLSupport->getGLConfigFromContext(eglContext);
            if (!mEglConfig)
            {
                // This should never happen.
                OGRE_EXCEPT(Exception::ERR_RENDERINGAPI_ERROR,
                            "Unexpected failure to determine a EGLFBConfig",
                            "EGLWindow::create");
            }
        }
        mIsExternal = (mEglSurface != 0);
        // Otherwise select a config and create the surface ourselves.
        if (!mEglConfig)
        {
            _notifySurfaceCreated(mWindow, config);
            mHwGamma = false;
        }
        mContext = createEGLContext();
        mContext->setCurrent();
        // The OS may have given us a different size than requested.
        eglQuerySurface(mEglDisplay, mEglSurface, EGL_WIDTH, (EGLint*)&mWidth);
        eglQuerySurface(mEglDisplay, mEglSurface, EGL_HEIGHT, (EGLint*)&mHeight);
        EGL_CHECK_ERROR
        mActive = true;
        mVisible = true;
        mClosed = false;
        mPreserveContext = preserveContextOpt;
    }
    // Called when the OS destroys the native surface (e.g. the app goes to
    // the background). Tears down the EGL surface/display; GL resources are
    // also destroyed unless "preserveContext" was requested.
    void AndroidEGLWindow::_notifySurfaceDestroyed()
    {
        if(mClosed)
            return;
        if (!mPreserveContext)
        {
            mContext->setCurrent();
            static_cast<GLRenderSystemCommon*>(Root::getSingletonPtr()->getRenderSystem())->notifyOnContextLost();
            mContext->_destroyInternalResources();
        }
        eglDestroySurface(mEglDisplay, mEglSurface);
        EGL_CHECK_ERROR
        eglTerminate(mEglDisplay);
        EGL_CHECK_ERROR
        mEglDisplay = 0;
        mEglSurface = 0;
        mActive = false;
        mVisible = false;
        mClosed = true;
    }
    // Called when the OS (re)creates the native surface. Either re-binds the
    // preserved context to a fresh surface, or selects an EGLConfig (trying
    // CSAA, then MSAA, then plain) and rebuilds all GL resources.
    void AndroidEGLWindow::_notifySurfaceCreated(void* window, void* config)
    {
        mWindow = reinterpret_cast<EGLNativeWindowType>(window);
        if (mPreserveContext)
        {
            // Context survived; only the surface needs to be recreated.
            mEglDisplay = mGLSupport->getGLDisplay();
            mEglSurface = createSurfaceFromWindow(mEglDisplay, mWindow);
            mContext->_updateInternalResources(mEglDisplay, mEglConfig, mEglSurface);
        }
        else
        {
            // Baseline attribute ranges used when no AA config succeeds.
            int minAttribs[] = {
                EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
                EGL_BUFFER_SIZE, mMinBufferSize,
                EGL_DEPTH_SIZE, 16,
                EGL_NONE
            };
            int maxAttribs[] = {
                EGL_BUFFER_SIZE, mMaxBufferSize,
                EGL_DEPTH_SIZE, mMaxDepthSize,
                EGL_STENCIL_SIZE, mMaxStencilSize,
                EGL_NONE
            };
            bool bAASuccess = false;
            // First preference: NVIDIA coverage-sample AA, if requested.
            if (mCSAA)
            {
                try
                {
                    int CSAAminAttribs[] = {
                        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
                        EGL_BUFFER_SIZE, mMinBufferSize,
                        EGL_DEPTH_SIZE, 16,
                        EGL_COVERAGE_BUFFERS_NV, 1,
                        EGL_COVERAGE_SAMPLES_NV, mCSAA,
                        EGL_NONE
                    };
                    int CSAAmaxAttribs[] = {
                        EGL_BUFFER_SIZE, mMaxBufferSize,
                        EGL_DEPTH_SIZE, mMaxDepthSize,
                        EGL_STENCIL_SIZE, mMaxStencilSize,
                        EGL_COVERAGE_BUFFERS_NV, 1,
                        EGL_COVERAGE_SAMPLES_NV, mCSAA,
                        EGL_NONE
                    };
                    mEglConfig = mGLSupport->selectGLConfig(CSAAminAttribs, CSAAmaxAttribs);
                    bAASuccess = true;
                }
                catch (Exception& e)
                {
                    LogManager::getSingleton().logMessage("AndroidEGLWindow::_createInternalResources: setting CSAA failed");
                }
            }
            // Second preference: standard multisample AA.
            if (mMSAA && !bAASuccess)
            {
                try
                {
                    int MSAAminAttribs[] = {
                        EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
                        EGL_BUFFER_SIZE, mMinBufferSize,
                        EGL_DEPTH_SIZE, 16,
                        EGL_SAMPLE_BUFFERS, 1,
                        EGL_SAMPLES, mMSAA,
                        EGL_NONE
                    };
                    int MSAAmaxAttribs[] = {
                        EGL_BUFFER_SIZE, mMaxBufferSize,
                        EGL_DEPTH_SIZE, mMaxDepthSize,
                        EGL_STENCIL_SIZE, mMaxStencilSize,
                        EGL_SAMPLE_BUFFERS, 1,
                        EGL_SAMPLES, mMSAA,
                        EGL_NONE
                    };
                    mEglConfig = mGLSupport->selectGLConfig(MSAAminAttribs, MSAAmaxAttribs);
                    bAASuccess = true;
                }
                catch (Exception& e)
                {
                    LogManager::getSingleton().logMessage("AndroidEGLWindow::_createInternalResources: setting MSAA failed");
                }
            }
            mEglDisplay = mGLSupport->getGLDisplay();
            if (!bAASuccess) mEglConfig = mGLSupport->selectGLConfig(minAttribs, maxAttribs);
            // Match the ANativeWindow's pixel format to the chosen config.
            EGLint format;
            eglGetConfigAttrib(mEglDisplay, mEglConfig, EGL_NATIVE_VISUAL_ID, &format);
            EGL_CHECK_ERROR
            ANativeWindow_setBuffersGeometry(mWindow, 0, 0, format);
            mEglSurface = createSurfaceFromWindow(mEglDisplay, mWindow);
            if (config)
            {
                bool isLandscape = (int)AConfiguration_getOrientation((AConfiguration*)config) == 2;
                mGLSupport->setConfigOption("Orientation", isLandscape ? "Landscape" : "Portrait");
            }
        }
        if(mContext)
        {
            mActive = true;
            mVisible = true;
            mClosed = false;
            if (!mPreserveContext)
            {
                mContext->_createInternalResources(mEglDisplay, mEglConfig, mEglSurface, NULL);
                static_cast<GLRenderSystemCommon*>(Ogre::Root::getSingletonPtr()->getRenderSystem())->resetRenderer(this);
            }
        }
    }
}
|
/******************************************************
* Cloud WebAccess
* http://gitlab.qq/cloudproject/backend-web-access.git
* Created: 01.04.2020
* Copyright (C) goganoga 2020
*******************************************************/
#include "FHT/Common/DBFacade/Postgresql/Postgresql.h"
#include "FHT/Common/Template.h"
#include "FHT/Common/Controller/Controller.h"
#include <algorithm>
#include <stdexcept>
namespace FHT {
struct Connection {
using ptrConnection = std::shared_ptr<PostgresConnection>;
Connection(ptrConnection connection, std::function<void(ptrConnection)> destructor) :m_connection(connection), m_destructor(destructor){}
~Connection() { m_destructor(m_connection); }
PGconn* get() {
if (m_connection) {
return m_connection->connection().get();
}
return nullptr;
}
ptrConnection getPtr() {
if (m_connection) {
return m_connection;
}
return nullptr;
}
private:
ptrConnection m_connection;
std::function<void(ptrConnection)> m_destructor;
};
Postgres::returnQuery Postgres::query_private(std::string& query, int size, const char* const* params) {
if (!m_isRun) {
FHT::LoggerStream::Log(FHT::LoggerStream::FATAL) << METHOD_NAME << "Need runnig FHT::iDBFacade::DBConnector<FHT::iDBFacade::iDBSettings>()::Run";
throw "iPostgres not runing!";
}
returnQuery out;
auto uuid = "web_access_" + guid(query);
uuid.erase(std::remove(uuid.begin(), uuid.end(), '-'), uuid.end());
{
std::unique_ptr<PGresult, decltype(&PQclear)> m_result{ nullptr, [](PGresult* res) { if (res) PQclear(res); } };
Connection connection{ getConnection(), [&](ptrConnection a) { if (a) freeConnection(a); } };
if (!connection.getPtr()) {
throw std::string("Error Connection").c_str();
}
if (!connection.getPtr()->findStmt(uuid)) {
m_result.reset(PQprepare(connection.get(), uuid.c_str(), query.c_str(), 0, nullptr));
if (!m_result) {
throw std::string("Error create PG stmt").c_str();
}
if (PQresultStatus(m_result.get()) != PGRES_COMMAND_OK) {
throw PQresultErrorMessage(m_result.get());
}
connection.getPtr()->addStmt(uuid);
}
m_result.reset(PQexecPrepared(connection.get(), uuid.c_str(), size, params, nullptr, nullptr, 0));
if (!m_result) {
throw std::string("Error exec PG query trough stmt").c_str();
}
if (auto const status = PQresultStatus(m_result.get()); status != PGRES_TUPLES_OK && status != PGRES_COMMAND_OK) {
throw PQresultErrorMessage(m_result.get());
}
if (PQntuples(m_result.get())) {
for (int i = 0; i < PQnfields(m_result.get()); i++) {
std::vector<std::string> vector;
for (int j = 0; j < PQntuples(m_result.get()); j++) {
vector.push_back(PQgetvalue(m_result.get(), j, i));
}
out.emplace(PQfname(m_result.get(), i), vector);
}
}
}
return out;
}
void Postgres::queryPrivate(std::string& query, std::vector<std::string>& param, Postgres::returnQuery& result) {
std::vector<std::unique_ptr<char[]>> list;
if (!param.empty()) {
std::unique_ptr<char* []> paramValues(new char* [param.size()]);
for (int i = 0; i < param.size(); i++) {
if (param[i].empty()) {
paramValues[i] = nullptr;
}
else {
std::unique_ptr<char[]> str(new char[param[i].size() + 1]);
std::strncpy(str.get(), param[i].c_str(), param[i].size() + 1);
list.push_back(std::move(str));
paramValues[i] = list.back().get();
}
}
result = query_private(query, static_cast<int>(param.size()), paramValues.get());
}
else {
result = query_private(query, 0, nullptr);
}
}
    // Marks the facade as stopped; pooled connections are released by the
    // member destructors.
    Postgres::~Postgres() {
        m_isRun = false;
    }
Postgres::ptrConnection Postgres::getConnection() {
std::unique_lock<std::mutex> lock_(m_mux);
for (; m_pool_conn.empty();) {
m_condition.wait(lock_);
}
auto connection = m_pool_conn.front();
m_pool_conn.pop();
return connection;
}
void Postgres::freeConnection(ptrConnection connection) {
std::unique_lock<std::mutex> lock_(m_mux);
m_pool_conn.push(connection);
lock_.unlock();
m_condition.notify_one();
}
bool Postgres::run(std::unique_ptr<iDBFacade::Configuration> config) {
try {
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "host" << config->m_host;
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "name" << config->m_name;
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "user" << config->m_user;
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "pass" << config->m_pass;
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "port" << config->m_port;
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "worker" << config->m_worker;
if (m_isRun) {
throw "Postgres runing!";
}
for (int i = 0; i < config->m_worker; i++) {
m_pool_conn.emplace(std::make_shared<PostgresConnection>(config->m_port, config->m_host, config->m_name, config->m_user, config->m_pass));
}
Conrtoller::getTask()->postLoopTask(FHT::iTask::MAIN, [&]() {
std::string query = "DELETE FROM notifications.notification WHERE status = 'true';";
auto a = query_private(query, 0, nullptr);
FHT::LoggerStream::Log(FHT::LoggerStream::DEBUG) << METHOD_NAME << "DELETE FROM notifications";
return FHT::iTask::state::CONTINUE;
},
1000 * 60 * 60 * 8); // ms * s * m * h
m_isRun = true;
return m_isRun;
}
catch (std::exception const& e) {
throw e;
}
return m_isRun;
}
}
|
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "ash/system/tray/tray_popup_utils.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "ash/public/cpp/ash_constants.h"
#include "ash/public/cpp/ash_view_ids.h"
#include "ash/resources/vector_icons/vector_icons.h"
#include "ash/session/session_controller_impl.h"
#include "ash/shell.h"
#include "ash/style/ash_color_provider.h"
#include "ash/style/default_color_constants.h"
#include "ash/system/tray/hover_highlight_view.h"
#include "ash/system/tray/size_range_layout.h"
#include "ash/system/tray/tray_constants.h"
#include "ash/system/tray/unfocusable_label.h"
#include "ash/system/unified/unified_system_tray_view.h"
#include "ui/base/l10n/l10n_util.h"
#include "ui/gfx/color_palette.h"
#include "ui/gfx/geometry/insets.h"
#include "ui/gfx/paint_vector_icon.h"
#include "ui/views/animation/flood_fill_ink_drop_ripple.h"
#include "ui/views/animation/ink_drop_highlight.h"
#include "ui/views/animation/ink_drop_impl.h"
#include "ui/views/animation/square_ink_drop_ripple.h"
#include "ui/views/controls/button/button.h"
#include "ui/views/controls/button/md_text_button.h"
#include "ui/views/controls/button/toggle_button.h"
#include "ui/views/controls/highlight_path_generator.h"
#include "ui/views/controls/image_view.h"
#include "ui/views/controls/label.h"
#include "ui/views/controls/separator.h"
#include "ui/views/controls/slider.h"
#include "ui/views/layout/box_layout.h"
#include "ui/views/layout/fill_layout.h"
#include "ui/views/painter.h"
namespace ash {
namespace {
// Builds the layout used for a row's CENTER container: children are laid
// out vertically, stretched horizontally and centered vertically.
std::unique_ptr<views::LayoutManager> CreateDefaultCenterLayoutManager() {
  // TODO(bruthig): Use constants instead of magic numbers.
  auto layout = std::make_unique<views::BoxLayout>(
      views::BoxLayout::Orientation::kVertical,
      gfx::Insets(8, kTrayPopupLabelHorizontalPadding));
  layout->set_main_axis_alignment(views::BoxLayout::MainAxisAlignment::kCenter);
  layout->set_cross_axis_alignment(
      views::BoxLayout::CrossAxisAlignment::kStretch);
  return std::move(layout);
}
// Builds the layout used for a row's START and END containers: children
// are laid out horizontally and centered along both axes.
std::unique_ptr<views::LayoutManager> CreateDefaultEndsLayoutManager() {
  auto layout = std::make_unique<views::BoxLayout>(
      views::BoxLayout::Orientation::kHorizontal);
  layout->set_main_axis_alignment(views::BoxLayout::MainAxisAlignment::kCenter);
  layout->set_cross_axis_alignment(
      views::BoxLayout::CrossAxisAlignment::kCenter);
  return std::move(layout);
}
// Dispatches to the default layout for the given |container| slot.
std::unique_ptr<views::LayoutManager> CreateDefaultLayoutManager(
    TriView::Container container) {
  if (container == TriView::Container::CENTER)
    return CreateDefaultCenterLayoutManager();
  if (container == TriView::Container::START ||
      container == TriView::Container::END)
    return CreateDefaultEndsLayoutManager();
  // Required by some compilers.
  NOTREACHED();
  return nullptr;
}
// Configures the default size and flex value for the specified |container|
// of the given |tri_view|. Used by CreateDefaultRowView().
void ConfigureDefaultSizeAndFlex(TriView* tri_view,
                                 TriView::Container container) {
  int min_width = 0;
  switch (container) {
    case TriView::Container::START:
      min_width = kTrayPopupItemMinStartWidth;
      break;
    case TriView::Container::CENTER:
      // CENTER has no fixed minimum width; it flexes to absorb remaining
      // horizontal space instead.
      tri_view->SetFlexForContainer(TriView::Container::CENTER, 1.f);
      break;
    case TriView::Container::END:
      min_width = kTrayPopupItemMinEndWidth;
      break;
  }
  tri_view->SetMinSize(container,
                       gfx::Size(min_width, kTrayPopupItemMinHeight));
  constexpr int kTrayPopupItemMaxHeight = 144;
  tri_view->SetMaxSize(
      container,
      gfx::Size(SizeRangeLayout::kAbsoluteMaxSize, kTrayPopupItemMaxHeight));
}
// Insets applied to the ink-drop bounds: HOST_CENTERED and INSET_BOUNDS
// styles shrink the ripple area; FILL_BOUNDS uses the full host bounds.
gfx::Insets GetInkDropInsets(TrayPopupInkDropStyle ink_drop_style) {
  const bool inset_ripple =
      ink_drop_style == TrayPopupInkDropStyle::HOST_CENTERED ||
      ink_drop_style == TrayPopupInkDropStyle::INSET_BOUNDS;
  return inset_ripple ? gfx::Insets(kTrayPopupInkDropInset) : gfx::Insets();
}
// Produces the focus/ink-drop highlight shape for a view based on its
// TrayPopupInkDropStyle: a centered circle, an inset rounded rect, or the
// plain view bounds.
class HighlightPathGenerator : public views::HighlightPathGenerator {
 public:
  explicit HighlightPathGenerator(TrayPopupInkDropStyle ink_drop_style)
      : ink_drop_style_(ink_drop_style) {}
  HighlightPathGenerator(const HighlightPathGenerator&) = delete;
  HighlightPathGenerator& operator=(const HighlightPathGenerator&) = delete;
  // views::HighlightPathGenerator:
  base::Optional<gfx::RRectF> GetRoundRect(const gfx::RectF& rect) override {
    gfx::RectF bounds = rect;
    bounds.Inset(GetInkDropInsets(ink_drop_style_));
    float corner_radius = 0.f;
    switch (ink_drop_style_) {
      case TrayPopupInkDropStyle::HOST_CENTERED:
        // Circle centered within the host's bounds.
        corner_radius = std::min(bounds.width(), bounds.height()) / 2.f;
        bounds.ClampToCenteredSize(gfx::SizeF(corner_radius, corner_radius));
        break;
      case TrayPopupInkDropStyle::INSET_BOUNDS:
        corner_radius = kTrayPopupInkDropCornerRadius;
        break;
      case TrayPopupInkDropStyle::FILL_BOUNDS:
        // Square corners over the full bounds.
        break;
    }
    return gfx::RRectF(bounds, corner_radius);
  }
 private:
  const TrayPopupInkDropStyle ink_drop_style_;
};
} // namespace
// Creates a row with the standard START/CENTER/END layouts installed.
TriView* TrayPopupUtils::CreateDefaultRowView() {
  TriView* tri_view = CreateMultiTargetRowView();
  // Replace each container's FillLayout with the default layout for that
  // slot.
  for (TriView::Container container :
       {TriView::Container::START, TriView::Container::CENTER,
        TriView::Container::END}) {
    tri_view->SetContainerLayout(container,
                                 CreateDefaultLayoutManager(container));
  }
  return tri_view;
}
// Creates a sub-header row; when |start_visible| is false the START
// container is hidden and the insets are adjusted to compensate.
TriView* TrayPopupUtils::CreateSubHeaderRowView(bool start_visible) {
  TriView* tri_view = CreateDefaultRowView();
  if (start_visible)
    return tri_view;
  tri_view->SetInsets(gfx::Insets(
      0, kTrayPopupPaddingHorizontal - kTrayPopupLabelHorizontalPadding, 0,
      0));
  tri_view->SetContainerVisible(TriView::Container::START, false);
  return tri_view;
}
// Creates a bare three-container row: default sizes/flex configured and a
// FillLayout installed in each slot.
TriView* TrayPopupUtils::CreateMultiTargetRowView() {
  TriView* tri_view = new TriView(0 /* padding_between_items */);
  tri_view->SetInsets(gfx::Insets(0, kMenuExtraMarginFromLeftEdge, 0, 0));
  for (TriView::Container container :
       {TriView::Container::START, TriView::Container::CENTER,
        TriView::Container::END}) {
    ConfigureDefaultSizeAndFlex(tri_view, container);
    tri_view->SetContainerLayout(container,
                                 std::make_unique<views::FillLayout>());
  }
  return tri_view;
}
// Creates a left-aligned label with subpixel rendering disabled.
views::Label* TrayPopupUtils::CreateDefaultLabel() {
  auto* label = new views::Label();
  label->SetHorizontalAlignment(gfx::ALIGN_LEFT);
  label->SetSubpixelRenderingEnabled(false);
  return label;
}
// Same configuration as CreateDefaultLabel() but for a label that is
// skipped by focus traversal.
UnfocusableLabel* TrayPopupUtils::CreateUnfocusableLabel() {
  auto* label = new UnfocusableLabel();
  label->SetHorizontalAlignment(gfx::ALIGN_LEFT);
  label->SetSubpixelRenderingEnabled(false);
  return label;
}
// Creates an image view sized to the standard START-container dimensions.
views::ImageView* TrayPopupUtils::CreateMainImageView() {
  views::ImageView* image_view = new views::ImageView;
  image_view->SetPreferredSize(
      gfx::Size(kTrayPopupItemMinStartWidth, kTrayPopupItemMinHeight));
  return image_view;
}
// Creates a slider with the standard horizontal popup padding applied as
// an empty border.
views::Slider* TrayPopupUtils::CreateSlider(views::SliderListener* listener) {
  views::Slider* slider = new views::Slider(listener);
  slider->SetBorder(views::CreateEmptyBorder(
      gfx::Insets(0, kTrayPopupSliderHorizontalPadding)));
  return slider;
}
// Creates a toggle button padded to the standard tray dimensions, with
// thumb/track colors derived from the current AshColorProvider.
views::ToggleButton* TrayPopupUtils::CreateToggleButton(
    views::ButtonListener* listener,
    int accessible_name_id) {
  // FIX: this is an alpha component, not a full ARGB color, so declare it
  // as SkAlpha (it is only ever passed as the alpha argument below).
  constexpr SkAlpha kTrackAlpha = 0x66;
  auto GetColor = [](bool is_on, SkAlpha alpha = SK_AlphaOPAQUE) {
    AshColorProvider::ContentLayerType type =
        is_on ? AshColorProvider::ContentLayerType::kProminentIconButton
              : AshColorProvider::ContentLayerType::kTextPrimary;
    return SkColorSetA(AshColorProvider::Get()->GetContentLayerColor(
                           type, AshColorProvider::AshColorMode::kDark),
                       alpha);
  };
  views::ToggleButton* toggle = new views::ToggleButton(listener);
  // Center the toggle within the standard menu button area via an empty
  // border.
  const gfx::Size toggle_size(toggle->GetPreferredSize());
  const int vertical_padding = (kMenuButtonSize - toggle_size.height()) / 2;
  const int horizontal_padding =
      (kTrayToggleButtonWidth - toggle_size.width()) / 2;
  toggle->SetBorder(views::CreateEmptyBorder(
      gfx::Insets(vertical_padding, horizontal_padding)));
  toggle->SetAccessibleName(l10n_util::GetStringUTF16(accessible_name_id));
  toggle->SetThumbOnColor(GetColor(true));
  toggle->SetThumbOffColor(GetColor(false));
  toggle->SetTrackOnColor(GetColor(true, kTrackAlpha));
  toggle->SetTrackOffColor(GetColor(false, kTrackAlpha));
  return toggle;
}
// Creates the solid focus-ring painter used by tray popup views.
std::unique_ptr<views::Painter> TrayPopupUtils::CreateFocusPainter() {
  return views::Painter::CreateSolidFocusPainter(
      UnifiedSystemTrayView::GetFocusRingColor(), kFocusBorderThickness,
      gfx::InsetsF());
}
// Applies the standard tray popup button behavior: focus ring, platform
// focusability, and an ink drop triggered on click.
void TrayPopupUtils::ConfigureTrayPopupButton(views::Button* button) {
  button->SetInstallFocusRingOnFocus(true);
  button->SetFocusForPlatform();
  button->SetInkDropMode(views::InkDropHostView::InkDropMode::ON);
  button->set_has_ink_drop_action_on_click(true);
}
// Marks |view| as a sticky header row: tags it with the sticky-header view
// ID, adds vertical separator padding, and paints it to its own layer (the
// layer does not fill its bounds opaquely so content can show through).
void TrayPopupUtils::ConfigureAsStickyHeader(views::View* view) {
  view->SetID(VIEW_ID_STICKY_HEADER);
  view->SetBorder(
      views::CreateEmptyBorder(gfx::Insets(kMenuSeparatorVerticalPadding, 0)));
  view->SetPaintToLayer();
  view->layer()->SetFillsBoundsOpaquely(false);
}
// Installs the default layout for the given |container| slot on an
// arbitrary view.
void TrayPopupUtils::ConfigureContainer(TriView::Container container,
                                        views::View* container_view) {
  container_view->SetLayoutManager(CreateDefaultLayoutManager(container));
}
// Creates a prominent (filled) material-design text button. Ownership is
// passed to the caller via the raw pointer.
views::LabelButton* TrayPopupUtils::CreateTrayPopupButton(
    views::ButtonListener* listener,
    const base::string16& text) {
  auto button = views::MdTextButton::Create(listener, text);
  button->SetProminent(true);
  return button.release();
}
// Creates the vertical separator drawn between adjacent items in the tray.
// The caller takes ownership (views ownership convention).
views::Separator* TrayPopupUtils::CreateVerticalSeparator() {
  // Preferred height of the vertical separator, in DIPs. Previously an
  // unexplained magic number inline in the call.
  constexpr int kVerticalSeparatorHeight = 24;
  views::Separator* separator = new views::Separator();
  separator->SetPreferredHeight(kVerticalSeparatorHeight);
  separator->SetColor(AshColorProvider::Get()->GetContentLayerColor(
      AshColorProvider::ContentLayerType::kSeparator,
      AshColorProvider::AshColorMode::kLight));
  return separator;
}
// Creates the ink drop used by tray popup views: the highlight appears only
// while a ripple is active, never on plain hover.
std::unique_ptr<views::InkDrop> TrayPopupUtils::CreateInkDrop(
    views::InkDropHostView* host) {
  auto drop = std::make_unique<views::InkDropImpl>(host, host->size());
  drop->SetAutoHighlightMode(
      views::InkDropImpl::AutoHighlightMode::SHOW_ON_RIPPLE);
  drop->SetShowHighlightOnHover(false);
  // std::move is required for the unique_ptr<InkDropImpl> -> unique_ptr<InkDrop>
  // conversion on return.
  return std::move(drop);
}
// Creates a flood-fill ripple for |host|, centered at |center_point|, using
// the ripple color/opacity that AshColorProvider derives from
// |background_color|. Insets depend on the requested |ink_drop_style|.
std::unique_ptr<views::InkDropRipple> TrayPopupUtils::CreateInkDropRipple(
    TrayPopupInkDropStyle ink_drop_style,
    const views::View* host,
    const gfx::Point& center_point,
    SkColor background_color) {
  const AshColorProvider::RippleAttributes attributes =
      AshColorProvider::Get()->GetRippleAttributes(background_color);
  return std::make_unique<views::FloodFillInkDropRipple>(
      host->size(), GetInkDropInsets(ink_drop_style), center_point,
      attributes.base_color, attributes.inkdrop_opacity);
}
// Creates the hover/focus highlight for |host| using the highlight color and
// opacity AshColorProvider derives from |background_color|.
// |ink_drop_style| is currently unused by this function's visible logic.
std::unique_ptr<views::InkDropHighlight> TrayPopupUtils::CreateInkDropHighlight(
    TrayPopupInkDropStyle ink_drop_style,
    const views::View* host,
    SkColor background_color) {
  const AshColorProvider::RippleAttributes attributes =
      AshColorProvider::Get()->GetRippleAttributes(background_color);
  auto result = std::make_unique<views::InkDropHighlight>(
      gfx::SizeF(host->size()), attributes.base_color);
  result->set_visible_opacity(attributes.highlight_opacity);
  return result;
}
// Installs a HighlightPathGenerator on |host| so that focus rings and ink
// drops follow the shape dictated by |ink_drop_style|.
void TrayPopupUtils::InstallHighlightPathGenerator(
    views::View* host,
    TrayPopupInkDropStyle ink_drop_style) {
  views::HighlightPathGenerator::Install(
      host, std::make_unique<HighlightPathGenerator>(ink_drop_style));
}
// Creates the horizontal separator drawn between list items in the tray
// popup. When |left_inset| is true the separator starts past the leading
// icon column so it aligns with the item's label text. Caller takes
// ownership.
views::Separator* TrayPopupUtils::CreateListItemSeparator(bool left_inset) {
  // Left border inset: skip the leading margin + icon + label padding when
  // alignment with the label is requested.
  const int leading_inset =
      left_inset ? kMenuExtraMarginFromLeftEdge + kMenuButtonSize +
                       kTrayPopupLabelHorizontalPadding
                 : 0;
  views::Separator* separator = new views::Separator();
  separator->SetColor(AshColorProvider::Get()->GetContentLayerColor(
      AshColorProvider::ContentLayerType::kSeparator,
      AshColorProvider::AshColorMode::kLight));
  // Top padding is reduced by the separator's own thickness so the total
  // vertical footprint stays kMenuSeparatorVerticalPadding on each side.
  separator->SetBorder(views::CreateEmptyBorder(
      kMenuSeparatorVerticalPadding - views::Separator::kThickness,
      leading_inset, kMenuSeparatorVerticalPadding, 0));
  return separator;
}
// Returns whether the WebUI settings page may be opened right now; the
// decision is delegated entirely to the session controller.
bool TrayPopupUtils::CanOpenWebUISettings() {
  return Shell::Get()->session_controller()->ShouldEnableSettings();
}
// Sets up |container| as a checkable row: optionally prepends an
// enterprise-managed badge, then adds the check-mark icon whose visibility
// reflects |checked|.
void TrayPopupUtils::InitializeAsCheckableRow(HoverHighlightView* container,
                                              bool checked,
                                              bool enterprise_managed) {
  // Icon size is taken from kCheckCircleIcon even though the drawn icon is
  // kHollowCheckCircleIcon — presumably the two share dimensions; confirm.
  const int icon_size = GetDefaultSizeOfVectorIcon(kCheckCircleIcon);
  if (enterprise_managed) {
    const gfx::ImageSkia managed_icon = CreateVectorIcon(
        kLoginScreenEnterpriseIcon, icon_size, gfx::kGoogleGrey100);
    container->AddRightIcon(managed_icon, managed_icon.width());
  }
  const gfx::ImageSkia checked_icon =
      CreateVectorIcon(kHollowCheckCircleIcon, icon_size, gfx::kGoogleGreen300);
  container->AddRightIcon(checked_icon, checked_icon.width());
  UpdateCheckMarkVisibility(container, checked);
}
// Shows or hides the row's check mark and keeps the accessibility checkbox
// state in sync with the visual state.
void TrayPopupUtils::UpdateCheckMarkVisibility(HoverHighlightView* container,
                                               bool visible) {
  container->SetRightViewVisible(visible);
  if (visible) {
    container->SetAccessibilityState(
        HoverHighlightView::AccessibilityState::CHECKED_CHECKBOX);
  } else {
    container->SetAccessibilityState(
        HoverHighlightView::AccessibilityState::UNCHECKED_CHECKBOX);
  }
}
} // namespace ash
|
/***************************************************************************
* Copyright (c) Johan Mabille, Sylvain Corlay and Wolf Vollprecht *
* Copyright (c) QuantStack *
* *
* Distributed under the terms of the BSD 3-Clause License. *
* *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/
#ifndef XTENSOR_MANIPULATION_HPP
#define XTENSOR_MANIPULATION_HPP
#include "xbuilder.hpp"
#include "xstrided_view.hpp"
#include "xutils.hpp"
#include "xtensor_config.hpp"
#include "xrepeat.hpp"
namespace xt
{
namespace check_policy
{
struct none
{
};
struct full
{
};
}
    // ---- Forward declarations -------------------------------------------
    // Transposition.
    template <class E>
    auto transpose(E&& e) noexcept;
    template <class E, class S, class Tag = check_policy::none>
    auto transpose(E&& e, S&& permutation, Tag check_policy = Tag());
    // Flattening views.
    template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
    auto ravel(E&& e);
    template <layout_type L = XTENSOR_DEFAULT_TRAVERSAL, class E>
    auto flatten(E&& e);
    template <layout_type L, class T>
    auto flatnonzero(const T& arr);
    template <class E>
    auto trim_zeros(E&& e, const std::string& direction = "fb");
    // Dimension add/remove.
    template <class E>
    auto squeeze(E&& e);
    template <class E, class S, class Tag = check_policy::none, std::enable_if_t<!std::is_integral<S>::value, int> = 0>
    auto squeeze(E&& e, S&& axis, Tag check_policy = Tag());
    template <class E>
    auto expand_dims(E&& e, std::size_t axis);
    template <std::size_t N, class E>
    auto atleast_Nd(E&& e);
    template <class E>
    auto atleast_1d(E&& e);
    template <class E>
    auto atleast_2d(E&& e);
    template <class E>
    auto atleast_3d(E&& e);
    // Splitting.
    template <class E>
    auto split(E& e, std::size_t n, std::size_t axis = 0);
    template <class E>
    auto hsplit(E& e, std::size_t n);
    template <class E>
    auto vsplit(E& e, std::size_t n);
    // Reordering.
    template <class E>
    auto flip(E&& e, std::size_t axis);
    template <std::ptrdiff_t N = 1, class E>
    auto rot90(E&& e, const std::array<std::ptrdiff_t, 2>& axes = {0, 1});
    template<class E>
    auto roll(E&& e, std::ptrdiff_t shift);
    template<class E>
    auto roll(E&& e, std::ptrdiff_t shift, std::ptrdiff_t axis);
    // Repetition.
    template<class E>
    auto repeat(E&& e, std::size_t repeats, std::size_t axis);
    template<class E>
    auto repeat(E&& e, const std::vector<std::size_t>& repeats, std::size_t axis);
    template<class E>
    auto repeat(E&& e, std::vector<std::size_t>&& repeats, std::size_t axis);
/****************************
* transpose implementation *
****************************/
namespace detail
{
inline layout_type transpose_layout_noexcept(layout_type l) noexcept
{
layout_type result = l;
if (l == layout_type::row_major)
{
result = layout_type::column_major;
}
else if (l == layout_type::column_major)
{
result = layout_type::row_major;
}
return result;
}
inline layout_type transpose_layout(layout_type l)
{
if (l != layout_type::row_major && l != layout_type::column_major)
{
XTENSOR_THROW(transpose_error, "cannot compute transposed layout of dynamic layout");
}
return transpose_layout_noexcept(l);
}
        // Core permuting transpose: builds a strided_view whose shape and
        // strides are those of `e` permuted by `permutation`. Performs only
        // the validation the algorithm itself needs (size match, axes in
        // range); duplicate detection is the check_policy::full overload's
        // job.
        template <class E, class S>
        inline auto transpose_impl(E&& e, S&& permutation, check_policy::none)
        {
            if (sequence_size(permutation) != e.dimension())
            {
                XTENSOR_THROW(transpose_error, "Permutation does not have the same size as shape");
            }
            // permute stride and shape
            using shape_type = xindex_type_t<typename std::decay_t<E>::shape_type>;
            shape_type temp_shape;
            resize_container(temp_shape, e.shape().size());
            using strides_type = get_strides_t<shape_type>;
            strides_type temp_strides;
            resize_container(temp_strides, e.strides().size());
            using size_type = typename std::decay_t<E>::size_type;
            for (std::size_t i = 0; i < e.shape().size(); ++i)
            {
                if (std::size_t(permutation[i]) >= e.dimension())
                {
                    XTENSOR_THROW(transpose_error, "Permutation contains wrong axis");
                }
                size_type perm = static_cast<size_type>(permutation[i]);
                temp_shape[i] = e.shape()[perm];
                temp_strides[i] = e.strides()[perm];
            }
            // An ascending permutation is the identity (axes already checked
            // to be a permutation of [0, dim)), so the layout is preserved; a
            // descending one is a full reversal, so the layout flips. Any
            // other permutation yields a dynamic layout.
            layout_type new_layout = layout_type::dynamic;
            if (std::is_sorted(std::begin(permutation), std::end(permutation)))
            {
                // keep old layout
                new_layout = e.layout();
            }
            else if (std::is_sorted(std::begin(permutation), std::end(permutation), std::greater<>()))
            {
                new_layout = transpose_layout_noexcept(e.layout());
            }
            // NOTE(review): the offset uses XTENSOR_DEFAULT_LAYOUT here while
            // transpose(E&&) below uses XTENSOR_DEFAULT_TRAVERSAL — confirm
            // this asymmetry is intentional.
            return strided_view(std::forward<E>(e), std::move(temp_shape), std::move(temp_strides), get_offset<XTENSOR_DEFAULT_LAYOUT>(e), new_layout);
        }
template <class E, class S>
inline auto transpose_impl(E&& e, S&& permutation, check_policy::full)
{
// check if axis appears twice in permutation
for (std::size_t i = 0; i < sequence_size(permutation); ++i)
{
for (std::size_t j = i + 1; j < sequence_size(permutation); ++j)
{
if (permutation[i] == permutation[j])
{
XTENSOR_THROW(transpose_error, "Permutation contains axis more than once");
}
}
}
return transpose_impl(std::forward<E>(e), std::forward<S>(permutation), check_policy::none());
}
        // When E exposes a data interface, the transposed strides are simply
        // the original strides reversed.
        template <class E, class S, class X, std::enable_if_t<has_data_interface<std::decay_t<E>>::value>* = nullptr>
        inline void compute_transposed_strides(E&& e, const S&, X& strides)
        {
            std::copy(e.strides().crbegin(), e.strides().crend(), strides.begin());
        }
        // Fallback overload for expressions without a data interface: derive
        // strides from the shape in the inverted default traversal layout.
        template <class E, class S, class X, std::enable_if_t<!has_data_interface<std::decay_t<E>>::value>* = nullptr>
        inline void compute_transposed_strides(E&&, const S& shape, X& strides)
        {
            // In the case where E does not have a data interface, the transposition
            // makes use of a flat storage adaptor that has layout XTENSOR_DEFAULT_TRAVERSAL
            // which should be the one inverted.
            layout_type l = transpose_layout(XTENSOR_DEFAULT_TRAVERSAL);
            compute_strides(shape, l, strides);
        }
}
    /**
     * Returns a transpose view by reversing the dimensions of xexpression e
     * (shape and strides are both reversed; no data is copied).
     * @param e the input expression
     */
    template <class E>
    inline auto transpose(E&& e) noexcept
    {
        using shape_type = xindex_type_t<typename std::decay_t<E>::shape_type>;
        // Reversed shape.
        shape_type shape;
        resize_container(shape, e.shape().size());
        std::copy(e.shape().crbegin(), e.shape().crend(), shape.begin());
        // Reversed (or recomputed, for expressions without a data interface)
        // strides.
        get_strides_t<shape_type> strides;
        resize_container(strides, e.shape().size());
        detail::compute_transposed_strides(e, shape, strides);
        // NOTE(review): this function is declared noexcept yet the container
        // resizes above may allocate — confirm the noexcept guarantee holds.
        layout_type new_layout = detail::transpose_layout_noexcept(e.layout());
        return strided_view(std::forward<E>(e), std::move(shape), std::move(strides), detail::get_offset<XTENSOR_DEFAULT_TRAVERSAL>(e), new_layout);
    }
    /**
     * Returns a transpose view by permuting the xexpression e with @p permutation.
     * @param e the input expression
     * @param permutation the sequence containing permutation
     * @param check_policy the check level (check_policy::full() or check_policy::none())
     * @tparam Tag selects the level of error checking on permutation vector defaults to check_policy::none.
     */
    template <class E, class S, class Tag>
    inline auto transpose(E&& e, S&& permutation, Tag check_policy)
    {
        // Tag dispatch selects the checked or unchecked implementation.
        return detail::transpose_impl(std::forward<E>(e), std::forward<S>(permutation), check_policy);
    }
    /// @cond DOXYGEN_INCLUDE_SFINAE
    // Overloads accepting a braced-init-list permutation, e.g.
    // transpose(e, {1, 0}). Old clang versions cannot deduce the built-in
    // array form, hence the initializer_list fallback.
    #ifdef X_OLD_CLANG
    template <class E, class I, class Tag = check_policy::none>
    inline auto transpose(E&& e, std::initializer_list<I> permutation, Tag check_policy = Tag())
    {
        dynamic_shape<I> perm(permutation);
        return detail::transpose_impl(std::forward<E>(e), std::move(perm), check_policy);
    }
    #else
    template <class E, class I, std::size_t N, class Tag = check_policy::none>
    inline auto transpose(E&& e, const I(&permutation)[N], Tag check_policy = Tag())
    {
        return detail::transpose_impl(std::forward<E>(e), permutation, check_policy);
    }
    #endif
    /// @endcond
    /************************************
     * ravel and flatten implementation *
     ************************************/
    template <class I, class CI>
    class xiterator_adaptor;
    /**
     * Returns a flatten view of the given expression. No copy is made.
     * @param e the input expression
     * @tparam L the layout used to read the elements of e. If no parameter
     * is specified, XTENSOR_DEFAULT_TRAVERSAL is used.
     * @tparam E the type of the expression
     */
    template <layout_type L, class E>
    inline auto ravel(E&& e)
    {
        using iterator = decltype(e.template begin<L>());
        using const_iterator = decltype(e.template cbegin<L>());
        using adaptor_type = xiterator_adaptor<iterator, const_iterator>;
        // A raw-pointer iterator means contiguous storage, so the view can
        // keep the requested layout; otherwise it degrades to dynamic.
        constexpr layout_type layout = std::is_pointer<iterator>::value ? L : layout_type::dynamic;
        using type = xtensor_view<adaptor_type, 1, layout, extension::get_expression_tag_t<E>>;
        return type(adaptor_type(e.template begin<L>(), e.template cbegin<L>(), e.size()), { e.size() });
    }
    /**
     * Returns a flatten view of the given expression. No copy is made. This
     * method is equivalent to ravel and is provided for API sameness with
     * Numpy.
     * @param e the input expression
     * @tparam L the layout used to read the elements of e. If no parameter
     * is specified, XTENSOR_DEFAULT_TRAVERSAL is used.
     * @tparam E the type of the expression
     * @sa ravel
     */
    template <layout_type L, class E>
    inline auto flatten(E&& e)
    {
        // Thin alias over ravel.
        return ravel<L>(std::forward<E>(e));
    }
    /**
     * @brief return indices that are non-zero in the flattened version of arr,
     * equivalent to nonzero(ravel<layout_type>(arr))[0];
     *
     * @param arr input array
     * @return indices that are non-zero in the flattened version of arr
     */
    template <layout_type L, class T>
    inline auto flatnonzero(const T& arr)
    {
        // nonzero() returns one index vector per dimension; the raveled view
        // is 1-D, so element [0] is the full answer.
        return nonzero(ravel<L>(arr))[0];
    }
    /*****************************
     * trim_zeros implementation *
     *****************************/
    /**
     * Trim zeros at beginning, end or both of 1D sequence.
     *
     * @param e input xexpression
     * @param direction string of either 'f' for trim from beginning, 'b' for trim from end
     * or 'fb' (default) for both.
     * @return returns a view without zeros at the beginning and end
     */
    template <class E>
    inline auto trim_zeros(E&& e, const std::string& direction)
    {
        XTENSOR_ASSERT_MSG(e.dimension() == 1, "Dimension for trim_zeros has to be 1.");
        // Half-open element range [begin, end) that survives the trim.
        std::ptrdiff_t begin = 0, end = static_cast<std::ptrdiff_t>(e.size());
        auto find_fun = [](const auto& i) {
            return i != 0;
        };
        if (direction.find("f") != std::string::npos)
        {
            // Front trim: first non-zero element from the left.
            begin = std::find_if(e.cbegin(), e.cend(), find_fun) - e.cbegin();
        }
        // The `begin != end` guard skips the back scan when the front scan
        // already consumed everything (all-zero input).
        if (direction.find("b") != std::string::npos && begin != end)
        {
            end -= std::find_if(e.crbegin(), e.crend(), find_fun) - e.crbegin();
        }
        return strided_view(std::forward<E>(e), { range(begin, end) });
    }
    /**************************
     * squeeze implementation *
     **************************/
    /**
     * Returns a squeeze view of the given expression. No copy is made.
     * Squeezing an expression removes dimensions of extent 1.
     *
     * @param e the input expression
     * @tparam E the type of the expression
     */
    template <class E>
    inline auto squeeze(E&& e)
    {
        dynamic_shape<std::size_t> new_shape;
        dynamic_shape<std::ptrdiff_t> new_strides;
        // Keep only axes whose extent is not 1.
        std::copy_if(e.shape().cbegin(), e.shape().cend(), std::back_inserter(new_shape),
            [](std::size_t i) { return i != 1; });
        decltype(auto) old_strides = detail::get_strides<XTENSOR_DEFAULT_LAYOUT>(e);
        // NOTE(review): strides are filtered by "!= 0", which assumes
        // extent-1 axes carry a zero stride and that no kept axis does; a
        // broadcast axis of extent > 1 also has stride 0 and would be dropped
        // here — confirm against get_strides' contract.
        std::copy_if(old_strides.cbegin(), old_strides.cend(), std::back_inserter(new_strides),
            [](std::ptrdiff_t i) { return i != 0; });
        return strided_view(std::forward<E>(e), std::move(new_shape), std::move(new_strides), 0, e.layout());
    }
namespace detail
{
        // Unchecked axis-list squeeze: removes exactly the axes listed in
        // `axis`, copying the remaining extents and strides in order. Assumes
        // axes are valid and refer to extent-1 dimensions (the full policy
        // overload verifies this).
        template <class E, class S>
        inline auto squeeze_impl(E&& e, S&& axis, check_policy::none)
        {
            std::size_t new_dim = e.dimension() - axis.size();
            dynamic_shape<std::size_t> new_shape(new_dim);
            dynamic_shape<std::ptrdiff_t> new_strides(new_dim);
            decltype(auto) old_strides = detail::get_strides<XTENSOR_DEFAULT_LAYOUT>(e);
            for (std::size_t i = 0, ix = 0; i < e.dimension(); ++i)
            {
                // Keep dimension i only if it is not listed in `axis`.
                if (axis.cend() == std::find(axis.cbegin(), axis.cend(), i))
                {
                    new_shape[ix] = e.shape()[i];
                    new_strides[ix++] = old_strides[i];
                }
            }
            return strided_view(std::forward<E>(e), std::move(new_shape), std::move(new_strides), 0, e.layout());
        }
template <class E, class S>
inline auto squeeze_impl(E&& e, S&& axis, check_policy::full)
{
for (auto ix : axis)
{
if (static_cast<std::size_t>(ix) > e.dimension())
{
XTENSOR_THROW(std::runtime_error, "Axis argument to squeeze > dimension of expression");
}
if (e.shape()[static_cast<std::size_t>(ix)] != 1)
{
XTENSOR_THROW(std::runtime_error, "Trying to squeeze axis != 1");
}
}
return squeeze_impl(std::forward<E>(e), std::forward<S>(axis), check_policy::none());
}
}
    /**
     * @brief Remove single-dimensional entries from the shape of an xexpression
     *
     * @param e input xexpression
     * @param axis integer or container of integers, select a subset of single-dimensional
     * entries of the shape.
     * @param check_policy select check_policy. With check_policy::full(), selecting an axis
     * which is greater than one will throw a runtime_error.
     */
    template <class E, class S, class Tag, std::enable_if_t<!std::is_integral<S>::value, int>>
    inline auto squeeze(E&& e, S&& axis, Tag check_policy)
    {
        return detail::squeeze_impl(std::forward<E>(e), std::forward<S>(axis), check_policy);
    }
    /// @cond DOXYGEN_INCLUDE_SFINAE
    // Braced-init-list overloads, e.g. squeeze(e, {0, 2}); see the transpose
    // overloads above for the old-clang split.
    #ifdef X_OLD_CLANG
    template <class E, class I, class Tag = check_policy::none>
    inline auto squeeze(E&& e, std::initializer_list<I> axis, Tag check_policy = Tag())
    {
        dynamic_shape<I> ax(axis);
        return detail::squeeze_impl(std::forward<E>(e), std::move(ax), check_policy);
    }
    #else
    template <class E, class I, std::size_t N, class Tag = check_policy::none>
    inline auto squeeze(E&& e, const I(&axis)[N], Tag check_policy = Tag())
    {
        using arr_t = std::array<I, N>;
        return detail::squeeze_impl(std::forward<E>(e), xtl::forward_sequence<arr_t, decltype(axis)>(axis), check_policy);
    }
    #endif
    // Single-axis convenience overload: wraps the axis in a one-element array.
    template <class E, class Tag = check_policy::none>
    inline auto squeeze(E&& e, std::size_t axis, Tag check_policy = Tag())
    {
        return squeeze(std::forward<E>(e), std::array<std::size_t, 1>{ axis }, check_policy);
    }
    /// @endcond
/******************************
* expand_dims implementation *
******************************/
/**
* @brief Expand the shape of an xexpression.
*
* Insert a new axis that will appear at the axis position in the expanded array shape.
* This will return a ``strided_view`` with a ``xt::newaxis()`` at the indicated axis.
*
* @param e input xexpression
* @param axis axis to expand
* @return returns a ``strided_view`` with expanded dimension
*/
template <class E>
inline auto expand_dims(E&& e, std::size_t axis)
{
xstrided_slice_vector sv(e.dimension() + 1, all());
sv[axis] = newaxis();
return strided_view(std::forward<E>(e), std::move(sv));
}
    /*****************************
     * atleast_Nd implementation *
     *****************************/
    /**
     * Expand dimensions of xexpression to at least `N`
     *
     * This adds ``newaxis()`` slices to a ``strided_view`` until
     * the dimension of the view reaches at least `N`.
     * Note: dimensions are added equally at the beginning and the end.
     * For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1).
     *
     * @param e input xexpression
     * @tparam N the number of requested dimensions
     * @return ``strided_view`` with expanded dimensions
     */
    template <std::size_t N, class E>
    inline auto atleast_Nd(E&& e)
    {
        xstrided_slice_vector sv((std::max)(e.dimension(), N), all());
        if (e.dimension() < N)
        {
            // Number of new axes placed in front; the remainder goes after
            // the original dimensions. The rounded ratio splits the missing
            // axes proportionally between front and back.
            std::size_t i = 0;
            std::size_t end = static_cast<std::size_t>(std::round(double(N - e.dimension()) / double(N)));
            for (; i < end; ++i)
            {
                sv[i] = newaxis();
            }
            // Skip over the slots that keep the original axes (all()).
            i += e.dimension();
            for (; i < N; ++i)
            {
                sv[i] = newaxis();
            }
        }
        return strided_view(std::forward<E>(e), std::move(sv));
    }
    /**
     * Expand to at least 1D
     * @sa atleast_Nd
     */
    template <class E>
    inline auto atleast_1d(E&& e)
    {
        return atleast_Nd<1>(std::forward<E>(e));
    }
    /**
     * Expand to at least 2D
     * @sa atleast_Nd
     */
    template <class E>
    inline auto atleast_2d(E&& e)
    {
        return atleast_Nd<2>(std::forward<E>(e));
    }
    /**
     * Expand to at least 3D
     * @sa atleast_Nd
     */
    template <class E>
    inline auto atleast_3d(E&& e)
    {
        return atleast_Nd<3>(std::forward<E>(e));
    }
/************************
* split implementation *
************************/
/**
* @brief Split xexpression along axis into subexpressions
*
* This splits an xexpression along the axis in `n` equal parts and
* returns a vector of ``strided_view``.
* Calling split with axis > dimension of e or a `n` that does not result in
* an equal division of the xexpression will throw a runtime_error.
*
* @param e input xexpression
* @param n number of elements to return
* @param axis axis along which to split the expression
*/
template <class E>
inline auto split(E& e, std::size_t n, std::size_t axis)
{
if (axis >= e.dimension())
{
XTENSOR_THROW(std::runtime_error, "Split along axis > dimension.");
}
std::size_t ax_sz = e.shape()[axis];
xstrided_slice_vector sv(e.dimension(), all());
std::size_t step = ax_sz / n;
std::size_t rest = ax_sz % n;
if (rest)
{
XTENSOR_THROW(std::runtime_error, "Split does not result in equal division.");
}
std::vector<decltype(strided_view(e, sv))> result;
for (std::size_t i = 0; i < n; ++i)
{
sv[axis] = range(i * step, (i + 1) * step);
result.emplace_back(strided_view(e, sv));
}
return result;
}
    /**
     * @brief Split an xexpression into subexpressions horizontally (column-wise)
     *
     * This method is equivalent to ``split(e, n, 1)``.
     *
     * @param e input xexpression
     * @param n number of elements to return
     */
    template <class E>
    inline auto hsplit(E& e, std::size_t n)
    {
        return split(e, n, std::size_t(1));
    }
    /**
     * @brief Split an xexpression into subexpressions vertically (row-wise)
     *
     * This method is equivalent to ``split(e, n, 0)``.
     *
     * @param e input xexpression
     * @param n number of elements to return
     */
    template <class E>
    inline auto vsplit(E& e, std::size_t n)
    {
        return split(e, n, std::size_t(0));
    }
    /***********************
     * flip implementation *
     ***********************/
    /**
     * @brief Reverse the order of elements in an xexpression along the given axis.
     * Note: A NumPy/Matlab style `flipud(arr)` is equivalent to `xt::flip(arr, 0)`,
     * `fliplr(arr)` to `xt::flip(arr, 1)`.
     *
     * @param e the input xexpression
     * @param axis the axis along which elements should be reversed
     *
     * @return returns a view with the result of the flip
     */
    template <class E>
    inline auto flip(E&& e, std::size_t axis)
    {
        using shape_type = xindex_type_t<typename std::decay_t<E>::shape_type>;
        // Shape is unchanged by a flip.
        shape_type shape;
        resize_container(shape, e.shape().size());
        std::copy(e.shape().cbegin(), e.shape().cend(), shape.begin());
        get_strides_t<shape_type> strides;
        decltype(auto) old_strides = detail::get_strides<XTENSOR_DEFAULT_LAYOUT>(e);
        resize_container(strides, old_strides.size());
        std::copy(old_strides.cbegin(), old_strides.cend(), strides.begin());
        // Negating the stride walks the axis backwards ...
        strides[axis] *= -1;
        // ... and the offset moves the view's origin to what was the last
        // element along that axis.
        std::size_t offset = static_cast<std::size_t>(static_cast<std::ptrdiff_t>(e.data_offset()) + old_strides[axis] * (static_cast<std::ptrdiff_t>(e.shape()[axis]) - 1));
        return strided_view(std::forward<E>(e), std::move(shape), std::move(strides), offset);
    }
    /************************
     * rot90 implementation *
     ************************/
    // One specialization per residue of N mod 4 (computed by rot90 below).
    template <std::ptrdiff_t N>
    struct rot90_impl;
    // 0 quarter-turns: identity.
    template <>
    struct rot90_impl<0>
    {
        template <class E>
        inline auto operator()(E&& e, const std::array<std::size_t, 2>& /*axes*/)
        {
            return std::forward<E>(e);
        }
    };
    // 1 quarter-turn: flip the second axis, then swap the two axes.
    template <>
    struct rot90_impl<1>
    {
        template <class E>
        inline auto operator()(E&& e, const std::array<std::size_t, 2>& axes)
        {
            using std::swap;
            // Identity permutation with the two rotation axes exchanged.
            dynamic_shape<std::ptrdiff_t> axes_list(e.shape().size());
            std::iota(axes_list.begin(), axes_list.end(), 0);
            swap(axes_list[axes[0]], axes_list[axes[1]]);
            return transpose(flip(std::forward<E>(e), axes[1]), std::move(axes_list));
        }
    };
    // 2 quarter-turns: flip both axes (a 180-degree rotation).
    template <>
    struct rot90_impl<2>
    {
        template <class E>
        inline auto operator()(E&& e, const std::array<std::size_t, 2>& axes)
        {
            return flip(flip(std::forward<E>(e), axes[0]), axes[1]);
        }
    };
    // 3 quarter-turns: swap the two axes, then flip the second one (the
    // mirror image of the <1> case).
    template <>
    struct rot90_impl<3>
    {
        template <class E>
        inline auto operator()(E&& e, const std::array<std::size_t, 2>& axes)
        {
            using std::swap;
            dynamic_shape<std::ptrdiff_t> axes_list(e.shape().size());
            std::iota(axes_list.begin(), axes_list.end(), 0);
            swap(axes_list[axes[0]], axes_list[axes[1]]);
            return flip(transpose(std::forward<E>(e), std::move(axes_list)), axes[1]);
        }
    };
    /**
     * @brief Rotate an array by 90 degrees in the plane specified by axes.
     * Rotation direction is from the first towards the second axis.
     *
     * @param e the input xexpression
     * @param axes the array is rotated in the plane defined by the axes. Axes must be different.
     * @tparam N number of times the array is rotated by 90 degrees. Default is 1.
     *
     * @return returns a view with the result of the rotation
     */
    template <std::ptrdiff_t N, class E>
    inline auto rot90(E&& e, const std::array<std::ptrdiff_t, 2>& axes)
    {
        auto ndim = static_cast<std::ptrdiff_t>(e.shape().size());
        // Reject identical axes, including the case where a negative and a
        // positive index name the same axis (their difference is then ndim).
        if (axes[0] == axes[1] || std::abs(axes[0] - axes[1]) == ndim)
        {
            XTENSOR_THROW(std::runtime_error, "Axes must be different");
        }
        auto norm_axes = forward_normalize<std::array<std::size_t, 2>>(e, axes);
        // Reduce N to a non-negative residue mod 4 so negative rotation
        // counts map onto the four rot90_impl specializations.
        constexpr std::ptrdiff_t n = (4 + (N % 4)) % 4;
        return rot90_impl<n>()(std::forward<E>(e), norm_axes);
    }
/***********************
* roll implementation *
***********************/
/**
* @brief Roll an expression.
* The expression is flatten before shifting, after which the original
* shape is restore. Elements that roll beyond the last position are
* re-introduced at the first. This function does not change the input
* expression.
*
* @param e the input xexpression
* @param shift the number of places by which elements are shifted
* @param axis the axis along which elements are shifted.
*
* @return a roll of the input expression
*/
template<class E>
inline auto roll(E&& e, std::ptrdiff_t shift)
{
auto cpy = empty_like(e);
auto flat_size = std::accumulate(cpy.shape().begin(), cpy.shape().end(), 1L, std::multiplies<std::size_t>());
while(shift < 0)
{
shift += flat_size;
}
shift %= flat_size;
std::copy(e.begin(), e.end() - shift,
std::copy(e.end() - shift, e.end(), cpy.begin()));
return cpy;
}
    namespace detail
    {
        /**
         * Algorithm adapted from pythran/pythonic/numpy/roll.hpp
         *
         * Recursively copies `from` (the source, traversed dimension by
         * dimension starting at dimension M) into `to` (the destination
         * output iterator), rotating the dimension equal to `axis` by
         * `shift` positions. `shape` is the full shape; `offset` is the
         * number of elements spanned by one step along dimension M.
         * Returns the advanced destination iterator.
         */
        template < class To, class From, class S>
        To roll(To to, From from, std::ptrdiff_t shift, std::size_t axis, S const& shape, std::size_t M)
        {
            std::ptrdiff_t dim = std::ptrdiff_t(shape[M]);
            std::ptrdiff_t offset = std::accumulate(shape.begin() + M + 1, shape.end(), std::ptrdiff_t(1), std::multiplies<std::ptrdiff_t>());
            if(shape.size() == M + 1)
            {
                // Base case: innermost dimension — copy elements directly.
                if (axis == M)
                {
                    // Rotated copy: tail [dim - shift, dim) first, then head.
                    const auto split = from + (dim - shift) * offset;
                    for(auto iter = split, end = from + dim * offset; iter != end; iter += offset, ++to)
                    {
                        *to = *iter;
                    }
                    for(auto iter = from, end = split; iter != end; iter += offset, ++to)
                    {
                        *to = *iter;
                    }
                }
                else
                {
                    // Straight copy of the innermost dimension.
                    for(auto iter = from, end = from + dim * offset; iter != end; iter += offset, ++to)
                    {
                        *to = *iter;
                    }
                }
            }
            else
            {
                // Recursive case: iterate slices of dimension M, recursing
                // into dimension M + 1 for each.
                if (axis == M)
                {
                    // Emit the rotated tail slices before the head slices.
                    const auto split = from + (dim - shift) * offset;
                    for(auto iter = split, end = from + dim * offset; iter != end; iter += offset)
                    {
                        to = roll(to, iter, shift, axis, shape, M + 1);
                    }
                    for(auto iter = from, end = split; iter != end; iter += offset)
                    {
                        to = roll(to, iter, shift, axis, shape, M + 1);
                    }
                }
                else
                {
                    for (auto iter = from, end = from + dim * offset; iter != end; iter += offset)
                    {
                        to = roll(to, iter, shift, axis, shape, M + 1);
                    }
                }
            }
            return to;
        }
    }
/**
* @brief Roll an expression along a given axis.
* Elements that roll beyond the last position are re-introduced at the first.
* This function does not change the input expression.
*
* @param e the input xexpression
* @param shift the number of places by which elements are shifted
* @param axis the axis along which elements are shifted.
*
* @return a roll of the input expression
*/
template<class E>
inline auto roll(E&& e, std::ptrdiff_t shift, std::ptrdiff_t axis)
{
auto cpy = empty_like(e);
auto const& shape = cpy.shape();
std::size_t saxis = static_cast<std::size_t>(axis);
if(axis < 0)
{
axis += std::ptrdiff_t(cpy.dimension());
}
if(saxis >= cpy.dimension() || axis < 0)
{
XTENSOR_THROW(std::runtime_error, "axis is no within shape dimension.");
}
const auto axis_dim = static_cast<std::ptrdiff_t>(shape[saxis]);
while(shift < 0)
{
shift += axis_dim;
}
detail::roll(cpy.begin(), e.begin(), shift, saxis, shape, 0);
return cpy;
}
    /****************************
     * repeat implementation *
     ****************************/
    namespace detail
    {
        // Wraps construction of the xrepeat view so the public repeat()
        // overloads can forward both lvalue and rvalue repeat containers.
        template<class E, class R>
        inline auto make_xrepeat(E&& e, R&& r, typename std::decay_t<E>::size_type axis)
        {
            return xrepeat<E, R>(std::forward<E>(e), std::forward<R>(r), axis);
        }
    }
template <class E>
inline auto repeat(E&& e, std::size_t repeats, std::size_t axis)
{
const auto casted_axis = static_cast<typename std::decay_t<E>::size_type>(axis);
std::vector<std::size_t> broadcasted_repeats(e.shape(casted_axis));
std::fill(broadcasted_repeats.begin(), broadcasted_repeats.end(), repeats);
return repeat(std::forward<E>(e), std::move(broadcasted_repeats), axis);
}
    // Repeats slice i along `axis` repeats[i] times; `repeats` is copied
    // into the view. Throws if the count vector does not match the axis
    // extent.
    template <class E>
    inline auto repeat(E&& e, const std::vector<std::size_t>& repeats, std::size_t axis)
    {
        const auto casted_axis = static_cast<typename std::decay_t<E>::size_type>(axis);
        if (repeats.size() != e.shape(casted_axis))
        {
            XTENSOR_THROW(std::invalid_argument, "repeats must have the same size as the specified axis");
        }
        return detail::make_xrepeat(std::forward<E>(e), repeats, casted_axis);
    }
    // Rvalue overload of the vector version: the count vector is moved into
    // the view instead of copied.
    template <class E>
    inline auto repeat(E&& e, std::vector<std::size_t>&& repeats, std::size_t axis)
    {
        const auto casted_axis = static_cast<typename std::decay_t<E>::size_type>(axis);
        if (repeats.size() != e.shape(casted_axis))
        {
            XTENSOR_THROW(std::invalid_argument, "repeats must have the same size as the specified axis");
        }
        return detail::make_xrepeat(std::forward<E>(e), std::move(repeats), casted_axis);
    }
}
#endif
|
// Generated by using Rcpp::compileAttributes() -> do not edit by hand
// Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#include <Rcpp.h>
using namespace Rcpp;
// initFactories
// Generated Rcpp wrapper: unmarshals the SEXP argument and forwards to the
// C++ initFactories(); returns R NULL because the wrapped function is void.
void initFactories(std::string data_folder);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_initFactories(SEXP data_folderSEXP) {
BEGIN_RCPP
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< std::string >::type data_folder(data_folderSEXP);
    initFactories(data_folder);
    return R_NilValue;
END_RCPP
}
// solveFSP
// Generated Rcpp wrapper: converts the five SEXP arguments, calls solveFSP()
// and wraps the resulting List back into a SEXP.
List solveFSP(std::string mh, Rcpp::CharacterVector rproblem, long seed, Rcpp::CharacterVector rparams, bool verbose);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_solveFSP(SEXP mhSEXP, SEXP rproblemSEXP, SEXP seedSEXP, SEXP rparamsSEXP, SEXP verboseSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< std::string >::type mh(mhSEXP);
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type rproblem(rproblemSEXP);
    Rcpp::traits::input_parameter< long >::type seed(seedSEXP);
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type rparams(rparamsSEXP);
    Rcpp::traits::input_parameter< bool >::type verbose(verboseSEXP);
    rcpp_result_gen = Rcpp::wrap(solveFSP(mh, rproblem, seed, rparams, verbose));
    return rcpp_result_gen;
END_RCPP
}
// sampleSolutionStatisticsFLA
// Generated Rcpp wrapper for sampleSolutionStatisticsFLA(); see the generator
// token at the top of this file — do not edit by hand.
List sampleSolutionStatisticsFLA(std::string dataFolder, Rcpp::CharacterVector rproblem, long noSamples, long seed);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_sampleSolutionStatisticsFLA(SEXP dataFolderSEXP, SEXP rproblemSEXP, SEXP noSamplesSEXP, SEXP seedSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< std::string >::type dataFolder(dataFolderSEXP);
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type rproblem(rproblemSEXP);
    Rcpp::traits::input_parameter< long >::type noSamples(noSamplesSEXP);
    Rcpp::traits::input_parameter< long >::type seed(seedSEXP);
    rcpp_result_gen = Rcpp::wrap(sampleSolutionStatisticsFLA(dataFolder, rproblem, noSamples, seed));
    return rcpp_result_gen;
END_RCPP
}
// sampleRandomWalk
// Generated Rcpp wrapper for sampleRandomWalk(); the returned
// std::vector<double> is converted to an R numeric vector by Rcpp::wrap.
std::vector<double> sampleRandomWalk(std::string dataFolder, Rcpp::CharacterVector rproblem, int noSamples, std::string samplingStrat, long seed);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_sampleRandomWalk(SEXP dataFolderSEXP, SEXP rproblemSEXP, SEXP noSamplesSEXP, SEXP samplingStratSEXP, SEXP seedSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< std::string >::type dataFolder(dataFolderSEXP);
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type rproblem(rproblemSEXP);
    Rcpp::traits::input_parameter< int >::type noSamples(noSamplesSEXP);
    Rcpp::traits::input_parameter< std::string >::type samplingStrat(samplingStratSEXP);
    Rcpp::traits::input_parameter< long >::type seed(seedSEXP);
    rcpp_result_gen = Rcpp::wrap(sampleRandomWalk(dataFolder, rproblem, noSamples, samplingStrat, seed));
    return rcpp_result_gen;
END_RCPP
}
// enumerateAllFitness
// Auto-generated Rcpp glue: exhaustively evaluates all solutions of the given
// problem (see the C++ enumerateAllFitness) and returns their fitness values.
std::vector<double> enumerateAllFitness(Rcpp::CharacterVector rproblem);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_enumerateAllFitness(SEXP rproblemSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type rproblem(rproblemSEXP);
    rcpp_result_gen = Rcpp::wrap(enumerateAllFitness(rproblem));
    return rcpp_result_gen;
END_RCPP
}
// enumerateSolutions
// Auto-generated Rcpp glue: forwards an instance description and problem spec
// to the C++ enumerateSolutions and wraps the resulting List for R.
List enumerateSolutions(Rcpp::List fspInstance, Rcpp::CharacterVector fspProblem);
RcppExport SEXP _flowshopNehBasedHeuristicRecommendation_enumerateSolutions(SEXP fspInstanceSEXP, SEXP fspProblemSEXP) {
BEGIN_RCPP
    Rcpp::RObject rcpp_result_gen;
    Rcpp::RNGScope rcpp_rngScope_gen;
    Rcpp::traits::input_parameter< Rcpp::List >::type fspInstance(fspInstanceSEXP);
    Rcpp::traits::input_parameter< Rcpp::CharacterVector >::type fspProblem(fspProblemSEXP);
    rcpp_result_gen = Rcpp::wrap(enumerateSolutions(fspInstance, fspProblem));
    return rcpp_result_gen;
END_RCPP
}
// Registration table for R's dynamic loader: maps each exported symbol name to
// its wrapper function and its argument count. Terminated by a NULL sentinel.
// Argument counts must match the SEXP parameter counts of the wrappers above.
static const R_CallMethodDef CallEntries[] = {
    {"_flowshopNehBasedHeuristicRecommendation_initFactories", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_initFactories, 1},
    {"_flowshopNehBasedHeuristicRecommendation_solveFSP", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_solveFSP, 5},
    {"_flowshopNehBasedHeuristicRecommendation_sampleSolutionStatisticsFLA", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_sampleSolutionStatisticsFLA, 4},
    {"_flowshopNehBasedHeuristicRecommendation_sampleRandomWalk", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_sampleRandomWalk, 5},
    {"_flowshopNehBasedHeuristicRecommendation_enumerateAllFitness", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_enumerateAllFitness, 1},
    {"_flowshopNehBasedHeuristicRecommendation_enumerateSolutions", (DL_FUNC) &_flowshopNehBasedHeuristicRecommendation_enumerateSolutions, 2},
    {NULL, NULL, 0}
};
// Package initialization entry point, invoked by R when the shared library is
// loaded. Registers the call table and disables dynamic symbol lookup so only
// registered routines are callable.
RcppExport void R_init_flowshopNehBasedHeuristicRecommendation(DllInfo *dll) {
    R_registerRoutines(dll, NULL, CallEntries, NULL, NULL);
    R_useDynamicSymbols(dll, FALSE);
}
|
#include "json.h"
#include "ui_json.h"
#include <QNetworkRequest>
#include <QJsonDocument>
#include <QJsonArray>
#include <QJsonObject>
#include <QVariantMap>
#include <QDebug>
/// Constructs the widget: builds the Designer UI, the network-access manager
/// (parented to this widget so Qt frees it), and the download buffer.
json::json(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::json),  // FIX: the missing comma after this initializer broke compilation
    mNetManager(new QNetworkAccessManager(this)),
    mNetReply(nullptr),
    mDataBuffer(new QByteArray)
{
    ui->setupUi(this);
}
/// Destructor. Frees the UI and the download buffer.
json::~json()
{
    delete ui;
    // FIX: mDataBuffer is allocated with `new QByteArray` in the constructor
    // but was never released, leaking one QByteArray per widget instance.
    // (mNetManager needs no delete: it is parented to this widget.)
    delete mDataBuffer;
}
/// Starts an asynchronous GET of the endpoint and wires the reply's signals to
/// this widget's slots. The reply object is owned by mNetManager.
void json::on_downloadBtn_clicked()
{
    const QUrl API_ENDPOINT("https://www.qt.io/company");
    QNetworkRequest request;
    request.setUrl(API_ENDPOINT);
    mNetReply = mNetManager->get(request);
    // FIX: the slots are members of this class ('json'), not 'Widget'; the old
    // &Widget::... pointer-to-member expressions did not compile.
    connect(mNetReply, &QIODevice::readyRead, this, &json::dataReadyRead);
    connect(mNetReply, &QNetworkReply::finished, this, &json::dataReadFinished);
}
/// Slot for QIODevice::readyRead: drains whatever bytes have arrived so far
/// into the accumulation buffer (the payload may arrive in several chunks).
void json::dataReadyRead()
{
    mDataBuffer->append(mNetReply->readAll());
}
void json::dataReadFinished()
{
if( mNetReply->error())
{
qDebug() << "Error : " << mNetReply->errorString();
}else
{
qDebug() << "Data fetch finished : " << QString(*mDataBuffer);
QJsonDocument doc = QJsonDocument::fromJson(*mDataBuffer);
QJsonArray array = doc.array();
for ( int i = 0; i < array.size(); i++)
{
QJsonObject object = array.at(i).toObject();
QVariantMap map = object.toVariantMap();
QString title = map["title"].toString();
ui->listWidget->addItem("["+ QString::number(i+1) + "] " + title);
}
}
}
|
#include "stm32f051/gpio.hpp"
#include "stm32f051/rcc.hpp"
using namespace JL;
// One-time board setup: configure the system clock from HSI with PLL x12 and
// undivided AHB/APB prescalers, enable the clocks for GPIO ports A-C, then set
// pins C8 and C9 as high-speed outputs with no pull resistor. Clock enable
// must precede pin configuration (registers are inaccessible otherwise).
static void configGpio(void) {
    Rcc::hsiConfig(pllMul12, ahbPre1, apbPre1);
    ClockPack<PortA::Clock, PortB::Clock, PortC::Clock>::Enable();
    PinPack<PinC8, PinC9>::Config(modeOutput, speedHigh, pupdNone);
}
// Firmware entry point: set up clocks/GPIO, drive PC8 high, then toggle both
// PC8 and PC9 every second forever. If PC9 starts low after reset (presumably
// the case - confirm against the startup code), the two pins blink in
// anti-phase. The trailing return is unreachable but keeps main well-formed.
int main(void) {
    configGpio();
    PinC8::Set();
    for (;;) {
        PinPack<PinC8, PinC9>::Toggle();
        DelayMs(1000);
    }
    return 0;
}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
#include "knowhere/index/vector_index/ConfAdapter.h"
#include <cmath>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "knowhere/index/vector_index/helpers/IndexParameter.h"
#ifdef MILVUS_GPU_VERSION
#include "faiss/gpu/utils/DeviceUtils.h"
#endif
namespace milvus {
namespace knowhere {
// Shared bounds used by all config adapters in this translation unit.
static const int64_t MIN_NLIST = 1;
static const int64_t MAX_NLIST = 1LL << 20;
static const int64_t MIN_NPROBE = 1;
static const int64_t MAX_NPROBE = MAX_NLIST;
static const int64_t DEFAULT_MIN_DIM = 1;
static const int64_t DEFAULT_MAX_DIM = 32768;
static const int64_t DEFAULT_MIN_ROWS = 1;  // minimum size for build index
static const int64_t DEFAULT_MAX_ROWS = 50000000;
// Metrics accepted by the float-vector adapters (binary adapters define their own).
static const std::vector<std::string> METRICS{knowhere::Metric::L2, knowhere::Metric::IP};
// Validation helpers. Each macro expects a json-like `oricfg` in scope and
// makes the ENCLOSING function return false when the key is missing, has the
// wrong JSON type, or fails the range / membership test. Both bounds are
// inclusive in CheckIntByRange.
#define CheckIntByRange(key, min, max)                                                                   \
    if (!oricfg.contains(key) || !oricfg[key].is_number_integer() || oricfg[key].get<int64_t>() > max || \
        oricfg[key].get<int64_t>() < min) {                                                              \
        return false;                                                                                    \
    }
#define CheckIntByValues(key, container)                                                                 \
    if (!oricfg.contains(key) || !oricfg[key].is_number_integer()) {                                     \
        return false;                                                                                    \
    } else {                                                                                             \
        auto finder = std::find(std::begin(container), std::end(container), oricfg[key].get<int64_t>()); \
        if (finder == std::end(container)) {                                                             \
            return false;                                                                                \
        }                                                                                                \
    }
#define CheckStrByValues(key, container)                                                                     \
    if (!oricfg.contains(key) || !oricfg[key].is_string()) {                                                 \
        return false;                                                                                        \
    } else {                                                                                                 \
        auto finder = std::find(std::begin(container), std::end(container), oricfg[key].get<std::string>()); \
        if (finder == std::end(container)) {                                                                 \
            return false;                                                                                    \
        }                                                                                                    \
    }
// Base training-config validation: dimension within [1, 32768] and a metric
// in the float METRICS list. Returns false on any violation (via the macros).
bool
ConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    CheckIntByRange(knowhere::meta::DIM, DEFAULT_MIN_DIM, DEFAULT_MAX_DIM);
    CheckStrByValues(knowhere::Metric::TYPE, METRICS);
    return true;
}
// Base search-config validation: topk within [0, 16384]. Note the lower bound
// is DEFAULT_MIN_K - 1 = 0, i.e. topk == 0 is accepted - presumably
// intentional (e.g. range-search style queries); confirm before tightening.
bool
ConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    const int64_t DEFAULT_MIN_K = 1;
    const int64_t DEFAULT_MAX_K = 16384;
    CheckIntByRange(knowhere::meta::TOPK, DEFAULT_MIN_K - 1, DEFAULT_MAX_K);
    return true;
}
// Clamp a user-supplied IVF `nlist` to the dataset size: if the requested
// nlist would leave fewer points per centroid than the typical ratio
// (TYPICAL_COUNT rows per PER_NLIST lists), scale it down proportionally,
// but never below 1. Returns the (possibly adjusted) nlist.
int64_t
MatchNlist(int64_t size, int64_t nlist) {
    const int64_t TYPICAL_COUNT = 1000000;
    const int64_t PER_NLIST = 16384;
    if (nlist * TYPICAL_COUNT > size * PER_NLIST) {
        // nlist is too large, adjust to a proper value.
        // FIX: use an int64_t literal instead of 1L - `long` is 32-bit on
        // LLP64 targets (e.g. Windows), so std::max(1L, int64_t) would not
        // compile there due to mismatched argument types.
        nlist = std::max(static_cast<int64_t>(1), size * PER_NLIST / TYPICAL_COUNT);
    }
    return nlist;
}
// IVF training validation: nlist in [1, 2^20], row count in bounds, then
// auto-tunes nlist down via MatchNlist so small datasets don't over-partition.
// Note `nq` actually holds the ROW count here despite its name.
bool
IVFConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    CheckIntByRange(knowhere::IndexParams::nlist, MIN_NLIST, MAX_NLIST);
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    // int64_t nlist = oricfg[knowhere::IndexParams::nlist];
    // CheckIntByRange(knowhere::meta::ROWS, nlist, DEFAULT_MAX_ROWS);
    // auto tune params
    auto nq = oricfg[knowhere::meta::ROWS].get<int64_t>();
    auto nlist = oricfg[knowhere::IndexParams::nlist].get<int64_t>();
    oricfg[knowhere::IndexParams::nlist] = MatchNlist(nq, nlist);
    // Best Practice
    // static int64_t MIN_POINTS_PER_CENTROID = 40;
    // static int64_t MAX_POINTS_PER_CENTROID = 256;
    // CheckIntByRange(knowhere::meta::ROWS, MIN_POINTS_PER_CENTROID * nlist, MAX_POINTS_PER_CENTROID * nlist);
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// IVF search validation: nprobe within [1, MAX_NPROBE]; on GPU the upper
// bound is further capped by faiss's k-selection limit for the device build.
bool
IVFConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    int64_t max_nprobe = MAX_NPROBE;
#ifdef MILVUS_GPU_VERSION
    if (mode == IndexMode::MODE_GPU) {
        max_nprobe = faiss::gpu::getMaxKSelection();
    }
#endif
    CheckIntByRange(knowhere::IndexParams::nprobe, MIN_NPROBE, max_nprobe);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// IVF-SQ training validation: forces nbits to 8 (the only supported scalar
// quantizer width here) and defers everything else to the IVF checks.
bool
IVFSQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    const int64_t DEFAULT_NBITS = 8;
    oricfg[knowhere::IndexParams::nbits] = DEFAULT_NBITS;
    return IVFConfAdapter::CheckTrain(oricfg, mode);
}
// IVF-PQ training validation: fixes nbits at 8, validates metric/dim/rows/
// nlist, auto-tunes nlist, then checks that the sub-quantizer count `m` is
// usable for `dimension` (possibly demoting GPU mode to CPU inside GetValidM).
bool
IVFPQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    const int64_t DEFAULT_NBITS = 8;
    oricfg[knowhere::IndexParams::nbits] = DEFAULT_NBITS;
    CheckStrByValues(knowhere::Metric::TYPE, METRICS);
    CheckIntByRange(knowhere::meta::DIM, DEFAULT_MIN_DIM, DEFAULT_MAX_DIM);
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::nlist, MIN_NLIST, MAX_NLIST);
    // int64_t nlist = oricfg[knowhere::IndexParams::nlist];
    // CheckIntByRange(knowhere::meta::ROWS, nlist, DEFAULT_MAX_ROWS);
    // auto tune params
    oricfg[knowhere::IndexParams::nlist] =
        MatchNlist(oricfg[knowhere::meta::ROWS].get<int64_t>(), oricfg[knowhere::IndexParams::nlist].get<int64_t>());
    auto m = oricfg[knowhere::IndexParams::m].get<int64_t>();
    auto dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
    // Best Practice
    // static int64_t MIN_POINTS_PER_CENTROID = 40;
    // static int64_t MAX_POINTS_PER_CENTROID = 256;
    // CheckIntByRange(knowhere::meta::ROWS, MIN_POINTS_PER_CENTROID * nlist, MAX_POINTS_PER_CENTROID * nlist);
    /*std::vector<int64_t> resset;
     IVFPQConfAdapter::GetValidCPUM(dimension, resset);*/
    // NOTE: GetValidM takes `mode` by reference, but `ivfpq_mode` is a local
    // copy, so any GPU->CPU demotion is not visible to the caller here.
    IndexMode ivfpq_mode = mode;
    return GetValidM(dimension, m, ivfpq_mode);
}
// Validate sub-quantizer count `m` against `dimension` for the given mode.
// In GPU builds, an m unsupported on GPU demotes `mode` to CPU (via the
// reference parameter) and then applies the CPU rule (dimension % m == 0).
// Returns false only when the CPU rule also fails.
bool
IVFPQConfAdapter::GetValidM(int64_t dimension, int64_t m, IndexMode& mode) {
#ifdef MILVUS_GPU_VERSION
    if (mode == knowhere::IndexMode::MODE_GPU && !IVFPQConfAdapter::GetValidGPUM(dimension, m)) {
        mode = knowhere::IndexMode::MODE_CPU;
    }
#endif
    if (mode == knowhere::IndexMode::MODE_CPU && !IVFPQConfAdapter::GetValidCPUM(dimension, m)) {
        return false;
    }
    return true;
}
// GPU-side feasibility check for IVF-PQ: both the sub-quantizer count `m` and
// the per-sub-quantizer dimension (dimension / m, integer division) must be in
// faiss's supported lists. NOTE: divisibility of dimension by m is NOT checked
// here - e.g. (dimension=100, m=32) passes because 100/32 truncates to 3;
// presumably callers rely on the CPU check for that - confirm before reuse.
bool
IVFPQConfAdapter::GetValidGPUM(int64_t dimension, int64_t m) {
    /*
     * Faiss 1.6
     * Only 1, 2, 3, 4, 6, 8, 10, 12, 16, 20, 24, 28, 32 dims per sub-quantizer are currently supported with
     * no precomputed codes. Precomputed codes supports any number of dimensions, but will involve memory overheads.
     */
    static const std::vector<int64_t> support_dim_per_subquantizer{32, 28, 24, 20, 16, 12, 10, 8, 6, 4, 3, 2, 1};
    static const std::vector<int64_t> support_subquantizer{96, 64, 56, 48, 40, 32, 28, 24, 20, 16, 12, 8, 4, 3, 2, 1};
    int64_t sub_dim = dimension / m;
    return (std::find(std::begin(support_subquantizer), std::end(support_subquantizer), m) !=
            support_subquantizer.end()) &&
           (std::find(std::begin(support_dim_per_subquantizer), std::end(support_dim_per_subquantizer), sub_dim) !=
            support_dim_per_subquantizer.end());
    /*resset.clear();
    for (const auto& dimperquantizer : support_dim_per_subquantizer) {
        if (!(dimension % dimperquantizer)) {
            auto subquantzier_num = dimension / dimperquantizer;
            auto finder = std::find(support_subquantizer.begin(), support_subquantizer.end(), subquantzier_num);
            if (finder != support_subquantizer.end()) {
                resset.push_back(subquantzier_num);
            }
        }
    }*/
}
// CPU-side feasibility check for IVF-PQ: the vector dimension must divide
// evenly into `m` sub-vectors.
bool
IVFPQConfAdapter::GetValidCPUM(int64_t dimension, int64_t m) {
    return (dimension % m == 0);
}
// NSG training validation: checks metric, rows, and the four NSG graph
// parameters, then derives the internal IVF helper parameters (nlist tuned
// against row count, nprobe = 10% of nlist with a floor of 1).
bool
NSGConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    const int64_t MIN_KNNG = 5;
    const int64_t MAX_KNNG = 300;
    const int64_t MIN_SEARCH_LENGTH = 10;
    const int64_t MAX_SEARCH_LENGTH = 300;
    const int64_t MIN_OUT_DEGREE = 5;
    const int64_t MAX_OUT_DEGREE = 300;
    const int64_t MIN_CANDIDATE_POOL_SIZE = 50;
    const int64_t MAX_CANDIDATE_POOL_SIZE = 1000;
    CheckStrByValues(knowhere::Metric::TYPE, METRICS);
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::knng, MIN_KNNG, MAX_KNNG);
    CheckIntByRange(knowhere::IndexParams::search_length, MIN_SEARCH_LENGTH, MAX_SEARCH_LENGTH);
    CheckIntByRange(knowhere::IndexParams::out_degree, MIN_OUT_DEGREE, MAX_OUT_DEGREE);
    CheckIntByRange(knowhere::IndexParams::candidate, MIN_CANDIDATE_POOL_SIZE, MAX_CANDIDATE_POOL_SIZE);
    // auto tune params
    oricfg[knowhere::IndexParams::nlist] = MatchNlist(oricfg[knowhere::meta::ROWS].get<int64_t>(), 8192);
    int64_t nprobe = int(oricfg[knowhere::IndexParams::nlist].get<int64_t>() * 0.1);
    oricfg[knowhere::IndexParams::nprobe] = nprobe < 1 ? 1 : nprobe;
    return true;
}
// NSG search validation: search_length in [1, 300] (note the lower bound is
// looser than the training-time minimum of 10), then the base topk check.
bool
NSGConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    static int64_t MIN_SEARCH_LENGTH = 1;
    static int64_t MAX_SEARCH_LENGTH = 300;
    CheckIntByRange(knowhere::IndexParams::search_length, MIN_SEARCH_LENGTH, MAX_SEARCH_LENGTH);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// HNSW training validation: rows in bounds, efConstruction in [8, 512],
// M in [4, 64], then the base dim/metric checks.
bool
HNSWConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static int64_t MIN_EFCONSTRUCTION = 8;
    static int64_t MAX_EFCONSTRUCTION = 512;
    static int64_t MIN_M = 4;
    static int64_t MAX_M = 64;
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::efConstruction, MIN_EFCONSTRUCTION, MAX_EFCONSTRUCTION);
    CheckIntByRange(knowhere::IndexParams::M, MIN_M, MAX_M);
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// HNSW search validation: ef must lie in [topk, 4096] (ef below topk cannot
// return enough candidates), then the base topk check.
bool
HNSWConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    static int64_t MAX_EF = 4096;
    CheckIntByRange(knowhere::IndexParams::ef, oricfg[knowhere::meta::TOPK], MAX_EF);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// RHNSW-Flat training validation: identical bounds to HNSWConfAdapter
// (rows, efConstruction in [8, 512], M in [4, 64], base dim/metric checks).
bool
RHNSWFlatConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static int64_t MIN_EFCONSTRUCTION = 8;
    static int64_t MAX_EFCONSTRUCTION = 512;
    static int64_t MIN_M = 4;
    static int64_t MAX_M = 64;
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::efConstruction, MIN_EFCONSTRUCTION, MAX_EFCONSTRUCTION);
    CheckIntByRange(knowhere::IndexParams::M, MIN_M, MAX_M);
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// RHNSW-Flat search validation: ef in [topk, 4096], then the base topk check.
bool
RHNSWFlatConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    static int64_t MAX_EF = 4096;
    CheckIntByRange(knowhere::IndexParams::ef, oricfg[knowhere::meta::TOPK], MAX_EF);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// RHNSW-PQ training validation: HNSW-style bounds plus a PQ feasibility
// check that the vector dimension divides evenly into PQM sub-quantizers.
bool
RHNSWPQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static int64_t MIN_EFCONSTRUCTION = 8;
    static int64_t MAX_EFCONSTRUCTION = 512;
    static int64_t MIN_M = 4;
    static int64_t MAX_M = 64;
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::efConstruction, MIN_EFCONSTRUCTION, MAX_EFCONSTRUCTION);
    CheckIntByRange(knowhere::IndexParams::M, MIN_M, MAX_M);
    auto dimension = oricfg[knowhere::meta::DIM].get<int64_t>();
    // FIX: the result of the PQM validity check was computed and then
    // discarded, so a dimension not divisible by PQM was silently accepted
    // and would only fail later at index-build time. Reject it here.
    if (!IVFPQConfAdapter::GetValidCPUM(dimension, oricfg[knowhere::IndexParams::PQM].get<int64_t>())) {
        return false;
    }
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// RHNSW-PQ search validation: ef in [topk, 4096], then the base topk check.
bool
RHNSWPQConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    static int64_t MAX_EF = 4096;
    CheckIntByRange(knowhere::IndexParams::ef, oricfg[knowhere::meta::TOPK], MAX_EF);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// RHNSW-SQ training validation: same bounds as the other RHNSW variants
// (rows, efConstruction in [8, 512], M in [4, 64], base dim/metric checks).
bool
RHNSWSQConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static int64_t MIN_EFCONSTRUCTION = 8;
    static int64_t MAX_EFCONSTRUCTION = 512;
    static int64_t MIN_M = 4;
    static int64_t MAX_M = 64;
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::IndexParams::efConstruction, MIN_EFCONSTRUCTION, MAX_EFCONSTRUCTION);
    CheckIntByRange(knowhere::IndexParams::M, MIN_M, MAX_M);
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// RHNSW-SQ search validation: ef in [topk, 4096], then the base topk check.
bool
RHNSWSQConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    static int64_t MAX_EF = 4096;
    CheckIntByRange(knowhere::IndexParams::ef, oricfg[knowhere::meta::TOPK], MAX_EF);
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
// Binary IDMAP training validation: overrides the metric whitelist with the
// binary-vector metrics (including substructure/superstructure) and checks dim.
bool
BinIDMAPConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static const std::vector<std::string> METRICS{knowhere::Metric::HAMMING, knowhere::Metric::JACCARD,
                                                  knowhere::Metric::TANIMOTO, knowhere::Metric::SUBSTRUCTURE,
                                                  knowhere::Metric::SUPERSTRUCTURE};
    CheckIntByRange(knowhere::meta::DIM, DEFAULT_MIN_DIM, DEFAULT_MAX_DIM);
    CheckStrByValues(knowhere::Metric::TYPE, METRICS);
    return true;
}
// Binary IVF training validation: binary metrics only (no substructure /
// superstructure - those are unsupported for IVF), plus rows/dim/nlist bounds
// and the requirement that the dataset has at least nlist rows.
bool
BinIVFConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static const std::vector<std::string> METRICS{knowhere::Metric::HAMMING, knowhere::Metric::JACCARD,
                                                  knowhere::Metric::TANIMOTO};
    CheckIntByRange(knowhere::meta::ROWS, DEFAULT_MIN_ROWS, DEFAULT_MAX_ROWS);
    CheckIntByRange(knowhere::meta::DIM, DEFAULT_MIN_DIM, DEFAULT_MAX_DIM);
    CheckIntByRange(knowhere::IndexParams::nlist, MIN_NLIST, MAX_NLIST);
    CheckStrByValues(knowhere::Metric::TYPE, METRICS);
    int64_t nlist = oricfg[knowhere::IndexParams::nlist];
    CheckIntByRange(knowhere::meta::ROWS, nlist, DEFAULT_MAX_ROWS);
    // Best Practice
    // static int64_t MIN_POINTS_PER_CENTROID = 40;
    // static int64_t MAX_POINTS_PER_CENTROID = 256;
    // CheckIntByRange(knowhere::meta::ROWS, MIN_POINTS_PER_CENTROID * nlist, MAX_POINTS_PER_CENTROID * nlist);
    return true;
}
// Annoy training validation: n_trees in [1, 1024], then the base checks.
bool
ANNOYConfAdapter::CheckTrain(Config& oricfg, const IndexMode mode) {
    static int64_t MIN_NTREES = 1;
    // too large of n_trees takes much time, if there is real requirement, change this threshold.
    static int64_t MAX_NTREES = 1024;
    CheckIntByRange(knowhere::IndexParams::n_trees, MIN_NTREES, MAX_NTREES);
    return ConfAdapter::CheckTrain(oricfg, mode);
}
// Annoy search validation: the range on search_k spans all of int64, so this
// effectively only checks that search_k is present and an integer.
bool
ANNOYConfAdapter::CheckSearch(Config& oricfg, const IndexType type, const IndexMode mode) {
    CheckIntByRange(knowhere::IndexParams::search_k, std::numeric_limits<int64_t>::min(),
                    std::numeric_limits<int64_t>::max());
    return ConfAdapter::CheckSearch(oricfg, type, mode);
}
} // namespace knowhere
} // namespace milvus
|
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <wallet/wallet.h>
#include <checkpoints.h>
#include <chain.h>
#include <wallet/coincontrol.h>
#include <consensus/consensus.h>
#include <consensus/validation.h>
#include <fs.h>
#include <init.h>
#include <key.h>
#include <key_io.h>
#include <keystore.h>
#include <validation.h>
#include <net.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
#include <script/script.h>
#include <scheduler.h>
#include <shutdown.h>
#include <timedata.h>
#include <txmempool.h>
#include <utilmoneystr.h>
#include <wallet/fees.h>
#include <kernel.h>
#include <masternode-payments.h>
#include <instantx.h>
#include <wallet/walletutil.h>
#include <algorithm>
#include <assert.h>
#include <future>
#include <boost/algorithm/string/replace.hpp>
// Cap on outputs grouped together for coin selection.
static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;
// Lock protecting the global wallet registry below.
static CCriticalSection cs_wallets;
// All loaded wallets; every access must hold cs_wallets.
static std::vector<CWallet*> vpwallets GUARDED_BY(cs_wallets);
// Default confirmation target for fee estimation.
unsigned int nTxConfirmTarget = 1;
// Register a wallet in the global list. Returns false (without modifying the
// list) when the same pointer is already registered.
bool AddWallet(CWallet* wallet)
{
    LOCK(cs_wallets);
    assert(wallet);
    const bool already_registered =
        std::find(vpwallets.begin(), vpwallets.end(), wallet) != vpwallets.end();
    if (already_registered) {
        return false;
    }
    vpwallets.push_back(wallet);
    return true;
}
// Unregister a wallet from the global list. Returns false when the pointer is
// not currently registered; the wallet object itself is not destroyed.
bool RemoveWallet(CWallet* wallet)
{
    LOCK(cs_wallets);
    assert(wallet);
    auto pos = std::find(vpwallets.begin(), vpwallets.end(), wallet);
    if (pos == vpwallets.end()) {
        return false;
    }
    vpwallets.erase(pos);
    return true;
}
// Returns true when at least one wallet is registered.
bool HasWallets()
{
    LOCK(cs_wallets);
    return !vpwallets.empty();
}
// Returns a snapshot copy of the registered-wallet list (taken under the lock,
// so callers may iterate it without holding cs_wallets).
std::vector<CWallet*> GetWallets()
{
    LOCK(cs_wallets);
    return vpwallets;
}
// Look up a registered wallet by name; returns nullptr when no wallet matches.
CWallet* GetWallet(const std::string& name)
{
    LOCK(cs_wallets);
    const auto match = std::find_if(vpwallets.begin(), vpwallets.end(),
                                    [&name](CWallet* w) { return w->GetName() == name; });
    return match == vpwallets.end() ? nullptr : *match;
}
// BIP32 hardened-derivation flag: child indexes >= 0x80000000 are hardened.
// FIX: this was 0x80000180, which contradicts both BIP32 and this file's own
// derivation comments ("1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001") and would
// derive a key tree incompatible with every standard wallet.
const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000;
// Sentinel hash stored in place of a block hash to mark an abandoned transaction.
const uint256 CMerkleTx::ABANDON_HASH(uint256S("0000000000000000000000000000000000000000000000000000000000000001"));
/** @defgroup mapWallet
 *
 * @{
 */

// Human-readable debug representation: txid, output index, depth, and value.
std::string COutput::ToString() const
{
    return strprintf("COutput(%s, %d, %d) [%s]", tx->GetHash().ToString(), i, nDepth, FormatMoney(tx->tx->vout[i].nValue));
}
/**
 * Destination visitor that collects every key id in the given keystore which a
 * script can pay to. Process() extracts the script's destinations and applies
 * the visitor to each; P2SH and P2WSH destinations are resolved to their
 * underlying script (when known to the keystore) and recursed into.
 * Results accumulate in the vKeys reference supplied at construction.
 */
class CAffectedKeysVisitor : public boost::static_visitor<void> {
private:
    const CKeyStore &keystore;
    std::vector<CKeyID> &vKeys;  // output: collected key ids (not owned)

public:
    CAffectedKeysVisitor(const CKeyStore &keystoreIn, std::vector<CKeyID> &vKeysIn) : keystore(keystoreIn), vKeys(vKeysIn) {}

    // Extract all destinations from the script and visit each one.
    void Process(const CScript &script) {
        txnouttype type;
        std::vector<CTxDestination> vDest;
        int nRequired;
        if (ExtractDestinations(script, type, vDest, nRequired)) {
            for (const CTxDestination &dest : vDest)
                boost::apply_visitor(*this, dest);
        }
    }

    // Plain key hash: record it if the keystore holds the private key.
    void operator()(const CKeyID &keyId) {
        if (keystore.HaveKey(keyId))
            vKeys.push_back(keyId);
    }

    // P2SH: resolve the redeem script (if known) and recurse.
    void operator()(const CScriptID &scriptId) {
        CScript script;
        if (keystore.GetCScript(scriptId, script))
            Process(script);
    }

    // P2WSH: script ids are keyed by RIPEMD160 of the SHA256 witness hash.
    void operator()(const WitnessV0ScriptHash& scriptID)
    {
        CScriptID id;
        CRIPEMD160().Write(scriptID.begin(), 32).Finalize(id.begin());
        CScript script;
        if (keystore.GetCScript(id, script)) {
            Process(script);
        }
    }

    // P2WPKH: same 160-bit hash space as CKeyID.
    void operator()(const WitnessV0KeyHash& keyid)
    {
        CKeyID id(keyid);
        if (keystore.HaveKey(id)) {
            vKeys.push_back(id);
        }
    }

    // Any other destination type (e.g. CNoDestination): nothing to collect.
    template<typename X>
    void operator()(const X &none) {}
};
// Look up a wallet transaction by txid; returns nullptr when unknown.
// The returned pointer stays valid only while the entry remains in mapWallet.
const CWalletTx* CWallet::GetWalletTx(const uint256& hash) const
{
    LOCK(cs_wallet);
    const auto entry = mapWallet.find(hash);
    return entry == mapWallet.end() ? nullptr : &entry->second;
}
/**
 * Create a brand-new key for the wallet (HD-derived when HD is enabled,
 * random otherwise), record its metadata and creation time, and persist it
 * through the supplied batch. Requires cs_wallet. Throws on write failure.
 */
CPubKey CWallet::GenerateNewKey(WalletBatch &batch, bool internal)
{
    AssertLockHeld(cs_wallet); // mapKeyMetadata
    bool fCompressed = CanSupportFeature(FEATURE_COMPRPUBKEY); // default to compressed public keys if we want 0.6.0 wallets
    CKey secret;

    // Create new metadata
    int64_t nCreationTime = GetTime();
    CKeyMetadata metadata(nCreationTime);

    // use HD key derivation if HD was enabled during wallet creation;
    // the internal/external split only applies when FEATURE_HD_SPLIT is supported
    if (IsHDEnabled()) {
        DeriveNewChildKey(batch, metadata, secret, (CanSupportFeature(FEATURE_HD_SPLIT) ? internal : false));
    } else {
        secret.MakeNewKey(fCompressed);
    }

    // Compressed public keys were introduced in version 0.6.0
    if (fCompressed) {
        SetMinVersion(FEATURE_COMPRPUBKEY);
    }

    CPubKey pubkey = secret.GetPubKey();
    assert(secret.VerifyPubKey(pubkey));

    mapKeyMetadata[pubkey.GetID()] = metadata;
    UpdateTimeFirstKey(nCreationTime);

    if (!AddKeyPubKeyWithDB(batch, secret, pubkey)) {
        throw std::runtime_error(std::string(__func__) + ": AddKey failed");
    }
    return pubkey;
}
/**
 * Derive the next hardened child key along the fixed path m/0'/c'/k'
 * (c = 1 for the internal/change chain, 0 for the external chain),
 * advancing the matching chain counter and skipping indexes whose key the
 * wallet already holds. Outputs the key via `secret`, fills in the keypath
 * and master-key id on `metadata`, and persists the updated chain counters.
 * Throws when the master key is missing or the chain write fails.
 */
void CWallet::DeriveNewChildKey(WalletBatch &batch, CKeyMetadata& metadata, CKey& secret, bool internal)
{
    // for now we use a fixed keypath scheme of m/0'/0'/k
    CKey key;              //master key seed (256bit)
    CExtKey masterKey;     //hd master key
    CExtKey accountKey;    //key at m/0'
    CExtKey chainChildKey; //key at m/0'/0' (external) or m/0'/1' (internal)
    CExtKey childKey;      //key at m/0'/0'/<n>'

    // try to get the master key
    if (!GetKey(hdChain.masterKeyID, key))
        throw std::runtime_error(std::string(__func__) + ": Master key not found");

    masterKey.SetMaster(key.begin(), key.size());

    // derive m/0'
    // use hardened derivation (child keys >= 0x80000000 are hardened after bip32)
    masterKey.Derive(accountKey, BIP32_HARDENED_KEY_LIMIT);

    // derive m/0'/0' (external chain) OR m/0'/1' (internal chain)
    assert(internal ? CanSupportFeature(FEATURE_HD_SPLIT) : true);
    accountKey.Derive(chainChildKey, BIP32_HARDENED_KEY_LIMIT+(internal ? 1 : 0));

    // derive child key at next index, skip keys already known to the wallet
    do {
        // always derive hardened keys
        // childIndex | BIP32_HARDENED_KEY_LIMIT = derive childIndex in hardened child-index-range
        // example: 1 | BIP32_HARDENED_KEY_LIMIT == 0x80000001 == 2147483649
        if (internal) {
            chainChildKey.Derive(childKey, hdChain.nInternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
            metadata.hdKeypath = "m/0'/1'/" + std::to_string(hdChain.nInternalChainCounter) + "'";
            hdChain.nInternalChainCounter++;
        }
        else {
            chainChildKey.Derive(childKey, hdChain.nExternalChainCounter | BIP32_HARDENED_KEY_LIMIT);
            metadata.hdKeypath = "m/0'/0'/" + std::to_string(hdChain.nExternalChainCounter) + "'";
            hdChain.nExternalChainCounter++;
        }
    } while (HaveKey(childKey.key.GetPubKey().GetID()));
    secret = childKey.key;
    metadata.hdMasterKeyID = hdChain.masterKeyID;
    // update the chain model in the database
    if (!batch.WriteHDChain(hdChain))
        throw std::runtime_error(std::string(__func__) + ": Writing HD chain model failed");
}
/**
 * Add a key/pubkey pair to the keystore and persist it through `batch`.
 * Also drops any watch-only entries that the new key now makes spendable.
 * Requires cs_wallet. Returns false when the keystore insert or the
 * unencrypted database write fails.
 */
bool CWallet::AddKeyPubKeyWithDB(WalletBatch &batch, const CKey& secret, const CPubKey &pubkey)
{
    AssertLockHeld(cs_wallet); // mapKeyMetadata

    // CCryptoKeyStore has no concept of wallet databases, but calls AddCryptedKey
    // which is overridden below. To avoid flushes, the database handle is
    // tunneled through to it.
    bool needsDB = !encrypted_batch;
    if (needsDB) {
        encrypted_batch = &batch;
    }
    if (!CCryptoKeyStore::AddKeyPubKey(secret, pubkey)) {
        if (needsDB) encrypted_batch = nullptr;
        return false;
    }
    if (needsDB) encrypted_batch = nullptr;

    // check if we need to remove from watch-only
    CScript script;
    script = GetScriptForDestination(pubkey.GetID());
    if (HaveWatchOnly(script)) {
        RemoveWatchOnly(script);
    }
    script = GetScriptForRawPubKey(pubkey);
    if (HaveWatchOnly(script)) {
        RemoveWatchOnly(script);
    }

    // If the wallet is encrypted, AddCryptedKey (called via the keystore above)
    // already wrote the encrypted key; only plaintext keys are written here.
    if (!IsCrypted()) {
        return batch.WriteKey(pubkey,
                              secret.GetPrivKey(),
                              mapKeyMetadata[pubkey.GetID()]);
    }
    return true;
}
// Convenience overload: persist the key pair through a fresh batch on the
// wallet's own database.
bool CWallet::AddKeyPubKey(const CKey& secret, const CPubKey &pubkey)
{
    WalletBatch batch(*database);
    return CWallet::AddKeyPubKeyWithDB(batch, secret, pubkey);
}
/**
 * Store an encrypted key in the keystore and write it to disk, reusing the
 * tunneled `encrypted_batch` when one is active (see AddKeyPubKeyWithDB),
 * otherwise opening a fresh batch.
 */
bool CWallet::AddCryptedKey(const CPubKey &vchPubKey,
                            const std::vector<unsigned char> &vchCryptedSecret)
{
    if (!CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret))
        return false;
    {
        LOCK(cs_wallet);
        if (encrypted_batch)
            return encrypted_batch->WriteCryptedKey(vchPubKey,
                                                    vchCryptedSecret,
                                                    mapKeyMetadata[vchPubKey.GetID()]);
        else
            return WalletBatch(*database).WriteCryptedKey(vchPubKey,
                                                          vchCryptedSecret,
                                                          mapKeyMetadata[vchPubKey.GetID()]);
    }
}
// Load key metadata during wallet deserialization (no database write).
// Requires cs_wallet; also refreshes the wallet's first-key timestamp.
bool CWallet::LoadKeyMetadata(const CKeyID& keyID, const CKeyMetadata &meta)
{
    AssertLockHeld(cs_wallet); // mapKeyMetadata
    UpdateTimeFirstKey(meta.nCreateTime);
    mapKeyMetadata[keyID] = meta;
    return true;
}
// Load script metadata during wallet deserialization (no database write).
// Requires cs_wallet; also refreshes the wallet's first-key timestamp.
bool CWallet::LoadScriptMetadata(const CScriptID& script_id, const CKeyMetadata &meta)
{
    AssertLockHeld(cs_wallet); // m_script_metadata
    UpdateTimeFirstKey(meta.nCreateTime);
    m_script_metadata[script_id] = meta;
    return true;
}
// Load an encrypted key during wallet deserialization: keystore only,
// no database write (the key is already on disk).
bool CWallet::LoadCryptedKey(const CPubKey &vchPubKey, const std::vector<unsigned char> &vchCryptedSecret)
{
    return CCryptoKeyStore::AddCryptedKey(vchPubKey, vchCryptedSecret);
}
/**
 * Update wallet first key creation time. This should be called whenever keys
 * are added to the wallet, with the oldest key creation time.
 * Requires cs_wallet.
 */
void CWallet::UpdateTimeFirstKey(int64_t nCreateTime)
{
    AssertLockHeld(cs_wallet);
    if (nCreateTime <= 1) {
        // Cannot determine birthday information, so set the wallet birthday to
        // the beginning of time.
        nTimeFirstKey = 1;
    } else if (!nTimeFirstKey || nCreateTime < nTimeFirstKey) {
        nTimeFirstKey = nCreateTime;
    }
}
// Add a redeem script to the keystore and persist it keyed by its Hash160.
bool CWallet::AddCScript(const CScript& redeemScript)
{
    if (!CCryptoKeyStore::AddCScript(redeemScript))
        return false;
    return WalletBatch(*database).WriteCScript(Hash160(redeemScript), redeemScript);
}
// Load a redeem script during wallet deserialization (no database write).
bool CWallet::LoadCScript(const CScript& redeemScript)
{
    /* A sanity check was added in pull #3843 to avoid adding redeemScripts
     * that never can be redeemed. However, old wallets may still contain
     * these. Do not add them to the wallet and warn. */
    if (redeemScript.size() > MAX_SCRIPT_ELEMENT_SIZE)
    {
        std::string strAddr = EncodeDestination(CScriptID(redeemScript));
        LogPrintf("%s: Warning: This wallet contains a redeemScript of size %i which exceeds maximum size %i thus can never be redeemed. Do not use address %s.\n",
            __func__, redeemScript.size(), MAX_SCRIPT_ELEMENT_SIZE, strAddr);
        // Returning true keeps loading going; the oversized script is skipped.
        return true;
    }

    return CCryptoKeyStore::AddCScript(redeemScript);
}
// Add a watch-only script: keystore, birthday update from any preset
// metadata, UI notification, and database write.
bool CWallet::AddWatchOnly(const CScript& dest)
{
    if (!CCryptoKeyStore::AddWatchOnly(dest))
        return false;
    // operator[] default-constructs metadata when none was preset by the
    // nCreateTime overload below.
    const CKeyMetadata& meta = m_script_metadata[CScriptID(dest)];
    UpdateTimeFirstKey(meta.nCreateTime);
    NotifyWatchonlyChanged(true);
    return WalletBatch(*database).WriteWatchOnly(dest, meta);
}
// Compute `percentage` percent of a block reward. Integer division happens
// FIRST, so the result is truncated in units of (blockReward / 100) -
// presumably deliberate (consensus code often fixes an order of operations);
// confirm against the callers before "fixing" the rounding.
CAmount GetStakeReward(CAmount blockReward, unsigned int percentage)
{
    return (blockReward / 100) * percentage;
}
// Add a watch-only script with an explicit creation time: the metadata is
// preset so the main AddWatchOnly overload picks it up for the birthday.
bool CWallet::AddWatchOnly(const CScript& dest, int64_t nCreateTime)
{
    m_script_metadata[CScriptID(dest)].nCreateTime = nCreateTime;
    return AddWatchOnly(dest);
}
// Remove a watch-only script from keystore and database. Requires cs_wallet.
// Notifies the UI when the last watch-only entry disappears.
bool CWallet::RemoveWatchOnly(const CScript &dest)
{
    AssertLockHeld(cs_wallet);
    if (!CCryptoKeyStore::RemoveWatchOnly(dest))
        return false;
    if (!HaveWatchOnly())
        NotifyWatchonlyChanged(false);
    if (!WalletBatch(*database).EraseWatchOnly(dest))
        return false;

    return true;
}
// Load a watch-only script during wallet deserialization (keystore only,
// no database write, no UI notification).
bool CWallet::LoadWatchOnly(const CScript &dest)
{
    return CCryptoKeyStore::AddWatchOnly(dest);
}
/**
 * Unlock the wallet with the given passphrase, optionally restricting the
 * unlocked wallet to staking only. Tries each stored master key: a passphrase
 * that decrypts one and unlocks the keystore succeeds; returns false when no
 * master key works.
 */
bool CWallet::Unlock(const SecureString& strWalletPassphrase, bool stakingOnly)
{
    if (!IsLocked()) {
        fWalletUnlockStakingOnly = stakingOnly;
        return true;
    }

    CCrypter crypter;
    CKeyingMaterial _vMasterKey;

    {
        LOCK(cs_wallet);
        for (const MasterKeyMap::value_type& pMasterKey : mapMasterKeys)
        {
            if (!crypter.SetKeyFromPassphrase(strWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod))
                return false;
            if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, _vMasterKey))
                continue; // try another master key
            // FIX: `return true` was previously OUTSIDE this if, so Unlock()
            // reported success after the first decryptable master key even
            // when CCryptoKeyStore::Unlock() rejected the derived key.
            if (CCryptoKeyStore::Unlock(_vMasterKey)) {
                fWalletUnlockStakingOnly = stakingOnly;
                return true;
            }
        }
    }
    return false;
}
/**
 * Re-encrypt the wallet's master key under a new passphrase. The old
 * passphrase must decrypt one of the stored master keys. The key-derivation
 * iteration count is re-benchmarked (two timed runs, averaged, floor 25000)
 * so unlocking stays ~100ms on the current machine. Re-locks afterwards only
 * if the wallet was locked on entry.
 */
bool CWallet::ChangeWalletPassphrase(const SecureString& strOldWalletPassphrase, const SecureString& strNewWalletPassphrase)
{
    bool fWasLocked = IsLocked();

    {
        LOCK(cs_wallet);
        Lock();

        CCrypter crypter;
        CKeyingMaterial _vMasterKey;
        for (MasterKeyMap::value_type& pMasterKey : mapMasterKeys)
        {
            if(!crypter.SetKeyFromPassphrase(strOldWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod))
                return false;
            if (!crypter.Decrypt(pMasterKey.second.vchCryptedKey, _vMasterKey))
                return false;
            if (CCryptoKeyStore::Unlock(_vMasterKey))
            {
                // Benchmark run 1: scale iterations to hit ~100ms.
                int64_t nStartTime = GetTimeMillis();
                crypter.SetKeyFromPassphrase(strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod);
                pMasterKey.second.nDeriveIterations = static_cast<unsigned int>(pMasterKey.second.nDeriveIterations * (100 / ((double)(GetTimeMillis() - nStartTime))));

                // Benchmark run 2: average with the first estimate.
                nStartTime = GetTimeMillis();
                crypter.SetKeyFromPassphrase(strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod);
                pMasterKey.second.nDeriveIterations = (pMasterKey.second.nDeriveIterations + static_cast<unsigned int>(pMasterKey.second.nDeriveIterations * 100 / ((double)(GetTimeMillis() - nStartTime)))) / 2;

                if (pMasterKey.second.nDeriveIterations < 25000)
                    pMasterKey.second.nDeriveIterations = 25000;

                LogPrintf("Wallet passphrase changed to an nDeriveIterations of %i\n", pMasterKey.second.nDeriveIterations);

                if (!crypter.SetKeyFromPassphrase(strNewWalletPassphrase, pMasterKey.second.vchSalt, pMasterKey.second.nDeriveIterations, pMasterKey.second.nDerivationMethod))
                    return false;
                if (!crypter.Encrypt(_vMasterKey, pMasterKey.second.vchCryptedKey))
                    return false;
                WalletBatch(*database).WriteMasterKey(pMasterKey.first, pMasterKey.second);
                if (fWasLocked)
                    Lock();
                return true;
            }
        }
    }

    return false;
}
// Record the latest chain locator in the wallet database so rescans after a
// restart can resume from the right block.
void CWallet::ChainStateFlushed(const CBlockLocator& loc)
{
    WalletBatch batch(*database);
    batch.WriteBestBlock(loc);
}
/**
 * Raise the wallet's minimum compatible version (never lowers it). With
 * fExplicit and a version beyond the permitted max, jumps straight to
 * FEATURE_LATEST. Persists via the caller's batch when given, otherwise a
 * temporary one. Requires cs_wallet (taken here).
 */
bool CWallet::SetMinVersion(enum WalletFeature nVersion, WalletBatch* batch_in, bool fExplicit)
{
    LOCK(cs_wallet); // nWalletVersion
    if (nWalletVersion >= nVersion)
        return true;

    // when doing an explicit upgrade, if we pass the max version permitted, upgrade all the way
    if (fExplicit && nVersion > nWalletMaxVersion)
            nVersion = FEATURE_LATEST;

    nWalletVersion = nVersion;

    if (nVersion > nWalletMaxVersion)
        nWalletMaxVersion = nVersion;

    {
        WalletBatch* batch = batch_in ? batch_in : new WalletBatch(*database);
        // versions <= 40000 predate the minversion record and are not written
        if (nWalletVersion > 40000)
            batch->WriteMinVersion(nWalletVersion);
        if (!batch_in)
            delete batch;
    }

    return true;
}
// Set the ceiling for future SetMinVersion upgrades; refuses to go below the
// wallet's current version.
bool CWallet::SetMaxVersion(int nVersion)
{
    LOCK(cs_wallet); // nWalletVersion, nWalletMaxVersion
    // cannot downgrade below current version
    if (nWalletVersion > nVersion)
        return false;

    nWalletMaxVersion = nVersion;

    return true;
}
// Collect the txids of every wallet transaction that spends an outpoint
// also spent by `txid` (i.e. its double-spend conflicts). The result
// includes `txid` itself when conflicts exist. Caller holds cs_wallet.
std::set<uint256> CWallet::GetConflicts(const uint256& txid) const
{
    AssertLockHeld(cs_wallet);
    std::set<uint256> conflicts;
    const auto wallet_it = mapWallet.find(txid);
    if (wallet_it == mapWallet.end())
        return conflicts;
    for (const CTxIn& txin : wallet_it->second.tx->vin) {
        // Zero or one recorded spender of this outpoint means no conflict.
        if (mapTxSpends.count(txin.prevout) <= 1)
            continue;
        const auto spenders = mapTxSpends.equal_range(txin.prevout);
        for (auto spend_it = spenders.first; spend_it != spenders.second; ++spend_it) {
            conflicts.insert(spend_it->second);
        }
    }
    return conflicts;
}
// Return true if some wallet transaction spends any output of `txid`.
// Caller must hold cs_wallet.
bool CWallet::HasWalletSpend(const uint256& txid) const
{
    AssertLockHeld(cs_wallet);
    // mapTxSpends is keyed by COutPoint ordered by (hash, n): the first
    // entry at or after (txid, 0) shares the hash iff an output of txid
    // is spent.
    auto iter = mapTxSpends.lower_bound(COutPoint(txid, 0));
    return (iter != mapTxSpends.end() && iter->first.hash == txid);
}
// Flush the wallet database to disk. `shutdown` selects the database
// layer's shutdown variant of the flush.
void CWallet::Flush(bool shutdown)
{
    database->Flush(shutdown);
}
// All wallet transactions in `range` spend the same outpoint; copy the
// shared metadata of the oldest one (smallest nOrderPos) onto the rest
// so double-spend siblings display consistently.
void CWallet::SyncMetaData(std::pair<TxSpends::iterator, TxSpends::iterator> range)
{
    // First pass: find the transaction with the smallest nOrderPos.
    int nMinOrderPos = std::numeric_limits<int>::max();
    const CWalletTx* copyFrom = nullptr;
    for (TxSpends::iterator it = range.first; it != range.second; ++it) {
        const CWalletTx* wtx = &mapWallet.at(it->second);
        if (wtx->nOrderPos < nMinOrderPos) {
            nMinOrderPos = wtx->nOrderPos; // (original had a stray ';;' here)
            copyFrom = wtx;
        }
    }
    // The range is never empty, so a source entry must have been found.
    // (The original repeated this assert inside the loop below; once
    // before the loop is sufficient since copyFrom is not modified.)
    assert(copyFrom && "Oldest wallet transaction in range assumed to have been found.");
    // Second pass: copy the shared metadata onto every other entry.
    for (TxSpends::iterator it = range.first; it != range.second; ++it)
    {
        CWalletTx* copyTo = &mapWallet.at(it->second);
        if (copyFrom == copyTo) continue;
        if (!copyFrom->IsEquivalentTo(*copyTo)) continue;
        copyTo->mapValue = copyFrom->mapValue;
        copyTo->vOrderForm = copyFrom->vOrderForm;
        // fTimeReceivedIsTxTime not copied on purpose
        // nTimeReceived not copied on purpose
        copyTo->nTimeSmart = copyFrom->nTimeSmart;
        copyTo->fFromMe = copyFrom->fFromMe;
        copyTo->strFromAccount = copyFrom->strFromAccount;
        // nOrderPos not copied on purpose
        // cached members not copied on purpose
    }
}
/**
* Outpoint is spent if any non-conflicted transaction
* spends it:
*/
bool CWallet::IsSpent(const uint256& hash, unsigned int n) const
{
const COutPoint outpoint(hash, n);
std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range;
range = mapTxSpends.equal_range(outpoint);
for (TxSpends::const_iterator it = range.first; it != range.second; ++it)
{
const uint256& wtxid = it->second;
std::map<uint256, CWalletTx>::const_iterator mit = mapWallet.find(wtxid);
if (mit != mapWallet.end()) {
int depth = mit->second.GetDepthInMainChain();
if (depth > 0 || (depth == 0 && !mit->second.isAbandoned()))
return true; // Spent
}
}
return false;
}
// Record that wallet tx `wtxid` spends `outpoint`, remove the outpoint
// from our tracked UTXO set, and resynchronise metadata across every
// wallet transaction that spends the same outpoint.
void CWallet::AddToSpends(const COutPoint& outpoint, const uint256& wtxid)
{
    mapTxSpends.insert(std::make_pair(outpoint, wtxid));
    // The outpoint is now spent, so it is no longer one of our UTXOs.
    setWalletUTXO.erase(outpoint);
    std::pair<TxSpends::iterator, TxSpends::iterator> range;
    range = mapTxSpends.equal_range(outpoint);
    SyncMetaData(range);
}
void CWallet::AddToSpends(const uint256& wtxid)
{
auto it = mapWallet.find(wtxid);
assert(it != mapWallet.end());
CWalletTx& thisTx = it->second;
if (thisTx.IsCoinBase()) // Coinbases don't spend anything!
return;
for (const CTxIn& txin : thisTx.tx->vin)
AddToSpends(txin.prevout, wtxid);
}
// Encrypt the wallet's private keys under `strWalletPassphrase`.
// Returns false if the wallet is already encrypted or key derivation /
// encryption fails early. Deliberately aborts the process (assert(false))
// if encryption fails part-way, to avoid running with a half-encrypted
// key store in memory.
bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
{
    if (IsCrypted())
        return false;
    // Generate a random master key and salt.
    CKeyingMaterial _vMasterKey;
    _vMasterKey.resize(WALLET_CRYPTO_KEY_SIZE);
    GetStrongRandBytes(&_vMasterKey[0], WALLET_CRYPTO_KEY_SIZE);
    CMasterKey kMasterKey;
    kMasterKey.vchSalt.resize(WALLET_CRYPTO_SALT_SIZE);
    GetStrongRandBytes(&kMasterKey.vchSalt[0], WALLET_CRYPTO_SALT_SIZE);
    // Benchmark the passphrase KDF twice and derive an iteration count
    // targeting roughly constant wall-clock cost, floored at 25000.
    CCrypter crypter;
    int64_t nStartTime = GetTimeMillis();
    crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, 25000, kMasterKey.nDerivationMethod);
    kMasterKey.nDeriveIterations = static_cast<unsigned int>(2500000 / ((double)(GetTimeMillis() - nStartTime)));
    nStartTime = GetTimeMillis();
    crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, kMasterKey.nDeriveIterations, kMasterKey.nDerivationMethod);
    // Average the first estimate with a second measurement.
    kMasterKey.nDeriveIterations = (kMasterKey.nDeriveIterations + static_cast<unsigned int>(kMasterKey.nDeriveIterations * 100 / ((double)(GetTimeMillis() - nStartTime)))) / 2;
    if (kMasterKey.nDeriveIterations < 25000)
        kMasterKey.nDeriveIterations = 25000;
    LogPrintf("Encrypting Wallet with an nDeriveIterations of %i\n", kMasterKey.nDeriveIterations);
    // Derive the final key and encrypt the random master key with it.
    if (!crypter.SetKeyFromPassphrase(strWalletPassphrase, kMasterKey.vchSalt, kMasterKey.nDeriveIterations, kMasterKey.nDerivationMethod))
        return false;
    if (!crypter.Encrypt(_vMasterKey, kMasterKey.vchCryptedKey))
        return false;
    {
        LOCK(cs_wallet);
        mapMasterKeys[++nMasterKeyMaxID] = kMasterKey;
        assert(!encrypted_batch);
        // All writes below go through one DB transaction so the switch to
        // an encrypted key store is atomic on disk.
        encrypted_batch = new WalletBatch(*database);
        if (!encrypted_batch->TxnBegin()) {
            delete encrypted_batch;
            encrypted_batch = nullptr;
            return false;
        }
        encrypted_batch->WriteMasterKey(nMasterKeyMaxID, kMasterKey);
        if (!EncryptKeys(_vMasterKey))
        {
            encrypted_batch->TxnAbort();
            delete encrypted_batch;
            // We now probably have half of our keys encrypted in memory, and half not...
            // die and let the user reload the unencrypted wallet.
            assert(false);
        }
        // Encryption was introduced in version 0.4.0
        SetMinVersion(FEATURE_WALLETCRYPT, encrypted_batch, true);
        if (!encrypted_batch->TxnCommit()) {
            delete encrypted_batch;
            // We now have keys encrypted in memory, but not on disk...
            // die to avoid confusion and let the user reload the unencrypted wallet.
            assert(false);
        }
        delete encrypted_batch;
        encrypted_batch = nullptr;
        Lock();
        Unlock(strWalletPassphrase);
        // if we are using HD, replace the HD master key (seed) with a new one
        if (IsHDEnabled()) {
            if (!SetHDMasterKey(GenerateNewHDMasterKey())) {
                return false;
            }
        }
        NewKeyPool();
        Lock();
        // Need to completely rewrite the wallet file; if we don't, bdb might keep
        // bits of the unencrypted private key in slack space in the database file.
        database->Rewrite();
    }
    NotifyStatusChanged(this);
    return true;
}
// Assign a stable nOrderPos ordering to every wallet transaction and
// accounting entry (sorted by time) and persist the results. Used to
// repair wallets written before transaction ordering existed
// (nOrderPos == -1). Returns LOAD_FAIL on any DB write error.
DBErrors CWallet::ReorderTransactions()
{
    LOCK(cs_wallet);
    WalletBatch batch(*database);
    // Old wallets didn't have any defined order for transactions
    // Probably a bad idea to change the output of this
    // First: get all CWalletTx and CAccountingEntry into a sorted-by-time multimap.
    typedef std::pair<CWalletTx*, CAccountingEntry*> TxPair;
    typedef std::multimap<int64_t, TxPair > TxItems;
    TxItems txByTime;
    for (auto& entry : mapWallet)
    {
        CWalletTx* wtx = &entry.second;
        txByTime.insert(std::make_pair(wtx->nTimeReceived, TxPair(wtx, nullptr)));
    }
    std::list<CAccountingEntry> acentries;
    batch.ListAccountCreditDebit("", acentries);
    for (CAccountingEntry& entry : acentries)
    {
        txByTime.insert(std::make_pair(entry.nTime, TxPair(nullptr, &entry)));
    }
    // Walk the items in time order, assigning order positions.
    nOrderPosNext = 0;
    std::vector<int64_t> nOrderPosOffsets;
    for (TxItems::iterator it = txByTime.begin(); it != txByTime.end(); ++it)
    {
        // Exactly one of pwtx / pacentry is non-null for each item.
        CWalletTx *const pwtx = (*it).second.first;
        CAccountingEntry *const pacentry = (*it).second.second;
        int64_t& nOrderPos = (pwtx != nullptr) ? pwtx->nOrderPos : pacentry->nOrderPos;
        if (nOrderPos == -1)
        {
            // Item had no order yet: give it the next free slot and
            // remember the slot so pre-ordered items can be shifted past it.
            nOrderPos = nOrderPosNext++;
            nOrderPosOffsets.push_back(nOrderPos);
            if (pwtx)
            {
                if (!batch.WriteTx(*pwtx))
                    return DBErrors::LOAD_FAIL;
            }
            else
                if (!batch.WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
                    return DBErrors::LOAD_FAIL;
        }
        else
        {
            // Item already had an order: shift it up by one for every
            // newly-assigned slot at or below its position.
            int64_t nOrderPosOff = 0;
            for (const int64_t& nOffsetStart : nOrderPosOffsets)
            {
                if (nOrderPos >= nOffsetStart)
                    ++nOrderPosOff;
            }
            nOrderPos += nOrderPosOff;
            nOrderPosNext = std::max(nOrderPosNext, nOrderPos + 1);
            if (!nOrderPosOff)
                continue;
            // Since we're changing the order, write it back
            if (pwtx)
            {
                if (!batch.WriteTx(*pwtx))
                    return DBErrors::LOAD_FAIL;
            }
            else
                if (!batch.WriteAccountingEntry(pacentry->nEntryNo, *pacentry))
                    return DBErrors::LOAD_FAIL;
        }
    }
    batch.WriteOrderPosNext(nOrderPosNext);
    return DBErrors::LOAD_OK;
}
// Hand out the next transaction ordering slot and persist the advanced
// counter. Returns the slot that was assigned (pre-increment value).
int64_t CWallet::IncOrderPosNext(WalletBatch *batch)
{
    AssertLockHeld(cs_wallet); // nOrderPosNext
    const int64_t assigned_pos = nOrderPosNext++;
    // Write through the caller's batch when one is supplied (so it joins
    // the caller's transaction); otherwise use a throwaway batch.
    if (batch == nullptr) {
        WalletBatch(*database).WriteOrderPosNext(nOrderPosNext);
    } else {
        batch->WriteOrderPosNext(nOrderPosNext);
    }
    return assigned_pos;
}
// Move `nAmount` between two labeled accounts by writing a matched
// debit/credit pair of accounting entries inside a single DB
// transaction. Returns false if the transaction cannot be started or
// committed.
bool CWallet::AccountMove(std::string strFrom, std::string strTo, CAmount nAmount, std::string strComment)
{
    WalletBatch batch(*database);
    if (!batch.TxnBegin())
        return false;
    int64_t nNow = GetAdjustedTime();
    // Debit
    CAccountingEntry debit;
    debit.nOrderPos = IncOrderPosNext(&batch);
    debit.strAccount = strFrom;
    debit.nCreditDebit = -nAmount;
    debit.nTime = nNow;
    debit.strOtherAccount = strTo;
    debit.strComment = strComment;
    AddAccountingEntry(debit, &batch);
    // Credit
    CAccountingEntry credit;
    credit.nOrderPos = IncOrderPosNext(&batch);
    credit.strAccount = strTo;
    credit.nCreditDebit = nAmount;
    credit.nTime = nNow;
    credit.strOtherAccount = strFrom;
    credit.strComment = strComment;
    AddAccountingEntry(credit, &batch);
    if (!batch.TxnCommit())
        return false;
    return true;
}
// Look up (or create) the receiving destination for `label`. A new key
// is drawn from the keypool when no key is recorded for the label, when
// the stored key is invalid, when the stored key's script already
// appears in some wallet transaction output, or when the caller forces
// it via `bForceNew`. Returns false only if the keypool is exhausted.
bool CWallet::GetLabelDestination(CTxDestination &dest, const std::string& label, bool bForceNew)
{
    WalletBatch batch(*database);
    CAccount account;
    batch.ReadAccount(label, account);
    if (!bForceNew) {
        if (!account.vchPubKey.IsValid())
            bForceNew = true;
        else {
            // Check if the current key has been used (TODO: check other addresses with the same key)
            CScript scriptPubKey = GetScriptForDestination(GetDestinationForKey(account.vchPubKey, m_default_address_type));
            for (std::map<uint256, CWalletTx>::iterator it = mapWallet.begin();
                 it != mapWallet.end() && account.vchPubKey.IsValid();
                 ++it)
                for (const CTxOut& txout : (*it).second.tx->vout)
                    if (txout.scriptPubKey == scriptPubKey) {
                        bForceNew = true;
                        break;
                    }
        }
    }
    // Generate a new key
    if (bForceNew) {
        if (!GetKeyFromPool(account.vchPubKey, false))
            return false;
        LearnRelatedScripts(account.vchPubKey, m_default_address_type);
        dest = GetDestinationForKey(account.vchPubKey, m_default_address_type);
        SetAddressBook(dest, label, "receive");
        batch.WriteAccount(label, account);
    } else {
        dest = GetDestinationForKey(account.vchPubKey, m_default_address_type);
    }
    return true;
}
void CWallet::MarkDirty()
{
{
LOCK(cs_wallet);
for (std::pair<const uint256, CWalletTx>& item : mapWallet)
item.second.MarkDirty();
}
}
// Record in `originalHash`'s metadata that it was replaced by `newHash`
// (e.g. via fee bumping), persist the change, and notify the UI.
// Returns false only if the DB write fails.
bool CWallet::MarkReplaced(const uint256& originalHash, const uint256& newHash)
{
    LOCK(cs_wallet);
    auto mi = mapWallet.find(originalHash);
    // There is a bug if MarkReplaced is not called on an existing wallet transaction.
    assert(mi != mapWallet.end());
    CWalletTx& wtx = (*mi).second;
    // Ensure for now that we're not overwriting data
    assert(wtx.mapValue.count("replaced_by_txid") == 0);
    wtx.mapValue["replaced_by_txid"] = newHash.ToString();
    WalletBatch batch(*database, "r+");
    bool success = true;
    if (!batch.WriteTx(wtx)) {
        LogPrintf("%s: Updating batch tx %s failed\n", __func__, wtx.GetHash().ToString());
        success = false;
    }
    NotifyTransactionChanged(this, originalHash, CT_UPDATED);
    return success;
}
// Insert `wtxIn` into the wallet, or merge its block hash / index /
// from-me / witness data into an existing entry. New or updated entries
// are written to the DB, the UI is notified, and the -walletnotify
// external command (if configured) is launched on a detached thread.
// Returns false only when the DB write fails.
bool CWallet::AddToWallet(const CWalletTx& wtxIn, bool fFlushOnClose)
{
    LOCK(cs_wallet);
    WalletBatch batch(*database, "r+", fFlushOnClose);
    uint256 hash = wtxIn.GetHash();
    // Inserts only if not already there, returns tx inserted or tx found
    std::pair<std::map<uint256, CWalletTx>::iterator, bool> ret = mapWallet.insert(std::make_pair(hash, wtxIn));
    CWalletTx& wtx = (*ret.first).second;
    wtx.BindWallet(this);
    bool fInsertedNew = ret.second;
    if (fInsertedNew)
    {
        // Fresh entry: stamp the receive time, assign a persistent
        // ordering slot, compute the "smart" timestamp, and index spends.
        wtx.nTimeReceived = GetAdjustedTime();
        wtx.nOrderPos = IncOrderPosNext(&batch);
        wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
        wtx.nTimeSmart = ComputeTimeSmart(wtx);
        AddToSpends(hash);
        // Track our own still-unspent outputs of this transaction.
        for(size_t i = 0; i < wtx.tx->vout.size(); ++i) {
            if (IsMine(wtx.tx->vout[i]) && !IsSpent(hash, i)) {
                setWalletUTXO.insert(COutPoint(hash, i));
            }
        }
    }
    bool fUpdated = false;
    if (!fInsertedNew)
    {
        // Merge
        if (!wtxIn.hashUnset() && wtxIn.hashBlock != wtx.hashBlock)
        {
            wtx.hashBlock = wtxIn.hashBlock;
            fUpdated = true;
        }
        // If no longer abandoned, update
        if (wtxIn.hashBlock.IsNull() && wtx.isAbandoned())
        {
            wtx.hashBlock = wtxIn.hashBlock;
            fUpdated = true;
        }
        if (wtxIn.nIndex != -1 && (wtxIn.nIndex != wtx.nIndex))
        {
            wtx.nIndex = wtxIn.nIndex;
            fUpdated = true;
        }
        if (wtxIn.fFromMe && wtxIn.fFromMe != wtx.fFromMe)
        {
            wtx.fFromMe = wtxIn.fFromMe;
            fUpdated = true;
        }
        // If we have a witness-stripped version of this transaction, and we
        // see a new version with a witness, then we must be upgrading a pre-segwit
        // wallet. Store the new version of the transaction with the witness,
        // as the stripped-version must be invalid.
        // TODO: Store all versions of the transaction, instead of just one.
        if (wtxIn.tx->HasWitness() && !wtx.tx->HasWitness()) {
            wtx.SetTx(wtxIn.tx);
            fUpdated = true;
        }
    }
    //// debug print
    LogPrintf("AddToWallet %s %s%s\n", wtxIn.GetHash().ToString(), (fInsertedNew ? "new" : ""), (fUpdated ? "update" : ""));
    // Write to disk
    if (fInsertedNew || fUpdated)
        if (!batch.WriteTx(wtx))
            return false;
    // Break debit/credit balance caches:
    wtx.MarkDirty();
    // Notify UI of new or updated transaction
    NotifyTransactionChanged(this, hash, fInsertedNew ? CT_NEW : CT_UPDATED);
    // notify an external script when a wallet transaction comes in or is updated
    std::string strCmd = gArgs.GetArg("-walletnotify", "");
    if (!strCmd.empty())
    {
        boost::replace_all(strCmd, "%s", wtxIn.GetHash().GetHex());
        std::thread t(runCommand, strCmd);
        t.detach(); // thread runs free
    }
    return true;
}
// Load a transaction read from the wallet database into the in-memory
// maps (no DB writes). Propagates conflicts from any known conflicted
// parent onto the loaded transaction. Always returns true.
bool CWallet::LoadToWallet(const CWalletTx& wtxIn)
{
    uint256 hash = wtxIn.GetHash();
    CWalletTx& wtx = mapWallet.emplace(hash, wtxIn).first->second;
    wtx.BindWallet(this);
    wtxOrdered.insert(std::make_pair(wtx.nOrderPos, TxPair(&wtx, nullptr)));
    AddToSpends(hash);
    for (const CTxIn& txin : wtx.tx->vin) {
        auto it = mapWallet.find(txin.prevout.hash);
        if (it != mapWallet.end()) {
            CWalletTx& prevtx = it->second;
            // nIndex == -1 with a block hash set marks a conflicted tx;
            // propagate that conflict onto the child we just loaded.
            if (prevtx.nIndex == -1 && !prevtx.hashUnset()) {
                MarkConflicted(prevtx.hashBlock, wtx.GetHash());
            }
        }
    }
    return true;
}
/**
 * Add a transaction to the wallet, or update it. pIndex and posInBlock should
 * be set when the transaction was known to be included in a block. When
 * pIndex == nullptr, then wallet state is not updated in AddToWallet, but
 * notifications happen and cached balances are marked dirty.
 *
 * If fUpdate is true, existing transactions will be updated.
 * TODO: One exception to this is that the abandoned state is cleared under the
 * assumption that any further notification of a transaction that was considered
 * abandoned is an indication that it is not safe to be considered abandoned.
 * Abandoned state should probably be more carefully tracked via different
 * posInBlock signals or by checking mempool presence when necessary.
 */
bool CWallet::AddToWalletIfInvolvingMe(const CTransactionRef& ptx, const CBlockIndex* pIndex, int posInBlock, bool fUpdate)
{
    const CTransaction& tx = *ptx;
    {
        AssertLockHeld(cs_wallet);
        if (pIndex != nullptr) {
            // The tx is in a block: any other recorded spender of the same
            // outpoints is now a double-spend conflict — mark it so.
            for (const CTxIn& txin : tx.vin) {
                std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(txin.prevout);
                while (range.first != range.second) {
                    if (range.first->second != tx.GetHash()) {
                        LogPrintf("Transaction %s (in block %s) conflicts with wallet transaction %s (both spend %s:%i)\n", tx.GetHash().ToString(), pIndex->GetBlockHash().ToString(), range.first->second.ToString(), range.first->first.hash.ToString(), range.first->first.n);
                        MarkConflicted(pIndex->GetBlockHash(), range.first->second);
                    }
                    range.first++;
                }
            }
        }
        bool fExisted = mapWallet.count(tx.GetHash()) != 0;
        if (fExisted && !fUpdate) return false;
        if (fExisted || IsMine(tx) || IsFromMe(tx))
        {
            /* Check if any keys in the wallet keypool that were supposed to be unused
             * have appeared in a new transaction. If so, remove those keys from the keypool.
             * This can happen when restoring an old wallet backup that does not contain
             * the mostly recently created transactions from newer versions of the wallet.
             */
            // loop though all outputs
            for (const CTxOut& txout: tx.vout) {
                // extract addresses and check if they match with an unused keypool key
                std::vector<CKeyID> vAffected;
                CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey);
                for (const CKeyID &keyid : vAffected) {
                    std::map<CKeyID, int64_t>::const_iterator mi = m_pool_key_to_index.find(keyid);
                    if (mi != m_pool_key_to_index.end()) {
                        LogPrintf("%s: Detected a used keypool key, mark all keypool key up to this key as used\n", __func__);
                        MarkReserveKeysAsUsed(mi->second);
                        if (!TopUpKeyPool()) {
                            LogPrintf("%s: Topping up keypool failed (locked wallet)\n", __func__);
                        }
                    }
                }
            }
            CWalletTx wtx(this, ptx);
            // Get merkle branch if transaction was found in a block
            if (pIndex != nullptr)
                wtx.SetMerkleBranch(pIndex, posInBlock);
            return AddToWallet(wtx, false);
        }
    }
    return false;
}
// A transaction may be abandoned when it is known to the wallet, not
// already abandoned, unconfirmed, and not in the mempool.
bool CWallet::TransactionCanBeAbandoned(const uint256& hashTx) const
{
    LOCK2(cs_main, cs_wallet);
    const CWalletTx* wtx = GetWalletTx(hashTx);
    if (wtx == nullptr) return false;
    if (wtx->isAbandoned()) return false;
    if (wtx->GetDepthInMainChain() != 0) return false;
    return !wtx->InMempool();
}
// Mark an unconfirmed, not-in-mempool transaction — and, transitively,
// every wallet transaction that spends its outputs — as abandoned, so
// the coins it spent become available again. Returns false if the
// transaction is confirmed or currently in the mempool.
bool CWallet::AbandonTransaction(const uint256& hashTx)
{
    LOCK2(cs_main, cs_wallet);
    WalletBatch batch(*database, "r+");
    // Worklist / visited set for the breadth-first walk over descendants.
    std::set<uint256> todo;
    std::set<uint256> done;
    // Can't mark abandoned if confirmed or in mempool
    auto it = mapWallet.find(hashTx);
    assert(it != mapWallet.end());
    CWalletTx& origtx = it->second;
    if (origtx.GetDepthInMainChain() != 0 || origtx.InMempool()) {
        return false;
    }
    todo.insert(hashTx);
    while (!todo.empty()) {
        uint256 now = *todo.begin();
        todo.erase(now);
        done.insert(now);
        auto it = mapWallet.find(now);
        assert(it != mapWallet.end());
        CWalletTx& wtx = it->second;
        int currentconfirm = wtx.GetDepthInMainChain();
        // If the orig tx was not in block, none of its spends can be
        assert(currentconfirm <= 0);
        // if (currentconfirm < 0) {Tx and spends are already conflicted, no need to abandon}
        if (currentconfirm == 0 && !wtx.isAbandoned()) {
            // If the orig tx was not in block/mempool, none of its spends can be in mempool
            assert(!wtx.InMempool());
            // nIndex == -1 plus the abandoned marker flags this tx.
            wtx.nIndex = -1;
            wtx.setAbandoned();
            wtx.MarkDirty();
            batch.WriteTx(wtx);
            NotifyTransactionChanged(this, wtx.GetHash(), CT_UPDATED);
            // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too
            TxSpends::const_iterator iter = mapTxSpends.lower_bound(COutPoint(hashTx, 0));
            while (iter != mapTxSpends.end() && iter->first.hash == now) {
                if (!done.count(iter->second)) {
                    todo.insert(iter->second);
                }
                iter++;
            }
            // If a transaction changes 'conflicted' state, that changes the balance
            // available of the outputs it spends. So force those to be recomputed
            for (const CTxIn& txin : wtx.tx->vin)
            {
                auto it = mapWallet.find(txin.prevout.hash);
                if (it != mapWallet.end()) {
                    it->second.MarkDirty();
                }
            }
        }
    }
    return true;
}
// Mark `hashTx` — and, transitively, every wallet transaction spending
// its outputs — as conflicted with the active-chain block `hashBlock`.
// Does nothing when the block is unknown or not in the active chain
// (e.g. while loading the wallet during a reindex).
void CWallet::MarkConflicted(const uint256& hashBlock, const uint256& hashTx)
{
    LOCK2(cs_main, cs_wallet);
    // Conflict depth is negative: -(confirmations of the conflicting block).
    int conflictconfirms = 0;
    CBlockIndex* pindex = LookupBlockIndex(hashBlock);
    if (pindex && chainActive.Contains(pindex)) {
        conflictconfirms = -(chainActive.Height() - pindex->nHeight + 1);
    }
    // If number of conflict confirms cannot be determined, this means
    // that the block is still unknown or not yet part of the main chain,
    // for example when loading the wallet during a reindex. Do nothing in that
    // case.
    if (conflictconfirms >= 0)
        return;
    // Do not flush the wallet here for performance reasons
    WalletBatch batch(*database, "r+", false);
    // Worklist / visited set for the breadth-first walk over descendants.
    std::set<uint256> todo;
    std::set<uint256> done;
    todo.insert(hashTx);
    while (!todo.empty()) {
        uint256 now = *todo.begin();
        todo.erase(now);
        done.insert(now);
        auto it = mapWallet.find(now);
        assert(it != mapWallet.end());
        CWalletTx& wtx = it->second;
        int currentconfirm = wtx.GetDepthInMainChain();
        if (conflictconfirms < currentconfirm) {
            // Block is 'more conflicted' than current confirm; update.
            // Mark transaction as conflicted with this block.
            wtx.nIndex = -1;
            wtx.hashBlock = hashBlock;
            wtx.MarkDirty();
            batch.WriteTx(wtx);
            // Iterate over all its outputs, and mark transactions in the wallet that spend them conflicted too
            TxSpends::const_iterator iter = mapTxSpends.lower_bound(COutPoint(now, 0));
            while (iter != mapTxSpends.end() && iter->first.hash == now) {
                if (!done.count(iter->second)) {
                    todo.insert(iter->second);
                }
                iter++;
            }
            // If a transaction changes 'conflicted' state, that changes the balance
            // available of the outputs it spends. So force those to be recomputed
            for (const CTxIn& txin : wtx.tx->vin) {
                auto it = mapWallet.find(txin.prevout.hash);
                if (it != mapWallet.end()) {
                    it->second.MarkDirty();
                }
            }
        }
    }
}
// Feed one transaction through AddToWalletIfInvolvingMe and, if it is
// ours, invalidate the cached balances of its in-wallet parents (their
// spent/conflicted status may just have changed).
void CWallet::SyncTransaction(const CTransactionRef& ptx, const CBlockIndex *pindex, int posInBlock) {
    const CTransaction& tx = *ptx;
    if (!AddToWalletIfInvolvingMe(ptx, pindex, posInBlock, true))
        return; // Not one of ours
    // If a transaction changes 'conflicted' state, that changes the balance
    // available of the outputs it spends. So force those to be
    // recomputed, also:
    for (const CTxIn& txin : tx.vin) {
        auto it = mapWallet.find(txin.prevout.hash);
        if (it != mapWallet.end()) {
            it->second.MarkDirty();
        }
    }
}
// Validation-interface callback: sync a newly accepted mempool tx into
// the wallet and flag it as currently in the mempool.
void CWallet::TransactionAddedToMempool(const CTransactionRef& ptx) {
    LOCK2(cs_main, cs_wallet);
    SyncTransaction(ptx);
    const auto entry = mapWallet.find(ptx->GetHash());
    if (entry != mapWallet.end()) {
        entry->second.fInMempool = true;
    }
}
// Validation-interface callback: clear the in-mempool flag for a tx
// that was evicted or mined out of the mempool.
void CWallet::TransactionRemovedFromMempool(const CTransactionRef &ptx) {
    LOCK(cs_wallet);
    const auto entry = mapWallet.find(ptx->GetHash());
    if (entry != mapWallet.end()) {
        entry->second.fInMempool = false;
    }
}
// Validation-interface callback for a newly connected block: sync the
// conflicted transactions first, then every transaction in the block
// (with its position), and remember the new tip as processed.
void CWallet::BlockConnected(const std::shared_ptr<const CBlock>& pblock, const CBlockIndex *pindex, const std::vector<CTransactionRef>& vtxConflicted) {
    LOCK2(cs_main, cs_wallet);
    // TODO: Temporarily ensure that mempool removals are notified before
    // connected transactions. This shouldn't matter, but the abandoned
    // state of transactions in our wallet is currently cleared when we
    // receive another notification and there is a race condition where
    // notification of a connected conflict might cause an outside process
    // to abandon a transaction and then have it inadvertently cleared by
    // the notification that the conflicted transaction was evicted.
    // if(witnessEnabled && m_default_address_type != OutputType::P2SH_SEGWIT)
    //     m_default_address_type = OutputType::P2SH_SEGWIT;
    for (const CTransactionRef& ptx : vtxConflicted) {
        SyncTransaction(ptx);
        TransactionRemovedFromMempool(ptx);
    }
    for (size_t i = 0; i < pblock->vtx.size(); i++) {
        SyncTransaction(pblock->vtx[i], pindex, i);
        TransactionRemovedFromMempool(pblock->vtx[i]);
    }
    // Record how far the wallet has processed the chain.
    m_last_block_processed = pindex;
}
// Validation-interface callback for a disconnected block: re-sync each
// of its transactions without a block index (SyncTransaction's defaults
// apply for the block-position arguments).
void CWallet::BlockDisconnected(const std::shared_ptr<const CBlock>& pblock) {
    LOCK2(cs_main, cs_wallet);
    for (size_t i = 0; i < pblock->vtx.size(); ++i) {
        SyncTransaction(pblock->vtx[i]);
    }
}
// Block the calling thread (which must not hold cs_main or cs_wallet)
// until the wallet has processed validation notifications up to the
// current chain tip.
void CWallet::BlockUntilSyncedToCurrentChain() {
    AssertLockNotHeld(cs_main);
    AssertLockNotHeld(cs_wallet);
    {
        // Skip the queue-draining stuff if we know we're caught up with
        // chainActive.Tip()...
        // We could also take cs_wallet here, and call m_last_block_processed
        // protected by cs_wallet instead of cs_main, but as long as we need
        // cs_main here anyway, it's easier to just call it cs_main-protected.
        LOCK(cs_main);
        const CBlockIndex* initialChainTip = chainActive.Tip();
        // Caught up iff the tip is an ancestor-or-equal of what we processed.
        if (m_last_block_processed->GetAncestor(initialChainTip->nHeight) == initialChainTip) {
            return;
        }
    }
    // ...otherwise put a callback in the validation interface queue and wait
    // for the queue to drain enough to execute it (indicating we are caught up
    // at least with the time we entered this function).
    SyncWithValidationInterfaceQueue();
}
// Classify an input by looking up the previous output it spends in the
// wallet. Unknown parents or out-of-range indices yield ISMINE_NO.
isminetype CWallet::IsMine(const CTxIn &txin) const
{
    LOCK(cs_wallet);
    const auto mi = mapWallet.find(txin.prevout.hash);
    if (mi == mapWallet.end())
        return ISMINE_NO;
    const CWalletTx& prev = mi->second;
    if (txin.prevout.n >= prev.tx->vout.size())
        return ISMINE_NO;
    return IsMine(prev.tx->vout[txin.prevout.n]);
}
// Note that this function doesn't distinguish between a 0-valued input,
// and a not-"is mine" (according to the filter) input.
CAmount CWallet::GetDebit(const CTxIn &txin, const isminefilter& filter) const
{
    LOCK(cs_wallet);
    const auto mi = mapWallet.find(txin.prevout.hash);
    if (mi == mapWallet.end())
        return 0;
    const CWalletTx& prev = mi->second;
    if (txin.prevout.n >= prev.tx->vout.size())
        return 0;
    const CTxOut& spent_output = prev.tx->vout[txin.prevout.n];
    // Debited value is the spent output's value, when that output is
    // ours per `filter`.
    return (IsMine(spent_output) & filter) ? spent_output.nValue : 0;
}
// Classify a single output by delegating to the script-level ::IsMine.
isminetype CWallet::IsMine(const CTxOut& txout) const
{
    return ::IsMine(*this, txout.scriptPubKey);
}
// Credit of a single output: its value, when it is ours per `filter`.
// Throws if the value is outside the valid money range.
CAmount CWallet::GetCredit(const CTxOut& txout, const isminefilter& filter) const
{
    if (!MoneyRange(txout.nValue))
        throw std::runtime_error(std::string(__func__) + ": value out of range");
    if (IsMine(txout) & filter) {
        return txout.nValue;
    }
    return 0;
}
bool CWallet::IsChange(const CTxOut& txout) const
{
    // TODO: fix handling of 'change' outputs. The assumption is that any
    // payment to a script that is ours, but is not in the address book
    // is change. That assumption is likely to break when we implement multisignature
    // wallets that return change back into a multi-signature-protected address;
    // a better way of identifying which outputs are 'the send' and which are
    // 'the change' will need to be implemented (maybe extend CWalletTx to remember
    // which output, if any, was change).
    if (!::IsMine(*this, txout.scriptPubKey))
        return false;
    CTxDestination address;
    // A script we own but cannot extract a destination from counts as change.
    if (!ExtractDestination(txout.scriptPubKey, address))
        return true;
    LOCK(cs_wallet);
    return mapAddressBook.count(address) == 0;
}
// Change value of a single output: its value when classified as change,
// otherwise zero. Throws if the value is outside the valid money range.
CAmount CWallet::GetChange(const CTxOut& txout) const
{
    if (!MoneyRange(txout.nValue))
        throw std::runtime_error(std::string(__func__) + ": value out of range");
    if (!IsChange(txout)) {
        return 0;
    }
    return txout.nValue;
}
// A transaction is "ours" when at least one of its outputs is ours.
bool CWallet::IsMine(const CTransaction& tx) const
{
    bool any_mine = false;
    for (const CTxOut& txout : tx.vout) {
        if (IsMine(txout)) {
            any_mine = true;
            break;
        }
    }
    return any_mine;
}
// A transaction is "from us" when it debits us under any ISMINE category.
bool CWallet::IsFromMe(const CTransaction& tx) const
{
    return (GetDebit(tx, ISMINE_ALL) > 0);
}
// Total amount this transaction debits from us, summed over inputs.
// Throws if the running total leaves the valid money range.
CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) const
{
    CAmount total_debit = 0;
    for (const CTxIn& txin : tx.vin) {
        total_debit += GetDebit(txin, filter);
        if (!MoneyRange(total_debit))
            throw std::runtime_error(std::string(__func__) + ": value out of range");
    }
    return total_debit;
}
// True iff every input of `tx` spends an output we own (per `filter`).
// Unknown parents and out-of-range prevout indices fail the check.
bool CWallet::IsAllFromMe(const CTransaction& tx, const isminefilter& filter) const
{
    LOCK(cs_wallet);
    for (const CTxIn& txin : tx.vin) {
        const auto mi = mapWallet.find(txin.prevout.hash);
        if (mi == mapWallet.end())
            return false; // any unknown inputs can't be from us
        const CWalletTx& prev = mi->second;
        if (txin.prevout.n >= prev.tx->vout.size())
            return false; // invalid input!
        if (!(IsMine(prev.tx->vout[txin.prevout.n]) & filter))
            return false;
    }
    return true;
}
// Total amount this transaction credits to us, summed over outputs.
// Throws if the running total leaves the valid money range.
CAmount CWallet::GetCredit(const CTransaction& tx, const isminefilter& filter) const
{
    CAmount total_credit = 0;
    for (const CTxOut& txout : tx.vout) {
        total_credit += GetCredit(txout, filter);
        if (!MoneyRange(total_credit))
            throw std::runtime_error(std::string(__func__) + ": value out of range");
    }
    return total_credit;
}
// Total change value across the transaction's outputs. Throws if the
// running total leaves the valid money range.
CAmount CWallet::GetChange(const CTransaction& tx) const
{
    CAmount total_change = 0;
    for (const CTxOut& txout : tx.vout) {
        total_change += GetChange(txout);
        if (!MoneyRange(total_change))
            throw std::runtime_error(std::string(__func__) + ": value out of range");
    }
    return total_change;
}
// Create a fresh random HD master (seed) key, store its metadata with
// keypath "m" referring to itself, persist key and metadata, and return
// the corresponding public key. Throws if the key cannot be stored.
CPubKey CWallet::GenerateNewHDMasterKey()
{
    CKey key;
    key.MakeNewKey(true);
    int64_t nCreationTime = GetTime();
    CKeyMetadata metadata(nCreationTime);
    // calculate the pubkey
    CPubKey pubkey = key.GetPubKey();
    assert(key.VerifyPubKey(pubkey));
    // set the hd keypath to "m" -> Master, refers the masterkeyid to itself
    metadata.hdKeypath = "m";
    metadata.hdMasterKeyID = pubkey.GetID();
    {
        LOCK(cs_wallet);
        // mem store the metadata
        mapKeyMetadata[pubkey.GetID()] = metadata;
        // write the key&metadata to the database
        if (!AddKeyPubKey(key, pubkey))
            throw std::runtime_error(std::string(__func__) + ": AddKeyPubKey failed");
    }
    return pubkey;
}
// Install `pubkey` as the wallet's HD master key by creating a new HD
// chain object keyed on its hash160 and persisting it. Always returns
// true.
bool CWallet::SetHDMasterKey(const CPubKey& pubkey)
{
    LOCK(cs_wallet);
    // store the keyid (hash160) together with
    // the child index counter in the database
    // as a hdchain object
    CHDChain newHdChain;
    // Use the split-chain version when the wallet supports it.
    newHdChain.nVersion = CanSupportFeature(FEATURE_HD_SPLIT) ? CHDChain::VERSION_HD_CHAIN_SPLIT : CHDChain::VERSION_HD_BASE;
    newHdChain.masterKeyID = pubkey.GetID();
    SetHDChain(newHdChain, false);
    return true;
}
// Replace the in-memory HD chain state and, unless `memonly`, persist
// it. Throws when the DB write fails. Always returns true.
bool CWallet::SetHDChain(const CHDChain& chain, bool memonly)
{
    LOCK(cs_wallet);
    if (!memonly && !WalletBatch(*database).WriteHDChain(chain))
        throw std::runtime_error(std::string(__func__) + ": writing chain failed");
    hdChain = chain;
    return true;
}
// HD derivation is enabled iff an HD chain (master key id) has been set.
bool CWallet::IsHDEnabled() const
{
    return !hdChain.masterKeyID.IsNull();
}
// OR the given flag bit(s) into the wallet's flag word and persist it.
// Throws when the DB write fails.
void CWallet::SetWalletFlag(uint64_t flags)
{
    LOCK(cs_wallet);
    m_wallet_flags |= flags;
    if (!WalletBatch(*database).WriteWalletFlags(m_wallet_flags))
        throw std::runtime_error(std::string(__func__) + ": writing wallet flags failed");
}
// Clear the given flag bit(s) from the wallet's flag word and persist
// it. Throws when the DB write fails.
void CWallet::UnsetWalletFlag(uint64_t flag)
{
    LOCK(cs_wallet);
    m_wallet_flags &= ~flag;
    const bool written = WalletBatch(*database).WriteWalletFlags(m_wallet_flags);
    if (!written) {
        throw std::runtime_error(std::string(__func__) + ": writing wallet flags failed");
    }
}
// True when any bit of `flag` is set in the wallet's flag word.
bool CWallet::IsWalletFlagSet(uint64_t flag)
{
    return (m_wallet_flags & flag) != 0;
}
// Overwrite the wallet's entire flag word with `overwriteFlags` and,
// unless `memonly`, persist it. Returns false when the upper 32 bits
// contain flags we do not recognise (non-tolerable unknown flags).
bool CWallet::SetWalletFlags(uint64_t overwriteFlags, bool memonly)
{
    LOCK(cs_wallet);
    // NOTE(review): m_wallet_flags is assigned before the unknown-flag
    // check, so on a false return the in-memory flags keep the rejected
    // value — confirm callers treat false as fatal.
    m_wallet_flags = overwriteFlags;
    if (((overwriteFlags & g_known_wallet_flags) >> 32) ^ (overwriteFlags >> 32)) {
        // contains unknown non-tolerable wallet flags
        return false;
    }
    if (!memonly && !WalletBatch(*database).WriteWalletFlags(m_wallet_flags)) {
        throw std::runtime_error(std::string(__func__) + ": writing wallet flags failed");
    }
    return true;
}
// Best-known timestamp for this transaction: the "smart" time when one
// was computed, otherwise the raw receive time.
int64_t CWalletTx::GetTxTime() const
{
    return nTimeSmart != 0 ? nTimeSmart : nTimeReceived;
}
// Number of times peers requested this transaction, as recorded in
// pwallet->mapRequestCount; returns -1 when it was never tracked. For
// coinbase transactions the containing block's request count is used;
// for ordinary transactions the block's count serves as a fallback when
// the tx itself has a zero count.
int CWalletTx::GetRequestCount() const
{
    // Returns -1 if it wasn't being tracked
    int nRequests = -1;
    {
        LOCK(pwallet->cs_wallet);
        if (IsCoinBase())
        {
            // Generated block
            if (!hashUnset())
            {
                std::map<uint256, int>::const_iterator mi = pwallet->mapRequestCount.find(hashBlock);
                if (mi != pwallet->mapRequestCount.end())
                    nRequests = (*mi).second;
            }
        }
        else
        {
            // Did anyone request this transaction?
            std::map<uint256, int>::const_iterator mi = pwallet->mapRequestCount.find(GetHash());
            if (mi != pwallet->mapRequestCount.end())
            {
                nRequests = (*mi).second;
                // How about the block it's in?
                if (nRequests == 0 && !hashUnset())
                {
                    std::map<uint256, int>::const_iterator _mi = pwallet->mapRequestCount.find(hashBlock);
                    if (_mi != pwallet->mapRequestCount.end())
                        nRequests = (*_mi).second;
                    else
                        nRequests = 1; // If it's in someone else's block it must have got out
                }
            }
        }
    }
    return nRequests;
}
// Helper for producing a max-sized low-S signature (eg 72 bytes)
bool CWallet::DummySignInput(CTxIn &tx_in, const CTxOut &txout) const
{
    // Fill in a dummy signature for fee calculation; fail if we cannot
    // produce a signature for the output's script.
    SignatureData dummy_sig_data;
    if (!ProduceSignature(*this, DUMMY_SIGNATURE_CREATOR, txout.scriptPubKey, dummy_sig_data)) {
        return false;
    }
    UpdateInput(tx_in, dummy_sig_data);
    return true;
}
// Helper for producing a bunch of max-sized low-S signatures (eg 72 bytes)
bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut> &txouts) const
{
    // Dummy-sign each input against its corresponding previous output
    // (txouts is aligned index-for-index with txNew.vin).
    for (size_t i = 0; i < txouts.size(); ++i) {
        if (!DummySignInput(txNew.vin[i], txouts[i])) {
            return false;
        }
    }
    return true;
}
// Estimate the virtual size of `tx` once fully signed, by looking up
// each input's previous output in the wallet and dummy-signing. Returns
// -1 when an input's parent transaction is not in the wallet.
int64_t CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *wallet)
{
    std::vector<CTxOut> txouts;
    // Look up the inputs. We should have already checked that this transaction
    // IsAllFromMe(ISMINE_SPENDABLE), so every input should already be in our
    // wallet, with a valid index into the vout array, and the ability to sign.
    for (auto& input : tx.vin) {
        const auto mi = wallet->mapWallet.find(input.prevout.hash);
        if (mi == wallet->mapWallet.end()) {
            return -1;
        }
        assert(input.prevout.n < mi->second.tx->vout.size());
        txouts.emplace_back(mi->second.tx->vout[input.prevout.n]);
    }
    return CalculateMaximumSignedTxSize(tx, wallet, txouts);
}
// txouts needs to be in the order of tx.vin
int64_t CalculateMaximumSignedTxSize(const CTransaction &tx, const CWallet *wallet, const std::vector<CTxOut>& txouts)
{
CMutableTransaction txNew(tx);
if (!wallet->DummySignTx(txNew, txouts)) {
// This should never happen, because IsAllFromMe(ISMINE_SPENDABLE)
// implies that we can sign for every input.
return -1;
}
return GetVirtualTransactionSize(txNew);
}
int CalculateMaximumSignedInputSize(const CTxOut& txout, const CWallet* wallet)
{
CMutableTransaction txn;
txn.vin.push_back(CTxIn(COutPoint()));
if (!wallet->DummySignInput(txn.vin[0], txout)) {
// This should never happen, because IsAllFromMe(ISMINE_SPENDABLE)
// implies that we can sign for every input.
return -1;
}
return GetVirtualTransactionInputSize(txn.vin[0]);
}
/**
 * Decompose this transaction into per-output entries from this wallet's
 * point of view.
 *
 * @param[out] listReceived   Outputs paying to us (matching \p filter).
 * @param[out] listSent       Non-change outputs, when we funded the tx.
 * @param[out] nFee           Fee paid when we funded the tx, else 0.
 * @param[out] strSentAccount Account the debit is attributed to.
 * @param[in]  filter         Ownership mask selecting which credits count.
 */
void CWalletTx::GetAmounts(std::list<COutputEntry>& listReceived,
    std::list<COutputEntry>& listSent, CAmount& nFee, std::string& strSentAccount, const isminefilter& filter) const
{
    nFee = 0;
    listReceived.clear();
    listSent.clear();
    strSentAccount = strFromAccount;
    // Compute fee:
    CAmount nDebit = GetDebit(filter);
    if (nDebit > 0) // debit>0 means we signed/sent this transaction
    {
        // Fee = everything we put in minus everything that comes out.
        CAmount nValueOut = tx->GetValueOut();
        nFee = nDebit - nValueOut;
    }
    // Sent/received.
    for (unsigned int i = 0; i < tx->vout.size(); ++i)
    {
        const CTxOut& txout = tx->vout[i];
        isminetype fIsMine = pwallet->IsMine(txout);
        // Only need to handle txouts if AT LEAST one of these is true:
        // 1) they debit from us (sent)
        // 2) the output is to us (received)
        if (nDebit > 0)
        {
            // Don't report 'change' txouts
            if (pwallet->IsChange(txout))
                continue;
        }
        else if (!(fIsMine & filter))
            continue;
        // In either case, we need to get the destination address
        CTxDestination address;
        if (!ExtractDestination(txout.scriptPubKey, address) && !txout.scriptPubKey.IsUnspendable())
        {
            // Unrecognized (but spendable) script: log and record without address.
            LogPrintf("CWalletTx::GetAmounts: Unknown transaction type found, txid %s\n",
                     this->GetHash().ToString());
            address = CNoDestination();
        }
        COutputEntry output = {address, txout.nValue, (int)i};
        // If we are debited by the transaction, add the output as a "sent" entry
        if (nDebit > 0)
            listSent.push_back(output);
        // If we are receiving the output, add it as a "received" entry
        if (fIsMine & filter)
            listReceived.push_back(output);
    }
}
/**
* Scan active chain for relevant transactions after importing keys. This should
* be called whenever new keys are added to the wallet, with the oldest key
* creation time.
*
* @return Earliest timestamp that could be successfully scanned from. Timestamp
* returned will be higher than startTime if relevant blocks could not be read.
*/
int64_t CWallet::RescanFromTime(int64_t startTime, const WalletRescanReserver& reserver, bool update)
{
// Find starting block. May be null if nCreateTime is greater than the
// highest blockchain timestamp, in which case there is nothing that needs
// to be scanned.
CBlockIndex* startBlock = nullptr;
{
LOCK(cs_main);
startBlock = chainActive.FindEarliestAtLeast(startTime - TIMESTAMP_WINDOW);
LogPrintf("%s: Rescanning last %i blocks\n", __func__, startBlock ? chainActive.Height() - startBlock->nHeight + 1 : 0);
}
if (startBlock) {
const CBlockIndex* const failedBlock = ScanForWalletTransactions(startBlock, nullptr, reserver, update);
if (failedBlock) {
return failedBlock->GetBlockTimeMax() + TIMESTAMP_WINDOW + 1;
}
}
return startTime;
}
/**
 * Scan the block chain (starting in pindexStart) for transactions
 * from or to us. If fUpdate is true, found transactions that already
 * exist in the wallet will be updated.
 *
 * Returns null if scan was successful. Otherwise, if a complete rescan was not
 * possible (due to pruning or corruption), returns pointer to the most recent
 * block that could not be scanned.
 *
 * If pindexStop is not a nullptr, the scan will stop at the block-index
 * defined by pindexStop
 *
 * Caller needs to make sure pindexStop (and the optional pindexStart) are on
 * the main chain after to the addition of any new keys you want to detect
 * transactions for.
 */
CBlockIndex* CWallet::ScanForWalletTransactions(CBlockIndex* pindexStart, CBlockIndex* pindexStop, const WalletRescanReserver &reserver, bool fUpdate)
{
    int64_t nNow = GetTime();
    const CChainParams& chainParams = Params();
    // The caller must hold a rescan reservation for the whole scan.
    assert(reserver.isReserved());
    if (pindexStop) {
        assert(pindexStop->nHeight >= pindexStart->nHeight);
    }
    CBlockIndex* pindex = pindexStart;
    CBlockIndex* ret = nullptr;
    if (pindex) LogPrintf("Rescan started from block %d...\n", pindex->nHeight);
    {
        fAbortRescan = false;
        ShowProgress(_("Rescanning..."), 0); // show rescan progress in GUI as dialog or on splashscreen, if -rescan on startup
        CBlockIndex* tip = nullptr;
        double dProgressStart;
        double dProgressTip;
        {
            LOCK(cs_main);
            tip = chainActive.Tip();
            dProgressStart = GuessVerificationProgress(chainParams.TxData(), pindex);
            dProgressTip = GuessVerificationProgress(chainParams.TxData(), tip);
        }
        double gvp = dProgressStart;
        // Main scan loop; can be interrupted by abort flag or shutdown.
        while (pindex && !fAbortRescan && !ShutdownRequested())
        {
            // Update GUI progress at most every 100 blocks.
            if (pindex->nHeight % 100 == 0 && dProgressTip - dProgressStart > 0.0) {
                ShowProgress(_("Rescanning..."), std::max(1, std::min(99, (int)((gvp - dProgressStart) / (dProgressTip - dProgressStart) * 100))));
            }
            // Log progress at most once per minute.
            if (GetTime() >= nNow + 60) {
                nNow = GetTime();
                LogPrintf("Still rescanning. At block %d. Progress=%f\n", pindex->nHeight, gvp);
            }
            CBlock block;
            if (ReadBlockFromDisk(block, pindex, Params().GetConsensus())) {
                LOCK2(cs_main, cs_wallet);
                if (pindex && !chainActive.Contains(pindex)) {
                    // Abort scan if current block is no longer active, to prevent
                    // marking transactions as coming from the wrong block.
                    ret = pindex;
                    break;
                }
                for (size_t posInBlock = 0; posInBlock < block.vtx.size(); ++posInBlock) {
                    AddToWalletIfInvolvingMe(block.vtx[posInBlock], pindex, posInBlock, fUpdate);
                }
            } else {
                // Block unreadable (pruned/corrupt): remember the most recent failure.
                ret = pindex;
            }
            if (pindex == pindexStop) {
                break;
            }
            {
                LOCK(cs_main);
                pindex = chainActive.Next(pindex);
                gvp = GuessVerificationProgress(chainParams.TxData(), pindex);
                if (tip != chainActive.Tip()) {
                    tip = chainActive.Tip();
                    // in case the tip has changed, update progress max
                    dProgressTip = GuessVerificationProgress(chainParams.TxData(), tip);
                }
            }
        }
        if (pindex && fAbortRescan) {
            LogPrintf("Rescan aborted at block %d. Progress=%f\n", pindex->nHeight, gvp);
        } else if (pindex && ShutdownRequested()) {
            LogPrintf("Rescan interrupted by shutdown request at block %d. Progress=%f\n", pindex->nHeight, gvp);
        }
        ShowProgress(_("Rescanning..."), 100); // hide progress dialog in GUI
    }
    return ret;
}
void CWallet::ReacceptWalletTransactions()
{
// If transactions aren't being broadcasted, don't let them into local mempool either
if (!fBroadcastTransactions)
return;
LOCK2(cs_main, cs_wallet);
std::map<int64_t, CWalletTx*> mapSorted;
// Sort pending wallet transactions based on their initial wallet insertion order
for (std::pair<const uint256, CWalletTx>& item : mapWallet)
{
const uint256& wtxid = item.first;
CWalletTx& wtx = item.second;
assert(wtx.GetHash() == wtxid);
int nDepth = wtx.GetDepthInMainChain();
if (!wtx.IsCoinBase() && !wtx.IsCoinStake() && (nDepth == 0 && !wtx.isAbandoned())) {
mapSorted.insert(std::make_pair(wtx.nOrderPos, &wtx));
}
}
// Try to add wallet transactions to memory pool
for (std::pair<const int64_t, CWalletTx*>& item : mapSorted) {
CWalletTx& wtx = *(item.second);
CValidationState state;
wtx.AcceptToMemoryPool(maxTxFee, state);
}
}
/**
 * Relay this transaction to peers if it is eligible: not coinbase, not
 * abandoned, and unconfirmed (depth 0), and it is in (or can be accepted to)
 * the mempool.
 *
 * @return true only when the inventory was actually pushed to peers; note
 *         this is false when \p connman is null even if the tx entered the
 *         mempool.
 */
bool CWalletTx::RelayWalletTransaction(CConnman* connman)
{
    assert(pwallet->GetBroadcastTransactions());
    if (!IsCoinBase() && !isAbandoned() && GetDepthInMainChain() == 0)
    {
        CValidationState state;
        /* GetDepthInMainChain already catches known conflicts. */
        if (InMempool() || AcceptToMemoryPool(maxTxFee, state)) {
            LogPrintf("Relaying wtx %s\n", GetHash().ToString());
            if (connman) {
                CInv inv(MSG_TX, GetHash());
                // Queue the tx inventory for every connected peer.
                connman->ForEachNode([&inv](CNode* pnode)
                {
                    pnode->PushInventory(inv);
                });
                return true;
            }
        }
    }
    return false;
}
std::set<uint256> CWalletTx::GetConflicts() const
{
std::set<uint256> result;
if (pwallet != nullptr)
{
uint256 myHash = GetHash();
result = pwallet->GetConflicts(myHash);
result.erase(myHash);
}
return result;
}
/**
 * Total value of our inputs spent by this transaction, for the given
 * ownership filter. Spendable and watch-only debits are each computed once
 * and cached (the cache fields are written from this const method, so they
 * must be mutable); the two cached components are summed per the filter.
 */
CAmount CWalletTx::GetDebit(const isminefilter& filter) const
{
    // No inputs -> cannot debit anything from us.
    if (tx->vin.empty())
        return 0;
    CAmount debit = 0;
    if(filter & ISMINE_SPENDABLE)
    {
        if (fDebitCached)
            debit += nDebitCached;
        else
        {
            nDebitCached = pwallet->GetDebit(*tx, ISMINE_SPENDABLE);
            fDebitCached = true;
            debit += nDebitCached;
        }
    }
    if(filter & ISMINE_WATCH_ONLY)
    {
        if(fWatchDebitCached)
            debit += nWatchDebitCached;
        else
        {
            nWatchDebitCached = pwallet->GetDebit(*tx, ISMINE_WATCH_ONLY);
            fWatchDebitCached = true;
            debit += nWatchDebitCached;
        }
    }
    return debit;
}
/**
 * Total value of this transaction's outputs that pay to us, for the given
 * ownership filter. Mirrors GetDebit(): spendable and watch-only credits are
 * cached independently and summed per the filter bits.
 */
CAmount CWalletTx::GetCredit(const isminefilter& filter) const
{
    // Must wait until coinbase is safely deep enough in the chain before valuing it
    if (IsCoinBase() && GetBlocksToMaturity() > 0)
        return 0;
    CAmount credit = 0;
    if (filter & ISMINE_SPENDABLE)
    {
        // GetBalance can assume transactions in mapWallet won't change
        if (fCreditCached)
            credit += nCreditCached;
        else
        {
            nCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE);
            fCreditCached = true;
            credit += nCreditCached;
        }
    }
    if (filter & ISMINE_WATCH_ONLY)
    {
        if (fWatchCreditCached)
            credit += nWatchCreditCached;
        else
        {
            nWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY);
            fWatchCreditCached = true;
            credit += nWatchCreditCached;
        }
    }
    return credit;
}
/**
 * Spendable credit from this transaction while it is still inside the
 * maturity window and in the main chain; 0 once mature (the regular credit
 * then applies).
 *
 * NOTE(review): unlike GetImmatureWatchOnlyCredit there is no IsCoinBase()
 * gate here, so immature coinstake transactions are valued too -- confirm
 * this asymmetry is intentional for this (PoS) chain.
 */
CAmount CWalletTx::GetImmatureCredit(bool fUseCache) const
{
    if (GetBlocksToMaturity() > 0 && IsInMainChain())
    {
        if (fUseCache && fImmatureCreditCached)
            return nImmatureCreditCached;
        nImmatureCreditCached = pwallet->GetCredit(*tx, ISMINE_SPENDABLE);
        fImmatureCreditCached = true;
        return nImmatureCreditCached;
    }
    return 0;
}
/**
 * Sum of the values of this transaction's unspent outputs that are spendable
 * by us. Cached; pass fUseCache=false to force recomputation.
 *
 * @throws std::runtime_error if the running total leaves the valid money range.
 */
CAmount CWalletTx::GetAvailableCredit(bool fUseCache) const
{
    if (pwallet == nullptr)
        return 0;
    // Must wait until coinbase is safely deep enough in the chain before valuing it
    if (GetBlocksToMaturity() > 0)
        return 0;
    if (fUseCache && fAvailableCreditCached)
        return nAvailableCreditCached;
    CAmount nCredit = 0;
    uint256 hashTx = GetHash();
    for (unsigned int i = 0; i < tx->vout.size(); i++)
    {
        // Only count outputs the wallet has not seen spent.
        if (!pwallet->IsSpent(hashTx, i))
        {
            const CTxOut &txout = tx->vout[i];
            nCredit += pwallet->GetCredit(txout, ISMINE_SPENDABLE);
            if (!MoneyRange(nCredit))
                throw std::runtime_error(std::string(__func__) + " : value out of range");
        }
    }
    nAvailableCreditCached = nCredit;
    fAvailableCreditCached = true;
    return nCredit;
}
/**
 * Watch-only credit from an immature coinbase transaction in the main chain;
 * 0 for anything mature or non-coinbase. Cached unless fUseCache is false.
 */
CAmount CWalletTx::GetImmatureWatchOnlyCredit(const bool fUseCache) const
{
    if (IsCoinBase() && GetBlocksToMaturity() > 0 && IsInMainChain())
    {
        if (fUseCache && fImmatureWatchCreditCached)
            return nImmatureWatchCreditCached;
        nImmatureWatchCreditCached = pwallet->GetCredit(*tx, ISMINE_WATCH_ONLY);
        fImmatureWatchCreditCached = true;
        return nImmatureWatchCreditCached;
    }
    return 0;
}
/**
 * Sum of the values of this transaction's unspent outputs that are
 * watch-only for us. Cached; pass fUseCache=false to force recomputation.
 *
 * @throws std::runtime_error if the running total leaves the valid money range.
 */
CAmount CWalletTx::GetAvailableWatchOnlyCredit(const bool fUseCache) const
{
    if (pwallet == nullptr)
        return 0;
    // Must wait until coinbase is safely deep enough in the chain before valuing it
    if (IsCoinBase() && GetBlocksToMaturity() > 0)
        return 0;
    if (fUseCache && fAvailableWatchCreditCached)
        return nAvailableWatchCreditCached;
    CAmount nCredit = 0;
    // Hoist the tx hash out of the loop: GetHash() was previously recomputed
    // for every output, and the sibling GetAvailableCredit already hoists it.
    const uint256 hashTx = GetHash();
    for (unsigned int i = 0; i < tx->vout.size(); i++)
    {
        if (!pwallet->IsSpent(hashTx, i))
        {
            const CTxOut &txout = tx->vout[i];
            nCredit += pwallet->GetCredit(txout, ISMINE_WATCH_ONLY);
            if (!MoneyRange(nCredit))
                throw std::runtime_error(std::string(__func__) + ": value out of range");
        }
    }
    nAvailableWatchCreditCached = nCredit;
    fAvailableWatchCreditCached = true;
    return nCredit;
}
CAmount CWalletTx::GetChange() const
{
    // Lazily compute and cache the total change amount of this transaction.
    if (!fChangeCached) {
        nChangeCached = pwallet->GetChange(*tx);
        fChangeCached = true;
    }
    return nChangeCached;
}
/** Whether this transaction is currently in the node's mempool (cached flag). */
bool CWalletTx::InMempool() const
{
    return fInMempool;
}
/**
 * Whether this transaction's outputs can be treated as confirmed-equivalent
 * for balance purposes: confirmed transactions are trusted; unconfirmed ones
 * are trusted only when fully funded by us, in the mempool, and every input's
 * parent output is spendable by us.
 */
bool CWalletTx::IsTrusted() const
{
    // Quick answer in most cases
    if (!CheckFinalTx(*tx))
        return false;
    int nDepth = GetDepthInMainChain();
    if (nDepth >= 1)
        return true;
    // Negative depth means conflicted: never trusted.
    if (nDepth < 0)
        return false;
    if (!pwallet->m_spend_zero_conf_change || !IsFromMe(ISMINE_ALL)) // using wtx's cached debit
        return false;
    // Don't trust unconfirmed transactions from us unless they are in the mempool.
    if (!InMempool())
        return false;
    // Trusted if all inputs are from us and are in the mempool:
    for (const CTxIn& txin : tx->vin)
    {
        // Transactions not sent by us: not trusted
        const CWalletTx* parent = pwallet->GetWalletTx(txin.prevout.hash);
        if (parent == nullptr)
            return false;
        const CTxOut& parentOut = parent->tx->vout[txin.prevout.n];
        if (pwallet->IsMine(parentOut) != ISMINE_SPENDABLE)
            return false;
    }
    return true;
}
bool CWalletTx::IsEquivalentTo(const CWalletTx& _tx) const
{
CMutableTransaction tx1 = *this->tx;
CMutableTransaction tx2 = *_tx.tx;
for (auto& txin : tx1.vin) txin.scriptSig = CScript();
for (auto& txin : tx2.vin) txin.scriptSig = CScript();
return CTransaction(tx1) == CTransaction(tx2);
}
std::vector<uint256> CWallet::ResendWalletTransactionsBefore(int64_t nTime, CConnman* connman)
{
std::vector<uint256> result;
LOCK(cs_wallet);
// Sort them in chronological order
std::multimap<unsigned int, CWalletTx*> mapSorted;
for (std::pair<const uint256, CWalletTx>& item : mapWallet)
{
CWalletTx& wtx = item.second;
// Don't rebroadcast if newer than nTime:
if (wtx.nTimeReceived > nTime)
continue;
mapSorted.insert(std::make_pair(wtx.nTimeReceived, &wtx));
}
for (std::pair<const unsigned int, CWalletTx*>& item : mapSorted)
{
CWalletTx& wtx = *item.second;
if (wtx.RelayWalletTransaction(connman))
result.push_back(wtx.GetHash());
}
return result;
}
/**
 * Periodically rebroadcast unconfirmed wallet transactions.
 *
 * The first call only schedules a randomized next-resend time and returns,
 * so a freshly started node does not immediately announce its transactions.
 */
void CWallet::ResendWalletTransactions(int64_t nBestBlockTime, CConnman* connman)
{
    // Do this infrequently and randomly to avoid giving away
    // that these are our transactions.
    if (GetTime() < nNextResend || !fBroadcastTransactions)
        return;
    bool fFirst = (nNextResend == 0);
    // Randomize the next resend to within the next 30 minutes.
    nNextResend = GetTime() + GetRand(30 * 60);
    if (fFirst)
        return;
    // Only do it if there's been a new block since last time
    if (nBestBlockTime < nLastResend)
        return;
    nLastResend = GetTime();
    // Rebroadcast unconfirmed txes older than 5 minutes before the last
    // block was found:
    std::vector<uint256> relayed = ResendWalletTransactionsBefore(nBestBlockTime-5*60, connman);
    if (!relayed.empty())
        LogPrintf("%s: rebroadcast %u unconfirmed transactions\n", __func__, relayed.size());
}
/** @} */ // end of mapWallet
/** @defgroup Actions
*
* @{
*/
CAmount CWallet::GetBalance() const
{
    // Sum of available (spendable, unspent) credit over all trusted txs.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            const CWalletTx& wtx = entry.second;
            if (wtx.IsTrusted())
                total += wtx.GetAvailableCredit();
        }
    }
    return total;
}
CAmount CWallet::GetUnconfirmedBalance() const
{
    // Credit from untrusted zero-conf transactions that are in the mempool.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            const CWalletTx& wtx = entry.second;
            if (!wtx.IsTrusted() && wtx.GetDepthInMainChain() == 0 && wtx.InMempool())
                total += wtx.GetAvailableCredit();
        }
    }
    return total;
}
CAmount CWallet::GetImmatureBalance() const
{
    // Credit that is still within the maturity window, summed over the wallet.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            total += entry.second.GetImmatureCredit();
        }
    }
    return total;
}
CAmount CWallet::GetWatchOnlyBalance() const
{
    // Watch-only counterpart of GetBalance(): trusted transactions only.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            const CWalletTx& wtx = entry.second;
            if (wtx.IsTrusted())
                total += wtx.GetAvailableWatchOnlyCredit();
        }
    }
    return total;
}
CAmount CWallet::GetUnconfirmedWatchOnlyBalance() const
{
    // Watch-only credit from untrusted zero-conf txs that are in the mempool.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            const CWalletTx& wtx = entry.second;
            if (!wtx.IsTrusted() && wtx.GetDepthInMainChain() == 0 && wtx.InMempool())
                total += wtx.GetAvailableWatchOnlyCredit();
        }
    }
    return total;
}
CAmount CWallet::GetImmatureWatchOnlyBalance() const
{
    // Watch-only credit still within the maturity window, wallet-wide.
    CAmount total = 0;
    {
        LOCK2(cs_main, cs_wallet);
        for (const auto& entry : mapWallet) {
            total += entry.second.GetImmatureWatchOnlyCredit();
        }
    }
    return total;
}
// Calculate total balance in a different way from GetBalance. The biggest
// difference is that GetBalance sums up all unspent TxOuts paying to the
// wallet, while this sums up both spent and unspent TxOuts paying to the
// wallet, and then subtracts the values of TxIns spending from the wallet. This
// also has fewer restrictions on which unconfirmed transactions are considered
// trusted.
CAmount CWallet::GetLegacyBalance(const isminefilter& filter, int minDepth, const std::string* account) const
{
    LOCK2(cs_main, cs_wallet);
    CAmount balance = 0;
    for (const auto& entry : mapWallet) {
        const CWalletTx& wtx = entry.second;
        const int depth = wtx.GetDepthInMainChain();
        // Skip conflicted, non-final, and immature transactions entirely.
        if (depth < 0 || !CheckFinalTx(*wtx.tx) || wtx.GetBlocksToMaturity() > 0) {
            continue;
        }
        // Loop through tx outputs and add incoming payments. For outgoing txs,
        // treat change outputs specially, as part of the amount debited.
        CAmount debit = wtx.GetDebit(filter);
        const bool outgoing = debit > 0;
        for (const CTxOut& out : wtx.tx->vout) {
            if (outgoing && IsChange(out)) {
                // Change effectively stays with us: reduce the debit instead
                // of crediting the output.
                debit -= out.nValue;
            } else if (IsMine(out) & filter && depth >= minDepth && (!account || *account == GetLabelName(out.scriptPubKey))) {
                balance += out.nValue;
            }
        }
        // For outgoing txs, subtract amount debited.
        if (outgoing && (!account || *account == wtx.strFromAccount)) {
            balance -= debit;
        }
    }
    if (account) {
        // Include manual account credit/debit bookkeeping entries.
        balance += WalletBatch(*database).GetAccountCreditDebit(*account);
    }
    return balance;
}
CAmount CWallet::GetAvailableBalance(const CCoinControl* coinControl) const
{
    // Total value of spendable outputs currently available under the given
    // coin-control constraints (if any).
    LOCK2(cs_main, cs_wallet);
    std::vector<COutput> coins;
    AvailableCoins(coins, true, coinControl);
    CAmount total = 0;
    for (const COutput& out : coins) {
        if (out.fSpendable) {
            total += out.tx->tx->vout[out.i].nValue;
        }
    }
    return total;
}
/**
 * Decide whether an output value qualifies for the requested coin type.
 * The PrivateSend denomination/collateral checks are currently compiled out
 * (#if 0), so those branches leave found == false.
 */
static bool IsCorrectType(CAmount nAmount, AvailableCoinsType nCoinType)
{
    bool found = false;
    if(nCoinType == ONLY_DENOMINATED) {
#if 0
        found = CPrivateSend::IsDenominatedAmount(nAmount);
#endif
    } else if(nCoinType == ONLY_NONDENOMINATED) {
#if 0
        if (!CPrivateSend::IsCollateralAmount(nAmount))
            found = !CPrivateSend::IsDenominatedAmount(nAmount);
#endif
    } else if(nCoinType == ONLY_MASTERNODE_COLLATERAL) {
        // Hoist the loop-invariant chain-params lookups: previously both
        // Params() accessors were re-evaluated on every iteration.
        const int levels = Params().CollateralLevels();
        const auto& amounts = Params().ValidCollateralAmounts();
        for(int i = 0; i < levels; i++) {
            if(nAmount == (amounts[i] * COIN)) {
                found = true;
                break;
            }
        }
    } else if(nCoinType == ONLY_PRIVATESEND_COLLATERAL) {
#if 0
        found = CPrivateSend::IsCollateralAmount(nAmount);
#endif
    } else {
        // ALL_COINS (and any other unrestricted type): every amount qualifies.
        found = true;
    }
    return found;
}
/**
 * Populate vCoins with eligible unspent outputs from the wallet.
 *
 * @param[out] vCoins            Receives the eligible outputs (cleared first).
 * @param[in]  fOnlySafe         Skip outputs from untrusted / replacement txs.
 * @param[in]  coinControl       Optional constraints: coin type, preselected
 *                               outpoints, watch-only allowance.
 * @param[in]  nMinimumAmount    Per-output minimum value.
 * @param[in]  nMaximumAmount    Per-output maximum value.
 * @param[in]  nMinimumSumAmount Stop early once this total is gathered
 *                               (MAX_MONEY disables the early stop).
 * @param[in]  nMaximumCount     Stop after this many outputs (0 = unlimited).
 * @param[in]  nMinDepth         Minimum confirmation depth.
 * @param[in]  nMaxDepth         Maximum confirmation depth.
 * @param[in]  fUseInstantSend   Require INSTANTSEND_CONFIRMATIONS_REQUIRED depth.
 */
void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const CCoinControl *coinControl, const CAmount &nMinimumAmount, const CAmount &nMaximumAmount, const CAmount &nMinimumSumAmount, const uint64_t nMaximumCount, const int nMinDepth, const int nMaxDepth, bool fUseInstantSend) const
{
    AssertLockHeld(cs_main);
    AssertLockHeld(cs_wallet);
    vCoins.clear();
    CAmount nTotal = 0;
    auto nCoinType = coinControl ? coinControl->nCoinType : ALL_COINS;
    for (const auto& entry : mapWallet)
    {
        const uint256& wtxid = entry.first;
        const CWalletTx* pcoin = &entry.second;
        if (!CheckFinalTx(*pcoin->tx))
            continue;
        if (pcoin->GetBlocksToMaturity() > 0)
            continue;
        int nDepth = pcoin->GetDepthInMainChain();
        // Negative depth means conflicted with the active chain.
        if (nDepth < 0)
            continue;
        // We should not consider coins which aren't at least in our mempool
        // It's possible for these to be conflicted via ancestors which we may never be able to detect
        if (nDepth == 0 && !pcoin->InMempool())
            continue;
        // do not use IX for inputs that have less then INSTANTSEND_CONFIRMATIONS_REQUIRED blockchain confirmations
        if (fUseInstantSend && nDepth < INSTANTSEND_CONFIRMATIONS_REQUIRED)
            continue;
        bool safeTx = pcoin->IsTrusted();
        // We should not consider coins from transactions that are replacing
        // other transactions.
        //
        // Example: There is a transaction A which is replaced by bumpfee
        // transaction B. In this case, we want to prevent creation of
        // a transaction B' which spends an output of B.
        //
        // Reason: If transaction A were initially confirmed, transactions B
        // and B' would no longer be valid, so the user would have to create
        // a new transaction C to replace B'. However, in the case of a
        // one-block reorg, transactions B' and C might BOTH be accepted,
        // when the user only wanted one of them. Specifically, there could
        // be a 1-block reorg away from the chain where transactions A and C
        // were accepted to another chain where B, B', and C were all
        // accepted.
        if (nDepth == 0 && pcoin->mapValue.count("replaces_txid")) {
            safeTx = false;
        }
        // Similarly, we should not consider coins from transactions that
        // have been replaced. In the example above, we would want to prevent
        // creation of a transaction A' spending an output of A, because if
        // transaction B were initially confirmed, conflicting with A and
        // A', we wouldn't want to the user to create a transaction D
        // intending to replace A', but potentially resulting in a scenario
        // where A, A', and D could all be accepted (instead of just B and
        // D, or just A and A' like the user would want).
        if (nDepth == 0 && pcoin->mapValue.count("replaced_by_txid")) {
            safeTx = false;
        }
        if (fOnlySafe && !safeTx) {
            continue;
        }
        if (nDepth < nMinDepth || nDepth > nMaxDepth)
            continue;
        // Per-output filters.
        for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++) {
            if(!IsCorrectType(pcoin->tx->vout[i].nValue, nCoinType))
                continue;
            if (pcoin->tx->vout[i].nValue < nMinimumAmount || pcoin->tx->vout[i].nValue > nMaximumAmount)
                continue;
            if (coinControl && coinControl->HasSelected() && !coinControl->fAllowOtherInputs && !coinControl->IsSelected(COutPoint(entry.first, i)))
                continue;
            // Locked coins stay selectable when hunting masternode collateral.
            if (IsLockedCoin(entry.first, i) && nCoinType != ONLY_MASTERNODE_COLLATERAL)
                continue;
            if (IsSpent(wtxid, i))
                continue;
            isminetype mine = IsMine(pcoin->tx->vout[i]);
            if (mine == ISMINE_NO) {
                continue;
            }
            bool fSpendableIn = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (coinControl && coinControl->fAllowWatchOnly && (mine & ISMINE_WATCH_SOLVABLE) != ISMINE_NO);
            bool fSolvableIn = (mine & (ISMINE_SPENDABLE | ISMINE_WATCH_SOLVABLE)) != ISMINE_NO;
            vCoins.push_back(COutput(pcoin, i, nDepth, fSpendableIn, fSolvableIn, safeTx));
            // Checks the sum amount of all UTXO's.
            if (nMinimumSumAmount != MAX_MONEY) {
                nTotal += pcoin->tx->vout[i].nValue;
                if (nTotal >= nMinimumSumAmount) {
                    return;
                }
            }
            // Checks the maximum number of UTXO's.
            if (nMaximumCount > 0 && vCoins.size() >= nMaximumCount) {
                return;
            }
        }
    }
}
/**
 * Group available (and locked-but-ours) outputs by destination address.
 * Locked coins are appended with safe=false so the GUI can list them even
 * though AvailableCoins() skips them.
 */
std::map<CTxDestination, std::vector<COutput>> CWallet::ListCoins() const
{
    // TODO: Add AssertLockHeld(cs_wallet) here.
    //
    // Because the return value from this function contains pointers to
    // CWalletTx objects, callers to this function really should acquire the
    // cs_wallet lock before calling it. However, the current caller doesn't
    // acquire this lock yet. There was an attempt to add the missing lock in
    // https://github.com/5g/5g/pull/10340, but that change has been
    // postponed until after https://github.com/5g/5g/pull/10244 to
    // avoid adding some extra complexity to the Qt code.
    std::map<CTxDestination, std::vector<COutput>> result;
    std::vector<COutput> availableCoins;
    LOCK2(cs_main, cs_wallet);
    AvailableCoins(availableCoins);
    for (auto& coin : availableCoins) {
        CTxDestination address;
        // Attribute the coin to its non-change ancestor output's address.
        if (coin.fSpendable &&
            ExtractDestination(FindNonChangeParentOutput(*coin.tx->tx, coin.i).scriptPubKey, address)) {
            result[address].emplace_back(std::move(coin));
        }
    }
    std::vector<COutPoint> lockedCoins;
    ListLockedCoins(lockedCoins);
    for (const auto& output : lockedCoins) {
        auto it = mapWallet.find(output.hash);
        if (it != mapWallet.end()) {
            int depth = it->second.GetDepthInMainChain();
            if (depth >= 0 && output.n < it->second.tx->vout.size() &&
                IsMine(it->second.tx->vout[output.n]) == ISMINE_SPENDABLE) {
                CTxDestination address;
                if (ExtractDestination(FindNonChangeParentOutput(*it->second.tx, output.n).scriptPubKey, address)) {
                    result[address].emplace_back(
                        &it->second, output.n, depth, true /* spendable */, true /* solvable */, false /* safe */);
                }
            }
        }
    }
    return result;
}
/**
 * Walk backwards through parent transactions while the current output looks
 * like change, following each transaction's first input. Stops at the first
 * non-change output, or when the parent chain leaves the wallet / our
 * ownership, and returns that output.
 */
const CTxOut& CWallet::FindNonChangeParentOutput(const CTransaction& tx, int output) const
{
    const CTransaction* ptx = &tx;
    int n = output;
    while (IsChange(ptx->vout[n]) && ptx->vin.size() > 0) {
        const COutPoint& prevout = ptx->vin[0].prevout;
        auto it = mapWallet.find(prevout.hash);
        // Stop if the parent is unknown, the index is bad, or it isn't ours.
        if (it == mapWallet.end() || it->second.tx->vout.size() <= prevout.n ||
            !IsMine(it->second.tx->vout[prevout.n])) {
            break;
        }
        ptx = it->second.tx.get();
        n = prevout.n;
    }
    return ptx->vout[n];
}
/**
 * Shuffle-free coin selection at a single eligibility level: filter the
 * output groups by the given confirmation/ancestry requirements and run
 * either Branch-and-Bound (when coin_selection_params.use_bnb) or the
 * knapsack solver over the survivors.
 *
 * @param[out] setCoinsRet  Selected input coins (cleared first).
 * @param[out] nValueRet    Total value of the selection (reset to 0 first).
 * @param[out] bnb_used     Reports which algorithm actually ran.
 * @return true when a selection reaching nTargetValue was found.
 */
bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<OutputGroup> groups, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params, bool& bnb_used) const
{
    setCoinsRet.clear();
    nValueRet = 0;
    std::vector<OutputGroup> utxo_pool;
    if (coin_selection_params.use_bnb) {
        // Get long term estimate
        FeeCalculation feeCalc;
        CCoinControl temp;
        temp.m_confirm_target = 1008;
        CFeeRate long_term_feerate = GetMinimumFeeRate(*this, temp, ::mempool, ::feeEstimator, &feeCalc);
        // Calculate cost of change
        CAmount cost_of_change = GetDiscardRate(*this, ::feeEstimator).GetFee(coin_selection_params.change_spend_size) + coin_selection_params.effective_fee.GetFee(coin_selection_params.change_output_size);
        // Filter by the min conf specs and add to utxo_pool and calculate effective value
        for (OutputGroup& group : groups) {
            if (!group.EligibleForSpending(eligibility_filter)) continue;
            group.fee = 0;
            group.long_term_fee = 0;
            group.effective_value = 0;
            for (auto it = group.m_outputs.begin(); it != group.m_outputs.end(); ) {
                const CInputCoin& coin = *it;
                // Effective value = output value minus the fee its input adds
                // (unknown input sizes, m_input_bytes < 0, cost nothing here).
                CAmount effective_value = coin.txout.nValue - (coin.m_input_bytes < 0 ? 0 : coin_selection_params.effective_fee.GetFee(coin.m_input_bytes));
                // Only include outputs that are positive effective value (i.e. not dust)
                if (effective_value > 0) {
                    group.fee += coin.m_input_bytes < 0 ? 0 : coin_selection_params.effective_fee.GetFee(coin.m_input_bytes);
                    group.long_term_fee += coin.m_input_bytes < 0 ? 0 : long_term_feerate.GetFee(coin.m_input_bytes);
                    group.effective_value += effective_value;
                    ++it;
                } else {
                    it = group.Discard(coin);
                }
            }
            if (group.effective_value > 0) utxo_pool.push_back(group);
        }
        // Calculate the fees for things that aren't inputs
        CAmount not_input_fees = coin_selection_params.effective_fee.GetFee(coin_selection_params.tx_noinputs_size);
        bnb_used = true;
        return SelectCoinsBnB(utxo_pool, nTargetValue, cost_of_change, setCoinsRet, nValueRet, not_input_fees);
    } else {
        // Filter by the min conf specs and add to utxo_pool
        for (const OutputGroup& group : groups) {
            if (!group.EligibleForSpending(eligibility_filter)) continue;
            utxo_pool.push_back(group);
        }
        bnb_used = false;
        return KnapsackSolver(nTargetValue, utxo_pool, setCoinsRet, nValueRet);
    }
}
bool CWallet::MintableCoins()
{
    // True when the wallet holds at least one available coin old enough to
    // stake. (A -reservebalance check formerly lived here as commented-out
    // code; it was never active.)
    std::vector<COutput> coins;
    LOCK2(cs_main, cs_wallet);
    AvailableCoins(coins, true);
    for (const COutput& out : coins) {
        if (GetTime() - out.tx->GetTxTime() > Params().GetConsensus().nStakeMinAge) {
            return true;
        }
    }
    return false;
}
/**
 * Collect wallet outputs that are eligible to be used as stake kernels.
 *
 * @param[out] setCoins           Receives (wtx, vout-index) pairs of candidates.
 * @param[in]  nTargetAmount      Accumulated into nAmountSelected but does not
 *                                currently stop the scan -- all candidates are
 *                                gathered.
 * @param[in]  fSelectWitness     Allow non-P2PKH (witness/script) destinations.
 * @param[in]  scriptFilterPubKey When non-empty, only outputs paying exactly
 *                                this script are considered (watch-only mode).
 * @return Always true.
 */
bool CWallet::SelectStakeCoins(StakeCoinsSet &setCoins, CAmount nTargetAmount, bool fSelectWitness, const CScript &scriptFilterPubKey) const
{
    std::vector<COutput> vCoins;
    CCoinControl coinControl;
    coinControl.fAllowWatchOnly = !scriptFilterPubKey.empty();
    {
        LOCK2(cs_main, cs_wallet);
        AvailableCoins(vCoins, !scriptFilterPubKey.empty(), &coinControl);
    }
    CAmount nAmountSelected = 0;
    // Scripts rejected once are skipped on later outputs.
    // NOTE(review): nothing below inserts into rejectCache, so it currently
    // filters nothing -- confirm whether insertions were removed intentionally.
    std::set<CScript> rejectCache;
    for (const COutput& out : vCoins) {
        CScript scriptPubKeyKernel;
        scriptPubKeyKernel = out.tx->tx->vout[out.i].scriptPubKey;
        if(!coinControl.fAllowWatchOnly && !out.fSpendable)
            continue;
        CTxDestination dest;
        if(!ExtractDestination(scriptPubKeyKernel, dest))
            continue;
        // Only key-hash, witness-key-hash, or script-hash destinations qualify.
        if(!boost::get<CKeyID>(&dest) && !boost::get<WitnessV0KeyHash>(&dest) &&
           !boost::get<CScriptID>(&dest))
            continue;
        if(!fSelectWitness && !boost::get<CKeyID>(&dest))
            continue;
        // Coin must satisfy the minimum stake age.
        if (GetTime() - out.tx->GetTxTime() < Params().GetConsensus().nStakeMinAge)
            continue;
        // Coinstake outputs need full maturity; others need 10 confirmations.
        if (out.nDepth < (out.tx->tx->IsCoinStake() ? COINBASE_MATURITY : 10))
            continue;
        auto scriptPubKeyCoin = out.tx->tx->vout[out.i].scriptPubKey;
        if(!scriptFilterPubKey.empty() && scriptPubKeyCoin != scriptFilterPubKey)
            continue;
        if(rejectCache.count(scriptPubKeyCoin))
            continue;
        if (out.nDepth < Params().GetConsensus().nMinStakeHistory)
            continue;
        if (out.tx->tx->vout[out.i].nValue < Params().GetConsensus().nMinStakeAmount)
            continue;
        nAmountSelected += out.tx->tx->vout[out.i].nValue;
        setCoins.emplace(out.tx, out.i);
    }
    return true;
}
struct CompareByAmount
{
bool operator()(const CompactTallyItem& t1,
const CompactTallyItem& t2) const
{
return t1.nAmount > t2.nAmount;
}
};
/**
 * Top-level coin selection: honor coin-control preselection, then try
 * SelectCoinsMinConf with progressively looser eligibility filters until one
 * succeeds (confirmed-from-others first, then zero-conf change with rising
 * ancestry limits).
 *
 * @param[out] setCoinsRet  Final selection, including any preset inputs.
 * @param[out] nValueRet    Total value of the final selection.
 * @param[out] bnb_used     Whether Branch-and-Bound produced the selection.
 * @return true when nTargetValue was reached.
 */
bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAmount& nTargetValue, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CCoinControl& coin_control, CoinSelectionParams& coin_selection_params, bool& bnb_used) const
{
    std::vector<COutput> vCoins(vAvailableCoins);
    // coin control -> return all selected outputs (we want all selected to go into the transaction for sure)
    if (coin_control.HasSelected() && !coin_control.fAllowOtherInputs)
    {
        // We didn't use BnB here, so set it to false.
        bnb_used = false;
        for (const COutput& out : vCoins)
        {
            if (!out.fSpendable)
                continue;
            nValueRet += out.tx->tx->vout[out.i].nValue;
            setCoinsRet.insert(out.GetInputCoin());
        }
        return (nValueRet >= nTargetValue);
    }
    // calculate value from preset inputs and store them
    std::set<CInputCoin> setPresetCoins;
    CAmount nValueFromPresetInputs = 0;
    std::vector<COutPoint> vPresetInputs;
    coin_control.ListSelected(vPresetInputs);
    for (const COutPoint& outpoint : vPresetInputs)
    {
        // For now, don't use BnB if preset inputs are selected. TODO: Enable this later
        bnb_used = false;
        coin_selection_params.use_bnb = false;
        std::map<uint256, CWalletTx>::const_iterator it = mapWallet.find(outpoint.hash);
        if (it != mapWallet.end())
        {
            const CWalletTx* pcoin = &it->second;
            // Clearly invalid input, fail
            if (pcoin->tx->vout.size() <= outpoint.n)
                return false;
            // Just to calculate the marginal byte size
            nValueFromPresetInputs += pcoin->tx->vout[outpoint.n].nValue;
            setPresetCoins.insert(CInputCoin(pcoin->tx, outpoint.n));
        } else
            return false; // TODO: Allow non-wallet inputs
    }
    // remove preset inputs from vCoins
    for (std::vector<COutput>::iterator it = vCoins.begin(); it != vCoins.end() && coin_control.HasSelected();)
    {
        if (setPresetCoins.count(it->GetInputCoin()))
            it = vCoins.erase(it);
        else
            ++it;
    }
    // form groups from remaining coins; note that preset coins will not
    // automatically have their associated (same address) coins included
    if (coin_control.m_avoid_partial_spends && vCoins.size() > OUTPUT_GROUP_MAX_ENTRIES) {
        // Cases where we have 11+ outputs all pointing to the same destination may result in
        // privacy leaks as they will potentially be deterministically sorted. We solve that by
        // explicitly shuffling the outputs before processing
        std::shuffle(vCoins.begin(), vCoins.end(), FastRandomContext());
    }
    std::vector<OutputGroup> groups = GroupOutputs(vCoins, !coin_control.m_avoid_partial_spends);
    size_t max_ancestors = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT));
    size_t max_descendants = (size_t)std::max<int64_t>(1, gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT));
    bool fRejectLongChains = gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS);
    // Fallback cascade: each clause only runs if every earlier one failed;
    // short-circuiting preserves the preference order.
    bool res = nTargetValue <= nValueFromPresetInputs ||
        SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 6, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
        SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(1, 1, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) ||
        (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, 2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
        (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
        (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
        (m_spend_zero_conf_change && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) ||
        (m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(nTargetValue - nValueFromPresetInputs, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used));
    // because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset
    util::insert(setCoinsRet, setPresetCoins);
    // add preset inputs to the total value selected
    nValueRet += nValueFromPresetInputs;
    return res;
}
bool CWallet::SignTransaction(CMutableTransaction &tx)
{
    AssertLockHeld(cs_wallet); // mapWallet

    // Produce a signature for every input of the mutable transaction.
    // Every prevout must refer to a transaction tracked by this wallet,
    // otherwise signing fails as a whole.
    CTransaction txNewConst(tx);
    int nIn = 0;
    for (const auto& input : tx.vin) {
        auto mi = mapWallet.find(input.prevout.hash);
        if (mi == mapWallet.end() || input.prevout.n >= mi->second.tx->vout.size()) {
            return false; // unknown prevout tx, or out-of-range output index
        }
        const CTxOut& prev_txout = mi->second.tx->vout[input.prevout.n];
        SignatureData sigdata;
        const bool signed_ok = ProduceSignature(
            *this, TransactionSignatureCreator(&txNewConst, nIn, prev_txout.nValue, SIGHASH_ALL),
            prev_txout.scriptPubKey, sigdata);
        if (!signed_ok) {
            return false;
        }
        UpdateTransaction(tx, nIn, sigdata);
        ++nIn;
    }
    return true;
}
bool CWallet::FundTransaction(CMutableTransaction& tx, CAmount& nFeeRet, int& nChangePosInOut, std::string& strFailReason, bool lockUnspents, const std::set<int>& setSubtractFeeFromOutputs, CCoinControl coinControl)
{
    // Convert every existing output into a CRecipient so that
    // CreateTransaction can rebuild the transaction around them.
    std::vector<CRecipient> recipients;
    for (size_t idx = 0; idx < tx.vout.size(); idx++) {
        const CTxOut& out = tx.vout[idx];
        recipients.push_back({out.scriptPubKey, out.nValue, setSubtractFeeFromOutputs.count(idx) == 1});
    }

    // Keep the caller's inputs selected while still allowing the coin
    // selector to add more funding inputs of its own.
    coinControl.fAllowOtherInputs = true;
    for (const CTxIn& txin : tx.vin) {
        coinControl.Select(txin.prevout);
    }

    // Acquire the locks to prevent races to the new locked unspents between
    // the CreateTransaction call and LockCoin calls (when lockUnspents is true).
    LOCK2(cs_main, cs_wallet);

    CReserveKey reservekey(this);
    CTransactionRef tx_new;
    if (!CreateTransaction(recipients, tx_new, reservekey, nFeeRet, nChangePosInOut, strFailReason, coinControl, false)) {
        return false;
    }

    if (nChangePosInOut != -1) {
        tx.vout.insert(tx.vout.begin() + nChangePosInOut, tx_new->vout[nChangePosInOut]);
        // We don't have the normal Create/Commit cycle, and don't want to risk
        // reusing change, so just remove the key from the keypool here.
        reservekey.KeepKey();
    }

    // Copy output values back from the created transaction; the fee may have
    // been subtracted from them.
    for (unsigned int idx = 0; idx < tx.vout.size(); idx++) {
        tx.vout[idx].nValue = tx_new->vout[idx].nValue;
    }

    // Append any inputs the selector added, keeping the original txins
    // (and their scriptSig/order) untouched.
    for (const CTxIn& txin : tx_new->vin) {
        if (coinControl.IsSelected(txin.prevout)) continue;
        tx.vin.push_back(txin);
        if (lockUnspents) {
            LockCoin(txin.prevout);
        }
    }

    return true;
}
OutputType CWallet::TransactionChangeType(OutputType change_type, const std::vector<CRecipient>& vecSend)
{
    // A legacy default address type forces legacy change, regardless of the
    // destinations being paid (even if some outputs are P2WPKH or P2WSH).
    if (m_default_address_type == OutputType::LEGACY) return OutputType::LEGACY;

    // If any destination is a witness program (P2WPKH/P2WSH), prefer a
    // bech32 change output.
    for (const auto& recipient : vecSend) {
        int version = 0;
        std::vector<unsigned char> program;
        if (recipient.scriptPubKey.IsWitnessProgram(version, program)) {
            return OutputType::BECH32;
        }
    }

    // Otherwise fall back to the wallet's default address type for change.
    return m_default_address_type;
}
bool CWallet::GetBudgetSystemCollateralTX(CWalletTx& tx, uint256 hash, CAmount amount, bool fUseInstantSend)
{
    // Collateral pays `amount` to an unspendable OP_RETURN script
    // committing to `hash`.
    CScript scriptChange;
    scriptChange << OP_RETURN << ToByteVector(hash);

    // Change address for any leftover funds.
    CReserveKey reservekey(this);
    CAmount nFeeRet = 0;
    int nChangePosRet = -1;
    std::string strFail = "";

    std::vector<CRecipient> vecSend;
    vecSend.push_back({scriptChange, amount, false});

    if (!CreateTransaction(vecSend, tx.tx, reservekey, nFeeRet, nChangePosRet, strFail, {}, true)) {
        LogPrintf("CWallet::GetBudgetSystemCollateralTX -- Error: %s\n", strFail);
        return false;
    }
    return true;
}
/**
 * Build a transaction paying `vecSend`.
 *
 * Selects inputs from the wallet, computes the fee, optionally adds a change
 * output (drawn from `reservekey`), and (if `sign`) signs all inputs.
 *
 * @param vecSend          recipients; fSubtractFeeFromAmount entries share the fee.
 * @param tx               [out] the constructed transaction.
 * @param reservekey       key reserved for a possible change output; returned
 *                         to the pool if no change is created.
 * @param nFeeRet          [out] fee actually paid.
 * @param nChangePosInOut  [in] requested change position or -1 for random;
 *                         [out] final change position or -1 if no change.
 * @param strFailReason    [out] human-readable error on failure.
 * @param coin_control     coin selection / fee / RBF constraints.
 * @param sign             whether to produce final signatures.
 * @param onTxToBeSigned   optional callback invoked on the assembled
 *                         CMutableTransaction just before signing.
 * @return true on success, false with strFailReason set otherwise.
 */
bool CWallet::CreateTransaction(const std::vector<CRecipient>& vecSend, CTransactionRef& tx, CReserveKey& reservekey, CAmount& nFeeRet,
                                int& nChangePosInOut, std::string& strFailReason, const CCoinControl& coin_control, bool sign, OnTransactionToBeSigned onTxToBeSigned)
{
    CAmount nValue = 0;
    int nChangePosRequest = nChangePosInOut;
    unsigned int nSubtractFeeFromAmount = 0;
    // Sum recipient amounts; the nValue < 0 check also catches signed
    // overflow of the running total accumulated so far.
    for (const auto& recipient : vecSend)
    {
        if (nValue < 0 || recipient.nAmount < 0)
        {
            strFailReason = _("Transaction amounts must not be negative");
            return false;
        }
        nValue += recipient.nAmount;
        if (recipient.fSubtractFeeFromAmount)
            nSubtractFeeFromAmount++;
    }
    if (vecSend.empty())
    {
        strFailReason = _("Transaction must have at least one recipient");
        return false;
    }
    CMutableTransaction txNew;
    // Discourage fee sniping.
    //
    // For a large miner the value of the transactions in the best block and
    // the mempool can exceed the cost of deliberately attempting to mine two
    // blocks to orphan the current best block. By setting nLockTime such that
    // only the next block can include the transaction, we discourage this
    // practice as the height restricted and limited blocksize gives miners
    // considering fee sniping fewer options for pulling off this attack.
    //
    // A simple way to think about this is from the wallet's point of view we
    // always want the blockchain to move forward. By setting nLockTime this
    // way we're basically making the statement that we only want this
    // transaction to appear in the next block; we don't want to potentially
    // encourage reorgs by allowing transactions to appear at lower heights
    // than the next block in forks of the best chain.
    //
    // Of course, the subsidy is high enough, and transaction volume low
    // enough, that fee sniping isn't a problem yet, but by implementing a fix
    // now we ensure code won't be written that makes assumptions about
    // nLockTime that preclude a fix later.
    txNew.nLockTime = chainActive.Height();
    // Secondly occasionally randomly pick a nLockTime even further back, so
    // that transactions that are delayed after signing for whatever reason,
    // e.g. high-latency mix networks and some CoinJoin implementations, have
    // better privacy.
    if (GetRandInt(10) == 0)
        txNew.nLockTime = std::max(0, (int)txNew.nLockTime - GetRandInt(100));
    assert(txNew.nLockTime <= (unsigned int)chainActive.Height());
    assert(txNew.nLockTime < LOCKTIME_THRESHOLD);
    FeeCalculation feeCalc;
    CAmount nFeeNeeded;
    int nBytes;
    {
        std::set<CInputCoin> setCoins;
        LOCK2(cs_main, cs_wallet);
        {
            std::vector<COutput> vAvailableCoins;
            AvailableCoins(vAvailableCoins, true, &coin_control);
            CoinSelectionParams coin_selection_params; // Parameters for coin selection, init with dummy

            // Create change script that will be used if we need change
            // TODO: pass in scriptChange instead of reservekey so
            // change transaction isn't always pay-to-5g-address
            CScript scriptChange;

            // coin control: send change to custom address
            if (!boost::get<CNoDestination>(&coin_control.destChange)) {
                scriptChange = GetScriptForDestination(coin_control.destChange);
            } else { // no coin control: send change to newly generated address
                // Note: We use a new key here to keep it from being obvious which side is the change.
                //  The drawback is that by not reusing a previous key, the change may be lost if a
                //  backup is restored, if the backup doesn't have the new private key for the change.
                //  If we reused the old key, it would be possible to add code to look for and
                //  rediscover unknown transactions that were written with keys of ours to recover
                //  post-backup change.

                // Reserve a new key pair from key pool
                CPubKey vchPubKey;
                bool ret;
                ret = reservekey.GetReservedKey(vchPubKey, true);
                if (!ret)
                {
                    strFailReason = _("Keypool ran out, please call keypoolrefill first");
                    return false;
                }
                const OutputType change_type = TransactionChangeType(coin_control.m_change_type ? *coin_control.m_change_type : m_default_change_type, vecSend);
                LearnRelatedScripts(vchPubKey, change_type);
                scriptChange = GetScriptForDestination(GetDestinationForKey(vchPubKey, change_type));
            }
            CTxOut change_prototype_txout(0, scriptChange);
            coin_selection_params.change_output_size = GetSerializeSize(change_prototype_txout, SER_DISK, 0);

            CFeeRate discard_rate = GetDiscardRate(*this, ::feeEstimator);
            // Get the fee rate to use effective values in coin selection
            CFeeRate nFeeRateNeeded = GetMinimumFeeRate(*this, coin_control, ::mempool, ::feeEstimator, &feeCalc);

            nFeeRet = 0;
            bool pick_new_inputs = true;
            CAmount nValueIn = 0;

            // BnB selector is the only selector used when this is true.
            // That should only happen on the first pass through the loop.
            coin_selection_params.use_bnb = nSubtractFeeFromAmount == 0; // If we are doing subtract fee from recipient, then don't use BnB

            // Start with no fee and loop until there is enough fee.
            // Each iteration rebuilds the outputs (and possibly re-selects
            // inputs) with the fee estimate from the previous pass.
            while (true)
            {
                nChangePosInOut = nChangePosRequest;
                txNew.vin.clear();
                txNew.vout.clear();
                bool fFirst = true;

                CAmount nValueToSelect = nValue;
                if (nSubtractFeeFromAmount == 0)
                    nValueToSelect += nFeeRet;

                // vouts to the payees
                coin_selection_params.tx_noinputs_size = 11; // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 input count, 1 output count, 1 witness overhead (dummy, flag, stack size)
                for (const auto& recipient : vecSend)
                {
                    CTxOut txout(recipient.nAmount, recipient.scriptPubKey);

                    if (recipient.fSubtractFeeFromAmount)
                    {
                        assert(nSubtractFeeFromAmount != 0);
                        txout.nValue -= nFeeRet / nSubtractFeeFromAmount; // Subtract fee equally from each selected recipient

                        if (fFirst) // first receiver pays the remainder not divisible by output count
                        {
                            fFirst = false;
                            txout.nValue -= nFeeRet % nSubtractFeeFromAmount;
                        }
                    }
                    // Include the fee cost for outputs. Note this is only used for BnB right now
                    coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout, SER_NETWORK, PROTOCOL_VERSION);

                    if (IsDust(txout, ::dustRelayFee))
                    {
                        if (recipient.fSubtractFeeFromAmount && nFeeRet > 0)
                        {
                            if (txout.nValue < 0)
                                strFailReason = _("The transaction amount is too small to pay the fee");
                            else
                                strFailReason = _("The transaction amount is too small to send after the fee has been deducted");
                        }
                        else
                            strFailReason = _("Transaction amount too small");
                        return false;
                    }
                    txNew.vout.push_back(txout);
                }

                // Choose coins to use
                bool bnb_used;
                if (pick_new_inputs) {
                    nValueIn = 0;
                    setCoins.clear();
                    coin_selection_params.change_spend_size = CalculateMaximumSignedInputSize(change_prototype_txout, this);
                    coin_selection_params.effective_fee = nFeeRateNeeded;
                    if (!SelectCoins(vAvailableCoins, nValueToSelect, setCoins, nValueIn, coin_control, coin_selection_params, bnb_used))
                    {
                        // If BnB was used, it was the first pass. No longer the first pass and continue loop with knapsack.
                        if (bnb_used) {
                            coin_selection_params.use_bnb = false;
                            continue;
                        }
                        else {
                            strFailReason = _("Insufficient funds");
                            return false;
                        }
                    }
                }

                const CAmount nChange = nValueIn - nValueToSelect;
                if (nChange > 0)
                {
                    // Fill a vout to ourself
                    CTxOut newTxOut(nChange, scriptChange);

                    // Never create dust outputs; if we would, just
                    // add the dust to the fee.
                    // The nChange when BnB is used is always going to go to fees.
                    if (IsDust(newTxOut, discard_rate) || bnb_used)
                    {
                        nChangePosInOut = -1;
                        nFeeRet += nChange;
                    }
                    else
                    {
                        if (nChangePosInOut == -1)
                        {
                            // Insert change txn at random position:
                            nChangePosInOut = GetRandInt(txNew.vout.size()+1);
                        }
                        else if ((unsigned int)nChangePosInOut > txNew.vout.size())
                        {
                            strFailReason = _("Change index out of range");
                            return false;
                        }

                        std::vector<CTxOut>::iterator position = txNew.vout.begin()+nChangePosInOut;
                        txNew.vout.insert(position, newTxOut);
                    }
                } else {
                    nChangePosInOut = -1;
                }

                // Dummy fill vin for maximum size estimation
                //
                for (const auto& coin : setCoins) {
                    txNew.vin.push_back(CTxIn(coin.outpoint,CScript()));
                }

                nBytes = CalculateMaximumSignedTxSize(txNew, this);
                if (nBytes < 0) {
                    strFailReason = _("Signing transaction failed");
                    return false;
                }

                nFeeNeeded = GetMinimumFee(*this, nBytes, coin_control, ::mempool, ::feeEstimator, &feeCalc);
                if (feeCalc.reason == FeeReason::FALLBACK && !m_allow_fallback_fee) {
                    // eventually allow a fallback fee
                    strFailReason = _("Fee estimation failed. Fallbackfee is disabled. Wait a few blocks or enable -fallbackfee.");
                    return false;
                }

                // If we made it here and we aren't even able to meet the relay fee on the next pass, give up
                // because we must be at the maximum allowed fee.
                if (nFeeNeeded < ::minRelayTxFee.GetFee(nBytes))
                {
                    strFailReason = _("Transaction too large for fee policy");
                    return false;
                }

                if (nFeeRet >= nFeeNeeded) {
                    // Reduce fee to only the needed amount if possible. This
                    // prevents potential overpayment in fees if the coins
                    // selected to meet nFeeNeeded result in a transaction that
                    // requires less fee than the prior iteration.

                    // If we have no change and a big enough excess fee, then
                    // try to construct transaction again only without picking
                    // new inputs. We now know we only need the smaller fee
                    // (because of reduced tx size) and so we should add a
                    // change output. Only try this once.
                    if (nChangePosInOut == -1 && nSubtractFeeFromAmount == 0 && pick_new_inputs) {
                        unsigned int tx_size_with_change = nBytes + coin_selection_params.change_output_size + 2; // Add 2 as a buffer in case increasing # of outputs changes compact size
                        CAmount fee_needed_with_change = GetMinimumFee(*this, tx_size_with_change, coin_control, ::mempool, ::feeEstimator, nullptr);
                        CAmount minimum_value_for_change = GetDustThreshold(change_prototype_txout, discard_rate);
                        if (nFeeRet >= fee_needed_with_change + minimum_value_for_change) {
                            pick_new_inputs = false;
                            nFeeRet = fee_needed_with_change;
                            continue;
                        }
                    }

                    // If we have change output already, just increase it
                    if (nFeeRet > nFeeNeeded && nChangePosInOut != -1 && nSubtractFeeFromAmount == 0) {
                        CAmount extraFeePaid = nFeeRet - nFeeNeeded;
                        std::vector<CTxOut>::iterator change_position = txNew.vout.begin()+nChangePosInOut;
                        change_position->nValue += extraFeePaid;
                        nFeeRet -= extraFeePaid;
                    }
                    break; // Done, enough fee included.
                }
                else if (!pick_new_inputs) {
                    // This shouldn't happen, we should have had enough excess
                    // fee to pay for the new output and still meet nFeeNeeded
                    // Or we should have just subtracted fee from recipients and
                    // nFeeNeeded should not have changed
                    strFailReason = _("Transaction fee and change calculation failed");
                    return false;
                }

                // Try to reduce change to include necessary fee
                if (nChangePosInOut != -1 && nSubtractFeeFromAmount == 0) {
                    CAmount additionalFeeNeeded = nFeeNeeded - nFeeRet;
                    std::vector<CTxOut>::iterator change_position = txNew.vout.begin()+nChangePosInOut;
                    // Only reduce change if remaining amount is still a large enough output.
                    if (change_position->nValue >= MIN_FINAL_CHANGE + additionalFeeNeeded) {
                        change_position->nValue -= additionalFeeNeeded;
                        nFeeRet += additionalFeeNeeded;
                        break; // Done, able to increase fee from change
                    }
                }

                // If subtracting fee from recipients, we now know what fee we
                // need to subtract, we have no reason to reselect inputs
                if (nSubtractFeeFromAmount > 0) {
                    pick_new_inputs = false;
                }

                // Include more fee and try again.
                nFeeRet = nFeeNeeded;
                coin_selection_params.use_bnb = false;
                continue;
            }
        }

        if (nChangePosInOut == -1) reservekey.ReturnKey(); // Return any reserved key if we don't have change

        // Shuffle selected coins and fill in final vin
        txNew.vin.clear();
        std::vector<CInputCoin> selected_coins(setCoins.begin(), setCoins.end());
        std::shuffle(selected_coins.begin(), selected_coins.end(), FastRandomContext());

        // Note how the sequence number is set to non-maxint so that
        // the nLockTime set above actually works.
        //
        // BIP125 defines opt-in RBF as any nSequence < maxint-1, so
        // we use the highest possible value in that range (maxint-2)
        // to avoid conflicting with other possible uses of nSequence,
        // and in the spirit of "smallest possible change from prior
        // behavior."
        const uint32_t nSequence = coin_control.m_signal_bip125_rbf.get_value_or(m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1);
        for (const auto& coin : selected_coins) {
            txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence));
        }

        // Give the caller a chance to inspect/augment the transaction
        // before signatures are produced.
        if(onTxToBeSigned)
            onTxToBeSigned(txNew);

        if (sign)
        {
            CTransaction txNewConst(txNew);
            int nIn = 0;
            for (const auto& coin : selected_coins)
            {
                const CScript& scriptPubKey = coin.txout.scriptPubKey;
                SignatureData sigdata;

                if (!ProduceSignature(*this, TransactionSignatureCreator(&txNewConst, nIn, coin.txout.nValue, SIGHASH_ALL), scriptPubKey, sigdata))
                {
                    strFailReason = _("Signing transaction failed");
                    return false;
                } else {
                    UpdateTransaction(txNew, nIn, sigdata);
                }

                nIn++;
            }
        }

        // Return the constructed transaction data.
        tx = MakeTransactionRef(std::move(txNew));

        // Limit size
        if (GetTransactionWeight(*tx) >= max_tx_weight())
        {
            strFailReason = _("Transaction too large");
            return false;
        }
    }

    if (gArgs.GetBoolArg("-walletrejectlongchains", DEFAULT_WALLET_REJECT_LONG_CHAINS)) {
        // Lastly, ensure this tx will pass the mempool's chain limits
        LockPoints lp;
        CTxMemPoolEntry entry(tx, 0, 0, 0, false, 0, lp);
        CTxMemPool::setEntries setAncestors;
        size_t nLimitAncestors = gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
        size_t nLimitAncestorSize = gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000;
        size_t nLimitDescendants = gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT);
        size_t nLimitDescendantSize = gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000;
        std::string errString;
        if (!mempool.CalculateMemPoolAncestors(entry, setAncestors, nLimitAncestors, nLimitAncestorSize, nLimitDescendants, nLimitDescendantSize, errString)) {
            strFailReason = _("Transaction has too long of a mempool chain");
            return false;
        }
    }

    LogPrintf("Fee Calculation: Fee:%d Bytes:%u Needed:%d Tgt:%d (requested %d) Reason:\"%s\" Decay %.5f: Estimation: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out) Fail: (%g - %g) %.2f%% %.1f/(%.1f %d mem %.1f out)\n",
              nFeeRet, nBytes, nFeeNeeded, feeCalc.returnedTarget, feeCalc.desiredTarget, StringForFeeReason(feeCalc.reason), feeCalc.est.decay,
              feeCalc.est.pass.start, feeCalc.est.pass.end,
              100 * feeCalc.est.pass.withinTarget / (feeCalc.est.pass.totalConfirmed + feeCalc.est.pass.inMempool + feeCalc.est.pass.leftMempool),
              feeCalc.est.pass.withinTarget, feeCalc.est.pass.totalConfirmed, feeCalc.est.pass.inMempool, feeCalc.est.pass.leftMempool,
              feeCalc.est.fail.start, feeCalc.est.fail.end,
              100 * feeCalc.est.fail.withinTarget / (feeCalc.est.fail.totalConfirmed + feeCalc.est.fail.inMempool + feeCalc.est.fail.leftMempool),
              feeCalc.est.fail.withinTarget, feeCalc.est.fail.totalConfirmed, feeCalc.est.fail.inMempool, feeCalc.est.fail.leftMempool);
    return true;
}
bool CWallet::CreateCoinStakeKernel(CScript &kernelScript, const CScript &stakeScript,
                                    unsigned int nBits, const CBlock &blockFrom,
                                    unsigned int nTxPrevOffset, const CTransactionRef &txPrev,
                                    const COutPoint &prevout, unsigned int &nTimeTx, bool fPrintProofOfStake) const
{
    // Reject stakes whose source output does not meet the minimum age yet.
    if (blockFrom.GetBlockTime() + Params().GetConsensus().nStakeMinAge + nHashDrift > nTimeTx)
        return false;

    uint256 hashProofOfStake = uint256();

    // Walk backwards through the allowed clock-drift window, looking for a
    // timestamp whose kernel hash satisfies the target encoded in nBits.
    for (unsigned int drift = 0; drift < nHashDrift; ++drift)
    {
        const unsigned int nTryTime = nTimeTx - drift;
        const bool stakeValid = CheckStakeKernelHash(nBits, blockFrom, nTxPrevOffset, txPrev, prevout, nTryTime, hashProofOfStake, true, false);
        if (gArgs.GetBoolArg("-stakeinfo", false))
            LogPrintf("%s - drift %03d prevout.hash %s prevout.n %02d hashProof %s\n", __func__, drift, prevout.hash.ToString().c_str(), prevout.n, hashProofOfStake.ToString().c_str());
        if (!stakeValid)
            continue;

        // Double check that this will pass time requirements.
        if (nTryTime <= chainActive.Tip()->GetMedianTimePast()) {
            LogPrintf("CreateCoinStakeKernel() : kernel found, but it is too far in the past \n");
            continue;
        }

        // Found a kernel: report the script and the timestamp that worked.
        LogPrintf("CreateCoinStakeKernel : kernel found\n");
        kernelScript.clear();
        kernelScript = stakeScript;
        nTimeTx = nTryTime;
        return true;
    }
    return false;
}
void CWallet::FillCoinStakePayments(CMutableTransaction &transaction, const CScript &scriptPubKeyOut, const COutPoint &stakePrevout, CAmount blockReward) const
{
    // Look up the staked prevout in this wallet.
    // NOTE(review): GetWalletTx may return nullptr for an unknown hash; the
    // caller is expected to only pass prevouts selected from this wallet —
    // verify at call sites.
    const CWalletTx *stakeTx = GetWalletTx(stakePrevout.hash);
    CTxOut stakedOutput = stakeTx->tx->vout[stakePrevout.n];

    unsigned int percentage = 100;
    auto nCoinStakeReward = stakedOutput.nValue + GetStakeReward(blockReward, percentage);

    // Spend the staked output and pay its value plus the reward back to the
    // stake script.
    transaction.vin.emplace_back(CTxIn(stakePrevout));
    transaction.vout.emplace_back(nCoinStakeReward, scriptPubKeyOut);

    // Split the stake output in two when it exceeds the configured
    // threshold, keeping individual stake outputs small.
    CTxOut &lastTx = transaction.vout.back();
    if (lastTx.nValue / 2 > nStakeSplitThreshold * COIN)
    {
        lastTx.nValue /= 2;
        transaction.vout.emplace_back(lastTx.nValue, lastTx.scriptPubKey);
    }
}
bool CWallet::GetPubKey(const CKeyID &address, CPubKey& vchPubKeyOut) const
{
    LOCK(cs_wallet);
    // HD keys are tracked in mapHdPubKeys; anything else falls through to
    // the underlying crypto keystore.
    auto mi = mapHdPubKeys.find(address);
    if (mi == mapHdPubKeys.end())
        return CCryptoKeyStore::GetPubKey(address, vchPubKeyOut);
    vchPubKeyOut = mi->second.extPubKey.pubkey;
    return true;
}
// Fetch the private key for `address` from the underlying keystore.
// NOTE(review): unlike GetPubKey() above, this does not special-case
// mapHdPubKeys — presumably CCryptoKeyStore can resolve HD keys too; verify.
bool CWallet::GetKey(const CKeyID &address, CKey& keyOut) const
{
    LOCK(cs_wallet);
    return CCryptoKeyStore::GetKey(address, keyOut);
}
// Raw byte vector, conventionally used for script stack elements.
using valtype = std::vector<unsigned char>;
/**
 * Attempt to build a proof-of-stake coinstake transaction.
 *
 * Scans the (cached) set of stakeable wallet outputs for one whose kernel
 * hash meets the target `nBits` within the allowed clock drift, and fills
 * `txNew` with the marker output, the staked input, and the reward outputs.
 *
 * @param keystore        unused here; kept for interface compatibility.
 * @param nBits           compact stake target.
 * @param blockReward     block reward to add to the staked value.
 * @param txNew           [out] the coinstake transaction being built.
 * @param nTxNewTime      [out] timestamp at which the kernel was found.
 * @param vwtxPrev        unused here; kept for interface compatibility.
 * @param fGenerateSegwit forwarded to stake-coin selection.
 * @return true if a kernel was found and txNew was filled.
 */
bool CWallet::CreateCoinStake(const CKeyStore& keystore,
                              unsigned int nBits,
                              CAmount blockReward,
                              CMutableTransaction &txNew,
                              unsigned int &nTxNewTime,
                              std::vector<const CWalletTx*> &vwtxPrev,
                              bool fGenerateSegwit)
{
    // The following split & combine thresholds are important to security
    // Should not be adjusted if you don't understand the consequences
    txNew.vin.clear();
    txNew.vout.clear();

    // Mark coin stake transaction: first output is empty by convention.
    CScript scriptEmpty;
    scriptEmpty.clear();
    txNew.vout.push_back(CTxOut(0, scriptEmpty));

    // Choose coins to use
    CAmount nBalance = GetBalance();

    // presstab HyperStake - Initialize as static and don't update the set on
    // every run of CreateCoinStake() in order to lighten resource use
    static StakeCoinsSet setStakeCoins;
    static int nLastStakeSetUpdate = 0;
    if (GetTime() - nLastStakeSetUpdate > nStakeSetUpdateTime) {
        setStakeCoins.clear();
        CScript scriptPubKey;
        if (!SelectStakeCoins(setStakeCoins, nBalance /*- nReserveBalance*/, fGenerateSegwit, scriptPubKey)) {
            return error("Failed to select coins for staking");
        }
        LogPrintf("Selected %d coins for staking\n", setStakeCoins.size());
        nLastStakeSetUpdate = GetTime();
    }
    if (setStakeCoins.empty())
        return error("CreateCoinStake() : No Coins to stake");

    CScript scriptPubKeyKernel;
    // Prevent staking a time that won't be accepted by the network.
    if (GetAdjustedTime() <= chainActive.Tip()->nTime)
        MilliSleep(10000);

    bool fKernelFound = false;
    for (const std::pair<const CWalletTx*, unsigned int> &pcoin : setStakeCoins)
    {
        // Make sure the block containing this coin is known to us.
        CBlockIndex* pindex = nullptr;
        BlockMap::iterator it = mapBlockIndex.find(pcoin.first->hashBlock);
        if (it != mapBlockIndex.end())
            pindex = it->second;
        else {
            // Fix: terminate the log line so it does not run into the next one.
            LogPrintf("failed to find block index\n");
            continue;
        }
        // Read block header
        CBlockHeader block = pindex->GetBlockHeader();
        COutPoint prevoutStake = COutPoint(pcoin.first->GetHash(), pcoin.second);
        nTxNewTime = GetAdjustedTime();
        // Iterates candidate timestamps inside CheckStakeKernelHash().
        CScript kernelScript;
        auto stakeScript = pcoin.first->tx->vout[pcoin.second].scriptPubKey;
        fKernelFound = CreateCoinStakeKernel(kernelScript, stakeScript, nBits,
                                             block, sizeof(CBlock), pcoin.first->tx,
                                             prevoutStake, nTxNewTime, false);
        if (fKernelFound)
        {
            FillCoinStakePayments(txNew, kernelScript, prevoutStake, blockReward);
            break;
        }
    }
    if (!fKernelFound)
    {
        // Fix: terminate the log line so it does not run into the next one.
        LogPrintf("Failed to find coinstake kernel\n");
        return false;
    }

    // Removed dead locals (txoutMasternode, voutSuperblock, nHeight) that
    // were declared/computed here but never used.
    nLastStakeSetUpdate = 0; // trigger stake set to repopulate next round
    return true;
}
/**
 * Call after CreateTransaction unless you want to abort
 */
bool CWallet::CommitTransaction(CTransactionRef tx, mapValue_t mapValue, std::vector<std::pair<std::string, std::string>> orderForm, std::string fromAccount, CReserveKey& reservekey, CConnman* connman, CValidationState& state)
{
    {
        LOCK2(cs_main, cs_wallet);

        // Wrap the raw transaction in a wallet transaction and attach the
        // caller-supplied metadata.
        CWalletTx wtx_new(this, std::move(tx));
        wtx_new.mapValue = std::move(mapValue);
        wtx_new.vOrderForm = std::move(orderForm);
        wtx_new.strFromAccount = std::move(fromAccount);
        wtx_new.fTimeReceivedIsTxTime = true;
        wtx_new.fFromMe = true;

        LogPrintf("CommitTransaction:\n%s", wtx_new.tx->ToString()); /* Continued */

        // Take key pair from key pool so it won't be used again
        reservekey.KeepKey();

        // Add tx to wallet, because if it has change it's also ours,
        // otherwise just for transaction history.
        AddToWallet(wtx_new);

        // Notify that old coins are spent
        for (const CTxIn& txin : wtx_new.tx->vin)
        {
            CWalletTx &coin = mapWallet.at(txin.prevout.hash);
            coin.BindWallet(this);
            NotifyTransactionChanged(this, coin.GetHash(), CT_UPDATED);
        }

        // Track how many getdata requests our transaction gets
        mapRequestCount[wtx_new.GetHash()] = 0;

        // Get the inserted-CWalletTx from mapWallet so that the
        // fInMempool flag is cached properly
        CWalletTx& wtx = mapWallet.at(wtx_new.GetHash());

        if (fBroadcastTransactions)
        {
            // Broadcast
            if (!wtx.AcceptToMemoryPool(maxTxFee, state)) {
                LogPrintf("CommitTransaction(): Transaction cannot be broadcast immediately, %s\n", FormatStateMessage(state));
                // TODO: if we expect the failure to be long term or permanent, instead delete wtx from the wallet and return failure.
            } else {
                wtx.RelayWalletTransaction(connman);
            }
        }
    }
    return true;
}
void CWallet::ListAccountCreditDebit(const std::string& strAccount, std::list<CAccountingEntry>& entries) {
    // Delegate directly to the wallet database batch.
    WalletBatch batch(*database);
    batch.ListAccountCreditDebit(strAccount, entries);
}
bool CWallet::AddAccountingEntry(const CAccountingEntry& acentry)
{
WalletBatch batch(*database);
return AddAccountingEntry(acentry, &batch);
}
bool CWallet::AddAccountingEntry(const CAccountingEntry& acentry, WalletBatch *batch)
{
if (!batch->WriteAccountingEntry(++nAccountingEntryNumber, acentry)) {
return false;
}
laccentries.push_back(acentry);
CAccountingEntry & entry = laccentries.back();
wtxOrdered.insert(std::make_pair(entry.nOrderPos, TxPair(nullptr, &entry)));
return true;
}
DBErrors CWallet::LoadWallet(bool& fFirstRunRet)
{
    LOCK2(cs_main, cs_wallet);

    fFirstRunRet = false;
    DBErrors nLoadWalletRet = WalletBatch(*database,"cr+").LoadWallet(this);
    if (nLoadWalletRet == DBErrors::NEED_REWRITE && database->Rewrite("\x04pool"))
    {
        // The rewrite dropped the keypool records, so forget them in memory.
        setInternalKeyPool.clear();
        setExternalKeyPool.clear();
        m_pool_key_to_index.clear();
        // Note: can't top-up keypool here, because wallet is locked.
        // User will be prompted to unlock wallet the next operation
        // that requires a new key.
    }

    // Rebuild the index of our unspent outputs.
    for (auto& pair : mapWallet) {
        const auto& txid = pair.first;
        const auto& wtx = pair.second;
        for (size_t i = 0; i < wtx.tx->vout.size(); ++i) {
            if (IsMine(wtx.tx->vout[i]) && !IsSpent(txid, i)) {
                setWalletUTXO.insert(COutPoint(txid, i));
            }
        }
    }

    // This wallet is in its first run if all of these are empty
    fFirstRunRet = mapKeys.empty() && mapCryptedKeys.empty() && mapWatchKeys.empty() && setWatchOnly.empty() && mapScripts.empty();

    if (nLoadWalletRet != DBErrors::LOAD_OK)
        return nLoadWalletRet;

    uiInterface.LoadWallet(this);
    return DBErrors::LOAD_OK;
}
DBErrors CWallet::ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut)
{
    AssertLockHeld(cs_wallet); // mapWallet

    // Remove the selected transactions from the database, then drop the
    // corresponding in-memory wallet entries.
    DBErrors zapResult = WalletBatch(*database,"cr+").ZapSelectTx(vHashIn, vHashOut);
    for (const uint256& hash : vHashOut) {
        mapWallet.erase(hash);
    }

    if (zapResult == DBErrors::NEED_REWRITE && database->Rewrite("\x04pool"))
    {
        // Keypool records were dropped during the rewrite.
        setInternalKeyPool.clear();
        setExternalKeyPool.clear();
        m_pool_key_to_index.clear();
        // Note: can't top-up keypool here, because wallet is locked.
        // User will be prompted to unlock wallet the next operation
        // that requires a new key.
    }

    if (zapResult != DBErrors::LOAD_OK)
        return zapResult;

    MarkDirty();
    return DBErrors::LOAD_OK;
}
DBErrors CWallet::ZapWalletTx(std::vector<CWalletTx>& vWtx)
{
    // Remove all wallet transactions from the database, handing them back
    // to the caller in vWtx.
    DBErrors zapResult = WalletBatch(*database,"cr+").ZapWalletTx(vWtx);
    if (zapResult == DBErrors::NEED_REWRITE && database->Rewrite("\x04pool"))
    {
        LOCK(cs_wallet);
        // Keypool records were dropped during the rewrite.
        setInternalKeyPool.clear();
        setExternalKeyPool.clear();
        m_pool_key_to_index.clear();
        // Note: can't top-up keypool here, because wallet is locked.
        // User will be prompted to unlock wallet the next operation
        // that requires a new key.
    }
    if (zapResult != DBErrors::LOAD_OK)
        return zapResult;
    return DBErrors::LOAD_OK;
}
bool CWallet::SetAddressBook(const CTxDestination& address, const std::string& strName, const std::string& strPurpose)
{
    bool fUpdated = false;
    {
        LOCK(cs_wallet); // mapAddressBook
        // An existing entry means this is an update rather than a new record.
        fUpdated = (mapAddressBook.find(address) != mapAddressBook.end());
        mapAddressBook[address].name = strName;
        if (!strPurpose.empty()) /* update purpose only if requested */
            mapAddressBook[address].purpose = strPurpose;
    }
    NotifyAddressBookChanged(this, address, strName, ::IsMine(*this, address) != ISMINE_NO,
                             strPurpose, (fUpdated ? CT_UPDATED : CT_NEW) );
    if (!strPurpose.empty() && !WalletBatch(*database).WritePurpose(EncodeDestination(address), strPurpose))
        return false;
    return WalletBatch(*database).WriteName(EncodeDestination(address), strName);
}
/**
 * Remove `address` from the address book, including its destdata records,
 * notify listeners, and erase the persisted name/purpose entries.
 *
 * @return result of erasing the name record from the wallet database.
 */
bool CWallet::DelAddressBook(const CTxDestination& address)
{
    {
        LOCK(cs_wallet); // mapAddressBook

        // Delete destdata tuples associated with address.
        // Fix: iterate with a reference to the map's real value_type
        // (std::pair<const std::string, std::string>). The previous binding
        // to const std::pair<std::string, std::string>& silently created a
        // temporary copy of every entry on each iteration.
        std::string strAddress = EncodeDestination(address);
        for (const auto& item : mapAddressBook[address].destdata)
        {
            WalletBatch(*database).EraseDestData(strAddress, item.first);
        }
        mapAddressBook.erase(address);
    }
    NotifyAddressBookChanged(this, address, "", ::IsMine(*this, address) != ISMINE_NO, "", CT_DELETED);
    WalletBatch(*database).ErasePurpose(EncodeDestination(address));
    return WalletBatch(*database).EraseName(EncodeDestination(address));
}
const std::string& CWallet::GetLabelName(const CScript& scriptPubKey) const
{
    CTxDestination address;
    const bool has_dest = ExtractDestination(scriptPubKey, address) && !scriptPubKey.IsUnspendable();
    if (has_dest) {
        auto entry = mapAddressBook.find(address);
        if (entry != mapAddressBook.end()) {
            return entry->second.name;
        }
    }
    // A scriptPubKey that doesn't have an entry in the address book is
    // associated with the default label ("").
    const static std::string DEFAULT_LABEL_NAME;
    return DEFAULT_LABEL_NAME;
}
/**
 * Mark old keypool keys as used,
 * and generate all new keys
 */
bool CWallet::NewKeyPool()
{
    {
        LOCK(cs_wallet);
        WalletBatch batch(*database);
        // Erase every existing internal and external pool entry from the
        // database and the in-memory sets...
        for (int64_t nIndex : setInternalKeyPool) {
            batch.ErasePool(nIndex);
        }
        setInternalKeyPool.clear();
        for (int64_t nIndex : setExternalKeyPool) {
            batch.ErasePool(nIndex);
        }
        setExternalKeyPool.clear();
        m_pool_key_to_index.clear();
        // ...then refill with fresh keys. TopUpKeyPool fails (returns false)
        // when the wallet is locked, so this can fail too.
        if (!TopUpKeyPool()) {
            return false;
        }
        LogPrintf("CWallet::NewKeyPool rewrote keypool\n");
    }
    return true;
}
size_t CWallet::KeypoolCountExternalKeys()
{
    // Number of keys currently available in the external (receive) pool.
    AssertLockHeld(cs_wallet); // guards setExternalKeyPool
    const size_t external_count = setExternalKeyPool.size();
    return external_count;
}
void CWallet::LoadKeyPool(int64_t nIndex, const CKeyPool &keypool)
{
    // Load one keypool entry (read from the wallet database) into the
    // in-memory pool structures.
    AssertLockHeld(cs_wallet);
    if (keypool.fInternal) {
        setInternalKeyPool.insert(nIndex);
    } else {
        setExternalKeyPool.insert(nIndex);
    }
    m_max_keypool_index = std::max(m_max_keypool_index, nIndex);
    // Compute the key id once; the original code hashed the pubkey twice
    // (for the index map and again for the metadata lookup).
    CKeyID keyid = keypool.vchPubKey.GetID();
    m_pool_key_to_index[keyid] = nIndex;
    // If no metadata exists yet, create a default with the pool key's
    // creation time. Note that this may be overwritten by actually
    // stored metadata for that key later, which is fine.
    if (mapKeyMetadata.count(keyid) == 0)
        mapKeyMetadata[keyid] = CKeyMetadata(keypool.nTime);
}
bool CWallet::TopUpKeyPool(unsigned int kpSize)
{
    // Refill the internal and external keypools up to the target size
    // (kpSize if non-zero, otherwise the -keypool setting). Returns false
    // only when the wallet is locked and keys cannot be generated.
    {
        LOCK(cs_wallet);
        if (IsLocked())
            return false;
        // Top up key pool
        unsigned int nTargetSize;
        if (kpSize > 0)
            nTargetSize = kpSize;
        else
            nTargetSize = std::max(gArgs.GetArg("-keypool", DEFAULT_KEYPOOL_SIZE), (int64_t) 0);
        // count amount of available keys (internal, external)
        // make sure the keypool of external and internal keys fits the user selected target (-keypool)
        int64_t missingExternal = std::max(std::max((int64_t) nTargetSize, (int64_t) 1) - (int64_t)setExternalKeyPool.size(), (int64_t) 0);
        int64_t missingInternal = std::max(std::max((int64_t) nTargetSize, (int64_t) 1) - (int64_t)setInternalKeyPool.size(), (int64_t) 0);
        if (!IsHDEnabled() || !CanSupportFeature(FEATURE_HD_SPLIT))
        {
            // don't create extra internal keys
            missingInternal = 0;
        }
        bool internal = false;
        WalletBatch batch(*database);
        // Countdown loop: the first `missingExternal` iterations (i >=
        // missingInternal) generate external keys, the remainder internal.
        for (int64_t i = missingInternal + missingExternal; i--;)
        {
            if (i < missingInternal) {
                internal = true;
            }
            assert(m_max_keypool_index < std::numeric_limits<int64_t>::max()); // How in the hell did you use so many keys?
            int64_t index = ++m_max_keypool_index;
            CPubKey pubkey(GenerateNewKey(batch, internal));
            if (!batch.WritePool(index, CKeyPool(pubkey, internal))) {
                throw std::runtime_error(std::string(__func__) + ": writing generated key failed");
            }
            if (internal) {
                setInternalKeyPool.insert(index);
            } else {
                setExternalKeyPool.insert(index);
            }
            m_pool_key_to_index[pubkey.GetID()] = index;
        }
        if (missingInternal + missingExternal > 0) {
            LogPrintf("keypool added %d keys (%d internal), size=%u (%u internal)\n", missingInternal + missingExternal, missingInternal, setInternalKeyPool.size() + setExternalKeyPool.size(), setInternalKeyPool.size());
        }
    }
    return true;
}
bool CWallet::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& keypool, bool fRequestedInternal)
{
    // Reserve the oldest entry from the requested pool: the index is removed
    // from the in-memory set but kept in the database, so it can either be
    // committed (KeepKey) or put back (ReturnKey) later. Returns false when
    // the pool is empty; throws on database/keystore inconsistencies.
    nIndex = -1;
    keypool.vchPubKey = CPubKey();
    {
        LOCK(cs_wallet);
        if (!IsLocked())
            TopUpKeyPool();
        // Internal (change) keys are only handed out when HD chain split is
        // active; otherwise fall back to the external pool.
        bool fReturningInternal = IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT) && fRequestedInternal;
        std::set<int64_t>& setKeyPool = fReturningInternal ? setInternalKeyPool : setExternalKeyPool;
        // Get the oldest key
        if (setKeyPool.empty()) {
            return false;
        }
        WalletBatch batch(*database);
        auto it = setKeyPool.begin();
        nIndex = *it;
        setKeyPool.erase(it);
        if (!batch.ReadPool(nIndex, keypool)) {
            throw std::runtime_error(std::string(__func__) + ": read failed");
        }
        if (!HaveKey(keypool.vchPubKey.GetID())) {
            throw std::runtime_error(std::string(__func__) + ": unknown key in key pool");
        }
        if (keypool.fInternal != fReturningInternal) {
            throw std::runtime_error(std::string(__func__) + ": keypool entry misclassified");
        }
        if (!keypool.vchPubKey.IsValid()) {
            throw std::runtime_error(std::string(__func__) + ": keypool entry invalid");
        }
        m_pool_key_to_index.erase(keypool.vchPubKey.GetID());
        LogPrintf("keypool reserve %d\n", nIndex);
    }
    return true;
}
void CWallet::KeepKey(int64_t nIndex)
{
    // Commit a reserved keypool entry: erase it from the database for good.
    WalletBatch(*database).ErasePool(nIndex);
    LogPrintf("keypool keep %d\n", nIndex);
}
void CWallet::ReturnKey(int64_t nIndex, bool fInternal, const CPubKey& pubkey)
{
    // Abort a reservation: put the index back into the appropriate in-memory
    // pool and restore the pubkey -> index mapping.
    {
        LOCK(cs_wallet);
        std::set<int64_t>& pool = fInternal ? setInternalKeyPool : setExternalKeyPool;
        pool.insert(nIndex);
        m_pool_key_to_index[pubkey.GetID()] = nIndex;
    }
    LogPrintf("keypool return %d\n", nIndex);
}
bool CWallet::GetKeyFromPool(CPubKey& result, bool internal)
{
    // Hand out a public key from the keypool. If the pool is exhausted and
    // the wallet is unlocked, generate a fresh key instead; if it is locked,
    // fail (returns false).
    CKeyPool keypool;
    {
        LOCK(cs_wallet);
        int64_t nIndex;
        if (!ReserveKeyFromKeyPool(nIndex, keypool, internal)) {
            if (IsLocked()) return false;
            WalletBatch batch(*database);
            result = GenerateNewKey(batch, internal);
            return true;
        }
        // Commit the reservation: the key will never be handed out again.
        KeepKey(nIndex);
        result = keypool.vchPubKey;
    }
    return true;
}
static int64_t GetOldestKeyTimeInPool(const std::set<int64_t>& setKeyPool, WalletBatch& batch) {
    // An empty pool has no oldest key; report "now" as the birth time.
    if (setKeyPool.empty()) {
        return GetTime();
    }
    // std::set iterates in ascending order, so the first index is the oldest.
    const int64_t oldest_index = *setKeyPool.begin();
    CKeyPool entry;
    if (!batch.ReadPool(oldest_index, entry)) {
        throw std::runtime_error(std::string(__func__) + ": read oldest key in keypool failed");
    }
    assert(entry.vchPubKey.IsValid());
    return entry.nTime;
}
int64_t CWallet::GetOldestKeyPoolTime()
{
    // Creation time of the oldest key across both pools (external always;
    // internal only when HD chain split is in use).
    LOCK(cs_wallet);
    WalletBatch batch(*database);
    int64_t oldest = GetOldestKeyTimeInPool(setExternalKeyPool, batch);
    if (IsHDEnabled() && CanSupportFeature(FEATURE_HD_SPLIT)) {
        const int64_t oldest_internal = GetOldestKeyTimeInPool(setInternalKeyPool, batch);
        oldest = std::max(oldest_internal, oldest);
    }
    return oldest;
}
std::map<CTxDestination, CAmount> CWallet::GetAddressBalances()
{
    // Sum the unspent value of trusted wallet outputs, keyed by destination.
    // Skips untrusted transactions, immature coinbases, and (for received
    // transactions) anything without at least one confirmation.
    std::map<CTxDestination, CAmount> balances;
    {
        LOCK(cs_wallet);
        for (const auto& walletEntry : mapWallet)
        {
            const CWalletTx *pcoin = &walletEntry.second;
            if (!pcoin->IsTrusted())
                continue;
            if (pcoin->IsCoinBase() && pcoin->GetBlocksToMaturity() > 0)
                continue;
            // Zero-conf is acceptable only for our own (sent) transactions.
            int nDepth = pcoin->GetDepthInMainChain();
            if (nDepth < (pcoin->IsFromMe(ISMINE_ALL) ? 0 : 1))
                continue;
            for (unsigned int i = 0; i < pcoin->tx->vout.size(); i++)
            {
                CTxDestination addr;
                if (!IsMine(pcoin->tx->vout[i]))
                    continue;
                if (!ExtractDestination(pcoin->tx->vout[i].scriptPubKey, addr))
                    continue;
                // std::map::operator[] value-initializes a missing entry to
                // 0, so the previous explicit count()/assignment was
                // redundant; spent outputs contribute nothing.
                balances[addr] += IsSpent(walletEntry.first, i) ? 0 : pcoin->tx->vout[i].nValue;
            }
        }
    }
    return balances;
}
std::set< std::set<CTxDestination> > CWallet::GetAddressGroupings()
{
    // Cluster wallet addresses that are linked on-chain: addresses used as
    // inputs of the same transaction (plus that transaction's change) form
    // one group, and each receiving address also forms a singleton group.
    // Overlapping preliminary groups are then merged into disjoint sets.
    AssertLockHeld(cs_wallet); // mapWallet
    std::set< std::set<CTxDestination> > groupings;
    std::set<CTxDestination> grouping;
    for (const auto& walletEntry : mapWallet)
    {
        const CWalletTx *pcoin = &walletEntry.second;
        if (pcoin->tx->vin.size() > 0)
        {
            bool any_mine = false;
            // group all input addresses with each other
            for (CTxIn txin : pcoin->tx->vin)
            {
                CTxDestination address;
                if(!IsMine(txin)) /* If this input isn't mine, ignore it */
                    continue;
                if(!ExtractDestination(mapWallet.at(txin.prevout.hash).tx->vout[txin.prevout.n].scriptPubKey, address))
                    continue;
                grouping.insert(address);
                any_mine = true;
            }
            // group change with input addresses
            if (any_mine)
            {
                for (CTxOut txout : pcoin->tx->vout)
                    if (IsChange(txout))
                    {
                        CTxDestination txoutAddr;
                        if(!ExtractDestination(txout.scriptPubKey, txoutAddr))
                            continue;
                        grouping.insert(txoutAddr);
                    }
            }
            if (grouping.size() > 0)
            {
                groupings.insert(grouping);
                grouping.clear();
            }
        }
        // group lone addrs by themselves
        for (const auto& txout : pcoin->tx->vout)
            if (IsMine(txout))
            {
                CTxDestination address;
                if(!ExtractDestination(txout.scriptPubKey, address))
                    continue;
                grouping.insert(address);
                groupings.insert(grouping);
                grouping.clear();
            }
    }
    // Union-find style merge over the preliminary groups. NOTE: this uses
    // raw new/delete; every pointer inserted into uniqueGroupings is deleted
    // either during a merge or in the final extraction loop below.
    std::set< std::set<CTxDestination>* > uniqueGroupings; // a set of pointers to groups of addresses
    std::map< CTxDestination, std::set<CTxDestination>* > setmap; // map addresses to the unique group containing it
    for (std::set<CTxDestination> _grouping : groupings)
    {
        // make a set of all the groups hit by this new group
        std::set< std::set<CTxDestination>* > hits;
        std::map< CTxDestination, std::set<CTxDestination>* >::iterator it;
        for (CTxDestination address : _grouping)
            if ((it = setmap.find(address)) != setmap.end())
                hits.insert((*it).second);
        // merge all hit groups into a new single group and delete old groups
        std::set<CTxDestination>* merged = new std::set<CTxDestination>(_grouping);
        for (std::set<CTxDestination>* hit : hits)
        {
            merged->insert(hit->begin(), hit->end());
            uniqueGroupings.erase(hit);
            delete hit;
        }
        uniqueGroupings.insert(merged);
        // update setmap
        for (CTxDestination element : *merged)
            setmap[element] = merged;
    }
    // Copy the merged groups out by value and release the heap allocations.
    std::set< std::set<CTxDestination> > ret;
    for (std::set<CTxDestination>* uniqueGrouping : uniqueGroupings)
    {
        ret.insert(*uniqueGrouping);
        delete uniqueGrouping;
    }
    return ret;
}
std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) const
{
    // Collect every address book destination whose name equals `label`.
    LOCK(cs_wallet);
    std::set<CTxDestination> result;
    for (const auto& entry : mapAddressBook) {
        if (entry.second.name == label) {
            result.insert(entry.first);
        }
    }
    return result;
}
void CWallet::DeleteLabel(const std::string& label)
{
WalletBatch batch(*database);
batch.EraseAccount(label);
}
bool CReserveKey::GetReservedKey(CPubKey& pubkey, bool internal)
{
    // Lazily reserve a keypool entry on first call; later calls return the
    // same cached key until KeepKey()/ReturnKey() resets nIndex to -1.
    // Returns false if the pool could not supply a key.
    if (nIndex == -1)
    {
        CKeyPool keypool;
        pwallet->ReserveKeyFromKeyPool(nIndex, keypool, internal);
        // ReserveKeyFromKeyPool leaves nIndex at -1 on failure.
        if (nIndex != -1)
            vchPubKey = keypool.vchPubKey;
        else {
            return false;
        }
        fInternal = keypool.fInternal;
    }
    assert(vchPubKey.IsValid());
    pubkey = vchPubKey;
    return true;
}
void CReserveKey::KeepKey()
{
    // Commit the reservation (if any) and reset to the unreserved state.
    if (nIndex != -1) {
        pwallet->KeepKey(nIndex);
    }
    nIndex = -1;
    vchPubKey = CPubKey();
}
void CReserveKey::ReturnKey()
{
    // Abort the reservation (if any): hand the key back to the pool, then
    // reset to the unreserved state.
    if (nIndex != -1)
        pwallet->ReturnKey(nIndex, fInternal, vchPubKey);
    nIndex = -1;
    vchPubKey = CPubKey();
}
void CWallet::MarkReserveKeysAsUsed(int64_t keypool_id)
{
    // Mark every keypool entry up to and including keypool_id as used:
    // remove it from the database and the in-memory pool, and learn any
    // scripts related to its key.
    AssertLockHeld(cs_wallet);
    bool internal = setInternalKeyPool.count(keypool_id);
    if (!internal) assert(setExternalKeyPool.count(keypool_id));
    std::set<int64_t> *setKeyPool = internal ? &setInternalKeyPool : &setExternalKeyPool;
    auto it = setKeyPool->begin();
    WalletBatch batch(*database);
    while (it != std::end(*setKeyPool)) {
        const int64_t& index = *(it);
        if (index > keypool_id) break; // set*KeyPool is ordered
        CKeyPool keypool;
        if (batch.ReadPool(index, keypool)) { //TODO: This should be unnecessary
            m_pool_key_to_index.erase(keypool.vchPubKey.GetID());
            // BUGFIX: only learn related scripts when the entry was actually
            // read. Previously this ran unconditionally, passing a
            // default-constructed (invalid) pubkey when ReadPool failed.
            LearnAllRelatedScripts(keypool.vchPubKey);
        }
        batch.ErasePool(index);
        LogPrintf("keypool index %d removed\n", index);
        it = setKeyPool->erase(it);
    }
}
bool CWallet::UpdatedTransaction(const uint256 &hashTx)
{
    // Fire a UI notification for an updated transaction, but only when the
    // transaction belongs to this wallet. Returns whether it did.
    bool found = false;
    {
        LOCK(cs_wallet);
        found = mapWallet.count(hashTx) > 0;
        if (found) {
            NotifyTransactionChanged(this, hashTx, CT_UPDATED);
        }
    }
    return found;
}
void CWallet::GetScriptForMining(std::shared_ptr<CReserveScript> &script)
{
    // Reserve a key from the pool and hand out a pay-to-pubkey script for
    // mining rewards. Leaves `script` untouched if no key can be reserved.
    std::shared_ptr<CReserveKey> rKey = std::make_shared<CReserveKey>(this);
    CPubKey pubkey;
    if (!rKey->GetReservedKey(pubkey))
        return;
    // The CReserveKey keeps the reservation alive for as long as the caller
    // holds the shared_ptr.
    script = rKey;
    script->reserveScript = CScript() << ToByteVector(pubkey) << OP_CHECKSIG;
}
void CWallet::LockCoin(const COutPoint& output)
{
    // Mark this outpoint as locked (excluded from automatic coin selection).
    AssertLockHeld(cs_wallet); // setLockedCoins
    setLockedCoins.insert(output);
}
void CWallet::UnlockCoin(const COutPoint& output)
{
    // Remove this outpoint from the locked set, making it selectable again.
    AssertLockHeld(cs_wallet); // setLockedCoins
    setLockedCoins.erase(output);
}
void CWallet::UnlockAllCoins()
{
    // Clear the entire locked-coin set.
    AssertLockHeld(cs_wallet); // setLockedCoins
    setLockedCoins.clear();
}
bool CWallet::IsLockedCoin(uint256 hash, unsigned int n) const
{
    // True when the outpoint (hash, n) is present in the locked-coin set.
    AssertLockHeld(cs_wallet); // setLockedCoins
    return setLockedCoins.count(COutPoint(hash, n)) > 0;
}
void CWallet::ListLockedCoins(std::vector<COutPoint>& vOutpts) const
{
    // Append all locked outpoints to vOutpts, preserving set order.
    AssertLockHeld(cs_wallet); // setLockedCoins
    // Idiom: a single range insert replaces the manual iterator loop that
    // copied each element individually.
    vOutpts.insert(vOutpts.end(), setLockedCoins.begin(), setLockedCoins.end());
}
void CWallet::GetKeyBirthTimes(std::map<CTxDestination, int64_t> &mapKeyBirth) const {
    // Estimate a birth time for every wallet key: exact for keys that carry
    // metadata with a creation time, otherwise inferred from the earliest
    // in-chain block whose transactions touch the key.
    AssertLockHeld(cs_wallet); // mapKeyMetadata
    mapKeyBirth.clear();
    // get birth times for keys with metadata
    for (const auto& entry : mapKeyMetadata) {
        if (entry.second.nCreateTime) {
            mapKeyBirth[entry.first] = entry.second.nCreateTime;
        }
    }
    // map in which we'll infer heights of other keys
    CBlockIndex *pindexMax = chainActive[std::max(0, chainActive.Height() - 144)]; // the tip can be reorganized; use a 144-block safety margin
    std::map<CKeyID, CBlockIndex*> mapKeyFirstBlock;
    // Seed every key without a known birth time with the safety-margin block.
    for (const CKeyID &keyid : GetKeys()) {
        if (mapKeyBirth.count(keyid) == 0)
            mapKeyFirstBlock[keyid] = pindexMax;
    }
    // if there are no such keys, we're done
    if (mapKeyFirstBlock.empty())
        return;
    // find first block that affects those keys, if there are any left
    std::vector<CKeyID> vAffected;
    for (const auto& entry : mapWallet) {
        // iterate over all wallet transactions...
        const CWalletTx &wtx = entry.second;
        CBlockIndex* pindex = LookupBlockIndex(wtx.hashBlock);
        if (pindex && chainActive.Contains(pindex)) {
            // ... which are already in a block
            int nHeight = pindex->nHeight;
            for (const CTxOut &txout : wtx.tx->vout) {
                // iterate over all their outputs
                CAffectedKeysVisitor(*this, vAffected).Process(txout.scriptPubKey);
                for (const CKeyID &keyid : vAffected) {
                    // ... and all their affected keys
                    std::map<CKeyID, CBlockIndex*>::iterator rit = mapKeyFirstBlock.find(keyid);
                    if (rit != mapKeyFirstBlock.end() && nHeight < rit->second->nHeight)
                        rit->second = pindex;
                }
                vAffected.clear();
            }
        }
    }
    // Extract block timestamps for those keys
    for (const auto& entry : mapKeyFirstBlock)
        mapKeyBirth[entry.first] = entry.second->GetBlockTime() - TIMESTAMP_WINDOW; // block times can be 2h off
}
/**
 * Compute smart timestamp for a transaction being added to the wallet.
 *
 * Logic:
 * - If sending a transaction, assign its timestamp to the current time.
 * - If receiving a transaction outside a block, assign its timestamp to the
 *   current time.
 * - If receiving a block with a future timestamp, assign all its (not already
 *   known) transactions' timestamps to the current time.
 * - If receiving a block with a past timestamp, before the most recent known
 *   transaction (that we care about), assign all its (not already known)
 *   transactions' timestamps to the same timestamp as that most-recent-known
 *   transaction.
 * - If receiving a block with a past timestamp, but after the most recent known
 *   transaction, assign all its (not already known) transactions' timestamps to
 *   the block time.
 *
 * For more information see CWalletTx::nTimeSmart,
 * https://5gtalk.org/?topic=54527, or
 * https://github.com/5g/5g/pull/1393.
 */
unsigned int CWallet::ComputeTimeSmart(const CWalletTx& wtx) const
{
    // Default: the time we received the transaction.
    unsigned int nTimeSmart = wtx.nTimeReceived;
    if (!wtx.hashUnset()) {
        if (const CBlockIndex* pindex = LookupBlockIndex(wtx.hashBlock)) {
            int64_t latestNow = wtx.nTimeReceived;
            int64_t latestEntry = 0;
            // Tolerate times up to the last timestamp in the wallet not more than 5 minutes into the future
            int64_t latestTolerated = latestNow + 300;
            const TxItems& txOrdered = wtxOrdered;
            // Walk the ordered tx list newest-first to find the most recent
            // other entry with a plausible (not far-future) timestamp.
            for (auto it = txOrdered.rbegin(); it != txOrdered.rend(); ++it) {
                CWalletTx* const pwtx = it->second.first;
                if (pwtx == &wtx) {
                    continue;
                }
                CAccountingEntry* const pacentry = it->second.second;
                int64_t nSmartTime;
                if (pwtx) {
                    nSmartTime = pwtx->nTimeSmart;
                    if (!nSmartTime) {
                        nSmartTime = pwtx->nTimeReceived;
                    }
                } else {
                    nSmartTime = pacentry->nTime;
                }
                if (nSmartTime <= latestTolerated) {
                    latestEntry = nSmartTime;
                    if (nSmartTime > latestNow) {
                        latestNow = nSmartTime;
                    }
                    break;
                }
            }
            // Clamp the block time between the latest known entry and "now".
            int64_t blocktime = pindex->GetBlockTime();
            nTimeSmart = std::max(latestEntry, std::min(blocktime, latestNow));
        } else {
            LogPrintf("%s: found %s in block %s not in index\n", __func__, wtx.GetHash().ToString(), wtx.hashBlock.ToString());
        }
    }
    return nTimeSmart;
}
bool CWallet::AddDestData(const CTxDestination &dest, const std::string &key, const std::string &value)
{
    // Attach a generic (key, value) record to a destination's address book
    // entry and persist it. Rejects the CNoDestination sentinel.
    if (boost::get<CNoDestination>(&dest))
        return false;
    mapAddressBook[dest].destdata.insert(std::make_pair(key, value));
    return WalletBatch(*database).WriteDestData(EncodeDestination(dest), key, value);
}
bool CWallet::EraseDestData(const CTxDestination &dest, const std::string &key)
{
    // Drop the in-memory record first; only touch the database when
    // something was actually removed.
    const bool removed = mapAddressBook[dest].destdata.erase(key) != 0;
    if (!removed) {
        return false;
    }
    return WalletBatch(*database).EraseDestData(EncodeDestination(dest), key);
}
bool CWallet::LoadDestData(const CTxDestination &dest, const std::string &key, const std::string &value)
{
    // Database-load path: populate the in-memory map only, never write back.
    mapAddressBook[dest].destdata.emplace(key, value);
    return true;
}
bool CWallet::GetDestData(const CTxDestination &dest, const std::string &key, std::string *value) const
{
    // Look up one destdata record; when found and `value` is non-null, copy
    // the stored value out. Returns whether the record exists.
    const auto entry = mapAddressBook.find(dest);
    if (entry == mapAddressBook.end()) {
        return false;
    }
    const auto record = entry->second.destdata.find(key);
    if (record == entry->second.destdata.end()) {
        return false;
    }
    if (value) {
        *value = record->second;
    }
    return true;
}
std::vector<std::string> CWallet::GetDestValues(const std::string& prefix) const
{
    // Gather the values of every destdata record whose key starts with
    // `prefix`, across all address book entries.
    LOCK(cs_wallet);
    std::vector<std::string> values;
    for (const auto& address : mapAddressBook) {
        for (const auto& data : address.second.destdata) {
            if (data.first.compare(0, prefix.size(), prefix) == 0) {
                values.push_back(data.second);
            }
        }
    }
    return values;
}
bool CWallet::Verify(std::string wallet_file, bool salvage_wallet, std::string& error_string, std::string& warning_string)
{
    // Validate the wallet path, check for duplicate wallets, verify the
    // database environment, and optionally salvage readable keypairs.
    // On failure, error_string/warning_string are filled for the caller.
    //
    // Do some checking on wallet path. It should be either a:
    //
    // 1. Path where a directory can be created.
    // 2. Path to an existing directory.
    // 3. Path to a symlink to a directory.
    // 4. For backwards compatibility, the name of a data file in -walletdir.
    LOCK(cs_wallets);
    fs::path wallet_path = fs::absolute(wallet_file, GetWalletDir());
    fs::file_type path_type = fs::symlink_status(wallet_path).type();
    if (!(path_type == fs::file_not_found || path_type == fs::directory_file ||
          (path_type == fs::symlink_file && fs::is_directory(wallet_path)) ||
          (path_type == fs::regular_file && fs::path(wallet_file).filename() == wallet_file))) {
        error_string = strprintf(
            "Invalid -wallet path '%s'. -wallet path should point to a directory where wallet.dat and "
            "database/log.?????????? files can be stored, a location where such a directory could be created, "
            "or (for backwards compatibility) the name of an existing data file in -walletdir (%s)",
            wallet_file, GetWalletDir());
        return false;
    }
    // Make sure that the wallet path doesn't clash with an existing wallet path
    for (auto wallet : GetWallets()) {
        if (fs::absolute(wallet->GetName(), GetWalletDir()) == wallet_path) {
            error_string = strprintf("Error loading wallet %s. Duplicate -wallet filename specified.", wallet_file);
            return false;
        }
    }
    // Low-level database environment sanity check.
    if (!WalletBatch::VerifyEnvironment(wallet_path, error_string)) {
        return false;
    }
    if (salvage_wallet) {
        // Recover readable keypairs:
        CWallet dummyWallet("dummy", WalletDatabase::CreateDummy());
        std::string backup_filename;
        if (!WalletBatch::Recover(wallet_path, (void *)&dummyWallet, WalletBatch::RecoverKeysOnlyFilter, backup_filename)) {
            return false;
        }
    }
    return WalletBatch::VerifyDatabaseFile(wallet_path, warning_string, error_string);
}
CWallet* CWallet::CreateWalletFromFile(const std::string& name, const fs::path& path)
{
    // Load (or create) a wallet from `path`: handle -zapwallettxes, map
    // database load errors, apply upgrade/first-run/HD settings, parse
    // fee-related options, rescan the chain if needed, and restore zapped
    // transaction metadata. Returns nullptr on fatal errors (after
    // InitError); otherwise the caller owns the returned wallet.
    const std::string& walletFile = name;
    // needed to restore wallet transaction meta data after -zapwallettxes
    std::vector<CWalletTx> vWtx;
    if (gArgs.GetBoolArg("-zapwallettxes", false)) {
        uiInterface.InitMessage(_("Zapping all transactions from wallet..."));
        std::unique_ptr<CWallet> tempWallet = MakeUnique<CWallet>(name, WalletDatabase::Create(path));
        DBErrors nZapWalletRet = tempWallet->ZapWalletTx(vWtx);
        if (nZapWalletRet != DBErrors::LOAD_OK) {
            InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile));
            return nullptr;
        }
    }
    uiInterface.InitMessage(_("Loading wallet..."));
    int64_t nStart = GetTimeMillis();
    bool fFirstRun = true;
    CWallet *walletInstance = new CWallet(name, WalletDatabase::Create(path));
    DBErrors nLoadWalletRet = walletInstance->LoadWallet(fFirstRun);
    // Map load errors onto user-facing messages; only NONCRITICAL_ERROR is
    // survivable (keys intact, possibly missing tx/address book data).
    if (nLoadWalletRet != DBErrors::LOAD_OK)
    {
        if (nLoadWalletRet == DBErrors::CORRUPT) {
            InitError(strprintf(_("Error loading %s: Wallet corrupted"), walletFile));
            return nullptr;
        }
        else if (nLoadWalletRet == DBErrors::NONCRITICAL_ERROR)
        {
            InitWarning(strprintf(_("Error reading %s! All keys read correctly, but transaction data"
                                    " or address book entries might be missing or incorrect."),
                walletFile));
        }
        else if (nLoadWalletRet == DBErrors::TOO_NEW) {
            InitError(strprintf(_("Error loading %s: Wallet requires newer version of %s"), walletFile, _(PACKAGE_NAME)));
            return nullptr;
        }
        else if (nLoadWalletRet == DBErrors::NEED_REWRITE)
        {
            InitError(strprintf(_("Wallet needed to be rewritten: restart %s to complete"), _(PACKAGE_NAME)));
            return nullptr;
        }
        else {
            InitError(strprintf(_("Error loading %s"), walletFile));
            return nullptr;
        }
    }
    // Optional wallet version upgrade (-upgradewallet, implied on first run).
    if (gArgs.GetBoolArg("-upgradewallet", fFirstRun))
    {
        int nMaxVersion = gArgs.GetArg("-upgradewallet", 0);
        if (nMaxVersion == 0) // the -upgradewallet without argument case
        {
            LogPrintf("Performing wallet upgrade to %i\n", FEATURE_LATEST);
            nMaxVersion = CLIENT_VERSION;
            walletInstance->SetMinVersion(FEATURE_LATEST); // permanently upgrade the wallet immediately
        }
        else
            LogPrintf("Allowing wallet upgrade up to %i\n", nMaxVersion);
        if (nMaxVersion < walletInstance->GetVersion())
        {
            InitError(_("Cannot downgrade wallet"));
            return nullptr;
        }
        walletInstance->SetMaxVersion(nMaxVersion);
    }
    // First run: set up a fresh HD wallet and fill its keypool.
    if (fFirstRun)
    {
        // ensure this wallet.dat can only be opened by clients supporting HD with chain split and expects no default key
        if (!gArgs.GetBoolArg("-usehd", true)) {
            InitError(strprintf(_("Error creating %s: You can't create non-HD wallets with this version."), walletFile));
            return nullptr;
        }
        walletInstance->SetMinVersion(FEATURE_NO_DEFAULT_KEY);
        // generate a new master key
        CPubKey masterPubKey = walletInstance->GenerateNewHDMasterKey();
        if (!walletInstance->SetHDMasterKey(masterPubKey))
            throw std::runtime_error(std::string(__func__) + ": Storing master key failed");
        // Top up the keypool
        if (!walletInstance->TopUpKeyPool()) {
            InitError(_("Unable to generate initial keys") += "\n");
            return nullptr;
        }
        walletInstance->ChainStateFlushed(chainActive.GetLocator());
    } else if (gArgs.IsArgSet("-usehd")) {
        // Existing wallet: its HD-ness must match an explicit -usehd setting.
        bool useHD = gArgs.GetBoolArg("-usehd", true);
        if (walletInstance->IsHDEnabled() && !useHD) {
            InitError(strprintf(_("Error loading %s: You can't disable HD on an already existing HD wallet"), walletFile));
            return nullptr;
        }
        if (!walletInstance->IsHDEnabled() && useHD) {
            InitError(strprintf(_("Error loading %s: You can't enable HD on an already existing non-HD wallet"), walletFile));
            return nullptr;
        }
    }
    // Parse address/change type and the fee-related command-line options.
    if (!gArgs.GetArg("-addresstype", "").empty() && !ParseOutputType(gArgs.GetArg("-addresstype", ""), walletInstance->m_default_address_type)) {
        InitError(strprintf("Unknown address type '%s'", gArgs.GetArg("-addresstype", "")));
        return nullptr;
    }
    if (!gArgs.GetArg("-changetype", "").empty() && !ParseOutputType(gArgs.GetArg("-changetype", ""), walletInstance->m_default_change_type)) {
        InitError(strprintf("Unknown change type '%s'", gArgs.GetArg("-changetype", "")));
        return nullptr;
    }
    if (gArgs.IsArgSet("-mintxfee")) {
        CAmount n = 0;
        if (!ParseMoney(gArgs.GetArg("-mintxfee", ""), n) || 0 == n) {
            InitError(AmountErrMsg("mintxfee", gArgs.GetArg("-mintxfee", "")));
            return nullptr;
        }
        if (n > HIGH_TX_FEE_PER_KB) {
            InitWarning(AmountHighWarn("-mintxfee") + " " +
                        _("This is the minimum transaction fee you pay on every transaction."));
        }
        walletInstance->m_min_fee = CFeeRate(n);
    }
    walletInstance->m_allow_fallback_fee = Params().IsFallbackFeeEnabled();
    if (gArgs.IsArgSet("-fallbackfee")) {
        CAmount nFeePerK = 0;
        if (!ParseMoney(gArgs.GetArg("-fallbackfee", ""), nFeePerK)) {
            InitError(strprintf(_("Invalid amount for -fallbackfee=<amount>: '%s'"), gArgs.GetArg("-fallbackfee", "")));
            return nullptr;
        }
        if (nFeePerK > HIGH_TX_FEE_PER_KB) {
            InitWarning(AmountHighWarn("-fallbackfee") + " " +
                        _("This is the transaction fee you may pay when fee estimates are not available."));
        }
        walletInstance->m_fallback_fee = CFeeRate(nFeePerK);
        walletInstance->m_allow_fallback_fee = nFeePerK != 0; //disable fallback fee in case value was set to 0, enable if non-null value
    }
    if (gArgs.IsArgSet("-discardfee")) {
        CAmount nFeePerK = 0;
        if (!ParseMoney(gArgs.GetArg("-discardfee", ""), nFeePerK)) {
            InitError(strprintf(_("Invalid amount for -discardfee=<amount>: '%s'"), gArgs.GetArg("-discardfee", "")));
            return nullptr;
        }
        if (nFeePerK > HIGH_TX_FEE_PER_KB) {
            InitWarning(AmountHighWarn("-discardfee") + " " +
                        _("This is the transaction fee you may discard if change is smaller than dust at this level"));
        }
        walletInstance->m_discard_rate = CFeeRate(nFeePerK);
    }
    if (gArgs.IsArgSet("-paytxfee")) {
        CAmount nFeePerK = 0;
        if (!ParseMoney(gArgs.GetArg("-paytxfee", ""), nFeePerK)) {
            InitError(AmountErrMsg("paytxfee", gArgs.GetArg("-paytxfee", "")));
            return nullptr;
        }
        if (nFeePerK > HIGH_TX_FEE_PER_KB) {
            InitWarning(AmountHighWarn("-paytxfee") + " " +
                        _("This is the transaction fee you will pay if you send a transaction."));
        }
        walletInstance->m_pay_tx_fee = CFeeRate(nFeePerK, 1000);
        if (walletInstance->m_pay_tx_fee < ::minRelayTxFee) {
            InitError(strprintf(_("Invalid amount for -paytxfee=<amount>: '%s' (must be at least %s)"),
                                gArgs.GetArg("-paytxfee", ""), ::minRelayTxFee.ToString()));
            return nullptr;
        }
    }
    walletInstance->m_confirm_target = gArgs.GetArg("-txconfirmtarget", DEFAULT_TX_CONFIRM_TARGET);
    walletInstance->m_spend_zero_conf_change = gArgs.GetBoolArg("-spendzeroconfchange", DEFAULT_SPEND_ZEROCONF_CHANGE);
    walletInstance->m_signal_rbf = gArgs.GetBoolArg("-walletrbf", DEFAULT_WALLET_RBF);
    LogPrintf(" wallet %15dms\n", GetTimeMillis() - nStart);
    // Try to top up keypool. No-op if the wallet is locked.
    walletInstance->TopUpKeyPool();
    LOCK(cs_main);
    // Determine where a rescan must start: the fork point between the
    // wallet's recorded best block and the active chain (or genesis when
    // -rescan was requested or no best block is recorded).
    CBlockIndex *pindexRescan = chainActive.Genesis();
    if (!gArgs.GetBoolArg("-rescan", false))
    {
        WalletBatch batch(*walletInstance->database);
        CBlockLocator locator;
        if (batch.ReadBestBlock(locator))
            pindexRescan = FindForkInGlobalIndex(chainActive, locator);
    }
    walletInstance->m_last_block_processed = chainActive.Tip();
    RegisterValidationInterface(walletInstance);
    if (chainActive.Tip() && chainActive.Tip() != pindexRescan)
    {
        //We can't rescan beyond non-pruned blocks, stop and throw an error
        //this might happen if a user uses an old wallet within a pruned node
        // or if he ran -disablewallet for a longer time, then decided to re-enable
        if (fPruneMode)
        {
            CBlockIndex *block = chainActive.Tip();
            while (block && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA) && block->pprev->nTx > 0 && pindexRescan != block)
                block = block->pprev;
            if (pindexRescan != block) {
                InitError(_("Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)"));
                return nullptr;
            }
        }
        uiInterface.InitMessage(_("Rescanning..."));
        LogPrintf("Rescanning last %i blocks (from block %i)...\n", chainActive.Height() - pindexRescan->nHeight, pindexRescan->nHeight);
        // No need to read and scan block if block was created before
        // our wallet birthday (as adjusted for block time variability)
        while (pindexRescan && walletInstance->nTimeFirstKey && (pindexRescan->GetBlockTime() < (walletInstance->nTimeFirstKey - TIMESTAMP_WINDOW))) {
            pindexRescan = chainActive.Next(pindexRescan);
        }
        nStart = GetTimeMillis();
        {
            WalletRescanReserver reserver(walletInstance);
            if (!reserver.reserve()) {
                InitError(_("Failed to rescan the wallet during initialization"));
                return nullptr;
            }
            walletInstance->ScanForWalletTransactions(pindexRescan, nullptr, reserver, true);
        }
        LogPrintf(" rescan %15dms\n", GetTimeMillis() - nStart);
        walletInstance->ChainStateFlushed(chainActive.GetLocator());
        walletInstance->database->IncrementUpdateCounter();
        // Restore wallet transaction metadata after -zapwallettxes=1
        if (gArgs.GetBoolArg("-zapwallettxes", false) && gArgs.GetArg("-zapwallettxes", "1") != "2")
        {
            WalletBatch batch(*walletInstance->database);
            for (const CWalletTx& wtxOld : vWtx)
            {
                uint256 hash = wtxOld.GetHash();
                std::map<uint256, CWalletTx>::iterator mi = walletInstance->mapWallet.find(hash);
                if (mi != walletInstance->mapWallet.end())
                {
                    // Copy the zapped transaction's metadata back onto the
                    // freshly rescanned entry and re-persist it.
                    const CWalletTx* copyFrom = &wtxOld;
                    CWalletTx* copyTo = &mi->second;
                    copyTo->mapValue = copyFrom->mapValue;
                    copyTo->vOrderForm = copyFrom->vOrderForm;
                    copyTo->nTimeReceived = copyFrom->nTimeReceived;
                    copyTo->nTimeSmart = copyFrom->nTimeSmart;
                    copyTo->fFromMe = copyFrom->fFromMe;
                    copyTo->strFromAccount = copyFrom->strFromAccount;
                    copyTo->nOrderPos = copyFrom->nOrderPos;
                    batch.WriteTx(*copyTo);
                }
            }
        }
    }
    walletInstance->SetBroadcastTransactions(gArgs.GetBoolArg("-walletbroadcast", DEFAULT_WALLETBROADCAST));
    {
        LOCK(walletInstance->cs_wallet);
        LogPrintf("setKeyPool.size() = %u\n", walletInstance->GetKeyPoolSize());
        LogPrintf("mapWallet.size() = %u\n", walletInstance->mapWallet.size());
        LogPrintf("mapAddressBook.size() = %u\n", walletInstance->mapAddressBook.size());
    }
    return walletInstance;
}
// Process-wide flag: ensures the periodic wallet-flush task is scheduled at
// most once, even when several wallets run postInitProcess.
std::atomic<bool> CWallet::fFlushScheduled(false);
void CWallet::postInitProcess(CScheduler& scheduler)
{
    // One-time wallet startup work that must run after chain initialization.
    // Add wallet transactions that aren't already in a block to mempool
    // Do this here as mempool requires genesis block to be loaded
    ReacceptWalletTransactions();
    // Run a thread to flush wallet periodically
    // exchange(true) returns the previous value, so only the first caller
    // schedules the flush task.
    if (!CWallet::fFlushScheduled.exchange(true)) {
        scheduler.scheduleEvery(MaybeCompactWalletDB, 500);
    }
}
bool CWallet::BackupWallet(const std::string& strDest)
{
    // Delegate to the database backend; returns whether the backup succeeded.
    return database->Backup(strDest);
}
CKeyPool::CKeyPool()
{
    // Default entry: no key yet, external chain, stamped with current time.
    fInternal = false;
    nTime = GetTime();
}
CKeyPool::CKeyPool(const CPubKey& vchPubKeyIn, bool internalIn)
{
    // Record the pool key, which chain (internal/change vs external) it
    // belongs to, and its creation time.
    vchPubKey = vchPubKeyIn;
    fInternal = internalIn;
    nTime = GetTime();
}
CWalletKey::CWalletKey(int64_t nExpires)
{
    // A zero expiry means "never expires"; such keys carry no creation time.
    nTimeExpires = nExpires;
    nTimeCreated = (nExpires ? GetTime() : 0);
}
void CMerkleTx::SetMerkleBranch(const CBlockIndex* pindex, int posInBlock)
{
    // Record which block contains this transaction and its position in it.
    // Update the tx's hashBlock
    hashBlock = pindex->GetBlockHash();
    // set the position of the transaction in the block
    nIndex = posInBlock;
}
int CMerkleTx::GetDepthInMainChain(const CBlockIndex* &pindexRet) const
{
    // Confirmation count: 0 when not in the active chain; negative when the
    // transaction is marked conflicted (nIndex == -1). pindexRet is set to
    // the containing block on success.
    if (hashUnset())
        return 0;
    AssertLockHeld(cs_main);
    // Find the block it claims to be in
    CBlockIndex* pindex = LookupBlockIndex(hashBlock);
    if (!pindex || !chainActive.Contains(pindex))
        return 0;
    pindexRet = pindex;
    return ((nIndex == -1) ? (-1) : 1) * (chainActive.Height() - pindex->nHeight + 1);
}
int CMerkleTx::GetBlocksToMaturity() const
{
    // Only coinbase/coinstake outputs need to mature; everything else is
    // spendable immediately (0 blocks remaining).
    if (!IsCoinBase() && !IsCoinStake())
        return 0;
    return std::max(0, (COINBASE_MATURITY+1) - GetDepthInMainChain());
}
bool CWalletTx::AcceptToMemoryPool(const CAmount& nAbsurdFee, CValidationState& state)
{
    // Submit this wallet transaction to the node's mempool; returns whether
    // it was accepted (validation details land in `state`).
    //
    // We must set fInMempool here - while it will be re-set to true by the
    // entered-mempool callback, if we did not there would be a race where a
    // user could call sendmoney in a loop and hit spurious out of funds errors
    // because we think that this newly generated transaction's change is
    // unavailable as we're not yet aware that it is in the mempool.
    bool ret = ::AcceptToMemoryPool(mempool, state, tx, nullptr /* pfMissingInputs */,
                                    nullptr /* plTxnReplaced */, false /* bypass_limits */, nAbsurdFee);
    // |= so an already-true flag is never cleared by a failed re-submission.
    fInMempool |= ret;
    return ret;
}
void CWallet::LearnRelatedScripts(const CPubKey& key, OutputType type)
{
    // For compressed keys destined for segwit output types, pre-add the
    // v0 witness program script so the wallet can recognize the derived
    // destinations.
    if (key.IsCompressed() && (type == OutputType::P2SH_SEGWIT || type == OutputType::BECH32)) {
        CTxDestination witdest = WitnessV0KeyHash(key.GetID());
        CScript witprog = GetScriptForDestination(witdest);
        // Make sure the resulting program is solvable.
        assert(IsSolvable(*this, witprog));
        AddCScript(witprog);
    }
}
// Learn the scripts for every supported output type of this key.
// Passing P2SH_SEGWIT is sufficient: it adds all necessary scripts.
void CWallet::LearnAllRelatedScripts(const CPubKey& key)
{
    LearnRelatedScripts(key, OutputType::P2SH_SEGWIT);
}
// Partition the given outputs into OutputGroups for coin selection.
//
// Non-spendable outputs are skipped. When single_coin is true every
// spendable output becomes its own group. Otherwise outputs paying the same
// extractable destination are coalesced into one group (for
// -avoidpartialspends), with each group capped at OUTPUT_GROUP_MAX_ENTRIES
// entries to avoid building an oversized transaction; a full group is
// flushed to the result and a fresh group is started for that destination.
std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outputs, bool single_coin) const {
    std::vector<OutputGroup> groups;
    std::map<CTxDestination, OutputGroup> gmap;
    CTxDestination dst;
    for (const auto& output : outputs) {
        if (output.fSpendable) {
            CInputCoin input_coin = output.GetInputCoin();
            // Mempool ancestry feeds the group's fee/limit bookkeeping.
            size_t ancestors, descendants;
            mempool.GetTransactionAncestry(output.tx->GetHash(), ancestors, descendants);
            if (!single_coin && ExtractDestination(output.tx->tx->vout[output.i].scriptPubKey, dst)) {
                // Limit output groups to no more than 10 entries, to protect
                // against inadvertently creating a too-large transaction
                // when using -avoidpartialspends
                if (gmap[dst].m_outputs.size() >= OUTPUT_GROUP_MAX_ENTRIES) {
                    groups.push_back(gmap[dst]);
                    gmap.erase(dst);
                }
                gmap[dst].Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants);
            } else {
                // Destination not extractable (or per-coin mode): own group.
                groups.emplace_back(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants);
            }
        }
    }
    // Flush the per-destination groups that never reached the cap.
    if (!single_coin) {
        for (const auto& it : gmap) groups.push_back(it.second);
    }
    return groups;
}
bool CWallet::GetOutpointAndKeysFromOutput(const COutput& out, COutPoint& outpointRet, CPubKey& pubKeyRet, CKey& keyRet)
{
// wait for reindex and/or import to finish
if (fImporting || fReindex) return false;
CScript pubScript;
outpointRet = COutPoint(out.tx->GetHash(), out.i);
pubScript = out.tx->tx->vout[out.i].scriptPubKey; // the inputs PubKey
CTxDestination address1;
ExtractDestination(pubScript, address1);
C5GAddress address2(address1);
CKeyID keyID;
if (!address2.GetKeyID(keyID)) {
LogPrintf("CWallet::GetOutpointAndKeysFromOutput -- Address does not refer to a key\n");
return false;
}
if (!GetKey(keyID, keyRet)) {
LogPrintf ("CWallet::GetOutpointAndKeysFromOutput -- Private key for address is not known\n");
return false;
}
pubKeyRet = keyRet.GetPubKey();
return true;
}
// Locate a masternode collateral UTXO in this wallet and return its outpoint
// together with the corresponding key pair.
//
// strTxHash/strOutputIndex select a specific collateral output; when
// strTxHash is empty the first candidate is used. Returns false while
// importing/reindexing or when no matching collateral output exists.
// NOTE(review): strTxHash/strOutputIndex are parsed with uint256S/atoi,
// neither of which reports malformed input — confirm callers validate them.
bool CWallet::GetMasternodeOutpointAndKeys(COutPoint& outpointRet, CPubKey& pubKeyRet, CKey& keyRet, std::string strTxHash, std::string strOutputIndex)
{
    // wait for reindex and/or import to finish
    if (fImporting || fReindex) return false;
    // Find possible candidates
    std::vector<COutput> vPossibleCoins;
    CCoinControl coinControl;
    coinControl.nCoinType = ONLY_MASTERNODE_COLLATERAL;
    AvailableCoins(vPossibleCoins, true, &coinControl, 1, MAX_MONEY, MAX_MONEY, 0, 0, 9999999);
    if(vPossibleCoins.empty()) {
        LogPrintf("CWallet::GetMasternodeOutpointAndKeys -- Could not locate any valid masternode vin\n");
        return false;
    }
    if(strTxHash.empty()) // No output specified, select the first one
        return GetOutpointAndKeysFromOutput(vPossibleCoins[0], outpointRet, pubKeyRet, keyRet);
    // Find specific vin
    uint256 txHash = uint256S(strTxHash);
    int nOutputIndex = atoi(strOutputIndex.c_str());
    for(COutput& out : vPossibleCoins)
    {
        if(out.tx->GetHash() == txHash && out.i == nOutputIndex) // found it!
            return GetOutpointAndKeysFromOutput(out, outpointRet, pubKeyRet, keyRet);
    }
    LogPrintf("CWallet::GetMasternodeOutpointAndKeys -- Could not locate specified masternode vin\n");
    return false;
}
|
#include <boost/python.hpp>
#include <boost/python/suite/indexing/indexing_suite.hpp>
#include <boost/python/suite/indexing/map_indexing_suite.hpp>
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
#include <lanelet2_core/LaneletMap.h>
#include <lanelet2_core/primitives/BasicRegulatoryElements.h>
#include <lanelet2_core/primitives/GPSPoint.h>
#include <lanelet2_core/primitives/LaneletOrArea.h>
#include <lanelet2_core/primitives/LaneletSequence.h>
#include <lanelet2_core/primitives/RegulatoryElement.h>
#include "internal/converter.h"
using namespace boost::python;
using namespace lanelet;
/// To-python conversion: an Attribute is exposed as a plain python string
/// holding its raw value.
struct AttributeToPythonStr {
  static PyObject* convert(Attribute const& s) {
    boost::python::object pyValue(s.value());
    return boost::python::incref(pyValue.ptr());
  }
};
/// Registers an implicit conversion from a python string to an Attribute.
///
/// Fixes over the previous version (Python 3 path): the temporary bytes
/// object returned by PyUnicode_AsUTF8String was never released (reference
/// leak on every conversion), and a failed conversion would have passed
/// nullptr to PyBytes_AsString (crash) instead of raising.
struct AttributeFromPythonStr {
  AttributeFromPythonStr() {
    boost::python::converter::registry::push_back(&convertible, &construct, boost::python::type_id<Attribute>());
  }
  /// Accepts any python (unicode) string object.
  static void* convertible(PyObject* objPtr) {
#if PY_MAJOR_VERSION < 3
    return PyString_Check(objPtr) ? objPtr : nullptr;  // NOLINT
#else
    return PyUnicode_Check(objPtr) ? objPtr : nullptr;  // NOLINT
#endif
  }
  /// Constructs the Attribute in boost::python's conversion storage.
  static void construct(PyObject* objPtr, boost::python::converter::rvalue_from_python_stage1_data* data) {
#if PY_MAJOR_VERSION < 3
    const char* value = PyString_AsString(objPtr);
    if (value == nullptr) {
      boost::python::throw_error_already_set();
    }
    using StorageType = boost::python::converter::rvalue_from_python_storage<Attribute>;
    void* storage = reinterpret_cast<StorageType*>(data)->storage.bytes;  // NOLINT
    new (storage) Attribute(value);
    data->convertible = storage;
#else
    PyObject* pyStr = PyUnicode_AsUTF8String(objPtr);
    if (pyStr == nullptr) {
      // Encoding failed (e.g. lone surrogates); python error is already set.
      boost::python::throw_error_already_set();
    }
    const char* value = PyBytes_AsString(pyStr);
    if (value == nullptr) {
      Py_DECREF(pyStr);
      boost::python::throw_error_already_set();
    }
    using StorageType = boost::python::converter::rvalue_from_python_storage<Attribute>;
    void* storage = reinterpret_cast<StorageType*>(data)->storage.bytes;  // NOLINT
    // Attribute copies the character data, so the bytes object can be
    // released immediately afterwards.
    new (storage) Attribute(value);
    Py_DECREF(pyStr);
    data->convertible = storage;
#endif
  }
};
/// Registers an implicit conversion from a python dict (str -> str) to an
/// AttributeMap.
struct DictToAttributeMapConverter {
  DictToAttributeMapConverter() { converter::registry::push_back(&convertible, &construct, type_id<AttributeMap>()); }
  // Only exact dicts are accepted (no dict subclasses).
  static void* convertible(PyObject* obj) {
    if (!PyDict_CheckExact(obj)) {  // NOLINT
      return nullptr;
    }
    return obj;
  }
  // Copies every key/value pair into an AttributeMap constructed in
  // boost::python's conversion storage. Keys and values must be extractable
  // as std::string; otherwise boost::python raises a python TypeError.
  static void construct(PyObject* obj, converter::rvalue_from_python_stage1_data* data) {
    dict d(borrowed(obj));
    list keys = d.keys();
    list values = d.values();
    AttributeMap attributes;
    for (auto i = 0u; i < len(keys); ++i) {
      std::string key = extract<std::string>(keys[i]);
      std::string value = extract<std::string>(values[i]);
      attributes.insert(std::make_pair(key, value));
    }
    using StorageType = converter::rvalue_from_python_storage<AttributeMap>;
    void* storage = reinterpret_cast<StorageType*>(data)->storage.bytes;  // NOLINT
    new (storage) AttributeMap(attributes);
    data->convertible = storage;
  }
};
/// To-python conversion: unwraps a LineStringOrPolygon3d into whichever
/// primitive it currently holds; an empty variant converts to None.
struct LineStringOrPolygonToObject {
  static PyObject* convert(const lanelet::LineStringOrPolygon3d& v) {
    if (v.isPolygon()) {
      return incref(object(*v.polygon()).ptr());
    }
    return v.isLineString() ? incref(object(*v.lineString()).ptr()) : incref(object().ptr());
  }
};
/// To-python conversion for the const variant: yields the held polygon or
/// linestring, or None when the variant is empty.
struct ConstLineStringOrPolygonToObject {
  static PyObject* convert(const lanelet::ConstLineStringOrPolygon3d& v) {
    if (v.isPolygon()) {
      return incref(object(*v.polygon()).ptr());
    }
    return v.isLineString() ? incref(object(*v.lineString()).ptr()) : incref(object().ptr());
  }
};
/// To-python conversion: unwraps a ConstLaneletOrArea into the held area or
/// lanelet; an empty variant converts to None.
struct ConstLaneletOrAreaToObject {
  static PyObject* convert(const lanelet::ConstLaneletOrArea& v) {
    if (v.isArea()) {
      return incref(object(*v.area()).ptr());
    }
    return v.isLanelet() ? incref(object(*v.lanelet()).ptr()) : incref(object().ptr());
  }
};
/// Helper giving a std::map-like type a python-dict flavour:
/// keys()/values()/items() accessors plus construction from a python dict.
template <class T>
struct MapItem {
  using K = typename T::key_type;
  using V = typename T::mapped_type;
  using Iter = typename T::const_iterator;
  MapItem& fromPython() {
    // NOTE(review): the next statement is a no-op comma expression — it takes
    // two member addresses and builds a type_id but registers nothing with
    // boost::python. A converter::registry::push_back call was presumably
    // intended; confirm before relying on fromPython().
    &MapItem::convertible, &MapItem::init, boost::python::type_id<T>();
    return *this;
  }
  // All map keys as a python list.
  static list keys(T const& x) {
    list t;
    for (auto it = x.begin(); it != x.end(); ++it) {
      t.append(it->first);
    }
    return t;
  }
  // All mapped values as a python list.
  static list values(T const& x) {
    list t;
    for (auto it = x.begin(); it != x.end(); ++it) {
      t.append(it->second);
    }
    return t;
  }
  // (key, value) tuples as a python list.
  static list items(T const& x) {
    list t;
    for (auto it = x.begin(); it != x.end(); ++it) {
      t.append(boost::python::make_tuple(it->first, it->second));
    }
    return t;
  }
  // Accepts any iterable object.
  // NOTE(review): PyObject_GetIter returns a new reference that is never
  // released here, and on failure the python error indicator is left set.
  static void* convertible(PyObject* object) { return PyObject_GetIter(object) != nullptr ? object : nullptr; }
  // Builds a T from a python dict, raising KeyError when a key or value is
  // not extractable as K/V. Used with boost::python::make_constructor.
  static std::shared_ptr<T> init(boost::python::dict& pyDict) {
    auto mapPtr = std::make_shared<T>();
    auto& map = *mapPtr;
    boost::python::list keys = pyDict.keys();
    for (int i = 0; i < len(keys); ++i) {  // NOLINT
      boost::python::extract<K> extractedKey(keys[i]);
      if (!extractedKey.check()) {
        PyErr_SetString(PyExc_KeyError, "Key invalid!");
        throw_error_already_set();
      }
      K key = extractedKey;
      boost::python::extract<V> extractedVal(pyDict[key]);
      if (!extractedVal.check()) {
        PyErr_SetString(PyExc_KeyError, "Value invalid!");
        throw_error_already_set();
      }
      V value = extractedVal;
      map[key] = value;  // NOLINT
    }
    return mapPtr;
  }
};
/// Property setter: writes the x coordinate of any point-like object.
template <typename T>
void setXWrapper(T& obj, double x) {
  auto& coord = obj.x();
  coord = x;
}
/// Property setter: writes the y coordinate of any point-like object.
template <typename T>
void setYWrapper(T& obj, double y) {
  auto& coord = obj.y();
  coord = y;
}
/// Property setter: writes the z coordinate of any point-like object.
template <typename T>
void setZWrapper(T& obj, double z) {
  auto& coord = obj.z();
  coord = z;
}
/// Property getter: reads the x coordinate of any point-like object.
template <typename T>
double getXWrapper(const T& obj) {
  const double coord = obj.x();
  return coord;
}
/// Property getter: reads the y coordinate of any point-like object.
template <typename T>
double getYWrapper(const T& obj) {
  const double coord = obj.y();
  return coord;
}
/// Property getter: reads the z coordinate of any point-like object.
template <typename T>
double getZWrapper(const T& obj) {
  const double coord = obj.z();
  return coord;
}
/// Wraps a callable (typically a member-function pointer) for boost::python
/// so its result is returned as an internal reference — no copy is made and
/// the returned object's lifetime is tied to the owning object.
template <typename Func>
auto getRefFunc(Func&& f) {
  return make_function(std::forward<Func>(f), return_internal_reference<>());
}
/// Property-setter helper: replaces the entire attribute map of a primitive.
template <typename T>
void setAttributeWrapper(T& obj, const AttributeMap& attr) {
  obj.attributes() = attr;
}
/// def_visitor adding the interface shared by all mutable lanelet primitives:
/// read/write "id" and "attributes" properties, ==/!= and str().
template <typename PrimT>
class IsPrimitive : public def_visitor<IsPrimitive<PrimT>> {
  using ConstT = const PrimT;
 public:
  template <typename ClassT>
  void visit(ClassT& c) const {
    // Select the const overload of attributes() for the property getter.
    const AttributeMap& (PrimT::*attr)() const = &PrimT::attributes;
    c.add_property("id", &PrimT::id, &PrimT::setId);
    c.add_property("attributes", getRefFunc(attr), setAttributeWrapper<PrimT>);
    c.def(self == self);  // NOLINT
    c.def(self != self);  // NOLINT
    c.def(self_ns::str(self_ns::self));
  }
};
/// def_visitor adding the read-only primitive interface: "id" and
/// "attributes" properties (no setters), ==/!= and str().
template <typename PrimT>
class IsConstPrimitive : public def_visitor<IsConstPrimitive<PrimT>> {
  friend class def_visitor_access;
 public:
  template <typename ClassT>
  void visit(ClassT& c) const {
    c.add_property("id", &PrimT::id);
    // Select the const overload of attributes() for the property getter.
    const AttributeMap& (PrimT::*attr)() const = &PrimT::attributes;
    c.add_property("attributes", getRefFunc(attr));
    c.def(self == self);  // NOLINT
    c.def(self != self);  // NOLINT
    c.def(self_ns::str(self_ns::self));
  }
};
/// def_visitor adding the read-only linestring interface: __iter__, __len__,
/// __getitem__ and inverted().
///
/// InternalRef selects the __getitem__ return policy: true hands out an
/// internal reference into the linestring, false returns a copy by value
/// (needed for hybrid linestrings whose elements are produced on the fly).
/// Cleanup: removed the unused private member internalRef_ — the InternalRef
/// template parameter is what actually selects the policy.
template <typename LsT, bool InternalRef = true>
class IsConstLineString : public def_visitor<IsConstLineString<LsT, InternalRef>> {
  friend class def_visitor_access;
 public:
  template <typename ClassT>
  void visit(ClassT& c) const {
    c.def("__iter__", iterator<LsT>()).def("__len__", &LsT::size).def("inverted", &LsT::inverted);
    addGetitem<InternalRef>(c);
  }
  // Enabled when items can be exposed by reference.
  template <bool InternalRefVal, typename ClassT>
  std::enable_if_t<InternalRefVal> addGetitem(ClassT& c) const {
    c.def("__getitem__", wrappers::getItem<LsT>, return_internal_reference<>());
  }
  // Enabled when items must be returned by value.
  template <bool InternalRefVal, typename ClassT>
  std::enable_if_t<!InternalRefVal> addGetitem(ClassT& c) const {
    c.def("__getitem__", wrappers::getItem<LsT>, return_value_policy<return_by_value>());
  }
};
/// def_visitor adding the mutable linestring interface: item assignment and
/// deletion, append, iteration, length, inversion and reference-returning
/// __getitem__.
template <typename LsT>
class IsLineString : public def_visitor<IsLineString<LsT>> {
  friend class def_visitor_access;
 public:
  template <typename ClassT>
  void visit(ClassT& c) const {
    c.def("__setitem__", wrappers::setItem<LsT, typename LsT::PointType>)
        .def("__delitem__", wrappers::delItem<LsT>)
        .def("append", &LsT::push_back)
        .def("__iter__", iterator<LsT>())
        .def("__len__", &LsT::size)
        .def("inverted", &LsT::inverted);
    addGetitem(c);
  }
  // Mutable linestrings always expose items as internal references.
  template <typename ClassT>
  void addGetitem(ClassT& c) const {
    c.def("__getitem__", wrappers::getItem<LsT>, return_internal_reference<>());
  }
};
/// def_visitor making a map type behave like a python dict: construction
/// from a dict (via MapItem<T>::init), index access through
/// map_indexing_suite, keys/values/items and equality comparison.
template <typename T>
class IsHybridMap : public def_visitor<IsHybridMap<T>> {
  friend class def_visitor_access;
 public:
  template <typename ClassT>
  void visit(ClassT& c) const {
    c.def("__init__", make_constructor(MapItem<T>::init))
        .def(map_indexing_suite<T, true>())
        .def("keys", MapItem<T>::keys)
        .def("values", MapItem<T>::values)
        .def("items", MapItem<T>::items,
             "Iterates over the key-value pairs")
        .def(self == self)   // NOLINT
        .def(self != self);  // NOLINT
  }
};
/// __add__ binding helper: sum of two values of the same type.
template <typename T>
T addWrapper(const T& rhs, const T& lhs) {
  T sum = rhs + lhs;
  return sum;
}
/// __sub__ binding helper: difference of two values of the same type
/// (first operand minus second).
template <typename T>
T subWrapper(const T& rhs, const T& lhs) {
  T difference = rhs - lhs;
  return difference;
}
/// __mul__ binding helper; .eval() forces the lazy Eigen product expression
/// into a concrete T1 value.
template <typename T1, typename T2>
T1 mulWrapper(const T1& rhs, const T2& lhs) {
  return (rhs * lhs).eval();
}
/// __rmul__ binding helper — same product as mulWrapper but the result is
/// converted to the second operand's type.
template <typename T1, typename T2>
T2 rmulWrapper(const T1& rhs, const T2& lhs) {
  return (rhs * lhs).eval();
}
/// __div__ binding helper; first operand divided by the second.
template <typename T1, typename T2>
T1 divWrapper(const T1& rhs, const T2& lhs) {
  return (rhs / lhs).eval();
}
// Signature alias: the mutable attribute setter of a primitive, used to pick
// the right overload when binding.
template <typename T>
using SetAttrSig = void (T::*)(const std::string&, const Attribute&);
// Signature alias: the const attribute lookup of ConstPrimitive<T>.
template <typename T>
using GetAttrSig = const Attribute& (ConstPrimitive<T>::*)(const std::string&)const;
/// Builds the boost::python class wrapper shared by all lanelet map layers.
///
/// Binds exists/get/search/nearest/uniqueId plus python iteration and
/// indexing. The static_casts select the intended overload of each layer
/// member function so boost::python gets an unambiguous pointer. ClassArgs
/// are forwarded as additional template arguments of boost::python::class_.
template <typename LayerT = PointLayer, typename... ClassArgs>
auto wrapLayer(const char* layerName) {
  auto get = static_cast<typename LayerT::PrimitiveT (LayerT::*)(Id)>(&LayerT::get);
  auto search = static_cast<typename LayerT::PrimitiveVec (LayerT::*)(const BoundingBox2d&)>(&LayerT::search);
  auto nearest =
      static_cast<typename LayerT::PrimitiveVec (LayerT::*)(const BasicPoint2d&, unsigned)>(&LayerT::nearest);
  return class_<LayerT, boost::noncopyable, ClassArgs...>(layerName, no_init)
      .def("exists", &LayerT::exists, "Checks if a point exists")
      .def("get", get, "Gets a point with specified Id")
      .def("__iter__", iterator<LayerT>())
      .def("__len__", &LayerT::size)
      .def("__getitem__", +[](LayerT& self, Id idx) { return self.get(idx); })
      .def("search", search, "Search in a search area")
      .def("nearest", nearest, "Get nearest n points")
      .def("uniqueId", &LayerT::uniqueId);
}
/// Picks the LaneletMap::add overload taking PrimT — add is overloaded, so
/// boost::python needs an unambiguous member-function pointer.
template <typename PrimT>
auto selectAdd() {
  return static_cast<void (LaneletMap::*)(PrimT)>(&LaneletMap::add);
}
/// Same as selectAdd, but for LaneletSubmap::add.
template <typename PrimT>
auto selectSubmapAdd() {
  return static_cast<void (LaneletSubmap::*)(PrimT)>(&LaneletSubmap::add);
}
/// Returns the lanelet's regulatory elements of the requested concrete type.
template <typename RegelemT>
std::vector<std::shared_ptr<RegelemT>> regelemAs(Lanelet& llt) {
  return llt.regulatoryElementsAs<RegelemT>();
}
/// Const counterpart of regelemAs for ConstLanelet.
template <typename RegelemT>
std::vector<std::shared_ptr<const RegelemT>> constRegelemAs(ConstLanelet& llt) {
  return llt.regulatoryElementsAs<RegelemT>();
}
/// Binding helper: builds a LaneletMap containing the given primitive(s).
template <typename PrimT>
LaneletMapPtr createMapWrapper(const PrimT& prim) {
  return utils::createMap(prim);
}
/// Binding helper: builds a LaneletSubmap containing the given primitive(s).
template <typename PrimT>
LaneletSubmapPtr createSubmapWrapper(const PrimT& prim) {
  return utils::createSubmap(prim);
}
BOOST_PYTHON_MODULE(PYTHON_API_MODULE_NAME) { // NOLINT
class_<BasicPoint2d>("BasicPoint2d", "A simple point", init<double, double>((arg("x") = 0., arg("y") = 0.)))
.def(init<>("BasicPoint2d()"))
.add_property("x", getXWrapper<BasicPoint2d>, setXWrapper<BasicPoint2d>, "x coordinate")
.add_property("y", getYWrapper<BasicPoint2d>, setYWrapper<BasicPoint2d>, "y coordinate")
.def("__add__", addWrapper<BasicPoint2d>)
.def("__sub__", subWrapper<BasicPoint2d>)
.def("__mul__", mulWrapper<BasicPoint2d, double>)
.def("__rmul__", mulWrapper<BasicPoint2d, double>)
.def("__div__", divWrapper<BasicPoint2d, double>)
.def(self_ns::str(self_ns::self));
class_<Eigen::Vector2d>("Vector2d", "A simple point", no_init)
.add_property("x", getXWrapper<BasicPoint2d>, setXWrapper<BasicPoint2d>, "x coordinate")
.add_property("y", getYWrapper<BasicPoint2d>, setYWrapper<BasicPoint2d>, "y coordinate")
.def("__add__", addWrapper<BasicPoint2d>)
.def("__sub__", subWrapper<BasicPoint2d>)
.def("__mul__", mulWrapper<BasicPoint2d, double>)
.def("__rmul__", mulWrapper<BasicPoint2d, double>)
.def("__div__", divWrapper<BasicPoint2d, double>)
.def(self_ns::str(self_ns::self));
implicitly_convertible<Eigen::Vector2d, BasicPoint2d>();
class_<BasicPoint3d>("BasicPoint3d", "A simple point",
init<double, double, double>((arg("x") = 0., arg("y") = 0., arg("z") = 0.)))
.add_property("x", getXWrapper<BasicPoint3d>, setXWrapper<BasicPoint3d>, "x coordinate")
.add_property("y", getYWrapper<BasicPoint3d>, setYWrapper<BasicPoint3d>, "y coordinate")
.add_property("z", getZWrapper<BasicPoint3d>, setZWrapper<BasicPoint3d>, "z coordinate")
.def("__add__", addWrapper<BasicPoint3d>)
.def("__sub__", subWrapper<BasicPoint3d>)
.def("__mul__", mulWrapper<BasicPoint3d, double>)
.def("__rmul__", mulWrapper<BasicPoint3d, double>)
.def("__div__", divWrapper<BasicPoint3d, double>)
.def(self_ns::str(self_ns::self));
class_<BoundingBox2d>("BoundingBox2d", init<BasicPoint2d, BasicPoint2d>("BoundingBox2d(minPoint, maxPoint"))
.add_property("min", +[](BoundingBox2d& self) { return self.min(); })
.add_property("max", +[](BoundingBox2d& self) { return self.max(); });
class_<BoundingBox3d>("BoundingBox3d", init<BasicPoint3d, BasicPoint3d>("BoundingBox3d(minPoint, maxPoint"))
.add_property("min", +[](BoundingBox3d& self) { return self.min(); })
.add_property("max", +[](BoundingBox3d& self) { return self.max(); });
boost::python::to_python_converter<Attribute, AttributeToPythonStr>();
using ::converters::IterableConverter;
using ::converters::OptionalConverter;
using ::converters::PairConverter;
using ::converters::ToOptionalConverter;
using ::converters::VariantConverter;
using ::converters::VectorToListConverter;
using ::converters::WeakConverter;
VariantConverter<RuleParameter>();
VariantConverter<ConstRuleParameter>();
VectorToListConverter<Points3d>();
VectorToListConverter<Points2d>();
VectorToListConverter<BasicPoints3d>();
VectorToListConverter<std::vector<BasicPoint2d>>();
VectorToListConverter<ConstPoints3d>();
VectorToListConverter<ConstPoints2d>();
VectorToListConverter<LineStrings3d>();
VectorToListConverter<LineStrings2d>();
VectorToListConverter<ConstLineStrings3d>();
VectorToListConverter<ConstLineStrings2d>();
VectorToListConverter<BasicPolygon3d>();
VectorToListConverter<BasicPolygon2d>();
VectorToListConverter<Polygons3d>();
VectorToListConverter<Polygons2d>();
VectorToListConverter<ConstPolygons3d>();
VectorToListConverter<ConstPolygons2d>();
VectorToListConverter<CompoundPolygons3d>();
VectorToListConverter<CompoundPolygons2d>();
VectorToListConverter<Lanelets>();
VectorToListConverter<ConstLanelets>();
VectorToListConverter<LaneletSequences>();
VectorToListConverter<LaneletsWithStopLines>();
VectorToListConverter<RuleParameters>();
VectorToListConverter<ConstRuleParameters>();
VectorToListConverter<RegulatoryElementPtrs>();
VectorToListConverter<RegulatoryElementConstPtrs>();
VectorToListConverter<LineStringsOrPolygons3d>();
VectorToListConverter<ConstLineStringsOrPolygons3d>();
VectorToListConverter<ConstLaneletOrAreas>();
VectorToListConverter<Areas>();
VectorToListConverter<std::vector<TrafficLight::Ptr>>();
VectorToListConverter<std::vector<TrafficSign::Ptr>>();
VectorToListConverter<std::vector<SpeedLimit::Ptr>>();
VectorToListConverter<std::vector<RightOfWay::Ptr>>();
VectorToListConverter<std::vector<AllWayStop::Ptr>>();
VectorToListConverter<std::vector<std::shared_ptr<const TrafficLight>>>();
VectorToListConverter<std::vector<std::shared_ptr<const TrafficSign>>>();
VectorToListConverter<std::vector<std::shared_ptr<const SpeedLimit>>>();
VectorToListConverter<std::vector<std::shared_ptr<const RightOfWay>>>();
VectorToListConverter<std::vector<std::shared_ptr<const AllWayStop>>>();
VectorToListConverter<std::vector<std::string>>();
AttributeFromPythonStr();
DictToAttributeMapConverter();
OptionalConverter<Point2d>();
OptionalConverter<Point3d>();
OptionalConverter<ConstPoint2d>();
OptionalConverter<ConstPoint3d>();
OptionalConverter<LineString3d>();
OptionalConverter<LineString2d>();
OptionalConverter<ConstLineString3d>();
OptionalConverter<ConstLineString2d>();
OptionalConverter<LineStrings3d>();
OptionalConverter<LineStrings2d>();
OptionalConverter<ConstLineStrings3d>();
OptionalConverter<ConstLineStrings2d>();
OptionalConverter<LineStringsOrPolygons3d>();
OptionalConverter<Lanelet>();
OptionalConverter<ConstLanelet>();
OptionalConverter<LaneletSequence>();
OptionalConverter<Area>();
OptionalConverter<ConstArea>();
OptionalConverter<Polygon2d>();
OptionalConverter<Polygon3d>();
OptionalConverter<ConstPolygon2d>();
OptionalConverter<ConstPolygon3d>();
OptionalConverter<RuleParameter>();
OptionalConverter<ConstRuleParameter>();
OptionalConverter<RegulatoryElement>();
PairConverter<std::pair<BasicPoint2d, BasicPoint2d>>();
PairConverter<std::pair<BasicPoint3d, BasicPoint3d>>();
WeakConverter<WeakLanelet>();
WeakConverter<WeakArea>();
  // Register iterable conversions.
IterableConverter()
.fromPython<Points3d>()
.fromPython<Points2d>()
.fromPython<ConstLineStrings3d>()
.fromPython<ConstLineStrings2d>()
.fromPython<LineStrings3d>()
.fromPython<LineStrings2d>()
.fromPython<Polygons2d>()
.fromPython<Polygons3d>()
.fromPython<Lanelets>()
.fromPython<ConstLanelets>()
.fromPython<LaneletsWithStopLines>()
.fromPython<Areas>()
.fromPython<ConstAreas>()
.fromPython<RegulatoryElementPtrs>()
.fromPython<LineStringsOrPolygons3d>()
.fromPython<ConstLineStringsOrPolygons3d>();
ToOptionalConverter().fromPython<LineString3d>();
to_python_converter<LineStringOrPolygon3d, LineStringOrPolygonToObject>();
to_python_converter<ConstLineStringOrPolygon3d, ConstLineStringOrPolygonToObject>();
to_python_converter<ConstLaneletOrArea, ConstLaneletOrAreaToObject>();
implicitly_convertible<LineString3d, LineStringOrPolygon3d>();
implicitly_convertible<Polygon3d, LineStringOrPolygon3d>();
implicitly_convertible<ConstLineString3d, ConstLineStringOrPolygon3d>();
implicitly_convertible<ConstPolygon3d, ConstLineStringOrPolygon3d>();
implicitly_convertible<ConstLanelet, ConstLaneletOrArea>();
implicitly_convertible<ConstArea, ConstLaneletOrArea>();
class_<AttributeMap>("AttributeMap", init<>("AttributeMap()"))
.def(IsHybridMap<AttributeMap>())
.def(self_ns::str(self_ns::self));
class_<RuleParameterMap>("RuleParameterMap", init<>("RuleParameterMap()")).def(IsHybridMap<RuleParameterMap>());
class_<ConstRuleParameterMap>("ConstRuleParameterMap", init<>("ConstRuleParameterMap()"))
.def(IsHybridMap<ConstRuleParameterMap>());
class_<ConstPoint2d>("ConstPoint2d", no_init)
.def(IsConstPrimitive<ConstPoint2d>())
.add_property("x", getXWrapper<ConstPoint2d>, "x coordinate")
.add_property("y", getYWrapper<ConstPoint2d>, "y coordinate");
class_<Point2d, bases<ConstPoint2d>>(
"Point2d", "Lanelets 2d point primitive",
init<Id, BasicPoint3d, AttributeMap>((arg("id"), arg("point"), arg("attributes") = AttributeMap())))
.def(init<>("Point2d()"))
.def(init<Point3d>("Point3d()"))
.def(init<Id, double, double, double, AttributeMap>(
(arg("id"), arg("x"), arg("y"), arg("z") = 0., arg("attributes") = AttributeMap())))
.add_property("x", getXWrapper<Point2d>, setXWrapper<Point2d>, "x coordinate")
.add_property("y", getYWrapper<Point2d>, setYWrapper<Point2d>, "y coordinate")
.def("basicPoint", &ConstPoint2d::basicPoint, return_internal_reference<>())
.def(IsPrimitive<Point2d>());
class_<ConstPoint3d>("ConstPoint3d", no_init)
.def(IsConstPrimitive<ConstPoint3d>())
.add_property("x", getXWrapper<ConstPoint3d>, "x coordinate")
.add_property("y", getYWrapper<ConstPoint3d>, "y coordinate")
.add_property("z", getZWrapper<ConstPoint3d>, "z coordinate")
.def("basicPoint", &ConstPoint3d::basicPoint, return_internal_reference<>());
class_<Point3d, bases<ConstPoint3d>>(
"Point3d", "Lanelets 3d point primitive",
init<Id, BasicPoint3d, AttributeMap>((arg("id"), arg("point"), arg("attributes") = AttributeMap())))
.def(init<>("Point3d()"))
.def(init<Point2d>("Point2d()"))
.def(init<Id, double, double, double, AttributeMap>(
(arg("id"), arg("x"), arg("y"), arg("z") = 0., arg("attributes") = AttributeMap())))
.add_property("x", getXWrapper<Point3d>, setXWrapper<Point3d>, "x coordinate")
.add_property("y", getYWrapper<Point3d>, setYWrapper<Point3d>, "y coordinate")
.add_property("z", getZWrapper<Point3d>, setZWrapper<Point3d>, "z coordinate")
.def(IsPrimitive<Point3d>());
class_<GPSPoint, std::shared_ptr<GPSPoint>>("GPSPoint", "A raw GPS point", no_init)
.def("__init__", make_constructor(
+[](double lat, double lon, double alt) {
return std::make_shared<GPSPoint>(GPSPoint({lat, lon, alt}));
},
default_call_policies(), (arg("lat") = 0., arg("lon") = 0., arg("alt") = 0)))
.add_property("lat", &GPSPoint::lat)
.add_property("lon", &GPSPoint::lon)
.add_property("alt", &GPSPoint::ele);
class_<ConstLineString2d>("ConstLineString2d", "Immutable 2d lineString primitive",
init<ConstLineString3d>("ConstLineString2d(ConstLineString3d)"))
.def("invert", &ConstLineString2d::invert)
.def(IsConstLineString<ConstLineString2d>())
.def(IsConstPrimitive<ConstLineString2d>());
class_<LineString2d, bases<ConstLineString2d>>(
"LineString2d", "Lanelets 2d lineString primitive",
init<Id, Points3d, AttributeMap>("LineString2d(id, point_list, attributes)"))
.def(init<Id, Points3d>("LineString2d(id, point_list"))
.def(init<LineString3d>("LineString2d(LineString3d)"))
.def("invert", &LineString2d::invert)
.def(IsLineString<LineString2d>());
class_<ConstLineString3d>("ConstLineString3d", "Immutable 3d lineString primitive",
init<ConstLineString2d>("ConstLineString3d(ConstLineString2d)"))
.def(init<Id, Points3d, AttributeMap>((arg("id"), arg("points"), arg("attributes") = AttributeMap())))
.def("invert", &ConstLineString3d::invert)
.def(IsConstLineString<ConstLineString3d>())
.def(IsConstPrimitive<ConstLineString3d>());
class_<LineString3d, bases<ConstLineString3d>>(
"LineString3d", "Lanelets 3d lineString primitive",
init<Id, Points3d, AttributeMap>((arg("id"), arg("points"), arg("attributes") = AttributeMap())))
.def(init<LineString2d>("LineString3d(LineString2d)"))
.def("invert", &LineString3d::invert)
.def(IsLineString<LineString3d>())
.def(IsPrimitive<LineString3d>());
class_<ConstHybridLineString2d>("ConstHybridLineString2d", "A Linestring that behaves like a normal BasicLineString",
init<LineString2d>("ConstHybridLineString2d(LineString2d"))
.def(init<ConstLineString2d>())
.def("invert", &ConstHybridLineString2d::invert)
.def(IsConstLineString<ConstHybridLineString2d, false>())
.def(IsConstPrimitive<ConstHybridLineString2d>());
class_<ConstHybridLineString3d>("ConstHybridLineString3d", "A Linestring that behaves like a normal BasicLineString",
init<LineString3d>("ConstHybridLineString3d(LineString3d"))
.def("invert", &ConstHybridLineString3d::invert)
.def(IsConstLineString<ConstHybridLineString3d, false>())
.def(IsConstPrimitive<ConstHybridLineString3d>());
class_<CompoundLineString2d>("CompoundLineString2d", "A LineString composed of multiple linestrings",
init<ConstLineStrings2d>())
.def(IsConstLineString<CompoundLineString2d>())
.def("lineStrings", &CompoundLineString2d::lineStrings)
.def("ids", &CompoundLineString2d::ids, "ids of the linestrings");
class_<CompoundLineString3d>("CompoundLineString3d", "A LineString composed of multiple linestrings",
init<ConstLineStrings3d>())
.def(IsConstLineString<CompoundLineString3d>())
.def("lineStrings", &CompoundLineString3d::lineStrings)
.def("ids", &CompoundLineString3d::ids, "ids of the linestrings");
class_<ConstPolygon2d>(
"ConstPolygon2d", "A two-dimensional lanelet polygon",
init<Id, Points3d, AttributeMap>((arg("id"), arg("points"), arg("attributes") = AttributeMap())))
.def(IsConstLineString<ConstPolygon2d>())
.def(IsConstPrimitive<ConstPolygon2d>());
class_<ConstPolygon3d>(
"ConstPolygon3d", "A three-dimensional lanelet polygon",
init<Id, Points3d, AttributeMap>((arg("id"), arg("points"), arg("attributes") = AttributeMap())))
.def(IsConstLineString<ConstPolygon3d>())
.def(IsConstPrimitive<ConstPolygon3d>());
class_<Polygon2d, bases<ConstPolygon2d>>("Polygon2d", "A two-dimensional lanelet polygon",
init<Id, Points3d, AttributeMap>("Polygon2d(Id, point_list, attributes"))
.def(IsLineString<Polygon2d>())
.def(IsPrimitive<Polygon2d>());
class_<Polygon3d, bases<ConstPolygon3d>>(
"Polygon3d", "A three-dimensional lanelet polygon",
init<Id, Points3d, AttributeMap>((arg("id"), arg("points"), arg("attributes") = AttributeMap())))
.def(IsLineString<Polygon3d>())
.def(IsPrimitive<Polygon3d>());
class_<CompoundPolygon2d>("CompoundPolygon2d", "A polygon composed of multiple linestrings",
init<ConstLineStrings2d>())
.def(IsConstLineString<CompoundPolygon2d>())
.def("lineStrings", &CompoundPolygon2d::lineStrings)
.def("ids", &CompoundPolygon2d::ids, "ids of the linestrings");
class_<CompoundPolygon3d>("CompoundPolygon3d", "A polygon composed of multiple linestrings",
init<ConstLineStrings3d>())
.def(IsConstLineString<CompoundPolygon3d>())
.def("lineStrings", &CompoundPolygon3d::lineStrings)
.def("ids", &CompoundPolygon3d::ids, "ids of the linestrings");
class_<ConstLanelet>("ConstLanelet", "An immutable lanelet primitive",
init<Id, LineString3d, LineString3d, AttributeMap>(
(arg("id"), arg("leftBound"), arg("rightBound"), arg("attributes") = AttributeMap())))
.def(init<Lanelet>())
.def(IsConstPrimitive<ConstLanelet>())
.add_property("centerline", &ConstLanelet::centerline, "Centerline of the lanelet")
.add_property("leftBound", &ConstLanelet::leftBound, "Left boundary of lanelet")
.add_property("rightBound", &ConstLanelet::rightBound, "Right boundary of lanelet")
.add_property("regulatoryElements",
make_function(&ConstLanelet::regulatoryElements, return_value_policy<return_by_value>()),
"Regulatory elements of the lanelet")
.def("trafficLights", constRegelemAs<TrafficLight>, "traffic light regulatory elements")
.def("trafficSigns", constRegelemAs<TrafficSign>, "traffic sign regulatory elements")
.def("speedLimits", constRegelemAs<SpeedLimit>, "speed limit regulatory elements")
.def("rightOfWay", constRegelemAs<RightOfWay>, "right of way regulatory elements")
.def("allWayStop", constRegelemAs<AllWayStop>, "all way stop regulatory elements")
.def("invert", &ConstLanelet::invert, "Returns inverted lanelet (flipped left/right bound, etc")
.def("inverted", &ConstLanelet::inverted, "Returns whether this lanelet has been inverted")
.def("polygon2d", &ConstLanelet::polygon2d, "Outline of this lanelet as 2d polygon")
.def("polygon3d", &ConstLanelet::polygon3d, "Outline of this lanelet as 3d polygon")
.def("resetCache", &ConstLanelet::resetCache,
"Reset the cache. Forces update of the centerline if points have chagned");
auto left = static_cast<LineString3d (Lanelet::*)()>(&Lanelet::leftBound);
auto right = static_cast<LineString3d (Lanelet::*)()>(&Lanelet::rightBound);
auto regelems = static_cast<const RegulatoryElementPtrs& (Lanelet::*)()>(&Lanelet::regulatoryElements);
class_<Lanelet, bases<ConstLanelet>>(
"Lanelet", "The famous lanelet primitive",
init<Id, LineString3d, LineString3d, AttributeMap>(
(arg("id"), arg("leftBound"), arg("rightBound"), arg("attributes") = AttributeMap())))
.def(IsPrimitive<Lanelet>())
.add_property("centerline", &Lanelet::centerline, &Lanelet::setCenterline, "Centerline of the lanelet")
.add_property("leftBound", left, &Lanelet::setLeftBound, "Left boundary of lanelet")
.add_property("rightBound", right, &Lanelet::setRightBound, "Right boundary of lanelet")
.add_property("regulatoryElements", make_function(regelems, return_value_policy<return_by_value>()),
"Regulatory elements of the lanelet")
.def("trafficLights", regelemAs<TrafficLight>, "traffic light regulatory elements")
.def("trafficSigns", regelemAs<TrafficSign>, "traffic sign regulatory elements")
.def("speedLimits", regelemAs<SpeedLimit>, "speed limit regulatory elements")
.def("rightOfWay", regelemAs<RightOfWay>, "right of way regulatory elements")
.def("allWayStop", regelemAs<AllWayStop>, "all way stop regulatory elements")
.def("addRegulatoryElement", &Lanelet::addRegulatoryElement)
.def("removeRegulatoryElement", &Lanelet::removeRegulatoryElement)
.def("invert", &Lanelet::invert, "Returns inverted lanelet (flipped left/right bound, etc")
.def("inverted", &ConstLanelet::inverted, "Returns whether this lanelet has been inverted")
.def("polygon2d", &ConstLanelet::polygon2d, "Outline of this lanelet as 2d polygon")
.def("polygon3d", &ConstLanelet::polygon3d, "Outline of this lanelet as 3d polygon");
class_<LaneletSequence>("LaneletSequence", "A combined lane formed from multiple lanelets", init<ConstLanelets>())
.add_property("centerline", &LaneletSequence::centerline, "Centerline of the lanelet")
.add_property("leftBound", &LaneletSequence::leftBound, "Left boundary of lanelet")
.add_property("rightBound", &LaneletSequence::rightBound, "Right boundary of lanelet")
.def("invert", &LaneletSequence::invert, "Returns inverted lanelet (flipped left/right bound, etc")
.def("inverted", &LaneletSequence::inverted, "Returns whether this lanelet has been inverted")
.def("polygon2d", &LaneletSequence::polygon2d, "Outline of this lanelet as 2d polygon")
.def("polygon3d", &LaneletSequence::polygon3d, "Outline of this lanelet as 3d polygon")
.def("lanelets", &LaneletSequence::lanelets, "Lanelets that make up this compound lanelet")
.def("__iter__", iterator<LaneletSequence>())
.def("__len__", &LaneletSequence::size)
.def("inverted", &LaneletSequence::inverted)
.def("__getitem__", wrappers::getItem<LaneletSequence>, return_internal_reference<>());
class_<ConstLaneletWithStopLine>("ConstLaneletWithStopLine", "A lanelet with a stopline", no_init)
.add_property("lanelet", &ConstLaneletWithStopLine::lanelet)
.add_property("stopLine", &ConstLaneletWithStopLine::stopLine);
class_<LaneletWithStopLine>("LaneletWithStopLine", "A lanelet with a stopline", no_init)
.add_property("lanelet", &LaneletWithStopLine::lanelet)
.add_property("stopLine", &LaneletWithStopLine::stopLine);
class_<ConstArea>("ConstArea", "Represents an area, potentially with holes, in the map",
boost::python::init<Id, LineStrings3d, InnerBounds, AttributeMap>(
"Lanelet(id, outerBound, innerBounds, attributes"))
.def(init<Id, LineStrings3d, InnerBounds>("Lanelet(id, outerBound, innerBounds"))
.def(init<Id, LineStrings3d>("Lanelet(id, outerBound"))
.def(IsConstPrimitive<ConstArea>())
.add_property("outerBound", &ConstArea::outerBound)
.add_property("innerBounds", &ConstArea::innerBounds)
.def("outerBoundPolygon", &ConstArea::outerBoundPolygon)
.def("innerBoundPolygon", &ConstArea::innerBoundPolygons);
// Disambiguate the overloads of Area::outerBound/innerBounds for the getters below.
auto outerBound = static_cast<const LineStrings3d& (Area::*)()>(&Area::outerBound);
auto innerBounds = static_cast<const std::vector<LineStrings3d>& (Area::*)()>(&Area::innerBounds);
// Mutable area; inherits the read-only interface from ConstArea.
class_<Area, bases<ConstArea>>("Area", "Represents an area, potentially with holes, in the map",
                               boost::python::init<Id, LineStrings3d, InnerBounds, AttributeMap>(
                                   // Fixed: docstrings wrongly said "Lanelet(...)" and
                                   // lacked the closing parenthesis.
                                   "Area(id, outerBound, innerBounds, attributes)"))
    .def(init<Id, LineStrings3d, InnerBounds>("Area(id, outerBound, innerBounds)"))
    .def(init<Id, LineStrings3d>("Area(id, outerBound)"))
    .def(IsPrimitive<Area>())
    .add_property("outerBound", getRefFunc(outerBound), &Area::setOuterBound)
    .add_property("innerBounds", getRefFunc(innerBounds), &Area::setInnerBounds)
    .add_property("regulatoryElements", +[](Area& self) { return self.regulatoryElements(); },
                  "Regulatory elements of the area")
    .def("addRegulatoryElement", &Area::addRegulatoryElement)
    .def("removeRegulatoryElement", &Area::removeRegulatoryElement)
    .def("outerBoundPolygon", &Area::outerBoundPolygon)
    // NOTE(review): singular Python name for a method that returns all inner bound
    // polygons; kept for backward compatibility.
    .def("innerBoundPolygon", &Area::innerBoundPolygons);
// Alias to select the const overload of getParameters below.
using GetParamSig = ConstRuleParameterMap (RegulatoryElement::*)() const;
// Abstract polymorphic base of all regulatory elements; held via shared_ptr and
// not constructible from Python.
class_<RegulatoryElement, boost::noncopyable, RegulatoryElementPtr>(
    "RegulatoryElement", "A Regulatory element defines traffic rules that affect a lanelet", no_init)
    .def(IsConstPrimitive<RegulatoryElement>())
    .add_property("id", &RegulatoryElement::id, &RegulatoryElement::setId)
    .add_property("parameters", static_cast<GetParamSig>(&RegulatoryElement::getParameters),
                  "the parameters (ie traffic signs, lanelets) that affect "
                  "this RegulatoryElement")
    .add_property("roles", &RegulatoryElement::roles)
    .def("find", &RegulatoryElement::find<ConstRuleParameter>, "Returns a primitive with matching id, else None")
    .def("__len__", &RegulatoryElement::size);
// Let Python also hold const pointers to regulatory elements.
register_ptr_to_python<RegulatoryElementConstPtr>();
// Traffic light rule; constructed through the TrafficLight::make factory so the
// attribute map is set up consistently.
class_<TrafficLight, boost::noncopyable, std::shared_ptr<TrafficLight>, bases<RegulatoryElement>>(
    "TrafficLight", "A traffic light regulatory element", no_init)
    .def("__init__", make_constructor(&TrafficLight::make, default_call_policies(),
                                      (arg("id"), arg("attributes"), arg("trafficLights"),
                                       arg("stopLine") = Optional<LineString3d>())))
    // Lambdas return by value so Python gets copies, not dangling references.
    .add_property("stopLine", +[](TrafficLight& self) { return self.stopLine(); }, &TrafficLight::setStopLine)
    .add_property("trafficLights", +[](TrafficLight& self) { return self.trafficLights(); })
    .def("addTrafficLight", &TrafficLight::addTrafficLight)
    .def("removeTrafficLight", &TrafficLight::removeTrafficLight);
// Allow implicit upcast to the generic RegulatoryElementPtr.
implicitly_convertible<std::shared_ptr<TrafficLight>, RegulatoryElementPtr>();
// Maneuver classification used by RightOfWay::getManeuver.
enum_<ManeuverType>("ManeuverType")
    .value("Yield", ManeuverType::Yield)
    .value("RightOfWay", ManeuverType::RightOfWay)
    .value("Unknown", ManeuverType::Unknown)
    .export_values();
// Right-of-way rule: relates lanelets that have priority to lanelets that must yield.
class_<RightOfWay, boost::noncopyable, std::shared_ptr<RightOfWay>, bases<RegulatoryElement>>(
    "RightOfWay", "A right of way regulatory element", no_init)
    .def("__init__", make_constructor(&RightOfWay::make, default_call_policies(),
                                      (arg("id"), arg("attributes"), arg("rightOfWayLanelets"), arg("yieldLanelets"),
                                       arg("stopLine") = Optional<LineString3d>{})))
    // Lambdas return by value to avoid handing Python dangling references.
    .add_property("stopLine", +[](RightOfWay& self) { return self.stopLine(); }, &RightOfWay::setStopLine)
    .def("removeStopLine", &RightOfWay::removeStopLine)
    .def("getManeuver", &RightOfWay::getManeuver, "get maneuver for a lanelet")
    .def("rightOfWayLanelets", +[](RightOfWay& self) { return self.rightOfWayLanelets(); })
    .def("addRightOfWayLanelet", &RightOfWay::addRightOfWayLanelet)
    .def("removeRightOfWayLanelet", &RightOfWay::removeRightOfWayLanelet)
    .def("yieldLanelets", +[](RightOfWay& self) { return self.yieldLanelets(); })
    .def("addYieldLanelet", &RightOfWay::addYieldLanelet)
    .def("removeYieldLanelet", &RightOfWay::removeYieldLanelet);
implicitly_convertible<std::shared_ptr<RightOfWay>, RegulatoryElementPtr>();
// All-way stop rule: a set of lanelets with stop lines and optional signs.
class_<AllWayStop, boost::noncopyable, std::shared_ptr<AllWayStop>, bases<RegulatoryElement>>(
    "AllWayStop", "An all way stop regulatory element", no_init)
    .def("__init__", make_constructor(&AllWayStop::make, default_call_policies(),
                                      (arg("id"), arg("attributes"), arg("lltsWithStop"),
                                       arg("signs") = Optional<LineStringsOrPolygons3d>{})))
    .def("lanelets", +[](AllWayStop& self) { return self.lanelets(); })
    .def("stopLines", +[](AllWayStop& self) { return self.stopLines(); })
    .def("trafficSigns", +[](AllWayStop& self) { return self.trafficSigns(); })
    .def("addTrafficSign", &AllWayStop::addTrafficSign)
    .def("removeTrafficSign", &AllWayStop::removeTrafficSign)
    .def("addLanelet", &AllWayStop::addLanelet)
    .def("removeLanelet", &AllWayStop::removeLanelet);
implicitly_convertible<std::shared_ptr<AllWayStop>, RegulatoryElementPtr>();
// Helper aggregate: a group of traffic signs plus an optional explicit type string.
// Two constructor overloads are exposed: with and without the type.
class_<TrafficSignsWithType, std::shared_ptr<TrafficSignsWithType>>("TrafficSignsWithType", no_init)
    .def("__init__", make_constructor(+[](LineStringsOrPolygons3d ls) {
           return std::make_shared<TrafficSignsWithType>(TrafficSignsWithType{std::move(ls)});
         }))
    .def("__init__", make_constructor(+[](LineStringsOrPolygons3d ls, std::string type) {
           return std::make_shared<TrafficSignsWithType>(TrafficSignsWithType{std::move(ls), std::move(type)});
         }))
    .add_property("trafficSigns", &TrafficSignsWithType::trafficSigns)
    .add_property("type", &TrafficSignsWithType::type);
// Traffic sign rule; signs may be cancelled by other signs, with optional
// reference/cancel lines marking where the rule starts and ends.
class_<TrafficSign, boost::noncopyable, std::shared_ptr<TrafficSign>, bases<RegulatoryElement>>(
    "TrafficSign", "A traffic sign regulatory element", no_init)
    .def("__init__", make_constructor(&TrafficSign::make, default_call_policies(),
                                      (arg("id"), arg("attributes"), arg("trafficSigns"),
                                       arg("cancellingTrafficSigns") = TrafficSignsWithType{},
                                       arg("refLines") = LineStrings3d(), arg("cancelLines") = LineStrings3d())))
    // Accessor lambdas return copies to keep Python objects independent of C++ lifetimes.
    .def("trafficSigns", +[](TrafficSign& self) { return self.trafficSigns(); })
    .def("cancellingTrafficSigns", +[](TrafficSign& self) { return self.cancellingTrafficSigns(); })
    .def("refLines", +[](TrafficSign& self) { return self.refLines(); })
    .def("cancelLines", +[](TrafficSign& self) { return self.cancelLines(); })
    .def("addTrafficSign", &TrafficSign::addTrafficSign)
    .def("removeTrafficSign", &TrafficSign::removeTrafficSign)
    .def("addRefLine", &TrafficSign::addRefLine)
    .def("removeRefLine", &TrafficSign::removeRefLine)
    .def("addCancellingTrafficSign", &TrafficSign::addCancellingTrafficSign)
    .def("removeCancellingTrafficSign", &TrafficSign::removeCancellingTrafficSign)
    .def("addCancellingRefLine", &TrafficSign::addCancellingRefLine)
    .def("removeCancellingRefLine", &TrafficSign::removeCancellingRefLine)
    .def("type", &TrafficSign::type)
    .def("cancelTypes", &TrafficSign::cancelTypes);
implicitly_convertible<std::shared_ptr<TrafficSign>, RegulatoryElementPtr>();
implicitly_convertible<std::shared_ptr<SpeedLimit>, RegulatoryElementPtr>();
// Speed limit rule: a TrafficSign whose type encodes the limit.
class_<SpeedLimit, boost::noncopyable, std::shared_ptr<SpeedLimit>, bases<TrafficSign>>(  // NOLINT
    "SpeedLimit", "A speed limit regulatory element", no_init)
    // Fixed: the constructor was bound to &TrafficSign::make, so SpeedLimit(...)
    // from Python produced a plain TrafficSign instead of a SpeedLimit instance.
    .def("__init__", make_constructor(&SpeedLimit::make, default_call_policies(),
                                      (arg("id"), arg("attributes"), arg("trafficSigns"),
                                       arg("cancellingTrafficSigns") = TrafficSignsWithType{},
                                       arg("refLines") = LineStrings3d(), arg("cancelLines") = LineStrings3d())));
// Base layer instantiations must be registered before the derived layer wrappers below.
class_<PrimitiveLayer<Area>, boost::noncopyable>("PrimitiveLayerArea", no_init);        // NOLINT
class_<PrimitiveLayer<Lanelet>, boost::noncopyable>("PrimitiveLayerLanelet", no_init);  // NOLINT
// Layer wrappers; findUsages overloads answer "which primitives reference X?".
wrapLayer<AreaLayer, bases<PrimitiveLayer<Area>>>("AreaLayer")
    .def("findUsages", +[](AreaLayer& self, RegulatoryElementPtr& e) { return self.findUsages(e); })
    .def("findUsages", +[](AreaLayer& self, LineString3d& ls) { return self.findUsages(ls); });
wrapLayer<LaneletLayer, bases<PrimitiveLayer<Lanelet>>>("LaneletLayer")
    .def("findUsages", +[](LaneletLayer& self, RegulatoryElementPtr& e) { return self.findUsages(e); })
    .def("findUsages", +[](LaneletLayer& self, LineString3d& ls) { return self.findUsages(ls); });
wrapLayer<PolygonLayer>("PolygonLayer").def("findUsages", +[](PolygonLayer& self, Point3d& p) {
  return self.findUsages(p);
});
wrapLayer<LineStringLayer>("LineStringLayer").def("findUsages", +[](LineStringLayer& self, Point3d& p) {
  return self.findUsages(p);
});
wrapLayer<PointLayer>("PointLayer");
wrapLayer<RegulatoryElementLayer>("RegulatoryElementLayer");
// Shared layer container; base of both LaneletMap and LaneletSubmap, so both
// expose the same six layers (members are inherited from LaneletMapLayers).
class_<LaneletMapLayers, boost::noncopyable>("LaneletMapLayers", "Container for the layers of a lanelet map")
    .def_readonly("laneletLayer", &LaneletMap::laneletLayer, "Lanelets")
    .def_readonly("areaLayer", &LaneletMap::areaLayer)
    .def_readonly("regulatoryElementLayer", &LaneletMap::regulatoryElementLayer)
    .def_readonly("lineStringLayer", &LaneletMap::lineStringLayer)
    .def_readonly("polygonLayer", &LaneletMap::polygonLayer)
    .def_readonly("pointLayer", &LaneletMap::pointLayer);
// Full lanelet map; add() overloads recursively insert a primitive and everything
// it references (selectAdd picks the right overload per primitive type).
class_<LaneletMap, bases<LaneletMapLayers>, LaneletMapPtr, boost::noncopyable>(
    "LaneletMap", "Object for managing a lanelet map", init<>("LaneletMap()"))
    .def("add", selectAdd<Point3d>())
    .def("add", selectAdd<Lanelet>())
    .def("add", selectAdd<Area>())
    .def("add", selectAdd<LineString3d>())
    .def("add", selectAdd<Polygon3d>())
    .def("add", selectAdd<const RegulatoryElementPtr&>());
register_ptr_to_python<LaneletMapConstPtr>();
// Submap: holds only explicitly added primitives; laneletMap() expands it into a
// self-contained LaneletMap copy.
class_<LaneletSubmap, bases<LaneletMapLayers>, LaneletSubmapPtr, boost::noncopyable>(
    "LaneletSubmap", "Object for managing parts of a lanelet map", init<>("LaneletSubmap()"))
    .def("laneletMap", +[](LaneletSubmap& self) { return LaneletMapPtr{self.laneletMap()}; })
    .def("add", selectSubmapAdd<Point3d>())
    .def("add", selectSubmapAdd<Lanelet>())
    .def("add", selectSubmapAdd<Area>())
    .def("add", selectSubmapAdd<LineString3d>())
    .def("add", selectSubmapAdd<Polygon3d>())
    .def("add", selectSubmapAdd<const RegulatoryElementPtr&>());
register_ptr_to_python<LaneletSubmapConstPtr>();
// Free helper functions exposed at module level.
def("getId", static_cast<Id (&)()>(utils::getId), "Returns a unique id");
def("registerId", &utils::registerId, "Registers an id");
def("createMapFromPoints", createMapWrapper<Points3d>, "Create map from primitives");
def("createMapFromLineStrings", createMapWrapper<LineStrings3d>, "Create map from primitives");
def("createMapFromPolygons", createMapWrapper<Polygons3d>, "Create map from primitives");
def("createMapFromLanelets", createMapWrapper<Lanelets>, "Create map from primitives");
def("createMapFromAreas", createMapWrapper<Areas>, "Create map from primitives");
// Fixed: user-visible docstring typo "sbumap" -> "submap".
def("createSubmapFromPoints", createSubmapWrapper<Points3d>, "Create submap from primitives");
def("createSubmapFromLineStrings", createSubmapWrapper<LineStrings3d>, "Create submap from primitives");
def("createSubmapFromPolygons", createSubmapWrapper<Polygons3d>, "Create submap from primitives");
def("createSubmapFromLanelets", createSubmapWrapper<Lanelets>, "Create submap from primitives");
def("createSubmapFromAreas", createSubmapWrapper<Areas>, "Create submap from primitives");
}
// --- file boundary (stray '|' concatenation artifact removed) ---
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include "alert.h"
#include "checkpoints.h"
#include "db.h"
#include "txdb.h"
#include "net.h"
#include "init.h"
#include "ui_interface.h"
#include "checkqueue.h"
#include <boost/algorithm/string/replace.hpp>
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
using namespace std;
using namespace boost;
//
// Global state
//
// Wallet registry: wallets registered here receive transaction/block notifications.
CCriticalSection cs_setpwalletRegistered;
set<CWallet*> setpwalletRegistered;
// Main lock protecting most of the validation state below.
CCriticalSection cs_main;
CTxMemPool mempool;
unsigned int nTransactionsUpdated = 0;
// Index of all known blocks (main chain and side chains).
map<uint256, CBlockIndex*> mapBlockIndex;
uint256 hashGenesisBlock("0xcd5bf709762129f175ed3f3fa407d48f9df8c4e192991c1cb2ad226cc3f43f9d");
static CBigNum bnProofOfWorkLimit = CBigNum().SetCompact(504365644); // kripton: starting difficulty is 1 / 2^12
// Best-chain tracking: height, cumulative work and tip of the active chain.
CBlockIndex* pindexGenesisBlock = NULL;
int nBestHeight = -1;
uint256 nBestChainWork = 0;
uint256 nBestInvalidWork = 0;
uint256 hashBestChain = 0;
CBlockIndex* pindexBest = NULL;
set<CBlockIndex*, CBlockIndexWorkComparator> setBlockIndexValid; // may contain all CBlockIndex*'s that have validness >=BLOCK_VALID_TRANSACTIONS, and must contain those who aren't failed
int64 nTimeBestReceived = 0;
int nScriptCheckThreads = 0;
bool fImporting = false;
bool fReindex = false;
bool fBenchmark = false;
bool fTxIndex = false;
// Soft cap (in entries) for the coin cache before it is flushed.
unsigned int nCoinCacheSize = 5000;
/** Fees smaller than this (in satoshi) are considered zero fee (for transaction creation) */
int64 CTransaction::nMinTxFee = 2000000;
/** Fees smaller than this (in satoshi) are considered zero fee (for relaying) */
int64 CTransaction::nMinRelayTxFee = 2000000;
CMedianFilter<int> cPeerBlockCounts(8, 0); // Amount of blocks that other nodes claim to have
// Orphans: blocks/transactions received before their parents.
map<uint256, CBlock*> mapOrphanBlocks;
multimap<uint256, CBlock*> mapOrphanBlocksByPrev;
map<uint256, CTransaction> mapOrphanTransactions;
map<uint256, set<uint256> > mapOrphanTransactionsByPrev;
// Constant stuff for coinbase transactions we create:
CScript COINBASE_FLAGS;
const string strMessageMagic = "kripton Signed Message:\n";
// Mining statistics.
double dHashesPerSec = 0.0;
int64 nHPSTimerStart = 0;
// Settings
int64 nTransactionFee = 0;
int64 nMinimumInputValue = DUST_HARD_LIMIT;
//////////////////////////////////////////////////////////////////////////////
//
// dispatching functions
//
// These functions dispatch to one or all registered wallets
// Register a wallet so it receives the notifications dispatched below.
// The inner scope bounds the lifetime of the LOCK guard.
void RegisterWallet(CWallet* pwalletIn)
{
    {
        LOCK(cs_setpwalletRegistered);
        setpwalletRegistered.insert(pwalletIn);
    }
}
// Remove a wallet from the notification registry.
void UnregisterWallet(CWallet* pwalletIn)
{
    {
        LOCK(cs_setpwalletRegistered);
        setpwalletRegistered.erase(pwalletIn);
    }
}
// get the wallet transaction with the given hash (if it exists)
// Returns true and fills wtx on the first wallet that knows the transaction.
bool static GetTransaction(const uint256& hashTx, CWalletTx& wtx)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        if (pwallet->GetTransaction(hashTx,wtx))
            return true;
    return false;
}
// erases transaction with the given hash from all wallets
void static EraseFromWallets(uint256 hash)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->EraseFromWallet(hash);
}
// make sure all wallets know about the given transaction, in the given block
void SyncWithWallets(const uint256 &hash, const CTransaction& tx, const CBlock* pblock, bool fUpdate)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->AddToWalletIfInvolvingMe(hash, tx, pblock, fUpdate);
}
// notify wallets about a new best chain
void static SetBestChain(const CBlockLocator& loc)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->SetBestChain(loc);
}
// notify wallets about an updated transaction
void static UpdatedTransaction(const uint256& hashTx)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->UpdatedTransaction(hashTx);
}
// dump all wallets
void static PrintWallets(const CBlock& block)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->PrintWallet(block);
}
// notify wallets about an incoming inventory (for request counts)
void static Inventory(const uint256& hash)
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->Inventory(hash);
}
// ask wallets to resend their transactions
void static ResendWalletTransactions()
{
    BOOST_FOREACH(CWallet* pwallet, setpwalletRegistered)
        pwallet->ResendWalletTransactions();
}
//////////////////////////////////////////////////////////////////////////////
//
// CCoinsView implementations
//
// Base class: a do-nothing view of the unspent-coin set. Every query fails /
// returns empty; concrete backends (DB, cache, mempool) override these.
bool CCoinsView::GetCoins(const uint256 &txid, CCoins &coins) { return false; }
bool CCoinsView::SetCoins(const uint256 &txid, const CCoins &coins) { return false; }
bool CCoinsView::HaveCoins(const uint256 &txid) { return false; }
CBlockIndex *CCoinsView::GetBestBlock() { return NULL; }
bool CCoinsView::SetBestBlock(CBlockIndex *pindex) { return false; }
bool CCoinsView::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) { return false; }
bool CCoinsView::GetStats(CCoinsStats &stats) { return false; }
// Pass-through view: forwards every call to a backing view, which can be
// swapped at runtime via SetBackend (used by the mempool accept path).
CCoinsViewBacked::CCoinsViewBacked(CCoinsView &viewIn) : base(&viewIn) { }
bool CCoinsViewBacked::GetCoins(const uint256 &txid, CCoins &coins) { return base->GetCoins(txid, coins); }
bool CCoinsViewBacked::SetCoins(const uint256 &txid, const CCoins &coins) { return base->SetCoins(txid, coins); }
bool CCoinsViewBacked::HaveCoins(const uint256 &txid) { return base->HaveCoins(txid); }
CBlockIndex *CCoinsViewBacked::GetBestBlock() { return base->GetBestBlock(); }
bool CCoinsViewBacked::SetBestBlock(CBlockIndex *pindex) { return base->SetBestBlock(pindex); }
void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; }
bool CCoinsViewBacked::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) { return base->BatchWrite(mapCoins, pindex); }
bool CCoinsViewBacked::GetStats(CCoinsStats &stats) { return base->GetStats(stats); }
CCoinsViewCache::CCoinsViewCache(CCoinsView &baseIn, bool fDummy) : CCoinsViewBacked(baseIn), pindexTip(NULL) { }
bool CCoinsViewCache::GetCoins(const uint256 &txid, CCoins &coins) {
if (cacheCoins.count(txid)) {
coins = cacheCoins[txid];
return true;
}
if (base->GetCoins(txid, coins)) {
cacheCoins[txid] = coins;
return true;
}
return false;
}
// Internal lookup: returns an iterator into cacheCoins for txid, pulling the
// entry in from the backing view on a miss. Returns cacheCoins.end() when the
// coins exist nowhere. lower_bound doubles as an insertion hint on miss.
std::map<uint256,CCoins>::iterator CCoinsViewCache::FetchCoins(const uint256 &txid) {
    std::map<uint256,CCoins>::iterator it = cacheCoins.lower_bound(txid);
    if (it != cacheCoins.end() && it->first == txid)
        return it;
    CCoins tmp;
    if (!base->GetCoins(txid,tmp))
        return cacheCoins.end();
    // Insert a default entry at the hinted position, then swap the fetched data
    // in to avoid copying the CCoins a second time.
    std::map<uint256,CCoins>::iterator ret = cacheCoins.insert(it, std::make_pair(txid, CCoins()));
    tmp.swap(ret->second);
    return ret;
}
// Reference accessor: caller must know the coins exist (asserts otherwise).
CCoins &CCoinsViewCache::GetCoins(const uint256 &txid) {
    std::map<uint256,CCoins>::iterator it = FetchCoins(txid);
    assert(it != cacheCoins.end());
    return it->second;
}
// Store coins in the cache only; persisted on the next Flush().
bool CCoinsViewCache::SetCoins(const uint256 &txid, const CCoins &coins) {
    cacheCoins[txid] = coins;
    return true;
}
bool CCoinsViewCache::HaveCoins(const uint256 &txid) {
    return FetchCoins(txid) != cacheCoins.end();
}
// Cached best block pointer; lazily fetched from the backing view.
CBlockIndex *CCoinsViewCache::GetBestBlock() {
    if (pindexTip == NULL)
        pindexTip = base->GetBestBlock();
    return pindexTip;
}
// Update only the cached tip; written through on Flush().
bool CCoinsViewCache::SetBestBlock(CBlockIndex *pindex) {
    pindexTip = pindex;
    return true;
}
// Absorb a whole batch of coins into the cache and adopt the new tip.
// Always succeeds; the data reaches the backing view on the next Flush().
bool CCoinsViewCache::BatchWrite(const std::map<uint256, CCoins> &mapCoins, CBlockIndex *pindex) {
    // ++it instead of it++: post-increment needlessly copies the map iterator.
    for (std::map<uint256, CCoins>::const_iterator it = mapCoins.begin(); it != mapCoins.end(); ++it)
        cacheCoins[it->first] = it->second;
    pindexTip = pindex;
    return true;
}
// Push all cached coins (and the cached tip) down to the backing view; the
// cache is cleared only if the write succeeded.
bool CCoinsViewCache::Flush() {
    bool fOk = base->BatchWrite(cacheCoins, pindexTip);
    if (fOk)
        cacheCoins.clear();
    return fOk;
}
// Number of entries currently held in the cache (used against nCoinCacheSize).
unsigned int CCoinsViewCache::GetCacheSize() {
    return cacheCoins.size();
}
/** CCoinsView that brings transactions from a memorypool into view.
 It does not check for spendings by memory pool transactions. */
CCoinsViewMemPool::CCoinsViewMemPool(CCoinsView &baseIn, CTxMemPool &mempoolIn) : CCoinsViewBacked(baseIn), mempool(mempoolIn) { }
// Backing view first; otherwise synthesize coins from an unconfirmed mempool
// transaction, tagged with MEMPOOL_HEIGHT as its (fake) confirmation height.
bool CCoinsViewMemPool::GetCoins(const uint256 &txid, CCoins &coins) {
    if (base->GetCoins(txid, coins))
        return true;
    if (mempool.exists(txid)) {
        const CTransaction &tx = mempool.lookup(txid);
        coins = CCoins(tx, MEMPOOL_HEIGHT);
        return true;
    }
    return false;
}
bool CCoinsViewMemPool::HaveCoins(const uint256 &txid) {
    return mempool.exists(txid) || base->HaveCoins(txid);
}
// Global chainstate handles, initialized during startup (init.cpp).
CCoinsViewCache *pcoinsTip = NULL;   // cache over the coin database at the chain tip
CBlockTreeDB *pblocktree = NULL;     // block index database
//////////////////////////////////////////////////////////////////////////////
//
// mapOrphanTransactions
//
// Store a transaction whose inputs are not yet known, indexed both by its own
// hash and by each prevout hash so it can be reconsidered when a parent arrives.
// Returns false if already stored or if the transaction is too large.
bool AddOrphanTx(const CTransaction& tx)
{
    uint256 hash = tx.GetHash();
    if (mapOrphanTransactions.count(hash))
        return false;
    // Ignore big transactions, to avoid a
    // send-big-orphans memory exhaustion attack. If a peer has a legitimate
    // large transaction with a missing parent then we assume
    // it will rebroadcast it later, after the parent transaction(s)
    // have been mined or received.
    // 10,000 orphans, each of which is at most 5,000 bytes big is
    // at most 50 megabytes of orphans:
    unsigned int sz = tx.GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    if (sz > 5000)
    {
        printf("ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString().c_str());
        return false;
    }
    mapOrphanTransactions[hash] = tx;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
        mapOrphanTransactionsByPrev[txin.prevout.hash].insert(hash);
    printf("stored orphan tx %s (mapsz %"PRIszu")\n", hash.ToString().c_str(),
        mapOrphanTransactions.size());
    return true;
}
// Remove an orphan transaction (if present) and unlink it from the
// by-previous-output index, dropping index entries that become empty.
void static EraseOrphanTx(uint256 hash)
{
    // Single find() instead of the previous count()+operator[] double lookup.
    map<uint256, CTransaction>::iterator it = mapOrphanTransactions.find(hash);
    if (it == mapOrphanTransactions.end())
        return;
    const CTransaction& tx = it->second;
    BOOST_FOREACH(const CTxIn& txin, tx.vin)
    {
        // find() instead of operator[]: the old code inserted (and immediately
        // erased) an empty set whenever the prevout hash was not indexed at all.
        map<uint256, set<uint256> >::iterator itPrev = mapOrphanTransactionsByPrev.find(txin.prevout.hash);
        if (itPrev == mapOrphanTransactionsByPrev.end())
            continue;
        itPrev->second.erase(hash);
        if (itPrev->second.empty())
            mapOrphanTransactionsByPrev.erase(itPrev);
    }
    mapOrphanTransactions.erase(it);
}
// Evict random orphans until at most nMaxOrphans remain; returns the number
// evicted. A random hash + lower_bound picks a uniformly-ish random victim
// without iterating the whole map.
unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans)
{
    unsigned int nEvicted = 0;
    while (mapOrphanTransactions.size() > nMaxOrphans)
    {
        // Evict a random orphan:
        uint256 randomhash = GetRandHash();
        map<uint256, CTransaction>::iterator it = mapOrphanTransactions.lower_bound(randomhash);
        if (it == mapOrphanTransactions.end())
            it = mapOrphanTransactions.begin();
        EraseOrphanTx(it->first);
        ++nEvicted;
    }
    return nEvicted;
}
//////////////////////////////////////////////////////////////////////////////
//
// CTransaction / CTxOut
//
// Always false for this coin: dust outputs are allowed and deterred by fees
// instead (see GetMinFee's DUST_SOFT_LIMIT surcharge).
bool CTxOut::IsDust() const
{
    // kripton: IsDust() detection disabled, allows any valid dust to be relayed.
    // The fees imposed on each dust txo is considered sufficient spam deterrant.
    return false;
}
// Policy (not consensus) check: is this a "standard" transaction we are willing
// to relay/mine? On failure, strReason names the first violated rule.
bool CTransaction::IsStandard(string& strReason) const
{
    if (nVersion > CTransaction::CURRENT_VERSION || nVersion < 1) {
        strReason = "version";
        return false;
    }
    if (!IsFinal()) {
        strReason = "not-final";
        return false;
    }
    // Extremely large transactions with lots of inputs can cost the network
    // almost as much to process as they cost the sender in fees, because
    // computing signature hashes is O(ninputs*txsize). Limiting transactions
    // to MAX_STANDARD_TX_SIZE mitigates CPU exhaustion attacks.
    unsigned int sz = this->GetSerializeSize(SER_NETWORK, CTransaction::CURRENT_VERSION);
    if (sz >= MAX_STANDARD_TX_SIZE) {
        strReason = "tx-size";
        return false;
    }
    BOOST_FOREACH(const CTxIn& txin, vin)
    {
        // Biggest 'standard' txin is a 3-signature 3-of-3 CHECKMULTISIG
        // pay-to-script-hash, which is 3 ~80-byte signatures, 3
        // ~65-byte public keys, plus a few script ops.
        if (txin.scriptSig.size() > 500) {
            strReason = "scriptsig-size";
            return false;
        }
        // scriptSigs must only push data; any other opcode is nonstandard.
        if (!txin.scriptSig.IsPushOnly()) {
            strReason = "scriptsig-not-pushonly";
            return false;
        }
    }
    BOOST_FOREACH(const CTxOut& txout, vout) {
        // Output scripts must match one of the known standard templates.
        if (!::IsStandard(txout.scriptPubKey)) {
            strReason = "scriptpubkey";
            return false;
        }
        // Note: always false for this coin (see CTxOut::IsDust above).
        if (txout.IsDust()) {
            strReason = "dust";
            return false;
        }
    }
    return true;
}
//
// Check transaction inputs, and make sure any
// pay-to-script-hash transactions are evaluating IsStandard scripts
//
// Why bother? To avoid denial-of-service attacks; an attacker
// can submit a standard HASH... OP_EQUAL transaction,
// which will get accepted into blocks. The redemption
// script can be anything; an attacker could use a very
// expensive-to-check-upon-redemption script like:
// DUP CHECKSIG DROP ... repeated 100 times... OP_1
//
bool CTransaction::AreInputsStandard(CCoinsViewCache& mapInputs) const
{
    if (IsCoinBase())
        return true; // Coinbases don't use vin normally
    for (unsigned int i = 0; i < vin.size(); i++)
    {
        const CTxOut& prev = GetOutputFor(vin[i], mapInputs);
        vector<vector<unsigned char> > vSolutions;
        txnouttype whichType;
        // get the scriptPubKey corresponding to this input:
        const CScript& prevScript = prev.scriptPubKey;
        if (!Solver(prevScript, whichType, vSolutions))
            return false;
        // Expected number of stack items the scriptSig must supply for this
        // output type; negative means the type cannot be satisfied sanely.
        int nArgsExpected = ScriptSigArgsExpected(whichType, vSolutions);
        if (nArgsExpected < 0)
            return false;
        // Transactions with extra stuff in their scriptSigs are
        // non-standard. Note that this EvalScript() call will
        // be quick, because if there are any operations
        // beside "push data" in the scriptSig the
        // IsStandard() call returns false
        vector<vector<unsigned char> > stack;
        if (!EvalScript(stack, vin[i].scriptSig, *this, i, false, 0))
            return false;
        if (whichType == TX_SCRIPTHASH)
        {
            if (stack.empty())
                return false;
            // The last stack item is the serialized redeem script; it too must
            // be a standard, non-nested-P2SH template.
            CScript subscript(stack.back().begin(), stack.back().end());
            vector<vector<unsigned char> > vSolutions2;
            txnouttype whichType2;
            if (!Solver(subscript, whichType2, vSolutions2))
                return false;
            if (whichType2 == TX_SCRIPTHASH)
                return false;
            int tmpExpected;
            tmpExpected = ScriptSigArgsExpected(whichType2, vSolutions2);
            if (tmpExpected < 0)
                return false;
            nArgsExpected += tmpExpected;
        }
        // The scriptSig must push exactly the expected number of items.
        if (stack.size() != (unsigned int)nArgsExpected)
            return false;
    }
    return true;
}
// Count signature operations in this transaction's own scripts ("legacy"
// counting: scriptPubKeys of referenced outputs are not inspected here).
unsigned int CTransaction::GetLegacySigOpCount() const
{
    unsigned int nCount = 0;
    for (unsigned int i = 0; i < vin.size(); i++)
        nCount += vin[i].scriptSig.GetSigOpCount(false);
    for (unsigned int i = 0; i < vout.size(); i++)
        nCount += vout[i].scriptPubKey.GetSigOpCount(false);
    return nCount;
}
// Record which block this transaction is in (hashBlock, nIndex) and its merkle
// branch. If pblock is NULL, the containing block is looked up via the coin
// database and read from disk. Returns the number of confirmations (depth in
// the main chain), or 0 if the block is unknown or not in the main chain.
int CMerkleTx::SetMerkleBranch(const CBlock* pblock)
{
    CBlock blockTmp;
    if (pblock == NULL) {
        // Locate the block that spends/created these coins and load it.
        CCoins coins;
        if (pcoinsTip->GetCoins(GetHash(), coins)) {
            CBlockIndex *pindex = FindBlockByHeight(coins.nHeight);
            if (pindex) {
                if (!blockTmp.ReadFromDisk(pindex))
                    return 0;
                pblock = &blockTmp;
            }
        }
    }
    if (pblock) {
        // Update the tx's hashBlock
        hashBlock = pblock->GetHash();
        // Locate the transaction
        for (nIndex = 0; nIndex < (int)pblock->vtx.size(); nIndex++)
            if (pblock->vtx[nIndex] == *(CTransaction*)this)
                break;
        if (nIndex == (int)pblock->vtx.size())
        {
            vMerkleBranch.clear();
            nIndex = -1;
            printf("ERROR: SetMerkleBranch() : couldn't find tx in block\n");
            return 0;
        }
        // Fill in merkle branch
        vMerkleBranch = pblock->GetMerkleBranch(nIndex);
    }
    // Is the tx in a block that's in the main chain
    map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
    if (mi == mapBlockIndex.end())
        return 0;
    CBlockIndex* pindex = (*mi).second;
    if (!pindex || !pindex->IsInMainChain())
        return 0;
    return pindexBest->nHeight - pindex->nHeight + 1;
}
// Context-free consensus validity checks (no UTXO/chain access). On failure the
// error is recorded in state with a DoS score for the sending peer.
bool CTransaction::CheckTransaction(CValidationState &state) const
{
    // Basic checks that don't depend on any context
    if (vin.empty())
        return state.DoS(10, error("CTransaction::CheckTransaction() : vin empty"));
    if (vout.empty())
        return state.DoS(10, error("CTransaction::CheckTransaction() : vout empty"));
    // Size limits
    if (::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
        return state.DoS(100, error("CTransaction::CheckTransaction() : size limits failed"));
    // Check for negative or overflow output values
    int64 nValueOut = 0;
    BOOST_FOREACH(const CTxOut& txout, vout)
    {
        if (txout.nValue < 0)
            return state.DoS(100, error("CTransaction::CheckTransaction() : txout.nValue negative"));
        if (txout.nValue > MAX_MONEY)
            return state.DoS(100, error("CTransaction::CheckTransaction() : txout.nValue too high"));
        nValueOut += txout.nValue;
        // The running total is range-checked each step to catch overflow early.
        if (!MoneyRange(nValueOut))
            return state.DoS(100, error("CTransaction::CheckTransaction() : txout total out of range"));
    }
    // Check for duplicate inputs
    set<COutPoint> vInOutPoints;
    BOOST_FOREACH(const CTxIn& txin, vin)
    {
        if (vInOutPoints.count(txin.prevout))
            return state.DoS(100, error("CTransaction::CheckTransaction() : duplicate inputs"));
        vInOutPoints.insert(txin.prevout);
    }
    if (IsCoinBase())
    {
        // Coinbase scriptSig carries arbitrary data; only its size is bounded.
        if (vin[0].scriptSig.size() < 2 || vin[0].scriptSig.size() > 100)
            return state.DoS(100, error("CTransaction::CheckTransaction() : coinbase script size"));
    }
    else
    {
        // Non-coinbase inputs must reference a real previous output.
        BOOST_FOREACH(const CTxIn& txin, vin)
            if (txin.prevout.IsNull())
                return state.DoS(10, error("CTransaction::CheckTransaction() : prevout is null"));
    }
    return true;
}
// Compute the minimum fee (policy) for this transaction: a per-kilobyte base
// fee, with free allowances for small transactions, a per-output surcharge for
// dust, and a multiplier as the block being built approaches full.
int64 CTransaction::GetMinFee(unsigned int nBlockSize, bool fAllowFree,
                              enum GetMinFee_mode mode) const
{
    // Base fee is either nMinTxFee or nMinRelayTxFee
    int64 nBaseFee = (mode == GMF_RELAY) ? nMinRelayTxFee : nMinTxFee;
    unsigned int nBytes = ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION);
    unsigned int nNewBlockSize = nBlockSize + nBytes;
    int64 nMinFee = (1 + (int64)nBytes / 1000) * nBaseFee;
    if (fAllowFree)
    {
        // nBlockSize == 1 is a sentinel meaning "relay context", not a real block.
        if (nBlockSize == 1)
        {
            // Transactions under 10K are free
            // (about 4500 BTC if made of 50 BTC inputs)
            if (nBytes < 10000)
                nMinFee = 0;
        }
        else
        {
            // Free transaction area
            if (nNewBlockSize < 27000)
                nMinFee = 0;
        }
    }
    // kripton
    // To limit dust spam, add nBaseFee for each output less than DUST_SOFT_LIMIT
    BOOST_FOREACH(const CTxOut& txout, vout)
        if (txout.nValue < DUST_SOFT_LIMIT)
            nMinFee += nBaseFee;
    // Raise the price as the block approaches full
    if (nBlockSize != 1 && nNewBlockSize >= MAX_BLOCK_SIZE_GEN/2)
    {
        if (nNewBlockSize >= MAX_BLOCK_SIZE_GEN)
            return MAX_MONEY;
        nMinFee *= MAX_BLOCK_SIZE_GEN / (MAX_BLOCK_SIZE_GEN - nNewBlockSize);
    }
    if (!MoneyRange(nMinFee))
        nMinFee = MAX_MONEY;
    return nMinFee;
}
// Mark as spent, in the given coins record, every output of hashTx that some
// mempool transaction already spends (so they are not offered twice).
void CTxMemPool::pruneSpent(const uint256 &hashTx, CCoins &coins)
{
    LOCK(cs);
    // mapNextTx is keyed by COutPoint(hash, n); start at output 0 of hashTx.
    std::map<COutPoint, CInPoint>::iterator it = mapNextTx.lower_bound(COutPoint(hashTx, 0));
    // iterate over all COutPoints in mapNextTx whose hash equals the provided hashTx
    while (it != mapNextTx.end() && it->first.hash == hashTx) {
        coins.Spend(it->first.n); // and remove those outputs from coins
        it++;
    }
}
// Validate a loose transaction and, on success, add it to the memory pool and
// notify wallets. fCheckInputs enables full input/script validation against the
// UTXO set; fLimitFree applies fee and rate-limit policy to free transactions;
// *pfMissingInputs is set when rejection is due to unknown parent transactions.
bool CTxMemPool::accept(CValidationState &state, CTransaction &tx, bool fCheckInputs, bool fLimitFree,
                        bool* pfMissingInputs)
{
    if (pfMissingInputs)
        *pfMissingInputs = false;
    if (!tx.CheckTransaction(state))
        return error("CTxMemPool::accept() : CheckTransaction failed");
    // Coinbase is only valid in a block, not as a loose transaction
    if (tx.IsCoinBase())
        return state.DoS(100, error("CTxMemPool::accept() : coinbase as individual tx"));
    // To help v0.1.5 clients who would see it as a negative number
    if ((int64)tx.nLockTime > std::numeric_limits<int>::max())
        return error("CTxMemPool::accept() : not accepting nLockTime beyond 2038 yet");
    // Rather not work on nonstandard transactions (unless -testnet)
    string strNonStd;
    if (!fTestNet && !tx.IsStandard(strNonStd))
        return error("CTxMemPool::accept() : nonstandard transaction (%s)",
                     strNonStd.c_str());
    // is it already in the memory pool?
    uint256 hash = tx.GetHash();
    {
        LOCK(cs);
        if (mapTx.count(hash))
            return false;
    }
    // Check for conflicts with in-memory transactions
    CTransaction* ptxOld = NULL;
    for (unsigned int i = 0; i < tx.vin.size(); i++)
    {
        COutPoint outpoint = tx.vin[i].prevout;
        if (mapNextTx.count(outpoint))
        {
            // Disable replacement feature for now
            return false;
            // NOTE(review): everything below in this branch is dead code kept from
            // the disabled transaction-replacement feature (note the inner loop
            // even shadows `i`); intentionally left untouched.
            // Allow replacing with a newer version of the same transaction
            if (i != 0)
                return false;
            ptxOld = mapNextTx[outpoint].ptx;
            if (ptxOld->IsFinal())
                return false;
            if (!tx.IsNewerThan(*ptxOld))
                return false;
            for (unsigned int i = 0; i < tx.vin.size(); i++)
            {
                COutPoint outpoint = tx.vin[i].prevout;
                if (!mapNextTx.count(outpoint) || mapNextTx[outpoint].ptx != ptxOld)
                    return false;
            }
            break;
        }
    }
    if (fCheckInputs)
    {
        CCoinsView dummy;
        CCoinsViewCache view(dummy);
        {
            LOCK(cs);
            CCoinsViewMemPool viewMemPool(*pcoinsTip, *this);
            view.SetBackend(viewMemPool);
            // do we already have it?
            if (view.HaveCoins(hash))
                return false;
            // do all inputs exist?
            // Note that this does not check for the presence of actual outputs (see the next check for that),
            // only helps filling in pfMissingInputs (to determine missing vs spent).
            // Fixed: iterate by const reference; the old code copied every CTxIn.
            BOOST_FOREACH(const CTxIn& txin, tx.vin) {
                if (!view.HaveCoins(txin.prevout.hash)) {
                    if (pfMissingInputs)
                        *pfMissingInputs = true;
                    return false;
                }
            }
            // are the actual inputs available?
            if (!tx.HaveInputs(view))
                return state.Invalid(error("CTxMemPool::accept() : inputs already spent"));
            // Bring the best block into scope
            view.GetBestBlock();
            // we have all inputs cached now, so switch back to dummy, so we don't need to keep lock on mempool
            view.SetBackend(dummy);
        }
        // Check for non-standard pay-to-script-hash in inputs
        if (!tx.AreInputsStandard(view) && !fTestNet)
            return error("CTxMemPool::accept() : nonstandard transaction input");
        // Note: if you modify this code to accept non-standard transactions, then
        // you should add code here to check that the transaction does a
        // reasonable number of ECDSA signature verifications.
        int64 nFees = tx.GetValueIn(view)-tx.GetValueOut();
        unsigned int nSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
        // Don't accept it if it can't get into a block
        int64 txMinFee = tx.GetMinFee(1000, true, GMF_RELAY);
        if (fLimitFree && nFees < txMinFee)
            return error("CTxMemPool::accept() : not enough fees %s, %"PRI64d" < %"PRI64d,
                         hash.ToString().c_str(),
                         nFees, txMinFee);
        // Continuously rate-limit free transactions
        // This mitigates 'penny-flooding' -- sending thousands of free transactions just to
        // be annoying or make others' transactions take longer to confirm.
        if (fLimitFree && nFees < CTransaction::nMinRelayTxFee)
        {
            static double dFreeCount;
            static int64 nLastTime;
            int64 nNow = GetTime();
            LOCK(cs);
            // Use an exponentially decaying ~10-minute window:
            dFreeCount *= pow(1.0 - 1.0/600.0, (double)(nNow - nLastTime));
            nLastTime = nNow;
            // -limitfreerelay unit is thousand-bytes-per-minute
            // At default rate it would take over a month to fill 1GB
            if (dFreeCount >= GetArg("-limitfreerelay", 15)*10*1000)
                return error("CTxMemPool::accept() : free transaction rejected by rate limiter");
            if (fDebug)
                printf("Rate limit dFreeCount: %g => %g\n", dFreeCount, dFreeCount+nSize);
            dFreeCount += nSize;
        }
        // Check against previous transactions
        // This is done last to help prevent CPU exhaustion denial-of-service attacks.
        if (!tx.CheckInputs(state, view, true, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_STRICTENC))
        {
            return error("CTxMemPool::accept() : ConnectInputs failed %s", hash.ToString().c_str());
        }
    }
    // Store transaction in memory
    {
        LOCK(cs);
        if (ptxOld)
        {
            printf("CTxMemPool::accept() : replacing tx %s with new version\n", ptxOld->GetHash().ToString().c_str());
            remove(*ptxOld);
        }
        addUnchecked(hash, tx);
    }
    ///// are we sure this is ok when loading transactions or restoring block txes
    // If updated, erase old tx from wallet
    if (ptxOld)
        EraseFromWallets(ptxOld->GetHash());
    SyncWithWallets(hash, tx, NULL, true);
    printf("CTxMemPool::accept() : accepted %s (poolsz %"PRIszu")\n",
           hash.ToString().c_str(),
           mapTx.size());
    return true;
}
bool CTransaction::AcceptToMemoryPool(CValidationState &state, bool fCheckInputs, bool fLimitFree, bool* pfMissingInputs)
{
    // Forward to the global mempool; translate unexpected runtime errors
    // (e.g. disk/database failures surfacing as std::runtime_error) into an
    // abort recorded on the validation state.
    try {
        return mempool.accept(state, *this, fCheckInputs, fLimitFree, pfMissingInputs);
    } catch (const std::runtime_error &e) {
        return state.Abort(_("System error: ") + e.what());
    }
}
bool CTxMemPool::addUnchecked(const uint256& hash, const CTransaction &tx)
{
    // Add to memory pool without checking anything. Don't call this directly,
    // call CTxMemPool::accept to properly check the transaction first.
    // NOTE(review): no lock is taken here — presumably the caller holds cs;
    // confirm against call sites.
    mapTx[hash] = tx;
    for (unsigned int n = 0; n < tx.vin.size(); n++)
        mapNextTx[tx.vin[n].prevout] = CInPoint(&mapTx[hash], n);
    nTransactionsUpdated++;
    return true;
}
bool CTxMemPool::remove(const CTransaction &tx, bool fRecursive)
{
// Remove transaction from memory pool
{
LOCK(cs);
uint256 hash = tx.GetHash();
if (fRecursive) {
for (unsigned int i = 0; i < tx.vout.size(); i++) {
std::map<COutPoint, CInPoint>::iterator it = mapNextTx.find(COutPoint(hash, i));
if (it != mapNextTx.end())
remove(*it->second.ptx, true);
}
}
if (mapTx.count(hash))
{
BOOST_FOREACH(const CTxIn& txin, tx.vin)
mapNextTx.erase(txin.prevout);
mapTx.erase(hash);
nTransactionsUpdated++;
}
}
return true;
}
bool CTxMemPool::removeConflicts(const CTransaction &tx)
{
    // Remove pool transactions (and their descendants) that spend any of
    // tx's inputs, i.e. that conflict with tx.
    LOCK(cs);
    BOOST_FOREACH(const CTxIn &txin, tx.vin) {
        std::map<COutPoint, CInPoint>::iterator mi = mapNextTx.find(txin.prevout);
        if (mi == mapNextTx.end())
            continue;
        const CTransaction &txConflict = *mi->second.ptx;
        if (txConflict != tx)
            remove(txConflict, true);
    }
    return true;
}
void CTxMemPool::clear()
{
    // Wipe both pool indexes under the pool lock and bump the update counter
    // so observers notice the change.
    LOCK(cs);
    mapNextTx.clear();
    mapTx.clear();
    nTransactionsUpdated++;
}
void CTxMemPool::queryHashes(std::vector<uint256>& vtxid)
{
vtxid.clear();
LOCK(cs);
vtxid.reserve(mapTx.size());
for (map<uint256, CTransaction>::iterator mi = mapTx.begin(); mi != mapTx.end(); ++mi)
vtxid.push_back((*mi).first);
}
int CMerkleTx::GetDepthInMainChain(CBlockIndex* &pindexRet) const
{
    // Unknown block or index: not in a chain at all.
    if (hashBlock == 0 || nIndex == -1)
        return 0;

    // Locate the block this transaction claims to be part of.
    map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashBlock);
    if (mi == mapBlockIndex.end())
        return 0;
    CBlockIndex* pindex = mi->second;
    if (pindex == NULL || !pindex->IsInMainChain())
        return 0;

    // Verify the merkle branch once, caching the result in fMerkleVerified.
    if (!fMerkleVerified)
    {
        if (CBlock::CheckMerkleBranch(GetHash(), vMerkleBranch, nIndex) != pindex->hashMerkleRoot)
            return 0;
        fMerkleVerified = true;
    }

    pindexRet = pindex;
    // Depth is 1 for a transaction in the tip block.
    return pindexBest->nHeight - pindex->nHeight + 1;
}
int CMerkleTx::GetBlocksToMaturity() const
{
    // Only coinbase outputs have a maturity requirement.
    if (!IsCoinBase())
        return 0;
    int nDepth = GetDepthInMainChain();
    return max(0, COINBASE_MATURITY - nDepth);
}
bool CMerkleTx::AcceptToMemoryPool(bool fCheckInputs, bool fLimitFree)
{
CValidationState state;
return CTransaction::AcceptToMemoryPool(state, fCheckInputs, fLimitFree);
}
/// Try to (re)insert this wallet transaction into the memory pool, first
/// resubmitting the supporting (previous) transactions it depends on.
/// Returns the mempool acceptance result for this transaction itself.
///
/// Fix: the original body had an unreachable `return false;` after an inner
/// scope that always returned; the redundant nesting is removed.
bool CWalletTx::AcceptWalletTransaction(bool fCheckInputs)
{
    LOCK(mempool.cs);
    // Add previous supporting transactions first, so our inputs can be found.
    BOOST_FOREACH(CMerkleTx& tx, vtxPrev)
    {
        if (!tx.IsCoinBase())
        {
            uint256 hash = tx.GetHash();
            // Only resubmit if it is not already pooled and its outputs are
            // known to the coin database.
            if (!mempool.exists(hash) && pcoinsTip->HaveCoins(hash))
                tx.AcceptToMemoryPool(fCheckInputs, false);
        }
    }
    return AcceptToMemoryPool(fCheckInputs, false);
}
// Return transaction in tx, and if it was found inside a block, its hash is placed in hashBlock
// Lookup order: 1) mempool, 2) on-disk transaction index (if -txindex),
// 3) (optional, slow) coin database -> block height -> full block scan.
bool GetTransaction(const uint256 &hash, CTransaction &txOut, uint256 &hashBlock, bool fAllowSlow)
{
    CBlockIndex *pindexSlow = NULL;
    {
        LOCK(cs_main);
        {
            // Fastest path: transaction still in the memory pool.
            LOCK(mempool.cs);
            if (mempool.exists(hash))
            {
                txOut = mempool.lookup(hash);
                return true;
            }
        }

        if (fTxIndex) {
            // Use the optional transaction index: it stores the disk position
            // of the containing block plus the offset of the tx within it.
            CDiskTxPos postx;
            if (pblocktree->ReadTxIndex(hash, postx)) {
                CAutoFile file(OpenBlockFile(postx, true), SER_DISK, CLIENT_VERSION);
                CBlockHeader header;
                try {
                    // Read the header first (gives us hashBlock), then seek
                    // past the rest of the block to the transaction itself.
                    file >> header;
                    fseek(file, postx.nTxOffset, SEEK_CUR);
                    file >> txOut;
                } catch (std::exception &e) {
                    return error("%s() : deserialize or I/O error", __PRETTY_FUNCTION__);
                }
                hashBlock = header.GetHash();
                // Sanity check: the deserialized tx must hash to what we asked for.
                if (txOut.GetHash() != hash)
                    return error("%s() : txid mismatch", __PRETTY_FUNCTION__);
                return true;
            }
        }

        if (fAllowSlow) { // use coin database to locate block that contains transaction, and scan it
            int nHeight = -1;
            {
                CCoinsViewCache &view = *pcoinsTip;
                CCoins coins;
                if (view.GetCoins(hash, coins))
                    nHeight = coins.nHeight;
            }
            if (nHeight > 0)
                pindexSlow = FindBlockByHeight(nHeight);
        }
    }

    // Slow path: read the whole block and scan for the txid.
    if (pindexSlow) {
        CBlock block;
        if (block.ReadFromDisk(pindexSlow)) {
            BOOST_FOREACH(const CTransaction &tx, block.vtx) {
                if (tx.GetHash() == hash) {
                    txOut = tx;
                    hashBlock = pindexSlow->GetBlockHash();
                    return true;
                }
            }
        }
    }

    return false;
}
//////////////////////////////////////////////////////////////////////////////
//
// CBlock and CBlockIndex
//

// Cache of the last FindBlockByHeight() result; used as a potentially closer
// starting point for the next height lookup (FBBH = FindBlockByHeight).
static CBlockIndex* pblockindexFBBHLast;
/// Return the main-chain block index at the given height, or NULL if the
/// height is outside the current chain.
///
/// Fix: the original walked pprev/pnext unconditionally, so a height below 0
/// or above nBestHeight dereferenced a NULL link. Out-of-range heights now
/// return NULL (callers such as GetTransaction already NULL-check).
CBlockIndex* FindBlockByHeight(int nHeight)
{
    if (nHeight < 0 || nHeight > nBestHeight || pindexBest == NULL)
        return NULL;

    // Start from whichever chain end is closer to the target height.
    CBlockIndex *pblockindex;
    if (nHeight < nBestHeight / 2)
        pblockindex = pindexGenesisBlock;
    else
        pblockindex = pindexBest;

    // The previous lookup result is often an even closer starting point.
    if (pblockindexFBBHLast && abs(nHeight - pblockindex->nHeight) > abs(nHeight - pblockindexFBBHLast->nHeight))
        pblockindex = pblockindexFBBHLast;

    // Walk towards the target height along the chain links.
    while (pblockindex->nHeight > nHeight)
        pblockindex = pblockindex->pprev;
    while (pblockindex->nHeight < nHeight)
        pblockindex = pblockindex->pnext;

    pblockindexFBBHLast = pblockindex;
    return pblockindex;
}
bool CBlock::ReadFromDisk(const CBlockIndex* pindex)
{
    // Load the raw block from its recorded disk position, then verify we
    // actually got the block this index entry points at.
    if (!ReadFromDisk(pindex->GetBlockPos()))
        return false;
    if (GetHash() == pindex->GetBlockHash())
        return true;
    return error("CBlock::ReadFromDisk() : GetHash() doesn't match index");
}
// Return the hash of the earliest known ancestor of pblock that is itself an
// orphan (i.e. the block we would need next to connect this orphan chain).
uint256 static GetOrphanRoot(const CBlockHeader* pblock)
{
    // Work back to the first block in the orphan chain
    while (mapOrphanBlocks.count(pblock->hashPrevBlock))
        pblock = mapOrphanBlocks[pblock->hashPrevBlock];
    return pblock->GetHash();
}
// Block subsidy (coinbase reward) at a given height, plus collected fees.
int64 static GetBlockValue(int nHeight, int64 nFees)
{
    // Base subsidy of 50 coins (in base units).
    int64 nSubsidy = 5000000000;
    // Subsidy is cut in half every 840000 blocks, which will occur approximately every 4 years
    int nHalvings = nHeight / 840000; // kripton: 840k blocks in ~4 years
    nSubsidy >>= nHalvings;
    return nSubsidy + nFees;
}
// Difficulty retargeting parameters.
static const int64 nTargetTimespan = 302400; // kripton: 3.5 days
static const int64 nTargetSpacing = 150; // kripton: 2.5 minutes
static const int64 nInterval = nTargetTimespan / nTargetSpacing; // blocks per retarget window (2016)
//
// minimum amount of work that could possibly be required nTime after
// minimum work required was nBase
//
unsigned int ComputeMinWork(unsigned int nBase, int64 nTime)
{
// Testnet has min-difficulty blocks
// after nTargetSpacing*2 time between blocks:
if (fTestNet && nTime > nTargetSpacing*2)
return bnProofOfWorkLimit.GetCompact();
CBigNum bnResult;
bnResult.SetCompact(nBase);
while (nTime > 0 && bnResult < bnProofOfWorkLimit)
{
// Maximum 400% adjustment...
bnResult *= 4;
// ... in best-case exactly 4-times-normal target time
nTime -= nTargetTimespan*4;
}
if (bnResult > bnProofOfWorkLimit)
bnResult = bnProofOfWorkLimit;
return bnResult.GetCompact();
}
// Compute the proof-of-work target (compact nBits form) required for the
// block following pindexLast. Consensus-critical: any change here forks the
// chain.
unsigned int static GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock)
{
    unsigned int nProofOfWorkLimit = bnProofOfWorkLimit.GetCompact();

    // Genesis block
    if (pindexLast == NULL)
        return nProofOfWorkLimit;

    // Only change once per interval
    if ((pindexLast->nHeight+1) % nInterval != 0)
    {
        // Special difficulty rule for testnet:
        if (fTestNet)
        {
            // If the new block's timestamp is more than 2* 10 minutes
            // then allow mining of a min-difficulty block.
            if (pblock->nTime > pindexLast->nTime + nTargetSpacing*2)
                return nProofOfWorkLimit;
            else
            {
                // Return the last non-special-min-difficulty-rules-block
                const CBlockIndex* pindex = pindexLast;
                while (pindex->pprev && pindex->nHeight % nInterval != 0 && pindex->nBits == nProofOfWorkLimit)
                    pindex = pindex->pprev;
                return pindex->nBits;
            }
        }
        return pindexLast->nBits;
    }

    // kripton: This fixes an issue where a 51% attack can change difficulty at will.
    // Go back the full period unless it's the first retarget after genesis. Code courtesy of Art Forz
    int blockstogoback = nInterval-1;
    if ((pindexLast->nHeight+1) != nInterval)
        blockstogoback = nInterval;

    // Go back by what we want to be 14 days worth of blocks
    const CBlockIndex* pindexFirst = pindexLast;
    for (int i = 0; pindexFirst && i < blockstogoback; i++)
        pindexFirst = pindexFirst->pprev;
    assert(pindexFirst);

    // Limit adjustment step: actual timespan is clamped to [1/4, 4x] of the
    // target timespan so a single retarget can't swing difficulty too far.
    int64 nActualTimespan = pindexLast->GetBlockTime() - pindexFirst->GetBlockTime();
    printf("  nActualTimespan = %"PRI64d"  before bounds\n", nActualTimespan);
    if (nActualTimespan < nTargetTimespan/4)
        nActualTimespan = nTargetTimespan/4;
    if (nActualTimespan > nTargetTimespan*4)
        nActualTimespan = nTargetTimespan*4;

    // Retarget: scale the old target by (actual / target) timespan.
    CBigNum bnNew;
    bnNew.SetCompact(pindexLast->nBits);
    bnNew *= nActualTimespan;
    bnNew /= nTargetTimespan;

    // Never easier than the proof-of-work limit.
    if (bnNew > bnProofOfWorkLimit)
        bnNew = bnProofOfWorkLimit;

    /// debug print
    printf("GetNextWorkRequired RETARGET\n");
    printf("nTargetTimespan = %"PRI64d"    nActualTimespan = %"PRI64d"\n", nTargetTimespan, nActualTimespan);
    printf("Before: %08x  %s\n", pindexLast->nBits, CBigNum().SetCompact(pindexLast->nBits).getuint256().ToString().c_str());
    printf("After:  %08x  %s\n", bnNew.GetCompact(), bnNew.getuint256().ToString().c_str());

    return bnNew.GetCompact();
}
bool CheckProofOfWork(uint256 hash, unsigned int nBits)
{
    CBigNum bnTarget;
    bnTarget.SetCompact(nBits);

    // The claimed target must be positive and no easier than the limit.
    bool fRangeOk = (bnTarget > 0) && (bnTarget <= bnProofOfWorkLimit);
    if (!fRangeOk)
        return error("CheckProofOfWork() : nBits below minimum work");

    // The block hash, read as a 256-bit number, must not exceed the target.
    if (hash > bnTarget.getuint256())
        return error("CheckProofOfWork() : hash doesn't match nBits");

    return true;
}
// Return maximum amount of blocks that other nodes claim to have
int GetNumBlocksOfPeers()
{
    // Use the median of peer-reported heights, but never report less than
    // the hard-coded checkpoint estimate.
    int nMedian = cPeerBlockCounts.median();
    int nCheckpoint = Checkpoints::GetTotalBlocksEstimate();
    return std::max(nMedian, nCheckpoint);
}
bool IsInitialBlockDownload()
{
    // Definitely syncing: no tip yet, importing/reindexing, or still short of
    // the checkpoint block-count estimate.
    if (pindexBest == NULL || fImporting || fReindex || nBestHeight < Checkpoints::GetTotalBlocksEstimate())
        return true;

    // Track when the best block last changed.
    static int64 nLastUpdate;
    static CBlockIndex* pindexLastBest;
    if (pindexLastBest != pindexBest)
    {
        pindexLastBest = pindexBest;
        nLastUpdate = GetTime();
    }

    // Heuristic: the tip advanced within the last 10 seconds, yet the best
    // block is still more than a day old -> we are catching up.
    bool fRecentProgress = (GetTime() - nLastUpdate < 10);
    bool fTipOld = (pindexBest->GetBlockTime() < GetTime() - 24 * 60 * 60);
    return fRecentProgress && fTipOld;
}
// Record that a chain ending in pindexNew is invalid: remember the most-work
// invalid chain seen and log a comparison against the current best chain.
void static InvalidChainFound(CBlockIndex* pindexNew)
{
    // Persist the new high-water mark of invalid work so restarts keep it.
    if (pindexNew->nChainWork > nBestInvalidWork)
    {
        nBestInvalidWork = pindexNew->nChainWork;
        pblocktree->WriteBestInvalidWork(CBigNum(nBestInvalidWork));
        uiInterface.NotifyBlocksChanged();
    }
    printf("InvalidChainFound: invalid block=%s  height=%d  log2_work=%.8g  date=%s\n",
      pindexNew->GetBlockHash().ToString().c_str(), pindexNew->nHeight,
      log(pindexNew->nChainWork.getdouble())/log(2.0), DateTimeStrFormat("%Y-%m-%d %H:%M:%S",
      pindexNew->GetBlockTime()).c_str());
    printf("InvalidChainFound:  current best=%s  height=%d  log2_work=%.8g  date=%s\n",
      hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0),
      DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
    // Warn if the invalid chain is more than ~6 blocks of work ahead of ours.
    if (pindexBest && nBestInvalidWork > nBestChainWork + (pindexBest->GetBlockWork() * 6).getuint256())
        printf("InvalidChainFound: Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.\n");
}
void static InvalidBlockFound(CBlockIndex *pindex) {
    // Flag the block as failed, persist that status, and drop it from the
    // set of candidate best blocks.
    pindex->nStatus |= BLOCK_FAILED_VALID;
    pblocktree->WriteBlockIndex(CDiskBlockIndex(pindex));
    setBlockIndexValid.erase(pindex);
    InvalidChainFound(pindex);
    // If the failed block was in the active chain, reorganise away from it.
    if (pindex->pnext != NULL) {
        CValidationState stateDummy;
        ConnectBestBlock(stateDummy);
    }
}
// Repeatedly try to make the most-work valid block the chain tip, marking
// chains that descend from failed blocks and switching via SetBestChain.
bool ConnectBestBlock(CValidationState &state) {
    do {
        // Candidate: the most-work block in the valid set.
        CBlockIndex *pindexNewBest;
        {
            std::set<CBlockIndex*,CBlockIndexWorkComparator>::reverse_iterator it = setBlockIndexValid.rbegin();
            if (it == setBlockIndexValid.rend())
                return true;
            pindexNewBest = *it;
        }

        if (pindexNewBest == pindexBest || (pindexBest && pindexNewBest->nChainWork == pindexBest->nChainWork))
            return true; // nothing to do

        // check ancestry: walk back from the candidate toward the current
        // chain, collecting the blocks that would need to be attached.
        CBlockIndex *pindexTest = pindexNewBest;
        std::vector<CBlockIndex*> vAttach;
        do {
            if (pindexTest->nStatus & BLOCK_FAILED_MASK) {
                // mark descendants of a failed block as failed too, then
                // restart the outer loop with the next-best candidate.
                CBlockIndex *pindexFailed = pindexNewBest;
                while (pindexTest != pindexFailed) {
                    pindexFailed->nStatus |= BLOCK_FAILED_CHILD;
                    setBlockIndexValid.erase(pindexFailed);
                    pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexFailed));
                    pindexFailed = pindexFailed->pprev;
                }
                InvalidChainFound(pindexNewBest);
                break;
            }

            if (pindexBest == NULL || pindexTest->nChainWork > pindexBest->nChainWork)
                vAttach.push_back(pindexTest);

            // Reached the genesis block or a block already in the active
            // chain: attach the collected blocks in forward order.
            if (pindexTest->pprev == NULL || pindexTest->pnext != NULL) {
                reverse(vAttach.begin(), vAttach.end());
                BOOST_FOREACH(CBlockIndex *pindexSwitch, vAttach) {
                    boost::this_thread::interruption_point();
                    try {
                        if (!SetBestChain(state, pindexSwitch))
                            return false;
                    } catch(std::runtime_error &e) {
                        return state.Abort(_("System error: ") + e.what());
                    }
                }
                return true;
            }
            pindexTest = pindexTest->pprev;
        } while(true);
    } while(true);
}
void CBlockHeader::UpdateTime(const CBlockIndex* pindexPrev)
{
nTime = max(pindexPrev->GetMedianTimePast()+1, GetAdjustedTime());
// Updating time can change work required on testnet:
if (fTestNet)
nBits = GetNextWorkRequired(pindexPrev, this);
}
// Look up the previous output an input spends; it must be unspent in view.
const CTxOut &CTransaction::GetOutputFor(const CTxIn& input, CCoinsViewCache& view)
{
    const COutPoint &prevout = input.prevout;
    const CCoins &coins = view.GetCoins(prevout.hash);
    assert(coins.IsAvailable(prevout.n));
    return coins.vout[prevout.n];
}
// Sum of the values of all previous outputs this transaction spends.
int64 CTransaction::GetValueIn(CCoinsViewCache& inputs) const
{
    // Coinbase transactions create value; they have no real inputs.
    if (IsCoinBase())
        return 0;

    int64 nTotal = 0;
    BOOST_FOREACH(const CTxIn& txin, vin)
        nTotal += GetOutputFor(txin, inputs).nValue;
    return nTotal;
}
// Count signature operations contributed by pay-to-script-hash inputs
// (the sigops live in the redeem script carried in scriptSig).
unsigned int CTransaction::GetP2SHSigOpCount(CCoinsViewCache& inputs) const
{
    if (IsCoinBase())
        return 0;

    unsigned int nSigOps = 0;
    BOOST_FOREACH(const CTxIn& txin, vin)
    {
        const CTxOut &prevout = GetOutputFor(txin, inputs);
        if (prevout.scriptPubKey.IsPayToScriptHash())
            nSigOps += prevout.scriptPubKey.GetSigOpCount(txin.scriptSig);
    }
    return nSigOps;
}
/// Apply this transaction to the coin view: spend its inputs (recording undo
/// information in txundo) and create its outputs at the given height.
///
/// Fix: the original wrapped the side-effecting calls Spend()/SetCoins()
/// directly in assert(), so with NDEBUG defined the coin state would never
/// be updated. The calls are now made unconditionally, with the result
/// asserted separately.
void CTransaction::UpdateCoins(CValidationState &state, CCoinsViewCache &inputs, CTxUndo &txundo, int nHeight, const uint256 &txhash) const
{
    // mark inputs spent
    if (!IsCoinBase()) {
        BOOST_FOREACH(const CTxIn &txin, vin) {
            CCoins &coins = inputs.GetCoins(txin.prevout.hash);
            CTxInUndo undo;
            bool fSpent = coins.Spend(txin.prevout, undo);
            assert(fSpent);
            txundo.vprevout.push_back(undo);
        }
    }

    // add outputs
    bool fSet = inputs.SetCoins(txhash, CCoins(*this, nHeight));
    assert(fSet);
}
// Return true if every input's previous output exists and is unspent in the
// given view. Coinbase transactions trivially pass.
bool CTransaction::HaveInputs(CCoinsViewCache &inputs) const
{
    if (IsCoinBase())
        return true;

    // first check whether information about the prevout hash is available
    BOOST_FOREACH(const CTxIn &txin, vin)
        if (!inputs.HaveCoins(txin.prevout.hash))
            return false;

    // then check whether the actual outputs are available
    BOOST_FOREACH(const CTxIn &txin, vin) {
        const COutPoint &prevout = txin.prevout;
        const CCoins &coins = inputs.GetCoins(prevout.hash);
        if (!coins.IsAvailable(prevout.n))
            return false;
    }

    return true;
}
// Run the deferred script verification for one input; false on failure.
bool CScriptCheck::operator()() const {
    const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
    if (VerifyScript(scriptSig, scriptPubKey, *ptxTo, nIn, nFlags, nHashType))
        return true;
    // error() logs and returns false.
    return error("CScriptCheck() : %s VerifySignature failed", ptxTo->GetHash().ToString().c_str());
}
bool VerifySignature(const CCoins& txFrom, const CTransaction& txTo, unsigned int nIn, unsigned int flags, int nHashType)
{
return CScriptCheck(txFrom, txTo, nIn, flags, nHashType)();
}
// Validate this transaction's inputs against the coin view: availability,
// coinbase maturity, value ranges, fee sanity, and (optionally) script
// signatures. If pvChecks is given, script checks are queued there for
// parallel execution instead of being run inline. Consensus-critical.
bool CTransaction::CheckInputs(CValidationState &state, CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, std::vector<CScriptCheck> *pvChecks) const
{
    if (!IsCoinBase())
    {
        if (pvChecks)
            pvChecks->reserve(vin.size());

        // This doesn't trigger the DoS code on purpose; if it did, it would make it easier
        // for an attacker to attempt to split the network.
        if (!HaveInputs(inputs))
            return state.Invalid(error("CheckInputs() : %s inputs unavailable", GetHash().ToString().c_str()));

        // While checking, GetBestBlock() refers to the parent block.
        // This is also true for mempool checks.
        int nSpendHeight = inputs.GetBestBlock()->nHeight + 1;
        int64 nValueIn = 0;
        int64 nFees = 0;
        for (unsigned int i = 0; i < vin.size(); i++)
        {
            const COutPoint &prevout = vin[i].prevout;
            const CCoins &coins = inputs.GetCoins(prevout.hash);

            // If prev is coinbase, check that it's matured
            if (coins.IsCoinBase()) {
                if (nSpendHeight - coins.nHeight < COINBASE_MATURITY)
                    return state.Invalid(error("CheckInputs() : tried to spend coinbase at depth %d", nSpendHeight - coins.nHeight));
            }

            // Check for negative or overflow input values
            nValueIn += coins.vout[prevout.n].nValue;
            if (!MoneyRange(coins.vout[prevout.n].nValue) || !MoneyRange(nValueIn))
                return state.DoS(100, error("CheckInputs() : txin values out of range"));

        }

        if (nValueIn < GetValueOut())
            return state.DoS(100, error("CheckInputs() : %s value in < value out", GetHash().ToString().c_str()));

        // Tally transaction fees
        int64 nTxFee = nValueIn - GetValueOut();
        if (nTxFee < 0)
            return state.DoS(100, error("CheckInputs() : %s nTxFee < 0", GetHash().ToString().c_str()));
        nFees += nTxFee;
        if (!MoneyRange(nFees))
            return state.DoS(100, error("CheckInputs() : nFees out of range"));

        // The first loop above does all the inexpensive checks.
        // Only if ALL inputs pass do we perform expensive ECDSA signature checks.
        // Helps prevent CPU exhaustion attacks.

        // Skip ECDSA signature verification when connecting blocks
        // before the last block chain checkpoint. This is safe because block merkle hashes are
        // still computed and checked, and any change will be caught at the next checkpoint.
        if (fScriptChecks) {
            for (unsigned int i = 0; i < vin.size(); i++) {
                const COutPoint &prevout = vin[i].prevout;
                const CCoins &coins = inputs.GetCoins(prevout.hash);

                // Verify signature
                CScriptCheck check(coins, *this, i, flags, 0);
                if (pvChecks) {
                    // Defer: hand the check to the caller's queue.
                    pvChecks->push_back(CScriptCheck());
                    check.swap(pvChecks->back());
                } else if (!check()) {
                    if (flags & SCRIPT_VERIFY_STRICTENC) {
                        // For now, check whether the failure was caused by non-canonical
                        // encodings or not; if so, don't trigger DoS protection.
                        CScriptCheck check(coins, *this, i, flags & (~SCRIPT_VERIFY_STRICTENC), 0);
                        if (check())
                            return state.Invalid();
                    }
                    return state.DoS(100,false);
                }
            }
        }
    }

    return true;
}
// Undo the effects of this block on the coin view: remove created outputs and
// restore spent inputs from the on-disk undo data. If pfClean is given,
// inconsistencies are reported through it instead of failing; otherwise the
// return value reflects whether the undo was fully clean.
bool CBlock::DisconnectBlock(CValidationState &state, CBlockIndex *pindex, CCoinsViewCache &view, bool *pfClean)
{
    // The view must currently be at this block.
    assert(pindex == view.GetBestBlock());

    if (pfClean)
        *pfClean = false;

    bool fClean = true;

    CBlockUndo blockUndo;
    CDiskBlockPos pos = pindex->GetUndoPos();
    if (pos.IsNull())
        return error("DisconnectBlock() : no undo data available");
    if (!blockUndo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
        return error("DisconnectBlock() : failure reading undo data");

    // One undo record per non-coinbase transaction.
    if (blockUndo.vtxundo.size() + 1 != vtx.size())
        return error("DisconnectBlock() : block and undo data inconsistent");

    // undo transactions in reverse order
    for (int i = vtx.size() - 1; i >= 0; i--) {
        const CTransaction &tx = vtx[i];
        uint256 hash = tx.GetHash();

        // check that all outputs are available
        if (!view.HaveCoins(hash)) {
            fClean = fClean && error("DisconnectBlock() : outputs still spent? database corrupted");
            view.SetCoins(hash, CCoins());
        }
        CCoins &outs = view.GetCoins(hash);

        CCoins outsBlock = CCoins(tx, pindex->nHeight);
        // The CCoins serialization does not serialize negative numbers.
        // No network rules currently depend on the version here, so an inconsistency is harmless
        // but it must be corrected before txout nversion ever influences a network rule.
        if (outsBlock.nVersion < 0)
            outs.nVersion = outsBlock.nVersion;
        if (outs != outsBlock)
            fClean = fClean && error("DisconnectBlock() : added transaction mismatch? database corrupted");

        // remove outputs
        outs = CCoins();

        // restore inputs
        if (i > 0) { // not coinbases
            const CTxUndo &txundo = blockUndo.vtxundo[i-1];
            if (txundo.vprevout.size() != tx.vin.size())
                return error("DisconnectBlock() : transaction and undo data inconsistent");
            // Restore in reverse input order, mirroring how they were spent.
            for (unsigned int j = tx.vin.size(); j-- > 0;) {
                const COutPoint &out = tx.vin[j].prevout;
                const CTxInUndo &undo = txundo.vprevout[j];
                CCoins coins;
                view.GetCoins(out.hash, coins); // this can fail if the prevout was already entirely spent
                if (undo.nHeight != 0) {
                    // undo data contains height: this is the last output of the prevout tx being spent
                    if (!coins.IsPruned())
                        fClean = fClean && error("DisconnectBlock() : undo data overwriting existing transaction");
                    coins = CCoins();
                    coins.fCoinBase = undo.fCoinBase;
                    coins.nHeight = undo.nHeight;
                    coins.nVersion = undo.nVersion;
                } else {
                    if (coins.IsPruned())
                        fClean = fClean && error("DisconnectBlock() : undo data adding output to missing transaction");
                }
                if (coins.IsAvailable(out.n))
                    fClean = fClean && error("DisconnectBlock() : undo data overwriting existing output");
                if (coins.vout.size() < out.n+1)
                    coins.vout.resize(out.n+1);
                coins.vout[out.n] = undo.txout;
                if (!view.SetCoins(out.hash, coins))
                    return error("DisconnectBlock() : cannot restore coin inputs");
            }
        }
    }

    // move best block pointer to prevout block
    view.SetBestBlock(pindex->pprev);

    if (pfClean) {
        *pfClean = fClean;
        return true;
    } else {
        return fClean;
    }
}
// Flush one open file handle to disk, optionally truncating it to nSize
// first (used when finalizing a file), then close it. NULL is ignored.
static void FlushFileToDisk(FILE* file, unsigned int nSize, bool fFinalize)
{
    if (file == NULL)
        return;
    if (fFinalize)
        TruncateFile(file, nSize);
    FileCommit(file);
    fclose(file);
}

// Commit the current block file and its undo file to disk.
void static FlushBlockFile(bool fFinalize = false)
{
    LOCK(cs_LastBlockFile);

    CDiskBlockPos posOld(nLastBlockFile, 0);
    FlushFileToDisk(OpenBlockFile(posOld), infoLastBlockFile.nSize, fFinalize);
    FlushFileToDisk(OpenUndoFile(posOld), infoLastBlockFile.nUndoSize, fFinalize);
}
// Forward declaration; used by ConnectBlock below to reserve undo-file space.
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize);

// Work queue feeding the script-verification worker threads (batches of 128).
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
// Entry point for a script-verification worker thread: names the thread and
// loops processing checks from scriptcheckqueue.
void ThreadScriptCheck() {
    RenameThread("bitcoin-scriptch");
    scriptcheckqueue.Thread();
}
/// Apply this block on top of view (which must be at pprev): re-check the
/// block, validate and connect all transactions, write undo data and index
/// entries, and advance the view's best block. With fJustCheck, validation is
/// performed but nothing is written. Consensus-critical.
///
/// Fix: view.SetBestBlock(pindex) was called inside assert(), so with NDEBUG
/// defined the view would never advance; the call is now unconditional.
bool CBlock::ConnectBlock(CValidationState &state, CBlockIndex* pindex, CCoinsViewCache &view, bool fJustCheck)
{
    // Check it again in case a previous version let a bad block in
    // don't check PoW for the genesis block
    if (!CheckBlock(state, !fJustCheck && hashPrevBlock != 0, !fJustCheck))
        return false;

    // verify that the view's current state corresponds to the previous block
    assert(pindex->pprev == view.GetBestBlock());

    // Special case for the genesis block, skipping connection of its transactions
    // (its coinbase is unspendable)
    if (GetHash() == hashGenesisBlock) {
        view.SetBestBlock(pindex);
        pindexGenesisBlock = pindex;
        return true;
    }

    // Script checks are skipped below the checkpoint estimate (merkle hashes
    // still protect those blocks).
    bool fScriptChecks = pindex->nHeight >= Checkpoints::GetTotalBlocksEstimate();

    // Do not allow blocks that contain transactions which 'overwrite' older transactions,
    // unless those are already completely spent.
    // If such overwrites are allowed, coinbases and transactions depending upon those
    // can be duplicated to remove the ability to spend the first instance -- even after
    // being sent to another address.
    // See BIP30 and http://r6.ca/blog/20120206T005236Z.html for more information.
    // This logic is not necessary for memory pool transactions, as AcceptToMemoryPool
    // already refuses previously-known transaction ids entirely.
    // This rule was originally applied all blocks whose timestamp was after October 1, 2012, 0:00 UTC.
    // Now that the whole chain is irreversibly beyond that time it is applied to all blocks,
    // this prevents exploiting the issue against nodes in their initial block download.
    bool fEnforceBIP30 = true;

    if (fEnforceBIP30) {
        for (unsigned int i=0; i<vtx.size(); i++) {
            uint256 hash = GetTxHash(i);
            if (view.HaveCoins(hash) && !view.GetCoins(hash).IsPruned())
                return state.DoS(100, error("ConnectBlock() : tried to overwrite transaction"));
        }
    }

    // BIP16 didn't become active until Oct 1 2012
    int64 nBIP16SwitchTime = 1349049600;
    bool fStrictPayToScriptHash = (pindex->nTime >= nBIP16SwitchTime);

    unsigned int flags = SCRIPT_VERIFY_NOCACHE |
                         (fStrictPayToScriptHash ? SCRIPT_VERIFY_P2SH : SCRIPT_VERIFY_NONE);

    CBlockUndo blockundo;

    // Parallel script verification when worker threads are available.
    CCheckQueueControl<CScriptCheck> control(fScriptChecks && nScriptCheckThreads ? &scriptcheckqueue : NULL);

    int64 nStart = GetTimeMicros();
    int64 nFees = 0;
    int nInputs = 0;
    unsigned int nSigOps = 0;
    CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(vtx.size()));
    std::vector<std::pair<uint256, CDiskTxPos> > vPos;
    vPos.reserve(vtx.size());
    for (unsigned int i=0; i<vtx.size(); i++)
    {
        const CTransaction &tx = vtx[i];

        nInputs += tx.vin.size();
        nSigOps += tx.GetLegacySigOpCount();
        if (nSigOps > MAX_BLOCK_SIGOPS)
            return state.DoS(100, error("ConnectBlock() : too many sigops"));

        if (!tx.IsCoinBase())
        {
            if (!tx.HaveInputs(view))
                return state.DoS(100, error("ConnectBlock() : inputs missing/spent"));

            if (fStrictPayToScriptHash)
            {
                // Add in sigops done by pay-to-script-hash inputs;
                // this is to prevent a "rogue miner" from creating
                // an incredibly-expensive-to-validate block.
                nSigOps += tx.GetP2SHSigOpCount(view);
                if (nSigOps > MAX_BLOCK_SIGOPS)
                    return state.DoS(100, error("ConnectBlock() : too many sigops"));
            }

            nFees += tx.GetValueIn(view)-tx.GetValueOut();

            std::vector<CScriptCheck> vChecks;
            if (!tx.CheckInputs(state, view, fScriptChecks, flags, nScriptCheckThreads ? &vChecks : NULL))
                return false;
            control.Add(vChecks);
        }

        CTxUndo txundo;
        tx.UpdateCoins(state, view, txundo, pindex->nHeight, GetTxHash(i));
        if (!tx.IsCoinBase())
            blockundo.vtxundo.push_back(txundo);

        vPos.push_back(std::make_pair(GetTxHash(i), pos));
        pos.nTxOffset += ::GetSerializeSize(tx, SER_DISK, CLIENT_VERSION);
    }
    int64 nTime = GetTimeMicros() - nStart;
    if (fBenchmark)
        printf("- Connect %u transactions: %.2fms (%.3fms/tx, %.3fms/txin)\n", (unsigned)vtx.size(), 0.001 * nTime, 0.001 * nTime / vtx.size(), nInputs <= 1 ? 0 : 0.001 * nTime / (nInputs-1));

    if (vtx[0].GetValueOut() > GetBlockValue(pindex->nHeight, nFees))
        return state.DoS(100, error("ConnectBlock() : coinbase pays too much (actual=%"PRI64d" vs limit=%"PRI64d")", vtx[0].GetValueOut(), GetBlockValue(pindex->nHeight, nFees)));

    // Wait for all queued script checks to finish.
    if (!control.Wait())
        return state.DoS(100, false);
    int64 nTime2 = GetTimeMicros() - nStart;
    if (fBenchmark)
        printf("- Verify %u txins: %.2fms (%.3fms/txin)\n", nInputs - 1, 0.001 * nTime2, nInputs <= 1 ? 0 : 0.001 * nTime2 / (nInputs-1));

    if (fJustCheck)
        return true;

    // Write undo information to disk
    if (pindex->GetUndoPos().IsNull() || (pindex->nStatus & BLOCK_VALID_MASK) < BLOCK_VALID_SCRIPTS)
    {
        if (pindex->GetUndoPos().IsNull()) {
            CDiskBlockPos pos;
            if (!FindUndoPos(state, pindex->nFile, pos, ::GetSerializeSize(blockundo, SER_DISK, CLIENT_VERSION) + 40))
                return error("ConnectBlock() : FindUndoPos failed");
            if (!blockundo.WriteToDisk(pos, pindex->pprev->GetBlockHash()))
                return state.Abort(_("Failed to write undo data"));

            // update nUndoPos in block index
            pindex->nUndoPos = pos.nPos;
            pindex->nStatus |= BLOCK_HAVE_UNDO;
        }

        pindex->nStatus = (pindex->nStatus & ~BLOCK_VALID_MASK) | BLOCK_VALID_SCRIPTS;

        CDiskBlockIndex blockindex(pindex);
        if (!pblocktree->WriteBlockIndex(blockindex))
            return state.Abort(_("Failed to write block index"));
    }

    if (fTxIndex)
        if (!pblocktree->WriteTxIndex(vPos))
            return state.Abort(_("Failed to write transaction index"));

    // add this block to the view's block chain
    // (side effect kept outside assert so it survives NDEBUG builds)
    bool fSetBestOk = view.SetBestBlock(pindex);
    assert(fSetBestOk);

    // Watch for transactions paying to me
    for (unsigned int i=0; i<vtx.size(); i++)
        SyncWithWallets(GetTxHash(i), vtx[i], this, true);

    return true;
}
bool SetBestChain(CValidationState &state, CBlockIndex* pindexNew)
{
// All modifications to the coin state will be done in this cache.
// Only when all have succeeded, we push it to pcoinsTip.
CCoinsViewCache view(*pcoinsTip, true);
// Find the fork (typically, there is none)
CBlockIndex* pfork = view.GetBestBlock();
CBlockIndex* plonger = pindexNew;
while (pfork && pfork != plonger)
{
while (plonger->nHeight > pfork->nHeight) {
plonger = plonger->pprev;
assert(plonger != NULL);
}
if (pfork == plonger)
break;
pfork = pfork->pprev;
assert(pfork != NULL);
}
// List of what to disconnect (typically nothing)
vector<CBlockIndex*> vDisconnect;
for (CBlockIndex* pindex = view.GetBestBlock(); pindex != pfork; pindex = pindex->pprev)
vDisconnect.push_back(pindex);
// List of what to connect (typically only pindexNew)
vector<CBlockIndex*> vConnect;
for (CBlockIndex* pindex = pindexNew; pindex != pfork; pindex = pindex->pprev)
vConnect.push_back(pindex);
reverse(vConnect.begin(), vConnect.end());
if (vDisconnect.size() > 0) {
printf("REORGANIZE: Disconnect %"PRIszu" blocks; %s..\n", vDisconnect.size(), pfork->GetBlockHash().ToString().c_str());
printf("REORGANIZE: Connect %"PRIszu" blocks; ..%s\n", vConnect.size(), pindexNew->GetBlockHash().ToString().c_str());
}
// Disconnect shorter branch
vector<CTransaction> vResurrect;
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect) {
CBlock block;
if (!block.ReadFromDisk(pindex))
return state.Abort(_("Failed to read block"));
int64 nStart = GetTimeMicros();
if (!block.DisconnectBlock(state, pindex, view))
return error("SetBestBlock() : DisconnectBlock %s failed", pindex->GetBlockHash().ToString().c_str());
if (fBenchmark)
printf("- Disconnect: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Queue memory transactions to resurrect.
// We only do this for blocks after the last checkpoint (reorganisation before that
// point should only happen with -reindex/-loadblock, or a misbehaving peer.
BOOST_FOREACH(const CTransaction& tx, block.vtx)
if (!tx.IsCoinBase() && pindex->nHeight > Checkpoints::GetTotalBlocksEstimate())
vResurrect.push_back(tx);
}
// Connect longer branch
vector<CTransaction> vDelete;
BOOST_FOREACH(CBlockIndex *pindex, vConnect) {
CBlock block;
if (!block.ReadFromDisk(pindex))
return state.Abort(_("Failed to read block"));
int64 nStart = GetTimeMicros();
if (!block.ConnectBlock(state, pindex, view)) {
if (state.IsInvalid()) {
InvalidChainFound(pindexNew);
InvalidBlockFound(pindex);
}
return error("SetBestBlock() : ConnectBlock %s failed", pindex->GetBlockHash().ToString().c_str());
}
if (fBenchmark)
printf("- Connect: %.2fms\n", (GetTimeMicros() - nStart) * 0.001);
// Queue memory transactions to delete
BOOST_FOREACH(const CTransaction& tx, block.vtx)
vDelete.push_back(tx);
}
// Flush changes to global coin state
int64 nStart = GetTimeMicros();
int nModified = view.GetCacheSize();
assert(view.Flush());
int64 nTime = GetTimeMicros() - nStart;
if (fBenchmark)
printf("- Flush %i transactions: %.2fms (%.4fms/tx)\n", nModified, 0.001 * nTime, 0.001 * nTime / nModified);
// Make sure it's successfully written to disk before changing memory structure
bool fIsInitialDownload = IsInitialBlockDownload();
if (!fIsInitialDownload || pcoinsTip->GetCacheSize() > nCoinCacheSize) {
// Typical CCoins structures on disk are around 100 bytes in size.
// Pushing a new one to the database can cause it to be written
// twice (once in the log, and once in the tables). This is already
// an overestimation, as most will delete an existing entry or
// overwrite one. Still, use a conservative safety factor of 2.
if (!CheckDiskSpace(100 * 2 * 2 * pcoinsTip->GetCacheSize()))
return state.Error();
FlushBlockFile();
pblocktree->Sync();
if (!pcoinsTip->Flush())
return state.Abort(_("Failed to write to coin database"));
}
// At this point, all changes have been done to the database.
// Proceed by updating the memory structures.
// Disconnect shorter branch
BOOST_FOREACH(CBlockIndex* pindex, vDisconnect)
if (pindex->pprev)
pindex->pprev->pnext = NULL;
// Connect longer branch
BOOST_FOREACH(CBlockIndex* pindex, vConnect)
if (pindex->pprev)
pindex->pprev->pnext = pindex;
// Resurrect memory transactions that were in the disconnected branch
BOOST_FOREACH(CTransaction& tx, vResurrect) {
// ignore validation errors in resurrected transactions
CValidationState stateDummy;
if (!tx.AcceptToMemoryPool(stateDummy, true, false))
mempool.remove(tx, true);
}
// Delete redundant memory transactions that are in the connected branch
BOOST_FOREACH(CTransaction& tx, vDelete) {
mempool.remove(tx);
mempool.removeConflicts(tx);
}
// Update best block in wallet (so we can detect restored wallets)
if ((pindexNew->nHeight % 20160) == 0 || (!fIsInitialDownload && (pindexNew->nHeight % 144) == 0))
{
const CBlockLocator locator(pindexNew);
::SetBestChain(locator);
}
// New best block
hashBestChain = pindexNew->GetBlockHash();
pindexBest = pindexNew;
pblockindexFBBHLast = NULL;
nBestHeight = pindexBest->nHeight;
nBestChainWork = pindexNew->nChainWork;
nTimeBestReceived = GetTime();
nTransactionsUpdated++;
printf("SetBestChain: new best=%s height=%d log2_work=%.8g tx=%lu date=%s progress=%f\n",
hashBestChain.ToString().c_str(), nBestHeight, log(nBestChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str(),
Checkpoints::GuessVerificationProgress(pindexBest));
// Check the version of the last 100 blocks to see if we need to upgrade:
if (!fIsInitialDownload)
{
int nUpgraded = 0;
const CBlockIndex* pindex = pindexBest;
for (int i = 0; i < 100 && pindex != NULL; i++)
{
if (pindex->nVersion > CBlock::CURRENT_VERSION)
++nUpgraded;
pindex = pindex->pprev;
}
if (nUpgraded > 0)
printf("SetBestChain: %d of last 100 blocks above version %d\n", nUpgraded, CBlock::CURRENT_VERSION);
if (nUpgraded > 100/2)
// strMiscWarning is read by GetWarnings(), called by Qt and the JSON-RPC code to warn the user:
strMiscWarning = _("Warning: This version is obsolete, upgrade required!");
}
std::string strCmd = GetArg("-blocknotify", "");
if (!fIsInitialDownload && !strCmd.empty())
{
boost::replace_all(strCmd, "%s", hashBestChain.GetHex());
boost::thread t(runCommand, strCmd); // thread runs free
}
return true;
}
// Create a CBlockIndex entry for this block (whose data has already been
// written at disk position 'pos'), persist it to the block-tree database,
// and attempt to connect it as the new best chain tip.
// Returns false on duplicate, database failure (via state.Abort) or when
// ConnectBestBlock fails.
bool CBlock::AddToBlockIndex(CValidationState &state, const CDiskBlockPos &pos)
{
    // Check for duplicate
    uint256 hash = GetHash();
    if (mapBlockIndex.count(hash))
        return state.Invalid(error("AddToBlockIndex() : %s already exists", hash.ToString().c_str()));
    // Construct new block index object
    CBlockIndex* pindexNew = new CBlockIndex(*this);
    assert(pindexNew);
    // Insert into the global index first so phashBlock can point at the map
    // key; the key's storage lives as long as the entry itself.
    map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &((*mi).first);
    map<uint256, CBlockIndex*>::iterator miPrev = mapBlockIndex.find(hashPrevBlock);
    if (miPrev != mapBlockIndex.end())
    {
        pindexNew->pprev = (*miPrev).second;
        pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
    }
    pindexNew->nTx = vtx.size();
    // Cumulative fields build on the parent (zero for the genesis block,
    // which has no pprev).
    pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + pindexNew->GetBlockWork().getuint256();
    pindexNew->nChainTx = (pindexNew->pprev ? pindexNew->pprev->nChainTx : 0) + pindexNew->nTx;
    pindexNew->nFile = pos.nFile;
    pindexNew->nDataPos = pos.nPos;
    pindexNew->nUndoPos = 0;   // no undo data yet; filled in when the block is connected
    pindexNew->nStatus = BLOCK_VALID_TRANSACTIONS | BLOCK_HAVE_DATA;
    setBlockIndexValid.insert(pindexNew);
    // Persist the index entry before trying to connect, so a crash between
    // the two leaves the database consistent with what is on disk.
    if (!pblocktree->WriteBlockIndex(CDiskBlockIndex(pindexNew)))
        return state.Abort(_("Failed to write block index"));
    // New best?
    if (!ConnectBestBlock(state))
        return false;
    if (pindexNew == pindexBest)
    {
        // Notify UI to display prev block's coinbase if it was ours
        static uint256 hashPrevBestCoinBase;
        UpdatedTransaction(hashPrevBestCoinBase);
        hashPrevBestCoinBase = GetTxHash(0);
    }
    if (!pblocktree->Flush())
        return state.Abort(_("Failed to sync block index"));
    uiInterface.NotifyBlocksChanged();
    return true;
}
// Choose an on-disk position (file number + offset) at which a block of
// nAddSize bytes can be written, pre-allocating file space in
// BLOCKFILE_CHUNK_SIZE steps and rolling over to a new blk?????.dat file
// when the current one would exceed MAX_BLOCKFILE_SIZE.
// If fKnown is true (reindex path), 'pos' already names the target location
// and only the in-memory/DB bookkeeping is updated.
bool FindBlockPos(CValidationState &state, CDiskBlockPos &pos, unsigned int nAddSize, unsigned int nHeight, uint64 nTime, bool fKnown = false)
{
    bool fUpdatedLast = false;
    // All access to nLastBlockFile / infoLastBlockFile is serialized here.
    LOCK(cs_LastBlockFile);
    if (fKnown) {
        // Reindexing: switch the cached file info to the file the block
        // already lives in, if it differs from the current one.
        if (nLastBlockFile != pos.nFile) {
            nLastBlockFile = pos.nFile;
            infoLastBlockFile.SetNull();
            pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile);
            fUpdatedLast = true;
        }
    } else {
        // Advance to the next file number until the block fits.
        while (infoLastBlockFile.nSize + nAddSize >= MAX_BLOCKFILE_SIZE) {
            printf("Leaving block file %i: %s\n", nLastBlockFile, infoLastBlockFile.ToString().c_str());
            FlushBlockFile(true);
            nLastBlockFile++;
            infoLastBlockFile.SetNull();
            pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile); // check whether data for the new file somehow already exist; can fail just fine
            fUpdatedLast = true;
        }
        pos.nFile = nLastBlockFile;
        pos.nPos = infoLastBlockFile.nSize;
    }
    infoLastBlockFile.nSize += nAddSize;
    infoLastBlockFile.AddBlock(nHeight, nTime);
    if (!fKnown) {
        // Pre-allocate in whole chunks to reduce fragmentation; only grow
        // the file when this write crosses a chunk boundary.
        unsigned int nOldChunks = (pos.nPos + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        unsigned int nNewChunks = (infoLastBlockFile.nSize + BLOCKFILE_CHUNK_SIZE - 1) / BLOCKFILE_CHUNK_SIZE;
        if (nNewChunks > nOldChunks) {
            if (CheckDiskSpace(nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos)) {
                FILE *file = OpenBlockFile(pos);
                if (file) {
                    printf("Pre-allocating up to position 0x%x in blk%05u.dat\n", nNewChunks * BLOCKFILE_CHUNK_SIZE, pos.nFile);
                    AllocateFileRange(file, pos.nPos, nNewChunks * BLOCKFILE_CHUNK_SIZE - pos.nPos);
                    fclose(file);
                }
            }
            else
                return state.Error();
        }
    }
    if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
        return state.Abort(_("Failed to write file info"));
    if (fUpdatedLast)
        pblocktree->WriteLastBlockFile(nLastBlockFile);
    return true;
}
// Reserve space for nAddSize bytes of undo data in the rev?????.dat file
// paired with block file nFile, growing the file in UNDOFILE_CHUNK_SIZE
// steps. On success 'pos' holds the position to write at. Unlike
// FindBlockPos, the file number is dictated by the block's own file and
// never rolls over.
bool FindUndoPos(CValidationState &state, int nFile, CDiskBlockPos &pos, unsigned int nAddSize)
{
    pos.nFile = nFile;
    LOCK(cs_LastBlockFile);
    unsigned int nNewSize;
    if (nFile == nLastBlockFile) {
        // Fast path: the current file's info is cached in memory.
        pos.nPos = infoLastBlockFile.nUndoSize;
        nNewSize = (infoLastBlockFile.nUndoSize += nAddSize);
        if (!pblocktree->WriteBlockFileInfo(nLastBlockFile, infoLastBlockFile))
            return state.Abort(_("Failed to write block info"));
    } else {
        // Older files: round-trip the info record through the database.
        CBlockFileInfo info;
        if (!pblocktree->ReadBlockFileInfo(nFile, info))
            return state.Abort(_("Failed to read block info"));
        pos.nPos = info.nUndoSize;
        nNewSize = (info.nUndoSize += nAddSize);
        if (!pblocktree->WriteBlockFileInfo(nFile, info))
            return state.Abort(_("Failed to write block info"));
    }
    // Pre-allocate in whole chunks, same scheme as FindBlockPos.
    unsigned int nOldChunks = (pos.nPos + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    unsigned int nNewChunks = (nNewSize + UNDOFILE_CHUNK_SIZE - 1) / UNDOFILE_CHUNK_SIZE;
    if (nNewChunks > nOldChunks) {
        if (CheckDiskSpace(nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos)) {
            FILE *file = OpenUndoFile(pos);
            if (file) {
                printf("Pre-allocating up to position 0x%x in rev%05u.dat\n", nNewChunks * UNDOFILE_CHUNK_SIZE, pos.nFile);
                AllocateFileRange(file, pos.nPos, nNewChunks * UNDOFILE_CHUNK_SIZE - pos.nPos);
                fclose(file);
            }
        }
        else
            return state.Error();
    }
    return true;
}
// Context-independent sanity checks on a block — safe to run before an
// orphan's ancestry is known: size limits, legacy BDB-lock limit, proof of
// work, timestamp bound, coinbase placement, per-transaction validity,
// duplicate txids, sigop count and merkle root.
// fCheckPOW / fCheckMerkleRoot let callers skip those checks (e.g. when
// assembling a block template whose root is not final yet).
// Returns false via state.DoS/state.Invalid with the failure reason.
bool CBlock::CheckBlock(CValidationState &state, bool fCheckPOW, bool fCheckMerkleRoot) const
{
    // Size limits
    if (vtx.empty() || vtx.size() > MAX_BLOCK_SIZE || ::GetSerializeSize(*this, SER_NETWORK, PROTOCOL_VERSION) > MAX_BLOCK_SIZE)
        return state.DoS(100, error("CheckBlock() : size limits failed"));

    // kripton: Special short-term limits to avoid 10,000 BDB lock limit:
    if (GetBlockTime() < 1376568000) // stop enforcing 15 August 2013 00:00:00
    {
        // Rule is: #unique txids referenced <= 4,500
        // ... to prevent 10,000 BDB lock exhaustion on old clients
        set<uint256> setTxIn;
        for (size_t i = 0; i < vtx.size(); i++)
        {
            setTxIn.insert(vtx[i].GetHash());
            if (i == 0) continue; // skip coinbase txin
            BOOST_FOREACH(const CTxIn& txin, vtx[i].vin)
                setTxIn.insert(txin.prevout.hash);
        }
        size_t nTxids = setTxIn.size();
        if (nTxids > 4500)
            return error("CheckBlock() : 15 August maxlocks violation");
    }

    // Check proof of work matches claimed amount
    if (fCheckPOW && !CheckProofOfWork(GetPoWHash(), nBits))
        return state.DoS(50, error("CheckBlock() : proof of work failed"));

    // Check timestamp (no more than 2 hours in the future)
    if (GetBlockTime() > GetAdjustedTime() + 2 * 60 * 60)
        return state.Invalid(error("CheckBlock() : block timestamp too far in the future"));

    // First transaction must be coinbase, the rest must not be
    if (vtx.empty() || !vtx[0].IsCoinBase())
        return state.DoS(100, error("CheckBlock() : first tx is not coinbase"));
    for (unsigned int i = 1; i < vtx.size(); i++)
        if (vtx[i].IsCoinBase())
            return state.DoS(100, error("CheckBlock() : more than one coinbase"));

    // Check transactions
    BOOST_FOREACH(const CTransaction& tx, vtx)
        if (!tx.CheckTransaction(state))
            return error("CheckBlock() : CheckTransaction failed");

    // Build the merkle tree once. We need it anyway later, and it makes the
    // block cache the transaction hashes, which means they don't need to be
    // recalculated many times during this block's validation. Reusing the
    // returned root below also avoids the second full rebuild the
    // merkle-root check used to perform.
    uint256 hashComputedMerkleRoot = BuildMerkleTree();

    // Check for duplicate txids. This is caught by ConnectInputs(),
    // but catching it earlier avoids a potential DoS attack:
    set<uint256> uniqueTx;
    for (unsigned int i = 0; i < vtx.size(); i++) {
        uniqueTx.insert(GetTxHash(i));
    }
    if (uniqueTx.size() != vtx.size())
        return state.DoS(100, error("CheckBlock() : duplicate transaction"));

    // Count signature operations using the legacy method and enforce the cap.
    unsigned int nSigOps = 0;
    BOOST_FOREACH(const CTransaction& tx, vtx)
    {
        nSigOps += tx.GetLegacySigOpCount();
    }
    if (nSigOps > MAX_BLOCK_SIGOPS)
        return state.DoS(100, error("CheckBlock() : out-of-bounds SigOpCount"));

    // Check merkle root against the value claimed in the header.
    if (fCheckMerkleRoot && hashMerkleRoot != hashComputedMerkleRoot)
        return state.DoS(100, error("CheckBlock() : hashMerkleRoot mismatch"));

    return true;
}
// Contextual validation of a block against its predecessor (PoW target,
// timestamp, transaction finality, checkpoints, version rules), followed by
// writing the block to disk and adding it to the block index.
// 'dbp' is non-NULL when the block already exists on disk
// (-reindex/-loadblock) and names its position there.
bool CBlock::AcceptBlock(CValidationState &state, CDiskBlockPos *dbp)
{
    // Check for duplicate
    uint256 hash = GetHash();
    if (mapBlockIndex.count(hash))
        return state.Invalid(error("AcceptBlock() : block already in mapBlockIndex"));
    // Get prev block index
    CBlockIndex* pindexPrev = NULL;
    int nHeight = 0;
    // The genesis block has no predecessor; all contextual checks are
    // skipped for it.
    if (hash != hashGenesisBlock) {
        map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashPrevBlock);
        if (mi == mapBlockIndex.end())
            return state.DoS(10, error("AcceptBlock() : prev block not found"));
        pindexPrev = (*mi).second;
        nHeight = pindexPrev->nHeight+1;
        // Check proof of work
        if (nBits != GetNextWorkRequired(pindexPrev, this))
            return state.DoS(100, error("AcceptBlock() : incorrect proof of work"));
        // Check timestamp against prev (median time past of recent blocks)
        if (GetBlockTime() <= pindexPrev->GetMedianTimePast())
            return state.Invalid(error("AcceptBlock() : block's timestamp is too early"));
        // Check that all transactions are finalized
        BOOST_FOREACH(const CTransaction& tx, vtx)
            if (!tx.IsFinal(nHeight, GetBlockTime()))
                return state.DoS(10, error("AcceptBlock() : contains a non-final transaction"));
        // Check that the block chain matches the known block chain up to a checkpoint
        if (!Checkpoints::CheckBlock(nHeight, hash))
            return state.DoS(100, error("AcceptBlock() : rejected by checkpoint lock-in at %d", nHeight));
        // Reject block.nVersion=1 blocks when 95% (75% on testnet) of the network has upgraded:
        if (nVersion < 2)
        {
            if ((!fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 950, 1000)) ||
                (fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 75, 100)))
            {
                return state.Invalid(error("AcceptBlock() : rejected nVersion=1 block"));
            }
        }
        // Enforce block.nVersion=2 rule that the coinbase starts with serialized block height
        // (BIP 34-style rule; NOTE: IsSuperMajority currently always returns
        // false, so neither version rule is active yet.)
        if (nVersion >= 2)
        {
            // if 750 of the last 1,000 blocks are version 2 or greater (51/100 if testnet):
            if ((!fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 750, 1000)) ||
                (fTestNet && CBlockIndex::IsSuperMajority(2, pindexPrev, 51, 100)))
            {
                CScript expect = CScript() << nHeight;
                if (vtx[0].vin[0].scriptSig.size() < expect.size() ||
                    !std::equal(expect.begin(), expect.end(), vtx[0].vin[0].scriptSig.begin()))
                    return state.DoS(100, error("AcceptBlock() : block height mismatch in coinbase"));
            }
        }
    }
    // Write block to history file
    try {
        unsigned int nBlockSize = ::GetSerializeSize(*this, SER_DISK, CLIENT_VERSION);
        CDiskBlockPos blockPos;
        if (dbp != NULL)
            blockPos = *dbp;
        // +8 presumably covers the per-block magic+length prefix stored on
        // disk — TODO confirm against WriteToDisk's framing.
        if (!FindBlockPos(state, blockPos, nBlockSize+8, nHeight, nTime, dbp != NULL))
            return error("AcceptBlock() : FindBlockPos failed");
        if (dbp == NULL)
            if (!WriteToDisk(blockPos))
                return state.Abort(_("Failed to write block"));
        if (!AddToBlockIndex(state, blockPos))
            return error("AcceptBlock() : AddToBlockIndex failed");
    } catch(std::runtime_error &e) {
        return state.Abort(_("System error: ") + e.what());
    }
    // Relay inventory, but don't relay old inventory during initial block download
    int nBlockEstimate = Checkpoints::GetTotalBlocksEstimate();
    if (hashBestChain == hash)
    {
        LOCK(cs_vNodes);
        BOOST_FOREACH(CNode* pnode, vNodes)
            // Only announce to peers close enough to the tip to care.
            if (nBestHeight > (pnode->nStartingHeight != -1 ? pnode->nStartingHeight - 2000 : nBlockEstimate))
                pnode->PushInventory(CInv(MSG_BLOCK, hash));
    }
    return true;
}
// Report whether at least nRequired of the nToCheck blocks ending at
// 'pstart' (walking backwards) have nVersion >= minVersion.
// NOTE: currently short-circuited to always return false — the v2 lock-in
// is disabled; everything after the first return is intentionally dead code
// kept for when the transition is re-enabled.
bool CBlockIndex::IsSuperMajority(int minVersion, const CBlockIndex* pstart, unsigned int nRequired, unsigned int nToCheck)
{
    // kripton: temporarily disable v2 block lockin until we are ready for v2 transition
    return false;
    unsigned int nFound = 0;
    for (unsigned int i = 0; i < nToCheck && nFound < nRequired && pstart != NULL; i++)
    {
        if (pstart->nVersion >= minVersion)
            ++nFound;
        pstart = pstart->pprev;
    }
    return (nFound >= nRequired);
}
// Top-level entry point for a newly received block ('pfrom' set when it came
// from a peer, 'dbp' set when loaded from disk, both NULL for a locally
// mined block): runs context-free checks, anti-spam heuristics beyond the
// last checkpoint, shunts parent-less blocks into the orphan pool, accepts
// the block, then recursively retries any orphans this block unblocks.
bool ProcessBlock(CValidationState &state, CNode* pfrom, CBlock* pblock, CDiskBlockPos *dbp)
{
    // Check for duplicate
    uint256 hash = pblock->GetHash();
    if (mapBlockIndex.count(hash))
        return state.Invalid(error("ProcessBlock() : already have block %d %s", mapBlockIndex[hash]->nHeight, hash.ToString().c_str()));
    if (mapOrphanBlocks.count(hash))
        return state.Invalid(error("ProcessBlock() : already have block (orphan) %s", hash.ToString().c_str()));
    // Preliminary checks
    if (!pblock->CheckBlock(state))
        return error("ProcessBlock() : CheckBlock FAILED");
    CBlockIndex* pcheckpoint = Checkpoints::GetLastCheckpoint(mapBlockIndex);
    if (pcheckpoint && pblock->hashPrevBlock != hashBestChain)
    {
        // Extra checks to prevent "fill up memory by spamming with bogus blocks"
        int64 deltaTime = pblock->GetBlockTime() - pcheckpoint->nTime;
        if (deltaTime < 0)
        {
            return state.DoS(100, error("ProcessBlock() : block with timestamp before last checkpoint"));
        }
        // Require at least the minimum work expected to have accumulated
        // since the checkpoint, given the elapsed time.
        CBigNum bnNewBlock;
        bnNewBlock.SetCompact(pblock->nBits);
        CBigNum bnRequired;
        bnRequired.SetCompact(ComputeMinWork(pcheckpoint->nBits, deltaTime));
        if (bnNewBlock > bnRequired) // a larger target means less work
        {
            return state.DoS(100, error("ProcessBlock() : block with too little proof-of-work"));
        }
    }
    // If we don't already have its previous block, shunt it off to holding area until we get it
    if (pblock->hashPrevBlock != 0 && !mapBlockIndex.count(pblock->hashPrevBlock))
    {
        printf("ProcessBlock: ORPHAN BLOCK, prev=%s\n", pblock->hashPrevBlock.ToString().c_str());
        // Accept orphans as long as there is a node to request its parents from
        if (pfrom) {
            // Heap copy owned by the orphan maps; freed when the orphan is
            // resolved below.
            CBlock* pblock2 = new CBlock(*pblock);
            mapOrphanBlocks.insert(make_pair(hash, pblock2));
            mapOrphanBlocksByPrev.insert(make_pair(pblock2->hashPrevBlock, pblock2));
            // Ask this guy to fill in what we're missing
            pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(pblock2));
        }
        return true;
    }
    // Store to disk
    if (!pblock->AcceptBlock(state, dbp))
        return error("ProcessBlock() : AcceptBlock FAILED");
    // Recursively process any orphan blocks that depended on this one
    // (breadth-first via a work queue rather than actual recursion).
    vector<uint256> vWorkQueue;
    vWorkQueue.push_back(hash);
    for (unsigned int i = 0; i < vWorkQueue.size(); i++)
    {
        uint256 hashPrev = vWorkQueue[i];
        for (multimap<uint256, CBlock*>::iterator mi = mapOrphanBlocksByPrev.lower_bound(hashPrev);
             mi != mapOrphanBlocksByPrev.upper_bound(hashPrev);
             ++mi)
        {
            CBlock* pblockOrphan = (*mi).second;
            // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan resolution (that is, feeding people an invalid block based on LegitBlockX in order to get anyone relaying LegitBlockX banned)
            CValidationState stateDummy;
            if (pblockOrphan->AcceptBlock(stateDummy))
                vWorkQueue.push_back(pblockOrphan->GetHash());
            mapOrphanBlocks.erase(pblockOrphan->GetHash());
            delete pblockOrphan;
        }
        mapOrphanBlocksByPrev.erase(hashPrev);
    }
    printf("ProcessBlock: ACCEPTED\n");
    return true;
}
// Build a CMerkleBlock from a full block: keep only the header, record which
// transactions match the bloom filter (also populating vMatchedTxn with
// (index, txid) pairs), and compress the txid set into a partial merkle tree
// proving the matched transactions.
CMerkleBlock::CMerkleBlock(const CBlock& block, CBloomFilter& filter)
{
    header = block.GetBlockHeader();

    vector<uint256> vTxHashes;
    vector<bool> vIsMatch;
    vIsMatch.reserve(block.vtx.size());
    vTxHashes.reserve(block.vtx.size());

    for (unsigned int nIndex = 0; nIndex < block.vtx.size(); nIndex++)
    {
        const uint256 txHash = block.vtx[nIndex].GetHash();
        // IsRelevantAndUpdate may also extend the filter as a side effect.
        const bool fMatched = filter.IsRelevantAndUpdate(block.vtx[nIndex], txHash);
        if (fMatched)
            vMatchedTxn.push_back(make_pair(nIndex, txHash));
        vIsMatch.push_back(fMatched);
        vTxHashes.push_back(txHash);
    }

    txn = CPartialMerkleTree(vTxHashes, vIsMatch);
}
// Compute the merkle hash of the node at (height, pos) over the txid list.
// Height-0 nodes are the txids themselves; inner nodes hash their two
// children, duplicating the left child when the right one would fall past
// the end of its row.
uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::vector<uint256> &vTxid) {
    if (height == 0)
        return vTxid[pos]; // leaf: the txid itself
    // The left child always exists.
    uint256 left = CalcHash(height-1, pos*2, vTxid);
    // The right child may be beyond the end of the row; reuse the left hash then.
    uint256 right = (pos*2+1 < CalcTreeWidth(height-1))
                        ? CalcHash(height-1, pos*2+1, vTxid)
                        : left;
    // Combine the two subhashes into this node's hash.
    return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
}
// Recursively record the flag bits and hashes that describe the partial
// merkle tree for the given match vector, starting at node (height, pos).
// Mirrored exactly by TraverseAndExtract on the receiving side.
void CPartialMerkleTree::TraverseAndBuild(int height, unsigned int pos, const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) {
    // Does any matched txid live in this node's subtree?
    bool fAnyMatchBelow = false;
    for (unsigned int idx = pos << height; idx < (pos+1) << height && idx < nTransactions; idx++)
        if (vMatch[idx])
            fAnyMatchBelow = true;
    // Emit one flag bit per visited node.
    vBits.push_back(fAnyMatchBelow);
    if (height == 0 || !fAnyMatchBelow) {
        // Leaf, or nothing interesting below: emit this node's hash and prune.
        vHash.push_back(CalcHash(height, pos, vTxid));
        return;
    }
    // Otherwise emit no hash here; it is implied by the children.
    TraverseAndBuild(height-1, pos*2, vTxid, vMatch);
    if (pos*2+1 < CalcTreeWidth(height-1))
        TraverseAndBuild(height-1, pos*2+1, vTxid, vMatch);
}
// Recursively consume flag bits and hashes (mirroring TraverseAndBuild) to
// recompute the hash of node (height, pos), appending any matched txids to
// 'vMatch'. nBitsUsed/nHashUsed are cursors into vBits/vHash, advanced
// in place as the tree is walked. On malformed input, fBad is set and 0 is
// returned; the caller checks fBad after the full traversal.
uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, unsigned int &nBitsUsed, unsigned int &nHashUsed, std::vector<uint256> &vMatch) {
    if (nBitsUsed >= vBits.size()) {
        // overflowed the bits array - failure
        fBad = true;
        return 0;
    }
    bool fParentOfMatch = vBits[nBitsUsed++];
    if (height==0 || !fParentOfMatch) {
        // if at height 0, or nothing interesting below, use stored hash and do not descend
        if (nHashUsed >= vHash.size()) {
            // overflowed the hash array - failure
            fBad = true;
            return 0;
        }
        const uint256 &hash = vHash[nHashUsed++];
        if (height==0 && fParentOfMatch) // in case of height 0, we have a matched txid
            vMatch.push_back(hash);
        return hash;
    } else {
        // otherwise, descend into the subtrees to extract matched txids and hashes
        uint256 left = TraverseAndExtract(height-1, pos*2, nBitsUsed, nHashUsed, vMatch), right;
        // A missing right child is represented by duplicating the left hash,
        // matching CalcHash's construction.
        if (pos*2+1 < CalcTreeWidth(height-1))
            right = TraverseAndExtract(height-1, pos*2+1, nBitsUsed, nHashUsed, vMatch);
        else
            right = left;
        // and combine them before returning
        return Hash(BEGIN(left), END(left), BEGIN(right), END(right));
    }
}
// Construct a partial merkle tree over 'vTxid' that proves inclusion of the
// txids whose corresponding entry in 'vMatch' is true.
CPartialMerkleTree::CPartialMerkleTree(const std::vector<uint256> &vTxid, const std::vector<bool> &vMatch) : nTransactions(vTxid.size()), fBad(false) {
    // Start from a clean slate.
    vBits.clear();
    vHash.clear();
    // The root sits at the first level whose row shrinks to a single node.
    int nRootHeight = 0;
    while (CalcTreeWidth(nRootHeight) > 1)
        ++nRootHeight;
    // Record flag bits and hashes top-down from the root.
    TraverseAndBuild(nRootHeight, 0, vTxid, vMatch);
}
// Default constructor: an empty tree flagged bad, so ExtractMatches() fails
// until real contents are loaded (presumably via deserialization — confirm).
CPartialMerkleTree::CPartialMerkleTree() : nTransactions(0), fBad(true) {}
// Validate the (possibly attacker-supplied) partial merkle tree and extract
// the matched txids into 'vMatch'. Returns the recomputed merkle root on
// success — the caller compares it against the block header — or 0 on any
// malformation.
uint256 CPartialMerkleTree::ExtractMatches(std::vector<uint256> &vMatch) {
    vMatch.clear();
    // An empty set will not work
    if (nTransactions == 0)
        return 0;
    // check for excessively high numbers of transactions
    if (nTransactions > MAX_BLOCK_SIZE / 60) // 60 is the lower bound for the size of a serialized CTransaction
        return 0;
    // there can never be more hashes provided than one for every txid
    if (vHash.size() > nTransactions)
        return 0;
    // there must be at least one bit per node in the partial tree, and at least one node per hash
    if (vBits.size() < vHash.size())
        return 0;
    // calculate height of tree
    int nHeight = 0;
    while (CalcTreeWidth(nHeight) > 1)
        nHeight++;
    // traverse the partial tree
    unsigned int nBitsUsed = 0, nHashUsed = 0;
    uint256 hashMerkleRoot = TraverseAndExtract(nHeight, 0, nBitsUsed, nHashUsed, vMatch);
    // verify that no problems occured during the tree traversal
    if (fBad)
        return 0;
    // verify that all bits were consumed (except for the padding caused by serializing it as a byte sequence)
    if ((nBitsUsed+7)/8 != (vBits.size()+7)/8)
        return 0;
    // verify that all hashes were consumed
    if (nHashUsed != vHash.size())
        return 0;
    return hashMerkleRoot;
}
// Fatal-error path: record the message (strMiscWarning is read by
// GetWarnings()), log it, surface it to the user, and begin an orderly
// shutdown. Always returns false so callers can 'return AbortNode(...)'.
bool AbortNode(const std::string &strMessage) {
    strMiscWarning = strMessage;
    printf("*** %s\n", strMessage.c_str());
    uiInterface.ThreadSafeMessageBox(strMessage, "", CClientUIInterface::MSG_ERROR);
    StartShutdown();
    return false;
}
// Verify the data directory has at least nMinDiskSpace bytes free beyond
// 'nAdditionalBytes'; if not, abort the node and return false.
bool CheckDiskSpace(uint64 nAdditionalBytes)
{
    const uint64 nAvailable = filesystem::space(GetDataDir()).available;
    // Keep a safety margin of nMinDiskSpace bytes (currently 50MB).
    if (nAvailable >= nMinDiskSpace + nAdditionalBytes)
        return true;
    return AbortNode(_("Error: Disk space is low!"));
}
// Guards access to the two "current block file" globals below
// (see FindBlockPos/FindUndoPos).
CCriticalSection cs_LastBlockFile;
// Cached CBlockFileInfo for the block file currently being appended to.
CBlockFileInfo infoLastBlockFile;
// Number of the blk?????.dat file currently being appended to.
int nLastBlockFile = 0;
FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fReadOnly)
{
if (pos.IsNull())
return NULL;
boost::filesystem::path path = GetDataDir() / "blocks" / strprintf("%s%05u.dat", prefix, pos.nFile);
boost::filesystem::create_directories(path.parent_path());
FILE* file = fopen(path.string().c_str(), "rb+");
if (!file && !fReadOnly)
file = fopen(path.string().c_str(), "wb+");
if (!file) {
printf("Unable to open file %s\n", path.string().c_str());
return NULL;
}
if (pos.nPos) {
if (fseek(file, pos.nPos, SEEK_SET)) {
printf("Unable to seek to position %u of %s\n", pos.nPos, path.string().c_str());
fclose(file);
return NULL;
}
}
return file;
}
// Open the block data file ("blk*.dat") at 'pos'; see OpenDiskFile for semantics.
FILE* OpenBlockFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "blk", fReadOnly);
}
// Open the undo data file ("rev*.dat") at 'pos'; see OpenDiskFile for semantics.
FILE* OpenUndoFile(const CDiskBlockPos &pos, bool fReadOnly) {
    return OpenDiskFile(pos, "rev", fReadOnly);
}
// Look up — or lazily create — the CBlockIndex for 'hash' in mapBlockIndex.
// A zero hash maps to NULL. Newly created entries get phashBlock pointing at
// the map key, whose storage outlives the entry.
CBlockIndex * InsertBlockIndex(uint256 hash)
{
    if (hash == 0)
        return NULL;
    // Reuse an existing entry when present.
    map<uint256, CBlockIndex*>::iterator it = mapBlockIndex.find(hash);
    if (it != mapBlockIndex.end())
        return it->second;
    // Otherwise allocate a fresh, empty index entry.
    CBlockIndex* pindexNew = new CBlockIndex();
    if (!pindexNew)
        throw runtime_error("LoadBlockIndex() : new CBlockIndex failed");
    it = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
    pindexNew->phashBlock = &(it->first);
    return pindexNew;
}
// Load the block index from the on-disk database into mapBlockIndex,
// recompute derived per-index fields (cumulative chain work and tx count),
// restore file/reindex/txindex flags, and rebuild the best-chain pointers
// and globals. Returns true on success, including the fresh-database case
// where no best block exists yet.
bool static LoadBlockIndexDB()
{
    if (!pblocktree->LoadBlockIndexGuts())
        return false;
    boost::this_thread::interruption_point();
    // Calculate nChainWork
    vector<pair<int, CBlockIndex*> > vSortedByHeight;
    vSortedByHeight.reserve(mapBlockIndex.size());
    BOOST_FOREACH(const PAIRTYPE(uint256, CBlockIndex*)& item, mapBlockIndex)
    {
        CBlockIndex* pindex = item.second;
        vSortedByHeight.push_back(make_pair(pindex->nHeight, pindex));
    }
    // Sorting by height guarantees every parent is processed before its
    // children, so the pprev-> fields read below are already populated.
    sort(vSortedByHeight.begin(), vSortedByHeight.end());
    BOOST_FOREACH(const PAIRTYPE(int, CBlockIndex*)& item, vSortedByHeight)
    {
        CBlockIndex* pindex = item.second;
        pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + pindex->GetBlockWork().getuint256();
        pindex->nChainTx = (pindex->pprev ? pindex->pprev->nChainTx : 0) + pindex->nTx;
        if ((pindex->nStatus & BLOCK_VALID_MASK) >= BLOCK_VALID_TRANSACTIONS && !(pindex->nStatus & BLOCK_FAILED_MASK))
            setBlockIndexValid.insert(pindex);
    }
    // Load block file info
    pblocktree->ReadLastBlockFile(nLastBlockFile);
    printf("LoadBlockIndexDB(): last block file = %i\n", nLastBlockFile);
    if (pblocktree->ReadBlockFileInfo(nLastBlockFile, infoLastBlockFile))
        printf("LoadBlockIndexDB(): last block file info: %s\n", infoLastBlockFile.ToString().c_str());
    // Load nBestInvalidWork, OK if it doesn't exist
    CBigNum bnBestInvalidWork;
    pblocktree->ReadBestInvalidWork(bnBestInvalidWork);
    nBestInvalidWork = bnBestInvalidWork.getuint256();
    // Check whether we need to continue reindexing
    bool fReindexing = false;
    pblocktree->ReadReindexing(fReindexing);
    fReindex |= fReindexing;
    // Check whether we have a transaction index
    pblocktree->ReadFlag("txindex", fTxIndex);
    printf("LoadBlockIndexDB(): transaction index %s\n", fTxIndex ? "enabled" : "disabled");
    // Load hashBestChain pointer to end of best chain
    pindexBest = pcoinsTip->GetBestBlock();
    if (pindexBest == NULL)
        return true;    // fresh database: nothing more to restore
    hashBestChain = pindexBest->GetBlockHash();
    nBestHeight = pindexBest->nHeight;
    nBestChainWork = pindexBest->nChainWork;
    // set 'next' pointers in best chain
    CBlockIndex *pindex = pindexBest;
    while(pindex != NULL && pindex->pprev != NULL) {
        CBlockIndex *pindexPrev = pindex->pprev;
        pindexPrev->pnext = pindex;
        pindex = pindexPrev;
    }
    printf("LoadBlockIndexDB(): hashBestChain=%s height=%d date=%s\n",
        hashBestChain.ToString().c_str(), nBestHeight,
        DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexBest->GetBlockTime()).c_str());
    return true;
}
// Verify the last -checkblocks blocks of the best chain at -checklevel
// (0..4): 0 = read from disk, 1 = CheckBlock, 2 = undo data readable,
// 3 = memory-only disconnect of tip blocks is consistent, 4 = reconnect the
// disconnected blocks. All work happens on a throwaway CCoinsViewCache so
// the real coin state is untouched.
bool VerifyDB() {
    if (pindexBest == NULL || pindexBest->pprev == NULL)
        return true;    // nothing but (at most) the genesis block — nothing to verify
    // Verify blocks in the best chain
    int nCheckLevel = GetArg("-checklevel", 3);
    int nCheckDepth = GetArg( "-checkblocks", 288);
    if (nCheckDepth == 0)
        nCheckDepth = 1000000000; // suffices until the year 19000
    if (nCheckDepth > nBestHeight)
        nCheckDepth = nBestHeight;
    nCheckLevel = std::max(0, std::min(4, nCheckLevel));
    printf("Verifying last %i blocks at level %i\n", nCheckDepth, nCheckLevel);
    CCoinsViewCache coins(*pcoinsTip, true);
    CBlockIndex* pindexState = pindexBest;      // deepest block successfully disconnected to
    CBlockIndex* pindexFailure = NULL;          // shallowest block with an inconsistency
    int nGoodTransactions = 0;
    CValidationState state;
    for (CBlockIndex* pindex = pindexBest; pindex && pindex->pprev; pindex = pindex->pprev)
    {
        boost::this_thread::interruption_point();
        if (pindex->nHeight < nBestHeight-nCheckDepth)
            break;
        CBlock block;
        // check level 0: read from disk
        if (!block.ReadFromDisk(pindex))
            return error("VerifyDB() : *** block.ReadFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
        // check level 1: verify block validity
        if (nCheckLevel >= 1 && !block.CheckBlock(state))
            return error("VerifyDB() : *** found bad block at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
        // check level 2: verify undo validity
        if (nCheckLevel >= 2 && pindex) {
            CBlockUndo undo;
            CDiskBlockPos pos = pindex->GetUndoPos();
            if (!pos.IsNull()) {
                if (!undo.ReadFromDisk(pos, pindex->pprev->GetBlockHash()))
                    return error("VerifyDB() : *** found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
            }
        }
        // check level 3: check for inconsistencies during memory-only disconnect of tip blocks
        // (only while the combined cache stays within budget)
        if (nCheckLevel >= 3 && pindex == pindexState && (coins.GetCacheSize() + pcoinsTip->GetCacheSize()) <= 2*nCoinCacheSize + 32000) {
            bool fClean = true;
            if (!block.DisconnectBlock(state, pindex, coins, &fClean))
                return error("VerifyDB() : *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
            pindexState = pindex->pprev;
            if (!fClean) {
                // Non-fatal inconsistency: remember the block and reset the
                // count of transactions verified clean beneath it.
                nGoodTransactions = 0;
                pindexFailure = pindex;
            } else
                nGoodTransactions += block.vtx.size();
        }
    }
    if (pindexFailure)
        return error("VerifyDB() : *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", pindexBest->nHeight - pindexFailure->nHeight + 1, nGoodTransactions);
    // check level 4: try reconnecting blocks
    if (nCheckLevel >= 4) {
        CBlockIndex *pindex = pindexState;
        while (pindex != pindexBest) {
            boost::this_thread::interruption_point();
            pindex = pindex->pnext;
            CBlock block;
            if (!block.ReadFromDisk(pindex))
                return error("VerifyDB() : *** block.ReadFromDisk failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
            if (!block.ConnectBlock(state, pindex, coins))
                return error("VerifyDB() : *** found unconnectable block at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
        }
    }
    printf("No coin database inconsistencies in last %i blocks (%i transactions)\n", pindexBest->nHeight - pindexState->nHeight, nGoodTransactions);
    return true;
}
// Reset all in-memory chain state to its pre-load condition.
// Does NOT touch the on-disk databases.
// NOTE(review): mapBlockIndex entries are heap-allocated and not deleted
// here — presumably acceptable because this runs at shutdown; confirm.
void UnloadBlockIndex()
{
    mapBlockIndex.clear();
    setBlockIndexValid.clear();
    pindexGenesisBlock = NULL;
    nBestHeight = 0;
    nBestChainWork = 0;
    nBestInvalidWork = 0;
    hashBestChain = 0;
    pindexBest = NULL;
}
// Select testnet network parameters if requested, then load the block index
// from the databases (skipped entirely while a -reindex is in progress,
// since the index is being rebuilt from the block files).
bool LoadBlockIndex()
{
    if (fTestNet)
    {
        // Testnet uses different network magic bytes and its own genesis hash.
        pchMessageStart[0] = 0xfc;
        pchMessageStart[1] = 0xc1;
        pchMessageStart[2] = 0xb7;
        pchMessageStart[3] = 0xdc;
        hashGenesisBlock = uint256("0x24cd6a8ebe587629588046cad81b61108fa32ee3be54c079a45d868e2adc7e6a");
    }
    //
    // Load block index from databases
    //
    if (!fReindex && !LoadBlockIndexDB())
        return false;
    return true;
}
// Initialize a fresh block database: record the -txindex setting and, unless
// reindexing (which reuses the data already on disk), construct the genesis
// block from hard-coded parameters, verify its hashes against the expected
// constants, and write it to disk/index.
bool InitBlockIndex() {
    // Check whether we're already initialized
    if (pindexGenesisBlock != NULL)
        return true;
    // Use the provided setting for -txindex in the new database
    fTxIndex = GetBoolArg("-txindex", false);
    pblocktree->WriteFlag("txindex", fTxIndex);
    printf("Initializing databases...\n");
    // Only add the genesis block if not reindexing (in which case we reuse the one already on disk)
    if (!fReindex) {
        // Genesis Block:
        // CBlock(hash=12a765e31ffd4059bada, PoW=0000050c34a64b415b6b, ver=1, hashPrevBlock=00000000000000000000, hashMerkleRoot=97ddfbbae6, nTime=1317972665, nBits=1e0ffff0, nNonce=2084524493, vtx=1)
        //   CTransaction(hash=97ddfbbae6, ver=1, vin.size=1, vout.size=1, nLockTime=0)
        //     CTxIn(COutPoint(0000000000, -1), coinbase 04ffff001d0104404e592054696d65732030352f4f63742f32303131205374657665204a6f62732c204170706c65e280997320566973696f6e6172792c2044696573206174203536)
        //     CTxOut(nValue=50.00000000, scriptPubKey=040184710fa689ad5023690c80f3a4)
        //   vMerkleTree: 97ddfbbae6
        // Genesis block: a single coinbase transaction embedding the
        // timestamp string, paying 50 coins to a fixed public key.
        const char* pszTimestamp = "Another illustrious Build-a-Coin cryptocurrency!";
        CTransaction txNew;
        txNew.vin.resize(1);
        txNew.vout.resize(1);
        txNew.vin[0].scriptSig = CScript() << 486604799 << CBigNum(4) << vector<unsigned char>((const unsigned char*)pszTimestamp, (const unsigned char*)pszTimestamp + strlen(pszTimestamp));
        txNew.vout[0].nValue = 5000000000;
        txNew.vout[0].scriptPubKey = CScript() << ParseHex("fc22a10cce5a538b978b6975b683d219d8ba287626776582997a3f516c9ece5cfed3767650fd50bd65bcb93048f472d109028d2a67668bdbf2e1a7661789845f10") << OP_CHECKSIG;
        CBlock block;
        block.vtx.push_back(txNew);
        block.hashPrevBlock = 0;
        block.hashMerkleRoot = block.BuildMerkleTree();
        block.nVersion = 1;
        block.nTime = 1515424629;
        block.nBits = 504365644;
        block.nNonce = 1491017094;
        if (fTestNet)
        {
            // Testnet shares the genesis parameters except for the nonce.
            block.nTime = 1515424629;
            block.nNonce = 259448130;
        }
        //// debug print
        uint256 hash = block.GetHash();
        printf("%s\n", hash.ToString().c_str());
        printf("%s\n", hashGenesisBlock.ToString().c_str());
        printf("%s\n", block.hashMerkleRoot.ToString().c_str());
        // Hard failure if the constructed genesis block doesn't match the
        // compiled-in constants — the binary would be useless otherwise.
        assert(block.hashMerkleRoot == uint256("0x14379b6ab95fe516a41fd496ae11ccff41a162bcde2edf610b011adb10d70c5b"));
        block.print();
        assert(hash == hashGenesisBlock);
        // Start new block file
        try {
            unsigned int nBlockSize = ::GetSerializeSize(block, SER_DISK, CLIENT_VERSION);
            CDiskBlockPos blockPos;
            CValidationState state;
            if (!FindBlockPos(state, blockPos, nBlockSize+8, 0, block.nTime))
                return error("LoadBlockIndex() : FindBlockPos failed");
            if (!block.WriteToDisk(blockPos))
                return error("LoadBlockIndex() : writing genesis block to disk failed");
            if (!block.AddToBlockIndex(state, blockPos))
                return error("LoadBlockIndex() : genesis block not accepted");
        } catch(std::runtime_error &e) {
            return error("LoadBlockIndex() : failed to initialize block database: %s", e.what());
        }
    }
    return true;
}
void PrintBlockTree()
{
// pre-compute tree structure
map<CBlockIndex*, vector<CBlockIndex*> > mapNext;
for (map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.begin(); mi != mapBlockIndex.end(); ++mi)
{
CBlockIndex* pindex = (*mi).second;
mapNext[pindex->pprev].push_back(pindex);
// test
//while (rand() % 3 == 0)
// mapNext[pindex->pprev].push_back(pindex);
}
vector<pair<int, CBlockIndex*> > vStack;
vStack.push_back(make_pair(0, pindexGenesisBlock));
int nPrevCol = 0;
while (!vStack.empty())
{
int nCol = vStack.back().first;
CBlockIndex* pindex = vStack.back().second;
vStack.pop_back();
// print split or gap
if (nCol > nPrevCol)
{
for (int i = 0; i < nCol-1; i++)
printf("| ");
printf("|\\\n");
}
else if (nCol < nPrevCol)
{
for (int i = 0; i < nCol; i++)
printf("| ");
printf("|\n");
}
nPrevCol = nCol;
// print columns
for (int i = 0; i < nCol; i++)
printf("| ");
// print item
CBlock block;
block.ReadFromDisk(pindex);
printf("%d (blk%05u.dat:0x%x) %s tx %"PRIszu"",
pindex->nHeight,
pindex->GetBlockPos().nFile, pindex->GetBlockPos().nPos,
DateTimeStrFormat("%Y-%m-%d %H:%M:%S", block.GetBlockTime()).c_str(),
block.vtx.size());
PrintWallets(block);
// put the main time-chain first
vector<CBlockIndex*>& vNext = mapNext[pindex];
for (unsigned int i = 0; i < vNext.size(); i++)
{
if (vNext[i]->pnext)
{
swap(vNext[0], vNext[i]);
break;
}
}
// iterate children
for (unsigned int i = 0; i < vNext.size(); i++)
vStack.push_back(make_pair(nCol+i, vNext[i]));
}
}
// Import blocks from an external file (e.g. a bootstrap file or a blk*.dat
// during -reindex). Scans the stream for the network magic bytes, then
// deserializes each candidate block and hands it to ProcessBlock.
//   fileIn - open file handle; ownership is taken (closed via CBufferedFile
//            path / fclose before returning).
//   dbp    - if non-NULL, identifies the block file being reindexed; used to
//            skip the already-indexed prefix and to record each block's
//            position for the index.
// Returns true if at least one block was successfully loaded.
bool LoadExternalBlockFile(FILE* fileIn, CDiskBlockPos *dbp)
{
    int64 nStart = GetTimeMillis();

    int nLoaded = 0;
    try {
        // Buffered reader that allows rewinding up to MAX_BLOCK_SIZE+8 bytes,
        // so a failed parse can resume the scan one byte further on.
        CBufferedFile blkdat(fileIn, 2*MAX_BLOCK_SIZE, MAX_BLOCK_SIZE+8, SER_DISK, CLIENT_VERSION);
        uint64 nStartByte = 0;
        if (dbp) {
            // (try to) skip already indexed part
            CBlockFileInfo info;
            if (pblocktree->ReadBlockFileInfo(dbp->nFile, info)) {
                nStartByte = info.nSize;
                blkdat.Seek(info.nSize);
            }
        }
        uint64 nRewind = blkdat.GetPos();
        while (blkdat.good() && !blkdat.eof()) {
            boost::this_thread::interruption_point();

            blkdat.SetPos(nRewind);
            nRewind++; // start one byte further next time, in case of failure
            blkdat.SetLimit(); // remove former limit
            unsigned int nSize = 0;
            try {
                // locate a header: scan for the first magic byte, then check
                // the full 4-byte message start.
                unsigned char buf[4];
                blkdat.FindByte(pchMessageStart[0]);
                nRewind = blkdat.GetPos()+1;
                blkdat >> FLATDATA(buf);
                if (memcmp(buf, pchMessageStart, 4))
                    continue;
                // read size; 80 bytes is the minimum plausible block (header only)
                blkdat >> nSize;
                if (nSize < 80 || nSize > MAX_BLOCK_SIZE)
                    continue;
            } catch (std::exception &e) {
                // no valid block header found; don't complain
                break;
            }
            try {
                // read block, limiting the stream so a corrupt length field
                // cannot make us read past the announced size
                uint64 nBlockPos = blkdat.GetPos();
                blkdat.SetLimit(nBlockPos + nSize);
                CBlock block;
                blkdat >> block;
                nRewind = blkdat.GetPos();

                // process block (only past the already-indexed prefix)
                if (nBlockPos >= nStartByte) {
                    LOCK(cs_main);
                    if (dbp)
                        dbp->nPos = nBlockPos;
                    CValidationState state;
                    if (ProcessBlock(state, NULL, &block, dbp))
                        nLoaded++;
                    if (state.IsError())
                        break;
                }
            } catch (std::exception &e) {
                // truncated/corrupt block: keep scanning from nRewind
                printf("%s() : Deserialize or I/O error caught during load\n", __PRETTY_FUNCTION__);
            }
        }
        fclose(fileIn);
    } catch(std::runtime_error &e) {
        AbortNode(_("Error: system error: ") + e.what());
    }
    if (nLoaded > 0)
        printf("Loaded %i blocks from external file in %"PRI64d"ms\n", nLoaded, GetTimeMillis() - nStart);
    return nLoaded > 0;
}
//////////////////////////////////////////////////////////////////////////////
//
// CAlert
//
extern map<uint256, CAlert> mapAlerts;
extern CCriticalSection cs_mapAlerts;
// Collect the highest-priority active warning.
//   strFor - "statusbar" returns the UI status-bar text,
//            "rpc" returns the text for the RPC "errors" field.
// Any other value is a programming error (assert).
string GetWarnings(string strFor)
{
    int nHighestPriority = 0;
    string strStatusBar;
    string strRPC;

    if (GetBoolArg("-testsafemode"))
        strRPC = "test";

    // Pre-release builds always carry a baseline warning.
    if (!CLIENT_VERSION_IS_RELEASE)
        strStatusBar = _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications");

    // Miscellaneous runtime warnings (disk space, clock problems, ...).
    if (strMiscWarning != "")
    {
        nHighestPriority = 1000;
        strStatusBar = strMiscWarning;
    }

    // A longer invalid proof-of-work chain suggests we may be on a fork.
    if (pindexBest && nBestInvalidWork > nBestChainWork + (pindexBest->GetBlockWork() * 6).getuint256())
    {
        nHighestPriority = 2000;
        strStatusBar = strRPC = _("Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.");
    }

    // Network alerts override anything of lower priority.
    {
        LOCK(cs_mapAlerts);
        BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
        {
            const CAlert& alert = item.second;
            if (alert.AppliesToMe() && alert.nPriority > nHighestPriority)
            {
                nHighestPriority = alert.nPriority;
                strStatusBar = alert.strStatusBar;
            }
        }
    }

    if (strFor == "statusbar")
        return strStatusBar;
    if (strFor == "rpc")
        return strRPC;
    assert(!"GetWarnings() : invalid parameter");
    return "error";
}
//////////////////////////////////////////////////////////////////////////////
//
// Messages
//
bool static AlreadyHave(const CInv& inv)
{
switch (inv.type)
{
case MSG_TX:
{
bool txInMap = false;
{
LOCK(mempool.cs);
txInMap = mempool.exists(inv.hash);
}
return txInMap || mapOrphanTransactions.count(inv.hash) ||
pcoinsTip->HaveCoins(inv.hash);
}
case MSG_BLOCK:
return mapBlockIndex.count(inv.hash) ||
mapOrphanBlocks.count(inv.hash);
}
// Don't know what it is, just say we already got one
return true;
}
// The message start string is designed to be unlikely to occur in normal data.
// The characters are rarely used upper ASCII, not valid as UTF-8, and produce
// a large 4-byte int at any alignment.
// Used both to frame P2P messages on the wire (see ProcessMessages) and to
// locate blocks when scanning blk*.dat files (see LoadExternalBlockFile).
unsigned char pchMessageStart[4] = { 0xfb, 0xc0, 0xb6, 0xdb }; // kripton: increase each by adding 2 to bitcoin's value.
// Service the queued "getdata" requests of one peer: blocks (full or
// bloom-filtered merkle blocks) are served from disk, transactions from the
// relay map or the mempool. Stops early when the peer's send buffer fills
// up, or when the peer has not consumed enough of the blocks we already sent
// it. Anything that could not be served is reported back via "notfound".
void static ProcessGetData(CNode* pfrom)
{
    std::deque<CInv>::iterator it = pfrom->vRecvGetData.begin();

    vector<CInv> vNotFound;

    while (it != pfrom->vRecvGetData.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        // Don't waste work on slow peers until they catch up on the blocks we
        // give them. 80 bytes is just the size of a block header - obviously
        // the minimum we might return.
        if (pfrom->nBlocksRequested * 80 > pfrom->nSendBytes)
            break;

        const CInv &inv = *it;
        {
            boost::this_thread::interruption_point();
            it++;

            if (inv.type == MSG_BLOCK || inv.type == MSG_FILTERED_BLOCK)
            {
                // Send block from disk
                map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(inv.hash);
                pfrom->nBlocksRequested++;
                if (mi != mapBlockIndex.end())
                {
                    CBlock block;
                    block.ReadFromDisk((*mi).second);
                    if (inv.type == MSG_BLOCK)
                        pfrom->PushMessage("block", block);
                    else // MSG_FILTERED_BLOCK)
                    {
                        LOCK(pfrom->cs_filter);
                        if (pfrom->pfilter)
                        {
                            CMerkleBlock merkleBlock(block, *pfrom->pfilter);
                            pfrom->PushMessage("merkleblock", merkleBlock);
                            // CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
                            // This avoids hurting performance by pointlessly requiring a round-trip
                            // Note that there is currently no way for a node to request any single transactions we didnt send here -
                            // they must either disconnect and retry or request the full block.
                            // Thus, the protocol spec specified allows for us to provide duplicate txn here,
                            // however we MUST always provide at least what the remote peer needs
                            typedef std::pair<unsigned int, uint256> PairType;
                            BOOST_FOREACH(PairType& pair, merkleBlock.vMatchedTxn)
                                if (!pfrom->setInventoryKnown.count(CInv(MSG_TX, pair.second)))
                                    pfrom->PushMessage("tx", block.vtx[pair.first]);
                        }
                        // else
                            // no response
                    }

                    // Trigger them to send a getblocks request for the next batch of inventory
                    if (inv.hash == pfrom->hashContinue)
                    {
                        // Bypass PushInventory, this must send even if redundant,
                        // and we want it right after the last block so they don't
                        // wait for other stuff first.
                        vector<CInv> vInv;
                        vInv.push_back(CInv(MSG_BLOCK, hashBestChain));
                        pfrom->PushMessage("inv", vInv);
                        pfrom->hashContinue = 0;
                    }
                }
            }
            else if (inv.IsKnownType())
            {
                // Send stream from relay memory
                bool pushed = false;
                {
                    LOCK(cs_mapRelay);
                    map<CInv, CDataStream>::iterator mi = mapRelay.find(inv);
                    if (mi != mapRelay.end()) {
                        pfrom->PushMessage(inv.GetCommand(), (*mi).second);
                        pushed = true;
                    }
                }
                if (!pushed && inv.type == MSG_TX) {
                    // Not in the relay map; fall back to the mempool.
                    LOCK(mempool.cs);
                    if (mempool.exists(inv.hash)) {
                        CTransaction tx = mempool.lookup(inv.hash);
                        CDataStream ss(SER_NETWORK, PROTOCOL_VERSION);
                        ss.reserve(1000);
                        ss << tx;
                        pfrom->PushMessage("tx", ss);
                        pushed = true;
                    }
                }
                if (!pushed) {
                    vNotFound.push_back(inv);
                }
            }

            // Track requests for our stuff.
            Inventory(inv.hash);
        }
    }

    pfrom->vRecvGetData.erase(pfrom->vRecvGetData.begin(), it);

    if (!vNotFound.empty()) {
        // Let the peer know that we didn't find what it asked for, so it doesn't
        // have to wait around forever. Currently only SPV clients actually care
        // about this message: it's needed when they are recursively walking the
        // dependencies of relevant unconfirmed transactions. SPV clients want to
        // do that because they want to know about (and store and rebroadcast and
        // risk analyze) the dependencies of transactions relevant to them, without
        // having to download the entire memory pool.
        pfrom->PushMessage("notfound", vNotFound);
    }
}
// Handle one fully framed and checksum-verified P2P message from a peer.
//   pfrom      - the peer the message came from
//   strCommand - the command string from the message header
//   vRecv      - the payload stream, positioned at its start
// Returns false on protocol violations (the caller logs the failure; the
// peer may also have been flagged for disconnect or penalized via
// Misbehaving()).
//
// BUG FIX: in the "tx" handler's orphan-resolution loop this previously
// called tx.AcceptToMemoryPool(stateDummy, ...) — re-submitting the parent
// transaction (which fails as a duplicate) instead of the orphan. As a
// result orphans were never accepted, were relayed without being in the
// mempool, and were then erased as "invalid". It now calls
// orphanTx.AcceptToMemoryPool(...), matching upstream Bitcoin.
bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv)
{
    RandAddSeedPerfmon();
    if (fDebug)
        printf("received: %s (%"PRIszu" bytes)\n", strCommand.c_str(), vRecv.size());
    if (mapArgs.count("-dropmessagestest") && GetRand(atoi(mapArgs["-dropmessagestest"])) == 0)
    {
        printf("dropmessagestest DROPPING RECV MESSAGE\n");
        return true;
    }

    if (strCommand == "version")
    {
        // Each connection can only send one version message
        if (pfrom->nVersion != 0)
        {
            pfrom->Misbehaving(1);
            return false;
        }

        int64 nTime;
        CAddress addrMe;
        CAddress addrFrom;
        uint64 nNonce = 1;
        vRecv >> pfrom->nVersion >> pfrom->nServices >> nTime >> addrMe;
        if (pfrom->nVersion < MIN_PROTO_VERSION)
        {
            // Since February 20, 2012, the protocol is initiated at version 209,
            // and earlier versions are no longer supported
            printf("partner %s using obsolete version %i; disconnecting\n", pfrom->addr.ToString().c_str(), pfrom->nVersion);
            pfrom->fDisconnect = true;
            return false;
        }

        if (pfrom->nVersion == 10300)
            pfrom->nVersion = 300;
        // Remaining version fields are optional for older peers.
        if (!vRecv.empty())
            vRecv >> addrFrom >> nNonce;
        if (!vRecv.empty())
            vRecv >> pfrom->strSubVer;
        if (!vRecv.empty())
            vRecv >> pfrom->nStartingHeight;
        if (!vRecv.empty())
            vRecv >> pfrom->fRelayTxes; // set to true after we get the first filter* message
        else
            pfrom->fRelayTxes = true;

        if (pfrom->fInbound && addrMe.IsRoutable())
        {
            pfrom->addrLocal = addrMe;
            SeenLocal(addrMe);
        }

        // Disconnect if we connected to ourself
        if (nNonce == nLocalHostNonce && nNonce > 1)
        {
            printf("connected to self at %s, disconnecting\n", pfrom->addr.ToString().c_str());
            pfrom->fDisconnect = true;
            return true;
        }

        // Be shy and don't send version until we hear
        if (pfrom->fInbound)
            pfrom->PushVersion();

        pfrom->fClient = !(pfrom->nServices & NODE_NETWORK);

        AddTimeData(pfrom->addr, nTime);

        // Change version
        pfrom->PushMessage("verack");
        pfrom->ssSend.SetVersion(min(pfrom->nVersion, PROTOCOL_VERSION));

        if (!pfrom->fInbound)
        {
            // Advertise our address
            if (!fNoListen && !IsInitialBlockDownload())
            {
                CAddress addr = GetLocalAddress(&pfrom->addr);
                if (addr.IsRoutable())
                    pfrom->PushAddress(addr);
            }

            // Get recent addresses
            if (pfrom->fOneShot || pfrom->nVersion >= CADDR_TIME_VERSION || addrman.size() < 1000)
            {
                pfrom->PushMessage("getaddr");
                pfrom->fGetAddr = true;
            }
            addrman.Good(pfrom->addr);
        } else {
            if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
            {
                addrman.Add(addrFrom, addrFrom);
                addrman.Good(addrFrom);
            }
        }

        // Relay alerts
        {
            LOCK(cs_mapAlerts);
            BOOST_FOREACH(PAIRTYPE(const uint256, CAlert)& item, mapAlerts)
                item.second.RelayTo(pfrom);
        }

        pfrom->fSuccessfullyConnected = true;

        printf("receive version message: version %d, blocks=%d, us=%s, them=%s, peer=%s\n", pfrom->nVersion, pfrom->nStartingHeight, addrMe.ToString().c_str(), addrFrom.ToString().c_str(), pfrom->addr.ToString().c_str());

        cPeerBlockCounts.input(pfrom->nStartingHeight);
    }

    else if (pfrom->nVersion == 0)
    {
        // Must have a version message before anything else
        pfrom->Misbehaving(1);
        return false;
    }

    else if (strCommand == "verack")
    {
        pfrom->SetRecvVersion(min(pfrom->nVersion, PROTOCOL_VERSION));
    }

    else if (strCommand == "addr")
    {
        vector<CAddress> vAddr;
        vRecv >> vAddr;

        // Don't want addr from older versions unless seeding
        if (pfrom->nVersion < CADDR_TIME_VERSION && addrman.size() > 1000)
            return true;
        if (vAddr.size() > 1000)
        {
            pfrom->Misbehaving(20);
            return error("message addr size() = %"PRIszu"", vAddr.size());
        }

        // Store the new addresses
        vector<CAddress> vAddrOk;
        int64 nNow = GetAdjustedTime();
        int64 nSince = nNow - 10 * 60;
        BOOST_FOREACH(CAddress& addr, vAddr)
        {
            boost::this_thread::interruption_point();

            // Clamp implausible timestamps to "about 5 days old".
            if (addr.nTime <= 100000000 || addr.nTime > nNow + 10 * 60)
                addr.nTime = nNow - 5 * 24 * 60 * 60;
            pfrom->AddAddressKnown(addr);
            bool fReachable = IsReachable(addr);
            if (addr.nTime > nSince && !pfrom->fGetAddr && vAddr.size() <= 10 && addr.IsRoutable())
            {
                // Relay to a limited number of other nodes
                {
                    LOCK(cs_vNodes);
                    // Use deterministic randomness to send to the same nodes for 24 hours
                    // at a time so the setAddrKnowns of the chosen nodes prevent repeats
                    static uint256 hashSalt;
                    if (hashSalt == 0)
                        hashSalt = GetRandHash();
                    uint64 hashAddr = addr.GetHash();
                    uint256 hashRand = hashSalt ^ (hashAddr<<32) ^ ((GetTime()+hashAddr)/(24*60*60));
                    hashRand = Hash(BEGIN(hashRand), END(hashRand));
                    multimap<uint256, CNode*> mapMix;
                    BOOST_FOREACH(CNode* pnode, vNodes)
                    {
                        if (pnode->nVersion < CADDR_TIME_VERSION)
                            continue;
                        unsigned int nPointer;
                        memcpy(&nPointer, &pnode, sizeof(nPointer));
                        uint256 hashKey = hashRand ^ nPointer;
                        hashKey = Hash(BEGIN(hashKey), END(hashKey));
                        mapMix.insert(make_pair(hashKey, pnode));
                    }
                    int nRelayNodes = fReachable ? 2 : 1; // limited relaying of addresses outside our network(s)
                    for (multimap<uint256, CNode*>::iterator mi = mapMix.begin(); mi != mapMix.end() && nRelayNodes-- > 0; ++mi)
                        ((*mi).second)->PushAddress(addr);
                }
            }
            // Do not store addresses outside our network
            if (fReachable)
                vAddrOk.push_back(addr);
        }
        addrman.Add(vAddrOk, pfrom->addr, 2 * 60 * 60);
        if (vAddr.size() < 1000)
            pfrom->fGetAddr = false;
        if (pfrom->fOneShot)
            pfrom->fDisconnect = true;
    }

    else if (strCommand == "inv")
    {
        vector<CInv> vInv;
        vRecv >> vInv;
        if (vInv.size() > MAX_INV_SZ)
        {
            pfrom->Misbehaving(20);
            return error("message inv size() = %"PRIszu"", vInv.size());
        }

        // find last block in inv vector
        unsigned int nLastBlock = (unsigned int)(-1);
        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++) {
            if (vInv[vInv.size() - 1 - nInv].type == MSG_BLOCK) {
                nLastBlock = vInv.size() - 1 - nInv;
                break;
            }
        }
        for (unsigned int nInv = 0; nInv < vInv.size(); nInv++)
        {
            const CInv &inv = vInv[nInv];

            boost::this_thread::interruption_point();
            pfrom->AddInventoryKnown(inv);

            bool fAlreadyHave = AlreadyHave(inv);
            if (fDebug)
                printf("  got inventory: %s  %s\n", inv.ToString().c_str(), fAlreadyHave ? "have" : "new");

            if (!fAlreadyHave) {
                if (!fImporting && !fReindex)
                    pfrom->AskFor(inv);
            } else if (inv.type == MSG_BLOCK && mapOrphanBlocks.count(inv.hash)) {
                // We have it as an orphan: ask for the chain back to its root.
                pfrom->PushGetBlocks(pindexBest, GetOrphanRoot(mapOrphanBlocks[inv.hash]));
            } else if (nInv == nLastBlock) {
                // In case we are on a very long side-chain, it is possible that we already have
                // the last block in an inv bundle sent in response to getblocks. Try to detect
                // this situation and push another getblocks to continue.
                pfrom->PushGetBlocks(mapBlockIndex[inv.hash], uint256(0));
                if (fDebug)
                    printf("force request: %s\n", inv.ToString().c_str());
            }

            // Track requests for our stuff
            Inventory(inv.hash);
        }
    }

    else if (strCommand == "getdata")
    {
        vector<CInv> vInv;
        vRecv >> vInv;
        if (vInv.size() > MAX_INV_SZ)
        {
            pfrom->Misbehaving(20);
            return error("message getdata size() = %"PRIszu"", vInv.size());
        }

        if (fDebugNet || (vInv.size() != 1))
            printf("received getdata (%"PRIszu" invsz)\n", vInv.size());

        if ((fDebugNet && vInv.size() > 0) || (vInv.size() == 1))
            printf("received getdata for: %s\n", vInv[0].ToString().c_str());

        pfrom->vRecvGetData.insert(pfrom->vRecvGetData.end(), vInv.begin(), vInv.end());
        ProcessGetData(pfrom);
    }

    else if (strCommand == "getblocks")
    {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        // Find the last block the caller has in the main chain
        CBlockIndex* pindex = locator.GetBlockIndex();

        // Send the rest of the chain
        if (pindex)
            pindex = pindex->pnext;
        int nLimit = 500;
        printf("getblocks %d to %s limit %d\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str(), nLimit);
        for (; pindex; pindex = pindex->pnext)
        {
            if (pindex->GetBlockHash() == hashStop)
            {
                printf("  getblocks stopping at %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
                break;
            }
            pfrom->PushInventory(CInv(MSG_BLOCK, pindex->GetBlockHash()));
            if (--nLimit <= 0)
            {
                // When this block is requested, we'll send an inv that'll make them
                // getblocks the next batch of inventory.
                printf("  getblocks stopping at limit %d %s\n", pindex->nHeight, pindex->GetBlockHash().ToString().c_str());
                pfrom->hashContinue = pindex->GetBlockHash();
                break;
            }
        }
    }

    else if (strCommand == "getheaders")
    {
        CBlockLocator locator;
        uint256 hashStop;
        vRecv >> locator >> hashStop;

        CBlockIndex* pindex = NULL;
        if (locator.IsNull())
        {
            // If locator is null, return the hashStop block
            map<uint256, CBlockIndex*>::iterator mi = mapBlockIndex.find(hashStop);
            if (mi == mapBlockIndex.end())
                return true;
            pindex = (*mi).second;
        }
        else
        {
            // Find the last block the caller has in the main chain
            pindex = locator.GetBlockIndex();
            if (pindex)
                pindex = pindex->pnext;
        }

        // we must use CBlocks, as CBlockHeaders won't include the 0x00 nTx count at the end
        vector<CBlock> vHeaders;
        int nLimit = 2000;
        printf("getheaders %d to %s\n", (pindex ? pindex->nHeight : -1), hashStop.ToString().c_str());
        for (; pindex; pindex = pindex->pnext)
        {
            vHeaders.push_back(pindex->GetBlockHeader());
            if (--nLimit <= 0 || pindex->GetBlockHash() == hashStop)
                break;
        }
        pfrom->PushMessage("headers", vHeaders);
    }

    else if (strCommand == "tx")
    {
        vector<uint256> vWorkQueue;
        vector<uint256> vEraseQueue;
        CDataStream vMsg(vRecv);
        CTransaction tx;
        vRecv >> tx;

        CInv inv(MSG_TX, tx.GetHash());
        pfrom->AddInventoryKnown(inv);

        bool fMissingInputs = false;
        CValidationState state;
        if (tx.AcceptToMemoryPool(state, true, true, &fMissingInputs))
        {
            RelayTransaction(tx, inv.hash);
            mapAlreadyAskedFor.erase(inv);
            vWorkQueue.push_back(inv.hash);
            vEraseQueue.push_back(inv.hash);

            // Recursively process any orphan transactions that depended on this one
            for (unsigned int i = 0; i < vWorkQueue.size(); i++)
            {
                uint256 hashPrev = vWorkQueue[i];
                for (set<uint256>::iterator mi = mapOrphanTransactionsByPrev[hashPrev].begin();
                     mi != mapOrphanTransactionsByPrev[hashPrev].end();
                     ++mi)
                {
                    const uint256& orphanHash = *mi;
                    const CTransaction& orphanTx = mapOrphanTransactions[orphanHash];
                    bool fMissingInputs2 = false;
                    // Use a dummy CValidationState so someone can't setup nodes to counter-DoS based on orphan
                    // resolution (that is, feeding people an invalid transaction based on LegitTxX in order to get
                    // anyone relaying LegitTxX banned)
                    CValidationState stateDummy;

                    // Submit the ORPHAN, not the parent tx (see bug note above).
                    if (orphanTx.AcceptToMemoryPool(stateDummy, true, true, &fMissingInputs2))
                    {
                        printf("   accepted orphan tx %s\n", orphanHash.ToString().c_str());
                        RelayTransaction(orphanTx, orphanHash);
                        mapAlreadyAskedFor.erase(CInv(MSG_TX, orphanHash));
                        vWorkQueue.push_back(orphanHash);
                        vEraseQueue.push_back(orphanHash);
                    }
                    else if (!fMissingInputs2)
                    {
                        // invalid or too-little-fee orphan
                        vEraseQueue.push_back(orphanHash);
                        printf("   removed orphan tx %s\n", orphanHash.ToString().c_str());
                    }
                }
            }

            BOOST_FOREACH(uint256 hash, vEraseQueue)
                EraseOrphanTx(hash);
        }
        else if (fMissingInputs)
        {
            AddOrphanTx(tx);

            // DoS prevention: do not allow mapOrphanTransactions to grow unbounded
            unsigned int nEvicted = LimitOrphanTxSize(MAX_ORPHAN_TRANSACTIONS);
            if (nEvicted > 0)
                printf("mapOrphan overflow, removed %u tx\n", nEvicted);
        }
        int nDoS;
        if (state.IsInvalid(nDoS))
            pfrom->Misbehaving(nDoS);
    }

    else if (strCommand == "block" && !fImporting && !fReindex) // Ignore blocks received while importing
    {
        CBlock block;
        vRecv >> block;

        printf("received block %s\n", block.GetHash().ToString().c_str());
        // block.print();

        CInv inv(MSG_BLOCK, block.GetHash());
        pfrom->AddInventoryKnown(inv);

        CValidationState state;
        if (ProcessBlock(state, pfrom, &block))
            mapAlreadyAskedFor.erase(inv);
        int nDoS;
        if (state.IsInvalid(nDoS))
            pfrom->Misbehaving(nDoS);
    }

    else if (strCommand == "getaddr")
    {
        pfrom->vAddrToSend.clear();
        vector<CAddress> vAddr = addrman.GetAddr();
        BOOST_FOREACH(const CAddress &addr, vAddr)
            pfrom->PushAddress(addr);
    }

    else if (strCommand == "mempool")
    {
        std::vector<uint256> vtxid;
        LOCK2(mempool.cs, pfrom->cs_filter);
        mempool.queryHashes(vtxid);
        vector<CInv> vInv;
        BOOST_FOREACH(uint256& hash, vtxid) {
            CInv inv(MSG_TX, hash);
            // Respect the peer's bloom filter, if it set one.
            if ((pfrom->pfilter && pfrom->pfilter->IsRelevantAndUpdate(mempool.lookup(hash), hash)) ||
               (!pfrom->pfilter))
                vInv.push_back(inv);
            if (vInv.size() == MAX_INV_SZ)
                break;
        }
        if (vInv.size() > 0)
            pfrom->PushMessage("inv", vInv);
    }

    else if (strCommand == "ping")
    {
        if (pfrom->nVersion > BIP0031_VERSION)
        {
            uint64 nonce = 0;
            vRecv >> nonce;
            // Echo the message back with the nonce. This allows for two useful features:
            //
            // 1) A remote node can quickly check if the connection is operational
            // 2) Remote nodes can measure the latency of the network thread. If this node
            //    is overloaded it won't respond to pings quickly and the remote node can
            //    avoid sending us more work, like chain download requests.
            //
            // The nonce stops the remote getting confused between different pings: without
            // it, if the remote node sends a ping once per second and this node takes 5
            // seconds to respond to each, the 5th ping the remote sends would appear to
            // return very quickly.
            pfrom->PushMessage("pong", nonce);
        }
    }

    else if (strCommand == "alert")
    {
        CAlert alert;
        vRecv >> alert;

        uint256 alertHash = alert.GetHash();
        if (pfrom->setKnown.count(alertHash) == 0)
        {
            if (alert.ProcessAlert())
            {
                // Relay
                pfrom->setKnown.insert(alertHash);
                {
                    LOCK(cs_vNodes);
                    BOOST_FOREACH(CNode* pnode, vNodes)
                        alert.RelayTo(pnode);
                }
            }
            else {
                // Small DoS penalty so peers that send us lots of
                // duplicate/expired/invalid-signature/whatever alerts
                // eventually get banned.
                // This isn't a Misbehaving(100) (immediate ban) because the
                // peer might be an older or different implementation with
                // a different signature key, etc.
                pfrom->Misbehaving(10);
            }
        }
    }

    else if (!fBloomFilters &&
             (strCommand == "filterload" ||
              strCommand == "filteradd" ||
              strCommand == "filterclear"))
    {
        // We do not advertise bloom-filter service; using it is a ban offense.
        pfrom->Misbehaving(100);
        return error("peer %s attempted to set a bloom filter even though we do not advertise that service",
                     pfrom->addr.ToString().c_str());
    }

    else if (strCommand == "filterload")
    {
        CBloomFilter filter;
        vRecv >> filter;

        if (!filter.IsWithinSizeConstraints())
            // There is no excuse for sending a too-large filter
            pfrom->Misbehaving(100);
        else
        {
            LOCK(pfrom->cs_filter);
            delete pfrom->pfilter;
            pfrom->pfilter = new CBloomFilter(filter);
            pfrom->pfilter->UpdateEmptyFull();
        }
        pfrom->fRelayTxes = true;
    }

    else if (strCommand == "filteradd")
    {
        vector<unsigned char> vData;
        vRecv >> vData;

        // Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
        // and thus, the maximum size any matched object can have) in a filteradd message
        if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
        {
            pfrom->Misbehaving(100);
        } else {
            LOCK(pfrom->cs_filter);
            if (pfrom->pfilter)
                pfrom->pfilter->insert(vData);
            else
                pfrom->Misbehaving(100);
        }
    }

    else if (strCommand == "filterclear")
    {
        LOCK(pfrom->cs_filter);
        delete pfrom->pfilter;
        pfrom->pfilter = new CBloomFilter();
        pfrom->fRelayTxes = true;
    }

    else
    {
        // Ignore unknown commands for extensibility
    }

    // Update the last seen time for this node's address
    if (pfrom->fNetworkNode)
        if (strCommand == "version" || strCommand == "addr" || strCommand == "inv" || strCommand == "getdata" || strCommand == "ping")
            AddressCurrentlyConnected(pfrom->addr);

    return true;
}
// requires LOCK(cs_vRecvMsg)
// Drain the peer's receive queue: for every complete buffered message, check
// the framing (message start magic, header validity, payload checksum) and
// dispatch it to ProcessMessage under cs_main. Stops early if the send
// buffer is full or an incomplete message is reached. Returns false only
// when the stream is unrecoverably desynchronized (bad magic).
bool ProcessMessages(CNode* pfrom)
{
    //if (fDebug)
    //    printf("ProcessMessages(%zu messages)\n", pfrom->vRecvMsg.size());

    //
    // Message format
    //  (4) message start
    //  (12) command
    //  (4) size
    //  (4) checksum
    //  (x) data
    //
    bool fOk = true;

    if (!pfrom->vRecvGetData.empty())
        ProcessGetData(pfrom);

    std::deque<CNetMessage>::iterator it = pfrom->vRecvMsg.begin();
    while (!pfrom->fDisconnect && it != pfrom->vRecvMsg.end()) {
        // Don't bother if send buffer is too full to respond anyway
        if (pfrom->nSendSize >= SendBufferSize())
            break;

        // get next message
        CNetMessage& msg = *it;

        //if (fDebug)
        //    printf("ProcessMessages(message %u msgsz, %zu bytes, complete:%s)\n",
        //            msg.hdr.nMessageSize, msg.vRecv.size(),
        //            msg.complete() ? "Y" : "N");

        // end, if an incomplete message is found
        if (!msg.complete())
            break;

        // at this point, any failure means we can delete the current message
        it++;

        // Scan for message start
        if (memcmp(msg.hdr.pchMessageStart, pchMessageStart, sizeof(pchMessageStart)) != 0) {
            printf("\n\nPROCESSMESSAGE: INVALID MESSAGESTART\n\n");
            fOk = false;
            break;
        }

        // Read header
        CMessageHeader& hdr = msg.hdr;
        if (!hdr.IsValid())
        {
            printf("\n\nPROCESSMESSAGE: ERRORS IN HEADER %s\n\n\n", hdr.GetCommand().c_str());
            continue;
        }
        string strCommand = hdr.GetCommand();

        // Message size
        unsigned int nMessageSize = hdr.nMessageSize;

        // Checksum: first 4 bytes of the double-SHA256 of the payload.
        CDataStream& vRecv = msg.vRecv;
        uint256 hash = Hash(vRecv.begin(), vRecv.begin() + nMessageSize);
        unsigned int nChecksum = 0;
        memcpy(&nChecksum, &hash, sizeof(nChecksum));
        if (nChecksum != hdr.nChecksum)
        {
            printf("ProcessMessages(%s, %u bytes) : CHECKSUM ERROR nChecksum=%08x hdr.nChecksum=%08x\n",
               strCommand.c_str(), nMessageSize, nChecksum, hdr.nChecksum);
            continue;
        }

        // Process message
        bool fRet = false;
        try
        {
            {
                LOCK(cs_main);
                fRet = ProcessMessage(pfrom, strCommand, vRecv);
            }
            boost::this_thread::interruption_point();
        }
        catch (std::ios_base::failure& e)
        {
            if (strstr(e.what(), "end of data"))
            {
                // Allow exceptions from under-length message on vRecv
                printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught, normally caused by a message being shorter than its stated length\n", strCommand.c_str(), nMessageSize, e.what());
            }
            else if (strstr(e.what(), "size too large"))
            {
                // Allow exceptions from over-long size
                printf("ProcessMessages(%s, %u bytes) : Exception '%s' caught\n", strCommand.c_str(), nMessageSize, e.what());
            }
            else
            {
                PrintExceptionContinue(&e, "ProcessMessages()");
            }
        }
        catch (boost::thread_interrupted) {
            // Re-throw so the network thread can shut down cleanly.
            throw;
        }
        catch (std::exception& e) {
            PrintExceptionContinue(&e, "ProcessMessages()");
        } catch (...) {
            PrintExceptionContinue(NULL, "ProcessMessages()");
        }

        if (!fRet)
            printf("ProcessMessage(%s, %u bytes) FAILED\n", strCommand.c_str(), nMessageSize);
    }

    // In case the connection got shut down, its receive buffer was wiped
    if (!pfrom->fDisconnect)
        pfrom->vRecvMsg.erase(pfrom->vRecvMsg.begin(), it);

    return fOk;
}
// Flush queued outbound traffic to one peer: keep-alive pings, the initial
// getblocks sync request, periodic address self-advertisement, then the
// batched "addr", "inv" and "getdata" messages. fSendTrickle marks the
// peers whose transaction inventory is flushed this pass (privacy
// trickling); others keep most tx invs queued. Does nothing if cs_main is
// contended; always returns true.
bool SendMessages(CNode* pto, bool fSendTrickle)
{
    TRY_LOCK(cs_main, lockMain);
    if (lockMain) {
        // Don't send anything until we get their version message
        if (pto->nVersion == 0)
            return true;

        // Keep-alive ping. We send a nonce of zero because we don't use it anywhere
        // right now.
        if (pto->nLastSend && GetTime() - pto->nLastSend > 30 * 60 && pto->vSendMsg.empty()) {
            uint64 nonce = 0;
            if (pto->nVersion > BIP0031_VERSION)
                pto->PushMessage("ping", nonce);
            else
                pto->PushMessage("ping");
        }

        // Start block sync
        if (pto->fStartSync && !fImporting && !fReindex) {
            pto->fStartSync = false;
            pto->PushGetBlocks(pindexBest, uint256(0));
        }

        // Resend wallet transactions that haven't gotten in a block yet
        // Except during reindex, importing and IBD, when old wallet
        // transactions become unconfirmed and spams other nodes.
        if (!fReindex && !fImporting && !IsInitialBlockDownload())
        {
            ResendWalletTransactions();
        }

        // Address refresh broadcast (at most once per 24h).
        static int64 nLastRebroadcast;
        if (!IsInitialBlockDownload() && (GetTime() - nLastRebroadcast > 24 * 60 * 60))
        {
            {
                LOCK(cs_vNodes);
                BOOST_FOREACH(CNode* pnode, vNodes)
                {
                    // Periodically clear setAddrKnown to allow refresh broadcasts
                    if (nLastRebroadcast)
                        pnode->setAddrKnown.clear();

                    // Rebroadcast our address
                    if (!fNoListen)
                    {
                        CAddress addr = GetLocalAddress(&pnode->addr);
                        if (addr.IsRoutable())
                            pnode->PushAddress(addr);
                    }
                }
            }
            nLastRebroadcast = GetTime();
        }

        //
        // Message: addr
        //
        if (fSendTrickle)
        {
            vector<CAddress> vAddr;
            vAddr.reserve(pto->vAddrToSend.size());
            BOOST_FOREACH(const CAddress& addr, pto->vAddrToSend)
            {
                // returns true if wasn't already contained in the set
                if (pto->setAddrKnown.insert(addr).second)
                {
                    vAddr.push_back(addr);
                    // receiver rejects addr messages larger than 1000
                    if (vAddr.size() >= 1000)
                    {
                        pto->PushMessage("addr", vAddr);
                        vAddr.clear();
                    }
                }
            }
            pto->vAddrToSend.clear();
            if (!vAddr.empty())
                pto->PushMessage("addr", vAddr);
        }

        //
        // Message: inventory
        //
        vector<CInv> vInv;
        vector<CInv> vInvWait;
        {
            LOCK(pto->cs_inventory);
            vInv.reserve(pto->vInventoryToSend.size());
            vInvWait.reserve(pto->vInventoryToSend.size());
            BOOST_FOREACH(const CInv& inv, pto->vInventoryToSend)
            {
                if (pto->setInventoryKnown.count(inv))
                    continue;

                // trickle out tx inv to protect privacy
                if (inv.type == MSG_TX && !fSendTrickle)
                {
                    // 1/4 of tx invs blast to all immediately
                    static uint256 hashSalt;
                    if (hashSalt == 0)
                        hashSalt = GetRandHash();
                    uint256 hashRand = inv.hash ^ hashSalt;
                    hashRand = Hash(BEGIN(hashRand), END(hashRand));
                    bool fTrickleWait = ((hashRand & 3) != 0);

                    // always trickle our own transactions
                    if (!fTrickleWait)
                    {
                        CWalletTx wtx;
                        if (GetTransaction(inv.hash, wtx))
                            if (wtx.fFromMe)
                                fTrickleWait = true;
                    }

                    if (fTrickleWait)
                    {
                        vInvWait.push_back(inv);
                        continue;
                    }
                }

                // returns true if wasn't already contained in the set
                if (pto->setInventoryKnown.insert(inv).second)
                {
                    vInv.push_back(inv);
                    if (vInv.size() >= 1000)
                    {
                        pto->PushMessage("inv", vInv);
                        vInv.clear();
                    }
                }
            }
            // Whatever was held back waits for the next trickle pass.
            pto->vInventoryToSend = vInvWait;
        }
        if (!vInv.empty())
            pto->PushMessage("inv", vInv);

        //
        // Message: getdata
        //
        vector<CInv> vGetData;
        int64 nNow = GetTime() * 1000000;
        while (!pto->mapAskFor.empty() && (*pto->mapAskFor.begin()).first <= nNow)
        {
            const CInv& inv = (*pto->mapAskFor.begin()).second;
            // Skip anything we acquired since it was queued.
            if (!AlreadyHave(inv))
            {
                if (fDebugNet)
                    printf("sending getdata: %s\n", inv.ToString().c_str());
                vGetData.push_back(inv);
                if (vGetData.size() >= 1000)
                {
                    pto->PushMessage("getdata", vGetData);
                    vGetData.clear();
                }
            }
            pto->mapAskFor.erase(pto->mapAskFor.begin());
        }
        if (!vGetData.empty())
            pto->PushMessage("getdata", vGetData);

    }
    return true;
}
//////////////////////////////////////////////////////////////////////////////
//
// kriptonMiner
//
// Apply SHA-256 message padding in place: append the mandatory 0x80 marker,
// zero-fill, and store the message length in bits big-endian in the final
// four bytes. The caller must provide a buffer large enough to hold the
// padded result. Returns the number of 64-byte blocks the padded message
// occupies.
int static FormatHashBlocks(void* pbuffer, unsigned int len)
{
    unsigned char* pbytes = (unsigned char*)pbuffer;
    // One block, plus another for each full 64 bytes consumed by the
    // payload together with the trailing 8-byte length field.
    unsigned int nBlocks = 1 + ((len + 8) / 64);
    unsigned char* pafter = pbytes + 64 * nBlocks;
    // Zero everything past the payload, then set the terminator bit.
    memset(pbytes + len, 0, 64 * nBlocks - len);
    pbytes[len] = 0x80;
    // Bit count, big-endian, in the last four bytes of the final block.
    unsigned int nBits = len * 8;
    pafter[-1] = (unsigned char)(nBits & 0xff);
    pafter[-2] = (unsigned char)((nBits >> 8) & 0xff);
    pafter[-3] = (unsigned char)((nBits >> 16) & 0xff);
    pafter[-4] = (unsigned char)((nBits >> 24) & 0xff);
    return nBlocks;
}
// The standard SHA-256 initial hash values (H0..H7 from FIPS 180-4), used
// as the starting chaining value for SHA256Transform below.
static const unsigned int pSHA256InitState[8] =
{0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
// Run a single SHA-256 compression over one 64-byte input block, starting
// from a caller-supplied chaining value (so mid-stream states can be
// resumed, e.g. for mining).
//   pstate - out: 8 x 32-bit resulting state words
//   pinput - in:  64 bytes (16 x 32-bit words) of message data
//   pinit  - in:  8 x 32-bit chaining value (e.g. pSHA256InitState)
// NOTE(review): reaches into OpenSSL's SHA256_CTX internals (ctx.h) to seed
// and read the state — depends on that struct layout; confirm against the
// linked OpenSSL version.
void SHA256Transform(void* pstate, void* pinput, const void* pinit)
{
    SHA256_CTX ctx;
    unsigned char data[64];

    SHA256_Init(&ctx);

    // Copy the 16 input words, byte-reversed, into the working buffer.
    for (int i = 0; i < 16; i++)
        ((uint32_t*)data)[i] = ByteReverse(((uint32_t*)pinput)[i]);

    // Overwrite the freshly initialized state with the caller's chaining value.
    for (int i = 0; i < 8; i++)
        ctx.h[i] = ((uint32_t*)pinit)[i];

    SHA256_Update(&ctx, data, sizeof(data));

    // Export the resulting 8-word state.
    for (int i = 0; i < 8; i++)
        ((uint32_t*)pstate)[i] = ctx.h[i];
}
// A block-assembly "orphan": a mempool transaction that spends outputs of
// other transactions which have not been placed into the block yet. It is
// held back until every hash in setDependsOn has been included.
class COrphan
{
public:
    CTransaction* ptx;          // the dependent transaction (not owned)
    set<uint256> setDependsOn;  // hashes of transactions it is waiting on
    double dPriority;           // cached priority (filled in by the caller)
    double dFeePerKb;           // cached fee rate (filled in by the caller)

    COrphan(CTransaction* ptxIn) : ptx(ptxIn), dPriority(0), dFeePerKb(0)
    {
    }

    // Debug dump of this entry and its outstanding dependencies.
    void print() const
    {
        printf("COrphan(hash=%s, dPriority=%.1f, dFeePerKb=%.1f)\n",
               ptx->GetHash().ToString().c_str(), dPriority, dFeePerKb);
        BOOST_FOREACH(uint256 hash, setDependsOn)
            printf("   setDependsOn %s\n", hash.ToString().c_str());
    }
};
// Statistics from the most recent block assembled by CreateNewBlock().
uint64 nLastBlockTx = 0;
uint64 nLastBlockSize = 0;
// We want to sort transactions by priority and fee, so:
// (priority, fee-per-kilobyte, pointer to the mempool transaction)
typedef boost::tuple<double, double, CTransaction*> TxPriority;
// Heap comparator for TxPriority entries.  With byFee set, fee-per-kb is the
// primary key and priority breaks ties; otherwise the order is reversed.
// Returns strict-weak "less than", so the best candidate ends up on top of a
// std::make_heap/pop_heap max-heap.
class TxPriorityCompare
{
    bool byFee;
public:
    TxPriorityCompare(bool _byFee) : byFee(_byFee) { }
    bool operator()(const TxPriority& a, const TxPriority& b)
    {
        if (byFee)
        {
            // Primary: fee-per-kb; tie-break on priority.
            return (a.get<1>() == b.get<1>()) ? (a.get<0>() < b.get<0>())
                                              : (a.get<1>() < b.get<1>());
        }
        // Primary: priority; tie-break on fee-per-kb.
        return (a.get<0>() == b.get<0>()) ? (a.get<1>() < b.get<1>())
                                          : (a.get<0>() < b.get<0>());
    }
};
// Assemble a new block template on top of the current best chain tip.
// Collects mempool transactions ordered first by priority (coin-age), then by
// fee-per-kilobyte once the priority budget is spent, resolving in-mempool
// dependencies via COrphan.  Fills in the coinbase with subsidy plus fees and
// test-connects the result so an invalid template is never handed to a miner.
// Returns NULL if allocation or key reservation fails; throws if the
// assembled block fails ConnectBlock.  The caller owns the returned template.
CBlockTemplate* CreateNewBlock(CReserveKey& reservekey)
{
    // Create new block
    auto_ptr<CBlockTemplate> pblocktemplate(new CBlockTemplate());
    if(!pblocktemplate.get())
        return NULL;
    CBlock *pblock = &pblocktemplate->block; // pointer for convenience
    // Create coinbase tx (single null input; payout to a fresh key from the pool)
    CTransaction txNew;
    txNew.vin.resize(1);
    txNew.vin[0].prevout.SetNull();
    txNew.vout.resize(1);
    CPubKey pubkey;
    if (!reservekey.GetReservedKey(pubkey))
        return NULL;
    txNew.vout[0].scriptPubKey << pubkey << OP_CHECKSIG;
    // Add our coinbase tx as first transaction
    pblock->vtx.push_back(txNew);
    pblocktemplate->vTxFees.push_back(-1); // updated at end
    pblocktemplate->vTxSigOps.push_back(-1); // updated at end
    // Largest block you're willing to create:
    unsigned int nBlockMaxSize = GetArg("-blockmaxsize", MAX_BLOCK_SIZE_GEN/4);
    // Limit to between 1K and MAX_BLOCK_SIZE-1K for sanity:
    nBlockMaxSize = std::max((unsigned int)1000, std::min((unsigned int)(MAX_BLOCK_SIZE-1000), nBlockMaxSize));
    // How much of the block should be dedicated to high-priority transactions,
    // included regardless of the fees they pay
    unsigned int nBlockPrioritySize = GetArg("-blockprioritysize", 27000);
    nBlockPrioritySize = std::min(nBlockMaxSize, nBlockPrioritySize);
    // Minimum block size you want to create; block will be filled with free transactions
    // until there are no more or the block reaches this size:
    unsigned int nBlockMinSize = GetArg("-blockminsize", 0);
    nBlockMinSize = std::min(nBlockMaxSize, nBlockMinSize);
    // Collect memory pool transactions into the block
    int64 nFees = 0;
    {
        // Hold both the chain-state and mempool locks for the whole selection.
        LOCK2(cs_main, mempool.cs);
        CBlockIndex* pindexPrev = pindexBest;
        CCoinsViewCache view(*pcoinsTip, true);
        // Priority order to process transactions
        list<COrphan> vOrphan; // list memory doesn't move
        map<uint256, vector<COrphan*> > mapDependers;
        bool fPrintPriority = GetBoolArg("-printpriority");
        // This vector will be sorted into a priority queue:
        vector<TxPriority> vecPriority;
        vecPriority.reserve(mempool.mapTx.size());
        // Pass 1: compute priority and fee rate for every eligible mempool tx;
        // transactions spending unconfirmed outputs are parked in vOrphan.
        for (map<uint256, CTransaction>::iterator mi = mempool.mapTx.begin(); mi != mempool.mapTx.end(); ++mi)
        {
            CTransaction& tx = (*mi).second;
            if (tx.IsCoinBase() || !tx.IsFinal())
                continue;
            COrphan* porphan = NULL;
            double dPriority = 0;
            int64 nTotalIn = 0;
            bool fMissingInputs = false;
            BOOST_FOREACH(const CTxIn& txin, tx.vin)
            {
                // Read prev transaction
                if (!view.HaveCoins(txin.prevout.hash))
                {
                    // This should never happen; all transactions in the memory
                    // pool should connect to either transactions in the chain
                    // or other transactions in the memory pool.
                    if (!mempool.mapTx.count(txin.prevout.hash))
                    {
                        printf("ERROR: mempool transaction missing input\n");
                        if (fDebug) assert("mempool transaction missing input" == 0);
                        fMissingInputs = true;
                        // Undo the partially-built orphan entry, if any.
                        if (porphan)
                            vOrphan.pop_back();
                        break;
                    }
                    // Has to wait for dependencies
                    if (!porphan)
                    {
                        // Use list for automatic deletion
                        vOrphan.push_back(COrphan(&tx));
                        porphan = &vOrphan.back();
                    }
                    mapDependers[txin.prevout.hash].push_back(porphan);
                    porphan->setDependsOn.insert(txin.prevout.hash);
                    nTotalIn += mempool.mapTx[txin.prevout.hash].vout[txin.prevout.n].nValue;
                    continue;
                }
                const CCoins &coins = view.GetCoins(txin.prevout.hash);
                int64 nValueIn = coins.vout[txin.prevout.n].nValue;
                nTotalIn += nValueIn;
                // Confirmations of the spent output contribute coin-age priority.
                int nConf = pindexPrev->nHeight - coins.nHeight + 1;
                dPriority += (double)nValueIn * nConf;
            }
            if (fMissingInputs) continue;
            // Priority is sum(valuein * age) / txsize
            unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
            dPriority /= nTxSize;
            // This is a more accurate fee-per-kilobyte than is used by the client code, because the
            // client code rounds up the size to the nearest 1K. That's good, because it gives an
            // incentive to create smaller transactions.
            double dFeePerKb =  double(nTotalIn-tx.GetValueOut()) / (double(nTxSize)/1000.0);
            if (porphan)
            {
                porphan->dPriority = dPriority;
                porphan->dFeePerKb = dFeePerKb;
            }
            else
                vecPriority.push_back(TxPriority(dPriority, dFeePerKb, &(*mi).second));
        }
        // Pass 2: pop candidates off the heap and pack them into the block.
        // Collect transactions into block
        uint64 nBlockSize = 1000;
        uint64 nBlockTx = 0;
        int nBlockSigOps = 100;
        bool fSortedByFee = (nBlockPrioritySize <= 0);
        TxPriorityCompare comparer(fSortedByFee);
        std::make_heap(vecPriority.begin(), vecPriority.end(), comparer);
        while (!vecPriority.empty())
        {
            // Take highest priority transaction off the priority queue:
            double dPriority = vecPriority.front().get<0>();
            double dFeePerKb = vecPriority.front().get<1>();
            CTransaction& tx = *(vecPriority.front().get<2>());
            std::pop_heap(vecPriority.begin(), vecPriority.end(), comparer);
            vecPriority.pop_back();
            // Size limits
            unsigned int nTxSize = ::GetSerializeSize(tx, SER_NETWORK, PROTOCOL_VERSION);
            if (nBlockSize + nTxSize >= nBlockMaxSize)
                continue;
            // Legacy limits on sigOps:
            unsigned int nTxSigOps = tx.GetLegacySigOpCount();
            if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
                continue;
            // Skip free transactions if we're past the minimum block size:
            if (fSortedByFee && (dFeePerKb < CTransaction::nMinTxFee) && (nBlockSize + nTxSize >= nBlockMinSize))
                continue;
            // Prioritize by fee once past the priority size or we run out of high-priority
            // transactions: re-heapify with the fee-ordered comparator.
            if (!fSortedByFee &&
                ((nBlockSize + nTxSize >= nBlockPrioritySize) || (dPriority < COIN * 576 / 250)))
            {
                fSortedByFee = true;
                comparer = TxPriorityCompare(fSortedByFee);
                std::make_heap(vecPriority.begin(), vecPriority.end(), comparer);
            }
            if (!tx.HaveInputs(view))
                continue;
            int64 nTxFees = tx.GetValueIn(view)-tx.GetValueOut();
            nTxSigOps += tx.GetP2SHSigOpCount(view);
            if (nBlockSigOps + nTxSigOps >= MAX_BLOCK_SIGOPS)
                continue;
            // Verify scripts against the working coin view before accepting.
            CValidationState state;
            if (!tx.CheckInputs(state, view, true, SCRIPT_VERIFY_P2SH))
                continue;
            // Apply the tx to the working view so dependents see its outputs.
            CTxUndo txundo;
            uint256 hash = tx.GetHash();
            tx.UpdateCoins(state, view, txundo, pindexPrev->nHeight+1, hash);
            // Added
            pblock->vtx.push_back(tx);
            pblocktemplate->vTxFees.push_back(nTxFees);
            pblocktemplate->vTxSigOps.push_back(nTxSigOps);
            nBlockSize += nTxSize;
            ++nBlockTx;
            nBlockSigOps += nTxSigOps;
            nFees += nTxFees;
            if (fPrintPriority)
            {
                printf("priority %.1f feeperkb %.1f txid %s\n",
                       dPriority, dFeePerKb, tx.GetHash().ToString().c_str());
            }
            // Add transactions that depend on this one to the priority queue
            if (mapDependers.count(hash))
            {
                BOOST_FOREACH(COrphan* porphan, mapDependers[hash])
                {
                    if (!porphan->setDependsOn.empty())
                    {
                        porphan->setDependsOn.erase(hash);
                        // All dependencies satisfied: orphan becomes a candidate.
                        if (porphan->setDependsOn.empty())
                        {
                            vecPriority.push_back(TxPriority(porphan->dPriority, porphan->dFeePerKb, porphan->ptx));
                            std::push_heap(vecPriority.begin(), vecPriority.end(), comparer);
                        }
                    }
                }
            }
        }
        nLastBlockTx = nBlockTx;
        nLastBlockSize = nBlockSize;
        printf("CreateNewBlock(): total size %"PRI64u"\n", nBlockSize);
        // Coinbase pays the block subsidy plus all collected fees.
        pblock->vtx[0].vout[0].nValue = GetBlockValue(pindexPrev->nHeight+1, nFees);
        pblocktemplate->vTxFees[0] = -nFees;
        // Fill in header
        pblock->hashPrevBlock  = pindexPrev->GetBlockHash();
        pblock->UpdateTime(pindexPrev);
        pblock->nBits          = GetNextWorkRequired(pindexPrev, pblock);
        pblock->nNonce         = 0;
        pblock->vtx[0].vin[0].scriptSig = CScript() << OP_0 << OP_0;
        pblocktemplate->vTxSigOps[0] = pblock->vtx[0].GetLegacySigOpCount();
        // Sanity-check: connect the block against a throwaway view so miners
        // never receive a template that the network would reject.
        CBlockIndex indexDummy(*pblock);
        indexDummy.pprev = pindexPrev;
        indexDummy.nHeight = pindexPrev->nHeight + 1;
        CCoinsViewCache viewNew(*pcoinsTip, true);
        CValidationState state;
        if (!pblock->ConnectBlock(state, &indexDummy, viewNew, true))
            throw std::runtime_error("CreateNewBlock() : ConnectBlock failed");
    }
    return pblocktemplate.release();
}
// Bump the coinbase extra nonce and rebuild the merkle root so miners get a
// fresh work unit when the 32-bit header nonce space is exhausted.  The
// counter resets whenever mining moves to a new previous block.
void IncrementExtraNonce(CBlock* pblock, CBlockIndex* pindexPrev, unsigned int& nExtraNonce)
{
    // Update nExtraNonce
    static uint256 hashPrevBlock;
    if (hashPrevBlock != pblock->hashPrevBlock)
    {
        nExtraNonce = 0;
        hashPrevBlock = pblock->hashPrevBlock;
    }
    ++nExtraNonce;
    unsigned int nHeight = pindexPrev->nHeight+1; // Height first in coinbase required for block.version=2
    pblock->vtx[0].vin[0].scriptSig = (CScript() << nHeight << CBigNum(nExtraNonce)) + COINBASE_FLAGS;
    // The coinbase scriptSig must stay within the size limit enforced here.
    assert(pblock->vtx[0].vin[0].scriptSig.size() <= 100);
    // Changing the coinbase changes the merkle root.
    pblock->hashMerkleRoot = pblock->BuildMerkleTree();
}
// Prepare the getwork-style buffers handed to external miners:
//  - pdata gets the 128-byte padded block header (header fields + SHA padding),
//  - pmidstate gets the SHA-256 midstate after the header's first 64 bytes,
//  - phash1 gets a 64-byte padded buffer for the second hashing stage.
// The anonymous struct below mirrors the exact byte layout the miner expects;
// do not reorder or resize its fields.
void FormatHashBuffers(CBlock* pblock, char* pmidstate, char* pdata, char* phash1)
{
    //
    // Pre-build hash buffers
    //
    struct
    {
        struct unnamed2
        {
            // Mirrors the serialized block header field order.
            int nVersion;
            uint256 hashPrevBlock;
            uint256 hashMerkleRoot;
            unsigned int nTime;
            unsigned int nBits;
            unsigned int nNonce;
        }
        block;
        unsigned char pchPadding0[64];
        uint256 hash1;
        unsigned char pchPadding1[64];
    }
    tmp;
    memset(&tmp, 0, sizeof(tmp));
    tmp.block.nVersion       = pblock->nVersion;
    tmp.block.hashPrevBlock  = pblock->hashPrevBlock;
    tmp.block.hashMerkleRoot = pblock->hashMerkleRoot;
    tmp.block.nTime          = pblock->nTime;
    tmp.block.nBits          = pblock->nBits;
    tmp.block.nNonce         = pblock->nNonce;
    // Apply SHA-256 padding to the header and to the intermediate hash buffer.
    FormatHashBlocks(&tmp.block, sizeof(tmp.block));
    FormatHashBlocks(&tmp.hash1, sizeof(tmp.hash1));
    // Byte swap all the input buffer
    for (unsigned int i = 0; i < sizeof(tmp)/4; i++)
        ((unsigned int*)&tmp)[i] = ByteReverse(((unsigned int*)&tmp)[i]);
    // Precalc the first half of the first hash, which stays constant
    SHA256Transform(pmidstate, &tmp.block, pSHA256InitState);
    memcpy(pdata, &tmp.block, 128);
    memcpy(phash1, &tmp.hash1, 64);
}
// Verify that a mined block meets its proof-of-work target and, if so, submit
// it to the node for full validation and relay.  Returns false when the hash
// misses the target, the block is stale, or ProcessBlock rejects it.
bool CheckWork(CBlock* pblock, CWallet& wallet, CReserveKey& reservekey)
{
    uint256 hash = pblock->GetPoWHash();
    // Expand the compact difficulty encoding from the header into a full target.
    uint256 hashTarget = CBigNum().SetCompact(pblock->nBits).getuint256();
    if (hash > hashTarget)
        return false;
    //// debug print
    printf("kripton RPCMiner:\n");
    printf("proof-of-work found \n hash: %s \ntarget: %s\n", hash.GetHex().c_str(), hashTarget.GetHex().c_str());
    pblock->print();
    printf("generated %s\n", FormatMoney(pblock->vtx[0].vout[0].nValue).c_str());
    // Found a solution
    {
        LOCK(cs_main);
        // The chain tip may have moved while we were hashing.
        if (pblock->hashPrevBlock != hashBestChain)
            return error("kriptonMiner : generated block is stale");
        // Remove key from key pool so the coinbase key is never reused.
        reservekey.KeepKey();
        // Track how many getdata requests this block gets
        {
            LOCK(wallet.cs_wallet);
            wallet.mapRequestCount[pblock->GetHash()] = 0;
        }
        // Process this block the same as if we had received it from another node
        CValidationState state;
        if (!ProcessBlock(state, NULL, pblock))
            return error("kriptonMiner : ProcessBlock, block not accepted");
    }
    return true;
}
// Amount compression scheme:
// * a zero amount encodes as 0
// * otherwise strip trailing decimal zeroes: n -> n / 10^e with e maximal
//   (capped at 9)
// * if e < 9, the last digit d of the stripped value is nonzero; drop it and
//   encode 1 + 10*(9*(n/10) + d - 1) + e
// * if e == 9, all we know is the stripped value is nonzero, so encode
//   1 + 10*(n - 1) + 9
// This is reversible because d is in [1-9] and e is in [0-9].
uint64 CTxOutCompressor::CompressAmount(uint64 n)
{
    if (n == 0)
        return 0;
    // Factor out up to nine trailing powers of ten.
    int e = 0;
    while (e < 9 && (n % 10) == 0) {
        n /= 10;
        e++;
    }
    if (e == 9)
        return 1 + (n - 1)*10 + 9;
    int d = (int)(n % 10);
    assert(d >= 1 && d <= 9);
    return 1 + ((n/10)*9 + d - 1)*10 + e;
}
// Inverse of CompressAmount: decode x back into the original amount.
// x = 0 OR x = 1+10*(9*n + d - 1) + e OR x = 1+10*(n - 1) + 9
uint64 CTxOutCompressor::DecompressAmount(uint64 x)
{
    if (x == 0)
        return 0;
    x--;
    // Peel off the exponent first.
    int e = (int)(x % 10);
    x /= 10;
    uint64 n;
    if (e == 9) {
        // Only the nonzero stripped value was stored.
        n = x + 1;
    } else {
        // Recover the dropped last digit d, then reattach it.
        int d = (int)(x % 9) + 1;
        x /= 9;
        n = x*10 + d;
    }
    // Re-apply the stripped powers of ten.
    for (; e > 0; e--)
        n *= 10;
    return n;
}
class CMainCleanup
{
public:
CMainCleanup() {}
~CMainCleanup() {
// block headers
std::map<uint256, CBlockIndex*>::iterator it1 = mapBlockIndex.begin();
for (; it1 != mapBlockIndex.end(); it1++)
delete (*it1).second;
mapBlockIndex.clear();
// orphan blocks
std::map<uint256, CBlock*>::iterator it2 = mapOrphanBlocks.begin();
for (; it2 != mapOrphanBlocks.end(); it2++)
delete (*it2).second;
mapOrphanBlocks.clear();
// orphan transactions
mapOrphanTransactions.clear();
}
} instance_of_cmaincleanup;
|
/*
Copyright (C) 2013-2020 Sysdig Inc.
This file is part of sysdig.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#define __STDC_FORMAT_MACROS
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <assert.h>
#include <algorithm>
#include <sinsp.h>
#ifdef HAS_CAPTURE
#ifndef WIN32
#include "driver_config.h"
#endif // WIN32
#endif // HAS_CAPTURE
#include "sysdig.h"
#ifdef HAS_CHISELS
#include "chisel.h"
#include "chisel_utils.h"
#endif
#include "fields_info.h"
#include "table.h"
#include "utils.h"
#include "plugin_utils.h"
#ifdef _WIN32
#include "win32/getopt.h"
#include <io.h>
#else
#include <unistd.h>
#include <getopt.h>
#include <term.h>
#endif
#include "cursescomponents.h"
#include "cursestable.h"
#include "cursesui.h"
#include "scap_open_exception.h"
#include "sinsp_capture_interrupt_exception.h"
#define MOUSE_CAPABLE_TERM "xterm-1003"
#define MOUSE_CAPABLE_TERM_COMPAT "xterm-1002"
// Set by signal_callback() to ask the main event loop to stop.
static bool g_terminate = false;
// When true, the event source is an input plugin; signal_callback() then arms
// a fallback alarm because plugins can block indefinitely.  (Presumably set
// during initialization — the assignment is outside this view.)
static bool g_plugin_input = false;
static void usage();
//
// Helper functions
//
//
// SIGINT/SIGTERM handler: request a graceful stop of the event loop.
//
static void signal_callback(int signal)
{
	if(!g_plugin_input)
	{
		g_terminate = true;
		return;
	}

	//
	// Input plugins can get stuck at any point.  The first signal requests a
	// graceful stop and arms a 2-second alarm; if a second signal (or the
	// alarm itself) arrives while still stuck, force-quit immediately.
	//
	if(g_terminate == true)
	{
		exit(0);
	}

	g_terminate = true;
#ifndef _WIN32
	alarm(2);
#endif
}
//
// Program help
//
// Print the csysdig command-line help text to stdout.  The option list below
// must be kept in sync with long_options and the getopt string in
// csysdig_init().
static void usage()
{
	printf(
"csysdig version " SYSDIG_VERSION "\n"
"Usage: csysdig [options] [filter]\n\n"
"Options:\n"
" -A, --print-ascii  When emitting JSON, only print the text portion of data buffers, and echo\n"
"                    end-of-lines. This is useful to only display human-readable\n"
"                    data.\n"
" -B<bpf_probe>, --bpf=<bpf_probe>\n"
"                    Enable live capture using the specified BPF probe instead of the kernel module.\n"
"                    The BPF probe can also be specified via the environment variable\n"
"                    SYSDIG_BPF_PROBE. If <bpf_probe> is left empty, sysdig will\n"
"                    try to load one from the scap-driver-loader script.\n"
#ifdef HAS_CAPTURE
" --cri <path>       Path to CRI socket for container metadata\n"
"                    Use the specified socket to fetch data from a CRI-compatible runtime\n"
"\n"
" --cri-timeout <timeout_ms>\n"
"                    Wait at most <timeout_ms> milliseconds for response from CRI\n"
#endif
" -d <period>, --delay=<period>\n"
"                    Set the delay between updates, in milliseconds. This works\n"
"                    similarly to the -d option in top.\n"
" -E, --exclude-users\n"
"                    Don't create the user/group tables by querying the OS when\n"
"                    sysdig starts. This also means that no user or group info\n"
"                    will be written to the tracefile by the -w flag.\n"
"                    The user/group tables are necessary to use filter fields\n"
"                    like user.name or group.name. However, creating them can\n"
"                    increase sysdig's startup time. Moreover, they contain\n"
"                    information that could be privacy sensitive.\n"
" --force-term-compat\n"
"                    Try to configure simple terminal settings (xterm-1002) that work\n"
"                    better with terminals like putty. Try to use this flag if you experience\n"
"                    terminal issues like the mouse not working.\n"
" -h, --help         Print this page\n"
#ifndef MINIMAL_BUILD
" -k <url>, --k8s-api=<url>\n"
"                    Enable Kubernetes support by connecting to the API server\n"
"                    specified as argument. E.g. \"http://admin:password@127.0.0.1:8080\".\n"
"                    The API server can also be specified via the environment variable\n"
"                    SYSDIG_K8S_API.\n"
" --node-name=<url>\n"
"                    The node name is used as a filter when requesting metadata of pods\n"
"                    to the API server; if empty, no filter is set\n"
" -K <bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>], --k8s-api-cert=<bt_file> | <cert_file>:<key_file[#password]>[:<ca_cert_file>]\n"
"                    Use the provided files names to authenticate user and (optionally) verify the K8S API\n"
"                    server identity.\n"
"                    Each entry must specify full (absolute, or relative to the current directory) path\n"
"                    to the respective file.\n"
"                    Private key password is optional (needed only if key is password protected).\n"
"                    CA certificate is optional. For all files, only PEM file format is supported. \n"
"                    Specifying CA certificate only is obsoleted - when single entry is provided \n"
"                    for this option, it will be interpreted as the name of a file containing bearer token.\n"
"                    Note that the format of this command-line option prohibits use of files whose names contain\n"
"                    ':' or '#' characters in the file name.\n"
"                    Option can also be provided via the environment variable SYSDIG_K8S_API_CERT.\n"
#endif // MINIMAL_BUILD
" -l, --list         List all the fields that can be used in views.\n"
" --large-environment\n"
"                    Support environments larger than 4KiB\n"
"                    When the environment is larger than 4KiB, load the whole\n"
"                    environment from /proc instead of truncating to the first 4KiB\n"
"                    This may fail for short-lived processes and in that case\n"
"                    the truncated environment is used instead.\n"
" --logfile=<file>\n"
"                    Print program logs into the given file.\n"
" -n <num>, --numevents=<num>\n"
"                    Stop capturing after <num> events\n"
" --page-faults      Capture user/kernel major/minor page faults\n"
" -pc, -pcontainer\n"
"                    Instruct csysdig to use a container-friendly format in its\n"
"                    views.\n"
"                    This will cause several of the views to contain additional\n"
"                    container-related columns.\n"
" -R                 Resolve port numbers to names.\n"
" -r <readfile>, --read=<readfile>\n"
"                    Read the events from <readfile>.\n"
" --raw              Print raw output on a regular terminal instead of enabling\n"
"                    ncurses-based ANSI output.\n"
" -s <len>, --snaplen=<len>\n"
"                    Capture the first <len> bytes of each I/O buffer.\n"
"                    By default, the first 80 bytes are captured. Use this\n"
"                    option with caution, it can generate huge trace files.\n"
" -T, --force-tracers-capture\n"
"                    Tell the driver to make sure full buffers are captured from\n"
"                    /dev/null, to make sure that tracers are completely\n"
"                    captured. Note that sysdig will enable extended /dev/null\n"
"                    capture by itself after detecting that tracers are written\n"
"                    there, but that could result in the truncation of some\n"
"                    tracers at the beginning of the capture. This option allows\n"
"                    preventing that.\n"
" -v <view_id>, --view=<view_id>\n"
"                    Run the view with the given ID when csysdig starts.\n"
"                    View IDs can be found in the view documentation pages in\n"
"                    csysdig. Combine this option with a command line filter for\n"
"                    complete output customization.\n"
" --version          Print version number.\n"
" -X, --print-hex-ascii\n"
"                    When emitting JSON, print data buffers in hex and ASCII.\n"
"\n"
"How to use csysdig:\n"
"1. you can either see real time data, or analyze a trace file by using the -r\n"
"   command line flag.\n"
"2. you can switch to a different view by using the F2 key.\n"
"3. You can drill down into a selection by typing enter.\n"
"   You can navigate back by typing backspace.\n"
"4. you can observe reads and writes (F5) or see sysdig events (F6) for any\n"
"   selection.\n"
"\nAdditional help can be obtained by clicking F1 while the program is running,\n"
"and in the man page.\n\n"
	);
}
#ifdef HAS_CHISELS
//
// Register all chisel script directories: the build-time default plus any
// user-provided, semicolon-separated paths from SYSDIG_CHISEL_DIR.
// (The inspector argument is currently unused.)
//
static void add_chisel_dirs(sinsp* inspector)
{
	//
	// Build-time default directory first.
	//
	chisel_add_dir(SYSDIG_CHISELS_DIR, false);

	//
	// Then the user-configured ones, if any.
	//
	char* s_user_cdirs = getenv("SYSDIG_CHISEL_DIR");
	if(s_user_cdirs == NULL)
	{
		return;
	}

	vector<string> user_cdirs = sinsp_split(s_user_cdirs, ';');
	for(uint32_t j = 0; j < user_cdirs.size(); j++)
	{
		chisel_add_dir(user_cdirs[j], true);
	}
}
//
// Serialize every registered view as a JSON array on stdout
// (used by the --list-views command line option).
//
static void print_views(sinsp_view_manager* view_manager)
{
	Json::FastWriter writer;
	Json::Value root;

	vector<sinsp_view_info>* vlist = view_manager->get_views();

	for(auto& vinfo : *vlist)
	{
		Json::Value jv;

		jv["id"] = vinfo.m_id;
		jv["name"] = vinfo.m_name;
		jv["description"] = vinfo.m_description;
		jv["isRoot"] = vinfo.m_is_root;
		jv["drilldownTarget"] = vinfo.m_drilldown_target;
		jv["filter"] = vinfo.m_filter;
		jv["canDrillDown"] = (vinfo.m_type == sinsp_view_info::T_TABLE);

		for(auto ait = vinfo.m_applies_to.begin(); ait != vinfo.m_applies_to.end(); ++ait)
		{
			jv["appliesTo"].append(*ait);
		}

		for(auto tagit = vinfo.m_tags.begin(); tagit != vinfo.m_tags.end(); ++tagit)
		{
			jv["tags"].append(*tagit);
		}

		for(auto tipit = vinfo.m_tips.begin(); tipit != vinfo.m_tips.end(); ++tipit)
		{
			jv["tips"].append(*tipit);
		}

		root.append(jv);
	}

	string output = writer.write(root);

	// Drop the trailing newline FastWriter appends before printing.
	printf("%s", output.substr(0, output.size() - 1).c_str());
}
#endif
//
// Main event loop: pull events from the inspector and hand them to the curses
// UI until the requested event count is reached, the user asks to stop, or
// the UI signals that it wants to exit.
//
captureinfo do_inspect(sinsp* inspector,
	uint64_t cnt,
	sinsp_cursesui* ui)
{
	captureinfo retval;

	//
	// The loop condition covers both the -n event budget and the
	// signal-driven termination flag.
	//
	while(retval.m_nevts != cnt && !g_terminate)
	{
		sinsp_evt* ev;
		int32_t res = inspector->next(&ev);

		if(res == SCAP_TIMEOUT)
		{
			continue;
		}

		if(res != SCAP_EOF && res != SCAP_SUCCESS)
		{
			//
			// Event read error: fatal on live captures; on trace files,
			// flag the input as truncated and let the UI see an EOF.
			//
			if(inspector->is_live())
			{
				throw sinsp_exception(inspector->getlasterr());
			}

			ui->set_truncated_input(true);
			res = SCAP_EOF;
		}

		//
		// process_event() returns true when the UI wants to quit.
		//
		if(ui->process_event(ev, res) == true)
		{
			return retval;
		}

		retval.m_nevts++;
	}

	return retval;
}
// Program version string, initialized from the build-time SYSDIG_VERSION define.
string g_version_string = SYSDIG_VERSION;
sysdig_init_res csysdig_init(int argc, char **argv)
{
sysdig_init_res res;
sinsp* inspector = NULL;
vector<string> infiles;
int op;
uint64_t cnt = -1;
uint32_t snaplen = 0;
int long_index = 0;
int32_t n_filterargs = 0;
captureinfo cinfo;
string errorstr;
string display_view;
bool print_containers = false;
uint64_t refresh_interval_ns = 2000000000;
bool list_flds = false;
bool is_interactive = false;
int32_t json_first_row = 0;
int32_t json_last_row = 0;
int32_t sorting_col = -1;
bool list_views = false;
bool bpf = false;
string bpf_probe;
#ifdef HAS_CAPTURE
string cri_socket_path;
#endif
#ifndef _WIN32
sinsp_table::output_type output_type = sinsp_table::OT_CURSES;
#else
sinsp_table::output_type output_type = sinsp_table::OT_JSON;
#endif
#ifndef MINIMAL_BUILD
string* k8s_api = 0;
string* node_name = 0;
string* k8s_api_cert = 0;
string* mesos_api = 0;
#endif // MINIMAL_BUILD
bool terminal_with_mouse = false;
bool force_tracers_capture = false;
bool force_term_compat = false;
sinsp_evt::param_fmt event_buffer_format = sinsp_evt::PF_NORMAL;
bool page_faults = false;
static struct option long_options[] =
{
{"print-ascii", no_argument, 0, 'A' },
{"bpf", optional_argument, 0, 'B' },
#ifdef HAS_CAPTURE
{"cri", required_argument, 0, 0 },
{"cri-timeout", required_argument, 0, 0 },
#endif
{"delay", required_argument, 0, 'd' },
{"exclude-users", no_argument, 0, 'E' },
{"from", required_argument, 0, 0 },
{"help", no_argument, 0, 'h' },
#ifndef MINIMAL_BUILD
{"k8s-api", required_argument, 0, 'k'},
{"node-name", required_argument, 0, 'N'},
{"k8s-api-cert", required_argument, 0, 'K' },
#endif // MINIMAL_BUILD
{"json", no_argument, 0, 'j' },
{"interactive", optional_argument, 0, 0 },
{"large-environment", no_argument, 0, 0 },
{"list", optional_argument, 0, 'l' },
{"list-views", no_argument, 0, 0},
#ifndef MINIMAL_BUILD
{"mesos-api", required_argument, 0, 'm'},
#endif // MINIMAL_BUILD
{"numevents", required_argument, 0, 'n' },
{"page-faults", no_argument, 0, 0 },
{"print", required_argument, 0, 'p' },
{"resolve-ports", no_argument, 0, 'R'},
{"readfile", required_argument, 0, 'r' },
{"raw", no_argument, 0, 0 },
{"snaplen", required_argument, 0, 's' },
{"logfile", required_argument, 0, 0 },
{"force-tracers-capture", required_argument, 0, 'T'},
{"force-term-compat", no_argument, 0, 0},
{"sortingcol", required_argument, 0, 0 },
{"to", required_argument, 0, 0 },
{"view", required_argument, 0, 'v' },
{"version", no_argument, 0, 0 },
{"print-hex-ascii", no_argument, 0, 'X'},
{0, 0, 0, 0}
};
//
// Parse the arguments
//
try
{
inspector = new sinsp();
#ifdef HAS_CHISELS
add_chisel_dirs(inspector);
#endif
add_plugin_dirs(SYSDIG_PLUGINS_DIR);
init_plugins(inspector);
//
// Parse the args
//
while((op = getopt_long(argc, argv,
"AB::d:Ehk:K:jlm:n:p:Rr:s:Tv:X", long_options, &long_index)) != -1)
{
switch(op)
{
case '?':
//
// Command line error
//
throw sinsp_exception("command line error");
break;
case 'A':
if(event_buffer_format != sinsp_evt::PF_NORMAL)
{
fprintf(stderr, "you cannot specify more than one output format\n");
delete inspector;
return sysdig_init_res(EXIT_SUCCESS);
}
event_buffer_format = sinsp_evt::PF_EOLS_COMPACT;
break;
case 'B':
{
bpf = true;
if(optarg)
{
bpf_probe = optarg;
}
break;
}
case 'd':
try
{
refresh_interval_ns = sinsp_numparser::parseu64(optarg) * 1000000;
}
catch(...)
{
throw sinsp_exception("can't parse the -d argument, make sure it's a number");
}
if(refresh_interval_ns < 100000000)
{
throw sinsp_exception("Period must be bigger then 100ms");
}
break;
case 'E':
inspector->set_import_users(false);
break;
case 'h':
usage();
delete inspector;
return sysdig_init_res(EXIT_SUCCESS);
#ifndef MINIMAL_BUILD
case 'k':
k8s_api = new string(optarg);
break;
case 'N':
node_name = new string(optarg);
break;
case 'K':
k8s_api_cert = new string(optarg);
break;
#endif // MINIMAL_BUILD
case 'j':
output_type = sinsp_table::OT_JSON;
break;
case 'l':
list_flds = true;
break;
#ifndef MINIMAL_BUILD
case 'm':
mesos_api = new string(optarg);
break;
#endif // MINIMAL_BUILD
case 'n':
try
{
cnt = sinsp_numparser::parseu64(optarg);
}
catch(...)
{
throw sinsp_exception("can't parse the -n argument, make sure it's a number");
}
if(cnt <= 0)
{
throw sinsp_exception(string("invalid event count ") + optarg);
res.m_res = EXIT_FAILURE;
goto exit;
}
break;
case 'p':
if(string(optarg) == "c" || string(optarg) == "container")
{
inspector->set_print_container_data(true);
print_containers = true;
}
break;
case 'R':
inspector->set_hostname_and_port_resolution_mode(true);
break;
case 'r':
infiles.push_back(optarg);
#ifndef MINIMAL_BUILD
k8s_api = new string();
mesos_api = new string();
#endif // MINIMAL_BUILD
break;
case 's':
snaplen = atoi(optarg);
break;
case 'T':
force_tracers_capture = true;
break;
case 'v':
display_view = optarg;
break;
case 'X':
if(event_buffer_format != sinsp_evt::PF_NORMAL)
{
fprintf(stderr, "you cannot specify more than one output format\n");
delete inspector;
return sysdig_init_res(EXIT_FAILURE);
}
event_buffer_format = sinsp_evt::PF_HEXASCII;
break;
case 0:
{
if(long_options[long_index].flag != 0)
{
break;
}
string optname = string(long_options[long_index].name);
if(optname == "version")
{
printf("sysdig version %s\n", SYSDIG_VERSION);
delete inspector;
return sysdig_init_res(EXIT_SUCCESS);
}
else if(optname == "interactive")
{
is_interactive = true;
output_type = sinsp_table::OT_JSON;
}
else if(optname == "large-environment")
{
inspector->set_large_envs(true);
}
#ifdef HAS_CAPTURE
#ifndef MINIMAL_BUILD
else if(optname == "cri")
{
cri_socket_path = optarg;
}
else if(optname == "cri-timeout")
{
inspector->set_cri_timeout(sinsp_numparser::parsed64(optarg));
}
#endif // MINIMAL_BUILD
#endif
else if(optname == "logfile")
{
inspector->set_log_file(optarg);
}
else if(optname == "raw")
{
output_type = sinsp_table::OT_RAW;
}
else if(optname == "force-term-compat")
{
force_term_compat = true;
}
else if(optname == "from")
{
json_first_row = sinsp_numparser::parsed32(optarg);
}
else if(optname == "to")
{
json_last_row = sinsp_numparser::parsed32(optarg);
}
else if(optname == "sortingcol")
{
sorting_col = sinsp_numparser::parsed32(optarg);
}
else if(optname == "list-views")
{
list_views = true;
}
else if(optname == "page-faults")
{
page_faults = true;
}
}
break;
default:
break;
}
}
#ifdef HAS_CAPTURE
if(!cri_socket_path.empty())
{
inspector->set_cri_socket_path(cri_socket_path);
}
#endif
string filter;
//
// If -l was specified, print the fields and exit
//
if(list_flds)
{
list_fields(false, false);
res.m_res = EXIT_SUCCESS;
goto exit;
}
//
// the filter is at the end of the command line
//
if(optind + n_filterargs < argc)
{
#ifdef HAS_FILTERING
for(int32_t j = optind + n_filterargs; j < argc; j++)
{
filter += argv[j];
if(j < argc)
{
filter += " ";
}
}
#else
fprintf(stderr, "filtering not compiled.\n");
res.m_res = EXIT_FAILURE;
goto exit;
#endif
}
if(!bpf)
{
const char *probe = scap_get_bpf_probe_from_env();
if(probe)
{
bpf = true;
bpf_probe = probe;
}
}
if(bpf)
{
inspector->set_bpf_probe(bpf_probe);
}
if(signal(SIGINT, signal_callback) == SIG_ERR)
{
fprintf(stderr, "An error occurred while setting SIGINT signal handler.\n");
res.m_res = EXIT_FAILURE;
goto exit;
}
if(signal(SIGTERM, signal_callback) == SIG_ERR)
{
fprintf(stderr, "An error occurred while setting SIGTERM signal handler.\n");
res.m_res = EXIT_FAILURE;
goto exit;
}
if(json_last_row < json_first_row)
{
fprintf(stderr, "'to' argument cannot be smaller than the 'from' one.\n");
res.m_res = EXIT_FAILURE;
goto exit;
}
//
// Initialize ncurses
//
#ifndef NOCURSESUI
if(output_type == sinsp_table::OT_CURSES)
{
//
// Check if terminal has mouse support
//
const char* mct = force_term_compat? MOUSE_CAPABLE_TERM_COMPAT : MOUSE_CAPABLE_TERM;
terminal_with_mouse = (tgetent(NULL, mct) != 0);
if(terminal_with_mouse)
{
//
// Enable fine-grained mouse activity capture by setting xterm-1002
//
setenv("TERM", mct, 1);
}
(void) initscr(); // initialize the curses library
(void) nonl(); // tell curses not to do NL->CR/NL on output
intrflush(stdscr, false);
keypad(stdscr, true);
curs_set(0);
if(has_colors())
{
start_color();
}
use_default_colors();
mousemask(ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION, NULL);
noecho();
timeout(0);
// If this is uncommented, it's possible to natively handle stuff like CTRL+c
//raw();
}
#endif
//
// Create the list of views
//
sinsp_view_manager view_manager;
//
// Scan the chisel list to load the Lua views, and add them to the list
//
vector<chisel_desc> chlist;
sinsp_chisel::get_chisel_list(&chlist);
for(auto it : chlist)
{
if(it.m_viewinfo.m_valid)
{
if(print_containers)
{
it.m_viewinfo.apply_tag("containers");
}
else
{
it.m_viewinfo.apply_tag("default");
}
if(it.m_viewinfo.m_tags.size() != 0)
{
if(it.m_viewinfo.m_tags[0] == "Containers")
{
continue;
}
}
if(output_type != sinsp_table::OT_JSON)
{
if(std::find(it.m_viewinfo.m_tags.begin(),
it.m_viewinfo.m_tags.end(),
"nocsysdig") != it.m_viewinfo.m_tags.end())
{
continue;
}
}
view_manager.add(&it.m_viewinfo);
}
}
//
// Set the initial display view
//
view_manager.set_selected_view(display_view);
if(list_views)
{
print_views(&view_manager);
goto exit;
}
//
// Go through the input sources and apply the processing to all of them
//
for(uint32_t j = 0; j < infiles.size() || infiles.size() == 0; j++)
{
//
// Initialize the UI
//
sinsp_cursesui ui(inspector,
(infiles.size() != 0)? infiles[0] : "",
(filter.size() != 0)? filter : "",
refresh_interval_ns,
print_containers,
output_type,
terminal_with_mouse,
json_first_row,
json_last_row,
sorting_col,
event_buffer_format);
ui.configure(&view_manager);
if(display_view == "dig" || display_view == "echo")
{
ui.start(false, true);
}
else
{
ui.start(false, false);
}
if(is_interactive)
{
printf("ready\n");
//
// In interactive mode, make sure stderr is flushed at every printf
//
setbuf(stderr, NULL);
//
// Set the UI in interactive mode and start listening to user
// input.
//
ui.set_interactive(true);
}
//
// Launch the capture
//
if(infiles.size() != 0)
{
//
// We have a file to open
//
inspector->open(infiles[j]);
}
else
{
if(j > 0)
{
break;
}
//
// No file to open, this is a live capture
//
#if defined(HAS_CAPTURE)
bool open_success = true;
try
{
inspector->open("");
}
catch(const sinsp_exception& e)
{
open_success = false;
}
#ifndef _WIN32
//
// Starting the live capture failed, try to load the driver with
// modprobe.
//
if(!open_success)
{
open_success = true;
if(bpf)
{
if(bpf_probe.empty())
{
if(system("scap-driver-loader bpf"))
{
fprintf(stderr, "Unable to load the BPF probe\n");
}
}
}
else
{
if(system("modprobe " DRIVER_NAME " > /dev/null 2> /dev/null"))
{
fprintf(stderr, "Unable to load the driver\n");
}
}
inspector->open("");
}
#endif // _WIN32
#else // HAS_CAPTURE
//
// Starting live capture
// If this fails on Windows and OSX, don't try with any driver
//
inspector->open("");
#endif // HAS_CAPTURE
//
// Enable gathering the CPU from the kernel module
//
inspector->set_get_procs_cpu_from_driver(true);
}
//
// If required, set the snaplen
//
if(snaplen != 0)
{
inspector->set_snaplen(snaplen);
}
//
// If required, tell the driver to enable tracers capture
//
if(force_tracers_capture)
{
inspector->enable_tracers_capture();
}
if(page_faults)
{
inspector->enable_page_faults();
}
#ifndef MINIMAL_BUILD
//
// run k8s, if required
//
if(k8s_api)
{
if(!k8s_api_cert)
{
if(char* k8s_cert_env = getenv("SYSDIG_K8S_API_CERT"))
{
k8s_api_cert = new string(k8s_cert_env);
}
}
inspector->init_k8s_client(k8s_api, k8s_api_cert, node_name);
k8s_api = 0;
k8s_api_cert = 0;
}
else if(char* k8s_api_env = getenv("SYSDIG_K8S_API"))
{
if(k8s_api_env != NULL)
{
if(!k8s_api_cert)
{
if(char* k8s_cert_env = getenv("SYSDIG_K8S_API_CERT"))
{
k8s_api_cert = new string(k8s_cert_env);
}
}
k8s_api = new string(k8s_api_env);
inspector->init_k8s_client(k8s_api, k8s_api_cert, node_name);
}
else
{
delete k8s_api;
delete k8s_api_cert;
}
k8s_api = 0;
k8s_api_cert = 0;
}
//
// run mesos, if required
//
if(mesos_api)
{
inspector->init_mesos_client(mesos_api);
}
else if(char* mesos_api_env = getenv("SYSDIG_MESOS_API"))
{
if(mesos_api_env != NULL)
{
mesos_api = new string(mesos_api_env);
inspector->init_mesos_client(mesos_api);
}
}
delete mesos_api;
mesos_api = 0;
if(output_type == sinsp_table::OT_JSON)
{
printf("{\"slices\": [\n");
if(display_view != "dig" && display_view != "echo")
{
printf("{\"progress\": 0},\n");
}
}
#endif // MINIMAL_BUILD
//
// Start the capture loop
//
cinfo = do_inspect(inspector,
cnt,
&ui);
if(output_type == sinsp_table::OT_JSON)
{
// The following line produces malformed json when using
// csysdig with the -j option. We are leaving it here,
// commented, in case it's needed for tools that consume
// csysdig's json like sysdig inspect.
//printf("]}\n");
//printf("%c", EOF);
}
//
// Done. Close the capture.
//
inspector->close();
}
}
catch(const sinsp_capture_interrupt_exception&)
{
}
catch(const scap_open_exception& e)
{
errorstr = e.what();
res.m_res = e.scap_rc();
}
catch(const std::exception& e)
{
errorstr = e.what();
res.m_res = EXIT_FAILURE;
}
catch(...)
{
errorstr = "uncaught exception";
res.m_res = EXIT_FAILURE;
}
exit:
if(inspector)
{
delete inspector;
}
//
// Restore the original screen
//
#ifndef NOCURSESUI
if(output_type == sinsp_table::OT_CURSES)
{
endwin();
}
#endif
if(errorstr != "")
{
cerr << errorstr << endl;
}
return res;
}
//
// MAIN
//
int main(int argc, char **argv)
{
	//
	// Run csysdig and propagate its result code to the shell.
	//
	sysdig_init_res res = csysdig_init(argc, argv);

#ifdef _WIN32
	_CrtDumpMemoryLeaks();
#endif

	return res.m_res;
}
|
#include "stdafx.h"
#include "build.h"
#include "../xrlc_light/xrdeflector.h"
#include "../xrlc_light/xrThread.h"
#include "../xrLC_Light/xrLC_GlobalData.h"
#include "../xrLC_Light/xrLightVertex.h"
#include "net.h"
//#include "../xrLC_Light/net_task_manager.h"
#include "../xrLC_Light/lcnet_task_manager.h"
#include "../xrLC_Light/mu_model_light.h"
// Guards task_pool against concurrent access from the CLMThread workers.
std::recursive_mutex task_CS;
// Indices into lc_global_data()->g_deflectors() still awaiting lighting.
xr_vector<int> task_pool;
class CLMThread : public CThread {
private:
HASH H;
CDB::COLLIDER DB;
base_lighting LightsSelected;
public:
CLMThread(u32 ID) : CThread(ID) {
// thMonitor= TRUE;
thMessages = FALSE;
}
virtual void Execute() {
CDeflector* D = 0;
for (;;) {
// Get task
{
std::lock_guard<decltype(task_CS)> lock(task_CS);
thProgress =
1.f - float(task_pool.size()) / float(lc_global_data()->g_deflectors().size());
if (task_pool.empty())
return;
D = lc_global_data()->g_deflectors()[task_pool.back()];
task_pool.pop_back();
}
// Perform operation
try {
D->Light(&DB, &LightsSelected, H);
} catch (...) {
clMsg("* ERROR: CLMThread::Execute - light");
}
}
}
};
void CBuild::LMapsLocal() {
FPU::m64r();
mem_Compact();
// Randomize deflectors
#ifndef NET_CMP
std::shuffle(lc_global_data()->g_deflectors().begin(), lc_global_data()->g_deflectors().end(),
rng);
#endif
#ifndef NET_CMP
for (u32 dit = 0; dit < lc_global_data()->g_deflectors().size(); dit++)
task_pool.push_back(dit);
#else
task_pool.push_back(14);
task_pool.push_back(16);
#endif
// Main process (4 threads)
Status("Lighting...");
CThreadManager threads;
const u32 thNUM = 6;
CTimer start_time;
start_time.Start();
for (int L = 0; L < thNUM; L++)
threads.start(xr_new<CLMThread>(L));
threads.wait(500);
clMsg("%f seconds", start_time.GetElapsed_sec());
}
// Compute lightmaps, either locally or distributed over the network
// depending on the build options (NET_CMP builds exercise the network
// task manager path explicitly).
void CBuild::LMaps() {
    //****************************************** Lmaps
    Phase("LIGHT: LMaps...");
    // DeflectorsStats ();
#ifndef NET_CMP
    if (g_build_options.b_net_light)
        // net_light ();
        lc_net::net_lightmaps();
    else {
        LMapsLocal();
    }
#else
    // Comparison build: run both the network task manager and the local
    // lighting path against the same global data.
    create_net_task_manager();
    get_net_task_manager()->create_global_data_write(pBuild->path);
    LMapsLocal();
    get_net_task_manager()->run();
    destroy_net_task_manager();
    // net_light ();
#endif
}
void XRLC_LIGHT_API ImplicitNetWait();
// Full lighting pipeline: implicit lighting, lightmaps, vertex lighting,
// then synchronization with asynchronous workers and lightmap merging.
// The phases must run in this order; later phases consume earlier results.
void CBuild::Light() {
    //****************************************** Implicit
    {
        FPU::m64r();
        Phase("LIGHT: Implicit...");
        mem_Compact();
        ImplicitLighting();
    }
    LMaps();
    //****************************************** Vertex
    FPU::m64r();
    Phase("LIGHT: Vertex...");
    mem_Compact();
    LightVertex();
    //
    // Wait for all outstanding implicit/mu-model/network lighting work
    // before the merge phase reads the results.
    ImplicitNetWait();
    WaitMuModelsLocalCalcLightening();
    lc_net::get_task_manager().wait_all();
    // get_task_manager().wait_all();
    lc_net::get_task_manager().release();
    //
    //****************************************** Merge LMAPS
    {
        FPU::m64r();
        Phase("LIGHT: Merging lightmaps...");
        mem_Compact();
        xrPhase_MergeLM();
    }
}
void CBuild::LightVertex() { ::LightVertex(!!g_build_options.b_net_light); }
|
/****************************************************************************
**
** Copyright (C) 2015 The Qt Company Ltd.
** Contact: http://www.qt.io/licensing/
**
** This file is part of the examples of the Qt Toolkit.
**
** $QT_BEGIN_LICENSE:BSD$
** You may use this file under the terms of the BSD license as follows:
**
** "Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions are
** met:
** * Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** * Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in
** the documentation and/or other materials provided with the
** distribution.
** * Neither the name of The Qt Company Ltd nor the names of its
** contributors may be used to endorse or promote products derived
** from this software without specific prior written permission.
**
**
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
** OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
** LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
** OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
**
** $QT_END_LICENSE$
**
****************************************************************************/
#include "renderarea.h"
#include <QPainter>
#include <QPaintEvent>
//! [0]
RenderArea::RenderArea(QWidget *parent)
    : QWidget(parent)
{
    // Use a small fixed-size font and cache the bounding rectangles of the
    // axis labels so paintEvent() can center them cheaply later on.
    QFont labelFont = font();
    labelFont.setPixelSize(12);
    setFont(labelFont);

    const QFontMetrics metrics(labelFont);
    xBoundingRect = metrics.boundingRect(tr("x"));
    yBoundingRect = metrics.boundingRect(tr("y"));
}
//! [0]
//! [1]
// Replace the list of transformations applied before drawing the shape,
// then schedule a repaint.
void RenderArea::setOperations(const QList<Operation> &operations)
{
    this->operations = operations;
    update();
}
//! [1]
//! [2]
// Replace the painter path to render, then schedule a repaint.
void RenderArea::setShape(const QPainterPath &shape)
{
    this->shape = shape;
    update();
}
//! [2]
//! [3]
// Smallest useful size for the render area.
QSize RenderArea::minimumSizeHint() const
{
    return {182, 182};
}
//! [3]
//! [4]
// Preferred size for the render area.
QSize RenderArea::sizeHint() const
{
    return {232, 232};
}
//! [4]
//! [5]
// Paint the widget: a white background, the transformed shape, the fixed
// outline, and finally the transformed coordinate axes. The save/restore
// pair keeps drawOutline() untransformed; the second transformPainter()
// call is deliberately NOT restored so the axes are drawn transformed.
void RenderArea::paintEvent(QPaintEvent *event)
{
    QPainter painter(this);
    painter.setRenderHint(QPainter::Antialiasing);
    painter.fillRect(event->rect(), QBrush(Qt::white));
    painter.translate(66, 66);
//! [5]

//! [6]
    painter.save();
    transformPainter(painter);
    drawShape(painter);
    painter.restore();
//! [6]

//! [7]
    drawOutline(painter);
//! [7]

//! [8]
    transformPainter(painter);
    drawCoordinates(painter);
}
//! [8]
//! [9]
// Draw the red x/y axes with arrow heads and their labels, using the
// bounding rectangles cached in the constructor to center the letters.
void RenderArea::drawCoordinates(QPainter &painter)
{
    painter.setPen(Qt::red);

    // x axis: line plus two short strokes forming the arrow head.
    painter.drawLine(0, 0, 50, 0);
    painter.drawLine(48, -2, 50, 0);
    painter.drawLine(48, 2, 50, 0);
    painter.drawText(60 - xBoundingRect.width() / 2,
                     0 + xBoundingRect.height() / 2, tr("x"));

    // y axis, same construction.
    painter.drawLine(0, 0, 0, 50);
    painter.drawLine(-2, 48, 0, 50);
    painter.drawLine(2, 48, 0, 50);
    painter.drawText(0 - yBoundingRect.width() / 2,
                     60 + yBoundingRect.height() / 2, tr("y"));
}
//! [9]
//! [10]
// Draw the untransformed 100x100 reference square as a dashed dark-green
// outline with no fill.
void RenderArea::drawOutline(QPainter &painter)
{
    // Build color and style into one pen. The original code called
    // setPen(Qt::darkGreen) and then setPen(Qt::DashLine): the second call
    // constructs a fresh pen with the default (black) color, so the green
    // was silently discarded.
    painter.setPen(QPen(Qt::darkGreen, 1, Qt::DashLine));
    painter.setBrush(Qt::NoBrush);
    painter.drawRect(0, 0, 100, 100);
}
//! [10]
//! [11]
// Fill the current shape with a solid blue brush.
void RenderArea::drawShape(QPainter &painter)
{
    painter.fillPath(shape, Qt::blue);
}
//! [11]
//! [12]
// Apply every queued operation, in order, to the painter's transform.
void RenderArea::transformPainter(QPainter &painter)
{
    for (const Operation op : operations) {
        switch (op) {
        case Translate:
            painter.translate(50, 50);
            break;
        case Scale:
            painter.scale(0.75, 0.75);
            break;
        case Rotate:
            painter.rotate(60);
            break;
        case NoTransformation:
        default:
            break;
        }
    }
}
//! [12]
|
/***********************************************************************************
** MIT License **
** **
** Copyright (c) 2018 Victor DENIS (victordenis01@gmail.com) **
** **
** Permission is hereby granted, free of charge, to any person obtaining a copy **
** of this software and associated documentation files (the "Software"), to deal **
** in the Software without restriction, including without limitation the rights **
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell **
** copies of the Software, and to permit persons to whom the Software is **
** furnished to do so, subject to the following conditions: **
** **
** The above copyright notice and this permission notice shall be included in all **
** copies or substantial portions of the Software. **
** **
** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR **
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, **
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE **
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER **
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, **
** OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE **
** SOFTWARE. **
***********************************************************************************/
#include "Widgets/Tab/TabStackedWidget.hpp"
#include <QApplication>
#include <QTimer>
#include "Utils/Settings.hpp"
#include "Utils/SideBarManager.hpp"
#include "Widgets/NavigationBar.hpp"
#include "Widgets/Tab/ComboTabBar.hpp"
#include "Widgets/Tab/TabWidget.hpp"
namespace Sn {
TabStackedWidget::TabStackedWidget(QWidget* parent) :
	QWidget(parent),
	m_currentIndex(-1),
	m_previousIndex(-1),
	m_sideBarManager(new SideBarManager(this))
{
	// Widget tree: a zero-margin vertical layout holding a splitter, which
	// hosts the page stack. Side bars are inserted into the splitter
	// lazily by addSideBar().
	m_layout = new QVBoxLayout(this);
	m_layout->setSpacing(0);
	m_layout->setContentsMargins(0, 0, 0, 0);

	m_stack = new QStackedWidget(this);
	m_stack->setObjectName("tabwidget-stack");

	m_splitter = new QSplitter(this);
	m_splitter->setObjectName("sidebar-splitter");
	m_splitter->addWidget(m_stack);
	m_splitter->setCollapsible(0, false);

	m_layout->addWidget(m_splitter);

	// Keep cached indexes in sync when pages leave the stack.
	connect(m_stack, &QStackedWidget::widgetRemoved, this, &TabStackedWidget::tabWasRemoved);
}
TabStackedWidget::~TabStackedWidget()
{
	// Nothing to do: Qt parent-child ownership releases the children.
}
// Lazily create the single side bar and dock it to the left of the stack.
// Calling this again simply returns the existing instance.
SideBar* TabStackedWidget::addSideBar()
{
	if (m_sideBar)
		return m_sideBar.data();

	m_sideBar = new SideBar(m_sideBarManager, dynamic_cast<TabWidget*>(this));

	m_splitter->insertWidget(0, m_sideBar.data());
	m_splitter->setCollapsible(0, false);
	// Narrow initial side bar; the oversized second value lets the stack
	// take all remaining space.
	m_splitter->setSizes({124, 2000});

	return m_sideBar.data();
}
// Populate the given menu with the side bar toggle actions.
void TabStackedWidget::createSideBarsMenu(QMenu* menu)
{
	m_sideBarManager->createMenu(menu);
}
// Persist which side bar is currently active.
void TabStackedWidget::saveSideBarSettings()
{
	Settings settings{};
	settings.beginGroup("SideBars");
	settings.setValue("Active", sideBarManager()->activeSideBar());
	// Balance beginGroup(): QSettings requires matching endGroup() calls,
	// otherwise any later write through this object lands in "SideBars/".
	settings.endGroup();
}
// Install (and take over) the tab bar driving this stacked widget,
// replacing and deleting any previously installed bar.
void TabStackedWidget::setTabBar(ComboTabBar* tab)
{
	Q_ASSERT(tab);

	// Reparent the bar to this widget if it was created elsewhere.
	if (tab->parentWidget() != this) {
		tab->setParent(this);
		tab->show();
	}

	// Replace any previous bar and mark the layout dirty so setUpLayout()
	// re-runs once the new bar becomes visible.
	delete m_comboTabBar;
	m_dirtyTabBar = true;
	m_comboTabBar = tab;
	setFocusProxy(m_comboTabBar);

	// Wire the bar's signals to our slots.
	connect(m_comboTabBar, &ComboTabBar::currentChanged, this, &TabStackedWidget::showTab);
	connect(m_comboTabBar, &ComboTabBar::tabMoved, this, &TabStackedWidget::tabWasMoved);
	connect(m_comboTabBar, &ComboTabBar::overFlowChanged, this, &TabStackedWidget::setUpLayout);

	if (m_comboTabBar->tabsClosable())
		connect(m_comboTabBar, &ComboTabBar::tabCloseRequested, this, &TabStackedWidget::tabCloseRequested);

	// Adopt the bar's document mode and watch it for show events so the
	// deferred layout pass can run (see eventFilter()).
	setDocumentMode(m_comboTabBar->documentMode());
	m_comboTabBar->installEventFilter(this);
	setUpLayout();
}
// Place the navigation tool bar above the splitter and refresh the layout.
void TabStackedWidget::setNavigationToolBar(NavigationToolBar* navigationToolBar)
{
	m_layout->insertWidget(0, navigationToolBar);
	setUpLayout();
}
// Document mode is owned by the tab bar; report its current state.
bool TabStackedWidget::documentMode() const
{
	return m_comboTabBar->documentMode();
}

// In document mode the tab bar draws its base frame and tabs do not
// expand to fill the available width.
void TabStackedWidget::setDocumentMode(bool enable)
{
	m_comboTabBar->setDocumentMode(enable);
	m_comboTabBar->setDrawBase(enable);
	m_comboTabBar->setExpanding(!enable);
}
// Append a tab (index -1 means "choose the position automatically").
int TabStackedWidget::addTab(QWidget* widget, const QString& label, bool pinned)
{
	return insertTab(-1, widget, label, pinned);
}
// Insert a tab at the requested position, keeping pinned tabs grouped
// before normal tabs. Returns the actual index used, or -1 on failure.
int TabStackedWidget::insertTab(int index, QWidget* widget, const QString& label, bool pinned)
{
	if (!widget)
		return -1;

	if (pinned) {
		// Pinned tabs live in [0, pinnedTabsCount()]; clamp the requested
		// position into that range (negative means "append to pinned").
		index = index < 0 ? m_comboTabBar->pinnedTabsCount() : qMin(index, m_comboTabBar->pinnedTabsCount());
		index = m_stack->insertWidget(index, widget);
		m_comboTabBar->insertTab(index, QIcon(), label, true);
	}
	else {
		// Normal tabs must come after every pinned tab.
		index = index < 0 ? -1 : qMax(index, m_comboTabBar->pinnedTabsCount());
		index = m_stack->insertWidget(index, widget);
		m_comboTabBar->insertTab(index, QIcon(), label, false);
	}

	// Shift the cached indexes that the insertion displaced.
	if (m_previousIndex >= index)
		++m_previousIndex;
	if (m_currentIndex >= index)
		++m_currentIndex;

	// Defer the layout refresh until the event loop settles.
	QTimer::singleShot(10, this, &TabStackedWidget::setUpLayout);

	return index;
}
// Text of the tab at `index` (delegated to the tab bar).
QString TabStackedWidget::tabText(int index) const
{
	return m_comboTabBar->tabText(index);
}

// Set the text of the tab at `index`.
void TabStackedWidget::setTabText(int index, const QString& label)
{
	m_comboTabBar->setTabText(index, label);
}

// Tooltip of the tab at `index`.
QString TabStackedWidget::tabToolTip(int index) const
{
	return m_comboTabBar->tabToolTip(index);
}

// Set the tooltip of the tab at `index`.
void TabStackedWidget::setTabToolTip(int index, const QString& tip)
{
	m_comboTabBar->setTabToolTip(index, tip);
}
// Toggle the pinned state of the tab at `index` by removing it and
// re-inserting it on the other side of the pinned/normal boundary.
// Returns the tab's new index, or -1 if the index was invalid.
int TabStackedWidget::pinUnPinTab(int index, const QString& title)
{
	QWidget* widget{m_stack->widget(index)};
	QWidget* currentWidget{m_stack->currentWidget()};

	if (!widget || !currentWidget)
		return -1;

	// Tabs before pinnedTabsCount() are pinned, so a tab at or past that
	// boundary is about to become pinned (and vice versa).
	bool makePinned = index >= m_comboTabBar->pinnedTabsCount();

	// Detach the icon button so it can be reattached at the new position.
	QWidget* button = m_comboTabBar->tabButton(index, m_comboTabBar->iconButtonPosition());

	// Suppress currentChanged while the tab moves between sections.
	m_comboTabBar->m_blockCurrentChangedSignal = true;
	m_comboTabBar->setTabButton(index, m_comboTabBar->iconButtonPosition(), nullptr);

	m_stack->removeWidget(widget);

	int newIndex{insertTab(makePinned ? 0 : m_comboTabBar->pinnedTabsCount(), widget, title, makePinned)};

	m_comboTabBar->setTabButton(newIndex, m_comboTabBar->iconButtonPosition(), button);
	m_comboTabBar->m_blockCurrentChangedSignal = false;

	// Restore the selection that was active before the move.
	setCurrentWidget(currentWidget);

	emit pinStateChanged(newIndex, makePinned);

	return newIndex;
}
void TabStackedWidget::removeTab(int index)
{
if (QWidget* widget = m_stack->widget(index)) {
if (index == currentIndex() && count() > 1)
selectTabOnRemove();
m_stack->removeWidget(widget);
}
}
// Move a tab; the stack follows via the tabMoved -> tabWasMoved connection.
void TabStackedWidget::moveTab(int from, int to)
{
	m_comboTabBar->moveTab(from, to);
}
// Index of the currently selected tab.
int TabStackedWidget::currentIndex() const
{
	return m_comboTabBar->currentIndex();
}

// Index of `widget` in the page stack, or -1 if absent.
int TabStackedWidget::indexOf(QWidget* widget) const
{
	return m_stack->indexOf(widget);
}

// Number of tabs.
int TabStackedWidget::count() const
{
	return m_comboTabBar->count();
}

// Page widget of the currently selected tab.
QWidget* TabStackedWidget::currentWidget() const
{
	return m_stack->currentWidget();
}

// Page widget at `index`, or nullptr if out of range.
QWidget* TabStackedWidget::widget(int index) const
{
	return m_stack->widget(index);
}

// Select the tab at `index`; the stack follows via showTab().
void TabStackedWidget::setCurrentIndex(int index)
{
	m_comboTabBar->setCurrentIndex(index);
}

// Select the tab whose page is `widget`.
void TabStackedWidget::setCurrentWidget(QWidget* widget)
{
	m_comboTabBar->setCurrentIndex(indexOf(widget));
}
// Refresh the tab bar layout, deferring until the bar is visible.
void TabStackedWidget::setUpLayout()
{
	// Defer while hidden; eventFilter() re-runs this on QEvent::Show.
	// (The original also computed m_stack->count() into an unused local.)
	if (!m_comboTabBar->isVisible()) {
		m_dirtyTabBar = true;
		return;
	}

	// Re-applying the current elide mode forces the bar to relayout.
	m_comboTabBar->setElideMode(m_comboTabBar->elideMode());
	m_dirtyTabBar = false;
}
// Watch the tab bar so a deferred layout pass runs when it becomes
// visible. Never consumes the event.
bool TabStackedWidget::eventFilter(QObject* obj, QEvent* event)
{
	// Logical && (the original used bitwise &) so intent is explicit and
	// the remaining comparisons short-circuit when the bar is not dirty.
	if (m_dirtyTabBar && obj == m_comboTabBar && event->type() == QEvent::Show)
		setUpLayout();

	return false;
}
// Ctrl+Tab / Ctrl+Shift+Tab (or Backtab) cycles through enabled tabs,
// mirroring QTabWidget's built-in keyboard handling. Other keys are
// ignored so they propagate to the parent.
void TabStackedWidget::keyPressEvent(QKeyEvent* event)
{
	if ((event->key() == Qt::Key_Tab || event->key() == Qt::Key_Backtab) && count() > 1
		&& event->modifiers() & Qt::ControlModifier) {
		int pageCount{count()};
		int page{currentIndex()};
		// Direction: backwards for Backtab or Shift, forwards otherwise.
		int dx{(event->key() == Qt::Key_Backtab || event->modifiers() & Qt::ShiftModifier) ? -1 : 1};

		// Scan at most pageCount tabs, wrapping around, until an enabled
		// one is found.
		for (int pass{0}; pass < pageCount; ++pass) {
			page += dx;

			if (page < 0)
				page = count() - 1;
			else if (page >= pageCount)
				page = 0;

			if (m_comboTabBar->isTabEnabled(page)) {
				setCurrentIndex(page);
				break;
			}
		}

		// Keep keyboard focus somewhere sensible after switching.
		if (!QApplication::focusWidget())
			m_comboTabBar->setFocus();
	}
	else
		event->ignore();
}
// React to the tab bar's currentChanged: switch the stacked page for valid
// indexes, but always record the change and notify listeners.
void TabStackedWidget::showTab(int index)
{
	if (validIndex(index))
		m_stack->setCurrentIndex(index);

	m_previousIndex = m_currentIndex;
	m_currentIndex = index;

	emit currentChanged(index);
}
void TabStackedWidget::tabWasMoved(int from, int to)
{
m_stack->blockSignals(true);
QWidget* widget{m_stack->widget(from)};
m_stack->removeWidget(widget);
m_stack->insertWidget(to, widget);
m_stack->blockSignals(false);
}
// Keep the cached previous/current indexes consistent after a page left
// the stack: an index equal to the removed one becomes invalid (-1),
// larger ones shift down by one. Then drop the matching tab-bar entry.
void TabStackedWidget::tabWasRemoved(int index)
{
	auto adjust = [index](int& value) {
		if (value == index)
			value = -1;
		else if (value > index)
			--value;
	};

	adjust(m_previousIndex);
	adjust(m_currentIndex);

	m_comboTabBar->removeTab(index);
}
// True when `index` addresses an existing page in the stack.
bool TabStackedWidget::validIndex(int index) const
{
	return index >= 0 && index < m_stack->count();
}
// Choose and activate the tab to select before the current one is
// removed, honoring the tab bar's selectionBehaviorOnRemove() policy.
// Only meaningful when at least one other tab remains.
void TabStackedWidget::selectTabOnRemove()
{
	Q_ASSERT(count() > 1);

	int index{-1};

	switch (m_comboTabBar->selectionBehaviorOnRemove()) {
	case QTabBar::SelectPreviousTab:
		if (validIndex(m_previousIndex)) {
			index = m_previousIndex;
			break;
		}
		// Intentional fallthrough: no valid previous tab is recorded, so
		// fall back to the SelectLeftTab policy below.
	case QTabBar::SelectLeftTab:
		index = currentIndex() - 1;
		// Current tab is the leftmost: take its right-hand neighbor.
		if (!validIndex(index))
			index = 1;
		break;
	case QTabBar::SelectRightTab:
		index = currentIndex() + 1;
		// Current tab is the rightmost: take its left-hand neighbor.
		if (!validIndex(index))
			index = currentIndex() - 1;
		break;
	default:
		break;
	}

	Q_ASSERT(validIndex(index));
	setCurrentIndex(index);
}
}
|
#ifdef _USE_DIRECTX12
#include "DX12MeshRenderer.h"
#include "revGraphics.h"
// Nothing to do at construction time; GPU-side resources are created
// later by SetModel()/SetMesh()/Initialize() and released by Destroy().
DX12MeshRenderer::DX12MeshRenderer()
{
}

DX12MeshRenderer::~DX12MeshRenderer()
{
}
void DX12MeshRenderer::SetModel(const revModel* model)
{
revMeshRenderer::SetModel(model);
uint32 transformCount = static_cast<uint32>(transforms.size());
transformConstantBufferViews.resize(transformCount, nullptr);
for (uint32 i = 0; i < transformCount; ++i) {
transformConstantBufferViews[i] = new DX12ConstantBufferView();
}
}
// Bind a mesh to slot `index` and (re)build its DX12 vertex/index buffer
// views. Slots without vertex data are skipped, so the view vectors may
// stay shorter than drawResources.
void DX12MeshRenderer::SetMesh(uint32 index, const revMesh* mesh)
{
	revMeshRenderer::SetMesh(index, mesh);
	// Nothing to view if the slot has no vertex data.
	if (drawResources[index] == nullptr || drawResources[index]->vertexBuffer == nullptr) return;
	if (vertexBufferViews.size() <= index) {
		vertexBufferViews.resize(index + 1);
	}
	// NOTE(review): assigning without deleting a previous view at this slot
	// leaks it if SetMesh() is called twice for the same index — confirm.
	vertexBufferViews[index] = new DX12VertexBufferView();
	vertexBufferViews[index]->Create(revGraphics::Get().GetDevice(), drawResources[index]->vertexBuffer);
	if (indexBufferViews.size() <= index) indexBufferViews.resize(index + 1);
	if (drawResources[index]->indexBuffer != nullptr) {
		indexBufferViews[index] = new DX12IndexBufferView();
		indexBufferViews[index]->Create(revGraphics::Get().GetDevice(), drawResources[index]->indexBuffer);
	}
}
// Release all per-mesh buffer views, then let the base class clean up.
void DX12MeshRenderer::Destroy()
{
	// Guard both vector sizes and stored pointers: SetMesh() skips meshes
	// without a vertex buffer, so vertexBufferViews/indexBufferViews may be
	// shorter than drawResources or hold null entries. The original
	// dereferenced vertexBufferViews[i] unconditionally, which crashes in
	// that case, and never cleared indexBufferViews.
	const uint32 vertexBufferCount = static_cast<uint32>(drawResources.size());
	for (uint32 i = 0; i < vertexBufferCount; ++i) {
		if (i < vertexBufferViews.size() && vertexBufferViews[i] != nullptr) {
			vertexBufferViews[i]->Destroy();
			delete vertexBufferViews[i];
			vertexBufferViews[i] = nullptr;
		}
		if (i < indexBufferViews.size() && indexBufferViews[i] != nullptr) {
			indexBufferViews[i]->Destroy();
			delete indexBufferViews[i];
			indexBufferViews[i] = nullptr;
		}
	}
	vertexBufferViews.clear();
	indexBufferViews.clear();
	revMeshRenderer::Destroy();
}
// Allocate a contiguous chunk of descriptors — one per transform — from
// the given heap and create the constant buffer views into it.
void DX12MeshRenderer::Initialize(DX12DescriptorHeap* cBufferHeap)
{
	uint32 cbufferviewCount = static_cast<uint32>(transformConstantBufferViews.size());
	transformCbufferHeapChunk = cBufferHeap->Allocation(cbufferviewCount);
	for (uint32 i = 0; i < cbufferviewCount; ++i) {
		transformConstantBufferViews[i]->Create(revGraphics::Get().GetDevice(), transformConstantBuffers[i], transformCbufferHeapChunk.GetHandle(i));
	}
}
// Refresh every transform constant buffer with the latest world and
// world-view-projection matrices for the given camera.
void DX12MeshRenderer::PrepareDraw(const revCamera& camera)
{
	revTransform::CBuffer cbuffer;
	revMatrix44 viewProj = camera.GetViewMatrix() * camera.GetProjectionMatrix();
	uint32 cbufferviewCount = static_cast<uint32>(transformConstantBufferViews.size());
	for (uint32 i = 0; i < cbufferviewCount; ++i) {
		cbuffer.world = transforms[i]->GetWorldMatrix();
		cbuffer.wvp = cbuffer.world * viewProj;
		// Transpose before upload — presumably the shader side expects the
		// other storage order; confirm against the HLSL cbuffer layout.
		cbuffer.world.Transpose();
		cbuffer.wvp.Transpose();
		transformConstantBuffers[i]->Update(&cbuffer, sizeof(cbuffer));
	}
}
// Record draw commands for every mesh slot: bind the per-transform
// constant buffer (when present), the vertex buffer, and either an
// indexed or a non-indexed draw depending on the slot's index buffer.
void DX12MeshRenderer::Draw(revGraphicsCommandList& commandList, DX12DescriptorHeap& cBufferHeap, DX12DescriptorHeap& textureHeap, DX12DescriptorHeap& samplerHeap)
{
	auto& list = commandList.GetList();
	list->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
	uint32 drawCount = static_cast<uint32>(drawResources.size());
	for (uint32 i = 0; i < drawCount; ++i) {
		if (drawResources[i] == nullptr) continue;
		// Bind the transform constant buffer for this mesh, if it has one.
		if (drawResources[i]->transformIndex != DONT_HAVE_CONSTANT_BUFFER) {
			cBufferHeap.Apply(commandList, 0, transformCbufferHeapChunk.GetDescriptorOffset(drawResources[i]->transformIndex));
		}
		// Material constant buffers are not wired up yet.
		if (drawResources[i]->materialIndex != DONT_HAVE_CONSTANT_BUFFER) {
			//cBufferHeap.Apply(commandList, 0, materialCbufferHeapChunk.GetDescriptorOffset(drawResources[i]->materialIndex));
		}
		list->IASetVertexBuffers(0, 1, vertexBufferViews[i]->GetResourceView());
		if (drawResources[i]->indexBuffer != nullptr) {
			list->IASetIndexBuffer(indexBufferViews[i]->GetResourceView());
			// GetCount() * 3 — assumes GetCount() returns the triangle
			// count rather than the index count; TODO confirm.
			list->DrawIndexedInstanced(indexBufferViews[i]->GetCount() * 3, 1, 0, 0, 0);
		}
		else {
			list->DrawInstanced(drawResources[i]->vertexBuffer->GetLength(), 1, 0, 0);
		}
	}
}
#endif
|
#ifndef FSLDEMOAPP_BASE_SERVICE_EVENTS_BASIC_MOUSEMOVEEVENT_HPP
#define FSLDEMOAPP_BASE_SERVICE_EVENTS_BASIC_MOUSEMOVEEVENT_HPP
/****************************************************************************************************************************************************
* Copyright (c) 2014 Freescale Semiconductor, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of the Freescale Semiconductor, Inc. nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
****************************************************************************************************************************************************/
#include <FslBase/Exceptions.hpp>
#include <FslBase/Math/Pixel/PxPoint2.hpp>
#include <FslNativeWindow/Base/VirtualMouseButtonFlags.hpp>
#include <FslDemoApp/Base/Service/Events/Basic/BasicEvent.hpp>
namespace Fsl
{
// Basic events must be exactly the same size as a BasicEvent (so they can have no member variables).
  class MouseMoveEvent : public BasicEvent
  {
  public:
    //! @brief Reinterpret an already-encoded BasicEvent as a MouseMoveEvent.
    //! @throws std::invalid_argument if the encoded event is not a MouseMove.
    explicit MouseMoveEvent(const BasicEvent& encodedEvent)
      : BasicEvent(encodedEvent)
    {
      if (m_type != EventType::MouseMove)
      {
        throw std::invalid_argument("The supplied argument is of a wrong type");
      }
    }

    //! @brief Encode a mouse-move event from a position and button state.
    MouseMoveEvent(const PxPoint2& position, const VirtualMouseButtonFlags& mouseButtonFlags);

    //! @brief Get the current position
    PxPoint2 GetPosition() const;

    //! @brief Get the mouse button flags
    VirtualMouseButtonFlags GetMouseButtonFlags() const;
  };
}
#endif
|
// g2o - General Graph Optimization
// Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "sparse_block_matrix.h"
#include <iostream>
using namespace std;
using namespace g2o;
using namespace Eigen;
typedef SparseBlockMatrix< MatrixXd >
SparseBlockMatrixX;
// Stream a dense matrix block row by row to the requested stream.
// The original wrote to cerr regardless of `os`, so the returned stream
// disagreed with where the data actually went; write to `os` as intended.
std::ostream& operator << (std::ostream& os, const SparseBlockMatrixX::SparseMatrixBlock& m) {
  for (int i=0; i<m.rows(); ++i){
    for (int j=0; j<m.cols(); ++j)
      os << m(i,j) << " ";
    os << endl;
  }
  return os;
}
int main (int argc, char** argv){
int rcol[] = {3,6,8,12};
int ccol[] = {2,4,13};
cerr << "creation" << endl;
SparseBlockMatrixX* M=new SparseBlockMatrixX(rcol, ccol, 4,3);
cerr << "block access" << endl;
SparseBlockMatrixX::SparseMatrixBlock* b=M->block(0,0, true);
cerr << b->rows() << " " << b->cols() << endl;
for (int i=0; i<b->rows(); ++i)
for (int j=0; j<b->cols(); ++j){
(*b)(i,j)=i*b->cols()+j;
}
cerr << "block access 2" << endl;
b=M->block(0,2, true);
cerr << b->rows() << " " << b->cols() << endl;
for (int i=0; i<b->rows(); ++i)
for (int j=0; j<b->cols(); ++j){
(*b)(i,j)=i*b->cols()+j;
}
b=M->block(3,2, true);
cerr << b->rows() << " " << b->cols() << endl;
for (int i=0; i<b->rows(); ++i)
for (int j=0; j<b->cols(); ++j){
(*b)(i,j)=i*b->cols()+j;
}
cerr << *M << endl;
cerr << "SUM" << endl;
SparseBlockMatrixX* Ms=0;
M->add(Ms);
M->add(Ms);
cerr << *Ms;
SparseBlockMatrixX* Mt=0;
M->transpose(Mt);
cerr << *Mt << endl;
SparseBlockMatrixX* Mp=0;
M->multiply(Mp, Mt);
cerr << *Mp << endl;
int iperm[]={3,2,1,0};
SparseBlockMatrixX* PMp=0;
Mp->symmPermutation(PMp,iperm, false);
cerr << *PMp << endl;
PMp->clear(true);
Mp->block(3,0)->fill(0.);
Mp->symmPermutation(PMp,iperm, true);
cerr << *PMp << endl;
}
|
#ifndef GRAPH_CONTRACTOR_HPP
#define GRAPH_CONTRACTOR_HPP
#include "contractor/query_edge.hpp"
#include "util/binary_heap.hpp"
#include "util/deallocating_vector.hpp"
#include "util/dynamic_graph.hpp"
#include "util/integer_range.hpp"
#include "util/percent.hpp"
#include "util/simple_logger.hpp"
#include "util/timing_util.hpp"
#include "util/typedefs.hpp"
#include "util/xor_fast_hash.hpp"
#include "util/xor_fast_hash_storage.hpp"
#include <boost/assert.hpp>
#include <stxxl/vector>
#include <tbb/enumerable_thread_specific.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_sort.h>
#include <algorithm>
#include <limits>
#include <memory>
#include <vector>
namespace osrm
{
namespace contractor
{
class GraphContractor
{
  private:
    // Edge payload used during contraction. 'distance' is the edge weight,
    // 'id' is the original edge id (or, for shortcuts, the contracted middle
    // node), and the bitfields keep the struct compact.
    struct ContractorEdgeData
    {
        ContractorEdgeData()
            : distance(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0),
              is_original_via_node_ID(false), distance_data(DistanceData())
        {
        }
        ContractorEdgeData(unsigned distance,
                           unsigned original_edges,
                           unsigned id,
                           bool shortcut,
                           bool forward,
                           bool backward,
                           const DistanceData &distance_data)
            : distance(distance), id(id),
              originalEdges(std::min((unsigned)1 << 28, original_edges)), shortcut(shortcut),
              forward(forward), backward(backward), is_original_via_node_ID(false),
              distance_data(distance_data)
        {
        }
        unsigned distance;
        unsigned id;
        // Clamped to 2^28 in the constructor so it fits the 28-bit field.
        unsigned originalEdges : 28;
        bool shortcut : 1;
        bool forward : 1;
        bool backward : 1;
        bool is_original_via_node_ID : 1;
        DistanceData distance_data;
        // NOTE(review): the trailing declarator below creates an unused member
        // 'data' of this struct type in GraphContractor; it looks accidental
        // (nothing in this file reads it) -- confirm before removing.
    } data;

    // Per-node bookkeeping stored in the contraction heap.
    struct ContractorHeapData
    {
        ContractorHeapData() {}
        ContractorHeapData(short hop_, bool target_) : hop(hop_), target(target_) {}
        short hop = 0;     // number of hops from the search source
        bool target = false; // true if this node is a witness-search target
    };

    using ContractorGraph = util::DynamicGraph<ContractorEdgeData>;
    // using ContractorHeap = util::BinaryHeap<NodeID, NodeID, int, ContractorHeapData,
    //                                         ArrayStorage<NodeID, NodeID>>;
    using ContractorHeap = util::BinaryHeap<NodeID,
                                            NodeID,
                                            int,
                                            ContractorHeapData,
                                            util::XORFastHashStorage<NodeID, NodeID>>;
    using ContractorEdge = ContractorGraph::InputEdge;

    // Scratch state owned by a single worker thread.
    struct ContractorThreadData
    {
        ContractorHeap heap;
        std::vector<ContractorEdge> inserted_edges;
        std::vector<NodeID> neighbours;
        explicit ContractorThreadData(NodeID nodes) : heap(nodes) {}
    };

    using NodeDepth = int;

    // Counters collected by a simulated contraction; used to rank nodes.
    struct ContractionStats
    {
        int edges_deleted_count;
        int edges_added_count;
        int original_edges_deleted_count;
        int original_edges_added_count;
        ContractionStats()
            : edges_deleted_count(0), edges_added_count(0), original_edges_deleted_count(0),
              original_edges_added_count(0)
        {
        }
    };

    // Compact record for a node still awaiting contraction.
    struct RemainingNodeData
    {
        RemainingNodeData() : id(0), is_independent(false) {}
        NodeID id : 31;
        bool is_independent : 1;
    };

    // Lazily creates one ContractorThreadData per TBB worker thread.
    struct ThreadDataContainer
    {
        explicit ThreadDataContainer(int number_of_nodes) : number_of_nodes(number_of_nodes) {}
        inline ContractorThreadData *GetThreadData()
        {
            bool exists = false;
            auto &ref = data.local(exists);
            if (!exists)
            {
                ref = std::make_shared<ContractorThreadData>(number_of_nodes);
            }
            return ref.get();
        }
        int number_of_nodes;
        using EnumerableThreadData =
            tbb::enumerable_thread_specific<std::shared_ptr<ContractorThreadData>>;
        EnumerableThreadData data;
    };

  public:
    /// Convenience constructor: no cached node levels / node weights.
    template <class ContainerT>
    GraphContractor(int nodes, ContainerT &input_edge_list)
        : GraphContractor(nodes, input_edge_list, {}, {})
    {
    }

    /// Builds the internal contraction graph from the input edge list.
    /// Duplicates both directions, drops self-loops, deduplicates parallel
    /// edges (keeping the smallest weight per direction) and merges symmetric
    /// forward/backward pairs into single bidirectional edges.
    /// The input edge list is consumed (cleared) to free memory.
    template <class ContainerT>
    GraphContractor(int nodes,
                    ContainerT &input_edge_list,
                    std::vector<float> &&node_levels_,
                    std::vector<EdgeWeight> &&node_weights_)
        : node_levels(std::move(node_levels_)), node_weights(std::move(node_weights_))
    {
        std::vector<ContractorEdge> edges;
        edges.reserve(input_edge_list.size() * 2);
        const auto dend = input_edge_list.dend();
        for (auto diter = input_edge_list.dbegin(); diter != dend; ++diter)
        {
            BOOST_ASSERT_MSG(static_cast<unsigned int>(std::max(diter->weight, 1)) > 0,
                             "edge distance < 1");
#ifndef NDEBUG
            if (static_cast<unsigned int>(std::max(diter->weight, 1)) > 24 * 60 * 60 * 10)
            {
                util::SimpleLogger().Write(logWARNING)
                    << "Edge weight large -> "
                    << static_cast<unsigned int>(std::max(diter->weight, 1)) << " : "
                    << static_cast<unsigned int>(diter->source) << " -> "
                    << static_cast<unsigned int>(diter->target);
            }
#endif
            // Insert the edge in both orientations with swapped direction flags.
            edges.emplace_back(diter->source,
                               diter->target,
                               static_cast<unsigned int>(std::max(diter->weight, 1)),
                               1,
                               diter->edge_id,
                               false,
                               diter->forward ? true : false,
                               diter->backward ? true : false,
                               diter->distance_data);
            edges.emplace_back(diter->target,
                               diter->source,
                               static_cast<unsigned int>(std::max(diter->weight, 1)),
                               1,
                               diter->edge_id,
                               false,
                               diter->backward ? true : false,
                               diter->forward ? true : false,
                               diter->distance_data);
        }
        // clear input vector
        input_edge_list.clear();
        // FIXME not sure if we need this
        edges.shrink_to_fit();

        tbb::parallel_sort(edges.begin(), edges.end());

        // Compact 'edges' in place: 'edge' is the write cursor, 'i' the read cursor.
        NodeID edge = 0;
        for (NodeID i = 0; i < edges.size();)
        {
            const NodeID source = edges[i].source;
            const NodeID target = edges[i].target;
            const NodeID id = edges[i].data.id;
            // remove eigenloops
            if (source == target)
            {
                ++i;
                continue;
            }
            ContractorEdge forward_edge;
            ContractorEdge reverse_edge;
            forward_edge.source = reverse_edge.source = source;
            forward_edge.target = reverse_edge.target = target;
            forward_edge.data.forward = reverse_edge.data.backward = true;
            forward_edge.data.backward = reverse_edge.data.forward = false;
            forward_edge.data.shortcut = reverse_edge.data.shortcut = false;
            forward_edge.data.id = reverse_edge.data.id = id;
            forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1;
            forward_edge.data.distance = reverse_edge.data.distance = INVALID_EDGE_WEIGHT;
            forward_edge.data.distance_data = reverse_edge.data.distance_data = INVALID_DISTANCE_DATA;
            // remove parallel edges: keep the cheapest weight per direction
            while (i < edges.size() && edges[i].source == source && edges[i].target == target)
            {
                if (edges[i].data.forward)
                {
                    if (edges[i].data.distance < forward_edge.data.distance)
                    {
                        forward_edge.data.distance = edges[i].data.distance;
                        forward_edge.data.distance_data = edges[i].data.distance_data;
                    }
                }
                if (edges[i].data.backward)
                {
                    if (edges[i].data.distance < reverse_edge.data.distance)
                    {
                        reverse_edge.data.distance = edges[i].data.distance;
                        reverse_edge.data.distance_data = edges[i].data.distance_data;
                    }
                }
                ++i;
            }
            // merge edges (s,t) and (t,s) into bidirectional edge
            if (forward_edge.data.distance == reverse_edge.data.distance &&
                forward_edge.data.distance_data == reverse_edge.data.distance_data)
            {
                if ((int)forward_edge.data.distance != INVALID_EDGE_WEIGHT)
                {
                    forward_edge.data.backward = true;
                    edges[edge++] = forward_edge;
                }
            }
            else
            { // insert seperate edges
                if (((int)forward_edge.data.distance) != INVALID_EDGE_WEIGHT)
                {
                    edges[edge++] = forward_edge;
                }
                if ((int)reverse_edge.data.distance != INVALID_EDGE_WEIGHT)
                {
                    edges[edge++] = reverse_edge;
                }
            }
        }
        util::SimpleLogger().Write() << "merged " << edges.size() - edge << " edges out of "
                                     << edges.size();
        edges.resize(edge);
        contractor_graph = std::make_shared<ContractorGraph>(nodes, edges);
        edges.clear();
        edges.shrink_to_fit();
        BOOST_ASSERT(0 == edges.capacity());
        util::SimpleLogger().Write() << "contractor finished initalization";
    }

    /// Runs the contraction hierarchy construction. Nodes are repeatedly
    /// ranked by priority, an independent set is contracted in parallel, and
    /// shortcut edges are inserted, until only core_factor of the nodes remain
    /// uncontracted (those are marked as core nodes).
    void Run(double core_factor = 1.0)
    {
        // for the preparation we can use a big grain size, which is much faster (probably cache)
        const constexpr size_t InitGrainSize = 100000;
        const constexpr size_t PQGrainSize = 100000;
        // auto_partitioner will automatically increase the blocksize if we have
        // a lot of data. It is *important* for the last loop iterations
        // (which have a very small dataset) that it is divisible.
        const constexpr size_t IndependentGrainSize = 1;
        const constexpr size_t ContractGrainSize = 1;
        const constexpr size_t NeighboursGrainSize = 1;
        const constexpr size_t DeleteGrainSize = 1;

        const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
        util::Percent p(number_of_nodes);

        ThreadDataContainer thread_data_list(number_of_nodes);

        NodeID number_of_contracted_nodes = 0;
        std::vector<NodeDepth> node_depth;
        std::vector<float> node_priorities;
        is_core_node.resize(number_of_nodes, false);

        std::vector<RemainingNodeData> remaining_nodes(number_of_nodes);
        // initialize priorities in parallel
        tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, InitGrainSize),
                          [this, &remaining_nodes](const tbb::blocked_range<int> &range) {
                              for (int x = range.begin(), end = range.end(); x != end; ++x)
                              {
                                  remaining_nodes[x].id = x;
                              }
                          });

        bool use_cached_node_priorities = !node_levels.empty();
        if (use_cached_node_priorities)
        {
            std::cout << "using cached node priorities ..." << std::flush;
            node_priorities.swap(node_levels);
            std::cout << "ok" << std::endl;
        }
        else
        {
            node_depth.resize(number_of_nodes, 0);
            node_priorities.resize(number_of_nodes);
            node_levels.resize(number_of_nodes);

            std::cout << "initializing elimination PQ ..." << std::flush;
            tbb::parallel_for(tbb::blocked_range<int>(0, number_of_nodes, PQGrainSize),
                              [this, &node_priorities, &node_depth, &thread_data_list](
                                  const tbb::blocked_range<int> &range) {
                                  ContractorThreadData *data = thread_data_list.GetThreadData();
                                  for (int x = range.begin(), end = range.end(); x != end; ++x)
                                  {
                                      node_priorities[x] =
                                          this->EvaluateNodePriority(data, node_depth[x], x);
                                  }
                              });
            std::cout << "ok" << std::endl;
        }
        BOOST_ASSERT(node_priorities.size() == number_of_nodes);

        std::cout << "preprocessing " << number_of_nodes << " nodes ..." << std::flush;

        unsigned current_level = 0;
        bool flushed_contractor = false;
        while (number_of_nodes > 2 &&
               number_of_contracted_nodes < static_cast<NodeID>(number_of_nodes * core_factor))
        {
            // Once ~65% of the target is contracted, rebuild ("flush") the
            // graph: renumber the remaining nodes to a dense id range and move
            // edges of already-contracted nodes to external storage.
            if (!flushed_contractor && (number_of_contracted_nodes >
                                        static_cast<NodeID>(number_of_nodes * 0.65 * core_factor)))
            {
                util::DeallocatingVector<ContractorEdge>
                    new_edge_set; // this one is not explicitely
                                  // cleared since it goes out of
                                  // scope anyway
                std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush;

                // Delete old heap data to free memory that we need for the coming operations
                thread_data_list.data.clear();

                // Create new priority array
                std::vector<float> new_node_priority(remaining_nodes.size());
                std::vector<EdgeWeight> new_node_weights(remaining_nodes.size());
                // this map gives the old IDs from the new ones, necessary to get a consistent graph
                // at the end of contraction
                orig_node_id_from_new_node_id_map.resize(remaining_nodes.size());
                // this map gives the new IDs from the old ones, necessary to remap targets from the
                // remaining graph
                std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, SPECIAL_NODEID);

                for (const auto new_node_id :
                     util::irange<std::size_t>(0UL, remaining_nodes.size()))
                {
                    auto &node = remaining_nodes[new_node_id];
                    BOOST_ASSERT(node_priorities.size() > node.id);
                    new_node_priority[new_node_id] = node_priorities[node.id];
                    BOOST_ASSERT(node_weights.size() > node.id);
                    new_node_weights[new_node_id] = node_weights[node.id];
                }

                // build forward and backward renumbering map and remap ids in remaining_nodes
                for (const auto new_node_id :
                     util::irange<std::size_t>(0UL, remaining_nodes.size()))
                {
                    auto &node = remaining_nodes[new_node_id];
                    // create renumbering maps in both directions
                    orig_node_id_from_new_node_id_map[new_node_id] = node.id;
                    new_node_id_from_orig_id_map[node.id] = new_node_id;
                    node.id = new_node_id;
                }
                // walk over all nodes
                for (const auto source :
                     util::irange<NodeID>(0UL, contractor_graph->GetNumberOfNodes()))
                {
                    for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(source))
                    {
                        ContractorGraph::EdgeData &data =
                            contractor_graph->GetEdgeData(current_edge);
                        const NodeID target = contractor_graph->GetTarget(current_edge);
                        if (SPECIAL_NODEID == new_node_id_from_orig_id_map[source])
                        {
                            external_edge_list.push_back({source, target, data});
                        }
                        else
                        {
                            // node is not yet contracted.
                            // add (renumbered) outgoing edges to new util::DynamicGraph.
                            ContractorEdge new_edge = {new_node_id_from_orig_id_map[source],
                                                       new_node_id_from_orig_id_map[target],
                                                       data};
                            new_edge.data.is_original_via_node_ID = true;
                            BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[source],
                                             "new source id not resolveable");
                            BOOST_ASSERT_MSG(SPECIAL_NODEID != new_node_id_from_orig_id_map[target],
                                             "new target id not resolveable");
                            new_edge_set.push_back(new_edge);
                        }
                    }
                }

                // Delete map from old NodeIDs to new ones.
                new_node_id_from_orig_id_map.clear();
                new_node_id_from_orig_id_map.shrink_to_fit();

                // Replace old priorities array by new one
                node_priorities.swap(new_node_priority);
                // Delete old node_priorities vector
                // Due to the scope, these should get cleared automatically? @daniel-j-h do you
                // agree?
                new_node_priority.clear();
                new_node_priority.shrink_to_fit();

                node_weights.swap(new_node_weights);
                // old Graph is removed
                contractor_graph.reset();

                // create new graph
                tbb::parallel_sort(new_edge_set.begin(), new_edge_set.end());
                contractor_graph =
                    std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set);

                new_edge_set.clear();
                flushed_contractor = true;

                // INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH!
                // reinitialize heaps and ThreadData objects with appropriate size
                thread_data_list.number_of_nodes = contractor_graph->GetNumberOfNodes();
            }

            tbb::parallel_for(
                tbb::blocked_range<std::size_t>(0, remaining_nodes.size(), IndependentGrainSize),
                [this, &node_priorities, &remaining_nodes, &thread_data_list](
                    const tbb::blocked_range<std::size_t> &range) {
                    ContractorThreadData *data = thread_data_list.GetThreadData();
                    // determine independent node set
                    for (auto i = range.begin(), end = range.end(); i != end; ++i)
                    {
                        const NodeID node = remaining_nodes[i].id;
                        remaining_nodes[i].is_independent =
                            this->IsNodeIndependent(node_priorities, data, node);
                    }
                });

            // sort all remaining nodes to the beginning of the sequence
            const auto begin_independent_nodes = stable_partition(
                remaining_nodes.begin(), remaining_nodes.end(), [](RemainingNodeData node_data) {
                    return !node_data.is_independent;
                });
            auto begin_independent_nodes_idx =
                std::distance(remaining_nodes.begin(), begin_independent_nodes);
            auto end_independent_nodes_idx = remaining_nodes.size();

            if (!use_cached_node_priorities)
            {
                // write out contraction level
                // FIX: capture remaining_nodes by reference -- the previous
                // by-value capture copied the whole vector into each task
                // functor for a purely read-only use (all sibling loops
                // capture it by reference).
                tbb::parallel_for(
                    tbb::blocked_range<std::size_t>(
                        begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
                    [this, &remaining_nodes, flushed_contractor, current_level](
                        const tbb::blocked_range<std::size_t> &range) {
                        if (flushed_contractor)
                        {
                            for (int position = range.begin(), end = range.end(); position != end;
                                 ++position)
                            {
                                const NodeID x = remaining_nodes[position].id;
                                node_levels[orig_node_id_from_new_node_id_map[x]] = current_level;
                            }
                        }
                        else
                        {
                            for (int position = range.begin(), end = range.end(); position != end;
                                 ++position)
                            {
                                const NodeID x = remaining_nodes[position].id;
                                node_levels[x] = current_level;
                            }
                        }
                    });
            }

            // contract independent nodes
            tbb::parallel_for(
                tbb::blocked_range<std::size_t>(
                    begin_independent_nodes_idx, end_independent_nodes_idx, ContractGrainSize),
                [this, &remaining_nodes, &thread_data_list](
                    const tbb::blocked_range<std::size_t> &range) {
                    ContractorThreadData *data = thread_data_list.GetThreadData();
                    for (int position = range.begin(), end = range.end(); position != end;
                         ++position)
                    {
                        const NodeID x = remaining_nodes[position].id;
                        this->ContractNode<false>(data, x);
                    }
                });

            tbb::parallel_for(
                tbb::blocked_range<int>(
                    begin_independent_nodes_idx, end_independent_nodes_idx, DeleteGrainSize),
                [this, &remaining_nodes, &thread_data_list](const tbb::blocked_range<int> &range) {
                    ContractorThreadData *data = thread_data_list.GetThreadData();
                    for (int position = range.begin(), end = range.end(); position != end;
                         ++position)
                    {
                        const NodeID x = remaining_nodes[position].id;
                        this->DeleteIncomingEdges(data, x);
                    }
                });

            // make sure we really sort each block
            tbb::parallel_for(
                thread_data_list.data.range(),
                [&](const ThreadDataContainer::EnumerableThreadData::range_type &range) {
                    for (auto &data : range)
                        tbb::parallel_sort(data->inserted_edges.begin(),
                                           data->inserted_edges.end());
                });

            // insert new edges
            for (auto &data : thread_data_list.data)
            {
                for (const ContractorEdge &edge : data->inserted_edges)
                {
                    const EdgeID current_edge_ID =
                        contractor_graph->FindEdge(edge.source, edge.target);
                    if (current_edge_ID < contractor_graph->EndEdges(edge.source))
                    {
                        // FIX: "&current_data" had been garbled to "¤t_data"
                        // (mojibake of the HTML entity "&curren;"), which does
                        // not compile -- restored the reference declaration.
                        ContractorGraph::EdgeData &current_data =
                            contractor_graph->GetEdgeData(current_edge_ID);
                        if (current_data.shortcut && edge.data.forward == current_data.forward &&
                            edge.data.backward == current_data.backward &&
                            edge.data.distance < current_data.distance)
                        {
                            // found a duplicate edge with smaller weight, update it.
                            current_data = edge.data;
                            continue;
                        }
                    }
                    contractor_graph->InsertEdge(edge.source, edge.target, edge.data);
                }
                data->inserted_edges.clear();
            }

            if (!use_cached_node_priorities)
            {
                tbb::parallel_for(
                    tbb::blocked_range<int>(begin_independent_nodes_idx,
                                            end_independent_nodes_idx,
                                            NeighboursGrainSize),
                    [this, &node_priorities, &remaining_nodes, &node_depth, &thread_data_list](
                        const tbb::blocked_range<int> &range) {
                        ContractorThreadData *data = thread_data_list.GetThreadData();
                        for (int position = range.begin(), end = range.end(); position != end;
                             ++position)
                        {
                            NodeID x = remaining_nodes[position].id;
                            this->UpdateNodeNeighbours(node_priorities, node_depth, data, x);
                        }
                    });
            }

            // remove contracted nodes from the pool
            number_of_contracted_nodes += end_independent_nodes_idx - begin_independent_nodes_idx;
            remaining_nodes.resize(begin_independent_nodes_idx);

            p.PrintStatus(number_of_contracted_nodes);
            ++current_level;
        }

        if (remaining_nodes.size() > 2)
        {
            // Mark the uncontracted rest as the "core"; if a flush happened the
            // ids must first be translated back to original node ids.
            if (orig_node_id_from_new_node_id_map.size() > 0)
            {
                tbb::parallel_for(tbb::blocked_range<int>(0, remaining_nodes.size(), InitGrainSize),
                                  [this, &remaining_nodes](const tbb::blocked_range<int> &range) {
                                      for (int x = range.begin(), end = range.end(); x != end; ++x)
                                      {
                                          const auto orig_id = remaining_nodes[x].id;
                                          is_core_node[orig_node_id_from_new_node_id_map[orig_id]] =
                                              true;
                                      }
                                  });
            }
            else
            {
                tbb::parallel_for(tbb::blocked_range<int>(0, remaining_nodes.size(), InitGrainSize),
                                  [this, &remaining_nodes](const tbb::blocked_range<int> &range) {
                                      for (int x = range.begin(), end = range.end(); x != end; ++x)
                                      {
                                          const auto orig_id = remaining_nodes[x].id;
                                          is_core_node[orig_id] = true;
                                      }
                                  });
            }
        }
        else
        {
            // in this case we don't need core markers since we fully contracted
            // the graph
            is_core_node.clear();
        }

        util::SimpleLogger().Write() << "[core] " << remaining_nodes.size() << " nodes "
                                     << contractor_graph->GetNumberOfEdges() << " edges."
                                     << std::endl;

        thread_data_list.data.clear();
    }

    /// Moves the core-node markers out to the caller (this object's copy is emptied).
    inline void GetCoreMarker(std::vector<bool> &out_is_core_node)
    {
        out_is_core_node.swap(is_core_node);
    }

    /// Moves the per-node contraction levels out to the caller.
    inline void GetNodeLevels(std::vector<float> &out_node_levels)
    {
        out_node_levels.swap(node_levels);
    }

    /// Extracts all edges of the contracted graph (renumbered back to original
    /// node ids if a flush happened) plus the externally stored edges, then
    /// releases the internal graph and maps.
    template <class Edge> inline void GetEdges(util::DeallocatingVector<Edge> &edges)
    {
        util::Percent p(contractor_graph->GetNumberOfNodes());
        util::SimpleLogger().Write() << "Getting edges of minimized graph";
        const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes();
        if (contractor_graph->GetNumberOfNodes())
        {
            Edge new_edge;
            for (const auto node : util::irange(0u, number_of_nodes))
            {
                p.PrintStatus(node);
                for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
                {
                    const NodeID target = contractor_graph->GetTarget(edge);
                    const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge);
                    if (!orig_node_id_from_new_node_id_map.empty())
                    {
                        new_edge.source = orig_node_id_from_new_node_id_map[node];
                        new_edge.target = orig_node_id_from_new_node_id_map[target];
                    }
                    else
                    {
                        new_edge.source = node;
                        new_edge.target = target;
                    }
                    BOOST_ASSERT_MSG(SPECIAL_NODEID != new_edge.source, "Source id invalid");
                    BOOST_ASSERT_MSG(SPECIAL_NODEID != new_edge.target, "Target id invalid");
                    new_edge.data.distance = data.distance;
                    new_edge.data.distance_data = data.distance_data;
                    new_edge.data.shortcut = data.shortcut;
                    if (!data.is_original_via_node_ID && !orig_node_id_from_new_node_id_map.empty())
                    {
                        // tranlate the _node id_ of the shortcutted node
                        new_edge.data.id = orig_node_id_from_new_node_id_map[data.id];
                    }
                    else
                    {
                        new_edge.data.id = data.id;
                    }
                    BOOST_ASSERT_MSG(new_edge.data.id != INT_MAX, // 2^31
                                     "edge id invalid");
                    new_edge.data.forward = data.forward;
                    new_edge.data.backward = data.backward;
                    edges.push_back(new_edge);
                }
            }
        }
        contractor_graph.reset();
        orig_node_id_from_new_node_id_map.clear();
        orig_node_id_from_new_node_id_map.shrink_to_fit();
        BOOST_ASSERT(0 == orig_node_id_from_new_node_id_map.capacity());
        edges.append(external_edge_list.begin(), external_edge_list.end());
        external_edge_list.clear();
    }

  private:
    /// Relaxes all outgoing forward edges of 'node', skipping 'forbidden_node'
    /// (the node being contracted) so witness paths avoid it.
    inline void RelaxNode(const NodeID node,
                          const NodeID forbidden_node,
                          const int distance,
                          ContractorHeap &heap)
    {
        const short current_hop = heap.GetData(node).hop + 1;
        for (auto edge : contractor_graph->GetAdjacentEdgeRange(node))
        {
            const ContractorEdgeData &data = contractor_graph->GetEdgeData(edge);
            if (!data.forward)
            {
                continue;
            }
            const NodeID to = contractor_graph->GetTarget(edge);
            if (forbidden_node == to)
            {
                continue;
            }
            const int to_distance = distance + data.distance;

            // New Node discovered -> Add to Heap + Node Info Storage
            if (!heap.WasInserted(to))
            {
                heap.Insert(to, to_distance, ContractorHeapData{current_hop, false});
            }
            // Found a shorter Path -> Update distance
            else if (to_distance < heap.GetKey(to))
            {
                heap.DecreaseKey(to, to_distance);
                heap.GetData(to).hop = current_hop;
            }
        }
    }

    /// Bounded witness search: stops after max_nodes settles, when distances
    /// exceed max_distance, or when all targets have been settled.
    inline void Dijkstra(const int max_distance,
                         const unsigned number_of_targets,
                         const int max_nodes,
                         ContractorThreadData &data,
                         const NodeID middle_node)
    {
        ContractorHeap &heap = data.heap;

        int nodes = 0;
        unsigned number_of_targets_found = 0;
        while (!heap.Empty())
        {
            const NodeID node = heap.DeleteMin();
            const auto distance = heap.GetKey(node);
            if (++nodes > max_nodes)
            {
                return;
            }
            if (distance > max_distance)
            {
                return;
            }

            // Destination settled?
            if (heap.GetData(node).target)
            {
                ++number_of_targets_found;
                if (number_of_targets_found >= number_of_targets)
                {
                    return;
                }
            }

            RelaxNode(node, middle_node, distance, heap);
        }
    }

    /// Ranks 'node' by a weighted mix of edge quotient, original-edge quotient
    /// and search-tree depth, based on a simulated contraction.
    inline float EvaluateNodePriority(ContractorThreadData *const data,
                                      const NodeDepth node_depth,
                                      const NodeID node)
    {
        ContractionStats stats;

        // perform simulated contraction
        ContractNode<true>(data, node, &stats);

        // Result will contain the priority
        float result;
        if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count))
        {
            result = 1.f * node_depth;
        }
        else
        {
            result = 2.f * (((float)stats.edges_added_count) / stats.edges_deleted_count) +
                     4.f * (((float)stats.original_edges_added_count) /
                            stats.original_edges_deleted_count) +
                     1.f * node_depth;
        }
        BOOST_ASSERT(result >= 0);
        return result;
    }

    /// Contracts 'node' (or simulates it when RUNSIMULATION): for every
    /// incoming/outgoing edge pair a witness search decides whether a shortcut
    /// is needed; shortcuts are collected in the thread-local inserted_edges.
    template <bool RUNSIMULATION>
    inline bool
    ContractNode(ContractorThreadData *data, const NodeID node, ContractionStats *stats = nullptr)
    {
        ContractorHeap &heap = data->heap;
        std::size_t inserted_edges_size = data->inserted_edges.size();
        std::vector<ContractorEdge> &inserted_edges = data->inserted_edges;
        const constexpr bool SHORTCUT_ARC = true;
        const constexpr bool FORWARD_DIRECTION_ENABLED = true;
        const constexpr bool FORWARD_DIRECTION_DISABLED = false;
        const constexpr bool REVERSE_DIRECTION_ENABLED = true;
        const constexpr bool REVERSE_DIRECTION_DISABLED = false;

        for (auto in_edge : contractor_graph->GetAdjacentEdgeRange(node))
        {
            const ContractorEdgeData &in_data = contractor_graph->GetEdgeData(in_edge);
            const NodeID source = contractor_graph->GetTarget(in_edge);
            if (source == node)
                continue;

            if (RUNSIMULATION)
            {
                BOOST_ASSERT(stats != nullptr);
                ++stats->edges_deleted_count;
                stats->original_edges_deleted_count += in_data.originalEdges;
            }
            if (!in_data.backward)
            {
                continue;
            }

            heap.Clear();
            heap.Insert(source, 0, ContractorHeapData{});
            int max_distance = 0;
            unsigned number_of_targets = 0;

            for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
            {
                const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
                if (!out_data.forward)
                {
                    continue;
                }
                const NodeID target = contractor_graph->GetTarget(out_edge);
                if (node == target)
                    continue;

                const EdgeWeight path_distance = in_data.distance + out_data.distance;
                if (target == source)
                {
                    // Self-loop candidate: only insert if it beats the node's
                    // u-turn weight.
                    if (path_distance < node_weights[node])
                    {
                        if (RUNSIMULATION)
                        {
                            // make sure to prune better, but keep inserting this loop if it should
                            // still be the best
                            // CAREFUL: This only works due to the independent node-setting. This
                            // guarantees that source is not connected to another node that is
                            // contracted
                            node_weights[source] = path_distance + 1;
                            BOOST_ASSERT(stats != nullptr);
                            stats->edges_added_count += 2;
                            stats->original_edges_added_count +=
                                2 * (out_data.originalEdges + in_data.originalEdges);
                        }
                        else
                        {
                            // CAREFUL: This only works due to the independent node-setting. This
                            // guarantees that source is not connected to another node that is
                            // contracted
                            node_weights[source] = path_distance; // make sure to prune better
                            DistanceData path_distance_data =
                                in_data.distance_data + out_data.distance_data;
                            inserted_edges.emplace_back(source,
                                                        target,
                                                        path_distance,
                                                        out_data.originalEdges +
                                                            in_data.originalEdges,
                                                        node,
                                                        SHORTCUT_ARC,
                                                        FORWARD_DIRECTION_ENABLED,
                                                        REVERSE_DIRECTION_DISABLED,
                                                        path_distance_data);

                            inserted_edges.emplace_back(target,
                                                        source,
                                                        path_distance,
                                                        out_data.originalEdges +
                                                            in_data.originalEdges,
                                                        node,
                                                        SHORTCUT_ARC,
                                                        FORWARD_DIRECTION_DISABLED,
                                                        REVERSE_DIRECTION_ENABLED,
                                                        path_distance_data);
                        }
                    }
                    continue;
                }
                max_distance = std::max(max_distance, path_distance);
                if (!heap.WasInserted(target))
                {
                    heap.Insert(target, INVALID_EDGE_WEIGHT, ContractorHeapData{0, true});
                    ++number_of_targets;
                }
            }

            if (RUNSIMULATION)
            {
                const int constexpr SIMULATION_SEARCH_SPACE_SIZE = 1000;
                Dijkstra(
                    max_distance, number_of_targets, SIMULATION_SEARCH_SPACE_SIZE, *data, node);
            }
            else
            {
                const int constexpr FULL_SEARCH_SPACE_SIZE = 2000;
                Dijkstra(max_distance, number_of_targets, FULL_SEARCH_SPACE_SIZE, *data, node);
            }
            for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node))
            {
                const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge);
                if (!out_data.forward)
                {
                    continue;
                }
                const NodeID target = contractor_graph->GetTarget(out_edge);
                if (target == node)
                    continue;
                const int path_distance = in_data.distance + out_data.distance;
                const int distance = heap.GetKey(target);
                // No witness path found that is at least as good -> shortcut needed.
                if (path_distance < distance)
                {
                    if (RUNSIMULATION)
                    {
                        BOOST_ASSERT(stats != nullptr);
                        stats->edges_added_count += 2;
                        stats->original_edges_added_count +=
                            2 * (out_data.originalEdges + in_data.originalEdges);
                    }
                    else
                    {
                        DistanceData path_distance_data =
                            in_data.distance_data + out_data.distance_data;
                        inserted_edges.emplace_back(source,
                                                    target,
                                                    path_distance,
                                                    out_data.originalEdges + in_data.originalEdges,
                                                    node,
                                                    SHORTCUT_ARC,
                                                    FORWARD_DIRECTION_ENABLED,
                                                    REVERSE_DIRECTION_DISABLED,
                                                    path_distance_data);

                        inserted_edges.emplace_back(target,
                                                    source,
                                                    path_distance,
                                                    out_data.originalEdges + in_data.originalEdges,
                                                    node,
                                                    SHORTCUT_ARC,
                                                    FORWARD_DIRECTION_DISABLED,
                                                    REVERSE_DIRECTION_ENABLED,
                                                    path_distance_data);
                    }
                }
            }
        }
        // Check For One-Way Streets to decide on the creation of self-loops
        if (!RUNSIMULATION)
        {
            std::size_t iend = inserted_edges.size();
            for (std::size_t i = inserted_edges_size; i < iend; ++i)
            {
                bool found = false;
                for (std::size_t other = i + 1; other < iend; ++other)
                {
                    if (inserted_edges[other].source != inserted_edges[i].source)
                    {
                        continue;
                    }
                    if (inserted_edges[other].target != inserted_edges[i].target)
                    {
                        continue;
                    }
                    if (inserted_edges[other].data.distance != inserted_edges[i].data.distance)
                    {
                        continue;
                    }
                    if (inserted_edges[other].data.shortcut != inserted_edges[i].data.shortcut)
                    {
                        continue;
                    }
                    if (inserted_edges[other].data.distance_data !=
                        inserted_edges[i].data.distance_data)
                    {
                        continue;
                    }
                    // Merge the matching pair into one bidirectional edge.
                    inserted_edges[other].data.forward |= inserted_edges[i].data.forward;
                    inserted_edges[other].data.backward |= inserted_edges[i].data.backward;
                    found = true;
                    break;
                }
                if (!found)
                {
                    inserted_edges[inserted_edges_size++] = inserted_edges[i];
                }
            }
            inserted_edges.resize(inserted_edges_size);
        }
        return true;
    }

    /// Removes every edge that leads into 'node' from its (unique) neighbours.
    inline void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node)
    {
        std::vector<NodeID> &neighbours = data->neighbours;
        neighbours.clear();

        // find all neighbours
        for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
        {
            const NodeID u = contractor_graph->GetTarget(e);
            if (u != node)
            {
                neighbours.push_back(u);
            }
        }
        // eliminate duplicate entries ( forward + backward edges )
        std::sort(neighbours.begin(), neighbours.end());
        neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());

        for (const auto i : util::irange<std::size_t>(0, neighbours.size()))
        {
            contractor_graph->DeleteEdgesTo(neighbours[i], node);
        }
    }

    /// After contracting 'node', bumps the depth of its neighbours and
    /// re-evaluates their priorities.
    inline bool UpdateNodeNeighbours(std::vector<float> &priorities,
                                     std::vector<NodeDepth> &node_depth,
                                     ContractorThreadData *const data,
                                     const NodeID node)
    {
        std::vector<NodeID> &neighbours = data->neighbours;
        neighbours.clear();

        // find all neighbours
        for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
        {
            const NodeID u = contractor_graph->GetTarget(e);
            if (u == node)
            {
                continue;
            }
            neighbours.push_back(u);
            node_depth[u] = std::max(node_depth[node] + 1, node_depth[u]);
        }
        // eliminate duplicate entries ( forward + backward edges )
        std::sort(neighbours.begin(), neighbours.end());
        neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());

        // re-evaluate priorities of neighboring nodes
        for (const NodeID u : neighbours)
        {
            priorities[u] = EvaluateNodePriority(data, node_depth[u], u);
        }
        return true;
    }

    /// A node is independent iff no node within two hops has a strictly lower
    /// priority (ties broken by Bias), so independent nodes can be contracted
    /// in parallel without interfering.
    inline bool IsNodeIndependent(const std::vector<float> &priorities,
                                  ContractorThreadData *const data,
                                  NodeID node) const
    {
        const float priority = priorities[node];

        std::vector<NodeID> &neighbours = data->neighbours;
        neighbours.clear();

        for (auto e : contractor_graph->GetAdjacentEdgeRange(node))
        {
            const NodeID target = contractor_graph->GetTarget(e);
            if (node == target)
            {
                continue;
            }
            const float target_priority = priorities[target];
            BOOST_ASSERT(target_priority >= 0);
            // found a neighbour with lower priority?
            if (priority > target_priority)
            {
                return false;
            }
            // tie breaking
            if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
                Bias(node, target))
            {
                return false;
            }
            neighbours.push_back(target);
        }

        std::sort(neighbours.begin(), neighbours.end());
        neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin());

        // examine all neighbours that are at most 2 hops away
        for (const NodeID u : neighbours)
        {
            for (auto e : contractor_graph->GetAdjacentEdgeRange(u))
            {
                const NodeID target = contractor_graph->GetTarget(e);
                if (node == target)
                {
                    continue;
                }
                const float target_priority = priorities[target];
                BOOST_ASSERT(target_priority >= 0);
                // found a neighbour with lower priority?
                if (priority > target_priority)
                {
                    return false;
                }
                // tie breaking
                if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() &&
                    Bias(node, target))
                {
                    return false;
                }
            }
        }
        return true;
    }

    // This bias function takes up 22 assembly instructions in total on X86
    inline bool Bias(const NodeID a, const NodeID b) const
    {
        const unsigned short hasha = fast_hash(a);
        const unsigned short hashb = fast_hash(b);

        // The compiler optimizes that to conditional register flags but without branching
        // statements!
        if (hasha != hashb)
        {
            return hasha < hashb;
        }
        return a < b;
    }

    std::shared_ptr<ContractorGraph> contractor_graph;
    stxxl::vector<QueryEdge> external_edge_list;
    std::vector<NodeID> orig_node_id_from_new_node_id_map;
    std::vector<float> node_levels;

    // A list of weights for every node in the graph.
    // The weight represents the cost for a u-turn on the segment in the base-graph in addition to
    // its traversal.
    // During contraction, self-loops are checked against this node weight to ensure that necessary
    // self-loops are added.
    std::vector<EdgeWeight> node_weights;
    std::vector<bool> is_core_node;
    util::XORFastHash<> fast_hash;
};
}
}
#endif // CONTRACTOR_HPP
|
#include "../directx11lib.h"
#include "directx11buffer.h"
#include "../directx11rendercontext.h"
#include "directx11vertexarray.h"
#include "Generic/bytebuffer.h"
namespace Engine {
namespace Render {
// Creates a GPU buffer of data.mSize bytes with the given bind flags.
// A zero-sized request allocates nothing: mBuffer stays null until
// setData()/resize() provides a real size.
DirectX11Buffer::DirectX11Buffer(UINT bind, const ByteBuffer &data)
    : mSize(data.mSize)
    , mBind(bind)
{
    if (mSize > 0) {
        D3D11_BUFFER_DESC bufferDesc;
        ZeroMemory(&bufferDesc, sizeof(D3D11_BUFFER_DESC));
        bufferDesc.BindFlags = bind;
        bufferDesc.ByteWidth = data.mSize;
        // With initial data: DEFAULT usage, updated later via UpdateSubresource
        // (see setData). Without data: DYNAMIC usage with CPU write access so
        // mapData() can Map() it with WRITE_DISCARD.
        bufferDesc.CPUAccessFlags = data.mData ? 0 : D3D11_CPU_ACCESS_WRITE;
        bufferDesc.Usage = data.mData ? D3D11_USAGE_DEFAULT : D3D11_USAGE_DYNAMIC;
        D3D11_SUBRESOURCE_DATA subData;
        ZeroMemory(&subData, sizeof(D3D11_SUBRESOURCE_DATA));
        subData.pSysMem = data.mData;
        subData.SysMemPitch = 0;       // irrelevant for 1D buffers
        subData.SysMemSlicePitch = 0;
        // Pass initial data only when we actually have some.
        HRESULT hr = sDevice->CreateBuffer(&bufferDesc, data.mData ? &subData : nullptr, &mBuffer);
        DX11_CHECK(hr);
    }
}
// Move constructor: steals the other buffer's state and leaves the source
// empty so its destructor releases nothing.
DirectX11Buffer::DirectX11Buffer(DirectX11Buffer &&other)
    : mSize(other.mSize)
    , mBind(other.mBind)
    , mBuffer(other.mBuffer)
{
    other.mSize = 0;
    other.mBind = 0;
    other.mBuffer = nullptr;
}
// Releases the underlying COM buffer, if any.
DirectX11Buffer::~DirectX11Buffer()
{
    reset();
}
// Swap-based move assignment: our previous resources end up in 'other' and
// are released by its destructor. Also harmless on self-assignment.
DirectX11Buffer &DirectX11Buffer::operator=(DirectX11Buffer &&other)
{
    std::swap(mBuffer, other.mBuffer);
    std::swap(mBind, other.mBind);
    std::swap(mSize, other.mSize);
    return *this;
}
// True when an actual GPU buffer has been allocated.
DirectX11Buffer::operator bool() const
{
    return static_cast<bool>(mBuffer);
}
// Binds this buffer to vertex-buffer slot 'index'. 'stride' is the size of
// one vertex in bytes; the whole buffer is used (offset 0).
void DirectX11Buffer::bindVertex(UINT stride, size_t index) const
{
    UINT offset = 0;
    sDeviceContext->IASetVertexBuffers(index, 1, &mBuffer, &stride, &offset);
    DX11_LOG("Bind Vertex Buffer -> " << mBuffer);
}
// Binds this buffer as the index buffer. Indices are assumed to be 16-bit
// (DXGI_FORMAT_R16_UINT), starting at byte offset 0.
void DirectX11Buffer::bindIndex() const
{
    sDeviceContext->IASetIndexBuffer(mBuffer, DXGI_FORMAT_R16_UINT, 0);
    DX11_LOG("Bind Index Buffer -> " << mBuffer);
}
// Drops the COM reference (if any) and returns to the empty state.
void DirectX11Buffer::reset()
{
    if (!mBuffer)
        return;
    mBuffer->Release();
    mBuffer = nullptr;
}
// Replaces the buffer contents. D3D11 buffers cannot be resized, so a size
// change rebuilds the buffer via the constructor and move-assigns the result;
// otherwise the data is uploaded in place.
// NOTE(review): UpdateSubresource targets non-mappable (DEFAULT) buffers;
// buffers created without initial data are DYNAMIC and are written through
// mapData() -- confirm callers respect this split.
void DirectX11Buffer::setData(const ByteBuffer &data)
{
    if (mSize != data.mSize)
        *this = { mBind, data };
    else if (data.mData)
        sDeviceContext->UpdateSubresource(mBuffer, 0, nullptr, data.mData, data.mSize, 0);
}
// Reallocates to 'size' bytes with no initial contents (a null data pointer
// makes the new buffer DYNAMIC/mappable, see the constructor).
void DirectX11Buffer::resize(size_t size)
{
    setData({ nullptr, size });
}
// Maps the buffer for CPU writing and returns a writable view whose release
// unmaps the buffer (RAII via the custom deleter below).
// WRITE_DISCARD invalidates previous contents; it requires the buffer to have
// been created with DYNAMIC usage (i.e. without initial data).
WritableByteBuffer DirectX11Buffer::mapData()
{
    // Deleter that unmaps the buffer when the returned view lets go of the pointer.
    struct UnmapDeleter {
        ID3D11Buffer *mBuffer;
        void operator()(void *p)
        {
            sDeviceContext->Unmap(mBuffer, 0);
        }
    };
    D3D11_MAPPED_SUBRESOURCE subres;
    ZeroMemory(&subres, sizeof(subres));
    HRESULT hr = sDeviceContext->Map(mBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &subres);
    DX11_CHECK(hr);
    std::unique_ptr<void, UnmapDeleter> dataBuffer { subres.pData, { mBuffer } };
    return { std::move(dataBuffer), mSize };
}
// Raw (non-owning) access to the underlying D3D11 buffer; may be null.
ID3D11Buffer *DirectX11Buffer::handle()
{
    return mBuffer;
}
}
}
|
/*
See LICENSE file in root folder
*/
#ifndef ___C3D_BonesInstantiationComponent_H___
#define ___C3D_BonesInstantiationComponent_H___
#include "Castor3D/Model/Mesh/Submesh/Component/BonesComponent.hpp"
#include "Castor3D/Model/Mesh/Submesh/Component/InstantiationComponent.hpp"
#include "Castor3D/Shader/ShaderBuffer.hpp"
namespace castor3d
{
// Submesh component combining hardware instancing with skeletal animation:
// exposes a per-instance bones shader buffer built from the paired
// InstantiationComponent and BonesComponent.
class BonesInstantiationComponent
: public SubmeshComponent
{
public:
/**
*\~english
*\brief Constructor.
*\param[in] submesh The parent submesh.
*\param[in] instantiation The instantiation component.
*\param[in] bones The bones component.
*\~french
*\brief Constructeur.
*\param[in] submesh Le sous-maillage parent.
*\param[in] instantiation Le composant d'instanciation.
*\param[in] bones Le composant de bones.
*/
C3D_API BonesInstantiationComponent( Submesh & submesh
, InstantiationComponent const & instantiation
, BonesComponent const & bones );
/**
*\copydoc castor3d::SubmeshComponent::gather
*/
C3D_API void gather( ShaderFlags const & flags
, MaterialRPtr material
, ashes::BufferCRefArray & buffers
, std::vector< uint64_t > & offsets
, ashes::PipelineVertexInputStateCreateInfoCRefArray & layouts
, uint32_t instanceMult
, TextureFlagsArray const & mask
, uint32_t & currentLocation )override;
/**
*\copydoc castor3d::SubmeshComponent::clone
*/
C3D_API SubmeshComponentSPtr clone( Submesh & submesh )const override;
/**
*\~english
*\return The skeleton.
*\~french
*\return Le squelette.
*/
inline SkeletonSPtr getSkeleton()const
{
return m_bones.getSkeleton();
}
/**
*\~english
*\return Tells if the bone instantiation buffer exists.
*\~french
*\return Dit si le tampon d'instanciation des os existe.
*/
inline bool hasInstancedBonesBuffer()const
{
return bool( m_instancedBonesBuffer );
}
/**
*\~english
*\return The bone instantiation ShaderStorageBuffer.
*\pre hasInstancedBonesBuffer() — dereferences the buffer unconditionally.
*\~french
*\return Le ShaderStorageBuffer d'instanciation des os.
*/
inline ShaderBuffer const & getInstancedBonesBuffer()const
{
return *m_instancedBonesBuffer;
}
/**
*\~english
*\return The bone instantiation ShaderStorageBuffer.
*\pre hasInstancedBonesBuffer() — dereferences the buffer unconditionally.
*\~french
*\return Le ShaderStorageBuffer d'instanciation des os.
*/
inline ShaderBuffer & getInstancedBonesBuffer()
{
return *m_instancedBonesBuffer;
}
/**
*\~english
*\return The shader program flags.
*\~french
*\return Les indicateurs de shader.
*/
// NOTE: always returns an empty flag set; the material parameter is unused.
inline ProgramFlags getProgramFlags( MaterialRPtr material )const override
{
return ProgramFlags( 0 );
}
private:
bool doInitialise( RenderDevice const & device )override;
void doCleanup()override;
void doUpload()override;
public:
C3D_API static castor::String const Name;
private:
// Source components this one aggregates (owned by the parent submesh).
InstantiationComponent const & m_instantiation;
BonesComponent const & m_bones;
// Per-instance bone matrices SSBO; may be null until doInitialise runs.
ShaderBufferUPtr m_instancedBonesBuffer;
};
}
#endif
|
#include "WavWriter.hh"
#include "MSXException.hh"
#include "Math.hh"
#include "vla.hh"
#include "endian.hh"
#include <cstring>
#include <vector>
namespace openmsx {
// Open the output file and write a canonical 44-byte PCM WAV header.
// The two size fields (RIFF chunkSize and data subChunk2Size) are written as
// zero here and patched with the real values by flush().
WavWriter::WavWriter(const Filename& filename,
unsigned channels, unsigned bits, unsigned frequency)
: file(filename, "wb")
, bytes(0)
{
// write wav header
struct WavHeader {
char chunkID[4]; // + 0 'RIFF'
Endian::L32 chunkSize; // + 4 total size
char format[4]; // + 8 'WAVE'
char subChunk1ID[4]; // +12 'fmt '
Endian::L32 subChunk1Size; // +16 = 16 (fixed)
Endian::L16 audioFormat; // +20 = 1 (fixed)
Endian::L16 numChannels; // +22
Endian::L32 sampleRate; // +24
Endian::L32 byteRate; // +28
Endian::L16 blockAlign; // +32
Endian::L16 bitsPerSample; // +34
char subChunk2ID[4]; // +36 'data'
Endian::L32 subChunk2Size; // +40
} header;
memcpy(header.chunkID, "RIFF", sizeof(header.chunkID));
header.chunkSize = 0; // actual value filled in later
memcpy(header.format, "WAVE", sizeof(header.format));
memcpy(header.subChunk1ID, "fmt ", sizeof(header.subChunk1ID));
header.subChunk1Size = 16;
header.audioFormat = 1; // 1 = PCM (uncompressed)
header.numChannels = channels;
header.sampleRate = frequency;
header.byteRate = (channels * frequency * bits) / 8;
header.blockAlign = (channels * bits) / 8;
header.bitsPerSample = bits;
memcpy(header.subChunk2ID, "data", sizeof(header.subChunk2ID));
header.subChunk2Size = 0; // actual value filled in later
file.write(&header, sizeof(header));
}
// Finalize the file: pad the data chunk to an even byte count (required by
// the RIFF spec) and then patch the header size fields. Errors are swallowed
// because destructors must not throw.
WavWriter::~WavWriter()
{
try {
// data chunk must have an even number of bytes
if (bytes & 1) {
uint8_t pad = 0;
file.write(&pad, 1);
}
flush(); // write header
} catch (MSXException&) {
// ignore, can't throw from destructor
}
}
// Patch the two header size fields with the number of data bytes written so
// far, then restore the file position to the end so writing can continue.
// Offsets 4 (RIFF chunkSize) and 40 (data subChunk2Size) match the WavHeader
// layout written by the constructor.
void WavWriter::flush()
{
// RIFF chunkSize = total file size minus the 8-byte 'RIFF'+size prefix,
// with the data payload rounded up to an even number of bytes.
Endian::L32 totalSize = (bytes + 44 - 8 + 1) & ~1; // round up to even number
Endian::L32 wavSize = bytes;
file.seek(4);
file.write(&totalSize, 4);
file.seek(40);
file.write(&wavSize, 4);
file.seek(file.getSize()); // SEEK_END
file.flush();
}
// Append 8-bit samples verbatim: one byte per sample, no endian concerns.
void Wav8Writer::write(const uint8_t* buffer, unsigned samples)
{
    const unsigned size = samples;
    file.write(buffer, size);
    bytes += size;
}
// Append 16-bit samples. WAV data is little-endian, so on big-endian hosts
// each sample is converted via Endian::L16 before writing; on little-endian
// hosts the buffer can be written directly.
void Wav16Writer::write(const int16_t* buffer, unsigned samples)
{
unsigned size = sizeof(int16_t) * samples;
if (OPENMSX_BIGENDIAN) {
// Variable length arrays (VLA) are part of c99 but not of c++
// (not even c++11). Some compilers (like gcc) do support VLA
// in c++ mode, others (like VC++) don't. Still other compilers
// (like clang) only support VLA for POD types.
// To side-step this issue we simply use a std::vector, this
// code is anyway not performance critical.
//VLA(Endian::L16, buf, samples); // doesn't work in clang
std::vector<Endian::L16> buf(buffer, buffer + samples);
file.write(buf.data(), size);
} else {
file.write(buffer, size);
}
bytes += size;
}
// Scale 'samples' frames of int data to 16-bit little-endian and append them.
//   stereo == 1: each sample is scaled by ampLeft (asserted == ampRight).
//   stereo == 2: interleaved frames; even entries are the left channel
//                (scaled by ampLeft), odd entries the right (ampRight).
void Wav16Writer::write(const int* buffer, unsigned stereo, unsigned samples,
                        float ampLeft, float ampRight)
{
	assert(stereo == 1 || stereo == 2);
	std::vector<Endian::L16> buf(samples * stereo);
	if (stereo == 1) {
		assert(ampLeft == ampRight);
		for (unsigned i = 0; i < samples; ++i) {
			buf[i] = Math::clipIntToShort(lrintf(buffer[i] * ampLeft));
		}
	} else {
		for (unsigned i = 0; i < samples; ++i) {
			buf[2 * i + 0] = Math::clipIntToShort(lrintf(buffer[2 * i + 0] * ampLeft));
			// Bug fix: the right channel was previously computed from the
			// LEFT source sample and stored into the LEFT slot (2*i+0),
			// dropping the right channel and corrupting the left.
			buf[2 * i + 1] = Math::clipIntToShort(lrintf(buffer[2 * i + 1] * ampRight));
		}
	}
	unsigned size = sizeof(int16_t) * samples * stereo;
	file.write(buf.data(), size);
	bytes += size;
}
// Append 'samples' zero-valued 16-bit samples (silence).
void Wav16Writer::writeSilence(unsigned samples)
{
    // A value-initialized vector is already all zeroes, so no memset needed.
    std::vector<int16_t> buf(samples);
    unsigned size = sizeof(int16_t) * samples;
    file.write(buf.data(), size);
    bytes += size;
}
} // namespace openmsx
|
/********************* */
/*! \file equality_engine.cpp
** \verbatim
** Top contributors (to current version):
** Dejan Jovanovic, Guy Katz, Andrew Reynolds
** This file is part of the CVC4 project.
** Copyright (c) 2009-2016 by the authors listed in the file AUTHORS
** in the top-level source directory) and their institutional affiliations.
** All rights reserved. See the file COPYING in the top-level source
** directory for licensing information.\endverbatim
**
** \brief [[ Add one-line brief description here ]]
**
** [[ Add lengthier description here ]]
** \todo document this file
**/
#include "theory/uf/equality_engine.h"
#include "smt/smt_statistics_registry.h"
namespace CVC4 {
namespace theory {
namespace eq {
// Create the per-engine counters (prefixed by the engine's name) and register
// them with the global SMT statistics registry; the destructor unregisters
// them symmetrically.
EqualityEngine::Statistics::Statistics(std::string name)
: mergesCount(name + "::mergesCount", 0),
termsCount(name + "::termsCount", 0),
functionTermsCount(name + "::functionTermsCount", 0),
constantTermsCount(name + "::constantTermsCount", 0)
{
smtStatisticsRegistry()->registerStat(&mergesCount);
smtStatisticsRegistry()->registerStat(&termsCount);
smtStatisticsRegistry()->registerStat(&functionTermsCount);
smtStatisticsRegistry()->registerStat(&constantTermsCount);
}
// Unregister every counter registered in the constructor.
EqualityEngine::Statistics::~Statistics() {
smtStatisticsRegistry()->unregisterStat(&mergesCount);
smtStatisticsRegistry()->unregisterStat(&termsCount);
smtStatisticsRegistry()->unregisterStat(&functionTermsCount);
smtStatisticsRegistry()->unregisterStat(&constantTermsCount);
}
/**
* Data used in the BFS search through the equality graph.
*/
/**
 * Data used in the BFS search through the equality graph.
 * The defaults produce a "root" entry (null node/edge, previous index 0).
 */
struct BfsData {
// The current node
EqualityNodeId nodeId;
// The index of the edge we traversed
EqualityEdgeId edgeId;
// Index in the queue of the previous node. Shouldn't be too much of them, at most the size
// of the biggest equivalence class
size_t previousIndex;
BfsData(EqualityNodeId nodeId = null_id, EqualityEdgeId edgeId = null_edge, size_t prev = 0)
: nodeId(nodeId), edgeId(edgeId), previousIndex(prev) {}
};
/**
 * RAII guard that overrides a boolean flag for the duration of a scope and
 * restores the previous value on destruction.
 */
class ScopedBool {
  bool& d_flag;   // flag being temporarily overridden
  bool d_saved;   // value restored at scope exit
public:
  ScopedBool(bool& watch, bool newValue)
      : d_flag(watch), d_saved(watch) {
    d_flag = newValue;
  }
  ~ScopedBool() {
    d_flag = d_saved;
  }
};
EqualityEngineNotifyNone EqualityEngine::s_notifyNone;
// One-time setup shared by both constructors: interns the Boolean constants
// true/false and allocates the raw trigger-database arena (freed in the
// destructor). Notifications are suppressed while the constants are added.
void EqualityEngine::init() {
// unary '+' promotes the id constants so they print as numbers, not chars
Debug("equality") << "EqualityEdge::EqualityEngine(): id_null = " << +null_id << std::endl;
Debug("equality") << "EqualityEdge::EqualityEngine(): edge_null = " << +null_edge << std::endl;
Debug("equality") << "EqualityEdge::EqualityEngine(): trigger_null = " << +null_trigger << std::endl;
d_true = NodeManager::currentNM()->mkConst<bool>(true);
d_false = NodeManager::currentNM()->mkConst<bool>(false);
// raw byte arena for trigger term sets; grown/filled elsewhere
d_triggerDatabaseAllocatedSize = 100000;
d_triggerDatabase = (char*) malloc(d_triggerDatabaseAllocatedSize);
//We can't notify during the initialization because it notifies
// QuantifiersEngine.AddTermToDatabase that try to access to the uf
// instantiator that currently doesn't exist.
ScopedBool sb(d_performNotify, false);
addTermInternal(d_true);
addTermInternal(d_false);
d_trueId = getNodeId(d_true);
d_falseId = getNodeId(d_false);
d_freshMergeReasonType = eq::NUMBER_OF_MERGE_REASONS;
}
// Releases the malloc'd trigger-database arena allocated in init().
EqualityEngine::~EqualityEngine() {
free(d_triggerDatabase);
}
// Construct an engine with no notification callback: d_notify is bound to the
// shared no-op s_notifyNone. All context-dependent counters start at 0 and
// are automatically restored on context pops.
EqualityEngine::EqualityEngine(context::Context* context, std::string name, bool constantsAreTriggers)
: ContextNotifyObj(context)
, d_masterEqualityEngine(0)
, d_context(context)
, d_done(context, false)
, d_performNotify(true)
, d_notify(s_notifyNone)
, d_applicationLookupsCount(context, 0)
, d_nodesCount(context, 0)
, d_assertedEqualitiesCount(context, 0)
, d_equalityTriggersCount(context, 0)
, d_subtermEvaluatesSize(context, 0)
, d_stats(name)
, d_inPropagate(false)
, d_constantsAreTriggers(constantsAreTriggers)
, d_triggerDatabaseSize(context, 0)
, d_triggerTermSetUpdatesSize(context, 0)
, d_deducedDisequalitiesSize(context, 0)
, d_deducedDisequalityReasonsSize(context, 0)
, d_propagatedDisequalities(context)
, d_name(name)
{
init();
}
// Construct an engine that reports merges/propagations to the given notify
// object; otherwise identical to the no-notify constructor above.
EqualityEngine::EqualityEngine(EqualityEngineNotify& notify, context::Context* context, std::string name, bool constantsAreTriggers)
: ContextNotifyObj(context)
, d_masterEqualityEngine(0)
, d_context(context)
, d_done(context, false)
, d_performNotify(true)
, d_notify(notify)
, d_applicationLookupsCount(context, 0)
, d_nodesCount(context, 0)
, d_assertedEqualitiesCount(context, 0)
, d_equalityTriggersCount(context, 0)
, d_subtermEvaluatesSize(context, 0)
, d_stats(name)
, d_inPropagate(false)
, d_constantsAreTriggers(constantsAreTriggers)
, d_triggerDatabaseSize(context, 0)
, d_triggerTermSetUpdatesSize(context, 0)
, d_deducedDisequalitiesSize(context, 0)
, d_deducedDisequalityReasonsSize(context, 0)
, d_propagatedDisequalities(context)
, d_name(name)
{
init();
}
// Attach a master engine that mirrors all non-internal terms added here.
// May only be called once (asserts the master is not already set).
void EqualityEngine::setMasterEqualityEngine(EqualityEngine* master) {
Assert(d_masterEqualityEngine == 0);
d_masterEqualityEngine = master;
}
// Add a merge candidate to the propagation queue; back=true (the common case)
// appends, back=false prepends for higher-priority processing.
void EqualityEngine::enqueue(const MergeCandidate& candidate, bool back) {
Debug("equality") << d_name << "::eq::enqueue(" << d_nodes[candidate.t1Id] << ", " << d_nodes[candidate.t2Id] << ", " << candidate.type << "). reason: " << candidate.reason << std::endl;
if (back) {
d_propagationQueue.push_back(candidate);
} else {
d_propagationQueue.push_front(candidate);
}
}
// Create a node for the (curried, binary) function application t1(t2) of the
// given type. Stores both the original and the find-normalized application;
// if the normalized form already has a representative in the lookup table the
// two are queued for a congruence merge, otherwise this node becomes the
// representative. Both arguments get this application added to their use
// lists so future merges can re-check congruence.
EqualityNodeId EqualityEngine::newApplicationNode(TNode original, EqualityNodeId t1, EqualityNodeId t2, FunctionApplicationType type) {
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << ")" << std::endl;
++ d_stats.functionTermsCount;
// Get another id for this
EqualityNodeId funId = newNode(original);
FunctionApplication funOriginal(type, t1, t2);
// The function application we're creating
EqualityNodeId t1ClassId = getEqualityNode(t1).getFind();
EqualityNodeId t2ClassId = getEqualityNode(t2).getFind();
FunctionApplication funNormalized(type, t1ClassId, t2ClassId);
// We add the original version
d_applications[funId] = FunctionApplicationPair(funOriginal, funNormalized);
// Add the lookup data, if it's not already there
ApplicationIdsMap::iterator find = d_applicationLookup.find(funNormalized);
if (find == d_applicationLookup.end()) {
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << "): no lookup, setting up" << std::endl;
// Mark the normalization to the lookup
storeApplicationLookup(funNormalized, funId);
} else {
// If it's there, we need to merge these two
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << "): lookup exists, adding to queue" << std::endl;
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << "): lookup = " << d_nodes[find->second] << std::endl;
enqueue(MergeCandidate(funId, find->second, MERGED_THROUGH_CONGRUENCE, TNode::null()));
}
// Add to the use lists
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << "): adding " << original << " to the uselist of " << d_nodes[t1] << std::endl;
d_equalityNodes[t1].usedIn(funId, d_useListNodes);
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << "): adding " << original << " to the uselist of " << d_nodes[t2] << std::endl;
d_equalityNodes[t2].usedIn(funId, d_useListNodes);
// Return the new id
Debug("equality") << d_name << "::eq::newApplicationNode(" << original << ", " << t1 << ", " << t2 << ") => " << funId << std::endl;
return funId;
}
// Allocate a fresh node id for the term and initialize every parallel
// per-node vector (triggers, graph edges, flags, ...) with its default.
// Notifies the callback of the new singleton equivalence class.
EqualityNodeId EqualityEngine::newNode(TNode node) {
Debug("equality") << d_name << "::eq::newNode(" << node << ")" << std::endl;
++ d_stats.termsCount;
// Register the new id of the term
EqualityNodeId newId = d_nodes.size();
d_nodeIds[node] = newId;
// Add the node to its position
d_nodes.push_back(node);
// Note if this is an application or not
d_applications.push_back(FunctionApplicationPair());
// Add the trigger list for this node
d_nodeTriggers.push_back(+null_trigger);
// Add it to the equality graph
d_equalityGraph.push_back(+null_edge);
// Mark the no-individual trigger
d_nodeIndividualTrigger.push_back(+null_set_id);
// Mark non-constant by default
d_isConstant.push_back(false);
// No terms to evaluate by default
d_subtermsToEvaluate.push_back(0);
// Mark equality nodes
d_isEquality.push_back(false);
// Mark the node as internal by default
d_isInternal.push_back(true);
// Add the equality node to the nodes
d_equalityNodes.push_back(EqualityNode(newId));
// Increase the counters
d_nodesCount = d_nodesCount + 1;
Debug("equality") << d_name << "::eq::newNode(" << node << ") => " << newId << std::endl;
// notify e.g. the UF theory strong solver
if (d_performNotify) {
d_notify.eqNotifyNewClass(node);
}
return newId;
}
// Register a function kind for congruence closure. If 'interpreted', the
// kind also participates in constant evaluation — except EQUAL, which is
// handled specially and never marked interpreted.
void EqualityEngine::addFunctionKind(Kind fun, bool interpreted) {
d_congruenceKinds |= fun;
if (interpreted && fun != kind::EQUAL) {
Debug("equality::evaluation") << d_name << "::eq::addFunctionKind(): " << fun << " is interpreted " << std::endl;
d_congruenceKindsInterpreted |= fun;
}
}
// Record that one more direct subterm of the interpreted term 'id' has become
// a constant. When the remaining count hits zero the term itself can be
// evaluated and is queued. The event is logged for backtracking via
// d_subtermEvaluates/d_subtermEvaluatesSize.
void EqualityEngine::subtermEvaluates(EqualityNodeId id) {
Debug("equality::evaluation") << d_name << "::eq::subtermEvaluates(" << d_nodes[id] << "): " << d_subtermsToEvaluate[id] << std::endl;
Assert(!d_isInternal[id]);
Assert(d_subtermsToEvaluate[id] > 0);
if ((-- d_subtermsToEvaluate[id]) == 0) {
d_evaluationQueue.push(id);
}
d_subtermEvaluates.push_back(id);
d_subtermEvaluatesSize = d_subtermEvaluates.size();
Debug("equality::evaluation") << d_name << "::eq::subtermEvaluates(" << d_nodes[id] << "): new " << d_subtermsToEvaluate[id] << std::endl;
}
// Add a term (and, recursively, its subterms) to the engine. Equalities
// become APP_EQUALITY applications over their two sides; other congruence
// kinds are curried into a chain of binary applications over the operator;
// everything else becomes a plain node. Constants may be registered as
// trigger terms for all theories, and non-internal terms are mirrored into
// the master engine. Finishes by draining the propagation queue.
void EqualityEngine::addTermInternal(TNode t, bool isOperator) {
Debug("equality") << d_name << "::eq::addTermInternal(" << t << ")" << std::endl;
// If there already, we're done
if (hasTerm(t)) {
Debug("equality") << d_name << "::eq::addTermInternal(" << t << "): already there" << std::endl;
return;
}
// engine is in conflict; no new work accepted
if (d_done) {
return;
}
EqualityNodeId result;
if (t.getKind() == kind::EQUAL) {
addTermInternal(t[0]);
addTermInternal(t[1]);
EqualityNodeId t0id = getNodeId(t[0]);
EqualityNodeId t1id = getNodeId(t[1]);
result = newApplicationNode(t, t0id, t1id, APP_EQUALITY);
d_isInternal[result] = false;
d_isConstant[result] = false;
} else if (t.getNumChildren() > 0 && d_congruenceKinds[t.getKind()]) {
TNode tOp = t.getOperator();
// Add the operator
addTermInternal(tOp, true);
result = getNodeId(tOp);
// Add all the children and Curryfy
bool isInterpreted = isInterpretedFunctionKind(t.getKind());
for (unsigned i = 0; i < t.getNumChildren(); ++ i) {
// Add the child
addTermInternal(t[i]);
EqualityNodeId tiId = getNodeId(t[i]);
// Add the application
result = newApplicationNode(t, result, tiId, isInterpreted ? APP_INTERPRETED : APP_UNINTERPRETED);
}
d_isInternal[result] = false;
d_isConstant[result] = t.isConst();
// If interpreted, set the number of non-interpreted children
if (isInterpreted) {
// How many children are not constants yet
d_subtermsToEvaluate[result] = t.getNumChildren();
for (unsigned i = 0; i < t.getNumChildren(); ++ i) {
if (isConstant(getNodeId(t[i]))) {
Debug("equality::evaluation") << d_name << "::eq::addTermInternal(" << t << "): evaluates " << t[i] << std::endl;
subtermEvaluates(result);
}
}
}
} else {
// Otherwise we just create the new id
result = newNode(t);
// Is this an operator
d_isInternal[result] = isOperator;
d_isConstant[result] = !isOperator && t.isConst();
}
if (t.getKind() == kind::EQUAL) {
// We set this here as this only applies to actual terms, not the
// intermediate application terms
d_isEquality[result] = true;
} else if (d_constantsAreTriggers && d_isConstant[result]) {
// Non-Boolean constants are trigger terms for all tags
EqualityNodeId tId = getNodeId(t);
// Setup the new set
Theory::Set newSetTags = 0;
EqualityNodeId newSetTriggers[THEORY_LAST];
unsigned newSetTriggersSize = THEORY_LAST;
for (TheoryId currentTheory = THEORY_FIRST; currentTheory != THEORY_LAST; ++ currentTheory) {
newSetTags = Theory::setInsert(currentTheory, newSetTags);
newSetTriggers[currentTheory] = tId;
}
// Add it to the list for backtracking
d_triggerTermSetUpdates.push_back(TriggerSetUpdate(tId, null_set_id));
d_triggerTermSetUpdatesSize = d_triggerTermSetUpdatesSize + 1;
// Mark the new set as a trigger
d_nodeIndividualTrigger[tId] = newTriggerTermSet(newSetTags, newSetTriggers, newSetTriggersSize);
}
// If this is not an internal node, add it to the master
if (d_masterEqualityEngine && !d_isInternal[result]) {
d_masterEqualityEngine->addTermInternal(t);
}
// Empty the queue
propagate();
Assert(hasTerm(t));
Debug("equality") << d_name << "::eq::addTermInternal(" << t << ") => " << result << std::endl;
}
// True if the term has already been registered with this engine.
bool EqualityEngine::hasTerm(TNode t) const {
  return d_nodeIds.count(t) > 0;
}
// Look up the id of a previously added term; asserts (with the term's string
// as the message) that the term exists before dereferencing the iterator.
EqualityNodeId EqualityEngine::getNodeId(TNode node) const {
Assert(hasTerm(node), node.toString().c_str());
return (*d_nodeIds.find(node)).second;
}
// Convenience overload: resolve the term to its id, then fetch the node.
EqualityNode& EqualityEngine::getEqualityNode(TNode t) {
return getEqualityNode(getNodeId(t));
}
// Fetch the union-find node for a (bounds-checked) id.
EqualityNode& EqualityEngine::getEqualityNode(EqualityNodeId nodeId) {
Assert(nodeId < d_equalityNodes.size());
return d_equalityNodes[nodeId];
}
// Const overload of the term-based accessor.
const EqualityNode& EqualityEngine::getEqualityNode(TNode t) const {
return getEqualityNode(getNodeId(t));
}
// Const overload of the id-based accessor.
const EqualityNode& EqualityEngine::getEqualityNode(EqualityNodeId nodeId) const {
Assert(nodeId < d_equalityNodes.size());
return d_equalityNodes[nodeId];
}
// Queue the equality t1 = t2 (with the given reason and proof id) for
// propagation, adding both terms first if needed. No-op once the engine is
// in a conflicted ('done') state. Note the debug tag still says
// "addEqualityInternal" (historical name).
void EqualityEngine::assertEqualityInternal(TNode t1, TNode t2, TNode reason, unsigned pid) {
Debug("equality") << d_name << "::eq::addEqualityInternal(" << t1 << "," << t2 << "), reason = " << reason << ", pid = " << pid << std::endl;
if (d_done) {
return;
}
// Add the terms if they are not already in the database
addTermInternal(t1);
addTermInternal(t2);
// Add to the queue and propagate
EqualityNodeId t1Id = getNodeId(t1);
EqualityNodeId t2Id = getNodeId(t2);
enqueue(MergeCandidate(t1Id, t2Id, pid, reason));
}
// Assert a (non-equality) predicate by equating it with the interned true or
// false constant, according to polarity, and then propagating.
void EqualityEngine::assertPredicate(TNode t, bool polarity, TNode reason, unsigned pid) {
Debug("equality") << d_name << "::eq::addPredicate(" << t << "," << (polarity ? "true" : "false") << ")" << std::endl;
Assert(t.getKind() != kind::EQUAL, "Use assertEquality instead");
assertEqualityInternal(t, polarity ? d_true : d_false, reason, pid);
propagate();
}
// Merge two predicates into the same equivalence class for the given reason
// (uses assertEqualityInternal's default proof id) and propagate.
void EqualityEngine::mergePredicates(TNode p, TNode q, TNode reason) {
Debug("equality") << d_name << "::eq::mergePredicates(" << p << "," << q << ")" << std::endl;
assertEqualityInternal(p, q, reason);
propagate();
}
// Assert the equality term 'eq' with the given polarity. Positive polarity
// merges the two sides; negative polarity equates the equality itself with
// false and then walks the trigger-term sets of both sides to notify shared
// disequalities to every theory tag the two classes have in common, storing
// the three-part reason (a = aShared, b = bShared, eq = false) for proofs.
void EqualityEngine::assertEquality(TNode eq, bool polarity, TNode reason, unsigned pid) {
Debug("equality") << d_name << "::eq::addEquality(" << eq << "," << (polarity ? "true" : "false") << ")" << std::endl;
if (polarity) {
// If two terms are already equal, don't assert anything
if (hasTerm(eq[0]) && hasTerm(eq[1]) && areEqual(eq[0], eq[1])) {
return;
}
// Add equality between terms
assertEqualityInternal(eq[0], eq[1], reason, pid);
propagate();
} else {
// If two terms are already dis-equal, don't assert anything
if (hasTerm(eq[0]) && hasTerm(eq[1]) && areDisequal(eq[0], eq[1], false)) {
return;
}
// notify the theory
if (d_performNotify) {
d_notify.eqNotifyDisequal(eq[0], eq[1], reason);
}
Debug("equality::trigger") << d_name << "::eq::addEquality(" << eq << "," << (polarity ? "true" : "false") << ")" << std::endl;
assertEqualityInternal(eq, d_false, reason, pid);
propagate();
if (d_done) {
return;
}
// If both have constant representatives, we don't notify anyone
EqualityNodeId a = getNodeId(eq[0]);
EqualityNodeId b = getNodeId(eq[1]);
EqualityNodeId aClassId = getEqualityNode(a).getFind();
EqualityNodeId bClassId = getEqualityNode(b).getFind();
if (d_isConstant[aClassId] && d_isConstant[bClassId]) {
return;
}
// If we are adding a disequality, notify of the shared term representatives
EqualityNodeId eqId = getNodeId(eq);
TriggerTermSetRef aTriggerRef = d_nodeIndividualTrigger[aClassId];
TriggerTermSetRef bTriggerRef = d_nodeIndividualTrigger[bClassId];
if (aTriggerRef != +null_set_id && bTriggerRef != +null_set_id) {
Debug("equality::trigger") << d_name << "::eq::addEquality(" << eq << "," << (polarity ? "true" : "false") << ": have triggers" << std::endl;
// The sets of trigger terms
TriggerTermSet& aTriggerTerms = getTriggerTermSet(aTriggerRef);
TriggerTermSet& bTriggerTerms = getTriggerTermSet(bTriggerRef);
// Go through and notify the shared dis-equalities
Theory::Set aTags = aTriggerTerms.tags;
Theory::Set bTags = bTriggerTerms.tags;
TheoryId aTag = Theory::setPop(aTags);
TheoryId bTag = Theory::setPop(bTags);
// Merge-style walk over the two sorted tag sets, pairing equal tags
int a_i = 0, b_i = 0;
while (aTag != THEORY_LAST && bTag != THEORY_LAST) {
if (aTag < bTag) {
aTag = Theory::setPop(aTags);
++ a_i;
} else if (aTag > bTag) {
bTag = Theory::setPop(bTags);
++ b_i;
} else {
// Same tags, notify
EqualityNodeId aSharedId = aTriggerTerms.triggers[a_i++];
EqualityNodeId bSharedId = bTriggerTerms.triggers[b_i++];
// Propagate
if (!hasPropagatedDisequality(aTag, aSharedId, bSharedId)) {
// Store a proof if not there already
if (!hasPropagatedDisequality(aSharedId, bSharedId)) {
d_deducedDisequalityReasons.push_back(EqualityPair(aSharedId, a));
d_deducedDisequalityReasons.push_back(EqualityPair(bSharedId, b));
d_deducedDisequalityReasons.push_back(EqualityPair(eqId, d_falseId));
}
// Store the propagation
storePropagatedDisequality(aTag, aSharedId, bSharedId);
// Notify
Debug("equality::trigger") << d_name << "::eq::addEquality(" << eq << "," << (polarity ? "true" : "false") << ": notifying " << aTag << " for " << d_nodes[aSharedId] << " != " << d_nodes[bSharedId] << std::endl;
if (!d_notify.eqNotifyTriggerTermEquality(aTag, d_nodes[aSharedId], d_nodes[bSharedId], false)) {
break;
}
}
// Pop the next tags
aTag = Theory::setPop(aTags);
bTag = Theory::setPop(bTags);
}
}
}
}
}
// Return the representative term of t's equivalence class. The term must
// already be in the engine, and the representative is never an internal
// (curried operator) node.
TNode EqualityEngine::getRepresentative(TNode t) const {
Debug("equality::internal") << d_name << "::eq::getRepresentative(" << t << ")" << std::endl;
Assert(hasTerm(t));
EqualityNodeId representativeId = getEqualityNode(t).getFind();
Assert(!d_isInternal[representativeId]);
Debug("equality::internal") << d_name << "::eq::getRepresentative(" << t << ") => " << d_nodes[representativeId] << std::endl;
return d_nodes[representativeId];
}
// Merge class2 into class1: redirect every member's find pointer, collect
// equality triggers that fire, re-normalize the function applications in
// class2's use lists (queueing congruence merges and evaluating interpreted
// terms that became constant), propagate trigger-term disequalities, and
// merge the trigger-term sets of the two classes. Returns false if any
// notification callback returns false (conflict); true otherwise.
bool EqualityEngine::merge(EqualityNode& class1, EqualityNode& class2, std::vector<TriggerId>& triggersFired) {
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << ")" << std::endl;
Assert(triggersFired.empty());
++ d_stats.mergesCount;
EqualityNodeId class1Id = class1.getFind();
EqualityNodeId class2Id = class2.getFind();
Node n1 = d_nodes[class1Id];
Node n2 = d_nodes[class2Id];
EqualityNode cc1 = getEqualityNode(n1);
EqualityNode cc2 = getEqualityNode(n2);
bool doNotify = false;
// notify the theory
// the second part of this check is needed due to the internal implementation of this class.
// It ensures that we are merging terms and not operators.
if (d_performNotify && class1Id==cc1.getFind() && class2Id==cc2.getFind()) {
doNotify = true;
}
if (doNotify) {
d_notify.eqNotifyPreMerge(n1, n2);
}
// Check for constant merges
bool class1isConstant = d_isConstant[class1Id];
bool class2isConstant = d_isConstant[class2Id];
Assert(class1isConstant || !class2isConstant, "Should always merge into constants");
Assert(!class1isConstant || !class2isConstant, "Don't merge constants");
// Trigger set of class 1
TriggerTermSetRef class1triggerRef = d_nodeIndividualTrigger[class1Id];
Theory::Set class1Tags = class1triggerRef == null_set_id ? 0 : getTriggerTermSet(class1triggerRef).tags;
// Trigger set of class 2
TriggerTermSetRef class2triggerRef = d_nodeIndividualTrigger[class2Id];
Theory::Set class2Tags = class2triggerRef == null_set_id ? 0 : getTriggerTermSet(class2triggerRef).tags;
// Disequalities coming from class2
TaggedEqualitiesSet class2disequalitiesToNotify;
// Disequalities coming from class1
TaggedEqualitiesSet class1disequalitiesToNotify;
// Individual tags
Theory::Set class1OnlyTags = Theory::setDifference(class1Tags, class2Tags);
Theory::Set class2OnlyTags = Theory::setDifference(class2Tags, class1Tags);
// Only get disequalities if they are not both constant
if (!class1isConstant || !class2isConstant) {
getDisequalities(!class1isConstant, class2Id, class1OnlyTags, class2disequalitiesToNotify);
getDisequalities(!class2isConstant, class1Id, class2OnlyTags, class1disequalitiesToNotify);
}
// Update class2 representative information
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << "): updating class " << class2Id << std::endl;
EqualityNodeId currentId = class2Id;
do {
// Get the current node
EqualityNode& currentNode = getEqualityNode(currentId);
// Update it's find to class1 id
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << "): " << currentId << "->" << class1Id << std::endl;
currentNode.setFind(class1Id);
// Go through the triggers and inform if necessary
TriggerId currentTrigger = d_nodeTriggers[currentId];
while (currentTrigger != null_trigger) {
Trigger& trigger = d_equalityTriggers[currentTrigger];
// triggers are allocated in pairs; XOR 1 gives the partner trigger
Trigger& otherTrigger = d_equalityTriggers[currentTrigger ^ 1];
// If the two are not already in the same class
if (otherTrigger.classId != trigger.classId) {
trigger.classId = class1Id;
// If they became the same, call the trigger
if (otherTrigger.classId == class1Id) {
// Id of the real trigger is half the internal one
triggersFired.push_back(currentTrigger);
}
}
// Go to the next trigger
currentTrigger = trigger.nextTrigger;
}
// Move to the next node
currentId = currentNode.getNext();
} while (currentId != class2Id);
// Update class2 table lookup and information if not a boolean
// since booleans can't be in an application
if (!d_isEquality[class2Id]) {
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << "): updating lookups of " << class2Id << std::endl;
do {
// Get the current node
EqualityNode& currentNode = getEqualityNode(currentId);
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << "): updating lookups of node " << currentId << std::endl;
// Go through the uselist and check for congruences
UseListNodeId currentUseId = currentNode.getUseList();
while (currentUseId != null_uselist_id) {
// Get the node of the use list
UseListNode& useNode = d_useListNodes[currentUseId];
// Get the function application
EqualityNodeId funId = useNode.getApplicationId();
Debug("equality") << d_name << "::eq::merge(" << class1.getFind() << "," << class2.getFind() << "): " << d_nodes[currentId] << " in " << d_nodes[funId] << std::endl;
const FunctionApplication& fun = d_applications[useNode.getApplicationId()].normalized;
// If it's interpreted and we can interpret
if (fun.isInterpreted() && class1isConstant && !d_isInternal[currentId]) {
// Get the actual term id
TNode term = d_nodes[funId];
subtermEvaluates(getNodeId(term));
}
// Check if there is an application with find arguments
EqualityNodeId aNormalized = getEqualityNode(fun.a).getFind();
EqualityNodeId bNormalized = getEqualityNode(fun.b).getFind();
FunctionApplication funNormalized(fun.type, aNormalized, bNormalized);
ApplicationIdsMap::iterator find = d_applicationLookup.find(funNormalized);
if (find != d_applicationLookup.end()) {
// Applications fun and the funNormalized can be merged due to congruence
if (getEqualityNode(funId).getFind() != getEqualityNode(find->second).getFind()) {
enqueue(MergeCandidate(funId, find->second, MERGED_THROUGH_CONGRUENCE, TNode::null()));
}
} else {
// There is no representative, so we can add one, we remove this when backtracking
storeApplicationLookup(funNormalized, funId);
}
// Go to the next one in the use list
currentUseId = useNode.getNext();
}
// Move to the next node
currentId = currentNode.getNext();
} while (currentId != class2Id);
}
// Now merge the lists
class1.merge<true>(class2);
// notify the theory
if (doNotify) {
d_notify.eqNotifyPostMerge(n1, n2);
}
// Go through the trigger term disequalities and propagate
if (!propagateTriggerTermDisequalities(class1OnlyTags, class1triggerRef, class2disequalitiesToNotify)) {
return false;
}
if (!propagateTriggerTermDisequalities(class2OnlyTags, class2triggerRef, class1disequalitiesToNotify)) {
return false;
}
// Notify the trigger term merges
if (class2triggerRef != +null_set_id) {
if (class1triggerRef == +null_set_id) {
// If class1 doesn't have individual triggers, but class2 does, mark it
d_nodeIndividualTrigger[class1Id] = class2triggerRef;
// Add it to the list for backtracking
d_triggerTermSetUpdates.push_back(TriggerSetUpdate(class1Id, +null_set_id));
d_triggerTermSetUpdatesSize = d_triggerTermSetUpdatesSize + 1;
} else {
// Get the triggers
TriggerTermSet& class1triggers = getTriggerTermSet(class1triggerRef);
TriggerTermSet& class2triggers = getTriggerTermSet(class2triggerRef);
// Initialize the merged set
Theory::Set newSetTags = Theory::setUnion(class1triggers.tags, class2triggers.tags);
EqualityNodeId newSetTriggers[THEORY_LAST];
unsigned newSetTriggersSize = 0;
int i1 = 0;
int i2 = 0;
Theory::Set tags1 = class1triggers.tags;
Theory::Set tags2 = class2triggers.tags;
TheoryId tag1 = Theory::setPop(tags1);
TheoryId tag2 = Theory::setPop(tags2);
// Comparing the THEORY_LAST is OK because all other theories are
// smaller, and will therefore be preferred
while (tag1 != THEORY_LAST || tag2 != THEORY_LAST)
{
if (tag1 < tag2) {
// copy tag1
newSetTriggers[newSetTriggersSize++] = class1triggers.triggers[i1++];
tag1 = Theory::setPop(tags1);
} else if (tag1 > tag2) {
// copy tag2
newSetTriggers[newSetTriggersSize++] = class2triggers.triggers[i2++];
tag2 = Theory::setPop(tags2);
} else {
// copy tag1
EqualityNodeId tag1id = newSetTriggers[newSetTriggersSize++] = class1triggers.triggers[i1++];
// since they are both tagged notify of merge
if (d_performNotify) {
EqualityNodeId tag2id = class2triggers.triggers[i2++];
if (!d_notify.eqNotifyTriggerTermEquality(tag1, d_nodes[tag1id], d_nodes[tag2id], true)) {
return false;
}
}
// Next tags
tag1 = Theory::setPop(tags1);
tag2 = Theory::setPop(tags2);
}
}
// Add the new trigger set, if different from previous one
if (class1triggers.tags != class2triggers.tags) {
// Add it to the list for backtracking
d_triggerTermSetUpdates.push_back(TriggerSetUpdate(class1Id, class1triggerRef));
d_triggerTermSetUpdatesSize = d_triggerTermSetUpdatesSize + 1;
// Mark the new set as a trigger
d_nodeIndividualTrigger[class1Id] = newTriggerTermSet(newSetTags, newSetTriggers, newSetTriggersSize);
}
}
}
// Everything fine
return true;
}
void EqualityEngine::undoMerge(EqualityNode& class1, EqualityNode& class2, EqualityNodeId class2Id) {
  Debug("equality") << d_name << "::eq::undoMerge(" << class1.getFind() << "," << class2Id << ")" << std::endl;

  // Split the circular next-pointer lists back apart; merge<false> is the
  // exact inverse of the merge<true> performed in merge().
  class1.merge<false>(class2);

  // Restore the representative bookkeeping for every node that used to
  // belong to class2.
  EqualityNodeId nodeId = class2Id;
  Debug("equality") << d_name << "::eq::undoMerge(" << class1.getFind() << "," << class2Id << "): undoing representative info" << std::endl;
  do {
    EqualityNode& node = getEqualityNode(nodeId);
    // Point the node back to its old representative.
    node.setFind(class2Id);
    // Re-tag every trigger attached to this node with class2 again.
    for (TriggerId t = d_nodeTriggers[nodeId]; t != null_trigger; ) {
      Trigger& trigger = d_equalityTriggers[t];
      trigger.classId = class2Id;
      t = trigger.nextTrigger;
    }
    // Advance along the (circular) class list.
    nodeId = node.getNext();
  } while (nodeId != class2Id);
}
void EqualityEngine::backtrack() {
  Debug("equality::backtrack") << "backtracking" << std::endl;

  // Undo all merges asserted above the saved push-point
  // (d_assertedEqualitiesCount is context-dependent; the vector is not).
  if (d_assertedEqualitiesCount < d_assertedEqualities.size()) {

    // Clear the propagation queue: any pending merges are now stale
    while (!d_propagationQueue.empty()) {
      d_propagationQueue.pop_front();
    }

    Debug("equality") << d_name << "::eq::backtrack(): nodes" << std::endl;

    // Undo the merges in reverse order of assertion
    for (int i = (int)d_assertedEqualities.size() - 1, i_end = (int)d_assertedEqualitiesCount; i >= i_end; --i) {
      // Get the ids of the merged classes
      Equality& eq = d_assertedEqualities[i];
      // Undo the merge; a null_id entry is the placeholder pushed when two
      // constants merged (conflict), which has no class merge to undo
      if (eq.lhs != null_id) {
        undoMerge(d_equalityNodes[eq.lhs], d_equalityNodes[eq.rhs], eq.rhs);
      }
    }

    d_assertedEqualities.resize(d_assertedEqualitiesCount);

    Debug("equality") << d_name << "::eq::backtrack(): edges" << std::endl;

    // Unlink the graph edges added for the undone assertions. Edges come in
    // pairs (i, i|1), one half-edge per direction, and each was pushed at the
    // head of its source node's adjacency list, so popping is just restoring
    // the saved next pointer.
    for (int i = (int)d_equalityEdges.size() - 2, i_end = (int)(2*d_assertedEqualitiesCount); i >= i_end; i -= 2) {
      EqualityEdge& edge1 = d_equalityEdges[i];
      EqualityEdge& edge2 = d_equalityEdges[i | 1];
      d_equalityGraph[edge2.getNodeId()] = edge1.getNext();
      d_equalityGraph[edge1.getNodeId()] = edge2.getNext();
    }

    d_equalityEdges.resize(2 * d_assertedEqualitiesCount);
  }

  // Restore the per-class individual trigger-set references
  if (d_triggerTermSetUpdates.size() > d_triggerTermSetUpdatesSize) {
    // Unset the individual triggers
    for (int i = d_triggerTermSetUpdates.size() - 1, i_end = d_triggerTermSetUpdatesSize; i >= i_end; -- i) {
      const TriggerSetUpdate& update = d_triggerTermSetUpdates[i];
      d_nodeIndividualTrigger[update.classId] = update.oldValue;
    }
    d_triggerTermSetUpdates.resize(d_triggerTermSetUpdatesSize);
  }

  // Remove equality triggers added above the push-point
  if (d_equalityTriggers.size() > d_equalityTriggersCount) {
    // Unlink the triggers from the lists (each was pushed at the list head)
    for (int i = d_equalityTriggers.size() - 1, i_end = d_equalityTriggersCount; i >= i_end; -- i) {
      const Trigger& trigger = d_equalityTriggers[i];
      d_nodeTriggers[trigger.classId] = trigger.nextTrigger;
    }
    // Get rid of the triggers
    d_equalityTriggers.resize(d_equalityTriggersCount);
    d_equalityTriggersOriginal.resize(d_equalityTriggersCount);
  }

  // Remove function-application lookups added above the push-point
  if (d_applicationLookups.size() > d_applicationLookupsCount) {
    for (int i = d_applicationLookups.size() - 1, i_end = (int) d_applicationLookupsCount; i >= i_end; -- i) {
      d_applicationLookup.erase(d_applicationLookups[i]);
    }
    d_applicationLookups.resize(d_applicationLookupsCount);
  }

  // Re-increment the "subterms left to evaluate" counters that were
  // decremented above the push-point
  if (d_subtermEvaluates.size() > d_subtermEvaluatesSize) {
    for(int i = d_subtermEvaluates.size() - 1, i_end = (int)d_subtermEvaluatesSize; i >= i_end; --i) {
      d_subtermsToEvaluate[d_subtermEvaluates[i]] ++;
    }
    d_subtermEvaluates.resize(d_subtermEvaluatesSize);
  }

  // Remove terms added above the push-point
  if (d_nodes.size() > d_nodesCount) {
    // Go down the nodes, check the application nodes and remove them from use-lists
    for(int i = d_nodes.size() - 1, i_end = (int)d_nodesCount; i >= i_end; -- i) {
      // Remove from the node -> id map
      Debug("equality") << d_name << "::eq::backtrack(): removing node " << d_nodes[i] << std::endl;
      d_nodeIds.erase(d_nodes[i]);

      const FunctionApplication& app = d_applications[i].original;
      if (!app.isNull()) {
        // Remove b from use-list
        getEqualityNode(app.b).removeTopFromUseList(d_useListNodes);
        // Remove a from use-list
        getEqualityNode(app.a).removeTopFromUseList(d_useListNodes);
      }
    }

    // Now get rid of the nodes and the rest (all per-node parallel tables
    // must be kept the same length as d_nodes)
    d_nodes.resize(d_nodesCount);
    d_applications.resize(d_nodesCount);
    d_nodeTriggers.resize(d_nodesCount);
    d_nodeIndividualTrigger.resize(d_nodesCount);
    d_isConstant.resize(d_nodesCount);
    d_subtermsToEvaluate.resize(d_nodesCount);
    d_isEquality.resize(d_nodesCount);
    d_isInternal.resize(d_nodesCount);
    d_equalityGraph.resize(d_nodesCount);
    d_equalityNodes.resize(d_nodesCount);
  }

  // Forget disequalities deduced above the push-point; each was recorded in
  // both orientations in the reasons map
  if (d_deducedDisequalities.size() > d_deducedDisequalitiesSize) {
    for(int i = d_deducedDisequalities.size() - 1, i_end = (int)d_deducedDisequalitiesSize; i >= i_end; -- i) {
      EqualityPair pair = d_deducedDisequalities[i];
      Assert(d_disequalityReasonsMap.find(pair) != d_disequalityReasonsMap.end());
      // Remove from the map
      d_disequalityReasonsMap.erase(pair);
      std::swap(pair.first, pair.second);
      d_disequalityReasonsMap.erase(pair);
    }
    d_deducedDisequalityReasons.resize(d_deducedDisequalityReasonsSize);
    d_deducedDisequalities.resize(d_deducedDisequalitiesSize);
  }
}
void EqualityEngine::addGraphEdge(EqualityNodeId t1, EqualityNodeId t2, unsigned type, TNode reason) {
  Debug("equality") << d_name << "::eq::addGraphEdge(" << d_nodes[t1] << "," << d_nodes[t2] << "," << reason << ")" << std::endl;
  // Every undirected edge is stored as two half-edges at consecutive indices:
  // the even id leaves t1, the odd id (even | 1) leaves t2.
  const EqualityEdgeId forwardId = d_equalityEdges.size();
  const EqualityEdgeId backwardId = forwardId | 1;
  // Each half-edge chains to the current head of its source node's list.
  d_equalityEdges.push_back(EqualityEdge(t2, d_equalityGraph[t1], type, reason));
  d_equalityEdges.push_back(EqualityEdge(t1, d_equalityGraph[t2], type, reason));
  // The new half-edges become the heads of their adjacency lists.
  d_equalityGraph[t1] = forwardId;
  d_equalityGraph[t2] = backwardId;
  if (Debug.isOn("equality::internal")) {
    debugPrintGraph();
  }
}
std::string EqualityEngine::edgesToString(EqualityEdgeId edgeId) const {
  // Render the adjacency list starting at edgeId as a comma-separated list
  // of the target nodes; "null" for an empty list.
  std::stringstream out;
  if (edgeId == null_edge) {
    out << "null";
  } else {
    bool needSeparator = false;
    while (edgeId != null_edge) {
      const EqualityEdge& edge = d_equalityEdges[edgeId];
      if (needSeparator) {
        out << ",";
      }
      out << d_nodes[edge.getNodeId()];
      needSeparator = true;
      edgeId = edge.getNext();
    }
  }
  return out.str();
}
void EqualityEngine::explainEquality(TNode t1, TNode t2, bool polarity, std::vector<TNode>& equalities, EqProof * eqp) const {
  Debug("equality") << d_name << "::eq::explainEquality(" << t1 << ", " << t2 << ", " << (polarity ? "true" : "false") << ")" << ", proof = " << (eqp ? "ON" : "OFF") << std::endl;

  // Both terms must have been added to the engine already
  Assert(hasTerm(t1) && hasTerm(t2));;

  // Get the ids
  EqualityNodeId t1Id = getNodeId(t1);
  EqualityNodeId t2Id = getNodeId(t2);

  if (polarity) {
    // Explaining t1 = t2: walk the equality graph for a path between them
    getExplanation(t1Id, t2Id, equalities, eqp);
  } else {
    // Explaining t1 != t2: replay the stored reason for the disequality
    if (eqp) {
      eqp->d_id = eq::MERGED_THROUGH_TRANS;
      eqp->d_node = d_nodes[t1Id].eqNode(d_nodes[t2Id]).notNode();
    }

    // Get the reason for this disequality
    EqualityPair pair(t1Id, t2Id);
    Assert(d_disequalityReasonsMap.find(pair) != d_disequalityReasonsMap.end(), "Don't ask for stuff I didn't notify you about");
    DisequalityReasonRef reasonRef = d_disequalityReasonsMap.find(pair)->second;

    // Explain each of the merges that together entail the disequality
    for (unsigned i = reasonRef.mergesStart; i < reasonRef.mergesEnd; ++ i) {

      EqualityPair toExplain = d_deducedDisequalityReasons[i];
      EqProof* eqpc = NULL;

      // If we're constructing a (transitivity) proof, we don't need to include an explanation for x=x.
      if (eqp && toExplain.first != toExplain.second) {
        eqpc = new EqProof;
      }

      getExplanation(toExplain.first, toExplain.second, equalities, eqpc);

      if (eqpc) {
        Debug("pf::ee") << "Child proof is:" << std::endl;
        eqpc->debug_print("pf::ee", 1);

        if (eqpc->d_id == eq::MERGED_THROUGH_TRANS) {
          // Congruence children with a null d_node are split into their two
          // sub-proofs so the transitivity chain stays well-ordered
          std::vector<EqProof *> orderedChildren;
          bool nullCongruenceFound = false;
          for (unsigned i = 0; i < eqpc->d_children.size(); ++i) {
            if (eqpc->d_children[i]->d_id==eq::MERGED_THROUGH_CONGRUENCE &&
                eqpc->d_children[i]->d_node.isNull()) {
              nullCongruenceFound = true;
              Debug("pf::ee") << "Have congruence with empty d_node. Splitting..." << std::endl;
              // First sub-proof goes to the front, second to the back
              orderedChildren.insert(orderedChildren.begin(), eqpc->d_children[i]->d_children[0]);
              orderedChildren.push_back(eqpc->d_children[i]->d_children[1]);
            } else {
              orderedChildren.push_back(eqpc->d_children[i]);
            }
          }

          if (nullCongruenceFound) {
            eqpc->d_children = orderedChildren;
            Debug("pf::ee") << "Child proof's children have been reordered. It is now:" << std::endl;
            eqpc->debug_print("pf::ee", 1);
          }
        }

        eqp->d_children.push_back(eqpc);
      }
    }

    if (eqp) {
      if (eqp->d_children.size() == 0) {
        // Corner case where this is actually a disequality between two constants
        Debug("pf::ee") << "Encountered a constant disequality (not a transitivity proof): "
                        << eqp->d_node << std::endl;
        Assert(eqp->d_node[0][0].isConst());
        Assert(eqp->d_node[0][1].isConst());
        eqp->d_id = MERGED_THROUGH_CONSTANTS;
      } else if (eqp->d_children.size() == 1) {
        // The transitivity proof has just one child. Simplify.
        EqProof* temp = eqp->d_children[0];
        eqp->d_children.clear();
        *eqp = *temp;
        delete temp;
      }

      Debug("pf::ee") << "Disequality explanation final proof: " << std::endl;
      eqp->debug_print("pf::ee", 1);
    }
  }
}
void EqualityEngine::explainPredicate(TNode p, bool polarity, std::vector<TNode>& assertions, EqProof * eqp) const {
  Debug("equality") << d_name << "::eq::explainPredicate(" << p << ")" << std::endl;
  // The predicate must already be registered with the engine.
  Assert(hasTerm(p));
  // A predicate holds (resp. fails) iff it is merged with true (resp. false),
  // so explaining the predicate is explaining that equality.
  const EqualityNodeId targetId = polarity ? d_trueId : d_falseId;
  getExplanation(getNodeId(p), targetId, assertions, eqp);
}
void EqualityEngine::getExplanation(EqualityNodeId t1Id, EqualityNodeId t2Id, std::vector<TNode>& equalities, EqProof * eqp) const {
  Debug("equality") << d_name << "::eq::getExplanation(" << d_nodes[t1Id] << "," << d_nodes[t2Id] << ")" << std::endl;

  // We can only explain the nodes that got merged (or, once in conflict,
  // two constants)
#ifdef CVC4_ASSERTIONS
  bool canExplain = getEqualityNode(t1Id).getFind() == getEqualityNode(t2Id).getFind()
                 || (d_done && isConstant(t1Id) && isConstant(t2Id));

  if (!canExplain) {
    Warning() << "Can't explain equality:" << std::endl;
    Warning() << d_nodes[t1Id] << " with find " << d_nodes[getEqualityNode(t1Id).getFind()] << std::endl;
    Warning() << d_nodes[t2Id] << " with find " << d_nodes[getEqualityNode(t2Id).getFind()] << std::endl;
  }
  Assert(canExplain);
#endif

  // If the nodes are the same, we're done; only the proof object (if any)
  // needs a node filled in
  if (t1Id == t2Id){
    if( eqp ) {
      // Special-case a bare SELECT operator: represent it as PARTIAL_SELECT_0
      // for the proof printer
      if ((d_nodes[t1Id].getKind() == kind::BUILTIN) && (d_nodes[t1Id].getConst<Kind>() == kind::SELECT)) {
        std::vector<Node> no_children;
        eqp->d_node = NodeManager::currentNM()->mkNode(kind::PARTIAL_SELECT_0, no_children);
      } else {
        eqp->d_node = ProofManager::currentPM()->mkOp(d_nodes[t1Id]);
      }
    }
    return;
  }

  if (Debug.isOn("equality::internal")) {
    debugPrintGraph();
  }

  // Queue for the BFS containing nodes
  std::vector<BfsData> bfsQueue;

  // Find a path from t1 to t2 in the graph (BFS)
  bfsQueue.push_back(BfsData(t1Id, null_id, 0));
  size_t currentIndex = 0;
  while (true) {
    // There should always be a path, and every node can be visited only once (tree)
    Assert(currentIndex < bfsQueue.size());

    // The next node to visit
    BfsData current = bfsQueue[currentIndex];
    EqualityNodeId currentNode = current.nodeId;

    Debug("equality") << d_name << "::eq::getExplanation(): currentNode = " << d_nodes[currentNode] << std::endl;

    // Go through the equality edges of this node
    EqualityEdgeId currentEdge = d_equalityGraph[currentNode];
    if (Debug.isOn("equality")) {
      Debug("equality") << d_name << "::eq::getExplanation(): edgesId = " << currentEdge << std::endl;
      Debug("equality") << d_name << "::eq::getExplanation(): edges = " << edgesToString(currentEdge) << std::endl;
    }

    while (currentEdge != null_edge) {
      // Get the edge
      const EqualityEdge& edge = d_equalityEdges[currentEdge];

      // If not just the backwards edge (half-edges i and i|1 are the two
      // directions of the same edge)
      if ((currentEdge | 1u) != (current.edgeId | 1u)) {

        Debug("equality") << d_name << "::eq::getExplanation(): currentEdge = (" << d_nodes[currentNode] << "," << d_nodes[edge.getNodeId()] << ")" << std::endl;

        // Did we find the path
        if (edge.getNodeId() == t2Id) {

          Debug("equality") << d_name << "::eq::getExplanation(): path found: " << std::endl;

          // Collected child proofs of the transitivity chain
          std::vector<EqProof *> eqp_trans;

          // Reconstruct the path by following BFS parent pointers back to t1
          do {
            // The current node
            currentNode = bfsQueue[currentIndex].nodeId;
            EqualityNodeId edgeNode = d_equalityEdges[currentEdge].getNodeId();
            unsigned reasonType = d_equalityEdges[currentEdge].getReasonType();
            Node reason = d_equalityEdges[currentEdge].getReason();

            Debug("equality") << d_name << "::eq::getExplanation(): currentEdge = " << currentEdge << ", currentNode = " << currentNode << std::endl;
            Debug("equality") << d_name << "                       targetNode = " << d_nodes[edgeNode] << std::endl;
            Debug("equality") << d_name << "                       in currentEdge = (" << d_nodes[currentNode] << "," << d_nodes[edge.getNodeId()] << ")" << std::endl;
            Debug("equality") << d_name << "                       reason type = " << reasonType << std::endl;

            EqProof* eqpc = NULL;
            // Make child proof if a proof is being constructed
            if (eqp) {
              eqpc = new EqProof;
              eqpc->d_id = reasonType;
            }

            // Add the actual equality to the vector; the reason type determines
            // how the explanation for this single edge is produced
            switch (reasonType) {
            case MERGED_THROUGH_CONGRUENCE: {
              // f(x1, x2) == f(y1, y2) because x1 = y1 and x2 = y2
              Debug("equality") << d_name << "::eq::getExplanation(): due to congruence, going deeper" << std::endl;
              const FunctionApplication& f1 = d_applications[currentNode].original;
              const FunctionApplication& f2 = d_applications[edgeNode].original;
              Debug("equality") << push;
              Debug("equality") << "Explaining left hand side equalities" << std::endl;
              EqProof * eqpc1 = eqpc ? new EqProof : NULL;
              getExplanation(f1.a, f2.a, equalities, eqpc1);
              Debug("equality") << "Explaining right hand side equalities" << std::endl;
              EqProof * eqpc2 = eqpc ? new EqProof : NULL;
              getExplanation(f1.b, f2.b, equalities, eqpc2);
              if( eqpc ){
                eqpc->d_children.push_back( eqpc1 );
                eqpc->d_children.push_back( eqpc2 );
                if( d_nodes[currentNode].getKind()==kind::EQUAL ){
                  //leave node null for now
                  eqpc->d_node = Node::null();
                } else {
                  // Pick the proof node that describes the partial application
                  if(d_nodes[f1.a].getKind() == kind::APPLY_UF ||
                     d_nodes[f1.a].getKind() == kind::SELECT ||
                     d_nodes[f1.a].getKind() == kind::STORE) {
                    eqpc->d_node = d_nodes[f1.a];
                  } else {
                    if (d_nodes[f1.a].getKind() == kind::BUILTIN && d_nodes[f1.a].getConst<Kind>() == kind::SELECT) {
                      eqpc->d_node = NodeManager::currentNM()->mkNode(kind::PARTIAL_SELECT_1, d_nodes[f1.b]);
                      // The first child is a PARTIAL_SELECT_0.
                      // Give it a child so that we know what kind of (read) it is, when we dump to LFSC.
                      Assert(eqpc->d_children[0]->d_node.getKind() == kind::PARTIAL_SELECT_0);
                      Assert(eqpc->d_children[0]->d_children.size() == 0);
                      eqpc->d_children[0]->d_node = NodeManager::currentNM()->mkNode(kind::PARTIAL_SELECT_0,
                                                                                    d_nodes[f1.b]);
                    } else {
                      eqpc->d_node = NodeManager::currentNM()->mkNode(kind::PARTIAL_APPLY_UF,
                                                                      ProofManager::currentPM()->mkOp(d_nodes[f1.a]),
                                                                      d_nodes[f1.b]);
                    }
                  }
                }
              }
              Debug("equality") << pop;
              break;
            }

            case MERGED_THROUGH_REFLEXIVITY: {
              // x1 == x1: explain why the equality's two sides merged
              Debug("equality") << d_name << "::eq::getExplanation(): due to reflexivity, going deeper" << std::endl;
              EqualityNodeId eqId = currentNode == d_trueId ? edgeNode : currentNode;
              const FunctionApplication& eq = d_applications[eqId].original;
              Assert(eq.isEquality(), "Must be an equality");

              // Explain why a = b constant
              Debug("equality") << push;
              EqProof * eqpc1 = eqpc ? new EqProof : NULL;
              getExplanation(eq.a, eq.b, equalities, eqpc1);
              if( eqpc ){
                eqpc->d_children.push_back( eqpc1 );
              }
              Debug("equality") << pop;

              break;
            }

            case MERGED_THROUGH_CONSTANTS: {
              // f(c1, ..., cn) = c semantically, we can just ignore it
              Debug("equality") << d_name << "::eq::getExplanation(): due to constants, explain the constants" << std::endl;
              Debug("equality") << push;

              // Get the node we interpreted (the non-constant side)
              TNode interpreted = d_nodes[currentNode];
              if (interpreted.isConst()) {
                interpreted = d_nodes[edgeNode];
              }

              // Explain why a is a constant by explaining each argument
              for (unsigned i = 0; i < interpreted.getNumChildren(); ++ i) {
                EqualityNodeId childId = getNodeId(interpreted[i]);
                Assert(isConstant(childId));
                EqProof * eqpcc = eqpc ? new EqProof : NULL;
                getExplanation(childId, getEqualityNode(childId).getFind(), equalities, eqpcc);
                if( eqpc ) {
                  eqpc->d_children.push_back( eqpcc );

                  Debug("pf::ee") << "MERGED_THROUGH_CONSTANTS. Dumping the child proof" << std::endl;
                  eqpc->debug_print("pf::ee", 1);
                }
              }

              Debug("equality") << pop;
              break;
            }

            default: {
              // A plain asserted equality (or a theory-specific reason):
              // the reason node itself is the explanation
              Debug("equality") << d_name << "::eq::getExplanation(): adding: "
                                << reason << std::endl;
              Debug("equality") << d_name << "::eq::getExplanation(): reason type = " << reasonType << std::endl;
              Node a = d_nodes[currentNode];
              Node b = d_nodes[d_equalityEdges[currentEdge].getNodeId()];

              if (eqpc) {
                //apply proof reconstruction processing (when eqpc is non-null)
                if (d_pathReconstructionTriggers.find(reasonType) != d_pathReconstructionTriggers.end()) {
                  d_pathReconstructionTriggers.find(reasonType)->second->notify(reasonType, reason, a, b,
                                                                                equalities, eqpc);
                }
                if (reasonType == MERGED_THROUGH_EQUALITY) {
                  eqpc->d_node = reason;
                } else {
                  // The LFSC translator prefers (not (= a b)) over (= (= a b) false)
                  if (a == NodeManager::currentNM()->mkConst(false)) {
                    eqpc->d_node = b.notNode();
                  } else if (b == NodeManager::currentNM()->mkConst(false)) {
                    eqpc->d_node = a.notNode();
                  } else {
                    eqpc->d_node = b.eqNode(a);
                  }
                }
                eqpc->d_id = reasonType;
              }
              equalities.push_back(reason);
              break;
            }
            }

            // Go to the previous edge on the BFS path
            currentEdge = bfsQueue[currentIndex].edgeId;
            currentIndex = bfsQueue[currentIndex].previousIndex;

            //---from Morgan---
            // Collapse a reflexivity proof with a null node into its only child
            if (eqpc != NULL && eqpc->d_id == MERGED_THROUGH_REFLEXIVITY) {
              if(eqpc->d_node.isNull()) {
                Assert(eqpc->d_children.size() == 1);
                EqProof *p = eqpc;
                eqpc = p->d_children[0];
                delete p;
              } else {
                Assert(eqpc->d_children.empty());
              }
            }
            //---end from Morgan---

            eqp_trans.push_back(eqpc);

            // NOTE(review): loop guard compares an edge id against null_id —
            // presumably null_id and null_edge share the same sentinel value;
            // confirm against the header before changing
          } while (currentEdge != null_id);

          if (eqp) {
            if(eqp_trans.size() == 1) {
              // A single step needs no transitivity wrapper
              *eqp = *eqp_trans[0];
              delete eqp_trans[0];
            } else {
              eqp->d_id = MERGED_THROUGH_TRANS;
              eqp->d_children.insert( eqp->d_children.end(), eqp_trans.begin(), eqp_trans.end() );
              eqp->d_node = NodeManager::currentNM()->mkNode(kind::EQUAL, d_nodes[t1Id], d_nodes[t2Id]);
            }
            eqp->debug_print("pf::ee", 1);
          }

          // Done
          return;
        }

        // Push to the visitation queue if it's not the backward edge
        bfsQueue.push_back(BfsData(edge.getNodeId(), currentEdge, currentIndex));
      }

      // Go to the next edge
      currentEdge = edge.getNext();
    }

    // Go to the next node to visit
    ++ currentIndex;
  }
}
void EqualityEngine::addTriggerEquality(TNode eq) {
  Assert(eq.getKind() == kind::EQUAL);

  if (d_done) {
    return;
  }

  // Make sure both sides of the equality are registered.
  addTermInternal(eq[0]);
  addTermInternal(eq[1]);

  // If the equality (or its negation) already holds, notify right away:
  // there is nothing left to watch for.
  bool alreadyDecided = false;
  if (areEqual(eq[0], eq[1])) {
    d_notify.eqNotifyTriggerEquality(eq, true);
    alreadyDecided = true;
  }
  if (areDisequal(eq[0], eq[1], true)) {
    d_notify.eqNotifyTriggerEquality(eq, false);
    alreadyDecided = true;
  }
  if (alreadyDecided) {
    return;
  }

  // Register the equality itself as a term.
  addTermInternal(eq);
  // Fire when the two sides merge (positive polarity)...
  addTriggerEqualityInternal(eq[0], eq[1], eq, true);
  // ...and when the equality term merges with false (negative polarity).
  addTriggerEqualityInternal(eq, d_false, eq, false);
}
void EqualityEngine::addTriggerPredicate(TNode predicate) {
  Assert(predicate.getKind() != kind::NOT && predicate.getKind() != kind::EQUAL);
  Assert(d_congruenceKinds.tst(predicate.getKind()), "No point in adding non-congruence predicates");

  if (d_done) {
    return;
  }

  // Register the predicate term.
  addTermInternal(predicate);

  // If the predicate's truth value is already determined, notify right away:
  // no trigger is needed.
  bool alreadyDecided = false;
  if (areEqual(predicate, d_true)) {
    d_notify.eqNotifyTriggerPredicate(predicate, true);
    alreadyDecided = true;
  }
  if (areEqual(predicate, d_false)) {
    d_notify.eqNotifyTriggerPredicate(predicate, false);
    alreadyDecided = true;
  }
  if (alreadyDecided) {
    return;
  }

  // Fire when the predicate merges with true (positive polarity)...
  addTriggerEqualityInternal(predicate, d_true, predicate, true);
  // ...and when it merges with false (negative polarity).
  addTriggerEqualityInternal(predicate, d_false, predicate, false);
}
void EqualityEngine::addTriggerEqualityInternal(TNode t1, TNode t2, TNode trigger, bool polarity) {
  Debug("equality") << d_name << "::eq::addTrigger(" << t1 << ", " << t2 << ", " << trigger << ")" << std::endl;

  Assert(hasTerm(t1));
  Assert(hasTerm(t2));

  if (d_done) {
    return;
  }

  // Triggers are attached to the class representatives so they can be found
  // (and re-linked) as classes merge and un-merge on backtracking.
  EqualityNodeId t1Id = getNodeId(t1);
  EqualityNodeId t2Id = getNodeId(t2);
  EqualityNodeId t1classId = getEqualityNode(t1Id).getFind();
  EqualityNodeId t2classId = getEqualityNode(t2Id).getFind();

  // Current heads of the two classes' trigger lists.
  TriggerId t1TriggerId = d_nodeTriggers[t1classId];
  TriggerId t2TriggerId = d_nodeTriggers[t2classId];

  Debug("equality") << d_name << "::eq::addTrigger(" << trigger << "): " << t1Id << " (" << t1classId << ") = " << t2Id << " (" << t2classId << ")" << std::endl;

  // Allocate the two triggers as a consecutive pair, each chained to the old
  // head of its class's list.
  TriggerId t1NewTriggerId = d_equalityTriggers.size();
  d_equalityTriggers.push_back(Trigger(t1classId, t1TriggerId));
  d_equalityTriggersOriginal.push_back(TriggerInfo(trigger, polarity));
  TriggerId t2NewTriggerId = d_equalityTriggers.size();
  d_equalityTriggers.push_back(Trigger(t2classId, t2TriggerId));
  d_equalityTriggersOriginal.push_back(TriggerInfo(trigger, polarity));

  // Remember the new trigger count (used when backtracking).
  d_equalityTriggersCount = d_equalityTriggers.size();
  Assert(d_equalityTriggers.size() == d_equalityTriggersOriginal.size());
  Assert(d_equalityTriggers.size() % 2 == 0);

  // The fresh triggers become the heads of their classes' lists.
  d_nodeTriggers[t1classId] = t1NewTriggerId;
  d_nodeTriggers[t2classId] = t2NewTriggerId;

  if (Debug.isOn("equality::internal")) {
    debugPrintGraph();
  }

  Debug("equality") << d_name << "::eq::addTrigger(" << t1 << "," << t2 << ") => (" << t1NewTriggerId << ", " << t2NewTriggerId << ")" << std::endl;
}
Node EqualityEngine::evaluateTerm(TNode node) {
  Debug("equality::evaluation") << d_name << "::eq::evaluateTerm(" << node << ")" << std::endl;
  // Rebuild the term with each child replaced by its class representative
  // (asserted to be a constant), then fold the result with the rewriter.
  NodeBuilder<> nb;
  nb << node.getKind();
  if (node.getMetaKind() == kind::metakind::PARAMETERIZED) {
    nb << node.getOperator();
  }
  const unsigned nChildren = node.getNumChildren();
  for (unsigned i = 0; i < nChildren; ++i) {
    TNode child = node[i];
    TNode childRep = getRepresentative(child);
    Debug("equality::evaluation") << d_name << "::eq::evaluateTerm: " << child << " -> " << childRep << std::endl;
    Assert(childRep.isConst());
    nb << childRep;
  }
  Node rebuilt = nb;
  return Rewriter::rewrite(rebuilt);
}
void EqualityEngine::processEvaluationQueue() {
Debug("equality::evaluation") << d_name << "::eq::processEvaluationQueue(): start" << std::endl;
while (!d_evaluationQueue.empty()) {
// Get the node
EqualityNodeId id = d_evaluationQueue.front();
d_evaluationQueue.pop();
// Replace the children with their representatives (must be constants)
Node nodeEvaluated = evaluateTerm(d_nodes[id]);
Debug("equality::evaluation") << d_name << "::eq::processEvaluationQueue(): " << d_nodes[id] << " evaluates to " << nodeEvaluated << std::endl;
Assert(nodeEvaluated.isConst());
addTermInternal(nodeEvaluated);
EqualityNodeId nodeEvaluatedId = getNodeId(nodeEvaluated);
// Enqueue the semantic equality
enqueue(MergeCandidate(id, nodeEvaluatedId, MERGED_THROUGH_CONSTANTS, TNode::null()));
}
Debug("equality::evaluation") << d_name << "::eq::processEvaluationQueue(): done" << std::endl;
}
void EqualityEngine::propagate() {
  if (d_inPropagate) {
    // We're already in propagate, go back
    return;
  }

  // Make sure we don't get in again (flag is reset on scope exit)
  ScopedBool inPropagate(d_inPropagate, true);

  Debug("equality") << d_name << "::eq::propagate()" << std::endl;

  // Process merge candidates and evaluation requests until fixpoint
  while (!d_propagationQueue.empty() || !d_evaluationQueue.empty()) {

    if (d_done) {
      // If we're done (in conflict), just empty the queues
      while (!d_propagationQueue.empty()) d_propagationQueue.pop_front();
      while (!d_evaluationQueue.empty()) d_evaluationQueue.pop();
      continue;
    }

    // Process any evaluation requests first (they can enqueue new merges)
    if (!d_evaluationQueue.empty()) {
      processEvaluationQueue();
      continue;
    }

    // The current merge candidate
    const MergeCandidate current = d_propagationQueue.front();
    d_propagationQueue.pop_front();

    // Get the representatives
    EqualityNodeId t1classId = getEqualityNode(current.t1Id).getFind();
    EqualityNodeId t2classId = getEqualityNode(current.t2Id).getFind();

    // If already the same, we're done
    if (t1classId == t2classId) {
      continue;
    }

    Debug("equality::internal") << d_name << "::eq::propagate(): t1: " << (d_isInternal[t1classId] ? "internal" : "proper") << std::endl;
    Debug("equality::internal") << d_name << "::eq::propagate(): t2: " << (d_isInternal[t2classId] ? "internal" : "proper") << std::endl;

    // Get the nodes of the representatives
    EqualityNode& node1 = getEqualityNode(t1classId);
    EqualityNode& node2 = getEqualityNode(t2classId);

    Assert(node1.getFind() == t1classId);
    Assert(node2.getFind() == t2classId);

    // Add the actual equality to the equality graph (needed for explanations)
    addGraphEdge(current.t1Id, current.t2Id, current.type, current.reason);

    // If constants are being merged we're done
    if (d_isConstant[t1classId] && d_isConstant[t2classId]) {
      // When merging constants we are inconsistent, hence done
      d_done = true;
      // But in order to keep invariants (edges = 2*equalities) we put an equalities in
      // Note that we can explain this merge as we have a graph edge
      d_assertedEqualities.push_back(Equality(null_id, null_id));
      d_assertedEqualitiesCount = d_assertedEqualitiesCount + 1;
      // Notify
      if (d_performNotify) {
        d_notify.eqNotifyConstantTermMerge(d_nodes[t1classId], d_nodes[t2classId]);
      }
      // Empty the queue and exit
      continue;
    }

    // Vector to collect the triggered events
    std::vector<TriggerId> triggers;

    // Figure out the merge preference
    EqualityNodeId mergeInto = t1classId;
    if (d_isInternal[t2classId] != d_isInternal[t1classId]) {
      // We always keep non-internal nodes as representatives: if any node in
      // the class is non-internal, then the representative will be non-internal
      if (d_isInternal[t1classId]) {
        mergeInto = t2classId;
      } else {
        mergeInto = t1classId;
      }
    } else if (d_isConstant[t2classId] != d_isConstant[t1classId]) {
      // We always keep constants as representatives: if any (at most one) node
      // in the class in a constant, then the representative will be a constant
      if (d_isConstant[t2classId]) {
        mergeInto = t2classId;
      } else {
        mergeInto = t1classId;
      }
    } else if (node2.getSize() > node1.getSize()) {
      // We always merge into the bigger class to reduce the amount of traversing
      // we need to do
      mergeInto = t2classId;
    }

    if (mergeInto == t2classId) {
      Debug("equality") << d_name << "::eq::propagate(): merging " << d_nodes[current.t1Id]<< " into " << d_nodes[current.t2Id] << std::endl;
      d_assertedEqualities.push_back(Equality(t2classId, t1classId));
      d_assertedEqualitiesCount = d_assertedEqualitiesCount + 1;
      // merge() returns false when the merge produces a conflict
      if (!merge(node2, node1, triggers)) {
        d_done = true;
      }
    } else {
      Debug("equality") << d_name << "::eq::propagate(): merging " << d_nodes[current.t2Id] << " into " << d_nodes[current.t1Id] << std::endl;
      d_assertedEqualities.push_back(Equality(t1classId, t2classId));
      d_assertedEqualitiesCount = d_assertedEqualitiesCount + 1;
      if (!merge(node1, node2, triggers)) {
        d_done = true;
      }
    }

    // If not merging internal nodes, notify the master equality engine
    if (d_masterEqualityEngine && !d_isInternal[t1classId] && !d_isInternal[t2classId]) {
      d_masterEqualityEngine->assertEqualityInternal(d_nodes[t1classId], d_nodes[t2classId], TNode::null());
      d_masterEqualityEngine->propagate();
    }

    // Notify the triggers that fired during the merge
    if (d_performNotify && !d_done) {
      for (size_t trigger_i = 0, trigger_end = triggers.size(); trigger_i < trigger_end && !d_done; ++ trigger_i) {
        const TriggerInfo& triggerInfo = d_equalityTriggersOriginal[triggers[trigger_i]];
        if (triggerInfo.trigger.getKind() == kind::EQUAL) {
          // Special treatment for disequalities
          if (!triggerInfo.polarity) {
            // Store that we are propagating a disequality
            TNode equality = triggerInfo.trigger;
            EqualityNodeId original = getNodeId(equality);
            TNode lhs = equality[0];
            TNode rhs = equality[1];
            EqualityNodeId lhsId = getNodeId(lhs);
            EqualityNodeId rhsId = getNodeId(rhs);
            // We use the THEORY_LAST as a marker for "marked as propagated, reasons stored".
            // This tag is added to an internal theories set that is only inserted in, so this is
            // safe. Had we iterated over, or done other set operations this might be dangerous.
            if (!hasPropagatedDisequality(THEORY_LAST, lhsId, rhsId)) {
              if (!hasPropagatedDisequality(lhsId, rhsId)) {
                // First propagation of this disequality: record the reason
                d_deducedDisequalityReasons.push_back(EqualityPair(original, d_falseId));
              }
              storePropagatedDisequality(THEORY_LAST, lhsId, rhsId);
              if (!d_notify.eqNotifyTriggerEquality(triggerInfo.trigger, triggerInfo.polarity)) {
                d_done = true;
              }
            }
          } else {
            // Equalities are simple
            if (!d_notify.eqNotifyTriggerEquality(triggerInfo.trigger, triggerInfo.polarity)) {
              d_done = true;
            }
          }
        } else {
          // Predicate triggers
          if (!d_notify.eqNotifyTriggerPredicate(triggerInfo.trigger, triggerInfo.polarity)) {
            d_done = true;
          }
        }
      }
    }
  }
}
void EqualityEngine::debugPrintGraph() const {
  Debug("equality::graph") << std::endl << "Dumping graph" << std::endl;
  // One line per node: the node, its id and representative, then all its edges.
  for (EqualityNodeId nodeId = 0; nodeId < d_nodes.size(); ++ nodeId) {
    Debug("equality::graph") << d_nodes[nodeId] << " " << nodeId << "(" << getEqualityNode(nodeId).getFind() << "):";
    for (EqualityEdgeId edgeId = d_equalityGraph[nodeId]; edgeId != null_edge; ) {
      const EqualityEdge& edge = d_equalityEdges[edgeId];
      Debug("equality::graph") << " [" << edge.getNodeId() << "] " << d_nodes[edge.getNodeId()] << ":" << edge.getReason();
      edgeId = edge.getNext();
    }
    Debug("equality::graph") << std::endl;
  }
  Debug("equality::graph") << std::endl;
}
bool EqualityEngine::areEqual(TNode t1, TNode t2) const {
  Debug("equality") << d_name << "::eq::areEqual(" << t1 << "," << t2 << ")";
  Assert(hasTerm(t1));
  Assert(hasTerm(t2));
  // Two terms are equal iff their equivalence classes share a representative.
  const bool sameClass = getEqualityNode(t1).getFind() == getEqualityNode(t2).getFind();
  Debug("equality") << (sameClass ? "\t(YES)" : "\t(NO)") << std::endl;
  return sameClass;
}
bool EqualityEngine::areDisequal(TNode t1, TNode t2, bool ensureProof) const
{
  Debug("equality") << d_name << "::eq::areDisequal(" << t1 << "," << t2 << ")";

  // Both terms must be known to the engine
  Assert(hasTerm(t1));
  Assert(hasTerm(t2));

  // Get ids
  EqualityNodeId t1Id = getNodeId(t1);
  EqualityNodeId t2Id = getNodeId(t2);

  // If we propagated this disequality we're true
  if (hasPropagatedDisequality(t1Id, t2Id)) {
    Debug("equality") << "\t(YES)" << std::endl;
    return true;
  }

  // Get equivalence classes
  EqualityNodeId t1ClassId = getEqualityNode(t1Id).getFind();
  EqualityNodeId t2ClassId = getEqualityNode(t2Id).getFind();

  // We are semantically const, for remembering stuff
  EqualityEngine* nonConst = const_cast<EqualityEngine*>(this);

  // Check for constants: classes with distinct constant representatives are
  // disequal by definition
  if (d_isConstant[t1ClassId] && d_isConstant[t2ClassId] && t1ClassId != t2ClassId) {
    if (ensureProof) {
      // Record the reasons (t1 = c1, t2 = c2) so explainEquality can replay them
      nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t1Id, t1ClassId));
      nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t2Id, t2ClassId));
      nonConst->storePropagatedDisequality(THEORY_LAST, t1Id, t2Id);
    }
    Debug("equality") << "\t(YES)" << std::endl;
    return true;
  }

  // Create the equality over the representatives and check whether that term
  // exists and is asserted to false
  FunctionApplication eqNormalized(APP_EQUALITY, t1ClassId, t2ClassId);
  ApplicationIdsMap::const_iterator find = d_applicationLookup.find(eqNormalized);
  if (find != d_applicationLookup.end()) {
    if (getEqualityNode(find->second).getFind() == getEqualityNode(d_falseId).getFind()) {
      if (ensureProof) {
        // Reasons: t1 = lhs, (lhs = rhs) = false, t2 = rhs
        const FunctionApplication original = d_applications[find->second].original;
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t1Id, original.a));
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(find->second, d_falseId));
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t2Id, original.b));
        nonConst->storePropagatedDisequality(THEORY_LAST, t1Id, t2Id);
      }
      Debug("equality") << "\t(YES)" << std::endl;
      return true;
    }
  }

  // Check the symmetric disequality
  std::swap(eqNormalized.a, eqNormalized.b);
  find = d_applicationLookup.find(eqNormalized);
  if (find != d_applicationLookup.end()) {
    if (getEqualityNode(find->second).getFind() == getEqualityNode(d_falseId).getFind()) {
      if (ensureProof) {
        // Same as above with t1/t2 swapped against the stored orientation
        const FunctionApplication original = d_applications[find->second].original;
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t2Id, original.a));
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(find->second, d_falseId));
        nonConst->d_deducedDisequalityReasons.push_back(EqualityPair(t1Id, original.b));
        nonConst->storePropagatedDisequality(THEORY_LAST, t1Id, t2Id);
      }
      Debug("equality") << "\t(YES)" << std::endl;
      return true;
    }
  }

  // Couldn't deduce the disequality: the terms are not known to be disequal
  Debug("equality") << "\t(NO)" << std::endl;
  return false;
}
/**
 * Returns the number of members of t's equivalence class, adding t to the
 * engine first if it is not already present.
 */
size_t EqualityEngine::getSize(TNode t) {
  // Make sure the term is known to the engine
  addTermInternal(t);
  // The class size is stored on the representative's node
  const EqualityNodeId rep = getEqualityNode(t).getFind();
  return getEqualityNode(rep).getSize();
}
/**
 * Register a callback to be informed during path reconstruction for the
 * given merge-reason trigger id.
 */
void EqualityEngine::addPathReconstructionTrigger(unsigned trigger, const PathReconstructionNotify* notify) {
  // Currently we can only inform one callback per trigger
  Assert(d_pathReconstructionTriggers.find(trigger) == d_pathReconstructionTriggers.end());
  d_pathReconstructionTriggers[trigger] = notify;
}
/**
 * Returns a previously unused merge-reason type id and advances the
 * internal counter.
 */
unsigned EqualityEngine::getFreshMergeReasonType() {
  return d_freshMergeReasonType++;
}
/**
 * Add t as a trigger term for theory tag: the engine will notify the client
 * when t's equivalence class becomes equal (or disequal) to another class
 * containing a trigger term with the same tag.
 */
void EqualityEngine::addTriggerTerm(TNode t, TheoryId tag)
{
  Debug("equality::trigger") << d_name << "::eq::addTriggerTerm(" << t << ", " << tag << ")" << std::endl;
  Assert(tag != THEORY_LAST);
  // Nothing to do if the engine is already in conflict
  if (d_done) {
    return;
  }
  // Add the term if it's not already there
  addTermInternal(t);
  // Get the node id
  EqualityNodeId eqNodeId = getNodeId(t);
  EqualityNode& eqNode = getEqualityNode(eqNodeId);
  EqualityNodeId classId = eqNode.getFind();
  // Possibly existing set of triggers on this class
  TriggerTermSetRef triggerSetRef = d_nodeIndividualTrigger[classId];
  if (triggerSetRef != +null_set_id && getTriggerTermSet(triggerSetRef).hasTrigger(tag)) {
    // The equivalence class already has a tagged representative for this
    // tag, so just notify the client of the equality
    if (d_performNotify) {
      EqualityNodeId triggerId = getTriggerTermSet(triggerSetRef).getTrigger(tag);
      Debug("equality::trigger") << d_name << "::eq::addTriggerTerm(" << t << ", " << tag << "): already have this trigger in class with " << d_nodes[triggerId] << std::endl;
      if (eqNodeId != triggerId && !d_notify.eqNotifyTriggerTermEquality(tag, t, d_nodes[triggerId], true)) {
        d_done = true;
      }
    }
  } else {
    // Check for disequalities by going through the equivalence class looking for equalities in the
    // uselists that have been asserted to false. All the representatives appearing on the other
    // side of such disequalities, that have the tag on, are put in a set.
    TaggedEqualitiesSet disequalitiesToNotify;
    Theory::Set tags = Theory::setInsert(tag);
    getDisequalities(!d_isConstant[classId], classId, tags, disequalitiesToNotify);
    // Trigger data for the (re)built trigger set
    Theory::Set newSetTags;
    EqualityNodeId newSetTriggers[THEORY_LAST];
    unsigned newSetTriggersSize;
    // Setup the data for the new set
    if (triggerSetRef != null_set_id) {
      // Get the existing set
      TriggerTermSet& triggerSet = getTriggerTermSet(triggerSetRef);
      // Initialize the new set for copy/insert
      newSetTags = Theory::setInsert(tag, triggerSet.tags);
      newSetTriggersSize = 0;
      // Copy into the new one, and insert the new tag/id.
      // NOTE(review): this inner 'tags' intentionally shadows the outer
      // 'tags' used for getDisequalities/propagation above.
      unsigned i = 0;
      Theory::Set tags = newSetTags;
      TheoryId current;
      while ((current = Theory::setPop(tags)) != THEORY_LAST) {
        // Remove from the tags
        tags = Theory::setRemove(current, tags);
        // Insert the id into the triggers: the new term for its own tag,
        // the pre-existing trigger for every other tag
        newSetTriggers[newSetTriggersSize++] =
        current == tag ? eqNodeId : triggerSet.triggers[i++];
      }
    } else {
      // Setup a singleton set holding only the new trigger
      newSetTags = Theory::setInsert(tag);
      newSetTriggers[0] = eqNodeId;
      newSetTriggersSize = 1;
    }
    // Add it to the list for backtracking
    d_triggerTermSetUpdates.push_back(TriggerSetUpdate(classId, triggerSetRef));
    d_triggerTermSetUpdatesSize = d_triggerTermSetUpdatesSize + 1;
    // Mark the new set as the trigger set of the class
    d_nodeIndividualTrigger[classId] = triggerSetRef = newTriggerTermSet(newSetTags, newSetTriggers, newSetTriggersSize);
    // Propagate trigger term disequalities we remembered
    Debug("equality::trigger") << d_name << "::eq::addTriggerTerm(" << t << ", " << tag << "): propagating " << disequalitiesToNotify.size() << " disequalities " << std::endl;
    propagateTriggerTermDisequalities(tags, triggerSetRef, disequalitiesToNotify);
  }
}
/**
 * Returns true iff t's equivalence class carries a trigger term for the
 * given theory tag. Unknown terms are never trigger terms.
 */
bool EqualityEngine::isTriggerTerm(TNode t, TheoryId tag) const {
  if (!hasTerm(t)) {
    return false;
  }
  const EqualityNodeId rep = getEqualityNode(t).getFind();
  const TriggerTermSetRef setRef = d_nodeIndividualTrigger[rep];
  if (setRef == +null_set_id) {
    return false;
  }
  return getTriggerTermSet(setRef).hasTrigger(tag);
}
/**
 * Returns the trigger term of t's equivalence class for the given tag.
 * t must be a trigger term for tag (see isTriggerTerm).
 */
TNode EqualityEngine::getTriggerTermRepresentative(TNode t, TheoryId tag) const {
  Assert(isTriggerTerm(t, tag));
  const EqualityNodeId classId = getEqualityNode(t).getFind();
  const TriggerTermSet& triggerSet = getTriggerTermSet(d_nodeIndividualTrigger[classId]);
  // Triggers are stored in tag order: count how many tags precede the
  // requested one to find its index in the triggers array.
  Theory::Set remaining = triggerSet.tags;
  unsigned index = 0;
  while (Theory::setPop(remaining) != tag) {
    ++index;
  }
  return d_nodes[triggerSet.triggers[index]];
}
/**
 * Record the lookup funNormalized -> funId (with backtracking support).
 * If the application is an equality that can be decided eagerly —
 * reflexive, or between two distinct constants — the corresponding merge
 * with true/false is enqueued.
 */
void EqualityEngine::storeApplicationLookup(FunctionApplication& funNormalized, EqualityNodeId funId) {
  Assert(d_applicationLookup.find(funNormalized) == d_applicationLookup.end());
  // Register the lookup and remember it for backtracking
  d_applicationLookup[funNormalized] = funId;
  d_applicationLookups.push_back(funNormalized);
  d_applicationLookupsCount = d_applicationLookupsCount + 1;
  Debug("equality::backtrack") << "d_applicationLookupsCount = " << d_applicationLookupsCount << std::endl;
  Debug("equality::backtrack") << "d_applicationLookups.size() = " << d_applicationLookups.size() << std::endl;
  Assert(d_applicationLookupsCount == d_applicationLookups.size());
  // Only equalities can be decided here
  if (!funNormalized.isEquality()) {
    return;
  }
  if (funNormalized.a == funNormalized.b) {
    // Reflexive equality: merge with true
    enqueue(MergeCandidate(funId, d_trueId, MERGED_THROUGH_REFLEXIVITY, TNode::null()));
  } else if (d_isConstant[funNormalized.a] && d_isConstant[funNormalized.b]) {
    // Equality over two distinct constants: merge with false
    enqueue(MergeCandidate(funId, d_falseId, MERGED_THROUGH_CONSTANTS, TNode::null()));
  }
}
/**
 * Collect into output every function-application term whose use-list
 * mentions a member of t's equivalence class. Does nothing if t is not
 * known to the engine.
 */
void EqualityEngine::getUseListTerms(TNode t, std::set<TNode>& output) {
  if (!hasTerm(t)) {
    return;
  }
  // Walk the circular list that forms t's equivalence class
  const EqualityNodeId classId = getEqualityNode(t).getFind();
  EqualityNodeId currentId = classId;
  do {
    EqualityNode& currentNode = getEqualityNode(currentId);
    // Gather every application in this node's use-list
    for (UseListNodeId useId = currentNode.getUseList(); useId != null_uselist_id; ) {
      UseListNode& useNode = d_useListNodes[useId];
      output.insert(d_nodes[useNode.getApplicationId()]);
      useId = useNode.getNext();
    }
    // Move on to the next member of the class
    currentId = currentNode.getNext();
  } while (currentId != classId);
}
/**
 * Allocate a new trigger-term set (tags plus trigger node ids) inside the
 * trigger database and return a reference (byte offset) to it.
 *
 * Fix: the previous code doubled the allocation only once when growing,
 * which is insufficient whenever the requested size exceeds the remaining
 * capacity by more than a factor of two — the subsequent writes would then
 * run off the end of the buffer. We now keep doubling until the set fits.
 */
EqualityEngine::TriggerTermSetRef EqualityEngine::newTriggerTermSet(Theory::Set newSetTags, EqualityNodeId* newSetTriggers, unsigned newSetTriggersSize) {
  // Size of the required set: header plus the trigger ids
  size_t size = sizeof(TriggerTermSet) + newSetTriggersSize*sizeof(EqualityNodeId);
  // Align the size to 8 bytes
  size = (size + 7) & ~((size_t)7);
  // Reallocate if necessary, doubling until the new set fits
  if (d_triggerDatabaseSize + size > d_triggerDatabaseAllocatedSize) {
    do {
      d_triggerDatabaseAllocatedSize *= 2;
    } while (d_triggerDatabaseSize + size > d_triggerDatabaseAllocatedSize);
    // NOTE(review): realloc failure is not checked here, matching the
    // surrounding code's style; a failed realloc would leak and crash.
    d_triggerDatabase = (char*) realloc(d_triggerDatabase, d_triggerDatabaseAllocatedSize);
  }
  // The new set lives at the current end of the database
  TriggerTermSetRef newTriggerSetRef = d_triggerDatabaseSize;
  // Bump the used size
  d_triggerDatabaseSize = d_triggerDatabaseSize + size;
  // Copy the tags and the triggers into place
  TriggerTermSet& newSet = getTriggerTermSet(newTriggerSetRef);
  newSet.tags = newSetTags;
  for (unsigned i = 0; i < newSetTriggersSize; ++i) {
    newSet.triggers[i] = newSetTriggers[i];
  }
  // Return the new reference
  return newTriggerSetRef;
}
/**
 * Returns true iff the disequality lhsId != rhsId has already been
 * propagated (for any theory tag).
 */
bool EqualityEngine::hasPropagatedDisequality(EqualityNodeId lhsId, EqualityNodeId rhsId) const {
  const EqualityPair key(lhsId, rhsId);
  const bool propagated = d_propagatedDisequalities.find(key) != d_propagatedDisequalities.end();
#ifdef CVC4_ASSERTIONS
  // The propagation map and the reasons map must stay in sync
  const bool stored = d_disequalityReasonsMap.find(key) != d_disequalityReasonsMap.end();
  Assert(propagated == stored, "These two should be in sync");
#endif
  Debug("equality::disequality") << d_name << "::eq::hasPropagatedDisequality(" << d_nodes[lhsId] << ", " << d_nodes[rhsId] << ") => " << (propagated ? "true" : "false") << std::endl;
  return propagated;
}
/**
 * Returns true iff the disequality lhsId != rhsId has already been
 * propagated for the specific theory tag.
 */
bool EqualityEngine::hasPropagatedDisequality(TheoryId tag, EqualityNodeId lhsId, EqualityNodeId rhsId) const {
  const EqualityPair key(lhsId, rhsId);
  PropagatedDisequalitiesMap::const_iterator found = d_propagatedDisequalities.find(key);
  if (found == d_propagatedDisequalities.end()) {
    // Never propagated at all, so certainly not for this tag
    Assert(d_disequalityReasonsMap.find(key) == d_disequalityReasonsMap.end(), "Why do we have a proof if not propagated");
    Debug("equality::disequality") << d_name << "::eq::hasPropagatedDisequality(" << tag << ", " << d_nodes[lhsId] << ", " << d_nodes[rhsId] << ") => false" << std::endl;
    return false;
  }
  // Propagated for some tag: a proof must exist; check this tag's bit
  Assert(d_disequalityReasonsMap.find(key) != d_disequalityReasonsMap.end(), "We propagated but there is no proof");
  const bool result = Theory::setContains(tag, found->second);
  Debug("equality::disequality") << d_name << "::eq::hasPropagatedDisequality(" << tag << ", " << d_nodes[lhsId] << ", " << d_nodes[rhsId] << ") => " << (result ? "true" : "false") << std::endl;
  return result;
}
/**
 * Record that lhsId != rhsId has been propagated for the given theory tag.
 * If the caller pushed new entries onto d_deducedDisequalityReasons (so its
 * size exceeds d_deducedDisequalityReasonsSize), those entries become the
 * stored proof of the disequality; otherwise a proof must already exist.
 */
void EqualityEngine::storePropagatedDisequality(TheoryId tag, EqualityNodeId lhsId, EqualityNodeId rhsId) {
  Assert(!hasPropagatedDisequality(tag, lhsId, rhsId), "Check before you store it");
  Assert(lhsId != rhsId, "Wow, wtf!");
  Debug("equality::disequality") << d_name << "::eq::storePropagatedDisequality(" << tag << ", " << d_nodes[lhsId] << ", " << d_nodes[rhsId] << ")" << std::endl;
  // The pair is stored under both orientations
  EqualityPair pair1(lhsId, rhsId);
  EqualityPair pair2(rhsId, lhsId);
  // Store the fact that we've propagated this already, adding the tag to
  // whatever tags were already notified for this pair
  Theory::Set notified = 0;
  PropagatedDisequalitiesMap::const_iterator find = d_propagatedDisequalities.find(pair1);
  if (find == d_propagatedDisequalities.end()) {
    notified = Theory::setInsert(tag);
  } else {
    notified = Theory::setInsert(tag, (*find).second);
  }
  d_propagatedDisequalities[pair1] = notified;
  d_propagatedDisequalities[pair2] = notified;
  // Store the proof if provided (the caller pushed new reasons)
  if (d_deducedDisequalityReasons.size() > d_deducedDisequalityReasonsSize) {
    Debug("equality::disequality") << d_name << "::eq::storePropagatedDisequality(" << tag << ", " << d_nodes[lhsId] << ", " << d_nodes[rhsId] << "): storing proof" << std::endl;
    Assert(d_disequalityReasonsMap.find(pair1) == d_disequalityReasonsMap.end(), "There can't be a proof if you're adding a new one");
    // The proof is the slice of reasons pushed since the last stored size
    DisequalityReasonRef ref(d_deducedDisequalityReasonsSize, d_deducedDisequalityReasons.size());
#ifdef CVC4_ASSERTIONS
    // Check that the reasons are valid: each reason pair must currently
    // lie in one equivalence class
    for (unsigned i = ref.mergesStart; i < ref.mergesEnd; ++ i) {
      Assert(getEqualityNode(d_deducedDisequalityReasons[i].first).getFind() == getEqualityNode(d_deducedDisequalityReasons[i].second).getFind());
    }
#endif
    if (Debug.isOn("equality::disequality")) {
      for (unsigned i = ref.mergesStart; i < ref.mergesEnd; ++ i) {
        TNode lhs = d_nodes[d_deducedDisequalityReasons[i].first];
        TNode rhs = d_nodes[d_deducedDisequalityReasons[i].second];
        Debug("equality::disequality") << d_name << "::eq::storePropagatedDisequality(): because " << lhs << " == " << rhs << std::endl;
      }
    }
    // Store for backtracking
    d_deducedDisequalities.push_back(pair1);
    d_deducedDisequalitiesSize = d_deducedDisequalities.size();
    d_deducedDisequalityReasonsSize = d_deducedDisequalityReasons.size();
    // Store the proof reference under both orientations
    d_disequalityReasonsMap[pair1] = ref;
    d_disequalityReasonsMap[pair2] = ref;
  } else {
    Assert(d_disequalityReasonsMap.find(pair1) != d_disequalityReasonsMap.end(), "You must provide a proof initially");
  }
}
/**
 * Scan the use-lists of classId's equivalence class for equalities that
 * have been asserted to false. For each such disequality whose other side
 * carries trigger terms tagged with one of inputTags, a TaggedEquality is
 * appended to out. If the class turns out to be disequal to itself, out is
 * cleared and the conflict is left to be reported by the merge machinery.
 */
void EqualityEngine::getDisequalities(bool allowConstants, EqualityNodeId classId, Theory::Set inputTags, TaggedEqualitiesSet& out) {
  // Must be empty on input
  Assert(out.size() == 0);
  // The class we are looking at shouldn't already have any of the tags we are looking for
  Assert(d_nodeIndividualTrigger[classId] == null_set_id || Theory::setIntersection(getTriggerTermSet(d_nodeIndividualTrigger[classId]).tags, inputTags) == 0);
  if (inputTags == 0) {
    return;
  }
  // Set of equivalence classes already visited (through disequalities)
  std::set<EqualityNodeId> alreadyVisited;
  // Go through the equivalence class (circular list)
  EqualityNodeId currentId = classId;
  do {
    Debug("equality::trigger") << d_name << "::getDisequalities() : going through uselist of " << d_nodes[currentId] << std::endl;
    // Current node in the equivalence class
    EqualityNode& currentNode = getEqualityNode(currentId);
    // Go through the uselist and look for disequalities
    UseListNodeId currentUseId = currentNode.getUseList();
    while (currentUseId != null_uselist_id) {
      UseListNode& useListNode = d_useListNodes[currentUseId];
      EqualityNodeId funId = useListNode.getApplicationId();
      Debug("equality::trigger") << d_name << "::getDisequalities() : checking " << d_nodes[funId] << std::endl;
      const FunctionApplication& fun = d_applications[useListNode.getApplicationId()].original;
      // If it's an equality asserted to false, we do the work
      if (fun.isEquality() && getEqualityNode(funId).getFind() == getEqualityNode(d_false).getFind()) {
        // Get the other member of the equality (lhs records which side
        // the current class is on)
        bool lhs = false;
        EqualityNodeId toCompare = fun.b;
        if (toCompare == currentId) {
          toCompare = fun.a;
          lhs = true;
        }
        // Representative of the other member
        EqualityNodeId toCompareRep = getEqualityNode(toCompare).getFind();
        if (toCompareRep == classId) {
          // We're in conflict (class disequal to itself), so we will send
          // it out from merge
          out.clear();
          return;
        }
        // Check if we already have this one
        if (alreadyVisited.count(toCompareRep) == 0) {
          // Mark as visited
          alreadyVisited.insert(toCompareRep);
          // Get the trigger set
          TriggerTermSetRef toCompareTriggerSetRef = d_nodeIndividualTrigger[toCompareRep];
          // We only care if we're not both constants and there are trigger terms in the other class
          if ((allowConstants || !d_isConstant[toCompareRep]) && toCompareTriggerSetRef != null_set_id) {
            // Tags of the other class
            TriggerTermSet& toCompareTriggerSet = getTriggerTermSet(toCompareTriggerSetRef);
            // We only care if there are things in inputTags that are also in toCompareTags
            Theory::Set commonTags = Theory::setIntersection(inputTags, toCompareTriggerSet.tags);
            if (commonTags) {
              out.push_back(TaggedEquality(funId, toCompareTriggerSetRef, lhs));
            }
          }
        }
      }
      // Go to the next one in the use list
      currentUseId = useListNode.getNext();
    }
    // Next in equivalence class
    currentId = currentNode.getNext();
  } while (!d_done && currentId != classId);
}
/**
 * Propagate the disequalities gathered by getDisequalities() to the trigger
 * terms of the given trigger set, for every theory tag shared between that
 * set and each disequality's trigger set. Returns true iff the engine is
 * still consistent (i.e. !d_done).
 */
bool EqualityEngine::propagateTriggerTermDisequalities(Theory::Set tags, TriggerTermSetRef triggerSetRef, const TaggedEqualitiesSet& disequalitiesToNotify) {
  // Without tags there is nothing to propagate
  if (!tags) {
    return !d_done;
  }
  Assert(triggerSetRef != null_set_id);
  // This is the class trigger set
  const TriggerTermSet& triggerSet = getTriggerTermSet(triggerSetRef);
  // Go through the disequalities and notify
  TaggedEqualitiesSet::const_iterator it = disequalitiesToNotify.begin();
  TaggedEqualitiesSet::const_iterator it_end = disequalitiesToNotify.end();
  for (; !d_done && it != it_end; ++ it) {
    // The information about the equality that is asserted to false
    const TaggedEquality& disequalityInfo = *it;
    const TriggerTermSet& disequalityTriggerSet = getTriggerTermSet(disequalityInfo.triggerSetRef);
    Theory::Set commonTags = Theory::setIntersection(disequalityTriggerSet.tags, tags);
    Assert(commonTags);
    // This is the actual function application asserted to false
    const FunctionApplication& fun = d_applications[disequalityInfo.equalityId].original;
    // Figure out which side of the original equality is ours (myCompare)
    // and which belongs to the other class (toCompare)
    EqualityNodeId toCompare = disequalityInfo.lhs ? fun.a : fun.b;
    EqualityNodeId myCompare = disequalityInfo.lhs ? fun.b : fun.a;
    if (getEqualityNode(toCompare).getFind() == getEqualityNode(myCompare).getFind()) {
      // We're propagating a != a, which means we're inconsistent, just bail and let it go into
      // a regular conflict
      return !d_done;
    }
    // Go through the common tags, and add the disequalities
    TheoryId currentTag;
    while (!d_done && ((currentTag = Theory::setPop(commonTags)) != THEORY_LAST)) {
      // Get the tag representatives on both sides
      EqualityNodeId tagRep = disequalityTriggerSet.getTrigger(currentTag);
      EqualityNodeId myRep = triggerSet.getTrigger(currentTag);
      // Propagate, unless already propagated for this tag
      if (!hasPropagatedDisequality(currentTag, myRep, tagRep)) {
        // Construct the proof if not there already (for any tag)
        if (!hasPropagatedDisequality(myRep, tagRep)) {
          d_deducedDisequalityReasons.push_back(EqualityPair(myCompare, myRep));
          d_deducedDisequalityReasons.push_back(EqualityPair(toCompare, tagRep));
          d_deducedDisequalityReasons.push_back(EqualityPair(disequalityInfo.equalityId, d_falseId));
        }
        // Store the propagation
        storePropagatedDisequality(currentTag, myRep, tagRep);
        // Notify the client; it may signal conflict by returning false
        if (d_performNotify) {
          if (!d_notify.eqNotifyTriggerTermEquality(currentTag, d_nodes[myRep], d_nodes[tagRep], false)) {
            d_done = true;
          }
        }
      }
    }
  }
  return !d_done;
}
// Default constructor: an unattached iterator at position 0.
EqClassesIterator::EqClassesIterator() : d_ee(NULL), d_it(0) {}
/**
 * Construct an iterator over the equivalence classes of the given engine,
 * positioned on the first class representative.
 *
 * Fix: the skip over leading internal / non-representative nodes must be a
 * loop, not a single test — with the previous 'if', the iterator could
 * start on a node that is internal or not its class representative when
 * more than one such node precedes the first representative. The loop now
 * mirrors the advancing logic in operator++.
 */
EqClassesIterator::EqClassesIterator(const eq::EqualityEngine* ee)
    : d_ee(ee)
{
  Assert(d_ee->consistent());
  d_it = 0;
  // Go to the first non-internal node that is its own representative
  while (d_it < d_ee->d_nodesCount && (d_ee->d_isInternal[d_it] || d_ee->getEqualityNode(d_it).getFind() != d_it)) {
    ++d_it;
  }
}
// Dereference: the representative node of the current equivalence class.
Node EqClassesIterator::operator*() const { return d_ee->d_nodes[d_it]; }
// Two iterators are equal when they walk the same engine at the same position.
bool EqClassesIterator::operator==(const EqClassesIterator& i) const {
  if (d_ee != i.d_ee) {
    return false;
  }
  return d_it == i.d_it;
}
// Negation of equality (operator== is symmetric).
bool EqClassesIterator::operator!=(const EqClassesIterator& i) const {
  return !(i == *this);
}
// Pre-increment: advance to the next non-internal node that is its own
// representative (i.e. the next equivalence class).
EqClassesIterator& EqClassesIterator::operator++() {
  for (++d_it; d_it < d_ee->d_nodesCount; ++d_it) {
    if (!d_ee->d_isInternal[d_it] && d_ee->getEqualityNode(d_it).getFind() == d_it) {
      break;
    }
  }
  return *this;
}
// Post-increment: remember the current position, advance, return the old one.
EqClassesIterator EqClassesIterator::operator++(int) {
  EqClassesIterator old(*this);
  operator++();
  return old;
}
// Finished once the position has run past the last node of the engine.
bool EqClassesIterator::isFinished() const {
  return !(d_it < d_ee->d_nodesCount);
}
// Default constructor: a finished iterator attached to no engine.
EqClassIterator::EqClassIterator() : d_ee(NULL), d_start(null_id), d_current(null_id) {}
/**
 * Construct an iterator over the members of eqc's equivalence class.
 * eqc must be the representative of its class and must not be an
 * internal node.
 */
EqClassIterator::EqClassIterator(Node eqc, const eq::EqualityEngine* ee)
    : d_ee(ee)
{
  Assert(d_ee->consistent());
  // Start at the representative of the class
  d_current = d_start = d_ee->getNodeId(eqc);
  Assert(d_start == d_ee->getEqualityNode(d_start).getFind());
  Assert (!d_ee->d_isInternal[d_start]);
}
// Dereference: the current member of the equivalence class.
Node EqClassIterator::operator*() const { return d_ee->d_nodes[d_current]; }
// Two iterators are equal when they walk the same engine at the same member.
bool EqClassIterator::operator==(const EqClassIterator& i) const {
  if (d_ee != i.d_ee) {
    return false;
  }
  return d_current == i.d_current;
}
// Negation of equality (operator== is symmetric).
bool EqClassIterator::operator!=(const EqClassIterator& i) const {
  return !(i == *this);
}
/**
 * Pre-increment: advance to the next non-internal member of the class;
 * becomes finished (null_id) once the circular list wraps back to the
 * starting representative.
 */
EqClassIterator& EqClassIterator::operator++() {
  Assert(!isFinished());
  // Invariant: we sit inside d_start's class, on a non-internal node
  Assert(d_start == d_ee->getEqualityNode(d_current).getFind());
  Assert(!d_ee->d_isInternal[d_current]);
  // Find the next one, skipping internal nodes
  do {
    d_current = d_ee->getEqualityNode(d_current).getNext();
  } while(d_ee->d_isInternal[d_current]);
  Assert(d_start == d_ee->getEqualityNode(d_current).getFind());
  Assert(!d_ee->d_isInternal[d_current]);
  if(d_current == d_start) {
    // We end when we have cycled back to the original representative
    d_current = null_id;
  }
  return *this;
}
// Post-increment: remember the current position, advance, return the old one.
EqClassIterator EqClassIterator::operator++(int) {
  EqClassIterator old(*this);
  operator++();
  return old;
}
// Finished when the position has been reset to the null id by operator++.
bool EqClassIterator::isFinished() const {
  return null_id == d_current;
}
/**
 * Print this proof node (and, recursively, its children) to the debug
 * channel c, indented by tb levels. If prettyPrinter is non-null it is used
 * to render the proof rule tag; otherwise the raw id is printed.
 */
void EqProof::debug_print(const char* c, unsigned tb, PrettyPrinter* prettyPrinter) const {
  // Indentation
  for(unsigned i=0; i<tb; i++) { Debug( c ) << " "; }
  // The proof rule tag
  if (prettyPrinter)
    Debug( c ) << prettyPrinter->printTag(d_id);
  else
    Debug( c ) << d_id;
  Debug( c ) << "(";
  if( !d_children.empty() || !d_node.isNull() ){
    // The conclusion node, if any, on its own indented line
    if( !d_node.isNull() ){
      Debug( c ) << std::endl;
      for( unsigned i=0; i<tb+1; i++ ) { Debug( c ) << " "; }
      Debug( c ) << d_node;
    }
    // Children are comma-separated, each printed one level deeper
    for( unsigned i=0; i<d_children.size(); i++ ){
      if( i>0 || !d_node.isNull() ) Debug( c ) << ",";
      Debug( c ) << std::endl;
      d_children[i]->debug_print( c, tb+1, prettyPrinter );
    }
  }
  Debug( c ) << ")" << std::endl;
}
} // Namespace uf
} // Namespace theory
} // Namespace CVC4
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.