id
int64 0
877k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
66
| repo_stars
int64 94
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 11
values | repo_extraction_date
stringclasses 197
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,531,978
|
SampleSection.h
|
andreacasalino_Easy-Factor-Graph/samples/Helpers/SampleSection.h
|
/**
* Author: Andrea Casalino
* Created: 01.01.2021
*
* report any bug to andrecasa91@gmail.com.
**/
#pragma once
#include <iostream>
#include <string>
// Runs BODY between decorated banner lines, printing DESCRIPTION first and,
// when DOC_SECTION is non-empty, a pointer to the matching section of the
// documentation.
//
// DESCRIPTION: human-readable title of the sample section.
// DOC_SECTION: documentation section number; pass "" to omit the reference.
// BODY:        callable taking no arguments, executed between the banners.
template <typename SampleBody>
void SAMPLE_SECTION(const std::string &DESCRIPTION,
                    const std::string &DOC_SECTION, SampleBody BODY) {
  std::cout << "-------------------------------------------------------------"
               "-------------------------\n\n";
  std::cout << DESCRIPTION;
  // FIX: DOC_SECTION is already a std::string; the old code copied it into a
  // fresh std::string inside the if-init just to call empty() on it.
  if (!DOC_SECTION.empty()) {
    std::cout << " , refer to Section " << DOC_SECTION
              << " of the documentation";
  }
  std::cout << "\n-----------------------------------------------------------"
               "---------------------------\n\n";
  BODY();
  std::cout << "\n=========================================================="
               "============================\n";
} // FIX: dropped the stray ';' after the function body (it was an empty
  // declaration, flagged by -pedantic).
| 939
|
C++
|
.h
| 26
| 30.769231
| 78
| 0.407489
|
andreacasalino/Easy-Factor-Graph
| 32
| 4
| 1
|
GPL-3.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,979
|
Printing.h
|
andreacasalino_Easy-Factor-Graph/samples/Helpers/Printing.h
|
/**
* Author: Andrea Casalino
* Created: 01.01.2021
*
* report any bug to andrecasa91@gmail.com.
**/
#pragma once
#include <EasyFactorGraph/categoric/GroupRange.h>
#include <EasyFactorGraph/factor/Function.h>
#include <ostream>
// Stream helpers used by the samples to pretty-print EFG entities.
// Each overload writes a human-readable rendering of its argument to `s`
// and returns `s` (definitions live in the matching .cpp).
std::ostream &operator<<(std::ostream &s,
                         const EFG::categoric::VariablesSoup &group);
std::ostream &operator<<(std::ostream &s, const EFG::categoric::Group &group);
std::ostream &operator<<(std::ostream &s, const std::vector<float> &values);
std::ostream &operator<<(std::ostream &s,
                         const std::vector<std::size_t> &values);
std::ostream &operator<<(std::ostream &s,
                         const EFG::factor::Function &distribution);
| 733
|
C++
|
.h
| 18
| 34.833333
| 78
| 0.657709
|
andreacasalino/Easy-Factor-Graph
| 32
| 4
| 1
|
GPL-3.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,980
|
Frequencies.h
|
andreacasalino_Easy-Factor-Graph/samples/Helpers/Frequencies.h
|
/**
* Author: Andrea Casalino
* Created: 01.01.2021
*
* report any bug to andrecasa91@gmail.com.
**/
#pragma once
#include <EasyFactorGraph/structure/GibbsSampler.h>
// Empirical marginal distribution of var2Search, estimated by counting its
// values across `samples`; each sample is ordered like `samplesGroup`.
std::vector<float>
getEmpiricalMarginals(EFG::categoric::VariablePtr var2Search,
                      const std::vector<std::vector<std::size_t>> &samples,
                      const EFG::categoric::VariablesSoup &samplesGroup);
// Empirical probability of observing the combination `comb2Search` (whose
// variables are `combGroup`) within `samples`.
float getEmpiricalProbability(
    const std::vector<std::size_t> &comb2Search,
    const EFG::categoric::VariablesSoup &combGroup,
    const std::vector<std::vector<std::size_t>> &samples,
    const EFG::categoric::VariablesSoup &samplesGroup);
// Presumably normalizes `values` into a probability distribution
// (entries summing to 1) -- NOTE(review): confirm against the definition.
std::vector<float> make_distribution(const std::vector<float> &values);
| 730
|
C++
|
.h
| 18
| 35.666667
| 75
| 0.714286
|
andreacasalino/Easy-Factor-Graph
| 32
| 4
| 1
|
GPL-3.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,982
|
gamma.h
|
marcmerlin_Framebuffer_GFX/gamma.h
|
#ifndef _GAMMA_H_
#define _GAMMA_H_
// Gamma-correction lookup tables mapping linear 5-bit (gamma5) and 6-bit
// (gamma6) colour components to gamma-corrected 8-bit values.
// NOTE(review): the guard name _GAMMA_H_ is a reserved identifier (leading
// underscore + capital); kept unchanged in case other files test it.
// FIX: include <stdint.h> explicitly -- uint8_t was previously pulled in
// only through a transitive include, which breaks on some toolchains.
#include <stdint.h>
#ifdef __AVR
#include <avr/pgmspace.h>
#elif defined(ESP8266)
#include <pgmspace.h>
#else
// Targets without a flash/RAM split: PROGMEM becomes a no-op.
#ifndef PROGMEM
#define PROGMEM
#endif
#endif
static const uint8_t PROGMEM
gamma5[] = {
    0x00,0x01,0x02,0x03,0x05,0x07,0x09,0x0b,
    0x0e,0x11,0x14,0x18,0x1d,0x22,0x28,0x2e,
    0x36,0x3d,0x46,0x4f,0x59,0x64,0x6f,0x7c,
    0x89,0x97,0xa6,0xb6,0xc7,0xd9,0xeb,0xff },
gamma6[] = {
    0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x08,
    0x09,0x0a,0x0b,0x0d,0x0e,0x10,0x12,0x13,
    0x15,0x17,0x19,0x1b,0x1d,0x20,0x22,0x25,
    0x27,0x2a,0x2d,0x30,0x33,0x37,0x3a,0x3e,
    0x41,0x45,0x49,0x4d,0x52,0x56,0x5b,0x5f,
    0x64,0x69,0x6e,0x74,0x79,0x7f,0x85,0x8b,
    0x91,0x97,0x9d,0xa4,0xab,0xb2,0xb9,0xc0,
    0xc7,0xcf,0xd6,0xde,0xe6,0xee,0xf7,0xff };
#endif // _GAMMA_H_
| 804
|
C++
|
.h
| 27
| 26.518519
| 46
| 0.718346
|
marcmerlin/Framebuffer_GFX
| 30
| 6
| 0
|
GPL-3.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,985
|
WhiteBoxInterpreter.cpp
|
CryptoExperts_wyverse/lib/ExecutionEngine/Interpreter/WhiteBoxInterpreter.cpp
|
//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the top-level functionality for the LLVM interpreter.
// This interpreter is designed to be a very simple, portable, inefficient
// interpreter.
//
//===----------------------------------------------------------------------===//
#include "Interpreter.h"
#include "WhiteBoxInterpreter.h"
#include <cstring>
using namespace llvm;
namespace {
// Static registrator: constructing this object during static initialization
// installs the white-box interpreter factory into the engine machinery.
struct RegisterWBInterp {
  RegisterWBInterp() { WhiteBoxInterpreter::Register(); }
};
RegisterWBInterp WBInterpRegistrator; // anonymous namespace already gives internal linkage
} // namespace
// Dummy anchor symbol: client code references this function to force the
// linker to keep this object file (and thus run the static registrator).
extern "C" void LLVMLinkInWhiteBoxInterpreter() { }
/// Create a new white-box interpreter object.
///
/// M:      module to execute; fully materialized here before the engine is
///         built.
/// action: hook object driven around each interpreted instruction
///         (ownership is not taken by this function).
/// ErrStr: optional out-parameter receiving a message on failure.
/// Returns the new engine, or nullptr if materialization fails.
ExecutionEngine *WhiteBoxInterpreter::create(std::unique_ptr<Module> M,
                                             Action *action,
                                             std::string *ErrStr) {
  // Tell this Module to materialize everything and release the GVMaterializer.
  if (Error Err = M->materializeAll()) {
    std::string Msg;
    // handleAllErrors consumes Err -- an llvm::Error must not be destroyed
    // while still holding an unchecked failure.
    handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
      Msg = EIB.message();
    });
    if (ErrStr)
      *ErrStr = Msg;
    // We got an error, just return 0
    return nullptr;
  }
  return new WhiteBoxInterpreter(std::move(M), action);
}
| 1,451
|
C++
|
.cpp
| 42
| 32.047619
| 80
| 0.634807
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,986
|
Action.cpp
|
CryptoExperts_wyverse/lib/ExecutionEngine/Interpreter/Action.cpp
|
#include "llvm/ExecutionEngine/Action.h"
#include "Interpreter.h"
namespace llvm {
// Innermost (current) execution context, or nullptr when the interpreter
// call stack is empty.
ExecutionContext *ECStackAccessor::currentEC() {
  return ECStack->empty() ? nullptr : &ECStack->back();
}
// Wires this action to the interpreter driving it (called by the engine).
void Action::setInterpreter(Interpreter * interpreter) {
  this->interpreter = interpreter;
}
// Accessor for the interpreter installed via setInterpreter().
Interpreter * Action::getInterpreter() {
  return interpreter;
}
namespace SampleKind {
// These are actually bitmasks that get or-ed together.
enum Kind {
  MemoryRead = 0x1,
  MemoryWrite = 0x2,
  StackRead = 0x4,
  StackWrite = 0x8,
  // FIX: was 0x16 (0b10110), which overlaps MemoryWrite, StackRead and
  // StackWrite when or-ed; the next free single bit is 0x10.
  RegisterStor = 0x10,
};
}
using byte = unsigned char;
// One recorded event in the execution trace: the access category, the
// number of bytes touched, and the raw payload bytes.
struct TraceSample {
  SampleKind::Kind kind; // access category (bitmask value)
  uint8_t size;          // payload size in bytes
  SmallVector<uint8_t, 8> data; // payload, stored inline up to 8 bytes
  // NOTE(review): `data` is taken and stored by value (copied) -- fine for
  // small samples, consider moving if payloads grow.
  TraceSample(SampleKind::Kind kind, uint8_t size, SmallVector<uint8_t, 8> data)
      : kind(kind), size(size), data(data) {}
};
// Placeholder for future filtering of trace samples (e.g. by kind or
// access width, cf. the -memory-read/-memory-write options); empty for now.
class TraceSampleFilter {
};
unsigned getAPIntNumBytes(APInt Val) {
assert(Val.getBitWidth() % 8 == 0);
return Val.getBitWidth() / 8;
}
// Prints Val to the trace: 1-bit values as a boolean, anything wider as an
// integer, always followed by the bit width.
void TraceProcessor::traceAPInt(APInt Val) {
  const bool isSingleBit = (Val.getBitWidth() == 1);
  if (isSingleBit)
    outs() << Val.getBoolValue();
  else
    outs() << Val;
  outs() << " " << Val.getBitWidth() << "\n";
}
// Emits GV to the trace according to Ty: integer vectors element-wise,
// scalar integers directly. Pointers are reported as unhandled but
// execution continues; any other type aborts.
void TraceProcessor::trace(GenericValue GV, Type *Ty) {
  if (Ty->isVectorTy() &&
      cast<VectorType>(Ty)->getElementType()->isIntegerTy()) {
    // Vector of integers: trace each lane.
    for (unsigned i = 0; i < GV.AggregateVal.size(); ++i)
      traceAPInt(GV.AggregateVal[i].IntVal);
  } else if (Ty->isIntegerTy()) {
    traceAPInt(GV.IntVal);
  } else if (Ty->isPointerTy()) {
    // Pointers are not traced yet; warn but keep executing (same message as
    // the fatal branch below, deliberately without the unreachable).
    dbgs() << "Unhandled type: " << *Ty << " (" << Ty->getTypeID() << ")\n";
  } else {
    dbgs() << "Unhandled type: " << *Ty << " (" << Ty->getTypeID() << ")\n";
    llvm_unreachable(nullptr);
  }
}
// Delegates operand evaluation to the owning interpreter so traced values
// match exactly what the interpreter computed for frame SF.
GenericValue TraceProcessor::getOperandValue(Value *I, ExecutionContext &SF) {
  return action->getInterpreter()->getOperandValue(I, SF);
}
// Traces the current value of Val as evaluated in the innermost stack frame.
// FIX: currentEC() returns nullptr when the call stack is empty, and the
// old code dereferenced the result unconditionally; bail out instead.
void TraceProcessor::defaultVisitor(Value &Val) {
  ExecutionContext *SF = currentEC();
  if (!SF)
    return;
  Type *Ty = Val.getType();
  GenericValue GV = getOperandValue(&Val, *SF);
  trace(GV, Ty);
}
// A store is traced through the value being written (operand 0).
void TraceProcessor::visitStoreInst(StoreInst &I) {
  defaultVisitor(*I.getOperand(0));
}
// Traces the value produced by a `ret`, observed from the caller's side.
void TraceProcessor::visitReturnInst(ReturnInst &I) {
  // return if calling the `main` function (the entry point)
  ExecutionContext *CallingSF = currentEC();
  if (!CallingSF)
    return;
  // The caller frame's cursor already points past the call site; step back
  // one instruction to reach the call that is returning, then restore the
  // cursor so interpretation resumes correctly.
  CallingSF->CurInst--;
  Instruction &CallerInst = *(CallingSF->CurInst);
  CallingSF->CurInst++;
  // only trace non-void returns
  if (!CallerInst.getType()->isVoidTy()) {
    defaultVisitor(CallerInst);
  }
}
// Skeleton for a future fault-injection action; no behaviour implemented.
class FaultAction : public Action {
public:
  // TODO
};
// Factory: maps a command-line action name to a freshly allocated Action.
// Returns nullptr (after printing a warning) for unknown names; the caller
// owns the returned object.
Action * ActionFactory::createAction(const char * actionType) {
  if (strcmp(actionType, "helloworld") == 0) {
    return new HelloWorldAction();
  } else if (strcmp(actionType, "trace") == 0) {
    return new TraceAction();
  } else {
    errs() << "unknown action " << actionType << " ignored!\n";
    // FIX: nullptr instead of the C macro NULL, per modern C++ style.
    return nullptr;
  }
}
// One-line description shown in the "Enabled actions" listing.
// FIX: corrected user-visible typo "binray" -> "binary".
void TraceAction::print(raw_ostream &ROS) {
  ROS << "TraceAction => Trace memory / register while executing your binary.\n";
}
}
| 3,142
|
C++
|
.cpp
| 105
| 26.904762
| 81
| 0.66844
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,987
|
WhiteBoxExecution.cpp
|
CryptoExperts_wyverse/lib/ExecutionEngine/Interpreter/WhiteBoxExecution.cpp
|
//===-- Execution.cpp - Implement code to simulate the program ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the actual instruction interpreter.
//
//===----------------------------------------------------------------------===//
#include "WhiteBoxInterpreter.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/ExecutionEngine/Action.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cmath>
using namespace llvm;
#define DEBUG_TYPE "WhiteBoxInterpreter"
STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
// Builds an interpreter over M and wires the given action to it.
// `action` is borrowed, not owned; it receives a back-pointer so its hooks
// can query interpreter state.
// FIX: `action` is now set in the member-initializer list instead of being
// assigned in the body.
WhiteBoxInterpreter::WhiteBoxInterpreter(std::unique_ptr<Module> M, Action *action)
    : Interpreter(std::move(M)), action(action) {
  this->action->setInterpreter(this);
}
/// run - Start execution with the specified function and arguments.
///
/// F:         entry function; must be non-null.
/// ArgValues: caller-supplied arguments, trimmed to F's declared parameter
///            count (see the comment below).
/// Returns the interpreter's exit value once the call stack drains.
GenericValue WhiteBoxInterpreter::runFunction(Function *F,
                                              ArrayRef<GenericValue> ArgValues) {
  assert (F && "Function *F was null at entry to run()");
  // Try extra hard not to pass extra args to a function that isn't
  // expecting them.  C programmers frequently bend the rules and
  // declare main() with fewer parameters than it actually gets
  // passed, and the interpreter barfs if you pass a function more
  // parameters than it is declared to take. This does not attempt to
  // take into account gratuitous differences in declared types,
  // though.
  const size_t ArgCount = F->getFunctionType()->getNumParams();
  ArrayRef<GenericValue> ActualArgs =
      ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
  // Set up the function call.
  callFunction(F, ActualArgs);
  // Start executing the function.
  run();
  return this->ExitValue;
}
// Main interpreter loop: executes instructions until the call stack
// empties, invoking the action hooks around every single instruction.
void WhiteBoxInterpreter::run() {
  // Give the action (chain) access to the live call stack.
  action->setECStack(&ECStack);
  while (!ECStack.empty()) {
    // Interpret a single instruction & increment the "PC".
    ExecutionContext &SF = ECStack.back();  // Current stack frame
    Instruction &I = *SF.CurInst++;         // Increment before execute
    // Track the number of dynamic instructions executed.
    ++NumDynamicInsts;
    LLVM_DEBUG(dbgs() << "About to interpret: " << I);
    action->beforeVisitInst(I, SF);
    // The action may veto execution of this instruction.
    if (!action->skipExecuteInst(I)) {
      // outs() << I << "\n";
      visit(I);   // Dispatch to one of the visit* methods...
    }
    action->afterVisitInst(I, SF);
  }
}
//===----------------------------------------------------------------------===//
// control flow instruction
//===----------------------------------------------------------------------===//
// RETURN instruction
// ex: ret i32 0
// visitReturnInst(ReturnInst &I);
// branch instruction
// ex: br label %14 (unconditional branch)
// ex: br i1 %cond, label %IfEqual, label %IfUnequal (conditional branch)
// TraceAction: neither type is to be traced
// FaultAction: FIXME
// visitBranchInst(BranchInst &I);
// switch instruction
// ex: switch i32 %10, label %14 [
// i32 10, label %11
// ]
// TraceAction: not act
// FaultAction: TODO
// visitSwitchInst(SwitchInst &I);
// FIXME: to finish the implementation
// visitIndirectBrInst(IndirectBrInst &I);
//===----------------------------------------------------------------------===//
// binary operators and comparisons
//===----------------------------------------------------------------------===//
// Binary Operators
// ex: %16 = sub nsw i32 %15, 19
// visitBinaryOperator(BinaryOperator &I);
// icmp instruction
// ex: %5 = icmp sgt i32 %4, 10
// visitICmpInst(ICmpInst &I)
// fcmp instruction
// ex: %21 = fcmp ogt double %20, 1.000000e+0
// visitFCmpInst(FCmpInst &I)
//===----------------------------------------------------------------------===//
// memory operators
//===----------------------------------------------------------------------===//
// LOAD instruction
// ex: %4 = load i32, i32* %2, align 4
// visitLoadInst(LoadInst &I);
// STORE instruction
// ex: store i32 0, i32* %1, align 4
// visitStoreInst(StoreInst &I);
// GetElementPtr instruction
// ex: %32 = getelementptr inbounds [3 x i32], [3 x i32]* %4, i64 0, i64 %31
// visitGetElementPtrInst(GetElementPtrInst &I);
//===----------------------------------------------------------------------===//
// ternary operator
//===----------------------------------------------------------------------===//
// SELECT instruction: ternary operator
// ex: %X = select i1 true, i8 17, i8 42
// visitSelectInst(SelectInst &I)
//===----------------------------------------------------------------------===//
// call
//===----------------------------------------------------------------------===//
// visitCallInst(CallInst &I)
// visitInvokeInst(InvokeInst &I)
//===----------------------------------------------------------------------===//
// shift instructions
//===----------------------------------------------------------------------===//
// visitShl(BinaryOperator &I)
// visitLShr(BinaryOperator &I)
// visitAShr(BinaryOperator &I)
//===----------------------------------------------------------------------===//
// Miscellaneous Instruction Implementations
//===----------------------------------------------------------------------===//
// visitVAArgInst(VAArgInst &I)
// visitExtractElementInst(ExtractElementInst &I)
// visitInsertElementInst(InsertElementInst &I)
// visitShuffleVectorInst(ShuffleVectorInst &I)
// visitExtractValueInst(ExtractValueInst &I)
// visitInsertValueInst(InsertValueInst &I)
//
| 6,032
|
C++
|
.cpp
| 146
| 39.328767
| 83
| 0.569254
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,989
|
wyverse.cpp
|
CryptoExperts_wyverse/tools/wyverse/wyverse.cpp
|
//===- wyverse.cpp - LLVM Interpreter / Dynamic compiler ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This utility provides a simple wrapper around the LLVM Execution Engines,
// which allow the direct execution of LLVM programs through an interpreter.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/llvm-config.h"
#include "llvm/ExecutionEngine/Action.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Support/InitLLVM.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/WithColor.h"
#include "logo-ascii.inc"
#include <cerrno>
#ifdef __CYGWIN__
#include <cygwin/version.h>
#if defined(CYGWIN_VERSION_DLL_MAJOR) && CYGWIN_VERSION_DLL_MAJOR<1007
#define DO_NOTHING_ATEXIT 1
#endif
#endif
using namespace llvm;
#define DEBUG_TYPE "wyverse"
namespace {
// ---- command-line interface (file-local options) ----
// Positional: path to the bitcode module to interpret ("-" reads stdin).
cl::opt<std::string>
InputFile(cl::desc("<input bitcode>"), cl::Positional, cl::init("-"));
// Everything after the input file is forwarded to the interpreted program.
cl::list<std::string>
InputArgv(cl::ConsumeAfter, cl::desc("<program arguments>..."));
cl::opt<std::string>
EntryFunc("entry-function",
          cl::desc("Specify the entry function (default = 'main') "
                   "of the executable"),
          cl::value_desc("function"),
          cl::init("main"));
cl::list<std::string>
ExtraModules("extra-module",
             cl::desc("Extra modules to be loaded"),
             cl::value_desc("input bitcode"));
cl::opt<std::string>
FakeArgv0("fake-argv0",
          cl::desc("Override the 'argv[0]' value passed into the executing"
                   " program"), cl::value_desc("executable"));
// Names of the available instrumentation actions (see ActionFactory).
enum ActionType {
  helloworld, trace
};
cl::list<ActionType>
ActionList(cl::desc("Available Actions:"),
           cl::values(clEnumVal(helloworld, "An example of action"),
                      clEnumVal(trace, "Tracing memory / register")));
// Width filters for the trace action: 0 traces every width, -1 disables.
cl::opt<int>
MemoryRead("memory-read",
           cl::desc("Choose the memory read width to be traced "
                    "(trace action only): -1 disable, 0 all "),
           cl::value_desc("bytes"),
           cl::init(0));
cl::opt<int>
MemoryWrite("memory-write",
            cl::desc("Choose the memory write width to be traced "
                     "(trace action only): -1 disable, 0 all "),
            cl::value_desc("bytes"),
            cl::init(0));
cl::opt<int>
StackAccess("stack",
            cl::desc("Choose the stack access width to be traced "
                     "(trace action only): -1 disable, 0 all "),
            cl::value_desc("bytes"),
            cl::init(0));
cl::opt<int>
RegisterAccess("register",
               cl::desc("Choose the register width to be traced "
                        "(trace action only): -1 disable, 0 all "),
               cl::value_desc("bytes"),
               cl::init(0));
// Shared helper that aborts with a banner when an llvm::Error escapes.
ExitOnError ExitOnErr;
}
LLVM_ATTRIBUTE_NORETURN
// Prints the diagnostic under the program name and terminates the process.
static void reportError(SMDiagnostic Err, const char *ProgName) {
  Err.print(ProgName, errs());
  exit(1);
}
// Maps an ActionType enumerator to the string the ActionFactory expects.
// FIX: the default return was commented out, so control could fall off the
// end of a non-void function (undefined behaviour for any unexpected enum
// value). The fallback is placed after the switch so compilers still warn
// when a new enumerator is left unhandled.
inline const char* ActionTypeToString(ActionType at)
{
  switch (at) {
  case helloworld: return "helloworld";
  case trace: return "trace";
  }
  return "[Unknown ActionType]";
}
//===----------------------------------------------------------------------===//
// main Driver function
//
// Driver: parse options, build the action chain and the white-box
// interpreter engine, then run the requested entry function as main().
int main(int argc, char **argv, char * const *envp) {
  // Banner first, before any diagnostic output.
  WithColor(outs(), raw_ostream::BLUE, true) << AsciiLogo;
  InitLLVM X(argc, argv);
  if (argc > 1)
    ExitOnErr.setBanner(std::string(argv[0]) + ": ");
  cl::ParseCommandLineOptions(argc, argv, "Wyverse interpreter\n");
  // Create a chain of actions
  // NOTE(review): actionList (and the actions added to it) are never freed;
  // they live until process exit. Ownership after builder.setAction() is
  // unclear from this file -- confirm before adding a delete.
  ChainedAction *actionList = new ChainedAction(); // to free up
  ActionFactory actionFactory;
  for (unsigned i = 0; i != ActionList.size(); ++i) {
    const char * actionType = ActionTypeToString(ActionList[i]);
    Action *action = actionFactory.createAction(actionType);
    if (action)
      actionList->addAction(action);
  }
  // Report which actions ended up enabled.
  WithColor stringOuts = WithColor(outs(), raw_ostream::GREEN);
  stringOuts << "====== Enabled actions ======\n\n";
  stringOuts.resetColor() << *actionList << "\n";
  stringOuts.changeColor(raw_ostream::GREEN) << "*****************************\n";
  stringOuts.resetColor();
  outs() << RegisterAccess << " register access\n";
  LLVMContext Context;
  // Load the bitcode...
  SMDiagnostic Err;
  std::unique_ptr<Module> Owner = parseIRFile(InputFile, Err, Context);
  Module *Mod = Owner.get();
  if (!Mod)
    reportError(Err, argv[0]);
  std::string ErrorMsg;
  // Hand the module to the engine builder and request the white-box
  // interpreter engine kind; the action chain is installed on the builder.
  EngineBuilder builder(std::move(Owner));
  builder.setErrorStr(&ErrorMsg);
  builder.setEngineKind(EngineKind::WhiteBoxInterpreter);
  builder.setAction(actionList);
  // Create execution engine
  std::unique_ptr<ExecutionEngine> EE(builder.create());
  if (!EE) {
    if (!ErrorMsg.empty())
      WithColor::error(errs(), argv[0])
          << "error creating EE: " << ErrorMsg << "\n";
    else
      WithColor::error(errs(), argv[0]) << "unknown error creating EE!\n";
    exit(1);
  }
  // Load any additional modules specified on the command line.
  for (unsigned i = 0, e = ExtraModules.size(); i != e; ++i) {
    std::unique_ptr<Module> XMod = parseIRFile(ExtraModules[i], Err, Context);
    if (!XMod)
      reportError(Err, argv[0]);
    EE->addModule(std::move(XMod));
  }
  // If the user specifically requested an argv[0] to pass into the program,
  // do it now.
  if (!FakeArgv0.empty()) {
    InputFile = static_cast<std::string>(FakeArgv0);
  } else {
    // Otherwise, if there is a .bc suffix on the executable strip it off, it
    // might confuse the program.
    if (StringRef(InputFile).endswith(".bc"))
      InputFile.erase(InputFile.length() - 3);
  }
  // Add the module's name to the start of the vector of arguments to main().
  InputArgv.insert(InputArgv.begin(), InputFile);
  // Call the main function from M as if its signature were:
  //   int main (int argc, char **argv, const char **envp)
  // using the contents of Args to determine argc & argv, and the contents of
  // EnvVars to determine envp.
  //
  Function *EntryFn = Mod->getFunction(EntryFunc);
  if (!EntryFn) {
    WithColor::error(errs(), argv[0])
        << '\'' << EntryFunc << "\' function not found in module.\n";
    return -1;
  }
  // Reset errno to zero on entry to main.
  errno = 0;
  int Result = -1;
  // If the program doesn't explicitly call exit, we will need the Exit
  // function later on to make an explicit call, so get the function now.
  Constant *Exit = Mod->getOrInsertFunction("exit", Type::getVoidTy(Context),
                                            Type::getInt32Ty(Context));
  EE->runStaticConstructorsDestructors(false);
  // Trigger compilation separately so code regions that need to be
  // invalidated will be known.
  (void)EE->getPointerToFunction(EntryFn);
  // Run main.
  Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
  // Run static destructors.
  EE->runStaticConstructorsDestructors(true);
  // If the program didn't call exit explicitly, we should call it now.
  // This ensures that any atexit handlers get called correctly.
  if (Function *ExitF = dyn_cast<Function>(Exit)) {
    std::vector<GenericValue> Args;
    GenericValue ResultGV;
    ResultGV.IntVal = APInt(32, Result);
    Args.push_back(ResultGV);
    EE->runFunction(ExitF, Args);
    WithColor::error(errs(), argv[0]) << "exit(" << Result << ") returned!\n";
    abort();
  } else {
    WithColor::error(errs(), argv[0])
        << "exit defined with wrong prototype!\n";
    abort();
  }
  return Result;
}
| 7,709
|
C++
|
.cpp
| 204
| 33.715686
| 84
| 0.647374
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,990
|
WhiteBoxInterpreter.h
|
CryptoExperts_wyverse/lib/ExecutionEngine/Interpreter/WhiteBoxInterpreter.h
|
#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_WHITEBOXINTERPRETER_H
#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_WHITEBOXINTERPRETER_H
#include "llvm/ExecutionEngine/Action.h"
#include "Interpreter.h"
namespace llvm {
// Interpreter subclass that funnels every interpreted instruction through a
// user-supplied Action (before/skip/after hooks); see Action.h.
class WhiteBoxInterpreter : public Interpreter {
  using base_type = Interpreter;
  Action * action; // borrowed hook object; installed by the constructor
public:
  explicit WhiteBoxInterpreter(std::unique_ptr<Module> M, Action *action);
  // Installs `create` as the factory for this engine kind.
  static void Register() {
    WBInterpCtor = create;
  }
  /// Create a white-box interpreter ExecutionEngine.
  ///
  static ExecutionEngine *create(std::unique_ptr<Module> M,
                                 Action *action,
                                 std::string *ErrorStr = nullptr);
  // Entry point: trims extra args, then drives run() (see the .cpp).
  GenericValue runFunction(Function *F,
                           ArrayRef<GenericValue> ArgValues) override;
  // Interpreter loop that wraps each instruction with the action hooks.
  void run() ;
};
} // End llvm namespace
#endif
| 838
|
C++
|
.h
| 24
| 29.791667
| 74
| 0.722637
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,994
|
Action.h
|
CryptoExperts_wyverse/include/llvm/ExecutionEngine/Action.h
|
#ifndef LLVM_EXECUTIONENGINE_ACTION_H
#define LLVM_EXECUTIONENGINE_ACTION_H
#include "llvm/ADT/APInt.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/IR/InstVisitor.h"
#include <list>
namespace llvm {
struct ExecutionContext;
class Interpreter;
// Mixin giving actions access to the interpreter's call stack; the stack
// pointer is injected by the engine through setECStack().
class ECStackAccessor {
private:
  // FIX: was left uninitialized -- currentEC() dereferences this pointer,
  // so a call before setECStack() read an indeterminate value. A null
  // default at least makes the misuse deterministic and checkable.
  std::vector<ExecutionContext> * ECStack = nullptr;
public:
  virtual ~ECStackAccessor() {}
  virtual void setECStack(std::vector<ExecutionContext> * ECStack) {
    this->ECStack = ECStack;
  }
  // Innermost frame, or nullptr when the stack is empty (defined in .cpp).
  ExecutionContext *currentEC();
};
// Base class for interpreter instrumentation: hooks invoked around every
// interpreted instruction by WhiteBoxInterpreter::run().
class Action: public ECStackAccessor {
private:
  // FIX: initialize to nullptr -- was indeterminate until setInterpreter().
  Interpreter * interpreter = nullptr;
public:
  Action() {}
  virtual ~Action() {}
  // Installs the back-pointer to the driving interpreter (see .cpp).
  virtual void setInterpreter(Interpreter * interpreter);
  Interpreter * getInterpreter();
  // Called before an instruction is interpreted.
  virtual void beforeVisitInst(Instruction &I, ExecutionContext &SF) {}
  // Return true to veto execution of I.
  virtual bool skipExecuteInst(Instruction &I) {return false;}
  // Called after an instruction has been interpreted.
  virtual void afterVisitInst(Instruction &I, ExecutionContext &SF) {}
  // Self-description for the enabled-actions listing.
  virtual void print(raw_ostream &ROS) {}
};
// Builds concrete Action instances from their command-line names
// ("helloworld", "trace"); defined in Action.cpp. Caller owns the result.
class ActionFactory {
public:
  Action * createAction(const char *);
};
class ChainedAction : public Action {
std::list<Action *> actionList;
public:
void addAction(Action *action) {
actionList.push_back(action);
}
void setInterpreter(Interpreter * interpreter) override {
for (Action *action : actionList) {
action->setInterpreter(interpreter);
}
}
void setECStack(std::vector<ExecutionContext> * ECStack) override {
for (Action *action : actionList) {
action->setECStack(ECStack);
}
}
void beforeVisitInst(Instruction &I, ExecutionContext &SF) override {
for (Action *action : actionList) {
action->beforeVisitInst(I, SF);
}
};
bool skipExecuteInst(Instruction &I) override {
bool ret = false;
for (Action *action : actionList) {
ret = ret || action->skipExecuteInst(I);
}
return ret;
}
void afterVisitInst(Instruction &I, ExecutionContext &SF) override {
for (Action *action : actionList) {
action->afterVisitInst(I, SF);
}
};
void print(raw_ostream &ROS) override {
for (Action *action : actionList) {
action->print(ROS);
}
}
};
// Demo action: logs every instruction before and after it executes; never
// skips execution.
class HelloWorldAction : public Action {
public:
  void beforeVisitInst(Instruction &I, ExecutionContext &SF) override {
    outs() << "(helloworld) Before visit: " << I << "\n";
  } // FIX: dropped stray ';' after the body (empty declaration)
  bool skipExecuteInst(Instruction &I) override {
    return false;
  }
  void afterVisitInst(Instruction &I, ExecutionContext &SF) override {
    outs() << "(helloworld) After visit: " << I << "\n";
  } // FIX: dropped stray ';' after the body (empty declaration)
  void print(raw_ostream &ROS) override {
    ROS << "HelloWorld => An example implementation of Action.\n";
  }
};
// Visitor that turns executed instructions into trace output; driven by
// TraceAction after each instruction executes. Most instruction kinds
// funnel into defaultVisitor, which prints the instruction's value.
class TraceProcessor: public InstVisitor<TraceProcessor>,
                      public ECStackAccessor {
private:
  Action * action; // back-pointer used to reach the interpreter
  void traceAPInt(APInt Val);
  // default visitor for most of instructions
  void defaultVisitor(Value &I);
  // Hard stop for instructions the tracer cannot handle yet.
  void visitNotImplementedInst(Value &I) {
    errs() << "Instruction not interpretable yet >>" << I << "\n";
    llvm_unreachable(nullptr);
  }
  GenericValue getOperandValue(Value *I, ExecutionContext &SF);
public:
  TraceProcessor(Action * action) { this->action = action; }
  void trace(GenericValue GV, Type *Ty);
  // =========== instruction visitors ============
  // ----- Terminator Instructions ----
  // `ret`: taking care the return values to the caller
  void visitReturnInst(ReturnInst &I);
  // `br`, `switch`, `indirectbr`: no need to trace, the conditions are
  // contained in registers
  void visitBranchInst(BranchInst &I) {}
  void visitSwitchInst(SwitchInst &I) {}
  // ---- Binary Instructions ----
  void visitBinaryOperator(BinaryOperator &I) { defaultVisitor(I); }
  void visitICmpInst(ICmpInst &I) { defaultVisitor(I); }
  void visitFCmpInst(FCmpInst &I) { defaultVisitor(I); }
  // ---- Memory Access Instructions ----
  // `alloca`: no need to trace
  void visitAllocaInst(AllocaInst &I) {}
  void visitLoadInst(LoadInst &I) { defaultVisitor(I); }
  void visitStoreInst(StoreInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I) { defaultVisitor(I); }
  // ---- Value Trunc and Extend Instructions ----
  // The following instructions are ignored by TraceAction
  // `trunc .. to`, `fptrunc .. to`,
  // `zext .. to`, `sext .. to`, `fpext .. to`,
  // `uitofp .. to`, `sitofp .. to`,
  // `fptoui .. to`, `fptosi .. to`,
  // `ptrtoint .. to`, `inttoptr .. to`,
  // `bitcast .. to`
  void visitTruncInst(TruncInst &I) {}
  void visitZExtInst(ZExtInst &I) {}
  void visitSExtInst(SExtInst &I) {}
  void visitBitCastInst(BitCastInst &I) {}
  // ---- Conditional (ternary) operator ----
  void visitSelectInst(SelectInst &I) { defaultVisitor(I); }
  // ---- Function Calls ----
  // `call`, `invoke`: returned values will be taken care by `ret` instruction
  void visitCallInst(CallInst &I) {}
  // ---- Shift Instructions ----
  // `shl`, `lshr`, `ashr`: no need to trace
  // ---- misc ----
  void visitVAArgInst(VAArgInst &I) { visitNotImplementedInst(I); }
  // `extractelement`, `insertelement`, `shufflevector`: no need to trace
  // `extractvalue`, `insertvalue`: no need to trace
  // Fallback: any instruction without an explicit visitor above aborts.
  void visitInstruction(Instruction &I) { visitNotImplementedInst(I); }
};
// Action that post-processes each executed instruction through a
// TraceProcessor to emit a value trace.
class TraceAction : public Action, public InstVisitor<TraceAction> {
  TraceProcessor postProcessor = TraceProcessor(this);
public:
  // The stack pointer is forwarded to the processor; the action itself does
  // not inspect the stack directly.
  void setECStack(std::vector<ExecutionContext> * ECStack) override {
    postProcessor.setECStack(ECStack);
  }
  // Trace after execution so the instruction's result value is available.
  void afterVisitInst(Instruction &I, ExecutionContext &SF) override {
    postProcessor.visit(I);
  }
  void print(raw_ostream &ROS) override;
};
// operators
// Streams an action's self-description; enables `OS << action`.
inline raw_ostream &operator<<(raw_ostream &OS, Action& action)
{
  action.print(OS);
  return OS;
}
}
#endif
| 5,764
|
C++
|
.h
| 167
| 31.221557
| 78
| 0.699351
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,997
|
logo-ascii.inc
|
CryptoExperts_wyverse/tools/wyverse/logo-ascii.inc
|
const char * AsciiLogo = R"(
╔═════════════════════════════════ Welcome to ═════════════════════════════════╗
║ ║
║ ██╗ ██╗██╗ ██╗██╗ ██╗███████╗██████╗ ███████╗███████╗ ║
║ ██║ ██║╚██╗ ██╔╝██║ ██║██╔════╝██╔══██╗██╔════╝██╔════╝ ║
║ ██║ █╗ ██║ ╚████╔╝ ██║ ██║█████╗ ██████╔╝███████╗█████╗ ║
║ ██║███╗██║ ╚██╔╝ ╚██╗ ██╔╝██╔══╝ ██╔══██╗╚════██║██╔══╝ ║
║ ╚███╔███╔╝ ██║ ╚████╔╝ ███████╗██║ ██║███████║███████╗ ║
║ ╚══╝╚══╝ ╚═╝ ╚═══╝ ╚══════╝╚═╝ ╚═╝╚══════╝╚══════╝ ║
║ ║
╚══════════════════════════════════════════════════════════════════════════════╝
)";
| 1,766
|
C++
|
.inc
| 12
| 69.25
| 80
| 0.033694
|
CryptoExperts/wyverse
| 39
| 4
| 0
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,998
|
noodle.cpp
|
phylovi_bito/extras/noodle.cpp
|
#include "gp_instance.hpp"
#include "rooted_sbn_instance.hpp"
#include "unrooted_sbn_instance.hpp"
#include "stopwatch.hpp"
#include "rooted_tree_collection.hpp"
// This is just a place to muck around, and check out performance.
// Shorthand for the high-resolution clock's now().
auto now = std::chrono::high_resolution_clock::now;
// To valgrind (you can pip install gprof2dot):
// valgrind --tool=callgrind ./_build/noodle
// gprof2dot -f callgrind callgrind.out.16763 | dot -Tpng -o ~/output.png
// Scratch benchmark: builds a ladder tree with 10k leaves and times 100
// preorder traversals that collect node ids.
int main() {
  std::cout << "RUNNING NOODLE" << std::endl;
  uint32_t leaf_count = 10000;
  Node::NodePtr topology = Node::Ladder(leaf_count);
  std::vector<size_t> ids;
  // Reserve once, sized to the expected node count, so push_back inside the
  // timed loop never reallocates.
  ids.reserve(1 + 2 * leaf_count);
  Stopwatch timer;
  auto t_start = now();
  timer.Start();
  for (int i = 0; i < 100; i++) {
    ids.clear();
    topology->Preorder([&ids](const Node* node) { ids.push_back(node->Id()); });
  }
  // FIX: Stop()'s return value was stored in an unused local
  // (`watch_duration`), triggering an unused-variable warning; the call is
  // kept for its effect on the stopwatch, the result discarded.
  timer.Stop();
  std::chrono::duration<double> duration = now() - t_start;
  std::cout << "time: " << duration.count() << " seconds\n";
}
| 1,020
|
C++
|
.cpp
| 27
| 35.222222
| 80
| 0.681218
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,999
|
benchmark.cpp
|
phylovi_bito/extras/benchmark.cpp
|
#include <iostream>
#include <fstream>
#include "stopwatch.hpp"
#include "gp_instance.hpp"
#include "rooted_sbn_instance.hpp"
#include "unrooted_sbn_instance.hpp"
#include "rooted_tree_collection.hpp"
// This is just a place to muck around, and check out performance.
auto now = std::chrono::high_resolution_clock::now;
// To valgrind (you can pip install gprof2dot):
// valgrind --tool=callgrind ./_build/noodle
// gprof2dot -f callgrind callgrind.out.16763 | dot -Tpng -o ~/output.png
auto GetArgVec(int argc, char* argv[]) {
std::string current_exec_name = argv[0]; // Name of the current exec program
std::vector<std::string> all_args;
if (argc > 1) {
all_args.assign(argv + 1, argv + argc);
}
return all_args;
}
auto MakeDAGInstanceFromFiles(const std::string& fasta_path,
const std::string& newick_path) {
GPInstance inst("_ignore/mmap.data");
inst.ReadFastaFile(fasta_path);
inst.ReadNewickFile(newick_path);
return inst;
}
void OutputNewickToFile(const std::string& file_path, const std::string& newick_str) {
std::ofstream file_out;
file_out.open(file_path);
file_out << newick_str << std::endl;
file_out.close();
}
namespace SubsplitSetBuilder {
void BuildAllSubsplitsRecurse(std::vector<int>& subsplit_assign, size_t n,
std::set<Bitset>& results) {
if (n > 0) {
for (size_t i = 0; i < 3; i++) {
subsplit_assign[subsplit_assign.size() - 1 - n] = i;
BuildAllSubsplitsRecurse(subsplit_assign, n - 1, results);
}
return;
}
if (n == 0) {
Bitset clade_left(subsplit_assign.size(), false);
Bitset clade_right(subsplit_assign.size(), false);
for (size_t i = 0; i < subsplit_assign.size(); i++) {
if (subsplit_assign[i] == 0) {
clade_left.set(i);
}
if (subsplit_assign[i] == 1) {
clade_right.set(i);
}
}
if ((clade_left.Count() == 0) && (clade_right.Count() == 0)) {
return;
}
if ((clade_left.Count() == 0) && (clade_right.Count() > 1)) {
return;
}
if ((clade_left.Count() > 1) && (clade_right.Count() == 0)) {
return;
}
Bitset subsplit = Bitset::Subsplit(clade_left, clade_right);
results.insert(subsplit);
return;
}
}
std::set<Bitset> BuildAllSubsplits(size_t n) {
std::vector<int> subsplit_assign(n);
std::set<Bitset> all_subsplits;
BuildAllSubsplitsRecurse(subsplit_assign, n, all_subsplits);
std::cout << "all_subsplits: " << n << " " << all_subsplits.size() << " "
<< all_subsplits << std::endl;
// return all_subsplits;
}
} // namespace SubsplitSetBuilder
int main(int argc, char* argv[]) {
Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
// Parse commandline args.
auto args = GetArgVec(argc, argv);
if (args.size() < 2 or args.size() > 3) {
std::cout << "usage: <fasta_path> <newick_path> <search_type_opt>" << std::endl;
exit(0);
}
auto fasta_path = args[0];
auto newick_path = args[1];
std::cout << "Fasta: " << fasta_path << std::endl;
std::cout << "Newick: " << newick_path << std::endl;
// Build DAG and Engines.
auto inst = MakeDAGInstanceFromFiles(fasta_path, newick_path);
inst.MakeDAG();
auto& dag = inst.GetDAG();
inst.MakeGPEngine();
inst.MakeTPEngine();
inst.MakeNNIEngine();
auto& nni_engine = inst.GetNNIEngine();
auto& gp_engine = inst.GetGPEngine();
auto& tp_engine = inst.GetTPEngine();
auto& graft_dag = nni_engine.GetGraftDAG();
nni_engine.SetTPLikelihoodCutoffFilteringScheme(0.0);
nni_engine.SetTopNScoreFilteringScheme(5);
std::cout << "# Build DAG and NNIEngine: " << timer.Lap() << " sec" << std::endl;
tp_engine.OptimizeBranchLengths();
std::cout << "# Optimize Branch Lengths: " << timer.Lap() << " sec" << std::endl;
nni_engine.RunInit();
std::cout << "# nni_engine.RunInit(): " << timer.Lap() << " sec" << std::endl;
std::cout << "DAG_COUNTS: " << dag.NodeCount() << " "
<< dag.EdgeCountWithLeafSubsplits() << std::endl;
std::cout << "TOPO_SORT: " << dag.LeafwardNodeTraversalTrace(true) << std::endl;
std::cout << "BRANCH_LENGTHS: " << tp_engine.GetBranchLengths() << std::endl;
std::cout << "DAG_COUNTS (START): " << dag.NodeCount() << " "
<< dag.EdgeCountWithLeafSubsplits() << std::endl;
std::cout << "GRAFT_COUNTS (START): " << graft_dag.HostNodeCount() << " "
<< graft_dag.HostEdgeCount() << std::endl;
std::cout << "GRAFT_COUNTS (START): " << graft_dag.GraftNodeCount() << " "
<< graft_dag.GraftEdgeCount() << std::endl;
Stopwatch iter_timer(true, Stopwatch::TimeScale::SecondScale);
size_t max_iter = 5;
for (size_t iter = 0; iter < max_iter; iter++) {
nni_engine.SyncAdjacentNNIsWithDAG();
std::cout << "### Iteration " << iter << " of " << max_iter << "..." << std::endl;
std::cout << "ADJACENT_NNIs: " << nni_engine.GetAdjacentNNIs().size() << std::endl;
// Main Loop
nni_engine.GraftAdjacentNNIsToDAG();
std::cout << "# nni_engine.GraftAdjacentNNIsToDAG(): " << timer.Lap() << " sec"
<< std::endl;
std::cout << "DAG_COUNTS (INNER): " << dag.NodeCount() << " "
<< dag.EdgeCountWithLeafSubsplits() << std::endl;
std::cout << "GRAFT_COUNTS (HOST_INNER): " << graft_dag.HostNodeCount() << " "
<< graft_dag.HostEdgeCount() << std::endl;
std::cout << "GRAFT_COUNTS (GRAFT_INNER): " << graft_dag.GraftNodeCount() << " "
<< graft_dag.GraftEdgeCount() << std::endl;
nni_engine.FilterPreUpdate();
std::cout << "# nni_engine.FilterPreUpdate(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.FilterEvaluateAdjacentNNIs();
std::cout << "# nni_engine.FilterEvaluateAdjacentNNIs(): " << timer.Lap()
<< std::endl;
nni_engine.FilterPostUpdate();
std::cout << "# nni_engine.FilterPostUpdate(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.FilterProcessAdjacentNNIs();
std::cout << "# nni_engine.FilterProcessAdjacentNNIs(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.RemoveAllGraftedNNIsFromDAG();
std::cout << "# nni_engine.RemoveAllGraftedNNIsFromDAG(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.AddAcceptedNNIsToDAG(false);
std::cout << "# nni_engine.AddAcceptedNNIsToDAG(): " << timer.Lap() << " sec"
<< std::endl;
// std::cout << "NNI_SCORES: " << nni_engine.GetScoredNNIs().size() << " "
// << nni_engine.GetScoredNNIs() << std::endl;
// Post Loop
nni_engine.UpdateAdjacentNNIs();
std::cout << "# nni_engine.UpdateAdjacentNNIs(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.UpdateAcceptedNNIs();
std::cout << "# nni_engine.UpdateAcceptedNNIs(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.UpdateRejectedNNIs();
std::cout << "# nni_engine.UpdateRejectedNNIs(): " << timer.Lap() << " sec"
<< std::endl;
nni_engine.UpdateScoredNNIs();
std::cout << "# nni_engine.UpdateScoredNNIs(): " << timer.Lap() << " sec"
<< std::endl;
// Iteration details
std::cout << "DAG_COUNTS (POST): " << dag.NodeCount() << " "
<< dag.EdgeCountWithLeafSubsplits() << std::endl;
std::cout << "GRAFT_COUNTS (HOST_POST): " << graft_dag.HostNodeCount() << " "
<< graft_dag.HostEdgeCount() << std::endl;
std::cout << "GRAFT_COUNTS (GRAFT_POST): " << graft_dag.GraftNodeCount() << " "
<< graft_dag.GraftEdgeCount() << std::endl;
std::cout << "### iter_time " << iter << ": " << iter_timer.Lap() << " sec"
<< std::endl;
}
}
| 7,705
|
C++
|
.cpp
| 177
| 37.977401
| 88
| 0.606182
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,000
|
reps_and_likelihoods.cpp
|
phylovi_bito/extras/reps_and_likelihoods.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// The purpose of this code is documented in the header.
#include "reps_and_likelihoods.hpp"
int main(int argc, char *argv[]) {
if (argc <= 4 || argc % 2 != 1) {
std::cout << "We need at least 4 arguments: fasta, rooted_nwk, repr_out_path, "
<< "and nwk_out_path." << std::endl
<< "Additional arguments must come in pairs: extra_rooted_nwk, "
<< "extra_repr_out_path." << std::endl;
abort();
}
std::string fasta_path = argv[1];
std::string rooted_nwk_path = argv[2];
std::string out_path = argv[3];
std::string nwk_out_path = argv[4];
std::vector<std::string> extra_nwk_paths;
std::vector<std::string> extra_out_paths;
for (int arg_index = 5; arg_index < argc; arg_index += 2) {
extra_nwk_paths.push_back(argv[arg_index]);
extra_out_paths.push_back(argv[arg_index + 1]);
}
const auto extras_count = extra_nwk_paths.size();
auto thread_count = std::thread::hardware_concurrency();
GPInstance all_trees_gp_inst("mmapped_plv.data");
all_trees_gp_inst.ReadNewickFile(rooted_nwk_path);
all_trees_gp_inst.ReadFastaFile(fasta_path);
all_trees_gp_inst.MakeGPEngine();
all_trees_gp_inst.TakeFirstBranchLength();
std::vector<RootedSBNInstance> extra_r_insts;
for (size_t i = 0; i < extras_count; i++) {
extra_r_insts.push_back(RootedSBNInstance("extra_trees" + std::to_string(i)));
extra_r_insts.at(i).ReadNewickFile(extra_nwk_paths.at(i));
}
const auto taxa_order = all_trees_gp_inst.GetTaxonNames();
for (const auto &r_inst : extra_r_insts) {
if (r_inst.tree_collection_.TaxonNames() != taxa_order) {
std::cout << "The first tree of each newick file must have the taxa appearing in "
<< "the same order. Insert a dummy tree if needed." << std::endl;
abort();
}
}
auto all_trees = all_trees_gp_inst.GenerateCompleteRootedTreeCollection();
auto indexer = all_trees_gp_inst.GetDAG().BuildEdgeIndexer();
auto all_representations = GetIndexerRepresentations(all_trees, indexer);
UnrootedSBNInstance ur_inst("charlie");
ur_inst.ReadNewickFile(rooted_nwk_path);
ur_inst.ReadFastaFile(fasta_path);
PhyloModelSpecification simple_specification{"JC69", "constant", "strict"};
ur_inst.PrepareForPhyloLikelihood(simple_specification, thread_count, {}, true,
all_trees.TreeCount());
const auto log_likelihoods = ur_inst.UnrootedLogLikelihoods(all_trees);
WriteTreesToFile(out_path, all_representations, log_likelihoods);
WriteNewickToFile(nwk_out_path, all_trees);
for (size_t i = 0; i < extras_count; i++) {
WriteTreesToFile(
extra_out_paths.at(i),
GetIndexerRepresentations(extra_r_insts.at(i).tree_collection_, indexer));
}
}
std::vector<RootedIndexerRepresentation> GetIndexerRepresentations(
PreRootedTreeCollection &trees, BitsetSizeMap &indexer) {
std::vector<RootedIndexerRepresentation> indexer_representations;
for (const auto &tree : trees.Trees()) {
indexer_representations.push_back(
RootedSBNMaps::IndexerRepresentationOf(indexer, tree.Topology(), SIZE_MAX));
}
return indexer_representations;
}
void WriteTreesToFile(const std::string &out_path,
const std::vector<RootedIndexerRepresentation> &representations,
const std::vector<double> &log_likelihoods) {
std::ofstream out_stream(out_path);
out_stream << std::setprecision(12);
const auto write_likelihood = !log_likelihoods.empty();
for (size_t which_tree = 0; which_tree < representations.size(); which_tree++) {
for (const auto &idx : representations.at(which_tree)) {
out_stream << idx << ",";
}
if (write_likelihood) {
out_stream << log_likelihoods.at(which_tree);
}
out_stream << "\n";
}
out_stream.close();
}
void WriteNewickToFile(const std::string &out_path, const RootedTreeCollection &trees) {
const auto node_labels = trees.TagTaxonMap();
std::ofstream out_stream(out_path);
for (const auto &tree : trees.Trees()) {
out_stream << tree.NewickTopology(node_labels) << std::endl;
}
out_stream.close();
}
| 4,249
|
C++
|
.cpp
| 95
| 39.884211
| 88
| 0.690712
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,001
|
sankoff_handler.cpp
|
phylovi_bito/src/sankoff_handler.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "sankoff_handler.hpp"
void SankoffHandler::Resize(const size_t new_node_count) {
psv_handler_.SetCount(new_node_count);
psv_handler_.SetAllocatedCount(
size_t(ceil(double(psv_handler_.GetPaddedCount()) * resizing_factor_)));
psv_handler_.Resize(new_node_count, psv_handler_.GetAllocatedCount());
}
void SankoffHandler::GenerateLeafPartials() {
// first check that the psv_handler has been resized to deal with the leaf labels
Assert(psv_handler_.GetCount() >= site_pattern_.TaxonCount(),
"Error in SankoffHandler::GenerateLeafPartials: "
"psv_handler_ should be initialized to accomodate"
"the number of leaf nodes in the site_pattern_.");
// Iterate over all leaf nodes to instantiate each with P partial values
for (NodeId leaf_node = 0; leaf_node < site_pattern_.TaxonCount(); leaf_node++) {
SankoffPartial node_partials(state_count_, site_pattern_.PatternCount());
// set leaf node partial to have big_double_ infinity substitute
node_partials.block(0, 0, state_count_, site_pattern_.PatternCount())
.fill(big_double_);
// now fill in appropriate entries of the leaf-partial where non-infinite
for (size_t pattern_idx = 0; pattern_idx < site_pattern_.PatternCount();
pattern_idx++) {
auto site_val = site_pattern_.GetPatternSymbol(leaf_node.value_, pattern_idx);
if (site_val < state_count_) {
node_partials(site_val, pattern_idx) = 0.;
} else if (site_val == state_count_) {
// Leaves with gaps in sequence and ambiguous nucleotides are assigned sankoff
// partial vector [0, 0, 0, 0] at the corresponding site.
node_partials.col(pattern_idx).fill(0);
} else {
Failwith(
"Error in SankoffHandler::GenerateLeafPartials: Invalid nucleotide state "
"in sequence alignment.");
}
}
psv_handler_.GetPV(PSVType::PLeft, leaf_node) = node_partials;
psv_handler_.GetPV(PSVType::PRight, leaf_node).fill(0);
}
}
EigenVectorXd SankoffHandler::ParentPartial(EigenVectorXd child_partials) {
Assert(child_partials.size() == state_count_,
"child_partials in SankoffHandler::ParentPartial should have 4 states.");
EigenVectorXd parent_partials(state_count_);
parent_partials.setZero();
for (size_t parent_state = 0; parent_state < state_count_; parent_state++) {
EigenVectorXd partials_for_state(state_count_);
for (size_t child_state = 0; child_state < state_count_; child_state++) {
auto cost = mutation_costs_.GetCost(parent_state, child_state);
partials_for_state[child_state] = cost + child_partials[child_state];
}
auto minimum_element =
*std::min_element(partials_for_state.data(),
partials_for_state.data() + partials_for_state.size());
parent_partials[parent_state] = minimum_element;
}
return parent_partials;
}
EigenVectorXd SankoffHandler::TotalPPartial(NodeId node_id, size_t site_idx) {
return psv_handler_.GetPV(PSVType::PLeft, node_id).col(site_idx) +
psv_handler_.GetPV(PSVType::PRight, node_id).col(site_idx);
}
void SankoffHandler::PopulateRootwardParsimonyPVForNode(const NodeId parent_id,
const NodeId left_child_id,
const NodeId right_child_id) {
for (size_t pattern_idx = 0; pattern_idx < site_pattern_.PatternCount();
pattern_idx++) {
// Which child partial is in right or left doesn't actually matter because they
// are summed when calculating q_partials.
psv_handler_.GetPV(PSVType::PLeft, parent_id).col(pattern_idx) =
ParentPartial(TotalPPartial(left_child_id, pattern_idx));
psv_handler_.GetPV(PSVType::PRight, parent_id).col(pattern_idx) =
ParentPartial(TotalPPartial(right_child_id, pattern_idx));
}
}
void SankoffHandler::PopulateLeafwardParsimonyPVForNode(const NodeId parent_id,
const NodeId left_child_id,
const NodeId right_child_id) {
for (size_t pattern_idx = 0; pattern_idx < site_pattern_.PatternCount();
pattern_idx++) {
auto partials_from_parent =
ParentPartial(psv_handler_.GetPV(PSVType::Q, parent_id).col(pattern_idx));
for (const auto child_id : {left_child_id, right_child_id}) {
NodeId sister_id = ((child_id == left_child_id) ? right_child_id : left_child_id);
auto partials_from_sister = ParentPartial(TotalPPartial(sister_id, pattern_idx));
psv_handler_.GetPV(PSVType::Q, child_id).col(pattern_idx) =
partials_from_sister + partials_from_parent;
}
}
}
void SankoffHandler::RunSankoff(Node::NodePtr topology) {
// Resize PVs to fit topology.
size_t node_count = topology->Id() + 1;
Resize(node_count);
// fill in leaf node partials for PSV (stored in PLeft in PSVHandler instance)
GenerateLeafPartials();
// generating p_partials (right and left)
topology->Postorder([this](const Node* node) {
if (!node->IsLeaf()) {
Assert(node->Children().size() == 2,
"Error in SankoffHandler::RunSankoff: Tree should be bifurcating.");
PopulateRootwardParsimonyPVForNode(NodeId(node->Id()),
NodeId(node->Children()[0]->Id()),
NodeId(node->Children()[1]->Id()));
}
});
// generating q-partials
topology->Preorder([this](const Node* node) {
if (!node->IsLeaf()) {
Assert(node->Children().size() == 2,
"Error in SankoffHandler::RunSankoff: Tree should be bifurcating.");
PopulateLeafwardParsimonyPVForNode(NodeId(node->Id()),
NodeId(node->Children()[0]->Id()),
NodeId(node->Children()[1]->Id()));
}
});
}
double SankoffHandler::ParsimonyScore(NodeId node_id) {
auto weights = site_pattern_.GetWeights();
double total_parsimony = 0.;
for (size_t pattern = 0; pattern < site_pattern_.PatternCount(); pattern++) {
// Note: doing ParentPartial first for the left and right p_partials and then adding
// them together will give the same minimum parsimony score, but doesn't give
// correct Sankoff Partial vector for the new rooting
auto total_tree = ParentPartial(TotalPPartial(node_id, pattern));
total_tree += ParentPartial(psv_handler_.GetPV(PSVType::Q, node_id).col(pattern));
// If node_id is the root node, calculating the total_tree vector like so does not
// yield the SankoffPartial of an actual rooting, but this will not change the
// minimum value in the partial, so the root node can still be used to calculate the
// parsimony score.
total_parsimony +=
*std::min_element(total_tree.begin(), total_tree.end()) * weights[pattern];
}
return total_parsimony;
}
| 7,062
|
C++
|
.cpp
| 136
| 44
| 88
| 0.661505
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,002
|
engine.cpp
|
phylovi_bito/src/engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "engine.hpp"
#include <numeric>
#include "beagle_flag_names.hpp"
Engine::Engine(const EngineSpecification &engine_specification,
const PhyloModelSpecification &model_specification,
SitePattern site_pattern)
: site_pattern_(std::move(site_pattern)) {
if (engine_specification.thread_count_ == 0) {
Failwith("Thread count needs to be strictly positive.");
} // else
const auto beagle_preference_flags =
engine_specification.beagle_flag_vector_.empty()
? BEAGLE_FLAG_VECTOR_SSE // Default flags.
: std::accumulate(engine_specification.beagle_flag_vector_.begin(),
engine_specification.beagle_flag_vector_.end(), 0,
std::bit_or<FatBeagle::PackedBeagleFlags>());
if (beagle_preference_flags & BEAGLE_FLAG_PRECISION_SINGLE &&
beagle_preference_flags & BEAGLE_FLAG_VECTOR_SSE) {
Failwith("Single precision not available with SSE vectorization in BEAGLE.");
}
for (size_t i = 0; i < engine_specification.thread_count_; i++) {
fat_beagles_.push_back(std::make_unique<FatBeagle>(
model_specification, site_pattern_, beagle_preference_flags,
engine_specification.use_tip_states_));
}
if (!engine_specification.beagle_flag_vector_.empty()) {
std::cout << "We asked BEAGLE for: "
<< BeagleFlagNames::OfBeagleFlags(beagle_preference_flags) << std::endl;
auto beagle_flags = fat_beagles_[0]->GetBeagleFlags();
std::cout << "BEAGLE gave us: " << BeagleFlagNames::OfBeagleFlags(beagle_flags)
<< std::endl;
if (beagle_flags & BEAGLE_FLAG_PROCESSOR_GPU) {
std::cout << R"raw(
____ ____ __ __ __ __ ______ __ __ __
/\ _`\ /\ _`\ /\ \/\ \ /\ \ /\ \/\ _ \ /\ \ /\ \/\ \
\ \ \L\_\ \ \L\ \ \ \ \ \ \ `\`\\/'/\ \ \L\ \\ `\`\\/'/\ \ \
\ \ \L_L\ \ ,__/\ \ \ \ \ `\ `\ /' \ \ __ \`\ `\ /' \ \ \
\ \ \/, \ \ \/ \ \ \_\ \ `\ \ \ \ \ \/\ \ `\ \ \ \ \_\
\ \____/\ \_\ \ \_____\ \ \_\ \ \_\ \_\ \ \_\ \/\_\
\/___/ \/_/ \/_____/ \/_/ \/_/\/_/ \/_/ \/_/
)raw";
}
}
}
const BlockSpecification &Engine::GetPhyloModelBlockSpecification() const {
// The BlockSpecification is well defined for an Engine because the interface
// assures that all of the PhyloModels have the same specification.
return GetFirstFatBeagle()->GetPhyloModelBlockSpecification();
}
std::vector<double> Engine::LogLikelihoods(
const UnrootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<double, UnrootedTree, UnrootedTreeCollection>(
FatBeagle::StaticUnrootedLogLikelihood, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<double> Engine::LogLikelihoods(
const RootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<double, RootedTree, RootedTreeCollection>(
FatBeagle::StaticRootedLogLikelihood, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<double> Engine::UnrootedLogLikelihoods(
const RootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<double, RootedTree, RootedTreeCollection>(
FatBeagle::StaticUnrootedLogLikelihoodOfRooted, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<double> Engine::LogDetJacobianHeightTransform(
const RootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<double, RootedTree, RootedTreeCollection>(
FatBeagle::StaticLogDetJacobianHeightTransform, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<PhyloGradient> Engine::Gradients(
const UnrootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<PhyloGradient, UnrootedTree, UnrootedTreeCollection>(
FatBeagle::StaticUnrootedGradient, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<PhyloGradient> Engine::Gradients(
const RootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<PhyloGradient, RootedTree, RootedTreeCollection>(
FatBeagle::StaticRootedGradient, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
std::vector<DoubleVector> Engine::GradientLogDeterminantJacobian(
const RootedTreeCollection &tree_collection,
const EigenMatrixXdRef phylo_model_params, const bool rescaling,
const std::optional<PhyloFlags> flags) const {
return FatBeagleParallelize<DoubleVector, RootedTree, RootedTreeCollection>(
FatBeagle::StaticGradientLogDeterminantJacobian, fat_beagles_, tree_collection,
phylo_model_params, rescaling, flags);
}
const FatBeagle *const Engine::GetFirstFatBeagle() const {
Assert(!fat_beagles_.empty(), "You have no FatBeagles.");
return fat_beagles_[0].get();
}
| 5,697
|
C++
|
.cpp
| 111
| 46.342342
| 86
| 0.692446
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,003
|
node.cpp
|
phylovi_bito/src/node.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "node.hpp"
#include <algorithm>
#include <climits>
#include <deque>
#include <functional>
#include <iostream>
#include <memory>
#include <sstream>
#include <stack>
#include <string>
#include <unordered_map>
#include <vector>
// We assume that the sizes of things related to trees are smaller than
// UINT32_MAX and so use that as our fundamental type. Because we use the STL,
// we use size_t as well, the size of which is implementation-dependent. Here we
// make sure that size_t is big enough (I know this is a little silly because
// size_t is quite big on 64 bit systems, but still...).
static_assert(UINT32_MAX <= SIZE_MAX, "size_t is too small.");
Node::Node(uint32_t leaf_id, Bitset leaves)
: children_({}),
id_(leaf_id),
leaves_(std::move(leaves)),
tag_(PackInts(leaf_id, 1)),
hash_(SOHash(leaf_id)) {}
Node::Node(NodePtrVec children, size_t id, Bitset leaves)
: children_(children), id_(id), leaves_(leaves) {
Assert(!children_.empty(), "Called internal Node constructor with no children.");
// Order the children by their max leaf ids.
std::sort(children_.begin(), children_.end(), [](const auto& lhs, const auto& rhs) {
if (lhs->MaxLeafID() == rhs->MaxLeafID()) {
// Children should have non-overlapping leaf sets, so there
// should not be ties.
Failwith("Tie observed between " + lhs->Newick() + " and " + rhs->Newick() +
"\n" + "Do you have a taxon name repeated?");
}
return lhs->MaxLeafID() < rhs->MaxLeafID();
});
// Children are sorted by their max_leaf_id, so we can get the max by
// looking at the last element.
uint32_t max_leaf_id = children_.back()->MaxLeafID();
uint32_t leaf_count = 0;
hash_ = 0;
for (const auto& child : children_) {
leaf_count += child->LeafCount();
hash_ ^= child->Hash();
}
tag_ = PackInts(max_leaf_id, leaf_count);
// Bit rotation is necessary because if we only XOR then we can get
// collisions when identical tips are in different
// ordered subtrees (an example is in below doctest).
hash_ = SORotate(hash_, 1);
}
Bitset Node::BuildSubsplit() const {
if (IsLeaf()) {
return Bitset::Subsplit(Leaves(), Bitset(Leaves().size()));
}
Assert(Children().size() == 2,
"For a valid subsplit, non-leaf nodes must be bifurcating.");
return Bitset::Subsplit(Children()[0]->Leaves(), Children()[1]->Leaves());
}
Bitset Node::BuildPCSP(const SubsplitClade clade) const {
Assert(Children().size() == 2,
"To build PCSP Edge Bitset, node must be non-leaf and bifurcating.");
Bitset parent_subsplit = BuildSubsplit();
auto child_node = (clade == SubsplitClade::Left) ? Children()[0] : Children()[1];
Bitset child_subsplit = child_node->BuildSubsplit();
return Bitset::PCSP(parent_subsplit, child_subsplit);
}
std::unordered_set<Bitset> Node::BuildSetOfSubsplits() const {
std::unordered_set<Bitset> subsplit_bitsets;
Preorder([this, &subsplit_bitsets](const Node* node) {
subsplit_bitsets.insert(node->BuildSubsplit());
});
return subsplit_bitsets;
}
std::unordered_set<Bitset> Node::BuildSetOfPCSPs() const {
std::unordered_set<Bitset> pcsp_bitsets;
Preorder([this, &pcsp_bitsets](const Node* node) {
if (!node->IsLeaf()) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
pcsp_bitsets.insert(node->BuildPCSP(clade));
}
}
});
return pcsp_bitsets;
}
bool Node::operator==(const Node& other) const {
if (this->Hash() != other.Hash()) {
return false;
}
size_t child_count = this->Children().size();
if (child_count != other.Children().size()) {
return false;
}
for (size_t i = 0; i < child_count; i++) {
if (!(*children_[i] == *other.Children()[i])) {
return false;
}
}
return true;
}
Node::NodePtr Node::DeepCopy() const {
return Node::OfParentIdVector(ParentIdVector());
}
void Node::Preorder(std::function<void(const Node*)> f) const {
std::stack<const Node*> stack;
stack.push(this);
const Node* node;
while (stack.size()) {
node = stack.top();
stack.pop();
f(node);
const auto& children = node->Children();
for (auto iter = children.rbegin(); iter != children.rend(); ++iter) {
stack.push((*iter).get());
}
}
}
void Node::ConditionalPreorder(std::function<bool(const Node*)> f) const {
std::stack<const Node*> stack;
stack.push(this);
const Node* node;
while (stack.size()) {
node = stack.top();
stack.pop();
if (f(node)) {
const auto& children = node->Children();
for (auto iter = children.rbegin(); iter != children.rend(); ++iter) {
stack.push((*iter).get());
}
}
}
}
void Node::MutablePostorder(std::function<void(Node*)> f) {
// The stack records the nodes and whether they have been visited or not.
std::stack<std::pair<Node*, bool>> stack;
stack.push({this, false});
Node* node;
bool visited;
while (stack.size()) {
std::tie(node, visited) = stack.top();
stack.pop();
if (visited) {
// If we've already visited this node then we are on our way back.
f(node);
} else {
// If not then we need to push ourself back on the stack (noting that
// we've been visited)...
stack.push({node, true});
// And all of our children, which have not.
const auto& children = node->Children();
for (auto iter = children.rbegin(); iter != children.rend(); ++iter) {
stack.push({(*iter).get(), false});
}
}
}
}
void Node::Postorder(std::function<void(const Node*)> f) const {
// https://stackoverflow.com/a/56603436/467327
Node* mutable_this = const_cast<Node*>(this);
mutable_this->MutablePostorder(f);
}
void Node::DepthFirst(std::function<void(const Node*)> pre,
std::function<void(const Node*)> post) const {
// The stack records the nodes and whether they have been visited or not.
std::stack<std::pair<const Node*, bool>> stack;
stack.push({this, false});
const Node* node;
bool visited;
while (stack.size()) {
std::tie(node, visited) = stack.top();
stack.pop();
if (visited) {
// If we've already visited this node then we are on our way back.
post(node);
} else {
pre(node);
// If not then we need to push ourself back on the stack (noting that
// we've been visited)...
stack.push({node, true});
// And all of our children, which have not.
const auto& children = node->Children();
for (auto iter = children.rbegin(); iter != children.rend(); ++iter) {
stack.push({(*iter).get(), false});
}
}
}
}
void Node::LevelOrder(std::function<void(const Node*)> f) const {
std::deque<const Node*> deque = {this};
while (deque.size()) {
auto n = deque.front();
deque.pop_front();
f(n);
for (const auto& child : n->children_) {
deque.push_back(child.get());
}
}
}
static std::function<void(const Node*, const Node*, const Node*)> const TripletIdInfix(
std::function<void(int, int, int)> f) {
return [&f](const Node* node0, const Node* node1, const Node* node2) {
f(static_cast<int>(node0->Id()), static_cast<int>(node1->Id()),
static_cast<int>(node2->Id()));
};
}
void Node::TripleIdPreorderBifurcating(
std::function<void(size_t, size_t, size_t)> f) const {
TriplePreorderBifurcating(TripletIdInfix(f));
}
static std::function<void(const Node*)> const BinaryIdInfix(
std::function<void(int, int, int)> f) {
return [&f](const Node* node) {
if (!node->IsLeaf()) {
Assert(node->Children().size() == 2, "BinaryIdInfix expects a bifurcating tree.");
f(static_cast<int>(node->Id()), static_cast<int>(node->Children()[0]->Id()),
static_cast<int>(node->Children()[1]->Id()));
}
};
}
void Node::BinaryIdPreorder(const std::function<void(size_t, size_t, size_t)> f) const {
Preorder(BinaryIdInfix(f));
}
void Node::BinaryIdPostorder(
const std::function<void(size_t, size_t, size_t)> f) const {
Postorder(BinaryIdInfix(f));
}
void Node::TriplePreorder(
std::function<void(const Node*, const Node*, const Node*)> f_root,
std::function<void(const Node*, const Node*, const Node*)> f_internal) const {
Assert(children_.size() == 3,
"TriplePreorder expects a tree with a trifurcation at the root.");
f_root(children_[0].get(), children_[1].get(), children_[2].get());
children_[0]->TriplePreorderBifurcating(f_internal);
f_root(children_[1].get(), children_[2].get(), children_[0].get());
children_[1]->TriplePreorderBifurcating(f_internal);
f_root(children_[2].get(), children_[0].get(), children_[1].get());
children_[2]->TriplePreorderBifurcating(f_internal);
}
// Iterative preorder over this bifurcating (sub)tree that applies f twice per
// internal node — once as (child0, child1, node) and once as (child1, child0,
// node) — interleaved with descending into the corresponding child. An
// explicit stack of (node, visited-once?) pairs replaces recursion.
void Node::TriplePreorderBifurcating(
    std::function<void(const Node*, const Node*, const Node*)> f) const {
  if (IsLeaf()) {
    return;
  }  // else
  std::stack<std::pair<const Node*, bool>> stack;
  stack.push({this, false});
  const Node* node;
  bool visited;
  while (stack.size()) {
    // Here we visit each node twice, once for each orientation.
    std::tie(node, visited) = stack.top();
    stack.pop();
    const auto& children = node->Children();
    Assert(children.size() == 2,
           "TriplePreorderBifurcating expects a bifurcating tree.");
    if (visited) {
      // We've already visited this node once, so do the second orientation.
      f(children[1].get(), children[0].get(), node);
      // Next traverse the right child.
      if (!children[1]->IsLeaf()) {
        stack.push({children[1].get(), false});
      }
    } else {
      // We are visiting this node for the first time.
      // Apply f in the first orientation.
      f(children[0].get(), children[1].get(), node);
      // Then set it up so it gets executed in the second orientation...
      stack.push({node, true});
      // ... after first traversing the left child.
      if (!children[0]->IsLeaf()) {
        stack.push({children[0].get(), false});
      }
    }
  }
}
// See the typedef of UnrootedPCSPFun to understand the argument type to this
// function, and `doc/svg/pcsp.svg` for a diagram that will greatly help you
// understand the implementation.
// Each call to f supplies four (node, flip) pairs — sister, focal, child0,
// child1 — plus the clade containing the virtual root (nullptr when the
// virtual root sits on the focal node's own edge). A flip of true means the
// node's leaf set should be read as its complement.
void Node::UnrootedPCSPPreorder(UnrootedPCSPFun f) const {
  this->TriplePreorder(
      // f_root
      [&f](const Node* node0, const Node* node1, const Node* node2) {
        // Virtual root on node2's edge, with subsplit pointing up.
        f(node2, false, node2, true, node0, false, node1, false, nullptr);
        if (!node2->IsLeaf()) {
          Assert(node2->Children().size() == 2,
                 "PCSPPreorder expects a bifurcating tree.");
          auto child0 = node2->Children()[0].get();
          auto child1 = node2->Children()[1].get();
          // Virtual root in node1.
          f(node0, false, node2, false, child0, false, child1, false, node1);
          // Virtual root in node0.
          f(node1, false, node2, false, child0, false, child1, false, node0);
          // Virtual root on node2's edge, with subsplit pointing down.
          f(node2, true, node2, false, child0, false, child1, false, nullptr);
          // Virtual root in child0.
          f(child1, false, node2, true, node0, false, node1, false, child0);
          // Virtual root in child1.
          f(child0, false, node2, true, node0, false, node1, false, child1);
        }
      },
      // f_internal
      [&f, this](const Node* node, const Node* sister, const Node* parent) {
        // Virtual root on node's edge, with subsplit pointing up.
        f(node, false, node, true, parent, true, sister, false, nullptr);
        if (!node->IsLeaf()) {
          Assert(node->Children().size() == 2,
                 "PCSPPreorder expects a bifurcating tree.");
          auto child0 = node->Children()[0].get();
          auto child1 = node->Children()[1].get();
          // Virtual root up the tree.
          f(sister, false, node, false, child0, false, child1, false, this);
          // Virtual root in sister.
          f(parent, true, node, false, child0, false, child1, false, sister);
          // Virtual root on node's edge, with subsplit pointing down.
          f(node, true, node, false, child0, false, child1, false, nullptr);
          // Virtual root in child0.
          f(child1, false, node, true, sister, false, parent, true, child0);
          // Virtual root in child1.
          f(child0, false, node, true, sister, false, parent, true, child1);
        }
      });
}
// Preorder traversal over the (sister, focal, child0, child1) node quartets of
// this rooted bifurcating topology. When allow_leaves is true, leaf focal
// nodes are also visited, with null child pointers; otherwise leaves are
// skipped entirely.
void Node::RootedPCSPPreorder(RootedPCSPFun f, bool allow_leaves) const {
  this->TriplePreorderBifurcating(
      [&f, &allow_leaves](const Node* node, const Node* sister, const Node* parent) {
        if (node->IsLeaf() && allow_leaves) {
          f(sister, node, nullptr, nullptr);
        } else if (!node->IsLeaf()) {
          Assert(node->Children().size() == 2,
                 "RootedPCSPPreorder expects a bifurcating tree.");
          auto child0 = node->Children()[0].get();
          auto child1 = node->Children()[1].get();
          f(sister, node, child0, child1);
        }
      });
}
// Visit every (sister, leaf) pair of this bifurcating topology: during a
// triple preorder traversal, whenever the focal node is a leaf, apply f to its
// sister followed by the leaf itself. Internal focal nodes are skipped.
void Node::RootedSisterAndLeafTraversal(TwoNodeFun f) const {
  TriplePreorderBifurcating(
      [&f](const Node* current, const Node* sibling, const Node* /*parent*/) {
        if (!current->IsLeaf()) {
          return;
        }
        f(sibling, current);
      });
}
// This function assigns ids to the nodes of the topology: the leaves get
// their fixed ids (which we assume are contiguously numbered from 0 through
// the leaf count -1) and the rest get ordered according to a postorder
// traversal. Thus if the tree is bifurcating the root always has id equal to
// the number of nodes in the tree.
//
// This function returns a map that maps the tags to their ids.
TagSizeMap Node::Polish(bool update_leaves, std::optional<size_t> leaf_count_opt) {
  TagSizeMap tag_id_map;
  // If no leaf count is supplied, infer it from the largest leaf id present.
  const size_t leaf_count =
      leaf_count_opt.has_value() ? leaf_count_opt.value() : MaxLeafID() + 1;
  size_t next_id = leaf_count;
  MutablePostorder([&tag_id_map, &next_id, &leaf_count, &update_leaves](Node* node) {
    if (node->IsLeaf()) {
      if (update_leaves) {
        // Reset the leaf to its fixed id and singleton leaf bitset.
        node->id_ = node->MaxLeafID();
        node->leaves_ = Bitset::Singleton(leaf_count, node->id_);
      }
    } else {
      // Internal nodes get consecutive postorder ids starting at leaf_count,
      // and their leaf bitsets are recomputed as the union of their children's.
      node->id_ = next_id;
      next_id++;
      node->leaves_ = Node::LeavesOf(node->Children());
    }
    SafeInsert(tag_id_map, node->Tag(), node->id_);
  });
  return tag_id_map;
}
// Render this topology as a Newick string, terminated by the standard ';'.
std::string Node::Newick(std::function<std::string(const Node*)> node_labeler,
                         const DoubleVectorOption& branch_lengths) const {
  std::string newick = NewickAux(node_labeler, branch_lengths);
  newick.push_back(';');
  return newick;
}
// Recursive worker for Newick(): build the Newick fragment for this subtree
// (without the trailing ';'). Leaves render as their label; internal nodes
// render as a parenthesized, comma-separated list of child fragments followed
// by the node's label. When branch lengths are supplied, ":<length>" is
// appended for this node's id.
std::string Node::NewickAux(std::function<std::string(const Node*)> node_labeler,
                            const DoubleVectorOption& branch_lengths) const {
  std::string result;
  if (IsLeaf()) {
    result = node_labeler(this);
  } else {
    result = "(";
    bool first_child = true;
    for (const auto& child : children_) {
      if (!first_child) {
        result += ",";
      }
      first_child = false;
      result += child->NewickAux(node_labeler, branch_lengths);
    }
    result += ")";
    result += node_labeler(this);
  }
  if (branch_lengths) {
    Assert(Id() < (*branch_lengths).size(),
           "branch_lengths vector is of insufficient length in NewickAux.");
    // ostringstream is the way to get scientific notation using the STL.
    std::ostringstream length_stream;
    length_stream << (*branch_lengths)[Id()];
    result += ":";
    result += length_stream.str();
  }
  return result;
}
// Render a Newick string choosing labels as follows: leaves use node_labels
// (if given), else the tag string (if show_tags), else the max leaf id;
// internal nodes are labeled only when show_tags is set, otherwise left blank.
std::string Node::Newick(const DoubleVectorOption& branch_lengths,
                         const TagStringMapOption& node_labels, bool show_tags) const {
  return Newick(
      [&node_labels, &show_tags](const Node* node) {
        if (node->IsLeaf()) {
          if (node_labels) {
            return (*node_labels).at(node->Tag());
          } else if (show_tags) {
            return node->TagString();
          } else {
            return std::to_string(node->MaxLeafID());
          }
        } else {
          if (show_tags) {
            return node->TagString();
          }
        }
        // Internal node without tags: empty label.
        return std::string("");
      },
      branch_lengths);
}
// Map each non-root node id to its parent's id. The result has Id() entries
// (indices 0 .. root id - 1); the root itself, having no parent, gets none.
std::vector<size_t> Node::ParentIdVector() const {
  std::vector<size_t> ids(Id());
  Postorder([&ids](const Node* node) {
    if (!node->IsLeaf()) {
      for (const auto& child : node->Children()) {
        Assert(child->Id() < ids.size(), "Problematic ids in ParentIdVector.");
        ids[child->Id()] = node->Id();
      }
    }
  });
  return ids;
}
// Collapse this bifurcating root (tree with >= 3 tips): the root child chosen
// as the absorber (children_[1], or children_[0] when children_[1] is a single
// leaf) keeps its own children and gains the other root child, producing a
// multifurcation that reuses the absorber's id.
Node::NodePtr Node::Deroot() {
  Assert(LeafCount() >= 3, "Node::Deroot expects a tree with at least 3 tips.");
  Assert(Children().size() == 2, "Can't deroot a non-bifurcating tree.");
  auto deroot = [](const NodePtr other_child, const NodePtr has_descendants) {
    // Make a vector copy by passing a vector in.
    NodePtrVec children(has_descendants->Children());
    children.push_back(other_child);
    // has_descendants' id is now available.
    return Join(children, has_descendants->Id());
  };
  if (children_[1]->LeafCount() == 1) {
    return deroot(children_[1], children_[0]);
  } // else
  return deroot(children_[0], children_[1]);
}
// Union of the leaf bitsets of a non-empty vector of children.
Bitset Node::LeavesOf(const Node::NodePtrVec& children) {
  Assert(!children.empty(), "Need children in Node::LeavesOf.");
  Bitset result = children.front()->Leaves();
  for (size_t idx = 1; idx < children.size(); ++idx) {
    result |= children[idx]->Leaves();
  }
  return result;
}
// ** Class methods
// Construct a leaf node with the given id and an explicit leaf bitset.
Node::NodePtr Node::Leaf(uint32_t id, Bitset leaves) {
  return std::make_shared<Node>(id, leaves);
}
// Construct a leaf node whose bitset is the singleton {id} of size taxon_count.
Node::NodePtr Node::Leaf(uint32_t id, size_t taxon_count) {
  return Node::Leaf(id, Bitset::Singleton(taxon_count, id));
}
// Construct an internal node over children; its leaf set is their union.
Node::NodePtr Node::Join(NodePtrVec children, size_t id) {
  return std::make_shared<Node>(children, id, Node::LeavesOf(children));
}
// Binary convenience overload of Join.
Node::NodePtr Node::Join(NodePtr left, NodePtr right, size_t id) {
  return Join(std::vector<NodePtr>({left, right}), id);
}
// Rebuild a topology from a parent-id vector (the inverse of ParentIdVector):
// ids[child_id] is the parent of child_id. Leaves are the ids below the
// smallest parent index, and the root's id is ids.size() — the one id that has
// no parent entry.
Node::NodePtr Node::OfParentIdVector(const std::vector<size_t>& ids) {
  // We will fill this map with the ids of the descendants.
  std::unordered_map<size_t, std::vector<size_t>> downward_ids;
  for (size_t child_id = 0; child_id < ids.size(); child_id++) {
    const auto& parent_id = ids[child_id];
    auto search = downward_ids.find(parent_id);
    if (search == downward_ids.end()) {
      // The first time we have seen this parent.
      std::vector<size_t> child_ids({child_id});
      SafeInsert(downward_ids, parent_id, std::move(child_ids));
    } else {
      // We've seen the parent before, so append the child to the parent's
      // vector of descendants.
      search->second.push_back(child_id);
    }
  }
  // The leaf count is equal to the smallest non-leaf index, i.e. a parent
  // index.
  size_t leaf_count = *std::min_element(ids.begin(), ids.end());
  std::function<NodePtr(size_t)> build_tree = [&build_tree, &downward_ids,
                                               leaf_count](size_t current_id) {
    auto search = downward_ids.find(current_id);
    if (search == downward_ids.end()) {
      // We assume that anything not in the map is a leaf, because leaves
      // don't have any children.
      return Leaf(static_cast<uint32_t>(current_id),
                  Bitset::Singleton(leaf_count, current_id));
    } else {
      const auto& children_ids = search->second;
      std::vector<NodePtr> children;
      for (const auto& child_id : children_ids) {
        children.push_back(build_tree(child_id));
      }
      return Join(children, current_id);
    }
  };
  // We assume that the maximum id of the tree is the length of the input
  // id array. That makes sense because the root does not have a parent, so
  // is the first "missing" entry in the input id array.
  return build_tree(ids.size());
}
// A fixed set of four-taxon example topologies (each Polish()ed so ids and
// leaf bitsets are consistent). Topologies 0 and 1 are the same tree built in
// a different child order.
Node::NodePtrVec Node::ExampleTopologies() {
  NodePtrVec topologies = {
      // 0: (0,1,(2,3))
      Join(std::vector<NodePtr>({Leaf(0), Leaf(1), Join(Leaf(2), Leaf(3))})),
      // 1; (0,1,(2,3)) again
      Join(std::vector<NodePtr>({Leaf(1), Leaf(0), Join(Leaf(3), Leaf(2))})),
      // 2: (0,2,(1,3))
      Join(std::vector<NodePtr>({Leaf(0), Leaf(2), Join(Leaf(1), Leaf(3))})),
      // 3: (0,(1,(2,3)))
      Join(std::vector<NodePtr>({Leaf(0), Join(Leaf(1), Join(Leaf(2), Leaf(3)))})),
      // 4: ((0,(2,3)),1)
      Join(std::vector<NodePtr>({Join(Leaf(0), Join(Leaf(2), Leaf(3))), Leaf(1)}))};
  for (auto& topology : topologies) {
    topology->Polish();
  }
  return topologies;
}
// Build a fully pectinate ("ladder"/caterpillar) topology on leaf_count
// leaves, then Polish() it so ids and leaf bitsets are consistent.
Node::NodePtr Node::Ladder(uint32_t leaf_count) {
  Assert(leaf_count > 0, "leaf_count should be positive in Node::Ladder.");
  NodePtr ladder = Leaf(0);
  for (uint32_t leaf_id = 1; leaf_id < leaf_count; ++leaf_id) {
    ladder = Join(Leaf(leaf_id), ladder);
  }
  ladder->Polish();
  return ladder;
}
// 32-bit integer mixing function (multiply-xorshift rounds, the widely used
// "Stack Overflow" hash); deterministic and fast, not cryptographic.
inline uint32_t Node::SOHash(uint32_t x) {
  x = ((x >> 16) ^ x) * 0x45d9f3b;
  x = ((x >> 16) ^ x) * 0x45d9f3b;
  x = (x >> 16) ^ x;
  return x;
}
// Bitwise left-rotation of n by c positions (c taken modulo the bit width of
// size_t). The (-c) & mask trick yields the complementary right-shift count
// without invoking undefined behavior for c == 0.
inline size_t Node::SORotate(size_t n, uint32_t c) {
  const uint32_t mask = (CHAR_BIT * sizeof(n) - 1);  // assumes width is a power of 2.
  // assert ( (c<=mask) &&"rotate by type width or more");
  c &= mask;
  return (n << c) | (n >> ((-c) & mask));
}
// For each node id, collect the ids on the path strictly above it (outermost
// first). Implemented with a depth-first traversal that pushes the current id
// on the way down and pops it on the way back up.
SizeVectorVector Node::IdsAbove() const {
  SizeVectorVector ids_above(Id() + 1);
  SizeVector mutable_ids;
  DepthFirst(
      [&ids_above, &mutable_ids](const Node* node) {
        // Store the current set of ids above.
        ids_above[node->Id()] = SizeVector(mutable_ids);
        // As we travel down the tree, the current node will be above.
        mutable_ids.push_back(node->Id());
      },
      // Going back up the tree, so remove the current node's id.
      [&mutable_ids](const Node*) { mutable_ids.pop_back(); });
  return ids_above;
}
// Map each node's id to a pointer to its parent node. The root has no parent
// and therefore no entry.
std::unordered_map<size_t, const Node*> Node::BuildParentNodeMap() const {
  std::unordered_map<size_t, const Node*> result;
  DepthFirst(
      [&result](const Node* current) {
        for (const auto& child : current->Children()) {
          result[child->Id()] = current;
        }
      },
      [](const Node*) {});
  return result;
}
// One-line summary of this node: its id, leaf bitset, and the ids of its
// children.
std::string Node::NodeIdAndLeavesToString() const {
  SizeVector child_ids;
  child_ids.reserve(children_.size());
  for (const auto& child : children_) {
    child_ids.push_back(child->Id());
  }
  std::stringstream out;
  out << "{ id: " << Id();
  out << ", leaves: " << Leaves();
  out << ", children: " << child_ids << " }";
  return out.str();
}
// Multi-line dump of the whole topology: one NodeIdAndLeavesToString() entry
// per node in preorder, wrapped in square brackets.
std::string Node::NodeIdAndLeavesToStringForTopology() const {
  std::stringstream out;
  out << "[ " << std::endl;
  Preorder([&out](const Node* current) {
    out << "\t" << current->NodeIdAndLeavesToString() << ", " << std::endl;
  });
  out << " ]" << std::endl;
  return out.str();
}
| 22,941
|
C++
|
.cpp
| 599
| 33.200334
| 88
| 0.630276
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,004
|
combinatorics.cpp
|
phylovi_bito/src/combinatorics.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "combinatorics.hpp"
// The product of (2i - 3) for i = 2..taxon_count, i.e. the double factorial
// (2*taxon_count - 3)!!, counting bifurcating tree topologies. Computed as a
// double so large counts stay representable (approximately).
double Combinatorics::TopologyCount(size_t taxon_count) {
  double count = 1.;
  for (size_t i = 2; i <= taxon_count; ++i) {
    count *= 2. * static_cast<double>(i) - 3.;
  }
  return count;
}
// Natural log of TopologyCount, accumulated in log space so it does not
// overflow for large taxon counts.
double Combinatorics::LogTreeCount(size_t taxon_count) {
  double log_count = 0.;
  for (size_t i = 2; i <= taxon_count; ++i) {
    log_count += std::log(2. * static_cast<double>(i) - 3.);
  }
  return log_count;
}
// Log of the ratio (#trees on child0 taxa * #trees on child1 taxa) /
// (#trees on the union), computed directly from three LogTreeCount sums.
double Combinatorics::LogChildSubsplitCountRatioNaive(size_t child0_taxon_count,
                                                      size_t child1_taxon_count) {
  return LogTreeCount(child0_taxon_count) + LogTreeCount(child1_taxon_count) -
         LogTreeCount(child0_taxon_count + child1_taxon_count);
}
// Same ratio as the naive version, but with the shared factors cancelled
// analytically: only the denominator terms not cancelled by child1's count
// (i = child1_taxon_count+1 .. total) are summed.
double Combinatorics::LogChildSubsplitCountRatio(size_t child0_taxon_count,
                                                 size_t child1_taxon_count) {
  size_t total_count = child0_taxon_count + child1_taxon_count;
  double total_without_child1 = 0.;
  for (size_t i = child1_taxon_count + 1; i <= total_count; i++) {
    total_without_child1 += std::log(2. * static_cast<double>(i) - 3.);
  }
  return LogTreeCount(child0_taxon_count) - total_without_child1;
}
| 1,336
|
C++
|
.cpp
| 31
| 36.935484
| 82
| 0.65613
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,005
|
graft_dag.cpp
|
phylovi_bito/src/graft_dag.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "graft_dag.hpp"
// ** Constructors
// A GraftDAG extends a host SubsplitDAG: it is constructed via the
// HostDispatchTag overload and keeps a handle to the host so grafted
// nodes/edges can be distinguished from (and reset back to) the host's.
GraftDAG::GraftDAG(SubsplitDAG &dag)
    : SubsplitDAG{dag, HostDispatchTag{}}, host_dag_{dag} {}
// ** Comparators
// Member-style wrapper around the static three-way Compare.
int GraftDAG::Compare(const GraftDAG &other) const {
  return GraftDAG::Compare(*this, other);
}
// Three-way comparison of two GraftDAGs: first by their host DAGs, then by
// their sets of node bitsets, then by their sets of edge bitsets.
// Returns a negative value, zero, or a positive value accordingly.
int GraftDAG::Compare(const GraftDAG &lhs, const GraftDAG &rhs) {
  const SubsplitDAG &lhs_host = lhs.GetHostDAG();
  const SubsplitDAG &rhs_host = rhs.GetHostDAG();
  // (1) Compare host DAGs.
  int host_diff = SubsplitDAG::Compare(lhs_host, rhs_host);
  if (host_diff != 0) {
    return host_diff;
  }
  // (2) Compare graft nodes.
  auto lhs_nodes = lhs.BuildSetOfNodeBitsets();
  auto rhs_nodes = rhs.BuildSetOfNodeBitsets();
  if (lhs_nodes != rhs_nodes) {
    return (lhs_nodes < rhs_nodes) ? -1 : 1;
  }
  // (3) Compare graft edges.
  auto lhs_edges = lhs.BuildSetOfEdgeBitsets();
  auto rhs_edges = rhs.BuildSetOfEdgeBitsets();
  if (lhs_edges != rhs_edges) {
    return (lhs_edges < rhs_edges) ? -1 : 1;
  }
  return 0;
}
// Member-style wrapper around the static GraftDAG-vs-SubsplitDAG comparison.
int GraftDAG::CompareToDAG(const SubsplitDAG &other) const {
  const int comparison = GraftDAG::CompareToDAG(*this, other);
  return comparison;
}
// Three-way comparison of a GraftDAG against a plain SubsplitDAG: first by
// taxon count, then by node bitset sets, then by edge bitset sets.
// Returns a negative value, zero, or a positive value accordingly.
int GraftDAG::CompareToDAG(const GraftDAG &lhs, const SubsplitDAG &rhs) {
  // Compare taxon counts. Cast each count to int *before* subtracting: if
  // TaxonCount() returns an unsigned type (as the other count accessors in
  // this file do), subtracting directly would wrap around to a huge positive
  // value whenever lhs has fewer taxa than rhs, giving the wrong sign.
  const int taxon_diff =
      static_cast<int>(lhs.TaxonCount()) - static_cast<int>(rhs.TaxonCount());
  if (taxon_diff != 0) {
    return taxon_diff;
  }
  // Compare nodes.
  auto lhs_nodes = lhs.BuildSetOfNodeBitsets();
  auto rhs_nodes = rhs.BuildSetOfNodeBitsets();
  if (lhs_nodes != rhs_nodes) {
    return (lhs_nodes < rhs_nodes) ? -1 : 1;
  }
  // Compare edges.
  auto lhs_edges = lhs.BuildSetOfEdgeBitsets();
  auto rhs_edges = rhs.BuildSetOfEdgeBitsets();
  if (lhs_edges != rhs_edges) {
    return (lhs_edges < rhs_edges) ? -1 : 1;
  }
  return 0;
}
// ** Modify GraftDAG
// Graft the parent/child subsplit pair described by an NNI onto the DAG.
SubsplitDAG::ModificationResult GraftDAG::AddNodePair(const NNIOperation &nni) {
  return AddNodePair(nni.parent_, nni.child_);
}
// Validate the pair against the host DAG, then graft it.
SubsplitDAG::ModificationResult GraftDAG::AddNodePair(const Bitset &parent_subsplit,
                                                      const Bitset &child_subsplit) {
  GetHostDAG().IsValidAddNodePair(parent_subsplit, child_subsplit);
  return AddNodes({{parent_subsplit, child_subsplit}});
}
// Graft a batch of parent/child subsplit pairs, updating the tallies of
// grafted nodes and edges from the modification result.
SubsplitDAG::ModificationResult GraftDAG::AddNodes(
    const BitsetPairVector &node_subsplit_pairs) {
  auto mods = SubsplitDAG::AddNodePairInternals(node_subsplit_pairs);
  graft_node_count_ += mods.added_node_ids.size();
  graft_edge_count_ += mods.added_edge_idxs.size();
  return mods;
}
// Drop all grafted nodes and edges, restoring the bare host DAG state.
void GraftDAG::RemoveAllGrafts() {
  ResetHostDAG(host_dag_);
  graft_node_count_ = 0;
  graft_edge_count_ = 0;
}
// ** Getters
// The underlying host DAG that this graft extends.
const SubsplitDAG &GraftDAG::GetHostDAG() const { return host_dag_; }
// ** Counts
// Number of grafted (non-host) nodes: total vertices minus host vertices.
size_t GraftDAG::GraftNodeCount() const {
  return storage_.GetVertices().size() - storage_.HostVerticesCount();
}
// Number of nodes belonging to the host DAG.
size_t GraftDAG::HostNodeCount() const { return storage_.HostVerticesCount(); }
// Number of grafted (non-host) edges: total lines minus host lines.
size_t GraftDAG::GraftEdgeCount() const {
  return storage_.GetLines().size() - storage_.HostLinesCount();
}
// Number of edges belonging to the host DAG (leaf subsplits included).
size_t GraftDAG::HostEdgeCount() const {
  return GetHostDAG().EdgeCountWithLeafSubsplits();
}
// Host ids precede graft ids, so host membership is a simple range check.
bool GraftDAG::IsNodeFromHost(NodeId node_id) const {
  return node_id < HostNodeCount();
}
bool GraftDAG::IsEdgeFromHost(EdgeId edge_id) const {
  return edge_id < HostEdgeCount();
}
// ** Contains
// True iff the subsplit is present and corresponds to a grafted (non-host) node.
bool GraftDAG::ContainsGraftNode(const Bitset node_subsplit) const {
  if (!ContainsNode(node_subsplit)) return false;
  return !IsNodeFromHost(GetDAGNodeId(node_subsplit));
}
// True iff the id refers to a node that exists and is grafted (non-host).
bool GraftDAG::ContainsGraftNode(const NodeId node_id) const {
  if (IsNodeFromHost(node_id)) return false;
  return ContainsNode(node_id);
}
// True iff an edge exists between the two nodes and is a grafted edge.
bool GraftDAG::ContainsGraftEdge(const NodeId parent_id, const NodeId child_id) const {
  auto edge = storage_.GetLine(parent_id, child_id);
  if (!edge.has_value()) return false;
  return !IsEdgeFromHost(edge.value().GetId());
}
// True iff the edge index exists and refers to a grafted edge.
bool GraftDAG::ContainsGraftEdge(const EdgeId edge_idx) const {
  auto edge = storage_.GetLine(edge_idx);
  if (!edge.has_value()) return false;
  return !IsEdgeFromHost(edge.value().GetId());
}
// ** Miscellaneous
// Partial likelihood vector index for the given PLV type and node id.
size_t GraftDAG::GetPLVIndex(PLVNodeHandler::PLVType plv_type, NodeId node_id) const {
  return PLVNodeHandler::GetPVIndex(plv_type, node_id, NodeCountWithoutDAGRoot())
      .value_;
}
| 4,373
|
C++
|
.cpp
| 119
| 33.722689
| 87
| 0.723314
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,006
|
tree_collection.cpp
|
phylovi_bito/src/tree_collection.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "tree_collection.hpp"
// Explicit class template instantiation:
// https://en.cppreference.com/w/cpp/language/class_template#Explicit_instantiation
// Instantiating here lets other translation units link against the
// Tree-specialized collection without generating their own copy.
template class GenericTreeCollection<Tree>;
| 325
|
C++
|
.cpp
| 6
| 52.833333
| 83
| 0.817035
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,007
|
taxon_name_munging.cpp
|
phylovi_bito/src/taxon_name_munging.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "taxon_name_munging.hpp"
#include <iomanip>
#include <iostream>
#include <regex>
#include <sstream>
#include "numerical_utils.hpp"
// Wrap in_str in double quotes, escaping embedded quotes and backslashes via
// std::quoted's default rules.
std::string TaxonNameMunging::QuoteString(const std::string &in_str) {
  std::ostringstream quoted;
  quoted << std::quoted(in_str);
  return quoted.str();
}
// Undo quoting: if in_str begins with a single or double quote, parse it with
// std::quoted using that character as the delimiter (handling escapes);
// otherwise return the string unchanged.
std::string TaxonNameMunging::DequoteString(const std::string &in_str) {
  if (in_str.empty()) {
    return std::string();
  }
  const char first_char = in_str.at(0);
  const bool is_quoted = (first_char == '\'' || first_char == '"');
  if (!is_quoted) {
    return std::string(in_str);
  }
  std::stringstream parser(in_str);
  std::string dequoted;
  parser >> std::quoted(dequoted, first_char);
  return dequoted;
}
// Apply f to every value of in_map, keeping the keys unchanged.
TagStringMap TransformStringValues(std::function<std::string(const std::string &)> f,
                                   const TagStringMap &in_map) {
  TagStringMap out_map;
  for (const auto &[tag, value] : in_map) {
    out_map.insert({tag, f(value)});
  }
  return out_map;
}
// Strip quoting from every taxon name in the map.
TagStringMap TaxonNameMunging::DequoteTagStringMap(const TagStringMap &tag_string_map) {
  return TransformStringValues(DequoteString, tag_string_map);
}
// Rescale dates in place so the largest date becomes 0 and every other entry
// becomes its distance from that maximum.
void TaxonNameMunging::MakeDatesRelativeToMaximum(TagDoubleMap &tag_date_map) {
  double maximum = DOUBLE_NEG_INF;
  for (const auto &entry : tag_date_map) {
    maximum = std::max(entry.second, maximum);
  }
  for (auto &entry : tag_date_map) {
    entry.second = maximum - entry.second;
  }
}
// Build a date map that assigns 0. to every tag of tag_taxon_map.
TagDoubleMap TaxonNameMunging::ConstantDatesForTagTaxonMap(TagStringMap tag_taxon_map) {
  TagDoubleMap constant_dates;
  for (const auto &entry : tag_taxon_map) {
    SafeInsert(constant_dates, entry.first, 0.);
  }
  return constant_dates;
}
// Parse a date from the trailing "_<number>" of each taxon name (e.g.
// "taxon_2020.5"); the number may be decimal or in scientific notation.
// Fails hard if any taxon name lacks a parseable date. The parsed dates are
// then made relative to the maximum (so the most recent becomes 0).
TagDoubleMap TaxonNameMunging::ParseDatesFromTagTaxonMap(TagStringMap tag_taxon_map) {
  TagDoubleMap tag_date_map;
  std::regex date_regex(R"raw(^.+_(\d*\.?\d+(?:[eE][-+]?\d+)?)$)raw");
  std::smatch match_date;
  for (auto &[tag, taxon] : tag_taxon_map) {
    if (std::regex_match(taxon, match_date, date_regex)) {
      double date = std::stod(match_date[1].str());
      SafeInsert(tag_date_map, tag, date);
    } else {
      Failwith("Couldn't parse a date from:" + taxon);
    }
  }
  MakeDatesRelativeToMaximum(tag_date_map);
  return tag_date_map;
}
| 2,361
|
C++
|
.cpp
| 71
| 29.760563
| 88
| 0.681579
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,008
|
sbn_maps.cpp
|
phylovi_bito/src/sbn_maps.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "sbn_maps.hpp"
#include <algorithm>
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <utility>
// For each node id of the topology, build a bitset over ids that is set at the
// node's own id and at every id in its subtree (the union of its children's
// sets), computed via a postorder traversal.
SizeBitsetMap SBNMaps::IdIdSetMapOf(const Node::NodePtr& topology) {
  SizeBitsetMap map;
  auto id_count = topology->Id() + 1;
  topology->Postorder([&map, id_count](const Node* node) {
    Bitset bitset(static_cast<size_t>(id_count));
    Assert(node->Id() < id_count, "Malformed ids in IdIdSetMapOf.");
    // Set the bit for the id of the current edge.
    bitset.set(node->Id());
    // Take the union of the children below.
    for (const auto& child : node->Children()) {
      bitset |= map.at(child->Id());
    }
    SafeInsert(map, node->Id(), std::move(bitset));
  });
  return map;
}
// For each non-root node (indexed by its id), look up the indexer position of
// the UCA-to-rootsplit PCSP induced by that node's leaf set. The root itself
// is skipped and its slot left default-initialized.
SizeVector SBNMaps::SplitIndicesOf(const BitsetSizeMap& indexer,
                                   const Node::NodePtr& topology) {
  SizeVector split_result(topology->Id());
  topology->Preorder([&topology, &split_result, &indexer](const Node* node) {
    // Skip the root.
    if (node != topology.get()) {
      const Bitset rootsplit_pcsp = Bitset::PCSPFromUCAToRootsplit(
          Bitset::RootsplitSubsplitOfClade(node->Leaves()));
      split_result[node->Id()] = indexer.at(rootsplit_pcsp);
    }
  });
  return split_result;
}
// Stringify a PCSPCounter: parent bitsets become their string form and each
// child dictionary is stringified in turn.
StringPCSPMap SBNMaps::StringPCSPMapOf(PCSPCounter d) {
  StringPCSPMap d_str;
  for (const auto& [parent, child_dict] : d) {
    d_str[parent.ToString()] = StringifyMap(child_dict.Map());
  }
  return d_str;
}
// Flatten a bitset->double map into a lexicographically sorted vector of
// (bitset string, value) pairs.
StringDoubleVector SBNMaps::StringDoubleVectorOf(BitsetDoubleMap m) {
  StringDoubleVector result;
  result.reserve(m.size());
  for (const auto& [k, v] : m) {
    result.push_back({k.ToString(), v});
  }
  std::sort(result.begin(), result.end());
  return result;
}
// Assemble the 3*leaf_count PCSP bitset from three leaf-set chunks: sister,
// focal, and the lexicographic minimum of the two child leaf sets. A
// *_direction flag of true means the corresponding leaf set is complemented
// (the subsplit points the other way). Null child pointers produce an
// all-zero child chunk (used for leaf PCSPs).
Bitset SBNMaps::PCSPBitsetOf(const size_t leaf_count,  //
                             const Node* sister_node, bool sister_direction,
                             const Node* focal_node, bool focal_direction,
                             const Node* child0_node, bool child0_direction,
                             const Node* child1_node, bool child1_direction) {
  Bitset bitset(3 * leaf_count, false);
  bitset.CopyFrom(sister_node->Leaves(), 0, sister_direction);
  bitset.CopyFrom(focal_node->Leaves(), leaf_count, focal_direction);
  if (child0_node == nullptr || child1_node == nullptr) {
    Bitset null(leaf_count);
    bitset.CopyFrom(null, 2 * leaf_count, false);
  } else {
    auto child0_bitset = child0_node->Leaves();
    if (child0_direction) {
      child0_bitset.flip();
    }
    auto child1_bitset = child1_node->Leaves();
    if (child1_direction) {
      child1_bitset.flip();
    }
    bitset.CopyFrom(std::min(child0_bitset, child1_bitset), 2 * leaf_count, false);
  }
  return bitset;
}
// Build the SBN indexing structures from rootsplit and PCSP counters: the
// rootsplit list, a PCSP-bitset -> index map, an index -> child-subsplit map,
// and a parent-subsplit -> contiguous index range map. Rootsplit PCSPs occupy
// the first indices, followed by the remaining PCSPs grouped by parent. The
// total index count is returned as the bundle's last element.
IndexerBundle SBNMaps::BuildIndexerBundle(const BitsetSizeDict& rootsplit_counter,
                                          const PCSPCounter& pcsp_counter) {
  BitsetVector rootsplits;
  BitsetSizeMap indexer;
  SizeBitsetMap index_to_child;
  BitsetSizePairMap parent_to_range;
  size_t index = 0;
  // Start by adding the rootsplit PCSPs.
  size_t taxon_count((rootsplit_counter.begin()->first).size() / 2);
  Bitset uca_root(Bitset::UCASubsplitOfTaxonCount(taxon_count));
  // Note: uca_root is rotated before being inserted into parent_to_range
  // because the rootsplits are connected to the DAG root via rotated edges.
  SafeInsert(parent_to_range, uca_root.SubsplitRotate(),
             {index, index + rootsplit_counter.size()});
  for (const auto& iter : rootsplit_counter) {
    rootsplits.push_back(iter.first);
    SafeInsert(indexer, Bitset::PCSPFromUCAToRootsplit(iter.first), index);
    SafeInsert(index_to_child, index, iter.first);
    index++;
  }
  // Now add the PCSPs.
  for (const auto& [parent, child_counter] : pcsp_counter) {
    SafeInsert(parent_to_range, parent, {index, index + child_counter.size()});
    for (const auto& child_iter : child_counter) {
      const auto& pcsp = parent + child_iter.first;
      SafeInsert(indexer, pcsp, index);
      SafeInsert(index_to_child, index, pcsp.PCSPGetChildSubsplit());
      index++;
    }
  }
  return {rootsplits, indexer, index_to_child, parent_to_range, index};
}
// Count rootsplits across unrooted topologies: every non-root node's leaf set
// induces a candidate rootsplit (rooting on that node's edge), and each is
// incremented by its topology's multiplicity.
BitsetSizeDict UnrootedSBNMaps::RootsplitCounterOf(
    const Node::TopologyCounter& topologies) {
  BitsetSizeDict rootsplit_counter(0);
  for (const auto& [topology, topology_count] : topologies) {
    auto Aux = [&rootsplit_counter,
                &topology_count = topology_count](const Node* node) {
      const Bitset rootsplit = Bitset::RootsplitSubsplitOfClade(node->Leaves());
      rootsplit_counter.increment(std::move(rootsplit), topology_count);
    };
    // Skip the (trifurcating) root itself; traverse each of its subtrees.
    for (const auto& child : topology->Children()) {
      child->Preorder(Aux);
    }
  }
  return rootsplit_counter;
}
// See functions below or the comments above the definition of UnrootedPCSPFun to
// understand the collection of arguments starting with `sister_node`.
// Builds the parent bitset (sister chunk then focal chunk, each optionally
// complemented per its direction flag) and the canonical child bitset (the
// min of the two, possibly complemented, child leaf sets), then increments
// that (parent, child) entry of pcsp_dict by topology_count.
void AddToPCSPCounter(PCSPCounter& pcsp_dict, const size_t topology_count,
                      const size_t leaf_count,  //
                      const Node* sister_node, bool sister_direction,
                      const Node* focal_node, bool focal_direction,
                      const Node* child0_node, bool child0_direction,
                      const Node* child1_node, bool child1_direction) {
  Bitset parent(2 * leaf_count, false);
  // The first chunk is for the sister node.
  parent.CopyFrom(sister_node->Leaves(), 0, sister_direction);
  // The second chunk is for the focal node.
  parent.CopyFrom(focal_node->Leaves(), leaf_count, focal_direction);
  // Now we build the child bitset.
  auto child0 = child0_node->Leaves();
  if (child0_direction) {
    child0.flip();
  }
  auto child1 = child1_node->Leaves();
  if (child1_direction) {
    child1.flip();
  }
  // Take the lexicographic minimum as the canonical child representation.
  auto child = std::min(child0, child1);
  // Insert the parent-child pair into the map.
  auto search = pcsp_dict.find(parent);
  if (search == pcsp_dict.end()) {
    // The first time we have seen this parent.
    BitsetSizeDict child_singleton(0);
    child_singleton.increment(std::move(child), topology_count);
    SafeInsert(pcsp_dict, std::move(parent), std::move(child_singleton));
  } else {
    search->second.increment(std::move(child), topology_count);
  }
}
// Accumulate PCSP counts across unrooted topologies (each must have a
// trifurcation at the root), visiting every virtual rooting via
// UnrootedPCSPPreorder and weighting each PCSP by its topology's count.
PCSPCounter UnrootedSBNMaps::PCSPCounterOf(const Node::TopologyCounter& topologies) {
  PCSPCounter pcsp_dict;
  for (const auto& [topology, topology_count] : topologies) {
    auto leaf_count = topology->LeafCount();
    Assert(topology->Children().size() == 3,
           "UnrootedSBNMaps::PCSPCounterOf was expecting a tree with a trifurcation at "
           "the root!");
    topology->UnrootedPCSPPreorder(
        [&pcsp_dict, &topology_count = topology_count, &leaf_count](
            const Node* sister_node, bool sister_direction, const Node* focal_node,
            bool focal_direction, const Node* child0_node, bool child0_direction,
            const Node* child1_node, bool child1_direction,
            const Node*  // ignore virtual root clade
        ) {
          AddToPCSPCounter(pcsp_dict, topology_count, leaf_count, sister_node,
                           sister_direction, focal_node, focal_direction, child0_node,
                           child0_direction, child1_node, child1_direction);
        });
  }
  return pcsp_dict;
}
// Return the rootsplit of a rooted bifurcating topology: the subsplit derived
// from the leaf set of the root's first child (its complement being the
// second child's leaves).
Bitset Rootsplit(const Node* rooted_topology) {
  const auto& root_children = rooted_topology->Children();
  Assert(root_children.size() == 2, "Rootsplit expects a bifurcating tree.");
  return Bitset::RootsplitSubsplitOfClade(root_children[0]->Leaves());
}
// Build, for each possible rooting edge of the topology (indexed by node id),
// the list of indexer positions — the rootsplit index first, then PCSP
// indices — exhibited by the tree rooted on that edge. PCSPs absent from the
// indexer map to default_index.
UnrootedIndexerRepresentation UnrootedSBNMaps::IndexerRepresentationOf(
    const BitsetSizeMap& indexer, const Node::NodePtr& topology,
    const size_t default_index) {
  const auto leaf_count = topology->LeafCount();
  // First, the rootsplits.
  SizeVector rootsplit_result = SBNMaps::SplitIndicesOf(indexer, topology);
  // We initialize each vector with the rootsplit index.
  SizeVectorVector result(topology->Id());
  std::transform(rootsplit_result.begin(), rootsplit_result.end(), result.begin(),
                 [&topology](const auto rootsplit) {
                   SizeVector v = {rootsplit};
                   // The number of PCSPs is less than number of internal nodes/2.
                   v.reserve(topology->Id() / 2);
                   return v;
                 });
  // Now we append the PCSPs.
  topology->UnrootedPCSPPreorder(
      [&indexer, &default_index, &leaf_count, &result, &topology](
          const Node* sister_node, bool sister_direction, const Node* focal_node,
          bool focal_direction, const Node* child0_node, bool child0_direction,
          const Node* child1_node, bool child1_direction,
          const Node* virtual_root_clade) {
        const auto bitset = SBNMaps::PCSPBitsetOf(
            leaf_count, sister_node, sister_direction, focal_node, focal_direction,
            child0_node, child0_direction, child1_node, child1_direction);
        const auto indexer_position = AtWithDefault(indexer, bitset, default_index);
        const auto& focal_index = focal_node->Id();
        if (sister_node == focal_node) {
          // We are in the bidirectional edge situation.
          Assert(focal_index < result.size(), "focal_index out of range.");
          // Rooting at the present edge will indeed lead to the given PCSP.
          result[focal_index].push_back(indexer_position);
        } else {
          // The only time the virtual root clade should be nullptr should be when
          // sister_node == focal_node, but we check anyhow.
          Assert(virtual_root_clade != nullptr, "virtual_root_clade is null.");
          // Virtual-rooting on every edge in the virtual rooting clade will also
          // lead to this PCSP, because then the root will be "above" the PCSP.
          virtual_root_clade->ConditionalPreorder([&result, &indexer_position,
                                                   &sister_node, &focal_node,
                                                   &topology](const Node* node) {
            if (node == sister_node || node == focal_node) {
              // Don't enter the sister or focal clades. This is only
              // activated in the second case on the bottom row of pcsp.svg:
              // we want to add everything in the clade above the focal node,
              // but nothing else.
              return false;
            }  // else
            // Add all of the edges of the virtual rooting clade, except for the
            // root of the topology.
            if (node != topology.get()) {
              Assert(node->Id() < result.size(), "node's root Id is out of range.");
              result[node->Id()].push_back(indexer_position);
            }
            return true;
          });
        }
      });
  return result;
}
// Pair each topology's indexer representation with that topology's count.
UnrootedIndexerRepresentationCounter UnrootedSBNMaps::IndexerRepresentationCounterOf(
    const BitsetSizeMap& indexer, const Node::TopologyCounter& topology_counter,
    const size_t default_index) {
  UnrootedIndexerRepresentationCounter result;
  result.reserve(topology_counter.size());
  for (const auto& [topology, count] : topology_counter) {
    auto representation =
        UnrootedSBNMaps::IndexerRepresentationOf(indexer, topology, default_index);
    result.push_back({std::move(representation), count});
  }
  return result;
}
// Convert each per-rooting representation into a set of strings via the
// reversed indexer.
StringSetVector UnrootedSBNMaps::StringIndexerRepresentationOf(
    const StringVector& reversed_indexer,
    const UnrootedIndexerRepresentation& indexer_representation) {
  StringSetVector result;
  result.reserve(indexer_representation.size());
  for (const auto& rooted_representation : indexer_representation) {
    auto string_set = RootedSBNMaps::StringIndexerRepresentationOf(
        reversed_indexer, rooted_representation);
    result.push_back(std::move(string_set));
  }
  return result;
}
// Count rootsplits of rooted topologies, weighting each by its multiplicity.
BitsetSizeDict RootedSBNMaps::RootsplitCounterOf(
    const Node::TopologyCounter& topologies) {
  BitsetSizeDict counter(0);
  for (const auto& [topology, count] : topologies) {
    counter.increment(Rootsplit(topology.get()), count);
  }
  return counter;
}
// Accumulate PCSP counts over rooted bifurcating topologies, weighting each
// PCSP by its topology's count. Leaf-focal quartets are excluded by passing
// allow_leaves = false to RootedPCSPPreorder.
PCSPCounter RootedSBNMaps::PCSPCounterOf(const Node::TopologyCounter& topologies) {
  PCSPCounter pcsp_dict;
  for (const auto& [topology, topology_count] : topologies) {
    auto leaf_count = topology->LeafCount();
    Assert(topology->Children().size() == 2,
           "RootedSBNMaps::PCSPCounterOf was expecting a bifurcating tree!");
    topology->RootedPCSPPreorder(
        [&pcsp_dict, &topology_count = topology_count, &leaf_count](
            const Node* sister_node, const Node* focal_node, const Node* child0_node,
            const Node* child1_node) {
          // Rooted traversals never flip leaf sets, so all directions are false.
          AddToPCSPCounter(pcsp_dict, topology_count, leaf_count, sister_node, false,
                           focal_node, false, child0_node, false, child1_node, false);
        },
        false);
  }
  return pcsp_dict;
}
SizeVector RootedSBNMaps::IndexerRepresentationOf(const BitsetSizeMap& indexer,
const Node::NodePtr& topology,
const size_t default_index) {
const auto leaf_count = topology->LeafCount();
SizeVector result;
// First, add the rootsplit PCSPs.
const Bitset rootsplit_pcsp =
Bitset::PCSPFromUCAToRootsplit(Rootsplit(topology.get()));
result.push_back(AtWithDefault(indexer, rootsplit_pcsp, default_index));
// Now add the PCSPs.
topology->RootedPCSPPreorder(
[&leaf_count, &indexer, &default_index, &result](
const Node* sister_node, const Node* focal_node, const Node* child0_node,
const Node* child1_node) {
Bitset pcsp_bitset =
SBNMaps::PCSPBitsetOf(leaf_count, sister_node, false, focal_node, false,
child0_node, false, child1_node, false);
result.push_back(AtWithDefault(indexer, pcsp_bitset, default_index));
},
false);
return result;
}
// Translate each index in a rooted indexer representation into its string via
// the reversed indexer.
StringSet RootedSBNMaps::StringIndexerRepresentationOf(
    const StringVector& reversed_indexer,
    const RootedIndexerRepresentation& indexer_representation) {
  StringSet result;
  for (const auto idx : indexer_representation) {
    SafeInsert(result, reversed_indexer.at(idx));
  }
  return result;
}
// Pair each distinct topology's rooted indexer representation with the number
// of times that topology was observed.
RootedIndexerRepresentationCounter RootedSBNMaps::IndexerRepresentationCounterOf(
    const BitsetSizeMap& indexer, const Node::TopologyCounter& topology_counter,
    const size_t default_index) {
  RootedIndexerRepresentationCounter representation_counter;
  representation_counter.reserve(topology_counter.size());
  for (const auto& [current_topology, count] : topology_counter) {
    representation_counter.push_back(
        {RootedSBNMaps::IndexerRepresentationOf(indexer, current_topology,
                                                default_index),
         count});
  }
  return representation_counter;
}
// Canonicalize a rooted indexer representation and bump its count in `dict`.
void RootedSBNMaps::IncrementRootedIndexerRepresentationSizeDict(
    RootedIndexerRepresentationSizeDict& dict,
    RootedIndexerRepresentation rooted_indexer_representation) {
  Assert(rooted_indexer_representation.size() > 1,
         "Rooted indexer representation is too small in "
         "IncrementRootedIndexerRepresentationSizeDict!");
  // Keep the rootsplit index in slot 0; canonicalize the order of the rest.
  const auto pcsp_begin = rooted_indexer_representation.begin() + 1;
  std::sort(pcsp_begin, rooted_indexer_representation.end());
  dict.increment(rooted_indexer_representation, 1);
}
// Tally every rooting contained in an unrooted indexer representation.
void RootedSBNMaps::IncrementRootedIndexerRepresentationSizeDict(
    RootedIndexerRepresentationSizeDict& dict,
    const UnrootedIndexerRepresentation& indexer_representation) {
  for (const auto& per_rooting_representation : indexer_representation) {
    IncrementRootedIndexerRepresentationSizeDict(dict, per_rooting_representation);
  }
}
// Apply `function_on_tree_node_by_gpcsp` to every (edge, tree) pair in the
// collection whose PCSP bitset appears in `edge_indexer`; edges whose PCSP
// maps to `default_index` (i.e. absent from the indexer) are skipped.
void RootedSBNMaps::FunctionOverRootedTreeCollection(
    FunctionOnTreeNodeByGPCSP function_on_tree_node_by_gpcsp,
    const RootedTreeCollection& tree_collection, const BitsetSizeMap& edge_indexer,
    const size_t default_index) {
  const auto leaf_count = tree_collection.TaxonCount();
  size_t tree_id = 0;
  for (const auto& tree : tree_collection.Trees()) {
    tree.Topology()->RootedPCSPPreorder(
        [&leaf_count, &default_index, &edge_indexer, &tree, &tree_id,
         &function_on_tree_node_by_gpcsp](
            const Node* sister_node, const Node* focal_node,
            const Node* left_child_node, const Node* right_child_node) {
          Bitset edge_bitset =
              SBNMaps::PCSPBitsetOf(leaf_count, sister_node, false, focal_node, false,
                                    left_child_node, false, right_child_node, false);
          const auto edge_idx = AtWithDefault(edge_indexer, edge_bitset, default_index);
          // Only invoke the callback for edges known to the indexer.
          if (edge_idx != default_index) {
            function_on_tree_node_by_gpcsp(EdgeId(edge_idx), edge_bitset, tree, tree_id,
                                           focal_node);
          }
        },
        true);
    tree_id++;
  }
}
| 17,057
|
C++
|
.cpp
| 378
| 37.936508
| 88
| 0.674952
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,009
|
stick_breaking_transform.cpp
|
phylovi_bito/src/stick_breaking_transform.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// This code closely follows
// https://mc-stan.org/docs/2_26/reference-manual/simplex-transform-section.html
// and so we follow their notation.
#include "stick_breaking_transform.hpp"
// Logistic sigmoid: maps the real line onto (0, 1).
inline double inverse_logit(const double y) {
  const double denominator = 1 + std::exp(-y);
  return 1.0 / denominator;
}
// Log-odds: inverse of the logistic sigmoid, maps (0, 1) onto the real line.
inline double logit(const double x) {
  const double odds = x / (1.0 - x);
  return std::log(odds);
}
// Numerically stable computation of log(1 + exp(a)).
//
// For positive a, exp(a) could overflow, so we use the identity
// log(1 + exp(a)) = a + log(1 + exp(-a)); otherwise log1p keeps precision
// when exp(a) is tiny.
//
// Fix: qualify `exp` as `std::exp` — the original mixed `std::log1p` with an
// unqualified `exp`, which relies on <cmath> leaking names into the global
// namespace (implementation-defined).
inline double log1p_exp(double a) {
  // prevents underflow
  if (a > 0.0) return a + std::log1p(std::exp(-a));
  return std::log1p(std::exp(a));
}
// Forward stick-breaking transform: map an unconstrained vector y of length
// K - 1 onto a length-K simplex x (nonnegative entries summing to one).
EigenVectorXd StickBreakingTransform::operator()(EigenVectorXd const& y) const {
  size_t K = y.size() + 1;
  EigenVectorXd x(K);
  double stick = 1.0;
  for (size_t k = 0; k < K - 1; k++) {
    // The log(K - k - 1) offset centers the transform so that y = 0 maps to
    // the uniform simplex.
    double z = inverse_logit(y[k] - std::log(K - k - 1));
    x[k] = stick * z;
    stick -= x[k];
  }
  // The final coordinate takes whatever remains of the stick.
  x[K - 1] = stick;
  return x;
}

// Inverse stick-breaking transform: map a length-K simplex x back to the
// unconstrained vector y of length K - 1.
EigenVectorXd StickBreakingTransform::inverse(const EigenVectorXd& x) const {
  size_t K = x.size();
  double sum = 0;
  EigenVectorXd y(K - 1);
  for (size_t k = 0; k < K - 1; k++) {
    // z is the fraction of the remaining stick taken by x[k].
    double z = x[k] / (1.0 - sum);
    y[k] = logit(z) + std::log(K - k - 1);
    sum += x[k];
  }
  return y;
}
// Log absolute determinant of the Jacobian of the stick-breaking transform,
// evaluated using both the constrained point x and the unconstrained point y.
//
// Fix: qualify `log` as `std::log` for consistency with the rest of the file
// and to avoid depending on <cmath> leaking names into the global namespace.
double StickBreakingTransform::log_abs_det_jacobian(const EigenVectorXd& x,
                                                    const EigenVectorXd& y) const {
  size_t K = x.size();
  double log_prob = 0.0;
  double stick = 1.0;
  for (size_t k = 0; k < K - 1; k++) {
    // Center y[k] with the same offset used in the forward transform.
    double adj_y_k = y[k] - std::log(K - k - 1);
    log_prob += std::log(stick);
    log_prob -= log1p_exp(-adj_y_k);
    log_prob -= log1p_exp(adj_y_k);
    stick -= x[k];
  }
  return log_prob;
}
| 1,697
|
C++
|
.cpp
| 51
| 29.392157
| 83
| 0.610128
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,010
|
substitution_model.cpp
|
phylovi_bito/src/substitution_model.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "substitution_model.hpp"
// Factory: build the substitution model named by `specification`.
// Recognized names are "JC69", "HKY", and "GTR"; anything else fails loudly.
std::unique_ptr<SubstitutionModel> SubstitutionModel::OfSpecification(
    const std::string &specification) {
  if (specification == "JC69") {
    return std::make_unique<JC69Model>();
  }
  if (specification == "HKY") {
    return std::make_unique<HKYModel>();
  }
  if (specification == "GTR") {
    return std::make_unique<GTRModel>();
  }
  Failwith("Substitution model not known: " + specification);
}
// Eigendecomposition of the JC69 rate matrix. The matrix is constant, so the
// eigenvectors and eigenvalues (0 and -4/3) are hard-coded.
void JC69Model::UpdateEigendecomposition() {
  eigenvectors_ << 1.0, 2.0, 0.0, 0.5, 1.0, -2.0, 0.5, 0.0, 1.0, 2.0, 0.0, -0.5, 1.0,
      -2.0, -0.5, 0.0;
  inverse_eigenvectors_ << 0.25, 0.25, 0.25, 0.25, 0.125, -0.125, 0.125, -0.125, 0.0,
      1.0, 0.0, -1.0, 1.0, 0.0, -1.0, 0.0;
  eigenvalues_ << 0.0, -1.3333333333333333, -1.3333333333333333, -1.3333333333333333;
}

// The JC69 Q matrix: diagonal -1, off-diagonal 1/3 (unit substitution rate).
void JC69Model::UpdateQMatrix() {
  Q_ << -1.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0, -1.0, 1.0 / 3.0, 1.0 / 3.0,
      1.0 / 3.0, 1.0 / 3.0, -1.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0, -1.0;
}
// Set the HKY model parameters from a single packed parameter vector and
// refresh the Q matrix and its eigendecomposition.
//
// Raises (via Failwith) if the frequencies do not sum to 1 within 0.001.
void HKYModel::SetParameters(const EigenVectorXdRef param_vector) {
  GetBlockSpecification().CheckParameterVectorSize(param_vector);
  rates_ = ExtractSegment(param_vector, rates_key_);
  frequencies_ = ExtractSegment(param_vector, frequencies_key_);
  if (fabs(frequencies_.sum() - 1.) >= 0.001) {
    std::ostringstream oss;
    std::copy(frequencies_.begin(), frequencies_.end() - 1,
              std::ostream_iterator<double>(oss, ","));
    // Print the final entry without a trailing comma. (The previous code
    // dereferenced end() itself, which is undefined behavior.)
    oss << *(frequencies_.end() - 1);
    Failwith("HKY frequencies do not sum to 1 +/- 0.001! frequency vector: (" +
             oss.str() + ")");
  }
  Update();
}
// Fill in the HKY Q matrix from the current rates_ (kappa in rates_[0]) and
// frequencies_, rescaled to unit expected substitution rate.
void HKYModel::UpdateQMatrix() {
  EigenVectorXd rates;
  rates.resize(6);
  // Expand kappa into the six upper-triangular exchangeabilities: the (0,2)
  // and (1,3) pairs (A<->G, C<->T) get rate kappa, the rest get rate 1.
  rates << 1.0, rates_[0], 1.0, 1.0, rates_[0], 1.0;
  int rate_index = 0;
  for (int i = 0; i < 4; i++) {
    for (int j = i + 1; j < 4; j++) {
      double rate = rates[rate_index];
      rate_index++;
      Q_(i, j) = rate * frequencies_[j];
      Q_(j, i) = rate * frequencies_[i];
    }
  }
  // Set the diagonal entries so that each row sums to zero.
  double total_substitution_rate = 0;
  for (int i = 0; i < 4; i++) {
    double row_sum = 0;
    for (int j = 0; j < 4; j++) {
      if (i != j) {
        row_sum += Q_(i, j);
      }
    }
    Q_(i, i) = -row_sum;
    total_substitution_rate += row_sum * frequencies_[i];
  }
  // Rescale matrix for unit substitution rate.
  Q_ /= total_substitution_rate;
}
// Analytical eigendecomposition.
// See Hasegawa, Kishino, and Yano, 1985 for details.
void HKYModel::UpdateEigendecomposition() {
  double kappa = rates_[0];
  double pi_a = frequencies_[0];
  double pi_c = frequencies_[1];
  double pi_g = frequencies_[2];
  double pi_t = frequencies_[3];
  // Total purine (A+G) and pyrimidine (C+T) frequencies.
  double pi_r = pi_a + pi_g;
  double pi_y = pi_c + pi_t;
  double beta = -1.0 / (2.0 * (pi_r * pi_y + kappa * (pi_a * pi_g + pi_c * pi_t)));
  eigenvalues_[0] = 0;
  eigenvalues_[1] = beta;
  eigenvalues_[2] = beta * (1 + pi_y * (kappa - 1));
  eigenvalues_[3] = beta * (1 + pi_r * (kappa - 1));
  inverse_eigenvectors_.setZero();
  eigenvectors_.setZero();
  inverse_eigenvectors_.row(0) << pi_a, pi_c, pi_g, pi_t;
  inverse_eigenvectors_.row(1) << pi_a * pi_y, -pi_c * pi_r, pi_g * pi_y, -pi_t * pi_r;
  inverse_eigenvectors_(2, 1) = 1;
  inverse_eigenvectors_(2, 3) = -1;
  inverse_eigenvectors_(3, 0) = 1;
  inverse_eigenvectors_(3, 2) = -1;
  eigenvectors_.col(0).setOnes();
  eigenvectors_.col(1) << 1. / pi_r, -1. / pi_y, 1. / pi_r, -1. / pi_y;
  eigenvectors_(1, 2) = pi_t / pi_y;
  eigenvectors_(3, 2) = -pi_c / pi_y;
  eigenvectors_(0, 3) = pi_g / pi_r;
  eigenvectors_(2, 3) = -pi_a / pi_r;
}
// Set the GTR model parameters from a single packed parameter vector and
// refresh the Q matrix and its eigendecomposition.
//
// Raises (via Failwith) if the frequencies or the rates do not sum to 1
// within 0.001.
void GTRModel::SetParameters(const EigenVectorXdRef param_vector) {
  GetBlockSpecification().CheckParameterVectorSize(param_vector);
  rates_ = ExtractSegment(param_vector, rates_key_);
  frequencies_ = ExtractSegment(param_vector, frequencies_key_);
  if (fabs(frequencies_.sum() - 1.) >= 0.001) {
    std::ostringstream oss;
    std::copy(frequencies_.begin(), frequencies_.end() - 1,
              std::ostream_iterator<double>(oss, ","));
    // Print the final entry without a trailing comma. (The previous code
    // dereferenced end() itself, which is undefined behavior.)
    oss << *(frequencies_.end() - 1);
    Failwith("GTR frequencies do not sum to 1 +/- 0.001! frequency vector: (" +
             oss.str() + ")");
  }
  if (fabs(rates_.sum() - 1.) >= 0.001) {
    std::ostringstream oss;
    std::copy(rates_.begin(), rates_.end() - 1,
              std::ostream_iterator<double>(oss, ","));
    // Same end()-dereference fix as above.
    oss << *(rates_.end() - 1);
    Failwith("GTR rates do not sum to 1 +/- 0.001! rate vector: (" + oss.str() + ")");
  }
  Update();
}
// Fill in the GTR Q matrix from the six exchangeability rates_ and the
// frequencies_, rescaled to unit expected substitution rate.
void GTRModel::UpdateQMatrix() {
  int rate_index = 0;
  for (int i = 0; i < 4; i++) {
    for (int j = i + 1; j < 4; j++) {
      double rate = rates_[rate_index];
      rate_index++;
      Q_(i, j) = rate * frequencies_[j];
      Q_(j, i) = rate * frequencies_[i];
    }
  }
  // Set the diagonal entries so that each row sums to zero.
  double total_substitution_rate = 0;
  for (int i = 0; i < 4; i++) {
    double row_sum = 0;
    for (int j = 0; j < 4; j++) {
      if (i != j) {
        row_sum += Q_(i, j);
      }
    }
    Q_(i, i) = -row_sum;
    total_substitution_rate += row_sum * frequencies_[i];
  }
  // Rescale matrix for unit substitution rate.
  Q_ /= total_substitution_rate;
}
// Numerical eigendecomposition of the rate matrix via a symmetrizing
// similarity transform, using Eigen's self-adjoint solver.
void DNAModel::UpdateEigendecomposition() {
  Eigen::Map<const Eigen::Array4d> tmp(&frequencies_[0]);
  EigenMatrixXd sqrt_frequencies = EigenMatrixXd(tmp.sqrt().matrix().asDiagonal());
  EigenMatrixXd sqrt_frequencies_inv = EigenMatrixXd(sqrt_frequencies.inverse());
  EigenMatrixXd S = EigenMatrixXd(sqrt_frequencies * Q_ * sqrt_frequencies_inv);
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix4d> solver(S);
  // See p.206 of Felsenstein's book. We can get the eigendecomposition of a GTR
  // model by first getting the eigendecomposition of an associated diagonal
  // matrix and then doing this transformation.
  eigenvectors_ = sqrt_frequencies_inv * solver.eigenvectors();
  inverse_eigenvectors_ = solver.eigenvectors().transpose() * sqrt_frequencies;
  eigenvalues_ = solver.eigenvalues();
}
// Refresh all derived state after a parameter change.
void DNAModel::Update() {
  UpdateQMatrix();
  UpdateEigendecomposition();
}
| 6,248
|
C++
|
.cpp
| 162
| 34.549383
| 87
| 0.61904
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,011
|
tidy_subsplit_dag.cpp
|
phylovi_bito/src/tidy_subsplit_dag.cpp
|
// Copyright 2019-2020 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "tidy_subsplit_dag.hpp"
// Default construction: an empty DAG.
TidySubsplitDAG::TidySubsplitDAG() : SubsplitDAG() {}
// Build the tidy DAG from a collection of rooted trees.
TidySubsplitDAG::TidySubsplitDAG(const RootedTreeCollection &tree_collection)
    : TidySubsplitDAG(tree_collection.TaxonCount(), tree_collection.TopologyCounter(),
                      tree_collection.TagTaxonMap()) {}
// Build an otherwise-empty tidy DAG with identity "above" matrices of the
// given size; used for hand-constructed examples (see ManualTrivialExample).
TidySubsplitDAG::TidySubsplitDAG(size_t node_count)
    : above_rotated_(EigenMatrixXb::Identity(node_count, node_count)),
      above_sorted_(EigenMatrixXb::Identity(node_count, node_count)){};
// Build the underlying SubsplitDAG, then derive the tidy bookkeeping vectors.
TidySubsplitDAG::TidySubsplitDAG(size_t taxon_count,
                                 const Node::TopologyCounter &topology_counter,
                                 const TagStringMap &tag_taxon_map)
    : SubsplitDAG(taxon_count, topology_counter, tag_taxon_map) {
  ReinitializeTidyVectors();
}
// Rebuild the tidy bookkeeping from scratch: identity "above" matrices and
// cleared dirty flags, then a depth-first pass over the DAG recording, for
// each edge visited, which nodes lie below each node-clade.
void TidySubsplitDAG::ReinitializeTidyVectors() {
  auto node_count = NodeCount();
  above_rotated_.resize(node_count, node_count);
  above_rotated_.setIdentity();
  above_sorted_.resize(node_count, node_count);
  above_sorted_.setIdentity();
  dirty_rotated_.resize(node_count);
  dirty_rotated_.setZero();
  dirty_sorted_.resize(node_count);
  dirty_sorted_.setZero();
  SubsplitDAG::DepthFirstWithAction(
      {GetDAGRootNodeId()},
      SubsplitDAGTraversalAction(
          // BeforeNode
          [](NodeId node_id) {},
          // AfterNode
          [](NodeId node_id) {},
          // BeforeNodeClade
          [](NodeId node_id, bool is_edge_on_left) {},
          // VisitEdge
          [this](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            SetBelow(node_id, is_edge_on_left, child_id);
          }));
}
// Which nodes lie below `node_id`, combining both clade directions.
EigenArrayXb TidySubsplitDAG::BelowNode(NodeId node_id) {
  return BelowNode(false, node_id).max(BelowNode(true, node_id));
}
// Which nodes lie below the given clade of `node_id`: a writable view of the
// corresponding column of the appropriate "above" matrix.
EigenArrayXbRef TidySubsplitDAG::BelowNode(bool is_edge_on_left, NodeId node_id) {
  if (is_edge_on_left) {
    return above_rotated_.col(node_id.value_).array();
  } else {
    return above_sorted_.col(node_id.value_).array();
  }
}
// Which node-clades lie above `node_id`, combining both directions.
EigenArrayXb TidySubsplitDAG::AboveNode(NodeId node_id) const {
  return AboveNode(false, node_id).max(AboveNode(true, node_id));
}
// Which node-clades of the given direction lie above `node_id`: the
// corresponding row of the appropriate "above" matrix.
EigenArrayXb TidySubsplitDAG::AboveNode(bool is_edge_on_left, NodeId node_id) const {
  if (is_edge_on_left) {
    return above_rotated_.row(node_id.value_).array();
  } else {
    return above_sorted_.row(node_id.value_).array();
  }
}
// The dirty-flag vector for the given clade direction.
EigenArrayXbRef TidySubsplitDAG::DirtyVector(bool is_edge_on_left) {
  if (is_edge_on_left) {
    return dirty_rotated_;
  } else {
    return dirty_sorted_;
  }
}
// Is any node-clade below the given clade of `node_id` dirty?
bool TidySubsplitDAG::IsDirtyBelow(NodeId node_id, bool is_edge_on_left) {
  // We use `min` as a way of getting "and": we want to find if there are any dirty
  // node-clades below us.
  return BelowNode(is_edge_on_left, node_id)
      .min(DirtyVector(is_edge_on_left))
      .maxCoeff();
}
// Mark every node-clade strictly above `node_id` (in both directions) dirty.
void TidySubsplitDAG::SetDirtyStrictlyAbove(NodeId node_id) {
  for (const bool is_edge_on_left : {false, true}) {
    EigenArrayXbRef dirty = DirtyVector(is_edge_on_left);
    EigenArrayXb to_make_dirty = AboveNode(is_edge_on_left, node_id);
    // We are only dirtying things that are strictly above us.
    to_make_dirty[node_id.value_] = false;
    // We use `max` as a way of getting "or": we want to maintain anything that's
    // already dirty as dirty, while adding all nodes strictly above us.
    dirty = dirty.max(to_make_dirty);
  }
}
// Mark every node-clade as clean and drop any pending update target.
void TidySubsplitDAG::SetClean() {
  updating_below_ = std::nullopt;
  dirty_rotated_.setConstant(false);
  dirty_sorted_.setConstant(false);
}

// Render a boolean Eigen matrix as text, one row per line.
std::string EigenMatrixXbToString(EigenMatrixXb m) {
  std::stringstream string_stream;
  // Streaming the whole matrix at once (string_stream << m) does not work
  // here, so we stream one row at a time.
  for (Eigen::Index row_idx = 0; row_idx < m.rows(); row_idx++) {
    string_stream << m.row(row_idx) << "\n";
  }
  return string_stream.str();
}

// Bundle both "above" matrices into a single bracketed string.
std::string TidySubsplitDAG::AboveMatricesAsString() const {
  std::stringstream string_stream;
  string_stream << "[\n"
                << EigenMatrixXbToString(above_rotated_) << ", \n"
                << EigenMatrixXbToString(above_sorted_) << "\n]";
  return string_stream.str();
}
// A tiny example DAG built from the single tree ((0,1),2).
TidySubsplitDAG TidySubsplitDAG::TrivialExample() {
  // ((0,1),2)
  Node::NodePtr topology =
      Node::Join(Node::Join(Node::Leaf(0), Node::Leaf(1)), Node::Leaf(2));
  topology->Polish();
  TagStringMap taxon_map = TidySubsplitDAG::BuildDummyTagTaxonMap(3);
  return TidySubsplitDAG(3, {{topology, 1}}, taxon_map);
}
// The same tree, but with the below-relationships set by hand on a bare
// 6-node tidy DAG.
TidySubsplitDAG TidySubsplitDAG::ManualTrivialExample() {
  auto manual_dag = TidySubsplitDAG(6);
  // The tree ((0,1)3,2)4:
  // https://github.com/phylovi/bito/issues/349#issuecomment-897963382
  manual_dag.SetBelow(NodeId(3), true, NodeId(0));
  manual_dag.SetBelow(NodeId(3), false, NodeId(1));
  manual_dag.SetBelow(NodeId(4), false, NodeId(2));
  manual_dag.SetBelow(NodeId(4), true, NodeId(3));
  manual_dag.SetBelow(NodeId(5), true, NodeId(4));
  return manual_dag;
}
// A 4-taxon example DAG built from two of the canned example topologies.
TidySubsplitDAG TidySubsplitDAG::MotivatingExample() {
  auto topologies = Node::ExampleTopologies();
  TagStringMap taxon_map = TidySubsplitDAG::BuildDummyTagTaxonMap(4);
  return TidySubsplitDAG(4, {{topologies[3], 1}, {topologies[4], 1}}, taxon_map);
}
// Perform a tidy depth-first traversal from the DAG root and return a textual
// log of the clade descents and of which edges were visited as "modify"
// versus "update" operations; used to exercise/inspect the tidy traversal.
std::string TidySubsplitDAG::RecordTraversal() {
  std::stringstream result;
  result << std::boolalpha;
  DepthFirstWithTidyAction(
      {GetDAGRootNodeId()},
      TidySubsplitDAGTraversalAction(
          // BeforeNode
          [](NodeId node_id) {},
          // AfterNode
          [](NodeId node_id) {},
          // BeforeNodeClade
          [&result](NodeId node_id, bool is_edge_on_left) {
            result << "descending along " << node_id << ", " << is_edge_on_left << "\n";
          },
          // ModifyEdge
          [&result](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            result << "modifying: ";
            result << node_id << ", " << child_id << ", " << is_edge_on_left << "\n";
          },
          // UpdateEdge
          [&result](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            result << "updating: ";
            result << node_id << ", " << child_id << ", " << is_edge_on_left << "\n";
          }));
  return result.str();
}
// Record that everything below `child_id` is also below the given clade of
// `parent_id` (elementwise "or" via max on the boolean arrays).
void TidySubsplitDAG::SetBelow(NodeId parent_id, bool is_edge_on_left,
                               NodeId child_id) {
  BelowNode(is_edge_on_left, parent_id) =
      BelowNode(is_edge_on_left, parent_id).max(BelowNode(child_id));
}
| 6,548
|
C++
|
.cpp
| 161
| 35.254658
| 88
| 0.667767
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,012
|
numerical_utils.cpp
|
phylovi_bito/src/numerical_utils.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "numerical_utils.hpp"
#include <iostream>
// Fold the numerically stable LogAdd over the vector, giving
// log(sum_i exp(vec[i])).
double NumericalUtils::LogSum(const EigenVectorXdRef vec) { return vec.redux(LogAdd); }
// Elementwise log-add: result[i] = LogAdd(vec1[i], vec2[i]).
//
// Raises (via Assert) if the two vectors differ in length.
EigenVectorXd NumericalUtils::LogAddVectors(const EigenVectorXdRef vec1,
                                            const EigenVectorXdRef vec2) {
  // Validate before allocating the result buffer (the original allocated
  // first, doing wasted work on the failure path).
  Assert(vec1.size() == vec2.size(),
         "LogAddVectors: two vectors must have the same length.");
  EigenVectorXd result(vec1.size());
  std::transform(vec1.begin(), vec1.end(), vec2.begin(), result.begin(), LogAdd);
  return result;
}
// Normalize a vector of log-values in place so that it sums to one in
// probability space (subtract the log of the total).
void NumericalUtils::ProbabilityNormalizeInLog(EigenVectorXdRef vec) {
  vec = vec.array() - LogSum(vec);
}
// Exponentiate a vector elementwise in place.
void NumericalUtils::Exponentiate(EigenVectorXdRef vec) { vec = vec.array().exp(); }
// This is any FE exception except for FE_INEXACT, which happens all the time.
constexpr auto FE_WORRYING_EXCEPT =
    FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW;
// Describe which worrying floating point exceptions (see FE_WORRYING_EXCEPT)
// are currently raised in the floating point environment, or nullopt if none.
// Does not clear the exception flags.
std::optional<std::string>
NumericalUtils::DescribeFloatingPointEnvironmentExceptions() {
  if (fetestexcept(FE_WORRYING_EXCEPT)) {
    std::string warning_string(
        "The following floating point problems have been encountered:");
    if (fetestexcept(FE_DIVBYZERO)) {
      warning_string.append(" FE_DIVBYZERO");
    }
    if (fetestexcept(FE_INVALID)) {
      warning_string.append(" FE_INVALID");
    }
    if (fetestexcept(FE_OVERFLOW)) {
      warning_string.append(" FE_OVERFLOW");
    }
    if (fetestexcept(FE_UNDERFLOW)) {
      warning_string.append(" FE_UNDERFLOW");
    }
    return warning_string;
  }  // else
  return std::nullopt;
}
// Print any accumulated floating point exceptions (prefixed by `context`) to
// stdout, then clear all floating point exception flags.
void NumericalUtils::ReportFloatingPointEnvironmentExceptions(std::string context) {
  const auto description = DescribeFloatingPointEnvironmentExceptions();
  if (description.has_value()) {
    std::cout << context << " " << description.value() << std::endl;
    feclearexcept(FE_ALL_EXCEPT);
  }
}
| 1,968
|
C++
|
.cpp
| 48
| 36.5
| 87
| 0.715332
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,013
|
subsplit_dag.cpp
|
phylovi_bito/src/subsplit_dag.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "subsplit_dag.hpp"

#include <algorithm>
#include <iterator>

#include "combinatorics.hpp"
#include "numerical_utils.hpp"
#include "sbn_probability.hpp"
// ** Constructors
// Default construction: an empty DAG.
SubsplitDAG::SubsplitDAG()
    : taxon_count_(0), edge_count_without_leaf_subsplits_(0), topology_count_(0.) {}
// Build the DAG from the topologies of a rooted tree collection.
SubsplitDAG::SubsplitDAG(const RootedTreeCollection &tree_collection)
    : SubsplitDAG(tree_collection.TaxonCount(), tree_collection.TopologyCounter(),
                  tree_collection.TagTaxonMap()) {}
// Core constructor: assemble the DAG's nodes, edges, and indexers from a
// counter of rooted topologies over `taxon_count` taxa.
SubsplitDAG::SubsplitDAG(size_t taxon_count,
                         const Node::TopologyCounter &topology_counter,
                         const TagStringMap &tag_taxon_map)
    : dag_taxa_(), taxon_count_(taxon_count) {
  Assert(topology_counter.size() > 0, "Empty topology counter given to SubsplitDAG.");
  Assert(topology_counter.begin()->first->LeafCount() == taxon_count,
         "Taxon count mismatch in SubsplitDAG constructor.");
  auto [edge_indexer, index_to_child, rootsplits] =
      ProcessTopologyCounter(topology_counter);
  BuildTaxonMap(tag_taxon_map);
  BuildNodes(index_to_child, rootsplits);
  BuildEdges(index_to_child);
  BuildDAGEdgesFromEdgeIndexer(edge_indexer);
  AddLeafSubsplitsToDAGEdgesAndParentToRange();
  StoreEdgeIds();
  CountTopologies();
  Assert(IsValidTaxonMap(),
         "SubsplitDAG::SubsplitDAG(): Given Taxon Map does not represent a valid map. "
         "Taxon ids do not cover all indices from 0 to taxon_count.");
}
// Copy a host DAG's state; the HostDispatchTag selects this overload.
// NOTE(review): presumably used when building a graft on top of the host —
// confirm against callers.
SubsplitDAG::SubsplitDAG(SubsplitDAG &host_dag, HostDispatchTag)
    : storage_{host_dag.storage_, HostDispatchTag()},
      dag_taxa_{host_dag.dag_taxa_},
      subsplit_to_id_{host_dag.subsplit_to_id_},
      subsplit_union_{host_dag.subsplit_union_},
      subsplit_clade_{host_dag.subsplit_clade_},
      parent_to_child_range_{host_dag.parent_to_child_range_},
      taxon_count_{host_dag.taxon_count_},
      edge_count_without_leaf_subsplits_{host_dag.edge_count_without_leaf_subsplits_},
      topology_count_{host_dag.topology_count_},
      topology_count_below_{host_dag.topology_count_below_} {}
// Re-point this DAG at the given host DAG's state and clear the graft-specific
// maps.
void SubsplitDAG::ResetHostDAG(SubsplitDAG &host_dag) {
  storage_.ResetHost(host_dag.storage_);
  dag_taxa_ = host_dag.dag_taxa_;
  subsplit_to_id_ = host_dag.subsplit_to_id_;
  parent_to_child_range_ = host_dag.parent_to_child_range_;
  taxon_count_ = host_dag.taxon_count_;
  edge_count_without_leaf_subsplits_ = host_dag.edge_count_without_leaf_subsplits_;
  topology_count_ = host_dag.topology_count_;
  topology_count_below_ = host_dag.topology_count_below_;
  subsplit_union_ = host_dag.subsplit_union_;
  subsplit_clade_ = host_dag.subsplit_clade_;
  subsplit_union_graft_.clear();
  subsplit_clade_graft_.clear();
}
// ** Comparators
// Compare this DAG against another; returns 0 on equality (see static
// overload below for details).
int SubsplitDAG::Compare(const SubsplitDAG &other, const bool quiet) const {
  return SubsplitDAG::Compare(*this, other, quiet);
}
// Three-way comparison of two DAGs: taxon counts, then subsplit node sets,
// then PCSP edge sets (after translating lhs into rhs's taxon ordering).
// Returns 0 if they match, nonzero otherwise; unless `quiet`, a description
// of the first mismatch is printed to stdout.
int SubsplitDAG::Compare(const SubsplitDAG &lhs, const SubsplitDAG &rhs,
                         const bool quiet) {
  // When quiet, route diagnostics into a discarded stream.
  std::stringstream dev_null;
  auto &os = quiet ? dev_null : std::cout;
  // (1) Compare Taxon Sizes.
  int taxon_diff = lhs.TaxonCount() - rhs.TaxonCount();
  if (taxon_diff != 0) {
    // #350 let's talk about -100 vs -200 here.
    os << "Subsplit::Compare: taxa do not match." << std::endl;
    return taxon_diff;
  }
  // Create translation map (lhs->rhs) for bitset clades.
  auto taxon_map = SubsplitDAG::BuildTaxonTranslationMap(lhs, rhs);
  // (2) Compare Subsplit Nodes.
  std::set<Bitset> pre_lhs_nodes = lhs.BuildSetOfNodeBitsets();
  std::set<Bitset> rhs_nodes = rhs.BuildSetOfNodeBitsets();
  std::set<Bitset> lhs_nodes;
  // Translate to account for different Taxon mappings and sort output.
  for (const auto &pre_lhs_node : pre_lhs_nodes) {
    Bitset lhs_node =
        SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(pre_lhs_node, taxon_map);
    lhs_nodes.insert(lhs_node.SubsplitSortClades());
  }
  if (lhs_nodes != rhs_nodes) {
    os << "Subsplit::Compare: subsplits do not match." << std::endl;
    return (lhs_nodes < rhs_nodes) ? -1 : 1;
  }
  // (3) Compare PCSP Edges.
  std::set<Bitset> pre_lhs_edges = lhs.BuildSetOfEdgeBitsets();
  std::set<Bitset> rhs_edges = rhs.BuildSetOfEdgeBitsets();
  std::set<Bitset> lhs_edges;
  // Translate to account for different Taxon mappings and sort output.
  for (const auto &pre_lhs_edge : pre_lhs_edges) {
    Bitset lhs_edge =
        SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(pre_lhs_edge, taxon_map);
    lhs_edges.insert(lhs_edge.PCSPSortClades());
  }
  if (lhs_edges != rhs_edges) {
    os << "Subsplit::Compare: PCSPs do not match." << std::endl;
    return (lhs_edges < rhs_edges) ? -1 : 1;
  }
  return 0;
}
std::tuple<std::set<Bitset>, std::set<Bitset>, std::set<Bitset>>
SubsplitDAG::CompareSubsplits(const SubsplitDAG &lhs, const SubsplitDAG &rhs) {
std::set<Bitset> pre_lhs_nodes = lhs.BuildSetOfNodeBitsets();
std::set<Bitset> rhs_nodes = rhs.BuildSetOfNodeBitsets();
std::set<Bitset> lhs_nodes;
auto taxon_map = SubsplitDAG::BuildTaxonTranslationMap(lhs, rhs);
for (const auto &pre_lhs_node : pre_lhs_nodes) {
Bitset lhs_node =
SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(pre_lhs_node, taxon_map);
lhs_nodes.insert(lhs_node.SubsplitSortClades());
}
std::set<Bitset> common, lhs_not_in_rhs, rhs_not_in_lhs;
for (const auto &node : lhs_nodes) {
if (rhs_nodes.find(node) == rhs_nodes.end()) {
lhs_not_in_rhs.insert(node);
} else {
common.insert(node);
}
}
for (const auto &node : rhs_nodes) {
if (lhs_nodes.find(node) == lhs_nodes.end()) {
rhs_not_in_lhs.insert(node);
} else {
common.insert(node);
}
}
return {common, lhs_not_in_rhs, rhs_not_in_lhs};
}
std::tuple<std::set<Bitset>, std::set<Bitset>, std::set<Bitset>>
SubsplitDAG::ComparePCSPs(const SubsplitDAG &lhs, const SubsplitDAG &rhs) {
std::set<Bitset> pre_lhs_edges = lhs.BuildSetOfEdgeBitsets();
std::set<Bitset> rhs_edges = rhs.BuildSetOfEdgeBitsets();
std::set<Bitset> lhs_edges;
auto taxon_map = SubsplitDAG::BuildTaxonTranslationMap(lhs, rhs);
for (const auto &pre_lhs_edge : pre_lhs_edges) {
Bitset lhs_edge =
SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(pre_lhs_edge, taxon_map);
lhs_edges.insert(lhs_edge.PCSPSortClades());
}
std::set<Bitset> common, lhs_not_in_rhs, rhs_not_in_lhs;
for (const auto &edge : lhs_edges) {
if (rhs_edges.find(edge) == rhs_edges.end()) {
lhs_not_in_rhs.insert(edge);
} else {
common.insert(edge);
}
}
for (const auto &edge : rhs_edges) {
if (lhs_edges.find(edge) == lhs_edges.end()) {
rhs_not_in_lhs.insert(edge);
} else {
common.insert(edge);
}
}
return {common, lhs_not_in_rhs, rhs_not_in_lhs};
}
// Two DAGs are equal exactly when Compare reports no difference.
bool operator==(const SubsplitDAG &lhs, const SubsplitDAG &rhs) {
  return SubsplitDAG::Compare(lhs, rhs) == 0;
}
// Defined as the negation of operator==.
bool operator!=(const SubsplitDAG &lhs, const SubsplitDAG &rhs) {
  return !(lhs == rhs);
}
// ** Counts
// For every node, count the topologies of the subtree below it (cached in
// topology_count_below_), and from the DAG root derive the total number of
// topologies the DAG can express.
void SubsplitDAG::CountTopologies() {
  topology_count_below_ = EigenVectorXd::Ones(NodeCount());
  for (const auto &node_id : RootwardNodeTraversalTrace(true)) {
    const auto &node = GetDAGNode(node_id);
    for (const auto clade : SubsplitCladeEnum::Iterator()) {
      // When there are no leafward nodes in the `rotated` direction, we set the number
      // of topologies for the rotation of the node to be 1.
      double per_rotated_count = node.GetLeafward(clade).empty() ? 1. : 0.;
      // Sum options across the possible children.
      for (const auto &child_id : node.GetLeafward(clade)) {
        per_rotated_count += topology_count_below_[child_id.value_];
      }
      // Take the product across the number of options for the left and right branches
      // of the tree.
      topology_count_below_[node_id.value_] *= per_rotated_count;
    }
  }
  topology_count_ = topology_count_below_[GetDAGRootNodeId().value_];
}
// Count the DAG edges excluding those leading to leaf subsplits, caching the
// result in edge_count_without_leaf_subsplits_.
void SubsplitDAG::CountEdgesWithoutLeafSubsplits() {
  size_t edge_count = EdgeCountWithLeafSubsplits();
  for (const auto taxon_id : GetTaxonIds()) {
    edge_count -= GetLeafEdgeIds(taxon_id).size();
  }
  edge_count_without_leaf_subsplits_ = edge_count;
}
// Simple size accessors.
size_t SubsplitDAG::TaxonCount() const { return dag_taxa_.size(); }
size_t SubsplitDAG::NodeCount() const { return storage_.GetVertices().size(); }
size_t SubsplitDAG::NodeCountWithoutDAGRoot() const { return NodeCount() - 1; }
NodeIdPair SubsplitDAG::NodeIdRange() const { return {NodeId(0), NodeId(NodeCount())}; }
size_t SubsplitDAG::RootsplitCount() const { return GetRootsplitNodeIds().size(); }
size_t SubsplitDAG::EdgeCount() const { return edge_count_without_leaf_subsplits_; }
size_t SubsplitDAG::EdgeCountWithLeafSubsplits() const {
  return storage_.GetLines().size();
}
EdgeIdPair SubsplitDAG::EdgeIdxRange() const {
  return {EdgeId(0), EdgeId(EdgeCountWithLeafSubsplits())};
}
double SubsplitDAG::TopologyCount() const { return topology_count_; }
// ** I/O
// Key summary counts for reporting.
StringSizeMap SubsplitDAG::SummaryStatistics() const {
  return {{"node_count", NodeCount()}, {"edge_count", EdgeCountWithLeafSubsplits()}};
}
// Print every node in full to stdout.
void SubsplitDAG::Print() const {
  for (auto dag_node : storage_.GetVertices()) {
    std::cout << dag_node.ToString() << std::endl;
  }
}
// Print each node id with its subsplit bitset.
void SubsplitDAG::PrintNodes() const {
  for (const auto &dag_node : storage_.GetVertices()) {
    std::cout << dag_node.Id() << ": " << dag_node.GetBitset().SubsplitToString()
              << std::endl;
  }
}
// Print the PCSP-to-edge-index map.
void SubsplitDAG::PrintEdgeIndexer() const {
  for (const auto &[edge, idx] : BuildEdgeIndexer()) {
    std::cout << idx << ": " << edge.PCSPToString() << std::endl;
  }
}
// Print each edge index with its parent and child node ids.
void SubsplitDAG::PrintDAGEdges() const {
  for (const auto &[parent_child_id, edge_idx] : storage_.GetLines()) {
    const auto &[parent_id, child_id] = parent_child_id;
    std::cout << edge_idx << "->{" << parent_id << "," << child_id << "}" << std::endl;
  }
}
// Print each parent subsplit with its child edge index range.
void SubsplitDAG::PrintParentToRange() const {
  for (const auto &[subsplit, edge_range] : parent_to_child_range_) {
    std::cout << subsplit.SubsplitToString() << ": [" << edge_range.first << ", "
              << edge_range.second << "]" << std::endl;
  }
}
void SubsplitDAG::ToDot(const std::string file_path, bool show_index_labels) const {
std::ofstream out_file(file_path);
out_file << ToDot(show_index_labels);
out_file.close();
}
// Render the DAG in Graphviz DOT format. Each node is a record showing its
// left clade, (optionally) its node id, and its right clade; edges leaving
// the left clade are colored differently from those leaving the right, and
// edges out of the DAG root node are dashed.
std::string SubsplitDAG::ToDot(bool show_index_labels) const {
  std::stringstream string_stream;
  string_stream << "digraph g {\n";
  string_stream << "node [shape=record];\n";
  string_stream << "edge [colorscheme=dark23];\n";
  DepthFirstWithAction(
      {GetDAGRootNodeId()},
      SubsplitDAGTraversalAction(
          // BeforeNode
          [this, &string_stream, &show_index_labels](NodeId node_id) {
            const auto &node = GetDAGNode(node_id);
            if (node.IsDAGRootNode()) {
              string_stream << node_id << " [label=\"<f0>ρ\"]\n";
              return;
            }
            auto bs = node.GetBitset();
            string_stream
                << node_id << " [label=\"<f0>"
                << bs.SubsplitGetClade(SubsplitClade::Left).ToVectorOfSetBitsAsString()
                << "|<f1>";
            if (show_index_labels) {
              string_stream << node_id;
            }
            string_stream
                << "|<f2>"
                << bs.SubsplitGetClade(SubsplitClade::Right).ToVectorOfSetBitsAsString()
                << "\"]\n";
          },
          // AfterNode
          [](NodeId node_id) {},
          // BeforeNodeClade
          [](NodeId node_id, bool is_edge_on_left) {},
          // VisitEdge
          [this, &string_stream, &show_index_labels](NodeId node_id, NodeId child_id,
                                                     bool is_edge_on_left) {
            if (GetDAGNode(child_id).IsLeaf()) {
              string_stream << child_id << " [label=\"<f1>" << child_id << "\"]\n";
            }
            string_stream << "\"" << node_id << "\":";
            string_stream << (is_edge_on_left ? "f0" : "f2");
            string_stream << "->\"";
            string_stream << child_id << "\":f1";
            if (show_index_labels) {
              string_stream << " [label=\"" << GetEdgeIdx(node_id, child_id);
              if (is_edge_on_left) {
                string_stream << "\", color=1, fontcolor=1";
              } else {
                string_stream << "\", color=3, fontcolor=3";
              }
              if (GetDAGNode(node_id).IsDAGRootNode()) {
                string_stream << ",style=dashed]";
              } else {
                string_stream << "]";
              }
            } else {
              if (GetDAGNode(node_id).IsDAGRootNode()) {
                string_stream << "[style=dashed]";
              }
            }
            string_stream << "\n";
          }));
  string_stream << "}";
  return string_stream.str();
}
// Renders `tree` as a Newick string (with branch lengths), labeling leaves via
// the DAG's tag-to-taxon-name map when one is available.
std::string SubsplitDAG::TreeToNewickTree(const RootedTree &tree) const {
  return tree.Newick(GetTagTaxonMap());
}
// Renders the topology of `tree` as a Newick string (topology only).
std::string SubsplitDAG::TreeToNewickTopology(const RootedTree &tree) const {
  return tree.NewickTopology(GetTagTaxonMap());
}
// Renders a bare topology as a Newick string, labeling leaves via the DAG's
// tag-to-taxon-name map when one is available.
std::string SubsplitDAG::TopologyToNewickTopology(
    const Node::Topology &topology) const {
  return topology->Newick(std::nullopt, GetTagTaxonMap());
}
// ** Build Indexers/Vectors
// Builds a map from each edge's PCSP bitset (parent subsplit concatenated with
// child subsplit) to the edge's index, by traversing all DAG edges.
BitsetSizeMap SubsplitDAG::BuildEdgeIndexer() const {
  auto edge_indexer = BitsetSizeMap();
  TopologicalEdgeTraversal([this, &edge_indexer](NodeId parent_id, bool is_edge_on_left,
                                                 NodeId child_id, EdgeId edge_idx) {
    // The parent subsplit is oriented according to which clade of the parent
    // the edge descends from.
    const auto parent_subsplit = GetDAGNode(parent_id).GetBitset(is_edge_on_left);
    const auto child_subsplit = GetDAGNode(child_id).GetBitset();
    SafeInsert(edge_indexer, Bitset::PCSP(parent_subsplit, child_subsplit),
               edge_idx.value_);
  });
  return edge_indexer;
}
// Builds the inverse of BuildEdgeIndexer: a map from each edge's index to its
// PCSP bitset.
SizeBitsetMap SubsplitDAG::BuildInverseEdgeIndexer() const {
  auto edge_to_pcsp_map = SizeBitsetMap();
  TopologicalEdgeTraversal([this, &edge_to_pcsp_map](NodeId parent_id,
                                                     bool is_edge_on_left,
                                                     NodeId child_id, EdgeId edge_idx) {
    const auto parent_subsplit = GetDAGNode(parent_id).GetBitset(is_edge_on_left);
    const auto child_subsplit = GetDAGNode(child_id).GetBitset();
    SafeInsert(edge_to_pcsp_map, edge_idx.value_,
               Bitset::PCSP(parent_subsplit, child_subsplit));
  });
  return edge_to_pcsp_map;
}
// ** Access
// Map from a subsplit's clade union to the ids of DAG nodes with that union.
const BitsetNodeIdSetMap &SubsplitDAG::GetSubsplitUnionMap() const {
  return subsplit_union_;
}
// As GetSubsplitUnionMap, but for nodes added by the current graft.
const BitsetNodeIdSetMap &SubsplitDAG::GetSubsplitUnionGraftMap() const {
  return subsplit_union_graft_;
}
// Map from a single clade to the ids of DAG nodes containing that clade.
const BitsetNodeIdSetMap &SubsplitDAG::GetSubsplitCladeMap() const {
  return subsplit_clade_;
}
// As GetSubsplitCladeMap, but for nodes added by the current graft.
const BitsetNodeIdSetMap &SubsplitDAG::GetSubsplitCladeGraftMap() const {
  return subsplit_clade_graft_;
}
// Enumerates the ids of all taxa in the DAG, in increasing order.
TaxonIdVector SubsplitDAG::GetTaxonIds() const {
  const auto taxon_count = TaxonCount();
  TaxonIdVector ids;
  for (TaxonId id = 0; id < taxon_count; ++id) {
    ids.push_back(id);
  }
  return ids;
}
// Looks up the taxon id for a taxon name; asserts the name is present.
TaxonId SubsplitDAG::GetTaxonId(const std::string &name) const {
  Assert(
      ContainsTaxon(name),
      "SubsplitDAG::GetTaxonId(): Taxon with given name is not contained in the DAG.");
  return TaxonId(dag_taxa_.find(name)->second);
}
// Const access to a DAG node by id; asserts the node exists.
SubsplitDAGNode SubsplitDAG::GetDAGNode(const NodeId node_id) const {
  Assert(ContainsNode(node_id), "Node with the given node_id does not exist in DAG.");
  return storage_.GetVertices().at(node_id.value_);
}
// Mutable access to a DAG node by id; asserts the node exists.
MutableSubsplitDAGNode SubsplitDAG::GetDAGNode(const NodeId node_id) {
  Assert(ContainsNode(node_id), "Node with the given node_id does not exist in DAG.");
  return storage_.GetVertices().at(node_id.value_);
}
// Returns the subsplit bitset of the node with the given id.
Bitset SubsplitDAG::GetDAGNodeBitset(const NodeId node_id) const {
  return GetDAGNode(node_id).GetBitset();
}
// Returns the node id for a subsplit bitset; asserts the subsplit is present.
NodeId SubsplitDAG::GetDAGNodeId(const Bitset &subsplit) const {
  Assert(ContainsNode(subsplit), "Node with the given subsplit does not exist in DAG.");
  // While a graft is attached, the id must come from storage (the grafted
  // nodes are not in subsplit_to_id_).
  if (ContainsGraft()) {
    auto node = storage_.FindVertex(subsplit);
    return NodeId(node->get().GetId());
  }
  return subsplit_to_id_.at(subsplit);
}
// The DAG root (universal ancestor) node always carries the highest node id.
NodeId SubsplitDAG::GetDAGRootNodeId() const { return NodeId(NodeCount() - 1); }
// The rootsplit nodes are the left-leafward neighbors of the DAG root node.
ConstNeighborsView SubsplitDAG::GetRootsplitNodeIds() const {
  return GetDAGNode(GetDAGRootNodeId()).GetLeftLeafward();
}
// Collects the ids of all edges descending from the DAG root node, i.e. the
// rootsplit edges.
EdgeIdVector SubsplitDAG::GetRootsplitEdgeIds() const {
  const auto root_id = GetDAGRootNodeId();
  EdgeIdVector rootsplit_edges;
  for (const auto rootsplit_node_id : GetRootsplitNodeIds()) {
    rootsplit_edges.push_back(GetEdgeIdx(root_id, rootsplit_node_id));
  }
  return rootsplit_edges;
}
// Returns the id of the first rootsplit edge below the DAG root node; fails if
// the root has no rootsplit children.
EdgeId SubsplitDAG::GetFirstRootsplitEdgeId() const {
  const auto node_id = GetDAGRootNodeId();
  for (const auto adj_node_id : GetRootsplitNodeIds()) {
    const auto edge_id = GetEdgeIdx(node_id, adj_node_id);
    // Only the first rootsplit edge is needed, so return immediately.
    return edge_id;
  }
  Failwith("ERROR: No found rootsplit nodes.");
}
// Collects the node ids of all leaf nodes, one per taxon, in taxon-id order.
NodeIdVector SubsplitDAG::GetLeafNodeIds() const {
  NodeIdVector leaf_ids;
  for (TaxonId taxon_id(0); taxon_id < TaxonCount(); ++taxon_id) {
    leaf_ids.push_back(GetLeafNodeId(taxon_id));
  }
  return leaf_ids;
}
// Leaf nodes occupy node ids [0, taxon_count), so a taxon's leaf node id is
// just its taxon id.
NodeId SubsplitDAG::GetLeafNodeId(const TaxonId taxon_id) const {
  return NodeId(taxon_id.value_);
}
// Collects the ids of all edges that attach any leaf node to its parents.
EdgeIdVector SubsplitDAG::GetLeafEdgeIds() const {
  EdgeIdVector edge_ids;
  for (const auto node_id : GetLeafNodeIds()) {
    const auto node = GetDAGNode(node_id);
    // Walk rootward from the leaf over both parent clades.
    for (const auto clade : SubsplitCladeEnum::Iterator()) {
      for (const auto adj_node_id : node.GetNeighbors(Direction::Rootward, clade)) {
        const auto edge_id = GetEdgeIdx(adj_node_id, node.Id());
        edge_ids.push_back(edge_id);
      }
    }
  }
  return edge_ids;
}
// Collects the ids of all edges that attach the given taxon's leaf node to its
// parents.
EdgeIdVector SubsplitDAG::GetLeafEdgeIds(const TaxonId taxon_id) const {
  EdgeIdVector edge_ids;
  const auto node = GetDAGNode(GetLeafNodeId(taxon_id));
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    for (const auto adj_node_id : node.GetNeighbors(Direction::Rootward, clade)) {
      const auto edge_id = GetEdgeIdx(adj_node_id, node.Id());
      edge_ids.push_back(edge_id);
    }
  }
  return edge_ids;
}
// Const access to a DAG edge by id; asserts the edge exists.
ConstLineView SubsplitDAG::GetDAGEdge(const EdgeId edge_id) const {
  Assert(ContainsEdge(edge_id), "Node with the given node_id does not exist in DAG.");
  return storage_.GetLine(edge_id).value();
}
// Returns the PCSP bitset (parent subsplit + child subsplit) of an edge.
Bitset SubsplitDAG::GetDAGEdgeBitset(const EdgeId edge_id) const {
  auto edge = GetDAGEdge(edge_id);
  Bitset parent = GetDAGNodeBitset(edge.GetParent());
  Bitset child = GetDAGNodeBitset(edge.GetChild());
  return Bitset::PCSP(parent, child);
}
// Edge lookup by parent/child subsplit bitsets.
EdgeId SubsplitDAG::GetEdgeIdx(const Bitset &parent_subsplit,
                               const Bitset &child_subsplit) const {
  return GetEdgeIdx(GetDAGNodeId(parent_subsplit), GetDAGNodeId(child_subsplit));
}
// Edge lookup by parent/child node ids; asserts the edge exists (and prints a
// diagnostic to stderr before failing).
EdgeId SubsplitDAG::GetEdgeIdx(const NodeId parent_id, const NodeId child_id) const {
  auto edge = storage_.GetLine(parent_id, child_id);
  if (!edge.has_value()) {
    std::cerr << "Edge not found: Node" << parent_id << ", Node" << child_id
              << std::endl;
  }
  Assert(edge.has_value(), "Edge not found in DAG.");
  return EdgeId(edge.value().GetId());
}
// Edge lookup by PCSP bitset.
EdgeId SubsplitDAG::GetEdgeIdx(const Bitset &edge_pcsp) const {
  return GetEdgeIdx(edge_pcsp.PCSPGetParentSubsplit(),
                    edge_pcsp.PCSPGetChildSubsplit());
}
// Edge lookup by the parent/child pair of an NNI operation.
EdgeId SubsplitDAG::GetEdgeIdx(const NNIOperation &nni) const {
  return GetEdgeIdx(nni.GetParent(), nni.GetChild());
}
// The focal clade is the parent clade the edge descends from.
SubsplitClade SubsplitDAG::GetFocalClade(const EdgeId edge_id) const {
  return GetDAGEdge(edge_id).GetSubsplitClade();
}
// The sister clade is the parent clade opposite the focal clade.
SubsplitClade SubsplitDAG::GetSisterClade(const EdgeId edge_id) const {
  return Bitset::Opposite(GetFocalClade(edge_id));
}
// Builds the NNI operation (parent/child subsplit pair) described by an edge.
NNIOperation SubsplitDAG::GetNNI(const EdgeId edge_id) const {
  const auto &edge = GetDAGEdge(edge_id);
  Bitset parent = GetDAGNodeBitset(edge.GetParent());
  Bitset child = GetDAGNodeBitset(edge.GetChild());
  return NNIOperation(parent, child);
}
// Returns the first clade-swapped neighbor of `nni` that is contained in the
// DAG; fails if neither swap is present.
NNIOperation SubsplitDAG::FindNNINeighborInDAG(const NNIOperation &nni) const {
  for (const auto child_clade_swapped_with_sister : SubsplitCladeEnum::Iterator()) {
    const auto &swapped_nni = nni.GetNeighboringNNI(child_clade_swapped_with_sister);
    if (ContainsNNI(swapped_nni)) {
      const auto parent_id = GetDAGNodeId(swapped_nni.GetParent());
      const auto child_id = GetDAGNodeId(swapped_nni.GetChild());
      // NOTE(review): ids beyond NodeCount() indicate the match came from a
      // grafted node; this is only reported, the NNI is still returned.
      if (parent_id > NodeCount() || child_id > NodeCount()) {
        std::cout << "ERROR: found NNI Neighbor from the GraftDAG!" << std::endl;
      }
      return swapped_nni;
    }
  }
  Failwith("NNIOperation has no neighbors found in the DAG.");
}
// Returns, per clade swap, the neighbor of `nni` if contained in the (non-graft
// portion of the) DAG, otherwise nullopt.
SubsplitCladeEnum::Array<std::optional<NNIOperation>>
SubsplitDAG::FindAllNNINeighborsInDAG(const NNIOperation &nni) const {
  SubsplitCladeEnum::Array<std::optional<NNIOperation>> neighbor_nnis;
  for (const auto child_clade_swapped_with_sister : SubsplitCladeEnum::Iterator()) {
    const auto &neighbor_nni = nni.GetNeighboringNNI(child_clade_swapped_with_sister);
    if (ContainsNNI(neighbor_nni)) {
      auto parent_id = GetDAGNodeId(neighbor_nni.GetParent());
      auto child_id = GetDAGNodeId(neighbor_nni.GetChild());
      // Ids beyond NodeCount() belong to grafted nodes; treat as not found.
      if (parent_id > NodeCount() || child_id > NodeCount()) {
        neighbor_nnis[child_clade_swapped_with_sister] = std::nullopt;
      } else {
        neighbor_nnis[child_clade_swapped_with_sister] = neighbor_nni;
      }
    } else {
      neighbor_nnis[child_clade_swapped_with_sister] = std::nullopt;
    }
  }
  return neighbor_nnis;
}
// Returns the [begin, end) range of edge ids descending from the given parent
// subsplit (oriented by is_edge_on_left); asserts the subsplit is present.
EdgeIdPair SubsplitDAG::GetChildEdgeRange(const Bitset &subsplit,
                                          const bool is_edge_on_left) const {
  Assert(
      ContainsNode(subsplit),
      "Node with the given subsplit does not exist in SubsplitDAG::GetChildEdgeRange.");
  return parent_to_child_range_.at(SubsplitToSortedOrder(subsplit, is_edge_on_left));
}
// Collects all taxon names in the DAG as a lexicographically sorted vector.
StringVector SubsplitDAG::BuildSetOfTaxonNames() const {
  StringVector names;
  names.reserve(dag_taxa_.size());
  for (const auto &[name, taxon_id] : dag_taxa_) {
    std::ignore = taxon_id;
    names.push_back(name);
  }
  std::sort(names.begin(), names.end());
  return names;
}
// Collects the subsplit bitsets of all DAG nodes into a set.
std::set<Bitset> SubsplitDAG::BuildSetOfNodeBitsets() const {
  std::set<Bitset> nodes;
  for (NodeId node_id = NodeId(0); node_id < NodeCount(); node_id++) {
    Bitset node_bitset = GetDAGNode(node_id).GetBitset();
    nodes.insert(node_bitset);
  }
  return nodes;
}
// Collects the PCSP bitsets of all DAG edges into a set.
std::set<Bitset> SubsplitDAG::BuildSetOfEdgeBitsets() const {
  std::set<Bitset> edges;
  for (auto i : storage_.GetLines()) {
    auto parent_bitset = GetDAGNode(NodeId(i.GetParent())).GetBitset();
    auto child_bitset = GetDAGNode(NodeId(i.GetChild())).GetBitset();
    Bitset edge_bitset = Bitset::PCSP(parent_bitset, child_bitset);
    edges.insert(edge_bitset);
  }
  return edges;
}
// Map from taxon name to taxon id.
const StringTaxonIdMap &SubsplitDAG::GetTaxonMap() const { return dag_taxa_; }
// Optional map from leaf tag to taxon name; nullopt if none was provided.
const TagStringMapOption SubsplitDAG::GetTagTaxonMap() const {
  if (!tag_taxon_map_) {
    return std::nullopt;
  }
  return *tag_taxon_map_;
}
// Map from subsplit bitset to node id (excludes grafted nodes).
const BitsetNodeIdMap &SubsplitDAG::GetSubsplitToIdMap() const {
  return subsplit_to_id_;
}
// Builds per-edge SBN parameters corresponding to a uniform distribution over
// the topologies in the DAG's support: each child edge is weighted by the
// fraction of topologies below the parent's clade that pass through that
// child. Entries left untouched (e.g. leaf-side edges with no children) stay 1.
EigenVectorXd SubsplitDAG::BuildUniformOnTopologicalSupportPrior() const {
  EigenVectorXd q = EigenVectorXd::Ones(EdgeCountWithLeafSubsplits());
  for (const auto &node_id : RootwardNodeTraversalTrace(true)) {
    const auto &node = GetDAGNode(node_id);
    for (const bool rotated : {false, true}) {
      if (!node.GetLeafward(rotated).empty()) {
        // Total topology count over all children of this clade, used as the
        // normalizer for the per-child weights.
        double per_rotated_count = 0.;
        for (auto child_id : node.GetLeafward(rotated)) {
          per_rotated_count += topology_count_below_[child_id.value_];
        }
        for (auto child_id : node.GetLeafward(rotated)) {
          auto edge_idx = GetEdgeIdx(node.Id(), NodeId(child_id));
          q(size_t(edge_idx)) =
              topology_count_below_(child_id.value_) / per_rotated_count;
        }
      }
    }
  }
  return q;
}
// Enumerates every tree topology in the DAG's support, via a rootward dynamic
// program: for each node, the set of subtopologies below it is the cross
// product of the subtopologies of its left and right children.
Node::NodePtrVec SubsplitDAG::GenerateAllTopologies() const {
  // topology_below[node_id] holds all subtopologies rooted at that node.
  std::vector<Node::NodePtrVec> topology_below(NodeCount());
  // Gathers the subtopologies below a node, split by the clade (rotated vs
  // sorted) the child descends from.
  auto GetSubtopologies = [&topology_below](SubsplitDAGNode node) {
    Node::NodePtrVec rotated_subtopologies, sorted_subtopologies;
    for (const bool rotated : {false, true}) {
      for (const auto &child_id : node.GetLeafward(rotated)) {
        for (const auto &subtopology : topology_below.at(child_id.value_)) {
          rotated ? rotated_subtopologies.push_back(subtopology)
                  : sorted_subtopologies.push_back(subtopology);
        }
      }
    }
    return std::make_pair(rotated_subtopologies, sorted_subtopologies);
  };
  // Joins every rotated subtopology with every sorted subtopology at node_id.
  auto MergeTopologies = [this](NodeId node_id, Node::NodePtrVec &rotated_subtopologies,
                                Node::NodePtrVec &sorted_subtopologies) {
    Node::NodePtrVec topologies;
    for (const auto &rotated_subtopology : rotated_subtopologies) {
      for (const auto &sorted_subtopology : sorted_subtopologies) {
        Node::NodePtr new_topology =
            Node::Join(sorted_subtopology, rotated_subtopology, node_id.value_);
        topologies.push_back(new_topology);
      }
    }
    if (node_id == GetDAGRootNodeId()) {
      // DAG root node has no `sorted_subtopologies`, so loop above yields empty
      // `topologies` vector.
      return rotated_subtopologies;
    }
    return topologies;
  };
  // Fill topology_below from the leaves up.
  for (const auto &node_id : RootwardNodeTraversalTrace(true)) {
    const auto &node = GetDAGNode(node_id);
    if (node.IsLeaf()) {
      topology_below.at(node_id.value_).push_back(Node::Leaf(node_id.value_));
    } else {
      auto [rotated_topologies, sorted_topologies] = GetSubtopologies(node);
      topology_below[node_id.value_] =
          MergeTopologies(node_id, rotated_topologies, sorted_topologies);
    }
  }
  const auto &topologies = topology_below.at(GetDAGRootNodeId().value_);
  Assert(topologies.size() == TopologyCount(),
         "The realized number of topologies does not match the expected count.");
  // We return a deep copy of every Polished topology to avoid loops in the pointer
  // structure. Such loops can create problems when we Polish the topologies one at a
  // time: polishing a second topology can change the numbering of a previous topology.
  // This is checked for in the "GPInstance: GenerateCompleteRootedTreeCollection" test.
  Node::NodePtrVec final_topologies;
  final_topologies.reserve(topologies.size());
  for (auto &topology : topologies) {
    topology->Polish();
    final_topologies.push_back(topology->DeepCopy());
  }
  return final_topologies;
}
std::vector<RootedTree> SubsplitDAG::GenerateAllTrees(
const EigenVectorXd &dag_branch_lengths) const {
Assert(size_t(dag_branch_lengths.size()) == EdgeCountWithLeafSubsplits(),
"dag_branch_lengths is the wrong size.");
auto topologies = GenerateAllTopologies();
std::vector<RootedTree> trees;
for (const auto &topology : topologies) {
trees.push_back(BuildTreeFromTopology(topology, dag_branch_lengths));
}
return trees;
}
// Renders every topology in the DAG's support as a Newick string, one per line.
std::string SubsplitDAG::ToNewickOfAllTopologies() const {
  std::stringstream out;
  const auto tag_taxon_map = GetTagTaxonMap();
  for (const auto &topology : GenerateAllTopologies()) {
    out << topology->Newick(std::nullopt, tag_taxon_map) << std::endl;
  }
  return out.str();
}
// Generates a (small) set of topologies that together cover every edge of the
// DAG: repeatedly walks root-to-leaves, greedily preferring unvisited edges,
// until all edges have been used by some topology.
Node::NodePtrVec SubsplitDAG::GenerateCoveringTopologies() const {
  Node::NodePtrVec topologies;
  // Tracks which edges some generated topology has already used.
  std::vector<bool> visited_edges(EdgeCountWithLeafSubsplits(), false);
  // Tracks nodes below which every edge has already been visited.
  std::vector<bool> visited_edges_below_node(NodeCount(), false);
  for (const auto node_id : GetLeafNodeIds()) {
    visited_edges_below_node[node_id.value_] = true;
  }
  // Continue generating topologies until all edges have been visited.
  while (!std::all_of(visited_edges.begin(), visited_edges.end(),
                      [](bool v) { return v; })) {
    // tree_map records, for each parent, the chosen left/right children.
    ParentToChildNodeIdMap tree_map;
    std::vector<NodeId> node_ids;
    node_ids.push_back(GetDAGRootNodeId());
    // Depth-first descent choosing one child per clade of each node.
    while (!node_ids.empty()) {
      const auto node = GetDAGNode(node_ids.back());
      node_ids.pop_back();
      SubsplitCladeEnum::Array<NodeId> child_ids = {{NodeId(NoId), NodeId(NoId)}};
      for (const auto clade : SubsplitCladeEnum::Iterator()) {
        EdgeId best_edge_id = EdgeId(NoId);
        for (const auto adj_node_id : node.GetNeighbors(Direction::Leafward, clade)) {
          const auto adj_edge_id = GetEdgeIdx(node.Id(), adj_node_id);
          // Prioritize unvisited edges. If all edges have been visited, prioritize
          // edges which have nodes with unvisited edges beneath them.
          if (!visited_edges[adj_edge_id.value_]) {
            best_edge_id = adj_edge_id;
            child_ids[clade] = adj_node_id;
            break;
          } else if (!visited_edges_below_node[adj_node_id.value_]) {
            best_edge_id = adj_edge_id;
            child_ids[clade] = adj_node_id;
          } else if (child_ids[clade] == NoId) {
            best_edge_id = adj_edge_id;
            child_ids[clade] = adj_node_id;
          }
        }
        if (child_ids[clade] != NoId) {
          visited_edges[best_edge_id.value_] = true;
          if (!IsNodeLeaf(child_ids[clade])) {
            node_ids.push_back(child_ids[clade]);
          }
        }
      }
      tree_map[node.Id()] = child_ids;
    }
    // Build topology from tree_map.
    const auto rootsplit_id =
        (tree_map[GetDAGRootNodeId()][SubsplitClade::Left] != NoId)
            ? tree_map[GetDAGRootNodeId()][SubsplitClade::Left]
            : tree_map[GetDAGRootNodeId()][SubsplitClade::Right];
    tree_map.erase(GetDAGRootNodeId());
    auto topology = BuildTopologyFromNodeIdMap(tree_map, rootsplit_id);
    topologies.push_back(topology);
    // Update visited edges below.
    std::vector<NodeId> update_node_ids;
    for (const auto &[node_id, child_ids] : tree_map) {
      if (!visited_edges_below_node[node_id.value_]) {
        update_node_ids.push_back(node_id);
      }
    }
    // Propagate the "all edges below visited" flag rootward from newly
    // completed nodes.
    while (!update_node_ids.empty()) {
      const auto node = GetDAGNode(update_node_ids.back());
      update_node_ids.pop_back();
      bool visited_edges_below_this_node = true;
      for (const auto clade : SubsplitCladeEnum::Iterator()) {
        for (const auto adj_node_id : node.GetNeighbors(Direction::Leafward, clade)) {
          const auto adj_edge_id = GetEdgeIdx(node.Id(), adj_node_id);
          if (!visited_edges[adj_edge_id.value_] ||
              !visited_edges_below_node[adj_node_id.value_]) {
            visited_edges_below_this_node = false;
            break;
          }
        }
        if (!visited_edges_below_this_node) break;
      }
      if (visited_edges_below_this_node) {
        visited_edges_below_node[node.Id().value_] = true;
        for (const auto clade : SubsplitCladeEnum::Iterator()) {
          for (const auto adj_node_id : node.GetNeighbors(Direction::Rootward, clade)) {
            update_node_ids.push_back(adj_node_id);
          }
        }
      }
    }
  }
  return topologies;
}
std::vector<RootedTree> SubsplitDAG::GenerateCoveringTrees(
const EigenVectorXd &dag_branch_lengths) const {
Assert(size_t(dag_branch_lengths.size()) == EdgeCountWithLeafSubsplits(),
"dag_branch_lengths is the wrong size.");
auto topologies = GenerateCoveringTopologies();
std::vector<RootedTree> trees;
for (const auto &topology : topologies) {
auto tree = BuildTreeFromTopology(topology, dag_branch_lengths);
trees.push_back(tree);
}
return trees;
}
// Renders each topology in the edge-covering set as a Newick string, one per
// line.
std::string SubsplitDAG::ToNewickOfCoveringTopologies() const {
  std::stringstream str;
  auto topologies = GenerateCoveringTopologies();
  for (const auto &topology : topologies) {
    str << topology->Newick(std::nullopt, GetTagTaxonMap()) << std::endl;
  }
  return str.str();
}
// Debug printer for a parent-to-children map, e.g. "{ { 5, [ 2, 3 ] },  }".
std::ostream &operator<<(std::ostream &os,
                         const SubsplitDAG::ParentToChildNodeIdMap &tree_map) {
  os << "{ ";
  for (const auto &[parent_id, child_ids] : tree_map) {
    os << "{ " << parent_id << ", [ " << child_ids[SubsplitClade::Left] << ", "
       << child_ids[SubsplitClade::Right] << " ] }, ";
  }
  os << " }";
  return os;
}
// Assembles a topology from a parent->children map rooted at rootsplit_id:
// creates a node per map entry and per leaf, links children, then polishes.
Node::Topology SubsplitDAG::BuildTopologyFromNodeIdMap(ParentToChildNodeIdMap &tree_map,
                                                       NodeId rootsplit_id) const {
  std::unordered_map<NodeId, Node::NodePtr> id_to_nodes;
  // Initialize parent nodes.
  for (const auto &[parent_id, child_ids] : tree_map) {
    std::ignore = child_ids;
    auto leaves = GetDAGNodeBitset(parent_id).SubsplitCladeUnion();
    id_to_nodes[parent_id] = Node::Leaf(parent_id.value_, leaves);
  }
  // Initialize leaf nodes
  for (const auto leaf_id : GetLeafNodeIds()) {
    auto leaves = GetDAGNodeBitset(leaf_id).SubsplitCladeUnion();
    id_to_nodes[leaf_id] = Node::Leaf(leaf_id.value_, leaves);
  }
  // Join nodes into topology.
  for (const auto &[parent_id, child_ids] : tree_map) {
    id_to_nodes[parent_id]->AddChildren(id_to_nodes[child_ids[SubsplitClade::Left]],
                                        id_to_nodes[child_ids[SubsplitClade::Right]]);
  }
  // Polish topology.
  Node::Topology &topology = id_to_nodes[rootsplit_id];
  topology->Polish(false, TaxonCount());
  return topology;
}
// Builds per-edge SBN parameters corresponding to a uniform distribution over
// ALL topologies (not just the DAG's support), computed via combinatorial
// subsplit-count ratios in log space and then exponentiated.
EigenVectorXd SubsplitDAG::BuildUniformOnAllTopologiesPrior() const {
  EigenVectorXd result = EigenVectorXd::Zero(EdgeCountWithLeafSubsplits());
  for (const auto &[parent_child_id, edge_idx] : storage_.GetLines()) {
    const auto &[parent_id, child_id] = parent_child_id;
    std::ignore = parent_id;
    // If child is a leaf and subsplit is sorted, then child0 will have a zero taxon
    // count.
    auto child_left_taxon_count =
        GetDAGNode(child_id).GetBitset().SubsplitGetClade(SubsplitClade::Left).Count();
    // As long as subsplit is sorted and nonempty, then child1 will have a nonzero taxon
    // count.
    auto child_right_taxon_count =
        GetDAGNode(child_id).GetBitset().SubsplitGetClade(SubsplitClade::Right).Count();
    // The ordering of this subsplit is flipped so that this ratio will be nonzero in
    // the denominator in the case of root & leaves.
    result(size_t(edge_idx)) = Combinatorics::LogChildSubsplitCountRatio(
        child_right_taxon_count, child_left_taxon_count);
  }
  NumericalUtils::Exponentiate(result);
  return result;
}
// ** DAG Lambda Iterators
// Applies f to every "real" node: all nodes except the leaves (first
// taxon_count_ ids) and the DAG root (last id).
void SubsplitDAG::IterateOverRealNodes(const NodeLambda &f) const {
  Assert(taxon_count_ < NodeCount(), "No real DAG nodes!");
  for (auto it = storage_.GetVertices().cbegin() + taxon_count_;
       it < storage_.GetVertices().cend() - 1; it++) {
    f(*it);
  }
}
// Applies f to each child node of `node` on the given (rotated or not) side.
void SubsplitDAG::IterateOverLeafwardEdges(SubsplitDAGNode node, bool rotated,
                                           const NodeLambda &f) const {
  for (const auto child_id : node.GetLeafward(rotated)) {
    f(GetDAGNode(child_id));
  }
}
// Applies f to each child node of `node` over both sides, passing which side
// the edge descends from.
void SubsplitDAG::IterateOverLeafwardEdges(SubsplitDAGNode node,
                                           const EdgeDestinationLambda &f) const {
  for (bool is_edge_on_left : {false, true}) {
    for (const auto child_id : node.GetLeafward(is_edge_on_left)) {
      f(is_edge_on_left, GetDAGNode(child_id));
    }
  }
}
// As above, but passes the edge id and child node id instead of the node view.
void SubsplitDAG::IterateOverLeafwardEdgesAndChildren(
    SubsplitDAGNode node, const EdgeAndNodeLambda &f) const {
  IterateOverLeafwardEdges(
      node, [this, &node, &f](bool is_edge_on_left, SubsplitDAGNode child) {
        f(GetEdgeIdx(node.Id(), child.Id()), is_edge_on_left, child.Id());
      });
}
// Applies f to each parent node of `node` over both sides; rootsplit nodes are
// skipped (their only parent is the DAG root).
void SubsplitDAG::IterateOverRootwardEdges(SubsplitDAGNode node,
                                           const EdgeDestinationLambda &f) const {
  if (!node.IsRootsplit()) {
    for (bool is_edge_on_left : {false, true}) {
      for (const auto parent_id : node.GetRootward(is_edge_on_left)) {
        f(is_edge_on_left, GetDAGNode(parent_id));
      }
    }
  }
}
// As above, but passes the edge id and parent node id instead of the node view.
void SubsplitDAG::IterateOverRootwardEdgesAndParents(SubsplitDAGNode node,
                                                     const EdgeAndNodeLambda &f) const {
  IterateOverRootwardEdges(
      node, [this, &node, &f](bool rotated, SubsplitDAGNode parent) {
        f(GetEdgeIdx(parent.Id(), node.Id()), rotated, parent.Id());
      });
}
// Applies f to each leafward edge of `node`, passing parent id, side, child id,
// and edge id.
void SubsplitDAG::IterateOverParentAndChildAndLeafwardEdges(
    SubsplitDAGNode node, const ParentRotationChildEdgeLambda &f) const {
  IterateOverLeafwardEdges(
      node, [this, &node, &f](bool is_edge_on_left, SubsplitDAGNode child) {
        f(node.Id(), is_edge_on_left, child.Id(), GetEdgeIdx(node.Id(), child.Id()));
      });
}
// Thin wrapper delegating to RootedSBNMaps.
RootedIndexerRepresentation SubsplitDAG::IndexerRepresentationOf(
    const BitsetSizeMap &indexer, const Node::NodePtr &topology,
    size_t default_index) const {
  return RootedSBNMaps::IndexerRepresentationOf(indexer, topology, default_index);
}
// Computes the unconditional probability of each node by propagating
// probability mass from the DAG root (probability 1) down each edge, weighted
// by the normalized SBN parameter of that edge.
EigenVectorXd SubsplitDAG::UnconditionalNodeProbabilities(
    EigenConstVectorXdRef normalized_sbn_parameters) const {
  EigenVectorXd node_probabilities(NodeCount());
  node_probabilities.setZero();
  node_probabilities[GetDAGRootNodeId().value_] = 1.;
  TopologicalEdgeTraversal([&node_probabilities, &normalized_sbn_parameters](
                               const NodeId parent_id, const bool is_edge_on_left,
                               const NodeId child_id, const EdgeId edge_idx) {
    const double child_probability_given_parent =
        normalized_sbn_parameters[edge_idx.value_];
    Assert(child_probability_given_parent >= 0. && child_probability_given_parent <= 1.,
           "UnconditionalNodeProbabilities: got an out-of-range probability. Are these "
           "normalized and in linear space?");
    const double parent_probability = node_probabilities[parent_id.value_];
    // A node may have several parents; accumulate over all incoming edges.
    node_probabilities[child_id.value_] +=
        parent_probability * child_probability_given_parent;
  });
  return node_probabilities;
}
// Maps each non-leaf, non-root subsplit bitset to its unconditional
// probability under the given normalized SBN parameters.
BitsetDoubleMap SubsplitDAG::UnconditionalSubsplitProbabilities(
    EigenConstVectorXdRef normalized_sbn_parameters) const {
  auto node_probabilities = UnconditionalNodeProbabilities(normalized_sbn_parameters);
  BitsetDoubleMap subsplit_probability_map;
  for (NodeId node_id = NodeId(0);
       static_cast<Eigen::Index>(node_id.value_) < node_probabilities.size();
       node_id++) {
    const auto &subsplit_bitset = GetDAGNode(node_id).GetBitset();
    if (node_id != GetDAGRootNodeId() && !subsplit_bitset.SubsplitIsLeaf()) {
      SafeInsert(subsplit_probability_map, subsplit_bitset,
                 node_probabilities[node_id.value_]);
    }
  }
  return subsplit_probability_map;
}
// Computes the "inverted" probability P(parent | child) for each PCSP via
// Bayes' rule, given P(child | parent) and the unconditional node
// probabilities. Rootsplit entries stay at 1.
EigenVectorXd SubsplitDAG::InvertedGPCSPProbabilities(
    EigenConstVectorXdRef normalized_sbn_parameters,
    EigenConstVectorXdRef node_probabilities) const {
  EigenVectorXd inverted_probabilities =
      EigenVectorXd(normalized_sbn_parameters.size());
  inverted_probabilities.setOnes();
  TopologicalEdgeTraversal(
      [this, &node_probabilities, &normalized_sbn_parameters, &inverted_probabilities](
          const NodeId parent_id, const bool is_edge_on_left, const NodeId child_id,
          const EdgeId edge_idx) {
        // The traversal doesn't set the rootsplit probabilities, but those are always 1
        // (there is only one "parent" of a rootsplit).
        if (parent_id != GetDAGRootNodeId()) {
          // For a PCSP t -> s:
          inverted_probabilities[edge_idx.value_] =         // P(t|s)
              node_probabilities[parent_id.value_] *        // P(t)
              normalized_sbn_parameters[edge_idx.value_] /  // P(s|t)
              node_probabilities[child_id.value_];          // P(s)
        }
      });
  return inverted_probabilities;
}
// Collects the child subsplits of `parent_subsplit` from the index_to_child
// map, optionally synthesizing a leaf child subsplit when the parent has no
// recorded children (i.e. the clade is a single taxon).
BitsetVector SubsplitDAG::GetChildSubsplits(const SizeBitsetMap &index_to_child,
                                            const Bitset &parent_subsplit,
                                            bool include_leaf_subsplits) {
  BitsetVector children_subsplits;
  // Add all non-leaf child subsplit bitsets.
  if (parent_to_child_range_.count(parent_subsplit) > 0) {
    const auto [start, stop] = parent_to_child_range_.at(parent_subsplit);
    for (auto idx = start; idx < stop; idx++) {
      children_subsplits.push_back(index_to_child.at(idx.value_));
    }
  }
  // Optionally add leaf child subsplit bitsets.
  else if (include_leaf_subsplits) {
    // This method is designed to be called before calling
    // AddLeafSubsplitsToDAGEdgesAndParentToRange. In that case, if the second clade
    // of the subsplit is just a single taxon, the subsplit will not map to any value in
    // parent_to_child_range_.
    //
    // But we still need to create and connect to leaf subsplits in the DAG. So, here we
    // make a leaf child subsplit.
    children_subsplits.push_back(Bitset::LeafSubsplitOfParentSubsplit(parent_subsplit));
  }
  return children_subsplits;
}
// Converts a bitset -> (size_t, size_t) edge-range map into the strongly-typed
// bitset -> (EdgeId, EdgeId) equivalent.
BitsetEdgeIdPairMap BitsetSizePairMapToBitsetEdgeIdPairMap(
    BitsetSizePairMap &bitset_size_map) {
  BitsetEdgeIdPairMap result;
  for (const auto &entry : bitset_size_map) {
    const auto &range = entry.second;
    result[entry.first] = {EdgeId(range.first), EdgeId(range.second)};
  }
  return result;
}
// Builds the indexer bundle (edge indexer, index-to-child map, rootsplits) from
// a topology counter, populating edge_count_without_leaf_subsplits_ and
// parent_to_child_range_ as side effects.
std::tuple<BitsetSizeMap, SizeBitsetMap, BitsetVector>
SubsplitDAG::ProcessTopologyCounter(const Node::TopologyCounter &topology_counter) {
  BitsetSizeMap edge_indexer;
  SizeBitsetMap index_to_child;
  BitsetVector rootsplits;
  BitsetSizePairMap parent_to_child_range;
  std::tie(rootsplits, edge_indexer, index_to_child, parent_to_child_range,
           edge_count_without_leaf_subsplits_) =
      SBNMaps::BuildIndexerBundle(RootedSBNMaps::RootsplitCounterOf(topology_counter),
                                  RootedSBNMaps::PCSPCounterOf(topology_counter));
  // Convert the raw size_t ranges into strongly-typed EdgeId ranges.
  parent_to_child_range_ =
      BitsetSizePairMapToBitsetEdgeIdPairMap(parent_to_child_range);
  return {edge_indexer, index_to_child, rootsplits};
}
// Builds a map translating edge ids of `dag_a` to the corresponding edge ids of
// `dag_b`. Only edges whose translated parent and child both exist (and are
// connected) in `dag_b` appear in the result.
std::unordered_map<EdgeId, EdgeId> SubsplitDAG::BuildEdgeIdMapBetweenDAGs(
    const SubsplitDAG &dag_a, const SubsplitDAG &dag_b) {
  std::unordered_map<EdgeId, EdgeId> edge_map;
  // The node map already accounts for taxon translation between the DAGs
  // (BuildNodeIdMapBetweenDAGs calls BuildTaxonTranslationMap internally), so
  // no separate taxon map is needed here.
  const auto node_map = BuildNodeIdMapBetweenDAGs(dag_a, dag_b);
  // Translates a dag_a node id to its dag_b counterpart, or NoId if unmapped.
  auto GetNodeId = [&node_map](const NodeId node_id) {
    const auto it = node_map.find(node_id);
    return (it != node_map.end()) ? it->second : NodeId{NoId};
  };
  for (EdgeId edgeid_a(0); edgeid_a < dag_a.EdgeCountWithLeafSubsplits(); edgeid_a++) {
    const auto &edge_a = dag_a.GetDAGEdge(edgeid_a);
    const auto parent_b = GetNodeId(edge_a.GetParent());
    const auto child_b = GetNodeId(edge_a.GetChild());
    if (dag_b.ContainsEdge(parent_b, child_b)) {
      edge_map[edgeid_a] = dag_b.GetEdgeIdx(parent_b, child_b);
    }
  }
  return edge_map;
}
// Builds a map translating node ids of `dag_a` to node ids of `dag_b`, by
// translating each dag_a subsplit through the taxon translation map and
// looking the result up in dag_b. Unmatched nodes are omitted.
std::unordered_map<NodeId, NodeId> SubsplitDAG::BuildNodeIdMapBetweenDAGs(
    const SubsplitDAG &dag_a, const SubsplitDAG &dag_b) {
  std::unordered_map<NodeId, NodeId> node_map;
  const auto taxon_map = BuildTaxonTranslationMap(dag_a, dag_b);
  for (NodeId nodeid_a(0); nodeid_a < dag_a.NodeCount(); nodeid_a++) {
    const auto &subsplit_a = dag_a.GetDAGNodeBitset(nodeid_a);
    auto trans_subsplit_b =
        SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(subsplit_a, taxon_map);
    // Re-sort clades after translation so the bitset is in canonical order.
    trans_subsplit_b = trans_subsplit_b.SubsplitSortClades();
    if (dag_b.ContainsNode(trans_subsplit_b)) {
      auto nodeid_b = dag_b.GetDAGNodeId(trans_subsplit_b);
      node_map[nodeid_a] = nodeid_b;
    }
  }
  return node_map;
}
// Populates dag_taxa_ (name -> taxon id) from a tree collection's tag map, and
// keeps a pointer to the tag map for Newick output.
void SubsplitDAG::BuildTaxonMap(const TagStringMap &tag_taxon_map) {
  // Insert all taxa from tree_collections's map to SubsplitDAG map.
  for (const auto &[tag, name] : tag_taxon_map) {
    // The "tag" key of the tree_collection's taxon_map is 2 bitpacked ints: [id,
    // topology count]. We only care about the id.
    TaxonId id = TaxonId{static_cast<size_t>(UnpackFirstInt(tag))};
    dag_taxa_.insert(std::make_pair(name, id));
  }
  tag_taxon_map_ = &tag_taxon_map;
}
// Creates a node for `subsplit`, assigns it the next node id, and registers it
// in the subsplit-to-id map plus the clade-union and clade adjacency maps
// (choosing the graft maps when a graft is attached). Returns the new id.
NodeId SubsplitDAG::CreateAndInsertNode(const Bitset &subsplit) {
  NodeId node_id = NodeId(NodeCount());
  storage_.AddVertex({node_id, subsplit});
  SafeInsert(subsplit_to_id_, subsplit, node_id);
  for (const auto contains_graft : {false, true}) {
    // Determine to add node to map as a real node or as a graft.
    if (contains_graft != ContainsGraft()) continue;
    auto &subsplit_union_map =
        !ContainsGraft() ? subsplit_union_ : subsplit_union_graft_;
    auto &subsplit_clade_map =
        !ContainsGraft() ? subsplit_clade_ : subsplit_clade_graft_;
    // Add Node to adjacency maps.
    if (!subsplit.SubsplitIsUCA()) {
      // Add clade union to map.
      const auto subsplit_union = subsplit.SubsplitCladeUnion();
      if (subsplit_union_map.find(subsplit_union) == subsplit_union_map.end()) {
        subsplit_union_map[subsplit_union] = NodeIdSet();
      }
      subsplit_union_map[subsplit_union].insert(node_id);
    }
    if (!subsplit.SubsplitIsLeaf()) {
      // Add left clade to map.
      const auto subsplit_left = subsplit.SubsplitGetClade(SubsplitClade::Left);
      if (subsplit_clade_map.find(subsplit_left) == subsplit_clade_map.end()) {
        subsplit_clade_map[subsplit_left] = NodeIdSet();
      }
      subsplit_clade_map[subsplit_left].insert(node_id);
      // Add right clade to map.
      const auto subsplit_right = subsplit.SubsplitGetClade(SubsplitClade::Right);
      if (subsplit_clade_map.find(subsplit_right) == subsplit_clade_map.end()) {
        subsplit_clade_map[subsplit_right] = NodeIdSet();
      }
      subsplit_clade_map[subsplit_right].insert(node_id);
    }
  }
  return node_id;
}
// Creates an edge from parent to child (on the given parent side), assigns it
// the next edge id, wires up both nodes' adjacency lists, and records the edge
// in storage. Returns the new edge id.
EdgeId SubsplitDAG::CreateAndInsertEdge(const NodeId parent_id, const NodeId child_id,
                                        const bool is_edge_on_left) {
  Assert(ContainsNode(parent_id), "Node with the given parent_id does not exist.");
  Assert(ContainsNode(child_id), "Node with the given child_id does not exist.");
  // Insert edge between parent and child.
  EdgeId edge_idx = EdgeId(EdgeCountWithLeafSubsplits());
  ConnectGivenNodes(parent_id, child_id, is_edge_on_left, edge_idx);
  storage_.AddLine({edge_idx, parent_id, child_id,
                    is_edge_on_left ? SubsplitClade::Left : SubsplitClade::Right});
  return edge_idx;
}
// Adds the edge to the adjacency lists of both endpoint nodes (leafward on the
// parent, rootward on the child).
void SubsplitDAG::ConnectGivenNodes(const NodeId parent_id, const NodeId child_id,
                                    const bool is_edge_on_left, const EdgeId edge_id) {
  auto parent_node = GetDAGNode(parent_id);
  auto child_node = GetDAGNode(child_id);
  SubsplitClade which_parent_clade =
      (is_edge_on_left ? SubsplitClade::Left : SubsplitClade::Right);
  parent_node.AddEdge(child_node.Id(), edge_id, Direction::Leafward,
                      which_parent_clade);
  child_node.AddEdge(parent_node.Id(), edge_id, Direction::Rootward,
                     which_parent_clade);
}
// Connects a node to all of its children on one side, as determined by the
// index_to_child map (edge ids are assigned later; NoId is used here).
void SubsplitDAG::ConnectNodes(const SizeBitsetMap &index_to_child, NodeId node_id,
                               bool is_edge_on_left) {
  // Get bitset of parent node according to its rotation.
  const auto subsplit = GetDAGNode(node_id).GetBitset(is_edge_on_left);
  // Build vector of child node's subsplits.
  const auto children = GetChildSubsplits(index_to_child, subsplit, true);
  // Connect parent node to all child nodes.
  for (const auto &child_subsplit : children) {
    ConnectGivenNodes(node_id, GetDAGNodeId(child_subsplit), is_edge_on_left,
                      EdgeId(NoId));
  }
}
// Creates all DAG nodes: leaves first (ids [0, taxon_count)), then interior
// nodes in depth-first postorder from each rootsplit, and finally the DAG root.
void SubsplitDAG::BuildNodes(const SizeBitsetMap &index_to_child,
                             const BitsetVector &rootsplits) {
  std::unordered_set<Bitset> visited_subsplits;
  // We will create leaf subsplits and insert to storage_ nodes.
  // Leaf subsplits (i.e. leaf nodes) will take IDs in [0, taxon_count_).
  for (size_t taxon_idx = 0; taxon_idx < taxon_count_; taxon_idx++) {
    CreateAndInsertNode(Bitset::LeafSubsplitOfNonemptyClade(
        Bitset::Singleton(taxon_count_, taxon_idx)));
  }
  // Then we add the remaining nodes.
  // The root splits will take on the higher IDs compared to the non-rootsplits.
  for (const auto &rootsplit : rootsplits) {
    BuildNodesDepthFirst(index_to_child, rootsplit, visited_subsplits);
  }
  // #350 look for "DAG root" and fix
  // Finally, we add the DAG root node.
  CreateAndInsertNode(Bitset::UCASubsplitOfTaxonCount(taxon_count_));
}
// Recursively creates the nodes below `subsplit` (postorder: children are
// created before their parent), skipping subsplits already visited.
void SubsplitDAG::BuildNodesDepthFirst(const SizeBitsetMap &index_to_child,
                                       const Bitset &subsplit,
                                       std::unordered_set<Bitset> &visited_subsplits) {
  visited_subsplits.insert(subsplit);
  for (bool rotated : {false, true}) {
    for (const auto &child_subsplit : GetChildSubsplits(
             index_to_child, SubsplitToSortedOrder(subsplit, rotated), false)) {
      if (visited_subsplits.count(child_subsplit) == 0) {
        BuildNodesDepthFirst(index_to_child, child_subsplit, visited_subsplits);
      }
    }
  }
  CreateAndInsertNode(subsplit);
}
// Connects every non-leaf node to its children on both sides; the DAG root is
// connected only on its left side (where the rootsplits live).
void SubsplitDAG::BuildEdges(const SizeBitsetMap &index_to_child) {
  // Connect every node except for the DAG root node.
  for (NodeId node_id = NodeId(taxon_count_); node_id < GetDAGRootNodeId(); node_id++) {
    ConnectNodes(index_to_child, node_id, true);
    ConnectNodes(index_to_child, node_id, false);
  }
  // Connect the DAG root node.
  ConnectNodes(index_to_child, GetDAGRootNodeId(), true);
}
void SubsplitDAG::BuildDAGEdgesFromEdgeIndexer(BitsetSizeMap &edge_indexer) {
  // Materialize one storage_ edge per PCSP in the indexer, preserving the
  // indexer's edge ids.
  for (const auto &[edge, index] : edge_indexer) {
    Assert(edge.size() == 3 * taxon_count_,
           "All edges should be bitsets with size 3 times taxon_count_.");
    const auto parent_id = GetDAGNodeId(edge.PCSPGetParentSubsplit());
    const auto child_id = GetDAGNodeId(edge.PCSPGetChildSubsplit());
    // Determine which of the parent's clades the child hangs off of.
    auto left_children = GetDAGNode(parent_id).GetLeftLeafward();
    const bool is_left_child =
        std::find(left_children.begin(), left_children.end(), child_id) !=
        left_children.end();
    if (!is_left_child) {
      // Sanity check: if not under the left clade, it must be under the right.
      auto right_children = GetDAGNode(parent_id).GetRightLeafward();
      Assert(std::find(right_children.begin(), right_children.end(), child_id) !=
                 right_children.end(),
             "Parent has no connection to child");
    }
    storage_.AddLine({EdgeId(index), parent_id, child_id,
                      is_left_child ? SubsplitClade::Left : SubsplitClade::Right});
  }
}
// For every leaf node, register the edge(s) to each of its parents: record the
// parent-clade -> edge-idx range in parent_to_child_range_ and append the edge
// itself to storage_.
void SubsplitDAG::AddLeafSubsplitsToDAGEdgesAndParentToRange() {
  for (NodeId node_id = NodeId(0); node_id < taxon_count_; node_id++) {
    const auto current_bitset(GetDAGNode(node_id).GetBitset());
    IterateOverRootwardEdges(GetDAGNode(node_id), [this, current_bitset](
                                                      const bool is_edge_on_left,
                                                      SubsplitDAGNode node) {
      // A leaf is its parent clade's only child, so the recorded range is a
      // single edge idx.  EdgeCountWithLeafSubsplits() grows when AddLine runs,
      // so both reads below must happen before the insertion.
      SafeInsert(parent_to_child_range_, node.GetBitset(is_edge_on_left),
                 {EdgeId(EdgeCountWithLeafSubsplits()),
                  EdgeId(EdgeCountWithLeafSubsplits() + 1)});
      storage_.AddLine({EdgeId(EdgeCountWithLeafSubsplits()), node.Id(),
                        GetDAGNodeId(current_bitset),
                        is_edge_on_left ? SubsplitClade::Left : SubsplitClade::Right});
    });
  }
}
// For every edge, record its id on both of its endpoint vertices.
void SubsplitDAG::StoreEdgeIds() {
  for (auto edge : storage_.GetLines()) {
    // NOTE(review): `auto` binds the vertex by value here -- this only updates
    // the underlying storage if GetVertices() yields handle/view objects rather
    // than independent copies; verify against the storage implementation.
    auto parent = storage_.GetVertices().at(edge.GetParent().value_);
    auto child = storage_.GetVertices().at(edge.GetChild().value_);
    parent.SetEdgeId(edge.GetChild(), edge.GetId());
    child.SetEdgeId(edge.GetParent(), edge.GetId());
  }
}
// Depth-first traversal toward the root, appending nodes to visit_order in
// post-order (each node after all of its rootward ancestors).
void SubsplitDAG::RootwardDepthFirst(NodeId node_id, NodeIdVector &visit_order,
                                     std::unordered_set<NodeId> &visited_nodes) const {
  // Add to set of all visited nodes.
  SafeInsert(visited_nodes, node_id);
  // #350: Look for sorted/rotated and update.
  // Recurse on parents reached through the right (sorted) clade.
  // (Previous comments said "children"; these neighbors are parents.)
  const auto &node = GetDAGNode(node_id);
  for (auto parent_id : node.GetRightRootward()) {
    if (visited_nodes.count(parent_id) == 0) {
      RootwardDepthFirst(parent_id, visit_order, visited_nodes);
    }
  }
  // Recurse on parents reached through the left (rotated) clade.
  for (auto parent_id : node.GetLeftRootward()) {
    if (visited_nodes.count(parent_id) == 0) {
      RootwardDepthFirst(parent_id, visit_order, visited_nodes);
    }
  }
  // Append to vector post-order (after all parents have been visited).
  visit_order.push_back(node_id);
}
void SubsplitDAG::LeafwardDepthFirst(NodeId node_id, NodeIdVector &visit_order,
                                     std::unordered_set<NodeId> &visited_nodes) const {
  // Depth-first traversal toward the leaves, appending nodes post-order
  // (each node after all of its descendants).
  SafeInsert(visited_nodes, node_id);
  const auto &node = GetDAGNode(node_id);
  // Descend through the right (sorted) clade's children first...
  for (const auto descendant_id : node.GetRightLeafward()) {
    if (visited_nodes.find(descendant_id) == visited_nodes.end()) {
      LeafwardDepthFirst(descendant_id, visit_order, visited_nodes);
    }
  }
  // ...then through the left (rotated) clade's children.
  for (const auto descendant_id : node.GetLeftLeafward()) {
    if (visited_nodes.find(descendant_id) == visited_nodes.end()) {
      LeafwardDepthFirst(descendant_id, visit_order, visited_nodes);
    }
  }
  // Post-order: record this node only after all of its descendants.
  visit_order.push_back(node_id);
}
// Node ordering for leafward (root-to-leaf) sweeps: a rootward DFS is started
// from every leaf, and since RootwardDepthFirst appends post-order, every node
// appears after all of its rootward ancestors.
NodeIdVector SubsplitDAG::LeafwardNodeTraversalTrace(bool include_dag_root_node) const {
  NodeIdVector visit_order;
  std::unordered_set<NodeId> visited_nodes;
  // Pre-marking the DAG root node as visited excludes it from the trace.
  if (!include_dag_root_node) {
    SafeInsert(visited_nodes, GetDAGRootNodeId());
  }
  for (NodeId leaf_id = NodeId(0); leaf_id < taxon_count_; leaf_id++) {
    RootwardDepthFirst(leaf_id, visit_order, visited_nodes);
  }
  return visit_order;
}
NodeIdVector SubsplitDAG::RootwardNodeTraversalTrace(bool include_dag_root_node) const {
  // Node ordering for rootward (leaf-to-root) sweeps: leafward DFS from each
  // rootsplit appends nodes post-order, so children precede their parents.
  NodeIdVector visit_order;
  std::unordered_set<NodeId> seen;
  for (const auto &rootsplit_id : GetRootsplitNodeIds()) {
    LeafwardDepthFirst(rootsplit_id, visit_order, seen);
  }
  // The DAG root node, if requested, comes last of all.
  if (include_dag_root_node) {
    visit_order.push_back(GetDAGRootNodeId());
  }
  return visit_order;
}
NodeIdVector SubsplitDAG::TopologicalNodeTraversalTrace() const {
  // Reversing the rootward post-order (DAG root included) yields a topological
  // order in which every parent precedes its children.
  auto trace = RootwardNodeTraversalTrace(true);
  std::reverse(trace.begin(), trace.end());
  return trace;
}
EdgeIdVector SubsplitDAG::LeafwardEdgeTraversalTrace(bool include_dag_root_node) const {
  // For each node in leafward node order, emit the edges to its children,
  // left clade before right clade.
  EdgeIdVector trace;
  for (const NodeId parent_id : LeafwardNodeTraversalTrace(include_dag_root_node)) {
    const auto &parent_node = GetDAGNode(parent_id);
    for (const SubsplitClade clade : {SubsplitClade::Left, SubsplitClade::Right}) {
      for (const NodeId child_id :
           parent_node.GetNeighbors(Direction::Leafward, clade)) {
        trace.push_back(GetEdgeIdx(parent_id, child_id));
      }
    }
  }
  return trace;
}
// Edge trace in rootward node order.  Edges are enumerated via each node's
// leafward (child) neighbors, so an edge is emitted when its parent node is
// visited.
// NOTE(review): apart from the node ordering this body is identical to
// LeafwardEdgeTraversalTrace -- confirm Direction::Leafward (not Rootward) is
// intended here and not a copy-paste artifact.
EdgeIdVector SubsplitDAG::RootwardEdgeTraversalTrace(bool include_dag_root_node) const {
  EdgeIdVector visit_order;
  for (NodeId node_id : RootwardNodeTraversalTrace(include_dag_root_node)) {
    const auto &node = GetDAGNode(node_id);
    for (SubsplitClade clade : {SubsplitClade::Left, SubsplitClade::Right}) {
      for (NodeId adj_node_id : node.GetNeighbors(Direction::Leafward, clade)) {
        EdgeId edge_id = GetEdgeIdx(node_id, adj_node_id);
        visit_order.push_back(edge_id);
      }
    }
  }
  return visit_order;
}
EdgeIdVector SubsplitDAG::TopologicalEdgeTraversalTrace(
    bool include_dag_root_node) const {
  // A topological (parent-edges-first) order is the reverse of the rootward
  // edge trace.
  auto trace = RootwardEdgeTraversalTrace(include_dag_root_node);
  std::reverse(trace.begin(), trace.end());
  return trace;
}
void SubsplitDAG::TopologicalEdgeTraversal(ParentRotationChildEdgeLambda f) const {
  // Visit nodes parent-before-child, applying f to every child edge of each.
  for (const auto parent_id : TopologicalNodeTraversalTrace()) {
    IterateOverLeafwardEdgesAndChildren(
        GetDAGNode(parent_id),
        [&f, &parent_id](const EdgeId edge_idx, const bool is_edge_on_left,
                         const NodeId child_id) {
          f(parent_id, is_edge_on_left, child_id, edge_idx);
        });
  }
}
// ** Miscellaneous
Bitset SubsplitDAG::SubsplitToSortedOrder(const Bitset &subsplit, bool rotated) const {
  // A rotated subsplit has its two clades swapped back into sorted order.
  if (rotated) {
    return subsplit.SubsplitRotate();
  }
  return subsplit;
}
SizeVector SubsplitDAG::BuildTaxonTranslationMap(const SubsplitDAG &dag_a,
                                                 const SubsplitDAG &dag_b) {
  // Build a map from each taxon's position in dag_b to its position in dag_a.
  // Both DAGs must range over exactly the same taxon names.
  const auto names_a = dag_a.BuildSetOfTaxonNames();
  const auto names_b = dag_b.BuildSetOfTaxonNames();
  Assert(names_a == names_b,
         "SubsplitDAG::BuildTaxonTranslationMap(): SubsplitDAGs do not cover the same "
         "taxon set.");
  Assert(names_a.size() == dag_a.TaxonCount(),
         "SubsplitDAG::BuildTaxonTranslationMap(): Number of taxon names does not "
         "match the number of taxa in the DAG.");
  SizeVector taxon_map(names_a.size());
  for (const auto &name : names_a) {
    taxon_map[dag_b.GetTaxonId(name).value_] = dag_a.GetTaxonId(name).value_;
  }
  return taxon_map;
}
int SubsplitDAG::BitsetCompareViaTaxonTranslationMap(const Bitset &bitset_a,
const Bitset &bitset_b,
const SizeVector &taxon_map) {
Bitset bitset_a_translated_to_b =
BitsetTranslateViaTaxonTranslationMap(bitset_a, taxon_map);
return Bitset::Compare(bitset_a_translated_to_b, bitset_b);
}
Bitset SubsplitDAG::BitsetTranslateViaTaxonTranslationMap(
    const Bitset &bitset, const SizeVector &taxon_map, const bool forward_translate) {
  // The bitset is a concatenation of equally-sized clades; permute each clade's
  // bits independently according to taxon_map.
  const size_t clade_size = taxon_map.size();
  const size_t clade_count = bitset.size() / clade_size;
  Bitset translated(bitset.size());
  for (size_t clade = 0; clade < clade_count; clade++) {
    const size_t base = clade * clade_size;
    for (size_t bit = 0; bit < clade_size; bit++) {
      const size_t unmapped_pos = base + bit;
      const size_t mapped_pos = base + taxon_map[bit];
      // Forward translation pulls each bit from its mapped position; the
      // reverse pushes each bit to its mapped position.
      if (forward_translate) {
        translated.set(unmapped_pos, bitset[mapped_pos]);
      } else {
        translated.set(mapped_pos, bitset[unmapped_pos]);
      }
    }
  }
  return translated;
}
TagStringMap SubsplitDAG::BuildDummyTagTaxonMap(const size_t taxon_count) {
  // Fabricate placeholder taxon names "x0", "x1", ... keyed by packed tags.
  TagStringMap tag_taxon_map;
  for (size_t i = 0; i < taxon_count; i++) {
    tag_taxon_map.emplace(PackInts(i, 0), "x" + std::to_string(i));
  }
  return tag_taxon_map;
}
// ** Query DAG
// Whether a graft overlay is currently attached to this DAG.
bool SubsplitDAG::ContainsGraft() const { return storage_.HaveHost(); }
// Whether a taxon with the given name is in the DAG's taxon set.
bool SubsplitDAG::ContainsTaxon(const std::string &name) const {
  return dag_taxa_.find(name) != dag_taxa_.end();
}
// Whether a node with the given subsplit exists in the DAG (or its graft).
bool SubsplitDAG::ContainsNode(const Bitset &subsplit) const {
  if (ContainsGraft()) {
    if (storage_.FindVertex(subsplit).has_value()) return true;
  }
  return subsplit_to_id_.find(subsplit) != subsplit_to_id_.end();
}
// Whether the node id is in range (node ids are contiguous from 0).
bool SubsplitDAG::ContainsNode(const NodeId node_id) const {
  return node_id < NodeCount();
}
// Whether an edge joining the two subsplits exists in the DAG.
bool SubsplitDAG::ContainsEdge(const Bitset &parent_subsplit,
                               const Bitset &child_subsplit) const {
  if (!(ContainsNode(parent_subsplit) && ContainsNode(child_subsplit))) {
    return false;
  }
  return ContainsEdge(GetDAGNodeId(parent_subsplit), GetDAGNodeId(child_subsplit));
}
// Whether an edge joining the two node ids exists in the DAG.
bool SubsplitDAG::ContainsEdge(const NodeId parent_id, const NodeId child_id) const {
  return storage_.GetLine(parent_id, child_id).has_value();
}
// Whether the edge described by a PCSP bitset exists in the DAG.
bool SubsplitDAG::ContainsEdge(const Bitset &edge_pcsp) const {
  return ContainsEdge(edge_pcsp.PCSPGetParentSubsplit(),
                      edge_pcsp.PCSPGetChildSubsplit());
}
// Whether an edge with the given id exists in the DAG.
bool SubsplitDAG::ContainsEdge(const EdgeId edge_id) const {
  return storage_.GetLine(edge_id).has_value();
}
// Whether the node is the DAG root (universal ancestor) node.
bool SubsplitDAG::IsNodeRoot(const NodeId node_id) const {
  return (node_id == GetDAGRootNodeId());
}
// Whether the node is a leaf (leaves occupy ids [0, taxon_count_)).
bool SubsplitDAG::IsNodeLeaf(const NodeId node_id) const {
  return (node_id < TaxonCount());
}
// Whether the edge descends directly from the DAG root node.
bool SubsplitDAG::IsEdgeRoot(const EdgeId edge_id) const {
  const auto parent_id = NodeId(GetDAGEdge(edge_id).GetParent());
  return IsNodeRoot(parent_id);
}
// Whether the edge terminates at a leaf node.
bool SubsplitDAG::IsEdgeLeaf(const EdgeId edge_id) const {
  const auto child_id = NodeId(GetDAGEdge(edge_id).GetChild());
  return IsNodeLeaf(child_id);
}
// Whether the NNI's parent and child nodes, and the edge between them, all
// exist in the DAG.
bool SubsplitDAG::ContainsNNI(const NNIOperation &nni) const {
  return ContainsNode(nni.GetParent()) && ContainsNode(nni.GetChild()) &&
         ContainsEdge(GetDAGNodeId(nni.GetParent()), GetDAGNodeId(nni.GetChild()));
}
// Whether the tree's topology is expressible within the DAG.
bool SubsplitDAG::ContainsTree(const RootedTree &tree, const bool is_quiet) const {
  return ContainsTopology(tree.Topology(), is_quiet);
}
// Whether the given topology is expressible within the DAG: every edge of the
// topology must exist in the DAG and the topology must span the full taxon set.
// When is_quiet is false, the reason for failure is reported to std::cerr.
bool SubsplitDAG::ContainsTopology(const Node::Topology &topology,
                                   const bool is_quiet) const {
  // Route diagnostics to a throwaway stream when quiet.
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cerr);
  bool contains_topology = true;
  // One flag per taxon, set when the corresponding leaf is seen.
  BoolVector leaf_check(TaxonCount(), false);
  // iterate over rest of topology.
  topology->Preorder([this, &os, &leaf_check, &contains_topology](const Node *node) {
    // Skip if already found topology not in DAG.
    if (!contains_topology) {
      return;
    }
    // Check that topology covers entire taxon set.
    if (node->Leaves().size() != TaxonCount()) {
      os << "DoesNotContainTopology: Number of topology leaves different size than "
            "taxon set."
         << std::endl;
      contains_topology = false;
      return;
    }
    // If node is a child, make sure it is a singleton and check the leaf bit.
    if (node->IsLeaf()) {
      const auto singleton = node->Leaves().SingletonOption();
      if (!singleton.has_value()) {
        os << "DoesNotContainTopology: Leaf node is not a singleton. -- "
           << node->Leaves() << std::endl;
        contains_topology = false;
        return;
      }
      leaf_check[singleton.value()] = true;
    }
    // Otherwise, find both child PCSPs from node and check that they are in the DAG.
    else {
      const auto child_nodes = node->Children();
      if (child_nodes.size() != 2) {
        os << "DoesNotContainTopology: Non-leaf node does not have 2 children."
           << std::endl;
        contains_topology = false;
        return;
      }
      const auto parent_subsplit = node->BuildSubsplit();
      for (const auto &child_node : child_nodes) {
        const auto child_subsplit = child_node->BuildSubsplit();
        if (!ContainsEdge(parent_subsplit, child_subsplit)) {
          os << "DoesNotContainTopology: Edge in topology not found in DAG -- "
             << Bitset::PCSP(parent_subsplit, child_subsplit).PCSPToString()
             << std::endl;
          contains_topology = false;
          return;
        }
      }
    }
  });
  if (!contains_topology) {
    return false;
  }
  // Check that every leaf node has been visited.
  bool all_leaves = std::all_of(leaf_check.begin(), leaf_check.end(),
                                [](bool all_true) { return all_true; });
  if (!all_leaves) {
    os << "DoesNotContainTopology: Topology does not span every leaf -- " << leaf_check
       << std::endl;
    return false;
  }
  return true;
}
// ** Trees/Topologies
std::unordered_map<NodeId, const Node *>
SubsplitDAG::BuildDAGNodeIdToTreeNodeMapFromTopology(
    const Node::Topology &topology) const {
  // Associate each DAG node id with the tree node carrying the same subsplit.
  std::unordered_map<NodeId, const Node *> dag_id_to_tree_node;
  topology->Preorder([this, &dag_id_to_tree_node](const Node *node) {
    dag_id_to_tree_node[GetDAGNodeId(node->BuildSubsplit())] = node;
  });
  return dag_id_to_tree_node;
}
std::unordered_map<NodeId, size_t> SubsplitDAG::BuildNodeIdMapFromTopology(
    const Node::Topology &topology) const {
  // Associate each DAG node id with the id of the tree node sharing its subsplit.
  std::unordered_map<NodeId, size_t> dag_id_to_tree_id;
  topology->Preorder([this, &dag_id_to_tree_id](const Node *node) {
    dag_id_to_tree_id[GetDAGNodeId(node->BuildSubsplit())] = node->Id();
  });
  return dag_id_to_tree_id;
}
std::unordered_map<EdgeId, SizePair> SubsplitDAG::BuildEdgeIdMapFromTopology(
    const Node::Topology &topology) const {
  // Map each DAG edge id onto the (parent, child) tree-node id pair it spans.
  std::unordered_map<EdgeId, SizePair> edge_id_map;
  topology->Preorder([this, &edge_id_map](const Node *node) {
    if (node->IsLeaf()) {
      return;
    }
    const NodeId parent_id = GetDAGNodeId(node->BuildSubsplit());
    for (const auto &child_node : node->Children()) {
      const NodeId child_id = GetDAGNodeId(child_node->BuildSubsplit());
      edge_id_map[GetEdgeIdx(parent_id, child_id)] = {node->Id(), child_node->Id()};
    }
  });
  return edge_id_map;
}
// Build a RootedTree over the given topology, pulling each edge's branch length
// out of the DAG-indexed branch length vector.
RootedTree SubsplitDAG::BuildTreeFromTopology(
    const Node::Topology &topology, const EigenVectorXd &dag_branch_lengths) const {
  auto dag_id_to_tree_id_map = BuildEdgeIdMapFromTopology(topology);
  // One slot per tree edge plus one for the root; assumes tree node ids are
  // contiguous in [0, edge_count] -- TODO confirm against Node id assignment.
  std::vector<double> tree_branch_lengths(dag_id_to_tree_id_map.size() + 1);
  // Root node length is 0.
  tree_branch_lengths[tree_branch_lengths.size() - 1] = 0.;
  // Find branch lengths via map.
  for (const auto &[dag_id, tree_id_pair] : dag_id_to_tree_id_map) {
    const auto &[parent_tree_id, child_tree_id] = tree_id_pair;
    std::ignore = parent_tree_id;
    // A tree edge is identified by the id of its child node.
    tree_branch_lengths[child_tree_id] = dag_branch_lengths[dag_id.value_];
  }
  return RootedTree(topology, tree_branch_lengths);
}
// ** Build Output Indexers/Vectors
// Map lookup: find parents by looking for nodes in the DAG where one of their
// clades matches this subsplit's clade union.  Returns (left, right) parent id
// vectors according to which parent clade the subsplit hangs off of.
NodeIdVectorPair SubsplitDAG::FindParentNodeIdsViaMap(const Bitset &subsplit) const {
  NodeIdVector left_parents, right_parents;
  // The UCA root has no parents.
  if (subsplit.SubsplitIsUCA()) {
    return {left_parents, right_parents};
  }
  const auto subsplit_union = subsplit.SubsplitCladeUnion();
  // Search the host DAG's map and the graft's map.  Iterate by pointer: a braced
  // list of the maps themselves would copy both maps on every call.
  for (const auto *subsplit_map : {&subsplit_clade_, &subsplit_clade_graft_}) {
    // Single find() instead of the previous contains-then-find double lookup.
    const auto it = subsplit_map->find(subsplit_union);
    if (it == subsplit_map->end()) {
      continue;
    }
    for (const auto parent_id : it->second) {
      const auto parent_subsplit = GetDAGNodeBitset(parent_id);
      // Sort results into left and right clades.
      for (const auto clade : SubsplitCladeEnum::Iterator()) {
        if (parent_subsplit.SubsplitGetClade(clade) == subsplit_union) {
          if (clade == SubsplitClade::Left) {
            left_parents.push_back(parent_id);
          } else {
            right_parents.push_back(parent_id);
          }
        }
      }
    }
  }
  return {left_parents, right_parents};
}
// Map lookup: find children by looking for nodes in the DAG whose clade union
// equals one of the clades of this subsplit.  Returns (left, right) child id
// vectors according to which of the subsplit's clades they descend from.
NodeIdVectorPair SubsplitDAG::FindChildNodeIdsViaMap(const Bitset &subsplit) const {
  NodeIdVector left_children, right_children;
  // Leaves have no children.
  if (subsplit.SubsplitIsLeaf()) {
    return {left_children, right_children};
  }
  const auto subsplit_left = subsplit.SubsplitGetClade(SubsplitClade::Left);
  const auto subsplit_right = subsplit.SubsplitGetClade(SubsplitClade::Right);
  // Search the host DAG's map and the graft's map.  Iterate by pointer: a braced
  // list of the maps themselves would copy both maps on every call.  Each clade
  // uses a single find() rather than the previous contains-then-find pair.
  for (const auto *subsplit_map : {&subsplit_union_, &subsplit_union_graft_}) {
    if (const auto it = subsplit_map->find(subsplit_left);
        it != subsplit_map->end()) {
      left_children.insert(left_children.end(), it->second.begin(), it->second.end());
    }
    if (const auto it = subsplit_map->find(subsplit_right);
        it != subsplit_map->end()) {
      right_children.insert(right_children.end(), it->second.begin(),
                            it->second.end());
    }
  }
  return {left_children, right_children};
}
NodeIdVectorPair SubsplitDAG::FindParentNodeIdsViaScan(const Bitset &subsplit,
                                                       bool graft_nodes_only) const {
  // Linear scan over all known subsplits, collecting those that could be a
  // parent of the given subsplit, split by which clade it would hang off of.
  NodeIdVector left_parents, right_parents;
  for (const auto &[candidate_subsplit, candidate_id] : subsplit_to_id_) {
    // Host-DAG nodes have ids below NodeCount(); skip them if only grafted
    // nodes were requested.
    if (graft_nodes_only && (candidate_id < NodeCount())) continue;
    if (subsplit.SubsplitIsLeftChildOf(candidate_subsplit)) {
      left_parents.push_back(candidate_id);
    } else if (subsplit.SubsplitIsRightChildOf(candidate_subsplit)) {
      right_parents.push_back(candidate_id);
    }
  }
  return {left_parents, right_parents};
}
NodeIdVectorPair SubsplitDAG::FindChildNodeIdsViaScan(const Bitset &subsplit,
                                                      bool graft_nodes_only) const {
  // Linear scan over all known subsplits, collecting those that could be a
  // child of the given subsplit, split by which clade they descend from.
  NodeIdVector left_children, right_children;
  for (const auto &[candidate_subsplit, candidate_id] : subsplit_to_id_) {
    // Host-DAG nodes have ids below NodeCount(); skip them if only grafted
    // nodes were requested.
    if (graft_nodes_only && (candidate_id < NodeCount())) continue;
    if (candidate_subsplit.SubsplitIsLeftChildOf(subsplit)) {
      left_children.push_back(candidate_id);
    } else if (candidate_subsplit.SubsplitIsRightChildOf(subsplit)) {
      right_children.push_back(candidate_id);
    }
  }
  return {left_children, right_children};
}
std::pair<std::set<NodeId>, std::set<NodeId>> NodeIdVectorsToSets(
    const NodeIdVectorPair &vecs) {
  // Convert the (left, right) id vectors into ordered sets.
  const auto &[left_vec, right_vec] = vecs;
  return {std::set<NodeId>(left_vec.begin(), left_vec.end()),
          std::set<NodeId>(right_vec.begin(), right_vec.end())};
}
// Diagnostic helper: report (but do not fail) when two node id collections
// under the given label differ.
template <typename ContainerType>
void CompareNodeIds(const ContainerType &lhs, const ContainerType &rhs,
                    const std::string &name) {
  if (lhs == rhs) {
    return;
  }
  std::cerr << "ERROR: " << name << " NodeIds do not match --" << std::endl;
  std::cerr << lhs << std::endl << rhs << std::endl;
}
// Find the (left, right) parent node ids of the given subsplit.
NodeIdVectorPair SubsplitDAG::FindParentNodeIds(const Bitset &subsplit) const {
  // Return the map-lookup result directly: the previous structured-binding
  // unpack/re-pack copied both id vectors for no benefit.
  return FindParentNodeIdsViaMap(subsplit);
}
// Find the (left, right) child node ids of the given subsplit.
NodeIdVectorPair SubsplitDAG::FindChildNodeIds(const Bitset &subsplit) const {
  // Return the map-lookup result directly: the previous structured-binding
  // unpack/re-pack copied both id vectors for no benefit.
  return FindChildNodeIdsViaMap(subsplit);
}
// Return the first parent node id found for the given subsplit, preferring a
// parent reached via the left clade.  Fails hard if the subsplit has no parent.
NodeId SubsplitDAG::FindFirstParentNodeId(const Bitset &subsplit) const {
  // (The previous implementation's `for (const auto node_ids : ...)` copied
  // each NodeIdVector per iteration; check emptiness instead.)
  const auto [left_parents, right_parents] = FindParentNodeIds(subsplit);
  if (!left_parents.empty()) {
    return left_parents.front();
  }
  if (!right_parents.empty()) {
    return right_parents.front();
  }
  Failwith("Given subsplit has no parent nodes in DAG.");
}
NodeId SubsplitDAG::FindFirstChildNodeId(const Bitset &subsplit,
                                         const SubsplitClade clade) const {
  // Return the first child node hanging off the requested clade of the
  // subsplit; fails hard if that clade has no children.
  const auto [left, right] = FindChildNodeIds(subsplit);
  const auto &clade_children = (clade == SubsplitClade::Left) ? left : right;
  if (!clade_children.empty()) {
    return clade_children.front();
  }
  Failwith("Given subsplit has no child nodes in DAG.");
}
// ** Modify DAG Helpers
// Create an edge from the (newly added) child node down to every one of its
// potential children already in the DAG, on both clades, recording the new edge
// idxs in added_edge_idxs and the clade's idx range in parent_to_child_range_.
void SubsplitDAG::ConnectChildToAllChildren(const Bitset &child_subsplit,
                                            EdgeIdVector &added_edge_idxs) {
  const auto [left_leafward_of_child, right_leafward_of_child] =
      FindChildNodeIds(child_subsplit);
  for (const auto &[children_of_child, rotated] :
       std::vector<std::pair<NodeIdVector, bool>>{{left_leafward_of_child, true},
                                                  {right_leafward_of_child, false}}) {
    // The new edges of this clade occupy a contiguous idx range starting at the
    // current edge count.  EdgeCountWithLeafSubsplits() grows as edges are
    // inserted below, so the range must be computed before the insertions.
    SafeInsert(parent_to_child_range_, SubsplitToSortedOrder(child_subsplit, rotated),
               {EdgeId(EdgeCountWithLeafSubsplits()),
                EdgeId(EdgeCountWithLeafSubsplits() + children_of_child.size())});
    for (const auto child_of_child_id : children_of_child) {
      const auto new_edge_idx =
          CreateAndInsertEdge(GetDAGNodeId(child_subsplit), child_of_child_id, rotated);
      added_edge_idxs.push_back(new_edge_idx);
    }
  }
}
// Create an edge from the (newly added) parent node down to each of its
// potential children already in the DAG -- on both clades, but skipping the
// given child node -- recording new edge idxs and per-clade idx ranges.
void SubsplitDAG::ConnectParentToAllChildrenExcept(const Bitset &parent_subsplit,
                                                   const Bitset &child_subsplit,
                                                   EdgeIdVector &added_edge_idxs) {
  const auto [left_leafward_of_parent, right_leafward_of_parent] =
      FindChildNodeIds(parent_subsplit);
  for (const auto &[children_of_parent, is_edge_on_left] :
       std::vector<std::pair<NodeIdVector, bool>>{{left_leafward_of_parent, true},
                                                  {right_leafward_of_parent, false}}) {
    // Range is computed before the insertions since EdgeCountWithLeafSubsplits()
    // grows as edges are added.  Note the range spans children_of_parent.size()
    // slots even though the excluded child is not inserted here.
    SafeInsert(parent_to_child_range_,
               SubsplitToSortedOrder(parent_subsplit, is_edge_on_left),
               {EdgeId(EdgeCountWithLeafSubsplits()),
                EdgeId(EdgeCountWithLeafSubsplits() + children_of_parent.size())});
    for (const auto child_of_parent_id : children_of_parent) {
      if (child_of_parent_id != GetDAGNodeId(child_subsplit)) {
        const auto new_edge_idx = CreateAndInsertEdge(
            GetDAGNodeId(parent_subsplit), child_of_parent_id, is_edge_on_left);
        added_edge_idxs.push_back(new_edge_idx);
      }
    }
  }
}
// Create an edge from every potential parent of the child node already in the
// DAG down to the child -- skipping the given parent node -- recording the new
// edge idxs in added_edge_idxs.
void SubsplitDAG::ConnectChildToAllParentsExcept(const Bitset &parent_subsplit,
                                                 const Bitset &child_subsplit,
                                                 EdgeIdVector &added_edge_idxs) {
  const auto [left_rootward_of_child, right_rootward_of_child] =
      FindParentNodeIds(child_subsplit);
  // `rotated` records which clade of each parent the child hangs off of.
  for (const auto &[parents_of_child, rotated] :
       std::vector<std::pair<NodeIdVector, bool>>{{left_rootward_of_child, true},
                                                  {right_rootward_of_child, false}}) {
    for (const auto parent_of_child_id : parents_of_child) {
      if (parent_of_child_id != GetDAGNodeId(parent_subsplit)) {
        const auto new_edge_idx = CreateAndInsertEdge(
            parent_of_child_id, GetDAGNodeId(child_subsplit), rotated);
        added_edge_idxs.push_back(new_edge_idx);
      }
    }
  }
}
// Create an edge from every potential grandparent (parent of the new parent
// node) already in the DAG down to the parent node, recording the new edge
// idxs in added_edge_idxs.
void SubsplitDAG::ConnectParentToAllParents(const Bitset &parent_subsplit,
                                            EdgeIdVector &added_edge_idxs) {
  const auto [left_rootward_of_parent, right_rootward_of_parent] =
      FindParentNodeIds(parent_subsplit);
  // `rotated` records which clade of each grandparent the parent hangs off of.
  for (const auto &[parents_of_parent, rotated] :
       std::vector<std::pair<NodeIdVector, bool>>{{left_rootward_of_parent, true},
                                                  {right_rootward_of_parent, false}}) {
    for (const auto parent_of_parent_id : parents_of_parent) {
      const auto new_edge_idx = CreateAndInsertEdge(
          parent_of_parent_id, GetDAGNodeId(parent_subsplit), rotated);
      added_edge_idxs.push_back(new_edge_idx);
    }
  }
}
// ** Modify DAG
void SubsplitDAG::ModificationResult::Reinit(const SubsplitDAG &dag) {
  // Snapshot the DAG's current sizes and reset all tracking state: identity
  // reindexers, no added nodes or edges.
  prv_node_count = dag.NodeCount();
  prv_edge_count = dag.EdgeCountWithLeafSubsplits();
  node_reindexer = Reindexer::IdentityReindexer(prv_node_count);
  edge_reindexer = Reindexer::IdentityReindexer(prv_edge_count);
  added_node_ids.clear();
  added_edge_idxs.clear();
}
// Compose this modification with a subsequent one: the result spans from this
// modification's "before" state to `other`'s "after" state.
SubsplitDAG::ModificationResult SubsplitDAG::ModificationResult::ComposeWith(
    const ModificationResult other) {
  ModificationResult res;
  res.prv_node_count = prv_node_count;
  res.prv_edge_count = prv_edge_count;
  res.cur_node_count = other.cur_node_count;
  res.cur_edge_count = other.cur_edge_count;
  // Chain the reindexers: old ids -> this reindexing -> other's reindexing.
  res.node_reindexer = node_reindexer.ComposeWith(other.node_reindexer);
  res.edge_reindexer = edge_reindexer.ComposeWith(other.edge_reindexer);
  res.added_node_ids = NodeIdVector();
  res.added_edge_idxs = EdgeIdVector();
  // Ids added by this modification must be pushed through other's reindexers
  // to be valid in the final DAG.
  for (const auto &node_id : added_node_ids) {
    auto new_node_id =
        NodeId(other.node_reindexer.GetNewIndexByOldIndex(node_id.value_));
    res.added_node_ids.push_back(new_node_id);
  }
  for (const auto &edge_id : added_edge_idxs) {
    auto new_edge_id =
        EdgeId(other.edge_reindexer.GetNewIndexByOldIndex(edge_id.value_));
    res.added_edge_idxs.push_back(new_edge_id);
  }
  // Ids added by `other` are already in final coordinates; append as-is.
  res.added_node_ids.insert(res.added_node_ids.end(), other.added_node_ids.begin(),
                            other.added_node_ids.end());
  res.added_edge_idxs.insert(res.added_edge_idxs.end(), other.added_edge_idxs.begin(),
                             other.added_edge_idxs.end());
  return res;
}
// Add the NNI's parent/child subsplit pair to the DAG.
SubsplitDAG::ModificationResult SubsplitDAG::AddNodePair(const NNIOperation &nni) {
  // Use the accessors for consistency with ContainsNNI rather than reaching
  // into the NNI's members directly.
  return AddNodePair(nni.GetParent(), nni.GetChild());
}
// Add a (parent, child) subsplit pair to the DAG, asserting validity before and
// success after the insertion.  Returns the resulting modification record
// (reindexers plus the ids of added nodes/edges).
SubsplitDAG::ModificationResult SubsplitDAG::AddNodePair(const Bitset &parent_subsplit,
                                                         const Bitset &child_subsplit) {
  // Check that node pair will create a valid SubsplitDAG.
  Assert(IsValidAddNodePair(parent_subsplit, child_subsplit),
         "The given pair of nodes is incompatible with DAG in "
         "SubsplitDAG::AddNodePair.");
  // Perform add node pair operation.
  auto mods = AddNodePairInternals(parent_subsplit, child_subsplit);
  // Check that node pair was added correctly.
  if (!ContainsEdge(parent_subsplit, child_subsplit)) {
    // Diagnostic output before the Assert below fires.
    std::cerr << "contains_parent: " << ContainsNode(parent_subsplit) << std::endl;
    std::cerr << "contains_child: " << ContainsNode(child_subsplit) << std::endl;
  }
  Assert(ContainsEdge(parent_subsplit, child_subsplit),
         "AddNodePair failed to add given node pair.");
  return mods;
}
// Batch entry point: add every (parent, child) subsplit pair to the DAG in one
// modification (no per-pair validity assertion is performed here).
SubsplitDAG::ModificationResult SubsplitDAG::AddNodes(
    const BitsetPairVector &node_subsplit_pairs) {
  return AddNodePairInternals(node_subsplit_pairs);
}
SubsplitDAG::ModificationResult SubsplitDAG::AddNodePairInternals(
    const Bitset &parent_subsplit, const Bitset &child_subsplit) {
  // Forward the single pair to the batch overload.
  return AddNodePairInternals(
      std::vector<std::pair<Bitset, Bitset>>{{parent_subsplit, child_subsplit}});
}
// Core add-node-pair routine: inserts any missing nodes/edges for each pair,
// then (unless grafted) reindexes the DAG so that ids remain in canonical order.
// Returns the modification record; a no-op returns identity reindexers.
SubsplitDAG::ModificationResult SubsplitDAG::AddNodePairInternals(
    const std::vector<std::pair<Bitset, Bitset>> &node_subsplit_pairs) {
  // Initialize output vectors.
  ModificationResult mods;
  // Note: `prev_node_count` acts as a place marker. We know what the DAG root
  // node id is (`prev_node_count - 1`).
  mods.prv_node_count = NodeCount();
  mods.prv_edge_count = EdgeCountWithLeafSubsplits();
  // Check if any of the edges are new.
  bool edge_is_new = false;
  for (const auto &[parent_subsplit, child_subsplit] : node_subsplit_pairs) {
    // Check if either parent or child don't already exist in the DAG.
    const bool parent_is_new = !ContainsNode(parent_subsplit);
    const bool child_is_new = !ContainsNode(child_subsplit);
    if ((parent_is_new || child_is_new) &&
        !ContainsEdge(parent_subsplit, child_subsplit)) {
      edge_is_new = true;
    }
  }
  // Soft assert: This allows for parent-child pair to exist in the DAG, but no
  // work is done. If both the parent and child already exist in DAG, return
  // added_node_ids and added_edge_idxs as empty, and node_reindexer and
  // edge_reindexer as identity reindexers.
  if (!edge_is_new) {
    // Return default reindexers if both nodes already exist.
    mods.node_reindexer = Reindexer::IdentityReindexer(NodeCount());
    mods.edge_reindexer = Reindexer::IdentityReindexer(EdgeCountWithLeafSubsplits());
    mods.cur_node_count = NodeCount();
    mods.cur_edge_count = EdgeCountWithLeafSubsplits();
    return mods;
  }
  // Add nodes and edges.
  AddNodePairInternalsWithoutReindexing(node_subsplit_pairs, mods);
  // If GraftDAG, does not perform reindexing.
  if (!ContainsGraft()) {
    // Create reindexers.
    mods.node_reindexer = BuildNodeReindexer(mods.prv_node_count);
    mods.edge_reindexer = BuildEdgeReindexer(mods.prv_edge_count);
    // Update the ids in added_node_ids and added_edge_idxs according to the
    // reindexers.
    Reindexer::RemapIdVector<NodeId>(mods.added_node_ids, mods.node_reindexer);
    Reindexer::RemapIdVector<EdgeId>(mods.added_edge_idxs, mods.edge_reindexer);
    // Update fields in the Subsplit DAG according to the reindexers.
    RemapNodeIds(mods.node_reindexer);
    RemapEdgeIdxs(mods.edge_reindexer);
    // Update Counts.
    CountTopologies();
    CountEdgesWithoutLeafSubsplits();
  }
  mods.cur_node_count = NodeCount();
  mods.cur_edge_count = EdgeCountWithLeafSubsplits();
  return mods;
}
// Insert the missing nodes and all of their incident edges for each pair,
// WITHOUT reindexing.  Phase 1 adds nodes plus their leafward (child-side)
// edges; phase 2 adds the pair's own edge and the rootward (parent-side)
// edges.  mods.prv_edge_count marks where later reindexing must begin.
void SubsplitDAG::AddNodePairInternalsWithoutReindexing(
    const std::vector<std::pair<Bitset, Bitset>> &node_subsplit_pairs,
    ModificationResult &mods) {
  for (const auto &[parent_subsplit, child_subsplit] : node_subsplit_pairs) {
    const bool parent_is_new = !ContainsNode(parent_subsplit);
    const bool child_is_new = !ContainsNode(child_subsplit);
    // Add parent/child nodes and connect them to their children
    // If child node is new, add node and connect it to all its children.
    if (child_is_new) {
      CreateAndInsertNode(child_subsplit);
      mods.added_node_ids.push_back(GetDAGNodeId(child_subsplit));
      // Don't reindex these edges.
      ConnectChildToAllChildren(child_subsplit, mods.added_edge_idxs);
    }
    // If parent node is new, add node it to all its children (except the new child
    // node).
    if (parent_is_new) {
      CreateAndInsertNode(parent_subsplit);
      mods.added_node_ids.push_back(GetDAGNodeId(parent_subsplit));
      // Don't reindex these edges.
      ConnectParentToAllChildrenExcept(parent_subsplit, child_subsplit,
                                       mods.added_edge_idxs);
    }
  }
  // Note: `prev_edge_count` is a marker conveying where we need to start
  // reindexing edge idxs.
  // Edges are only reindexed if the parent node already existed in the DAG
  // (so as to ensure that edges descending from the same node clade have
  // contiguous idxs).
  mods.prv_edge_count = EdgeCountWithLeafSubsplits();
  for (const auto &[parent_subsplit, child_subsplit] : node_subsplit_pairs) {
    // New nodes received ids at/above the pre-modification node count.
    const bool parent_is_new = (GetDAGNodeId(parent_subsplit) >= mods.prv_node_count);
    const bool child_is_new = (GetDAGNodeId(child_subsplit) >= mods.prv_node_count);
    // Connect the given parent node to the given child node.
    mods.added_edge_idxs.push_back(EdgeId(EdgeCountWithLeafSubsplits()));
    CreateAndInsertEdge(GetDAGNodeId(parent_subsplit), GetDAGNodeId(child_subsplit),
                        child_subsplit.SubsplitIsLeftChildOf(parent_subsplit));
    // Don't reindex the edge between the given parent and child if the parent is new.
    if (parent_is_new) {
      mods.prv_edge_count = EdgeCountWithLeafSubsplits();
    }
    if (child_is_new) {
      // Reindex these edges.
      ConnectChildToAllParentsExcept(parent_subsplit, child_subsplit,
                                     mods.added_edge_idxs);
    }
    if (parent_is_new) {
      // Reindex these edges.
      ConnectParentToAllParents(parent_subsplit, mods.added_edge_idxs);
    }
  }
}
// Add a batch of edges given as PCSP bitsets, creating any missing endpoint
// nodes.  Note: unlike AddNodePairInternals, no reindexing is performed here;
// the node reindexer in the result is the identity.
SubsplitDAG::ModificationResult SubsplitDAG::AddEdges(
    const std::vector<Bitset> &edge_pcsps) {
  ModificationResult mods;
  mods.prv_node_count = NodeCount();
  mods.node_reindexer = Reindexer::IdentityReindexer(NodeCount());
  mods.prv_edge_count = EdgeCountWithLeafSubsplits();
  for (const auto &edge_pcsp : edge_pcsps) {
    const auto parent_subsplit = edge_pcsp.PCSPGetParentSubsplit();
    const auto child_subsplit = edge_pcsp.PCSPGetChildSubsplit();
    // Add nodes if necessary.
    bool parent_is_new = !ContainsNode(parent_subsplit);
    if (parent_is_new) {
      CreateAndInsertNode(parent_subsplit);
      mods.added_node_ids.push_back(GetDAGNodeId(parent_subsplit));
    }
    bool child_is_new = !ContainsNode(child_subsplit);
    if (child_is_new) {
      CreateAndInsertNode(child_subsplit);
      mods.added_node_ids.push_back(GetDAGNodeId(child_subsplit));
    }
    // Connect the given parent node to the given child node.  The new edge
    // takes the next free idx (read before the insertion).
    mods.added_edge_idxs.push_back(EdgeId(EdgeCountWithLeafSubsplits()));
    CreateAndInsertEdge(GetDAGNodeId(parent_subsplit), GetDAGNodeId(child_subsplit),
                        child_subsplit.SubsplitIsLeftChildOf(parent_subsplit));
  }
  mods.cur_node_count = NodeCount();
  mods.cur_edge_count = EdgeCountWithLeafSubsplits();
  // Update Counts.
  CountTopologies();
  CountEdgesWithoutLeafSubsplits();
  return mods;
}
// Add every structurally-possible edge between existing nodes that is not
// already present, then reindex the edges.  No nodes are added.
SubsplitDAG::ModificationResult SubsplitDAG::FullyConnect() {
  // Initialize output vectors.
  size_t prv_node_count = NodeCount();
  size_t prv_edge_count = EdgeCountWithLeafSubsplits();
  NodeIdVector added_node_ids;
  EdgeIdVector added_edge_idxs;
  Reindexer node_reindexer, edge_reindexer;
  node_reindexer = Reindexer::IdentityReindexer(NodeCount());
  // Find potential edges
  for (const auto &node : storage_.GetVertices()) {
    if (node.IsLeaf()) {
      continue;
    }
    const auto [left_children, right_children] = FindChildNodeIds(node.GetBitset());
    for (const auto &children : {left_children, right_children}) {
      // NOTE(review): side detection compares the id vectors by value; this is
      // correct as long as the left and right child lists are never identical
      // and non-empty -- confirm.
      const bool is_on_left = (children == left_children);
      for (const auto child_id : children) {
        if (!ContainsEdge(node.Id(), child_id)) {
          const auto edge_idx = CreateAndInsertEdge(node.Id(), child_id, is_on_left);
          added_edge_idxs.push_back(edge_idx);
        }
      }
    }
  }
  // Create reindexer and update fields.
  edge_reindexer = BuildEdgeReindexer(prv_edge_count);
  Reindexer::RemapIdVector<EdgeId>(added_edge_idxs, edge_reindexer);
  RemapEdgeIdxs(edge_reindexer);
  // Recount topologies.
  CountTopologies();
  size_t cur_node_count = NodeCount();
  size_t cur_edge_count = EdgeCountWithLeafSubsplits();
  return {added_node_ids, added_edge_idxs, node_reindexer, edge_reindexer,
          prv_node_count, prv_edge_count, cur_node_count, cur_edge_count};
}
// ** Validation Tests

// Not yet implemented: always fails via Failwith; the return is unreachable
// and present only to satisfy the signature.
bool SubsplitDAG::IsConsistent() const {
  Failwith("SubsplitDAG::IsConsistent() is not yet implemented.");
  return false;
}
// Checks that vertices are stored in id order (the i-th stored node has id i)
// and that every node passes its own validity check.
bool SubsplitDAG::IsValid() const {
  size_t expected_id = 0;
  for (const auto &node : storage_.GetVertices()) {
    const bool id_in_order = (node.Id().value_ == expected_id);
    ++expected_id;
    if (!id_in_order || !node.IsValid()) {
      return false;
    }
  }
  return true;
}
// Checks whether adding the (parent, child) subsplit pair would leave the DAG
// well-formed: (1) the pair must be a valid parent/child relationship, (2)
// both subsplits must be over the full taxon set, (3) the parent must keep at
// least one parent and one child on each side (counting the new child), and
// (4) the child must have at least one child on each side.
bool SubsplitDAG::IsValidAddNodePair(const Bitset &parent_subsplit,
                                     const Bitset &child_subsplit) const {
  auto [left_leafward_of_parent, right_leafward_of_parent] =
      GetSubsplitNodeNeighborCounts(parent_subsplit, Direction::Leafward);
  // Add child to parent's adjacent node counts.
  const bool is_left_child = child_subsplit.SubsplitIsLeftChildOf(parent_subsplit);
  if (is_left_child) {
    left_leafward_of_parent++;
  } else {
    right_leafward_of_parent++;
  }
  // (1) Added nodes are parent/child pair.
  if (Bitset::SubsplitIsParentChildPair(parent_subsplit, child_subsplit) == false) {
    return false;
  }
  // (2) Nodes do not add or remove taxa.
  if ((parent_subsplit.size() != 2 * taxon_count_) ||
      (child_subsplit.size() != 2 * taxon_count_)) {
    return false;
  }
  // (3) The parent node has at least one parent, and at least one left and right
  // child (including the added child node).
  if (!SubsplitNodeHasParent(parent_subsplit)) {
    return false;
  }
  const bool parent_has_children =
      (left_leafward_of_parent > 0) && (right_leafward_of_parent > 0);
  if (!parent_has_children) {
    return false;
  }
  // (4) The child node has at least one parent, and at least one left and right
  // child. (We know child node has a parent node, so only need to check children.)
  if (!SubsplitNodeHasLeftAndRightChild(child_subsplit)) {
    return false;
  }
  return true;
}
// Returns {left-neighbor count, right-neighbor count} of the given subsplit,
// looking rootward (parents) or leafward (children) per `direction`.
SizePair SubsplitDAG::GetSubsplitNodeNeighborCounts(const Bitset &subsplit,
                                                    const Direction direction) const {
  const bool is_rootward = (direction == Direction::Rootward);
  const auto [left_ids, right_ids] =
      is_rootward ? FindParentNodeIds(subsplit) : FindChildNodeIds(subsplit);
  return SizePair{left_ids.size(), right_ids.size()};
}
// True if the given subsplit has at least one parent node in the DAG.
bool SubsplitDAG::SubsplitNodeHasParent(const Bitset &node_subsplit) const {
  const auto [left_count, right_count] =
      GetSubsplitNodeNeighborCounts(node_subsplit, Direction::Rootward);
  return (left_count > 0) || (right_count > 0);
}
// True if the given subsplit has at least one child on each side.
bool SubsplitDAG::SubsplitNodeHasLeftAndRightChild(const Bitset &node_subsplit) const {
  const auto [left_count, right_count] =
      GetSubsplitNodeNeighborCounts(node_subsplit, Direction::Leafward);
  return (left_count != 0) && (right_count != 0);
}
// Checks that the taxon map is a bijection onto {0, ..., TaxonCount() - 1}:
// every id is in range, no id is duplicated, and every id is covered.
bool SubsplitDAG::IsValidTaxonMap() const {
  std::vector<bool> id_exists(TaxonCount());
  // Get all ids from map.
  for (const auto &[name, taxon_id] : dag_taxa_) {
    std::ignore = name;
    // BUGFIX: was `>`, which let taxon_id == size() through and indexed
    // one past the end of id_exists below. Ids are 0-based, so >= is out of
    // range.
    if (taxon_id.value_ >= id_exists.size()) {
      return false;
    }
    // Reject duplicate ids.
    if (id_exists[taxon_id.value_]) {
      return false;
    }
    id_exists[taxon_id.value_] = true;
  }
  // Taxon map should cover all ids from 0 to taxon_count-1.
  for (size_t i = 0; i < id_exists.size(); i++) {
    if (!id_exists[i]) {
      return false;
    }
  }
  return true;
}
// ** Reindexers
// NOTE: To be performed *after* DAG modification.

// Builds a reindexer mapping each node's current ("before") id to its new
// ("after") id, assigned in post-order over the whole DAG starting from the
// DAG root.
Reindexer SubsplitDAG::BuildNodeReindexer(const size_t prev_node_count) {
  Reindexer node_reindexer = Reindexer::IdentityReindexer(NodeCount());
  // Reindexing starts at taxon_count_ — presumably leaf ids [0, taxon_count_)
  // stay fixed and the traversal below only reassigns internal nodes; TODO
  // confirm the traversal skips leaves.
  size_t running_traversal_idx = taxon_count_;
  NodeId dag_root_node_id = NodeId(prev_node_count - 1);
  // Build node_reindexer by using post-order traversal (topological sort) of entire
  // DAG to assign new ids, where the index is the "before" node_id (stored in the
  // node object), and the value is the "after" node_id.
  DepthFirstWithAction(
      {dag_root_node_id},
      SubsplitDAGTraversalAction(
          // BeforeNode
          [](NodeId node_id) {},
          // AfterNode: assign the next post-order index to this node.
          [&node_reindexer, &running_traversal_idx](NodeId node_id) {
            node_reindexer.SetReindex(node_id.value_, running_traversal_idx);
            running_traversal_idx++;
          },
          // BeforeNodeClade
          [](NodeId node_id, bool is_edge_on_left) {},
          // VisitEdge
          [](NodeId node_id, NodeId child_id, bool is_edge_on_left) {}));
  return node_reindexer;
}
// Builds a reindexer for edges appended since `prev_edge_count`: each new
// edge is shifted so that it lands at the end of its parent's child-edge
// range for the relevant clade.
Reindexer SubsplitDAG::BuildEdgeReindexer(const size_t prev_edge_count) {
  Reindexer edge_reindexer = Reindexer::IdentityReindexer(EdgeCountWithLeafSubsplits());
  // Only edges from an existing parent node to a new child node need to be reindexed.
  // See SubsplitDAG::AddNodePair().
  for (EdgeId edge_idx = EdgeId(prev_edge_count);
       edge_idx < EdgeCountWithLeafSubsplits(); edge_idx++) {
    // Find edge with given idx.
    auto element = storage_.GetLine(edge_idx);
    Assert(element.has_value(),
           "An edge with given edge_idx did not exist in "
           "SubsplitDAG::BuildEdgeReindexer.");
    auto [node_pair, idx] = element.value();
    std::ignore = idx;
    const auto &[parent_id, child_id] = std::pair<NodeId, NodeId>(node_pair);
    const Bitset parent_subsplit = GetDAGNode(NodeId(parent_id)).GetBitset();
    const Bitset child_subsplit = GetDAGNode(NodeId(child_id)).GetBitset();
    // The child-edge range of the parent for the clade this edge attaches to.
    const auto idx_range = GetChildEdgeRange(
        parent_subsplit, child_subsplit.SubsplitIsLeftChildOf(parent_subsplit));
    // New edge is added to the end of the range.
    const auto new_idx =
        EdgeId(edge_reindexer.GetNewIndexByOldIndex(idx_range.second.value_));
    edge_reindexer.ReassignAndShift(edge_idx.value_, new_idx.value_);
  }
  return edge_reindexer;
}
// Remaps every node id held anywhere in the DAG — vertex storage order,
// per-node adjacency lists, the subsplit/clade/union lookup maps, and edge
// endpoints — according to `node_reindexer`. No-op for an identity reindexer.
void SubsplitDAG::RemapNodeIds(const Reindexer &node_reindexer) {
  // no need to reindex if no changes were made
  if (node_reindexer == Reindexer::IdentityReindexer(node_reindexer.size())) {
    return;
  }
  // Reorder vertex storage so node i sits at its new index.
  std::vector<DAGVertex> nodes = {storage_.GetVertices().begin(),
                                  storage_.GetVertices().end()};
  std::vector<DAGVertex> nodes_copy = Reindexer::Reindex(nodes, node_reindexer);
  storage_.SetVertices(nodes_copy);
  // Update each node's id and leafward/rootward ids.
  for (NodeId node_id = NodeId(0); node_id < NodeCount(); node_id++) {
    GetDAGNode(node_id).RemapNodeIds(node_reindexer);
  }
  // Update `subsplit_to_id_`.
  for (const auto &[subsplit, node_id] : subsplit_to_id_) {
    subsplit_to_id_.at(subsplit) =
        NodeId(node_reindexer.GetNewIndexByOldIndex(node_id.value_));
  }
  // Update `subsplit_clade_`.
  for (auto &[clade, old_node_ids] : subsplit_clade_) {
    NodeIdSet new_node_ids;
    for (const auto old_node_id : old_node_ids) {
      auto new_node_id =
          NodeId(node_reindexer.GetNewIndexByOldIndex(old_node_id.value_));
      new_node_ids.insert(new_node_id);
    }
    subsplit_clade_[clade] = new_node_ids;
  }
  // Update `subsplit_union_`.
  for (auto &[clade, old_node_ids] : subsplit_union_) {
    NodeIdSet new_node_ids;
    for (const auto old_node_id : old_node_ids) {
      auto new_node_id =
          NodeId(node_reindexer.GetNewIndexByOldIndex(old_node_id.value_));
      new_node_ids.insert(new_node_id);
    }
    subsplit_union_[clade] = new_node_ids;
  }
  // Update edge endpoints to the remapped parent/child node ids.
  for (auto i : storage_.GetLines()) {
    storage_.ReindexLine(
        i.GetId(), NodeId(node_reindexer.GetNewIndexByOldIndex(i.GetParent().value_)),
        NodeId(node_reindexer.GetNewIndexByOldIndex(i.GetChild().value_)));
  }
}
// Remaps every edge index held in the DAG — edge storage order, each node's
// adjacent-edge lists, and the parent-to-child-range map — according to
// `edge_reindexer`. No-op for an identity reindexer.
void SubsplitDAG::RemapEdgeIdxs(const Reindexer &edge_reindexer) {
  // no need to reindex if no changes were made
  if (edge_reindexer == Reindexer::IdentityReindexer(edge_reindexer.size())) {
    return;
  }
  // Update edges: move each edge record to its new index and stamp the new id.
  std::vector<DAGLineStorage> edges_copy(storage_.GetLines().size());
  for (auto i : storage_.GetLines()) {
    EdgeId new_idx = EdgeId(edge_reindexer.GetNewIndexByOldIndex(i.GetId().value_));
    edges_copy[new_idx.value_] = i;
    edges_copy[new_idx.value_].SetId(new_idx);
  }
  storage_.SetLines(edges_copy);
  // Update each node's stored edge indices.
  for (NodeId node_id = NodeId(0); node_id < NodeCount(); node_id++) {
    GetDAGNode(node_id).RemapEdgeIdxs(edge_reindexer);
  }
  // Update `parent_to_child_range_`.
  for (const auto &[subsplit, idx_range] : parent_to_child_range_) {
    parent_to_child_range_.at(subsplit) = {
        EdgeId(edge_reindexer.GetNewIndexByOldIndex(idx_range.first.value_)),
        EdgeId(edge_reindexer.GetNewIndexByOldIndex(idx_range.second.value_))};
  }
}
| 94,137
|
C++
|
.cpp
| 2,151
| 37.833101
| 88
| 0.680519
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,014
|
clock_model.cpp
|
phylovi_bito/src/clock_model.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "clock_model.hpp"
// Factory: builds the clock model named by `specification` ("none" or
// "strict"); fails loudly on any other name.
std::unique_ptr<ClockModel> ClockModel::OfSpecification(
    const std::string &specification) {
  if (specification == "strict") {
    return std::make_unique<StrictClockModel>();
  }
  if (specification == "none") {
    return std::make_unique<NoClockModel>();
  }
  Failwith("Clock model not known: " + specification);
}
void StrictClockModel::SetParameters(const EigenVectorXdRef param_vector) {
GetBlockSpecification().CheckParameterVectorSize(param_vector);
EigenVectorXd rate = ExtractSegment(param_vector, rate_key_);
rate_ = rate[0];
}
| 721
|
C++
|
.cpp
| 18
| 37.333333
| 75
| 0.744286
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,015
|
block_specification.cpp
|
phylovi_bito/src/block_specification.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "block_specification.hpp"
// Adds a named coordinate block; SafeInsert fails if the key already exists.
void BlockSpecification::Insert(const std::string& key, Coordinates value) {
  SafeInsert(map_, key, value);
}
// Convenience overload for C-string keys.
void BlockSpecification::Insert(const char* key, Coordinates value) {
  Insert(std::string(key), value);
}
// Lays the given blocks out contiguously, in iteration order of
// `param_counts`, then registers an "entire" block spanning all of them.
BlockSpecification::BlockSpecification(ParamCounts param_counts) {
  size_t next_available_idx = 0;
  for (const auto& [block_name, block_size] : param_counts) {
    Insert(block_name, {next_available_idx, block_size});
    next_available_idx += block_size;
  }
  // The entire key covers [0, total parameter count).
  InsertEntireKey({0, next_available_idx});
}
// Looks up a block's coordinates by key, failing loudly if absent.
BlockSpecification::Coordinates BlockSpecification::Find(const std::string& key) const {
  const auto it = map_.find(key);
  if (it == map_.end()) {
    Failwith("Can't find '" + key + "' in block specification!");
  }
  return it->second;
}
// Replaces the "entire" block (the one spanning all parameters) with the
// given coordinates.
void BlockSpecification::InsertEntireKey(Coordinates coordinates) {
  EraseEntireKey();
  Insert(entire_key_, coordinates);
}
// Appends another BlockSpecification after this one's parameters: each of
// `other`'s blocks is shifted by our parameter count, `other`'s "entire"
// block is re-registered under `sub_entire_key`, and our entire block is
// extended to cover the combined range. Fails on key collisions.
void BlockSpecification::Append(const std::string& sub_entire_key,
                                BlockSpecification other) {
  const auto our_parameter_count = ParameterCount();
  for (const auto& [block_name, coordinate] : other.GetMap()) {
    auto [start_idx, block_size] = coordinate;
    if (block_name == entire_key_) {
      Assert(start_idx == 0, "Start index of entire block isn't zero.");
      Insert(sub_entire_key, {our_parameter_count, block_size});
    } else {
      auto search = map_.find(block_name);
      if (search != map_.end()) {
        Failwith("Key overlap between BlockSpecifications: " + block_name);
      }  // else
      Insert(block_name, {our_parameter_count + start_idx, block_size});
    }
  }
  InsertEntireKey({0, our_parameter_count + other.ParameterCount()});
}
void BlockSpecification::CheckParameterVectorSize(
const EigenVectorXdRef param_vector) const {
Assert(param_vector.size() == static_cast<Eigen::Index>(ParameterCount()),
"Parameters are the wrong dimension!");
}
void BlockSpecification::CheckParameterMatrixSize(
const EigenMatrixXdRef param_matrix) const {
Assert(param_matrix.cols() == static_cast<Eigen::Index>(ParameterCount()),
"Parameters are the wrong dimension!");
}
// Returns the sub-vector view belonging to block `key`, failing if the
// requested segment would run past the end of `param_vector`.
EigenVectorXdRef BlockSpecification::ExtractSegment(EigenVectorXdRef param_vector,
                                                    std::string key) const {
  const auto [start_idx, parameter_count] = Find(key);
  const auto requested_end = static_cast<Eigen::Index>(start_idx + parameter_count);
  if (requested_end > param_vector.size()) {
    Failwith("Model parameter '" + key +
             "' request too long for a param_vector of length " +
             std::to_string(param_vector.size()) + ".");
  }
  return param_vector.segment(start_idx, parameter_count);
}
// Returns the sub-matrix view (all rows, the block's columns) belonging to
// block `key`, failing if the request exceeds the matrix width.
EigenMatrixXdRef BlockSpecification::ExtractBlock(EigenMatrixXdRef param_matrix,
                                                  std::string key) const {
  const auto [start_idx, parameter_count] = Find(key);
  const auto requested_end = static_cast<Eigen::Index>(start_idx + parameter_count);
  if (requested_end > param_matrix.cols()) {
    Failwith("Model parameter '" + key +
             "' request too long for a param_matrix of width " +
             std::to_string(param_matrix.cols()) + ".");
  }
  return param_matrix.block(0, start_idx, param_matrix.rows(), parameter_count);
}
// In the interest of having a single code path, this implementation does a
// lookup in ExtractSegment even though it doesn't really need to. My intent is
// not to have it be called frequently-- just once when we're setting things up.
// So if this changes let's do something about it.
// Builds a map from each block key to its mutable segment view into
// `param_vector` (size-checked first).
BlockSpecification::ParameterSegmentMap BlockSpecification::ParameterSegmentMapOf(
    EigenVectorXdRef param_vector) const {
  CheckParameterVectorSize(param_vector);
  ParameterSegmentMap segment_map;
  for (const auto& [key, coords] : GetMap()) {
    std::ignore = coords;
    SafeInsert(segment_map, key, ExtractSegment(param_vector, key));
  }
  return segment_map;
}
// Builds a map from each block key to its mutable block view into
// `param_matrix` (size-checked first).
BlockSpecification::ParameterBlockMap BlockSpecification::ParameterBlockMapOf(
    EigenMatrixXdRef param_matrix) const {
  CheckParameterMatrixSize(param_matrix);
  ParameterBlockMap block_map;
  for (const auto& [key, coords] : GetMap()) {
    std::ignore = coords;
    SafeInsert(block_map, key, ExtractBlock(param_matrix, key));
  }
  return block_map;
}
| 4,473
|
C++
|
.cpp
| 100
| 39.73
| 88
| 0.700527
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,016
|
topology_sampler.cpp
|
phylovi_bito/src/topology_sampler.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "topology_sampler.hpp"
// Samples a tree topology from the DAG that passes through `node`: records
// the node, samples rootward up to the DAG root and leafward through both of
// the node's clades, then stitches the sampled vertices/edges into a Node
// topology rooted at the discovered root.
Node::NodePtr TopologySampler::Sample(SubsplitDAGNode node, SubsplitDAG& dag,
                                      EigenConstVectorXdRef normalized_sbn_parameters,
                                      EigenConstVectorXdRef inverted_probabilities) {
  SamplingSession session({dag, normalized_sbn_parameters, inverted_probabilities});
  session.result_.AddVertex({node.Id(), node.GetBitset()});
  SampleRootward(session, node);
  SampleLeafward(session, node, SubsplitClade::Left);
  SampleLeafward(session, node, SubsplitClade::Right);
  session.result_.ConnectAllVertices();
  auto root = session.result_.FindRoot();
  if (!root.has_value()) Failwith("No root found");
  return BuildTree(session, root.value().get());
}
// Seeds the sampler's RNG for reproducible draws.
void TopologySampler::SetSeed(uint64_t seed) { mersenne_twister_.SetSeed(seed); }
// Records `node` in the sampled result and continues sampling in the
// directions not yet covered: if we arrived moving rootward, both child
// clades still need sampling; if we arrived moving leafward, the parent side
// and the opposite child clade do.
void TopologySampler::VisitNode(SamplingSession& session, SubsplitDAGNode node,
                                Direction direction, SubsplitClade clade) {
  session.result_.AddVertex({node.Id(), node.GetBitset()});
  switch (direction) {
    case Direction::Rootward:
      SampleLeafward(session, node, SubsplitClade::Left);
      SampleLeafward(session, node, SubsplitClade::Right);
      break;
    case Direction::Leafward:
      SampleRootward(session, node);
      SampleLeafward(session, node, Bitset::Opposite(clade));
      break;
  }
}
// Samples one parent edge above `node` (weighted by inverted probabilities),
// records it, and recurses from the chosen parent. Stops when the node has no
// rootward neighbors, i.e. at the DAG root.
void TopologySampler::SampleRootward(SamplingSession& session, SubsplitDAGNode node) {
  auto left = node.GetLeftRootward();
  auto right = node.GetRightRootward();
  if (left.empty() && right.empty()) {
    // reached root
    return;
  }
  auto [parent_node, parent_edge] = SampleParentNodeAndEdge(session, left, right);
  session.result_.AddLine({parent_edge.GetId(), parent_edge.GetParent(),
                           parent_edge.GetChild(), parent_edge.GetSubsplitClade()});
  VisitNode(session, parent_node, Direction::Leafward, parent_edge.GetSubsplitClade());
}
// Samples one child edge below `node` within the given clade (weighted by the
// normalized SBN parameters), records it, and recurses from the chosen child.
// Stops at leaves (no leafward neighbors).
void TopologySampler::SampleLeafward(SamplingSession& session, SubsplitDAGNode node,
                                     SubsplitClade clade) {
  auto neighbors = node.GetNeighbors(Direction::Leafward, clade);
  if (neighbors.empty()) {
    // reached leaf
    return;
  }
  auto child = SampleChildNodeAndEdge(session, neighbors);
  session.result_.AddLine({child.second.GetId(), child.second.GetParent(),
                           child.second.GetChild(), child.second.GetSubsplitClade()});
  VisitNode(session, child.first, Direction::Rootward, clade);
}
// Draws a parent (node, edge) pair from the combined left/right rootward
// neighbor lists, with each candidate edge weighted by its inverted
// probability.
std::pair<SubsplitDAGNode, ConstLineView> TopologySampler::SampleParentNodeAndEdge(
    SamplingSession& session, ConstNeighborsView left, ConstNeighborsView right) {
  // Concatenate the weights of the left candidates then the right candidates.
  std::vector<double> weights;
  weights.resize(left.size() + right.size());
  size_t i = 0;
  for (auto parent = left.begin(); parent != left.end(); ++parent)
    weights[i++] = session.inverted_probabilities_[parent.GetEdgeId().value_];
  for (auto parent = right.begin(); parent != right.end(); ++parent)
    weights[i++] = session.inverted_probabilities_[parent.GetEdgeId().value_];
  std::discrete_distribution<> distribution(weights.begin(), weights.end());
  auto sampled_index =
      static_cast<size_t>(distribution(mersenne_twister_.GetGenerator()));
  // An index below left.size() selects from the left clade; otherwise from the
  // right (offset by the left count).
  if (sampled_index < left.size()) {
    auto parent = left.begin();
    std::advance(parent, sampled_index);
    return {session.dag_.GetDAGNode(NodeId(parent.GetNodeId())),
            session.dag_.GetDAGEdge(EdgeId(parent.GetEdgeId()))};
  }  // else
  auto parent = right.begin();
  std::advance(parent, sampled_index - left.size());
  return {session.dag_.GetDAGNode(NodeId(parent.GetNodeId())),
          session.dag_.GetDAGEdge(EdgeId(parent.GetEdgeId()))};
}
// Draws a child (node, edge) pair from the given leafward neighbor list,
// weighted by the normalized SBN parameter of each candidate edge.
std::pair<SubsplitDAGNode, ConstLineView> TopologySampler::SampleChildNodeAndEdge(
    SamplingSession& session, ConstNeighborsView neighbors) {
  std::vector<double> weights;
  weights.resize(neighbors.size());
  size_t i = 0;
  for (auto child = neighbors.begin(); child != neighbors.end(); ++child) {
    weights[i++] = session.normalized_sbn_parameters_[child.GetEdgeId().value_];
  }
  std::discrete_distribution<> distribution(weights.begin(), weights.end());
  i = static_cast<size_t>(distribution(mersenne_twister_.GetGenerator()));
  auto child = neighbors.begin();
  std::advance(child, i);
  return {session.dag_.GetDAGNode(NodeId(child.GetNodeId())),
          session.dag_.GetDAGEdge(EdgeId(child.GetEdgeId()))};
}
// Recursively assembles a Node topology from the sampled vertices: interior
// nodes join their two subtrees, leaves terminate, and the DAG root joins its
// single child. Fails if a non-root node has exactly one child.
Node::NodePtr TopologySampler::BuildTree(SamplingSession& session,
                                         const DAGVertex& node) {
  auto left = node.GetNeighbors(Direction::Leafward, SubsplitClade::Left);
  auto right = node.GetNeighbors(Direction::Leafward, SubsplitClade::Right);
  NodeId left_id = NodeId(NoId), right_id = NodeId(NoId);
  if (!left.empty()) {
    left_id = left.begin().GetNodeId();
  }
  if (!right.empty()) {
    right_id = right.begin().GetNodeId();
  }
  // Interior node: join the two recursively-built subtrees.
  if (left_id != NoId && right_id != NoId) {
    return Node::Join(BuildTree(session, session.result_.GetVertex(left_id)),
                      BuildTree(session, session.result_.GetVertex(right_id)),
                      node.GetId().value_);
  }
  if (node.IsLeaf()) {
    return Node::Leaf(node.GetId().value_);
  }
  // The DAG root has a single (left) child.
  if (node.IsRoot()) {
    Assert(left_id != NoId, "Root has no children");
    return Node::Join({BuildTree(session, session.result_.GetVertex(left_id))},
                      node.GetId().value_);
  }
  Failwith("Node can't have only one child");
}
| 5,623
|
C++
|
.cpp
| 119
| 41.285714
| 87
| 0.691174
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,017
|
tp_choice_map.cpp
|
phylovi_bito/src/tp_choice_map.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "tp_choice_map.hpp"
// ** Access

// Returns the chosen edge id stored in the given adjacency slot of `edge_id`.
EdgeId TPChoiceMap::GetEdgeChoice(const EdgeId edge_id, EdgeAdjacent edge_adj) const {
  return edge_choice_vector_[edge_id.value_][edge_adj];
}
// Overwrites the chosen edge id in the given adjacency slot of `edge_id`.
void TPChoiceMap::SetEdgeChoice(const EdgeId edge_id, const EdgeAdjacent edge_adj,
                                const EdgeId new_edge_choice) {
  edge_choice_vector_[edge_id.value_][edge_adj] = new_edge_choice;
}
// Clears all four adjacency choices of `edge_id` back to the NoId sentinel.
void TPChoiceMap::ResetEdgeChoice(const EdgeId edge_id) {
  edge_choice_vector_[edge_id.value_] = {EdgeId(NoId), EdgeId(NoId), EdgeId(NoId),
                                         EdgeId(NoId)};
}
// Resolves the edge's choice entries to their adjacent node ids.
TPChoiceMap::EdgeChoiceNodeIds TPChoiceMap::GetEdgeChoiceNodeIds(
    const EdgeId edge_id) const {
  return GetEdgeChoiceNodeIds(GetEdgeChoice(edge_id));
}
// Resolves the edge's choice entries to their adjacent node subsplits.
TPChoiceMap::EdgeChoiceSubsplits TPChoiceMap::GetEdgeChoiceSubsplits(
    const EdgeId edge_id) const {
  return GetEdgeChoiceSubsplits(GetEdgeChoice(edge_id));
}
// Maps each chosen edge in `edge_choice` to the node on its relevant end: the
// parent choice resolves rootward (to that edge's parent node), the others
// resolve leafward (to the edge's child node). NoId choices resolve to NoId.
TPChoiceMap::EdgeChoiceNodeIds TPChoiceMap::GetEdgeChoiceNodeIds(
    const EdgeChoice &edge_choice) const {
  // Returns the parent or child node of `edge_id`, or NoId for the sentinel.
  auto GetNodeFromEdge = [this](const EdgeId edge_id, const Direction dir) {
    if (edge_id == NoId) {
      return NodeId(NoId);
    }
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    return (dir == Direction::Rootward) ? edge.GetParent() : edge.GetChild();
  };
  EdgeChoiceNodeIds choice_data;
  choice_data.parent = GetNodeFromEdge(edge_choice.parent, Direction::Rootward);
  choice_data.sister = GetNodeFromEdge(edge_choice.sister, Direction::Leafward);
  choice_data.left_child = GetNodeFromEdge(edge_choice.left_child, Direction::Leafward);
  choice_data.right_child =
      GetNodeFromEdge(edge_choice.right_child, Direction::Leafward);
  return choice_data;
}
// Maps each chosen edge in `edge_choice` to the subsplit bitset of the node
// on its relevant end (rootward for the parent choice, leafward otherwise).
// NoId choices yield an empty bitset.
TPChoiceMap::EdgeChoiceSubsplits TPChoiceMap::GetEdgeChoiceSubsplits(
    const EdgeChoice &edge_choice) const {
  // Returns the relevant endpoint's subsplit, or empty for the NoId sentinel.
  auto GetSubsplitFromEdge = [this](const EdgeId edge_id, const Direction dir) {
    if (edge_id == NoId) {
      return Bitset::EmptyBitset();
    }
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    const auto node_id =
        (dir == Direction::Rootward) ? edge.GetParent() : edge.GetChild();
    return GetDAG().GetDAGNodeBitset(node_id);
  };
  EdgeChoiceSubsplits choice_data{
      GetSubsplitFromEdge(edge_choice.parent, Direction::Rootward),
      GetSubsplitFromEdge(edge_choice.sister, Direction::Leafward),
      GetSubsplitFromEdge(edge_choice.left_child, Direction::Leafward),
      GetSubsplitFromEdge(edge_choice.right_child, Direction::Leafward)};
  return choice_data;
}
// Collects the PCSP bitsets of the focal edge and its chosen adjacent edges,
// in the order {parent, sister, focal, left child, right child}. NoId choices
// yield an empty bitset.
TPChoiceMap::EdgeChoicePCSPs TPChoiceMap::GetEdgeChoicePCSPs(
    const EdgeId edge_id) const {
  // Note: the lambda parameter shadows the focal `edge_id` intentionally.
  auto GetPCSPFromEdge = [this](const EdgeId edge_id) -> Bitset {
    if (edge_id == NoId) {
      return Bitset::EmptyBitset();
    }
    return GetDAG().GetDAGEdgeBitset(edge_id);
  };
  const auto &edge_choice = GetEdgeChoice(edge_id);
  // The middle entry is the focal edge's own PCSP.
  EdgeChoicePCSPs choice_data{
      GetPCSPFromEdge(edge_choice.parent), GetPCSPFromEdge(edge_choice.sister),
      GetPCSPFromEdge(edge_id), GetPCSPFromEdge(edge_choice.left_child),
      GetPCSPFromEdge(edge_choice.right_child)};
  return choice_data;
}
// ** Maintenance

// Grows the per-edge choice data to `new_edge_count` entries and, when an
// edge reindexer is given, remaps both the stored edge-id choices and the
// ordering of the vector to the new indexing.
// NOTE(review): `explicit_alloc` and `on_init` are accepted but unused here,
// and the old comment's promised "initialize to first edge" does not happen —
// confirm whether that behavior is still intended.
void TPChoiceMap::GrowEdgeData(const size_t new_edge_count,
                               std::optional<const Reindexer> edge_reindexer,
                               std::optional<const size_t> explicit_alloc,
                               const bool on_init) {
  edge_choice_vector_.resize(new_edge_count);
  if (edge_reindexer.has_value()) {
    auto &reindexer = edge_reindexer.value();
    // Remap edge choices (NoId sentinels are left untouched).
    for (EdgeId edge_id(0); edge_id < new_edge_count; edge_id++) {
      for (const auto edge_choice_type : EdgeAdjacentEnum::Iterator()) {
        if (GetEdgeChoice(edge_id, edge_choice_type) != NoId) {
          SetEdgeChoice(edge_id, edge_choice_type,
                        EdgeId(reindexer.GetNewIndexByOldIndex(
                            size_t(GetEdgeChoice(edge_id, edge_choice_type)))));
        }
      }
    }
    // Reindex ordering of choice map.
    Reindexer::ReindexInPlace<EdgeChoiceVector, EdgeChoice>(edge_choice_vector_,
                                                            reindexer, new_edge_count);
  }
}
// ** Selectors
void TPChoiceMap::SelectFirstEdge() {
for (EdgeId edge_id = EdgeId(0); edge_id < GetDAG().EdgeCountWithLeafSubsplits();
edge_id++) {
SelectFirstEdge(edge_id);
}
}
// Fills the choice map entry for `edge_id` by picking the first edge from
// each non-empty neighbor list of the edge's parent and child nodes.
// NOTE(review): when both left and right parents exist, the right-parent
// assignment overwrites the left-parent one — confirm right-parent preference
// is intended.
void TPChoiceMap::SelectFirstEdge(const EdgeId edge_id) {
  const auto edge = GetDAG().GetDAGEdge(edge_id);
  const auto focal_clade = edge.GetSubsplitClade();
  const auto parent_node = GetDAG().GetDAGNode(NodeId(edge.GetParent()));
  const auto child_node = GetDAG().GetDAGNode(NodeId(edge.GetChild()));
  ResetEdgeChoice(edge_id);
  auto &edge_choice = GetEdgeChoice(edge_id);
  // Query neighbor nodes.
  const SizeVector &left_parents =
      parent_node.GetNeighbors(Direction::Rootward, SubsplitClade::Left);
  const SizeVector &right_parents =
      parent_node.GetNeighbors(Direction::Rootward, SubsplitClade::Right);
  const SizeVector &sisters =
      parent_node.GetNeighbors(Direction::Leafward, Bitset::Opposite(focal_clade));
  const SizeVector &left_children =
      child_node.GetNeighbors(Direction::Leafward, SubsplitClade::Left);
  const SizeVector &right_children =
      child_node.GetNeighbors(Direction::Leafward, SubsplitClade::Right);
  // If neighbor lists are non-empty, get first edge from list.
  if (!left_parents.empty()) {
    edge_choice.parent = GetDAG().GetEdgeIdx(NodeId(left_parents[0]), parent_node.Id());
  }
  if (!right_parents.empty()) {
    edge_choice.parent =
        GetDAG().GetEdgeIdx(NodeId(right_parents[0]), parent_node.Id());
  }
  if (!sisters.empty()) {
    edge_choice.sister = GetDAG().GetEdgeIdx(parent_node.Id(), NodeId(sisters[0]));
  }
  if (!left_children.empty()) {
    edge_choice.left_child =
        GetDAG().GetEdgeIdx(child_node.Id(), NodeId(left_children[0]));
  }
  if (!right_children.empty()) {
    edge_choice.right_child =
        GetDAG().GetEdgeIdx(child_node.Id(), NodeId(right_children[0]));
  }
}
// Checks every choice-map entry for structural validity: an all-NoId entry is
// always invalid; parent/sister may be NoId only on root edges; child choices
// may be NoId only on leaf edges; all non-NoId ids must be within the DAG's
// edge range. Diagnostics go to std::cerr unless `is_quiet` is true.
bool TPChoiceMap::SelectionIsValid(const bool is_quiet) const {
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cerr);
  bool is_valid = true;
  EdgeId edge_max_id = GetDAG().EdgeIdxRange().second;
  for (EdgeId edge_id = EdgeId(0); edge_id < edge_choice_vector_.size(); edge_id++) {
    const auto &edge_choice = edge_choice_vector_[edge_id.value_];
    if ((edge_choice.parent == NoId) && (edge_choice.sister == NoId) &&
        (edge_choice.left_child == NoId) && (edge_choice.right_child == NoId)) {
      os << "Invalid Selection: Edge Choice is empty." << std::endl;
      is_valid = false;
    }
    // If edge id is outside valid range.
    if ((edge_choice.parent > edge_max_id) || (edge_choice.sister > edge_max_id)) {
      // If they are not NoId, then it is an invalid edge_id.
      if ((edge_choice.parent != NoId) || (edge_choice.sister != NoId)) {
        os << "Invalid Selection: Parent or Sister has invalid edge_id." << std::endl;
        is_valid = false;
      }
      // NoId is valid only if edge goes to a root.
      if (!GetDAG().IsEdgeRoot(edge_id)) {
        os << "Invalid Selection: Parent or Sister has NoId when edge is not a root."
           << std::endl;
        is_valid = false;
      }
    }
    for (const auto &child_edge_id :
         EdgeIdVector({edge_choice.left_child, edge_choice.right_child})) {
      // If edge id is outside valid range.
      if (child_edge_id > edge_max_id) {
        // If they are not NoId, then it is an invalid edge_id.
        if (child_edge_id != NoId) {
          os << "Invalid Selection: Child has invalid edge id." << std::endl;
          is_valid = false;
        }
        // NoId is valid only if edge goes to a leaf.
        if (!GetDAG().IsEdgeLeaf(edge_id)) {
          os << "Invalid Selection: Child has NoId when edge is not a leaf."
             << std::endl;
          is_valid = false;
        }
      }
    }
    // Report the first failing edge in detail, then stop scanning.
    if (!is_valid) {
      os << "Failed at Edge" << edge_id << ": IsLeaf? " << GetDAG().IsEdgeLeaf(edge_id)
         << ", IsRoot? " << GetDAG().IsEdgeRoot(edge_id) << std::endl;
      os << "EdgeChoice: " << EdgeChoiceToString(edge_id) << std::endl;
      os << std::endl;
      break;
    }
  }
  return is_valid;
}
// ** TreeMask

// Expands a TreeMask (a set of edge ids) into a node-adjacency map: for every
// node touched by the mask, records its parent, left-child, and right-child
// node ids (NoId where absent). Asserts that no adjacency slot is assigned
// twice, which would mean the mask is not a tree.
TPChoiceMap::ExpandedTreeMask TPChoiceMap::ExtractExpandedTreeMask(
    const TreeMask &tree_mask) const {
  ExpandedTreeMask tree_mask_ext;
  for (const auto edge_id : tree_mask) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    const auto focal_clade = edge.GetSubsplitClade();
    const NodeId parent_id = edge.GetParent();
    const NodeId child_id = edge.GetChild();
    // Add nodes to map if they don't already exist.
    for (const auto &node_id : {parent_id, child_id}) {
      if (tree_mask_ext.find(node_id) == tree_mask_ext.end()) {
        NodeAdjacentArray<NodeId> adj_nodes;
        adj_nodes.fill(NodeId(NoId));
        tree_mask_ext.insert({node_id, adj_nodes});
      }
    }
    // Add adjacent nodes to map.
    const auto which_node = (focal_clade == SubsplitClade::Left)
                                ? NodeAdjacent::LeftChild
                                : NodeAdjacent::RightChild;
    Assert(tree_mask_ext[parent_id][which_node] == NoId,
           "Invalid TreeMask: Cannot reassign adjacent child node.");
    tree_mask_ext[parent_id][which_node] = child_id;
    Assert(tree_mask_ext[child_id][NodeAdjacent::Parent] == NoId,
           "Invalid TreeMask: Cannot reassign adjacent parent node.");
    tree_mask_ext[child_id][NodeAdjacent::Parent] = parent_id;
  }
  return tree_mask_ext;
}
// Converts a stack to a vector ordered bottom-to-top, without modifying the
// input stack.
// BUGFIX: the previous version declared a std::set<T> return type while
// returning a std::vector<T> (it only compiled because it was never
// instantiated), and computed element pointers via arithmetic on
// &stack.top(), which is undefined behavior since std::stack's default
// underlying std::deque is not contiguous.
template <typename T>
static std::vector<T> StackToVector(std::stack<T> &stack) {
  std::stack<T> working_copy = stack;
  std::vector<T> stack_contents(working_copy.size());
  // Popping yields top-to-bottom order, so fill the vector back-to-front.
  for (auto it = stack_contents.rbegin(); it != stack_contents.rend(); ++it) {
    *it = working_copy.top();
    working_copy.pop();
  }
  return stack_contents;
}
// Pushes `id` onto the stack unless it is the NoId sentinel.
template <typename T, typename IdType>
static void StackPushIfValidId(std::stack<T> &stack, IdType id) {
  const bool is_valid_id = (id != NoId);
  if (is_valid_id) {
    stack.push(id);
  }
}
// Extracts the tree containing `initial_edge_id` from the choice map in two
// passes: a rootward pass follows chosen parents up to the root (queuing each
// sister, plus the focal edge's children), then a leafward pass descends
// along chosen children from every queued edge down to the leaves.
TPChoiceMap::TreeMask TPChoiceMap::ExtractTreeMask(const EdgeId initial_edge_id) const {
  TreeMask tree_mask;
  std::stack<EdgeId> rootward_stack, leafward_stack;
  const auto edge_max_id = GetDAG().EdgeIdxRange().second;
  // Rootward Pass: Capture parent and sister edges above focal edge.
  // For focal edge, add children to stack for leafward pass.
  auto focal_edge_id = initial_edge_id;
  const auto &focal_choices = edge_choice_vector_.at(focal_edge_id.value_);
  StackPushIfValidId(rootward_stack, focal_choices.left_child);
  StackPushIfValidId(rootward_stack, focal_choices.right_child);
  // Follow parentage upward until root.
  bool at_root = false;
  while (!at_root) {
    // NOTE(review): leftover debug output — consider removing or routing
    // through a logger; the Assert below already reports the failure.
    if (focal_edge_id >= edge_max_id) {
      std::cout << "FOCAL EDGE OUT-OF-RANGE: " << focal_edge_id << " " << edge_max_id
                << std::endl;
    }
    Assert(focal_edge_id < edge_max_id, "edge_id is outside valid edge range.");
    tree_mask.insert(focal_edge_id);
    const auto &focal_choices = edge_choice_vector_.at(focal_edge_id.value_);
    // End upward pass if we are at the root.
    if (GetDAG().IsEdgeRoot(focal_edge_id)) {
      at_root = true;
    } else {
      // If not at root, add sister for leafward pass.
      focal_edge_id = focal_choices.parent;
      rootward_stack.push(focal_choices.sister);
    }
  }
  // Leafward Pass: Capture all children from parentage.
  while (!rootward_stack.empty()) {
    const auto parent_edge_id = rootward_stack.top();
    rootward_stack.pop();
    leafward_stack.push(parent_edge_id);
  }
  while (!leafward_stack.empty()) {
    const auto edge_id = leafward_stack.top();
    leafward_stack.pop();
    tree_mask.insert(edge_id);
    const auto edge_choice = edge_choice_vector_.at(edge_id.value_);
    StackPushIfValidId(leafward_stack, edge_choice.left_child);
    StackPushIfValidId(leafward_stack, edge_choice.right_child);
  }
  return tree_mask;
}
// Convenience overload: expands the tree mask extracted from
// `initial_edge_id` into a node-adjacency map.
TPChoiceMap::ExpandedTreeMask TPChoiceMap::ExtractExpandedTreeMask(
    const EdgeId initial_edge_id) const {
  return ExtractExpandedTreeMask(ExtractTreeMask(initial_edge_id));
}
// Validates that a tree mask forms a complete tree over the DAG: it must span
// the root exactly once, reach every leaf exactly once, and assign each
// node's parent and per-clade child slots at most once. Diagnostics go to
// std::cerr unless `is_quiet` is true.
bool TPChoiceMap::TreeMaskIsValid(const TreeMask &tree_mask,
                                  const bool is_quiet) const {
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cerr);
  SizeVector node_ids;
  bool root_check = false;
  BoolVector leaf_check(GetDAG().TaxonCount(), false);
  // Node map for checking node connectivity.
  using NodeMap = std::map<NodeId, NodeAdjacentArray<bool>>;
  NodeMap nodemap_check;
  // Check each edge in tree mask.
  for (const auto edge_id : tree_mask) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    const auto &parent_node = GetDAG().GetDAGNode(NodeId(edge.GetParent()));
    const auto &child_node = GetDAG().GetDAGNode(NodeId(edge.GetChild()));
    // Check if edge goes to root exactly once.
    if (GetDAG().IsNodeRoot(parent_node.Id())) {
      if (root_check) {
        os << "Invalid TreeMask: Multiple edges going to tree root." << std::endl;
        return false;
      }
      root_check = true;
    }
    // Check if edge goes to each leaf exactly once.
    if (GetDAG().IsNodeLeaf(child_node.Id())) {
      const auto taxon_id = child_node.Id();
      if (leaf_check.at(taxon_id.value_)) {
        os << "Invalid TreeMask: Multiple edges going to a tree leaf." << std::endl;
        return false;
      }
      leaf_check.at(taxon_id.value_) = true;
    }
    // Update node map. If a node's parent slot or clade-child slot is already
    // taken, two mask edges claim the same slot and the mask is not a tree.
    for (const NodeId node_id : {parent_node.Id(), child_node.Id()}) {
      if (nodemap_check.find(node_id) == nodemap_check.end()) {
        nodemap_check.insert({node_id, {{false, false, false}}});
      }
    }
    const auto which_child = (edge.GetSubsplitClade() == SubsplitClade::Left)
                                 ? NodeAdjacent::LeftChild
                                 : NodeAdjacent::RightChild;
    // BUGFIX: the two messages below were swapped (and "multiple" was
    // misspelled): a reassigned child slot means the parent node has multiple
    // children in the same clade; a reassigned parent slot means the child
    // node has multiple parents (compare the asserts in
    // ExtractExpandedTreeMask).
    if (nodemap_check[parent_node.Id()][which_child] == true) {
      os << "Invalid TreeMask: Node has multiple children in same clade."
         << std::endl;
      return false;
    }
    nodemap_check[parent_node.Id()][which_child] = true;
    if (nodemap_check[child_node.Id()][NodeAdjacent::Parent] == true) {
      os << "Invalid TreeMask: Node has multiple parents." << std::endl;
      return false;
    }
    nodemap_check[child_node.Id()][NodeAdjacent::Parent] = true;
  }
  // Check if all nodes are fully connected.
  for (const auto &[node_id, connections] : nodemap_check) {
    // Check children.
    if (!(connections[NodeAdjacent::LeftChild] ||
          connections[NodeAdjacent::RightChild])) {
      // NOTE(review): inside !(left || right) both flags are false, so this
      // XOR is unreachable dead code. It cannot simply be hoisted, since the
      // DAG root legitimately has a single child clade — confirm intended
      // handling of the exactly-one-child case for non-root nodes.
      if (connections[NodeAdjacent::LeftChild] ^
          connections[NodeAdjacent::RightChild]) {
        os << "Invalid TreeMask: Node has only one child." << std::endl;
        return false;
      }
      if (!GetDAG().IsNodeLeaf(node_id)) {
        os << "Invalid TreeMask: Non-leaf node has no children." << std::endl;
        return false;
      }
    }
    // Check parent.
    if (!connections[NodeAdjacent::Parent]) {
      if (!GetDAG().IsNodeRoot(node_id)) {
        os << "Invalid TreeMask: Non-root node has no parent." << std::endl;
        return false;
      }
    }
  }
  // Check if spans root.
  if (!root_check) {
    os << "Invalid TreeMask: Tree does not span root." << std::endl;
    return false;
  }
  // Check if spans all leaf nodes.
  for (size_t i = 0; i < leaf_check.size(); i++) {
    if (!leaf_check[i]) {
      os << "Invalid TreeMask: Tree does not span all leaves." << std::endl;
      return false;
    }
  }
  return true;
}
// Renders a TreeMask as a bracketed list of "edge_id:(parent->child)" entries.
std::string TPChoiceMap::TreeMaskToString(const TreeMask &tree_mask) const {
  std::stringstream out;
  out << "[ " << std::endl;
  for (const auto mask_edge_id : tree_mask) {
    const auto &mask_edge = GetDAG().GetDAGEdge(mask_edge_id);
    out << "\t" << mask_edge_id << ":(" << mask_edge.GetParent() << "->"
        << mask_edge.GetChild() << "), " << std::endl;
  }
  out << "]";
  return out.str();
}
// Renders an ExpandedTreeMask as a bracketed list of
// "node_id:(parent, left_child, right_child)" entries.
std::string TPChoiceMap::ExpandedTreeMaskToString(
    const ExpandedTreeMask &tree_mask) const {
  std::stringstream os;
  os << "[ " << std::endl;
  // Bind entries by const reference to avoid copying each map entry.
  for (const auto &[node_id, adj_node_ids] : tree_mask) {
    os << "\t" << node_id << ":(" << adj_node_ids[NodeAdjacent::Parent] << ", "
       << adj_node_ids[NodeAdjacent::LeftChild] << ", "
       << adj_node_ids[NodeAdjacent::RightChild] << "), " << std::endl;
  }
  os << "]";
  return os.str();
}
// ** Topology
// - Makes two passes:
// - The first pass goes up along the chosen edges of the DAG to the root, adding
// each edge it encounters.
// - The second pass goes leafward, descending along the chosen edges to the leaf
// edges from the sister of each edge in the rootward pass and the child edges from
// the focal edge.
// Extracts the tree topology induced by the choice map at the focal edge.
Node::Topology TPChoiceMap::ExtractTopology(const EdgeId focal_edge_id) const {
  const TreeMask tree_mask = ExtractTreeMask(focal_edge_id);
  return ExtractTopology(tree_mask);
}
// Extracts the tree topology for a TreeMask by first expanding it to a
// node-adjacency map.
Node::Topology TPChoiceMap::ExtractTopology(const TreeMask &tree_mask) const {
  auto expanded_mask = ExtractExpandedTreeMask(tree_mask);
  return ExtractTopology(expanded_mask);
}
// Builds a Node topology from an ExpandedTreeMask via an iterative DFS:
// descend left, then right, joining children on the way back up. Iteration
// terminates once the rootsplit node (the DAG root's left child in the map)
// has been assigned a joined node.
Node::Topology TPChoiceMap::ExtractTopology(ExpandedTreeMask &tree_mask_ext) const {
  const auto dag_root_id = GetDAG().GetDAGRootNodeId();
  Assert(tree_mask_ext.find(dag_root_id) != tree_mask_ext.end(),
         "DAG Root Id does not exist in ExpandedTreeMask map.");
  Assert(tree_mask_ext[dag_root_id][NodeAdjacent::LeftChild] != NoId,
         "DAG Root Id has no children in ExpandedTreeMask map.");
  const auto dag_rootsplit_id = tree_mask_ext[dag_root_id][NodeAdjacent::LeftChild];
  // Per-node DFS state: whether the left/right subtree has been descended.
  std::unordered_map<NodeId, bool> visited_left;
  std::unordered_map<NodeId, bool> visited_right;
  // Constructed topology nodes, keyed by DAG node id.
  std::unordered_map<NodeId, Node::NodePtr> nodes;
  // Build tree skeleton. Set all nodes to unvisited.
  for (const auto &[node_id, adj_node_ids] : tree_mask_ext) {
    std::ignore = adj_node_ids;
    visited_left[node_id] = false;
    visited_right[node_id] = false;
  }
  // Internal-node ids start after the leaf (taxon) ids.
  size_t node_id_counter = GetDAG().TaxonCount();
  NodeId next_node_id = NodeId(NoId);
  NodeId current_node_id = dag_root_id;
  // Continue until left and right children of rootsplit node have been visited.
  nodes[dag_rootsplit_id] = nullptr;
  while (nodes[dag_rootsplit_id] == nullptr) {
    // If right branch (and left branch) already visited, join child nodes and return
    // up the tree.
    if (visited_right[current_node_id]) {
      const auto left_child_id =
          tree_mask_ext[current_node_id][NodeAdjacent::LeftChild];
      const auto right_child_id =
          tree_mask_ext[current_node_id][NodeAdjacent::RightChild];
      nodes[current_node_id] = Node::Join(nodes.at(left_child_id),
                                          nodes.at(right_child_id), node_id_counter);
      node_id_counter++;
      next_node_id = tree_mask_ext[current_node_id][NodeAdjacent::Parent];
    }
    // If left branch already visited, go down the right branch.
    else if (visited_left[current_node_id]) {
      visited_right[current_node_id] = true;
      next_node_id = tree_mask_ext[current_node_id][NodeAdjacent::RightChild];
    }
    // If node is a leaf, create leaf node and return up the tree.
    else if (GetDAG().IsNodeLeaf(current_node_id)) {
      nodes[current_node_id] =
          Node::Leaf(current_node_id.value_, GetDAG().TaxonCount());
      next_node_id = tree_mask_ext[current_node_id][NodeAdjacent::Parent];
    }
    // If neither left or right child has been visited, go down the left branch.
    else {
      visited_left[current_node_id] = true;
      next_node_id = tree_mask_ext[current_node_id][NodeAdjacent::LeftChild];
    }
    Assert(next_node_id != current_node_id, "Node cannot be adjacent to itself.");
    current_node_id = NodeId(next_node_id);
  }
  Node::NodePtr topology = nodes[dag_rootsplit_id];
  // Every node in the mask except the DAG root should appear in the topology.
  Assert((nodes.size() + 1) == tree_mask_ext.size(),
         "Invalid TreeMask-to-Tree: Topology did not span every node in "
         "the TreeMask.");
  return topology;
}
// ** I/O
// Renders the focal edge and its four choice-map neighbors (parent, sister,
// left child, right child) as "name: edge_id -> (parent_node,child_node)".
std::string TPChoiceMap::EdgeChoiceToString(const EdgeId edge_id) const {
  const auto &edge_choice = GetEdgeChoice(edge_id);
  std::stringstream os;
  // Note: the lambda parameter shadows the outer `edge_id` intentionally.
  auto PrintEdge = [this, &os](const std::string &name, const EdgeId edge_id) {
    // NOTE(review): when edge_id == NoId these stay default-constructed;
    // presumably NodeId's default prints a sentinel — confirm NodeId's
    // default-constructed value is well-defined.
    NodeId parent_id, child_id;
    if (edge_id != NoId) {
      const auto &edge = GetDAG().GetDAGEdge(edge_id);
      parent_id = edge.GetParent();
      child_id = edge.GetChild();
    }
    os << name << ": " << edge_id << " -> (" << parent_id << "," << child_id << "), ";
  };
  os << "{ ";
  PrintEdge("focal", edge_id);
  PrintEdge("parent", edge_choice.parent);
  PrintEdge("sister", edge_choice.sister);
  PrintEdge("left_child", edge_choice.left_child);
  PrintEdge("right_child", edge_choice.right_child);
  os << " }";
  return os.str();
}
// Renders an EdgeChoice's four adjacent edge ids as a braced list.
std::string TPChoiceMap::EdgeChoiceToString(
    const TPChoiceMap::EdgeChoice &edge_choice) {
  std::stringstream out;
  out << "{ "
     << "parent: " << edge_choice.parent << ", "
     << "sister: " << edge_choice.sister << ", "
     << "left_child: " << edge_choice.left_child << ", "
     << "right_child: " << edge_choice.right_child << " }";
  return out.str();
}
// Renders the entire choice map, one edge-choice entry per line.
std::string TPChoiceMap::ToString() const {
  std::stringstream out;
  out << "[ " << std::endl;
  for (EdgeId edge_id = EdgeId(0); edge_id < size(); edge_id++) {
    out << "\t" << edge_id << ": " << EdgeChoiceToString(GetEdgeChoice(edge_id));
    const bool is_last_entry = (edge_id.value_ + 1 >= size());
    if (!is_last_entry) {
      out << ", ";
    }
    out << std::endl;
  }
  out << "]";
  return out.str();
}
// Builds a map from each edge's focal PCSP to the PCSPs of its four
// choice-map neighbors (parent, sister, left child, right child).
TPChoiceMap::PCSPToPCSPVectorMap TPChoiceMap::BuildPCSPMap() const {
  PCSPToPCSPVectorMap result;
  for (EdgeId edge_id(0); edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    const auto edge_pcsps = GetEdgeChoicePCSPs(edge_id);
    result[edge_pcsps.focal] = {{edge_pcsps.parent, edge_pcsps.sister,
                                 edge_pcsps.left_child, edge_pcsps.right_child}};
  }
  return result;
}
// Stream output for an EdgeChoice; delegates to EdgeChoiceToString.
std::ostream &operator<<(std::ostream &os, const TPChoiceMap::EdgeChoice &edge_choice) {
  os << TPChoiceMap::EdgeChoiceToString(edge_choice);
  return os;
}
// Stream output for a TPChoiceMap; delegates to ToString.
std::ostream &operator<<(std::ostream &os, const TPChoiceMap &choice_map) {
  os << choice_map.ToString();
  return os;
}
| 22,914
|
C++
|
.cpp
| 546
| 36.571429
| 88
| 0.660782
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,018
|
tp_evaluation_engine.cpp
|
phylovi_bito/src/tp_evaluation_engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
#include "tp_evaluation_engine.hpp"
#include "tp_engine.hpp"
#include "pv_handler.hpp"
#include "numerical_utils.hpp"
// ** TPEvalEngine
// Base evaluation engine: caches pointers to the owning TPEngine's DAG, graft
// DAG, and site pattern, then sizes node/edge-indexed data to the current DAG.
TPEvalEngine::TPEvalEngine(TPEngine &tp_engine)
    : tp_engine_(&tp_engine),
      dag_(&GetTPEngine().GetDAG()),
      graft_dag_(&GetTPEngine().GetGraftDAG()),
      site_pattern_(&GetTPEngine().GetSitePattern()) {
  // on_init = true: initial allocation rather than a resize of existing data.
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
}
// Base-class stub: concrete evaluation engines must override Initialize().
void TPEvalEngine::Initialize() { Failwith("Pure virtual function call."); }
// Base-class stub: concrete evaluation engines must override ComputeScores().
void TPEvalEngine::ComputeScores(std::optional<EdgeIdVector> opt_edge_ids) {
  Failwith("Pure virtual function call.");
}
// The base engine keeps no node-indexed data, so this is a no-op hook that
// subclasses may override.
void TPEvalEngine::GrowNodeData(const size_t new_node_count,
                                std::optional<const Reindexer> node_reindexer,
                                std::optional<const size_t> explicit_alloc,
                                const bool on_init) {
  // No node data to grow/reindex.
}
void TPEvalEngine::GrowEdgeData(const size_t edge_count,
std::optional<const Reindexer> edge_reindexer,
std::optional<const size_t> explicit_alloc,
const bool on_init) {
// Build resizer for resizing data.
Resizer resizer(GetTPEngine().GetEdgeCount(), GetTPEngine().GetSpareEdgeCount(),
GetTPEngine().GetAllocatedEdgeCount(), edge_count, std::nullopt,
explicit_alloc, GetTPEngine().GetResizingFactor());
resizer.ApplyResizeToEigenVector<EigenVectorXd, double>(GetTopTreeScores(),
DOUBLE_NEG_INF);
// Reindex work space to realign with DAG.
if (edge_reindexer.has_value()) {
Reindexer::ReindexInPlace<EigenVectorXd, double>(
top_tree_per_edge_, edge_reindexer.value(), resizer.GetNewCount());
}
}
// Resizes node- and edge-indexed engine data to match the (possibly grown
// and reindexed) DAG.
void TPEvalEngine::GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
                                    std::optional<Reindexer> edge_reindexer) {
  const size_t node_count = GetDAG().NodeCount();
  const size_t edge_count = GetDAG().EdgeCountWithLeafSubsplits();
  GrowNodeData(node_count, node_reindexer, std::nullopt, false);
  GrowEdgeData(edge_count, edge_reindexer, std::nullopt, false);
}
// Ensures at least `new_node_spare_count` spare node slots exist, growing the
// TPEngine's spare capacity if needed, then resizes local node data to match.
void TPEvalEngine::GrowSpareNodeData(const size_t new_node_spare_count) {
  const bool needs_more_spares =
      (new_node_spare_count > GetTPEngine().GetSpareNodeCount());
  if (needs_more_spares) {
    GetTPEngine().GrowSpareNodeData(new_node_spare_count);
  }
  GrowNodeData(GetTPEngine().GetNodeCount());
}
// Ensures at least `new_edge_spare_count` spare edge slots exist, growing the
// TPEngine's spare capacity if needed, then resizes local edge data to match.
void TPEvalEngine::GrowSpareEdgeData(const size_t new_edge_spare_count) {
  const bool needs_more_spares =
      (new_edge_spare_count > GetTPEngine().GetSpareEdgeCount());
  if (needs_more_spares) {
    GetTPEngine().GrowSpareEdgeData(new_edge_spare_count);
  }
  GrowEdgeData(GetTPEngine().GetEdgeCount());
}
// Base-class stub: concrete engines must override to allocate workspace for
// scoring the given set of adjacent NNIs.
void TPEvalEngine::GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                             const bool via_reference,
                                             const bool use_unique_temps) {
  Failwith("Pure virtual function call.");
}
// Base-class stub: concrete engines must override to refresh engine state
// after an NNI's node pair is added to the DAG.
void TPEvalEngine::UpdateEngineAfterDAGAddNodePair(const NNIOperation &post_nni,
                                                   const NNIOperation &pre_nni,
                                                   std::optional<size_t> new_tree_id) {
  Failwith("Pure virtual function call.");
}
// Base-class stub: concrete engines must override to refresh engine state
// after a batch of DAG modifications.
void TPEvalEngine::UpdateEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  // Message made consistent with the other pure-virtual-style stubs in this
  // class (previously "Pure virtual function.").
  Failwith("Pure virtual function call.");
}
// Returns the cached score of the best tree containing the given edge.
double TPEvalEngine::GetTopTreeScoreWithEdge(const EdgeId edge_id) const {
  return top_tree_per_edge_[edge_id.value_];
}
// Base-class stub: concrete engines must override to score a proposed NNI.
double TPEvalEngine::GetTopTreeScoreWithProposedNNI(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map) {
  Failwith("Pure virtual function call.");
  // Unreachable (Failwith throws/aborts); present to satisfy the signature.
  return DOUBLE_NEG_INF;
}
// Copies the cached top-tree score from the source edge to the destination
// edge.
void TPEvalEngine::CopyEdgeData(const EdgeId src_edge_id, const EdgeId dest_edge_id) {
  const double src_score = top_tree_per_edge_[src_edge_id.value_];
  top_tree_per_edge_[dest_edge_id.value_] = src_score;
}
// ** TPEvalEngineViaLikelihood
// Likelihood-based evaluation engine: memory-maps per-edge PLV storage at
// `mmap_path` and initializes the branch-length handler for the DAG.
TPEvalEngineViaLikelihood::TPEvalEngineViaLikelihood(TPEngine &tp_engine,
                                                     const std::string &mmap_path)
    : TPEvalEngine(tp_engine),
      likelihood_pvs_(mmap_path, GetDAG().EdgeCountWithLeafSubsplits(),
                      GetSitePattern().PatternCount(), 2.0),
      branch_handler_(tp_engine.GetDAG()) {
  // Re-grow here: the base ctor's calls dispatched to the base-class
  // overrides, so derived data (PVs, branch lengths) still needs sizing.
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
  InitializeBranchLengthHandler();
}
// Resets and fully populates all partial likelihood vectors: zero everything,
// seed leaves and root, then run the rootward/leafward passes.
void TPEvalEngineViaLikelihood::Initialize() {
  // Set all PVs to Zero
  ZeroPVs();
  // Populate Leaves with Site Patterns.
  PopulateLeafPVsWithSitePatterns();
  // Populate Rootsplit with Stationary Distribution.
  PopulateRootPVsWithStationaryDistribution();
  // Populate rootward and leafward PVs.
  PopulatePVs();
}
void TPEvalEngineViaLikelihood::ZeroPVs() {
for (EdgeId edge_id = 0; edge_id < GetPVs().GetCount(); edge_id++) {
for (const auto pv_type : PLVTypeEnum::Iterator()) {
GetPVs().GetPV(pv_type, edge_id).setZero();
}
}
}
// Fills all PVs in dependency order: P-PVs bottom-up, then R-PVs top-down.
void TPEvalEngineViaLikelihood::PopulatePVs() {
  // Rootward Pass (populate P-PVs)
  PopulateRootwardPVs();
  // Leafward Pass (populate R-PVs)
  PopulateLeafwardPVs();
}
// Populates P-type PVs by visiting nodes in rootward (leaf-to-root) order.
void TPEvalEngineViaLikelihood::PopulateRootwardPVs() {
  for (const auto node_id : GetDAG().RootwardNodeTraversalTrace(false)) {
    PopulateRootwardPVForNode(node_id);
  }
}
// Populates R-type PVs by visiting nodes in leafward (root-to-leaf) order.
void TPEvalEngineViaLikelihood::PopulateLeafwardPVs() {
  for (const auto node_id : GetDAG().LeafwardNodeTraversalTrace(true)) {
    PopulateLeafwardPVForNode(node_id);
  }
}
// The likelihood engine keys all of its data by edge, so there is no
// node-indexed data to resize.
void TPEvalEngineViaLikelihood::GrowNodeData(
    const size_t node_count, std::optional<const Reindexer> node_reindexer,
    std::optional<const size_t> explicit_alloc, const bool on_init) {
  // No node data to resize.
}
// Grows (and, after DAG modification, reindexes) all edge-indexed data:
// branch lengths, the per-edge likelihood matrix, top-tree scores, and PLVs.
// Timing output goes to stdout only when `is_quiet` is flipped off (debug).
void TPEvalEngineViaLikelihood::GrowEdgeData(
    const size_t edge_count, std::optional<const Reindexer> edge_reindexer,
    std::optional<const size_t> explicit_alloc, const bool on_init) {
  bool is_quiet = true;
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  // Build resizer for resizing data.
  Resizer resizer(GetTPEngine().GetEdgeCount(), GetTPEngine().GetSpareEdgeCount(),
                  GetTPEngine().GetAllocatedEdgeCount(), edge_count, std::nullopt,
                  explicit_alloc, GetTPEngine().GetResizingFactor());
  GetDAGBranchHandler().Resize(resizer.GetNewCount(), edge_reindexer);
  // NOTE(review): the matrix is resized twice in a row — to NewAlloc rows and
  // then immediately to NewPadded rows. The first call appears redundant
  // (possibly intended to force a capacity allocation) — confirm intent.
  GetMatrix().conservativeResize(resizer.GetNewAlloc(),
                                 GetTPEngine().GetSitePattern().PatternCount());
  GetMatrix().conservativeResize(resizer.GetNewPadded(),
                                 GetTPEngine().GetSitePattern().PatternCount());
  resizer.ApplyResizeToEigenVector<EigenVectorXd, double>(GetTopTreeScores(),
                                                          DOUBLE_NEG_INF);
  GetPVs().Resize(resizer.GetNewCount(), resizer.GetNewAlloc(), resizer.GetNewSpare());
  // Reindex work space to realign with DAG.
  if (edge_reindexer.has_value()) {
    auto pv_reindexer = GetPVs().BuildPVReindexer(
        edge_reindexer.value(), resizer.GetOldCount(), resizer.GetNewCount());
    GetPVs().Reindex(pv_reindexer);
    os << "TPLikelihood::ReindexEdgeData::PVs: " << timer.Lap() << std::endl;
    Reindexer::ReindexInPlace<EigenVectorXd, double>(
        GetTopTreeScores(), edge_reindexer.value(), resizer.GetNewCount());
    os << "TPLikelihood::ReindexEdgeData::TopTreeScores: " << timer.Lap() << std::endl;
  }
}
// Ensures at least `new_node_spare_count` spare node slots, growing the
// TPEngine's spare capacity if needed, then resizes local node data.
void TPEvalEngineViaLikelihood::GrowSpareNodeData(const size_t new_node_spare_count) {
  const bool needs_more_spares =
      (new_node_spare_count > GetTPEngine().GetSpareNodeCount());
  if (needs_more_spares) {
    GetTPEngine().GrowSpareNodeData(new_node_spare_count);
  }
  GrowNodeData(GetTPEngine().GetNodeCount());
}
// Ensures at least `new_edge_spare_count` spare edge slots, growing the
// TPEngine's spare capacity if needed, then resizes local edge data.
void TPEvalEngineViaLikelihood::GrowSpareEdgeData(const size_t new_edge_spare_count) {
  const bool needs_more_spares =
      (new_edge_spare_count > GetTPEngine().GetSpareEdgeCount());
  if (needs_more_spares) {
    GetTPEngine().GrowSpareEdgeData(new_edge_spare_count);
  }
  GrowEdgeData(GetTPEngine().GetEdgeCount());
}
// Resizes node- and edge-indexed engine data to match the TPEngine's DAG.
void TPEvalEngineViaLikelihood::GrowEngineForDAG(
    std::optional<Reindexer> node_reindexer, std::optional<Reindexer> edge_reindexer) {
  GrowNodeData(GetTPEngine().GetDAG().NodeCount(), node_reindexer, std::nullopt,
               false);
  GrowEdgeData(GetTPEngine().GetDAG().EdgeCountWithLeafSubsplits(), edge_reindexer,
               std::nullopt, false);
}
// Allocates workspace for scoring `adjacent_nnis`. With `via_reference`,
// spare (temporary) slots are used — either one set per NNI
// (`use_unique_temps`) or one shared set; otherwise data is grown to the
// graft DAG's full size.
void TPEvalEngineViaLikelihood::GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                                          const bool via_reference,
                                                          const bool use_unique_temps) {
  if (via_reference) {
    if (use_unique_temps) {
      GrowSpareNodeData(spare_nodes_per_nni_ * adjacent_nnis.size());
      GrowSpareEdgeData(spare_edges_per_nni_ * adjacent_nnis.size());
    } else {
      GrowSpareNodeData(spare_nodes_per_nni_);
      GrowSpareEdgeData(spare_edges_per_nni_);
    }
    // NOTE(review): this extra call can shrink the effective request below the
    // per-NNI sizing above when adjacent_nnis.size() is smaller — it is a
    // no-op in that case since GrowSpareEdgeData only ever grows; confirm it
    // is intentional rather than leftover.
    GrowSpareEdgeData(adjacent_nnis.size());
  } else {
    GrowNodeData(GetGraftDAG().NodeCountWithoutDAGRoot());
    GrowEdgeData(GetGraftDAG().EdgeCountWithLeafSubsplits());
  }
}
// Refreshes engine state after an NNI's node pair is added to the DAG:
// copies per-edge data from the pre-NNI edges, recomputes PVs around the new
// focal edge (rootward then leafward), and optionally re-optimizes the five
// affected branch lengths.
void TPEvalEngineViaLikelihood::UpdateEngineAfterDAGAddNodePair(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    std::optional<size_t> new_tree_id) {
  // Copy over branch lengths.
  GetTPEngine().CopyOverEdgeDataFromPreNNIToPostNNI(
      post_nni, pre_nni,
      [this](const EdgeId src, const EdgeId dest) { CopyEdgeData(src, dest); },
      new_tree_id);
  const EdgeId focal_edge_id = GetDAG().GetEdgeIdx(post_nni);
  const auto &choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(focal_edge_id);
  // Center and adjacent edges, arranged rootward.
  const EdgeIdVector adj_edge_ids = {{choices.left_child, choices.right_child,
                                      focal_edge_id, choices.sister, choices.parent}};
  // Update PVs: P-PVs in rootward order, then R-PVs in the reverse
  // (leafward) order.
  for (const EdgeId edge_id : adj_edge_ids) {
    PopulateRootwardPVForEdge(edge_id);
  }
  for (auto it = adj_edge_ids.rbegin(); it != adj_edge_ids.rend(); ++it) {
    const EdgeId edge_id = *it;
    PopulateLeafwardPVForEdge(edge_id);
  }
  // Optimize focal branch and all adjacent branches.
  if (do_optimize_new_edges_) {
    for (const EdgeId edge_id : adj_edge_ids) {
      BranchLengthOptimization(edge_id, false);
    }
  }
}
// Refreshes the full engine after a batch of DAG modifications: reseeds leaf
// and root PVs, classifies newly added edges (NNI-central vs incidental),
// repopulates PVs along the affected edges in topological order, optionally
// iterates branch-length optimization over the new edges, and recomputes
// scores. Timing output goes to stdout only when `is_quiet` is flipped off.
void TPEvalEngineViaLikelihood::UpdateEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  bool is_quiet = true;
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  // Populate Leaves with Site Patterns.
  PopulateLeafPVsWithSitePatterns();
  // Populate Rootsplit with Stationary Distribution.
  PopulateRootPVsWithStationaryDistribution();
  os << "UpdateEngineAfterModifyingDAG::Init: " << timer.Lap() << std::endl;
  // Find edges to optimize:
  //   new_edges    — every edge added by this batch of modifications;
  //   nni_edges    — focal edges of the accepted NNIs;
  //   extra_edges  — new edges NOT adjacent to any NNI's focal edge;
  //   update_edges — all edges whose PVs must be refreshed.
  std::set<EdgeId> new_edges, nni_edges, extra_edges, update_edges;
  for (size_t i = prev_edge_count; i < edge_reindexer.size(); i++) {
    const EdgeId edge_id = EdgeId(edge_reindexer.GetNewIndexByOldIndex(i));
    new_edges.insert(edge_id);
    extra_edges.insert(edge_id);
    update_edges.insert(edge_id);
  }
  for (const auto &[post_nni, pre_nni] : nni_to_pre_nni) {
    std::ignore = pre_nni;
    const auto edge_id = GetDAG().GetEdgeIdx(post_nni);
    const auto &choice = GetTPEngine().GetChoiceMap(edge_id);
    nni_edges.insert(edge_id);
    extra_edges.erase(choice.right_child);
    extra_edges.erase(choice.left_child);
    extra_edges.erase(choice.sister);
    extra_edges.erase(edge_id);
    extra_edges.erase(choice.parent);
    update_edges.insert(choice.right_child);
    update_edges.insert(choice.left_child);
    update_edges.insert(choice.sister);
    update_edges.insert(edge_id);
    update_edges.insert(choice.parent);
  }
  // Find topological sort of edges: rootward order sorts by parent node id
  // ascending, leafward by child node id descending.
  std::vector<EdgeId> rootward_edges(update_edges.begin(), update_edges.end());
  std::sort(rootward_edges.begin(), rootward_edges.end(),
            [this](const EdgeId lhs, const EdgeId rhs) {
              return GetDAG().GetDAGEdge(lhs).GetParent() <
                     GetDAG().GetDAGEdge(rhs).GetParent();
            });
  std::vector<EdgeId> leafward_edges(update_edges.begin(), update_edges.end());
  std::sort(leafward_edges.begin(), leafward_edges.end(),
            [this](const EdgeId lhs, const EdgeId rhs) {
              return GetDAG().GetDAGEdge(lhs).GetChild() >
                     GetDAG().GetDAGEdge(rhs).GetChild();
            });
  os << "UpdateEngineAfterModifyingDAG::UpdateEdges: " << timer.Lap() << std::endl;
  // Initialize new PLVs.
  auto RootwardPass = [&]() {
    for (const auto edge_id : rootward_edges) {
      PopulateRootwardPVForEdge(edge_id);
    }
  };
  auto LeafwardPass = [&]() {
    for (const auto edge_id : leafward_edges) {
      PopulateLeafwardPVForEdge(edge_id);
    }
  };
  std::map<EdgeId, size_t> optimize_counter;
  // Refreshes the PVs around edge_id and (for genuinely new edges) optimizes
  // its branch length. NOTE(review): `focal_edge_id` and `test_output` are
  // currently unused — presumably debugging leftovers; confirm.
  auto OptimizeEdge = [&](const EdgeId edge_id, const EdgeId parent_edge_id,
                          const EdgeId focal_edge_id,
                          const bool is_not_child_edge = true,
                          const bool is_not_parent_edge = true,
                          const bool do_optimize = true,
                          const bool test_output = false) {
    if (optimize_counter.find(edge_id) == optimize_counter.end()) {
      optimize_counter[edge_id] = 1;
    } else {
      optimize_counter[edge_id] = optimize_counter[edge_id] + 1;
    }
    const auto focal = GetDAG().GetFocalClade(edge_id);
    const auto sister = GetDAG().GetSisterClade(edge_id);
    if (is_not_child_edge) {
      // If child does not go to a leaf, update child_p (in case leftchild or
      // rightchild branch length were changed).
      MultiplyPVs(GetPVs().GetPVIndex(PLVType::P, edge_id),
                  GetPVs().GetPVIndex(PLVType::PHatLeft, edge_id),
                  GetPVs().GetPVIndex(PLVType::PHatRight, edge_id));
    }
    if (is_not_parent_edge) {
      // Update parent_rfocal (in case that sister branch length was changed).
      if (!GetDAG().IsEdgeRoot(edge_id)) {
        MultiplyPVs(GetPVs().GetPVIndex(PLVTypeEnum::RPLVType(focal), parent_edge_id),
                    GetPVs().GetPVIndex(PLVType::RHat, parent_edge_id),
                    GetPVs().GetPVIndex(PLVTypeEnum::PPLVType(sister), parent_edge_id));
      } else {
        TakePVValue(GetPVs().GetPVIndex(PLVTypeEnum::RPLVType(focal), parent_edge_id),
                    GetPVs().GetPVIndex(PLVType::RHat, parent_edge_id));
      }
    }
    // Optimize branch length.
    const auto &[parent_rfocal_pvid, child_p_pvid] = GetPrimaryPVIdsOfEdge(edge_id);
    if ((new_edges.find(edge_id) != new_edges.end()) && do_optimize) {
      branch_handler_.OptimizeBranchLength(edge_id, parent_rfocal_pvid, child_p_pvid,
                                           false);
    }
    if (is_not_parent_edge) {
      // Update parent_phatfocal after changing branch length.
      SetToEvolvedPV(GetPVs().GetPVIndex(PLVTypeEnum::PPLVType(focal), parent_edge_id),
                     edge_id, GetPVs().GetPVIndex(PLVType::P, edge_id));
      // Update parent_p after changing branch length.
      MultiplyPVs(GetPVs().GetPVIndex(PLVType::P, parent_edge_id),
                  GetPVs().GetPVIndex(PLVType::PHatLeft, parent_edge_id),
                  GetPVs().GetPVIndex(PLVType::PHatRight, parent_edge_id));
    }
  };
  // Recomputes P-PVs for an NNI's local neighborhood, bottom-up.
  auto NNIRootwardPass = [&](const EdgeId edge_id) {
    const auto &choice = GetTPEngine().GetChoiceMap(edge_id);
    const auto pvids = GetLocalPVIdsOfEdge(edge_id);
    // Evolve up child P-PLVs.
    SetToEvolvedPV(pvids.child_phatleft_, choice.left_child, pvids.leftchild_p_);
    SetToEvolvedPV(pvids.child_phatright_, choice.right_child, pvids.rightchild_p_);
    MultiplyPVs(pvids.child_p_, pvids.child_phatleft_, pvids.child_phatright_);
    // Evolve up parent P-PLVs
    SetToEvolvedPV(pvids.parent_phatsister_, choice.sister, pvids.sister_p_);
    SetToEvolvedPV(pvids.parent_phatfocal_, edge_id, pvids.child_p_);
    MultiplyPVs(pvids.parent_p_, pvids.parent_phatfocal_, pvids.parent_phatsister_);
  };
  // Recomputes R-PVs for an NNI's local neighborhood, top-down.
  auto NNILeafwardPass = [&](const EdgeId edge_id) {
    const auto &choice = GetTPEngine().GetChoiceMap(edge_id);
    const auto pvids = GetLocalPVIdsOfEdge(edge_id);
    // If the parent is not the DAG root, then evolve grandparent down to parent.
    if (pvids.grandparent_rfocal_ != NoId) {
      SetToEvolvedPV(pvids.parent_rhat_, choice.parent, pvids.grandparent_rfocal_);
    }
    // Evolve down parent R-PLVs.
    MultiplyPVs(pvids.parent_rfocal_, pvids.parent_rhat_, pvids.parent_phatsister_);
    MultiplyPVs(pvids.parent_rsister_, pvids.parent_rhat_, pvids.parent_phatfocal_);
    // Evolve down child R-PLVs.
    SetToEvolvedPV(pvids.child_rhat_, edge_id, pvids.parent_rfocal_);
    MultiplyPVs(pvids.child_rleft_, pvids.child_rhat_, pvids.child_phatright_);
    MultiplyPVs(pvids.child_rright_, pvids.child_rhat_, pvids.child_phatleft_);
  };
  auto NNIUpdatePVs = [&]() {
    // Update new NNI PVs.
    for (const auto edge_id : nni_edges) {
      NNIRootwardPass(edge_id);
      NNILeafwardPass(edge_id);
    }
  };
  RootwardPass();
  LeafwardPass();
  os << "UpdateEngineAfterModifyingDAG::Leafward/RootwardPass: " << timer.Lap()
     << std::endl;
  if (IsOptimizeNewEdges()) {
    Stopwatch opt_timer(true, Stopwatch::TimeScale::SecondScale);
    for (size_t iter = 0; iter < optimize_max_iter_; iter++) {
      // Optimize each NNI.
      for (const auto edge_id : nni_edges) {
        const auto &choice = GetTPEngine().GetChoiceMap(edge_id);
        OptimizeEdge(choice.left_child, edge_id, edge_id, false, true, true);
        OptimizeEdge(choice.right_child, edge_id, edge_id, false, true, true);
        OptimizeEdge(choice.sister, choice.parent, edge_id, false, true, true);
        OptimizeEdge(edge_id, choice.parent, edge_id, true, true, true);
        if (!GetDAG().IsEdgeRoot(choice.parent)) {
          const auto &choice_2 = GetTPEngine().GetChoiceMap(choice.parent);
          OptimizeEdge(choice.parent, choice_2.parent, edge_id, true, false, true,
                       true);
        }
        os << "UpdateEngineAfterModifyingDAG::OptimizeCentralEdges: " << opt_timer.Lap()
           << std::endl;
      }
      // Optimize incidental new edges.
      for (const auto edge_id : extra_edges) {
        // NOTE(review): this inner opt_timer shadows the outer one, so the
        // outer timer's laps include this loop — confirm intended.
        Stopwatch opt_timer(true, Stopwatch::TimeScale::SecondScale);
        const auto &choice = GetTPEngine().GetChoiceMap(edge_id);
        if (!GetDAG().IsEdgeRoot(choice.parent)) {
          OptimizeEdge(edge_id, choice.parent, edge_id);
        }
        os << "UpdateEngineAfterModifyingDAG::OptimizeAdjEdges: " << opt_timer.Lap()
           << std::endl;
      }
      // Update new NNI PVs.
      NNIUpdatePVs();
      os << "UpdateEngineAfterModifyingDAG::NNIUpdatePVs: " << opt_timer.Lap()
         << std::endl;
    }
    os << "UpdateEngineAfterModifyingDAG::OptimizeNewEdges: " << timer.Lap()
       << std::endl;
  }
  // Update scores.
  EdgeIdVector update_edges_vec(update_edges.begin(), update_edges.end());
  ComputeScores(update_edges_vec);
  os << "UpdateEngineAfterModifyingDAG::ComputeScores: " << timer.Lap() << std::endl;
}
// Defers to the base-class cached top-tree score lookup.
double TPEvalEngineViaLikelihood::GetTopTreeScoreWithEdge(const EdgeId edge_id) const {
  return TPEvalEngine::GetTopTreeScoreWithEdge(edge_id);
}
// Scores a proposed (not-yet-added) NNI: builds temporary PVs/branch lengths
// in spare slots from the best matching pre-NNI in the DAG, optionally
// iterates branch-length optimization over the five local edges, and returns
// the log likelihood across the proposed focal edge.
double TPEvalEngineViaLikelihood::GetTopTreeScoreWithProposedNNI(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map_opt) {
  auto nni_info =
      GetProposedNNIInfo(post_nni, pre_nni, spare_offset, best_edge_map_opt);
  const auto &temp_pv_ids = nni_info.temp_pv_ids;
  const auto &temp_edge_ids = nni_info.temp_edge_ids;
  const auto &ref_pv_ids = nni_info.ref_pv_ids;
  const auto &ref_edge_ids = nni_info.ref_edge_ids;
  const auto &adj_edge_ids = nni_info.adj_edge_ids;
  auto &do_optimize_edge = nni_info.do_optimize_edge;
  // Initialize branch lengths.
  for (auto nni_adj_type : NNIAdjacentEnum::Iterator()) {
    // Initialize with default branch lengths.
    branch_handler_(temp_edge_ids[nni_adj_type]) =
        branch_handler_.GetDefaultBranchLength();
    // If flagged for (or best_edge_map has been given), copy branch lengths mapped over
    // from pre-edge to post-edge.
    if (do_init_proposed_branch_lengths_with_dag_ or best_edge_map_opt.has_value()) {
      branch_handler_(temp_edge_ids[nni_adj_type]) =
          branch_handler_(ref_edge_ids[nni_adj_type]);
      // If edge already exists in the DAG, use it's branch length.
      if (adj_edge_ids[nni_adj_type] != NoId) {
        branch_handler_(temp_edge_ids[nni_adj_type]) =
            branch_handler_(adj_edge_ids[nni_adj_type]);
        if (do_fix_proposed_branch_lengths_from_dag_) {
          do_optimize_edge[nni_adj_type] = false;
        }
      }
    }
  }
  // Recomputes temporary P-PVs bottom-up from the reference PVs.
  auto RootwardPass = [&]() {
    // Evolve up child P-PLVs.
    SetToEvolvedPV(temp_pv_ids.child_phatleft_, temp_edge_ids.left_child,
                   ref_pv_ids.leftchild_p_);
    SetToEvolvedPV(temp_pv_ids.child_phatright_, temp_edge_ids.right_child,
                   ref_pv_ids.rightchild_p_);
    MultiplyPVs(temp_pv_ids.child_p_, temp_pv_ids.child_phatleft_,
                temp_pv_ids.child_phatright_);
    // Evolve up parent P-PLVs
    SetToEvolvedPV(temp_pv_ids.parent_phatsister_, temp_edge_ids.sister,
                   ref_pv_ids.sister_p_);
    SetToEvolvedPV(temp_pv_ids.parent_phatfocal_, temp_edge_ids.focal,
                   temp_pv_ids.child_p_);
    MultiplyPVs(temp_pv_ids.parent_p_, temp_pv_ids.parent_phatfocal_,
                temp_pv_ids.parent_phatsister_);
  };
  // Recomputes temporary R-PVs top-down from the reference PVs.
  auto LeafwardPass = [&]() {
    // If the parent is not the DAG root, then evolve grandparent down to parent.
    if ((ref_pv_ids.grandparent_rfocal_ != NoId)) {
      SetToEvolvedPV(temp_pv_ids.parent_rhat_, temp_edge_ids.parent,
                     ref_pv_ids.grandparent_rfocal_);
    } else {
      TakePVValue(temp_pv_ids.parent_rhat_, ref_pv_ids.parent_rhat_);
    }
    // Evolve down parent R-PLVs.
    MultiplyPVs(temp_pv_ids.parent_rfocal_, temp_pv_ids.parent_rhat_,
                temp_pv_ids.parent_phatsister_);
    MultiplyPVs(temp_pv_ids.parent_rsister_, temp_pv_ids.parent_rhat_,
                temp_pv_ids.parent_phatfocal_);
    // Evolve down child R-PLVs.
    SetToEvolvedPV(temp_pv_ids.child_rhat_, temp_edge_ids.focal,
                   temp_pv_ids.parent_rfocal_);
    MultiplyPVs(temp_pv_ids.child_rleft_, temp_pv_ids.child_rhat_,
                temp_pv_ids.child_phatright_);
    MultiplyPVs(temp_pv_ids.child_rright_, temp_pv_ids.child_rhat_,
                temp_pv_ids.child_phatleft_);
  };
  // Optimize branch lengths.
  auto OptimizeEdge = [&](size_t iter, const NNIAdjacent nni_adj_type,
                          const EdgeId edge_id, const EdgeId focal_edge_id,
                          const PVId parent_p, const PVId parent_phatfocal,
                          const PVId parent_phatsister, const PVId parent_rhat,
                          const PVId parent_rfocal, const PVId parent_rsister,
                          const PVId child_p, const PVId child_phatleft,
                          const PVId child_phatright, const bool update_branch_length,
                          const bool is_not_child_edge, const bool is_not_parent_edge) {
    if (is_not_child_edge) {
      // If child does not go to a leaf, update child_p (in case leftchild
      // or rightchild branch length were changed).
      MultiplyPVs(child_p, child_phatleft, child_phatright);
    }
    if (is_not_parent_edge) {
      // Update parent_rfocal (in case that sister branch length was
      // changed).
      MultiplyPVs(parent_rfocal, parent_rhat, parent_phatsister);
    }
    if (update_branch_length) {
      // Optimize branch length.
      branch_handler_.OptimizeBranchLength(edge_id, parent_rfocal, child_p, iter > 0);
    }
    if (is_not_parent_edge) {
      // Update parent_phatfocal after changing branch length.
      SetToEvolvedPV(parent_phatfocal, edge_id, child_p);
      // If not parent edge, update parent_p after changing branch length.
      MultiplyPVs(parent_p, parent_phatfocal, parent_phatsister);
    }
  };
  // The five helpers below bind OptimizeEdge's PV arguments for each of the
  // NNI's local edges (left child, right child, sister, focal, parent).
  auto OptimizeLeftChild = [&](const size_t iter, const bool do_optimize = true) {
    OptimizeEdge(iter, NNIAdjacent::LeftChild, temp_edge_ids.left_child,
                 temp_edge_ids.focal, temp_pv_ids.child_p_, temp_pv_ids.child_phatleft_,
                 temp_pv_ids.child_phatright_, temp_pv_ids.child_rhat_,
                 temp_pv_ids.child_rleft_, temp_pv_ids.child_rright_,
                 ref_pv_ids.leftchild_p_, PVId(NoId), PVId(NoId),
                 (do_optimize_edge.left_child && do_optimize), false, true);
  };
  auto OptimizeRightChild = [&](const size_t iter, const bool do_optimize = true) {
    OptimizeEdge(
        iter, NNIAdjacent::RightChild, temp_edge_ids.right_child, temp_edge_ids.focal,
        temp_pv_ids.child_p_, temp_pv_ids.child_phatright_, temp_pv_ids.child_phatleft_,
        temp_pv_ids.child_rhat_, temp_pv_ids.child_rright_, temp_pv_ids.child_rleft_,
        ref_pv_ids.rightchild_p_, PVId(NoId), PVId(NoId),
        (do_optimize_edge.right_child && do_optimize), false, true);
  };
  auto OptimizeSister = [&](const size_t iter, const bool do_optimize = true) {
    OptimizeEdge(iter, NNIAdjacent::Sister, temp_edge_ids.sister, temp_edge_ids.focal,
                 temp_pv_ids.parent_p_, temp_pv_ids.parent_phatsister_,
                 temp_pv_ids.parent_phatfocal_, temp_pv_ids.parent_rhat_,
                 temp_pv_ids.parent_rsister_, temp_pv_ids.parent_rfocal_,
                 ref_pv_ids.sister_p_, PVId(NoId), PVId(NoId),
                 (do_optimize_edge.sister && do_optimize), false, true);
  };
  auto OptimizeCentral = [&](const size_t iter, const bool do_optimize = true) {
    OptimizeEdge(iter, NNIAdjacent::Focal, temp_edge_ids.focal, temp_edge_ids.focal,
                 temp_pv_ids.parent_p_, temp_pv_ids.parent_phatfocal_,
                 temp_pv_ids.parent_phatsister_, temp_pv_ids.parent_rhat_,
                 temp_pv_ids.parent_rfocal_, temp_pv_ids.parent_rsister_,
                 temp_pv_ids.child_p_, temp_pv_ids.child_phatleft_,
                 temp_pv_ids.child_phatright_, (do_optimize_edge.focal && do_optimize),
                 true, true);
  };
  auto OptimizeParent = [&](const size_t iter, const bool do_optimize = true) {
    OptimizeEdge(iter, NNIAdjacent::Parent, temp_edge_ids.parent, temp_edge_ids.focal,
                 PVId(NoId), PVId(NoId), PVId(NoId), ref_pv_ids.grandparent_rhat_,
                 ref_pv_ids.grandparent_rfocal_, ref_pv_ids.grandparent_rsister_,
                 temp_pv_ids.parent_p_, temp_pv_ids.parent_phatfocal_,
                 temp_pv_ids.parent_phatsister_,
                 (do_optimize_edge.parent && do_optimize), true, false);
  };
  // Initialize new PLVs.
  RootwardPass();
  LeafwardPass();
  // Branch length optimization.
  if (IsOptimizeNewEdges()) {
    for (size_t iter = 0; iter < optimize_max_iter_; iter++) {
      // Optimize Branch Lengths
      OptimizeLeftChild(iter);
      OptimizeRightChild(iter);
      OptimizeSister(iter);
      OptimizeCentral(iter);
      if (!post_nni.GetParent().SubsplitIsRootsplit()) {
        if (temp_edge_ids.parent != NoId) {
          OptimizeParent(iter);
        }
      }
      // Update PLVs.
      RootwardPass();
      LeafwardPass();
    }
  }
  // Compute likelihood of focal edge by evolving up from child to parent.
  ComputeLikelihood(temp_edge_ids.focal, temp_pv_ids.child_p_,
                    temp_pv_ids.parent_rfocal_);
  // Weight per-site likelihoods by site-pattern multiplicities.
  EigenVectorXd top_tree_likelihood =
      GetMatrix().block(temp_edge_ids.focal.value_, 0, 1, GetMatrix().cols()) *
      GetTPEngine().GetSitePatternWeights();
  return top_tree_likelihood[0];
}
// Assemble the bookkeeping needed to score a proposed (not-yet-added) post-NNI:
// reference edge/node/PV ids taken from the best pre-NNI neighbor already in
// the DAG (remapped through the pre-NNI -> post-NNI clade map), plus temporary
// PV slots and edge ids in which the post-NNI quantities will be computed.
// NOTE(review): tmp_pre_nni is unused here; the pre-NNI is re-derived from the
// DAG via FindHighestPriorityNeighborNNIInDAG — confirm this is intentional.
ProposedNNIInfo TPEvalEngineViaLikelihood::GetProposedNNIInfo(
    const NNIOperation &post_nni, const NNIOperation &tmp_pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map_opt) const {
  // Get the best pre-NNI candidate.
  const NNIOperation pre_nni =
      GetTPEngine().FindHighestPriorityNeighborNNIInDAG(post_nni);
  // Get reference edge choices from pre-NNI in DAG, remapped according to post-NNI.
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
  const auto clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(pre_nni, post_nni);
  const auto pre_edge_ids = GetTPEngine().GetChoiceMap().GetEdgeChoice(pre_edge_id);
  const auto pre_edge_ids_remapped =
      GetTPEngine().GetChoiceMap().RemapEdgeChoiceDataViaNNICladeMap(pre_edge_ids,
                                                                     clade_map);
  NNIAdjEdgeIds ref_edge_ids;
  ref_edge_ids.parent = pre_edge_ids_remapped.parent;
  ref_edge_ids.sister = pre_edge_ids_remapped.sister;
  // The focal reference edge is the pre-NNI's own central edge.
  ref_edge_ids.focal = pre_edge_id;
  ref_edge_ids.left_child = pre_edge_ids_remapped.left_child;
  ref_edge_ids.right_child = pre_edge_ids_remapped.right_child;
  // Get reference node choices from pre-NNI in DAG, remapped according to post-NNI.
  const auto pre_node_ids_remapped =
      GetTPEngine().GetChoiceMap().GetEdgeChoiceNodeIds(pre_edge_ids_remapped);
  NNIAdjNodeIds ref_node_ids;
  ref_node_ids.parent = pre_node_ids_remapped.parent;
  ref_node_ids.sister = pre_node_ids_remapped.sister;
  // The focal node does not yet exist in the DAG for a proposed NNI.
  ref_node_ids.focal = NodeId(NoId);
  ref_node_ids.left_child = pre_node_ids_remapped.left_child;
  ref_node_ids.right_child = pre_node_ids_remapped.right_child;
  // Get adjacent PCSPs from edges.
  const auto adj_pcsps =
      GetTPEngine().BuildAdjacentPCSPsToProposedNNI(post_nni, pre_node_ids_remapped);
  // If passed best_edge_map, override ref_edge_ids using map.
  if (best_edge_map_opt.has_value()) {
    auto &best_edge_map = best_edge_map_opt.value();
    for (auto nni_adj_type : NNIAdjacentEnum::Iterator()) {
      ref_edge_ids[nni_adj_type] = best_edge_map[adj_pcsps[nni_adj_type]];
    }
  }
  // Update adjacent edge ids if they already exists in DAG.
  NNIAdjEdgeIds adj_edge_ids;
  for (auto nni_adj_type : NNIAdjacentEnum::Iterator()) {
    EdgeId edge_id{NoId};
    if (GetDAG().ContainsEdge(adj_pcsps[nni_adj_type])) {
      edge_id = GetDAG().GetEdgeIdx(adj_pcsps[nni_adj_type]);
    }
    adj_edge_ids[nni_adj_type] = edge_id;
  }
  // By default, every adjacent branch length is optimized.
  NNIAdjBools do_optimize_edge{true, true, true, true, true};
  // Get reference PLV IDs from pre-NNI in DAG, remapped according to post-NNI.
  const auto pre_pv_ids = GetLocalPVIdsOfEdge(pre_edge_id);
  const auto pre_pv_ids_remapped = RemapLocalPVIdsForPostNNI(pre_pv_ids, clade_map);
  NNIAdjacentMap<PVId> ref_primary_pv_ids;
  ref_primary_pv_ids.parent = pre_pv_ids_remapped.grandparent_rfocal_;
  ref_primary_pv_ids.sister = pre_pv_ids_remapped.sister_p_;
  ref_primary_pv_ids.focal = pre_pv_ids_remapped.parent_rfocal_;
  ref_primary_pv_ids.left_child = pre_pv_ids_remapped.leftchild_p_;
  ref_primary_pv_ids.right_child = pre_pv_ids_remapped.rightchild_p_;
  // Get temp locations for post-NNI PVs.
  const auto temp_pv_ids = GetTempLocalPVIdsForProposedNNIs(spare_offset);
  // Get temp locations for post-NNI branch lengths.
  const auto temp_edge_ids = GetTempEdgeIdsForProposedNNIs(spare_offset);
  ProposedNNIInfo nni_info{post_nni,
                           pre_nni,
                           temp_pv_ids,
                           temp_edge_ids,
                           pre_pv_ids_remapped,
                           ref_primary_pv_ids,
                           ref_edge_ids,
                           ref_node_ids,
                           adj_edge_ids,
                           adj_pcsps,
                           do_optimize_edge};
  return nni_info;
}
// Build NNI info for a post-NNI that already exists in the DAG: start from
// GetProposedNNIInfo, then replace the reference PV ids with the real edge's
// own local PVs so scoring reads the DAG's actual state.
ProposedNNIInfo TPEvalEngineViaLikelihood::GetRealNNIInfo(
    const NNIOperation &post_nni, const NNIOperation &tmp_pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map_opt) const {
  auto nni_info =
      GetProposedNNIInfo(post_nni, tmp_pre_nni, spare_offset, best_edge_map_opt);
  // Look up the real edge for this NNI and take its local PV neighborhood.
  auto edge_id = GetDAG().GetEdgeIdx(post_nni);
  nni_info.ref_pv_ids = GetLocalPVIdsOfEdge(edge_id);
  auto &ref_pv_ids = nni_info.ref_pv_ids;
  auto &ref_primary_pv_ids = nni_info.ref_primary_pv_ids;
  // Primary PVs now come from the real edge rather than the pre-NNI.
  ref_primary_pv_ids.parent = ref_pv_ids.grandparent_rfocal_;
  ref_primary_pv_ids.sister = ref_pv_ids.sister_p_;
  ref_primary_pv_ids.focal = ref_pv_ids.parent_rfocal_;
  ref_primary_pv_ids.left_child = ref_pv_ids.leftchild_p_;
  ref_primary_pv_ids.right_child = ref_pv_ids.rightchild_p_;
  return nni_info;
}
// Allocate the 18 spare PV slots used to evaluate one proposed NNI. The slots
// live in the spare region at offset (spare_offset * spare_nodes_per_nni_);
// the assignment order below defines the fixed layout within that region.
LocalPVIds TPEvalEngineViaLikelihood::GetTempLocalPVIdsForProposedNNIs(
    const size_t spare_offset) const {
  LocalPVIds temp_pv_ids;
  size_t spare_count = 0;
  // Hand out consecutive spare PV indices within this NNI's region.
  auto GetNextSparePVIndex = [this, spare_offset, &spare_count]() {
    PVId next_pvid =
        GetPVs().GetSparePVIndex((spare_offset * spare_nodes_per_nni_) + spare_count);
    spare_count++;
    return next_pvid;
  };
  // Grandparent-level PVs.
  temp_pv_ids.grandparent_rhat_ = GetNextSparePVIndex();
  temp_pv_ids.grandparent_rfocal_ = GetNextSparePVIndex();
  temp_pv_ids.grandparent_rsister_ = GetNextSparePVIndex();
  // Parent-level PVs.
  temp_pv_ids.parent_p_ = GetNextSparePVIndex();
  temp_pv_ids.parent_phatfocal_ = GetNextSparePVIndex();
  temp_pv_ids.parent_phatsister_ = GetNextSparePVIndex();
  temp_pv_ids.parent_rfocal_ = GetNextSparePVIndex();
  temp_pv_ids.parent_rhat_ = GetNextSparePVIndex();
  temp_pv_ids.parent_rsister_ = GetNextSparePVIndex();
  // Child-level PVs.
  temp_pv_ids.child_p_ = GetNextSparePVIndex();
  temp_pv_ids.child_phatleft_ = GetNextSparePVIndex();
  temp_pv_ids.child_phatright_ = GetNextSparePVIndex();
  temp_pv_ids.child_rhat_ = GetNextSparePVIndex();
  temp_pv_ids.child_rleft_ = GetNextSparePVIndex();
  temp_pv_ids.child_rright_ = GetNextSparePVIndex();
  // Grandchild-level PVs.
  temp_pv_ids.sister_p_ = GetNextSparePVIndex();
  temp_pv_ids.leftchild_p_ = GetNextSparePVIndex();
  temp_pv_ids.rightchild_p_ = GetNextSparePVIndex();
  return temp_pv_ids;
}
// Allocate the 5 temporary edge ids (focal plus four adjacent) used to hold a
// proposed NNI's branch lengths. The ids index past the real edge count, into
// the spare region at offset (spare_offset * spare_edges_per_nni_).
NNIAdjEdgeIds TPEvalEngineViaLikelihood::GetTempEdgeIdsForProposedNNIs(
    const size_t spare_offset) const {
  NNIAdjEdgeIds temp_edge_ids;
  size_t spare_count = 0;
  // Hand out consecutive spare edge ids within this NNI's region.
  auto GetNextSpareEdgeId = [this, spare_offset, &spare_count]() {
    EdgeId next_edge_id = EdgeId(GetTPEngine().GetEdgeCount() +
                                 (spare_offset * spare_edges_per_nni_) + spare_count);
    spare_count++;
    return next_edge_id;
  };
  temp_edge_ids.focal = GetNextSpareEdgeId();
  temp_edge_ids.parent = GetNextSpareEdgeId();
  temp_edge_ids.sister = GetNextSpareEdgeId();
  temp_edge_ids.left_child = GetNextSpareEdgeId();
  temp_edge_ids.right_child = GetNextSpareEdgeId();
  return temp_edge_ids;
}
// Copy all per-edge state from one edge slot to another: the branch length
// handled here, plus whatever edge data the base engine maintains.
void TPEvalEngineViaLikelihood::CopyEdgeData(const EdgeId src_edge_id,
                                             const EdgeId dest_edge_id) {
  const double src_branch_length = branch_handler_(src_edge_id);
  branch_handler_(dest_edge_id) = src_branch_length;
  TPEvalEngine::CopyEdgeData(src_edge_id, dest_edge_id);
}
// For rootward traversal. Compute the likelihood PV for a given node, by using
// the left and right child edges from the choice map of the edge below node.
void TPEvalEngineViaLikelihood::PopulateRootwardPVForNode(const NodeId node_id) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
for (const auto adj_node_id :
GetDAG().GetDAGNode(node_id).GetNeighbors(Direction::Rootward, clade)) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_id);
PopulateRootwardPVForEdge(edge_id);
}
}
}
// Refresh the rootward (P-family) PLVs stored on `edge_id`: evolve each chosen
// child's P PLV up its child edge into this edge's PHatLeft/PHatRight, then
// combine those into this edge's P PLV.
void TPEvalEngineViaLikelihood::PopulateRootwardPVForEdge(const EdgeId edge_id) {
  // Populate edge PLV by evolving up given edge.
  const auto choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  // Update parent's PLeftHat PLV, evolved up from left child's P PLV.
  if (choices.left_child != NoId) {
    EvolvePPVUpEdge(edge_id, choices.left_child);
  }
  // Update parent's PRightHat PLV, evolved up from right child's P PLV.
  if (choices.right_child != NoId) {
    EvolvePPVUpEdge(edge_id, choices.right_child);
  }
  // Update P-PLV from PHatLeft and PHatRight by taking product.
  const PVId p_pvid = GetPVs().GetPVIndex(PLVType::P, edge_id);
  const PVId phatleft_pvid = GetPVs().GetPVIndex(PLVType::PHatLeft, edge_id);
  const PVId phatright_pvid = GetPVs().GetPVIndex(PLVType::PHatRight, edge_id);
  if ((choices.left_child != NoId) && (choices.right_child != NoId)) {
    MultiplyPVs(p_pvid, phatleft_pvid, phatright_pvid);
  } else if (choices.left_child != NoId) {
    // Only one child in the choice map: P is just that evolved child PLV.
    TakePVValue(p_pvid, phatleft_pvid);
  } else if (choices.right_child != NoId) {
    TakePVValue(p_pvid, phatright_pvid);
  }
}
// For leafward traversal. Compute the likelihood PV for a given node, by using the
// parent and sister edges from the choice map of the edge above node.
void TPEvalEngineViaLikelihood::PopulateLeafwardPVForNode(const NodeId node_id) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
for (const auto adj_node_id :
GetDAG().GetDAGNode(node_id).GetNeighbors(Direction::Leafward, clade)) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(node_id, adj_node_id);
PopulateLeafwardPVForEdge(edge_id);
}
}
}
// Refresh the leafward (R-family) PLVs stored on `edge_id`: evolve the chosen
// parent's RFocal PLV down into this edge's RHat, then form RLeft/RRight as
// RHat combined elementwise with the opposite clade's PHat.
void TPEvalEngineViaLikelihood::PopulateLeafwardPVForEdge(const EdgeId edge_id) {
  // Populate edge PLV by evolving down given edge.
  const auto choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  // Evolve down parent.
  // Updates child's R-PLV by evolving down from parent's RFocalHat-PLV.
  if (choices.parent != NoId) {
    EvolveRPVDownEdge(choices.parent, edge_id);
  }
  // Updates child's RFocalHat-PLV and RSisterHat-PLV by taking (RHat o PHatOpposite).
  const PVId rhat_pvid = GetPVs().GetPVIndex(PLVType::RHat, edge_id);
  const PVId rleft_pvid = GetPVs().GetPVIndex(PLVType::RLeft, edge_id);
  const PVId rright_pvid = GetPVs().GetPVIndex(PLVType::RRight, edge_id);
  const PVId phatleft_pvid = GetPVs().GetPVIndex(PLVType::PHatLeft, edge_id);
  const PVId phatright_pvid = GetPVs().GetPVIndex(PLVType::PHatRight, edge_id);
  // Note the clade swap: RLeft pairs with PHatRight and vice versa.
  MultiplyPVs(rleft_pvid, rhat_pvid, phatright_pvid);
  MultiplyPVs(rright_pvid, rhat_pvid, phatleft_pvid);
}
void TPEvalEngineViaLikelihood::PopulateLeafPVsWithSitePatterns() {
auto BuildPVForSitePattern = [this](const TaxonId taxon_id, const PVId pv_id) {
auto &pv = GetPVs().GetPV(pv_id);
pv.setZero();
const auto &pattern = GetSitePattern().GetPatterns()[taxon_id.value_];
size_t site_idx = 0;
for (const int symbol : pattern) {
Assert(symbol >= 0, "Negative symbol!");
if (symbol == MmappedNucleotidePLV::base_count_) { // Gap character.
pv.col(site_idx).setConstant(1.);
} else if (symbol < MmappedNucleotidePLV::base_count_) {
pv(symbol, site_idx) = 1.;
}
site_idx++;
}
};
for (const auto taxon_id : GetDAG().GetTaxonIds()) {
const EdgeIdVector leaf_edge_ids = GetDAG().GetLeafEdgeIds(taxon_id);
PVId src_pvid;
if (leaf_edge_ids.size() > 0) {
src_pvid = GetPVs().GetPVIndex(PLVType::P, leaf_edge_ids[0]);
BuildPVForSitePattern(taxon_id, src_pvid);
}
for (const auto edge_id : leaf_edge_ids) {
for (const auto pv_type : {PLVType::P}) {
const auto dest_pvid = GetPVs().GetPVIndex(pv_type, edge_id);
TakePVValue(dest_pvid, src_pvid);
}
}
}
}
// Seed the RHat PLVs of the rootsplit edges (or of the explicitly given edges)
// with the substitution model's stationary distribution, which serves as the
// root prior for the leafward likelihood recursion.
void TPEvalEngineViaLikelihood::PopulateRootPVsWithStationaryDistribution(
    std::optional<EdgeIdVector> opt_edge_ids) {
  // Fill every site column of the PV with the stationary base frequencies.
  auto AssignPVToStationaryDistribution = [this](const PVId pv_id) {
    auto &pv = GetPVs().GetPV(pv_id);
    for (Eigen::Index row_idx = 0; row_idx < pv.rows(); ++row_idx) {
      pv.row(row_idx).array() = stationary_distribution_(row_idx);
    }
  };
  EdgeIdVector rootsplit_edge_ids =
      (opt_edge_ids.has_value() ? opt_edge_ids.value()
                                : GetDAG().GetRootsplitEdgeIds());
  for (const auto edge_id : rootsplit_edge_ids) {
    for (const auto pv_type : {PLVType::RHat}) {
      const auto pvid = GetPVs().GetPVIndex(pv_type, edge_id);
      AssignPVToStationaryDistribution(pvid);
    }
  }
}
// Compute top-tree log-likelihood scores. For each edge (all DAG edges in
// leafward traversal order unless an explicit list is given), compute the
// per-pattern log-likelihoods across the edge, then reduce each edge's row by
// the site-pattern weights into the top-tree score vector.
void TPEvalEngineViaLikelihood::ComputeScores(
    std::optional<EdgeIdVector> opt_edge_ids) {
  EdgeIdVector edge_ids = opt_edge_ids.has_value()
                              ? opt_edge_ids.value()
                              : GetDAG().LeafwardEdgeTraversalTrace(true);
  // Compute likelihoods for each edge.
  for (const auto edge_id : edge_ids) {
    const auto &[parent_pvid, child_pvid] = GetPrimaryPVIdsOfEdge(edge_id);
    ComputeLikelihood(edge_id, child_pvid, parent_pvid);
  }
  // Weighted reduction over site patterns, for all real (non-spare) edges.
  auto &top_tree_likelihoods = GetTopTreeScores();
  top_tree_likelihoods =
      GetMatrix().block(0, 0, GetTPEngine().GetEdgeCount(), GetMatrix().cols()) *
      GetTPEngine().GetSitePatternWeights();
}
// Register the objective-function callbacks used by the branch length
// optimizer. The Brent and Newton-Raphson callbacks receive the candidate
// branch length in log space and convert derivatives via the chain rule for
// x = exp(y); the gradient-ascent callbacks work in linear space.
void TPEvalEngineViaLikelihood::InitializeBranchLengthHandler() {
  // Set Nongradient Brent.
  // Note: edge_id is unused here; the transition matrix is set directly from
  // the candidate log-branch-length without touching stored branch lengths.
  DAGBranchHandler::NegLogLikelihoodFunc brent_nongrad_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        SetTransitionMatrixToHaveBranchLength(exp(log_branch_length));
        PreparePerPatternLogLikelihoodsForEdge(parent_id, child_id);
        double result =
            -per_pattern_log_likelihoods_.dot(GetTPEngine().GetSitePatternWeights());
        return result;
      };
  branch_handler_.SetBrentFunc(brent_nongrad_func);
  // Set Gradient Brent.
  DAGBranchHandler::NegLogLikelihoodAndDerivativeFunc brent_grad_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        double branch_length = exp(log_branch_length);
        branch_handler_(edge_id) = branch_length;
        auto [log_likelihood, log_likelihood_derivative] =
            this->LogLikelihoodAndDerivative(edge_id);
        // Negate for minimization; scale derivative by d(x)/d(y) = x.
        return std::make_pair(-log_likelihood,
                              -branch_length * log_likelihood_derivative);
      };
  branch_handler_.SetBrentWithGradientFunc(brent_grad_func);
  // Set Gradient Ascent.
  DAGBranchHandler::LogLikelihoodAndDerivativeFunc grad_ascent_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double branch_length) {
        branch_handler_(edge_id) = branch_length;
        return this->LogLikelihoodAndDerivative(edge_id);
      };
  branch_handler_.SetGradientAscentFunc(grad_ascent_func);
  // Set Logspace Gradient Ascent.
  DAGBranchHandler::LogLikelihoodAndDerivativeFunc logspace_grad_ascent_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double branch_length) {
        branch_handler_(edge_id) = branch_length;
        return this->LogLikelihoodAndDerivative(edge_id);
      };
  branch_handler_.SetLogSpaceGradientAscentFunc(logspace_grad_ascent_func);
  // Set Newton-Raphson.
  DAGBranchHandler::LogLikelihoodAndFirstTwoDerivativesFunc newton_raphson_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        double x = exp(log_branch_length);
        branch_handler_(edge_id) = x;
        auto [f_x, f_prime_x, f_double_prime_x] =
            this->LogLikelihoodAndFirstTwoDerivatives(edge_id);
        // x = exp(y) --> f'(exp(y)) = exp(y) * f'(exp(y)) = x * f'(x)
        double f_prime_y = x * f_prime_x;
        // Second derivative in log space: x*f'(x) + x^2*f''(x).
        double f_double_prime_y = f_prime_y + std::pow(x, 2) * f_double_prime_x;
        return std::make_tuple(f_x, f_prime_y, f_double_prime_y);
      };
  branch_handler_.SetNewtonRaphsonFunc(newton_raphson_func);
}
// Run up to GetOptimizationMaxIteration() passes of branch-length optimization
// over every edge of the DAG. Convergence checking defaults to off for the
// very first optimization round and on for subsequent rounds.
void TPEvalEngineViaLikelihood::BranchLengthOptimization(
    std::optional<bool> check_branch_convergence) {
  const bool do_convergence_check = check_branch_convergence.has_value()
                                        ? check_branch_convergence.value()
                                        : !IsFirstOptimization();
  // Visit edges in rootward-traversal order on each pass.
  const EdgeIdVector edge_ids = GetDAG().RootwardEdgeTraversalTrace(false);
  for (size_t pass = 0; pass < GetOptimizationMaxIteration(); pass++) {
    for (const EdgeId edge_id : edge_ids) {
      BranchLengthOptimization(edge_id, do_convergence_check);
    }
    IncrementOptimizationCount();
  }
}
// Optimize (or with update_only, just refresh the PVs around) a single edge's
// branch length. The surrounding P/R PLVs are recomputed before optimizing so
// the objective sees current values, and the edge's leafward PVs are refreshed
// afterward to propagate the new branch length. Root edges
// (choices.parent == NoId) are skipped entirely.
void TPEvalEngineViaLikelihood::BranchLengthOptimization(
    const EdgeId edge_id, const bool check_branch_convergence, const bool update_only) {
  const auto &choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  if (choices.parent != NoId) {
    // Update parent's RFocal PLV from previous branch length changes.
    PopulateRootwardPVForEdge(edge_id);
    PopulateRootwardPVForEdge(choices.parent);
    PopulateLeafwardPVForEdge(choices.parent);
    // Optimize branch length.
    if (!update_only) {
      const auto &[parent_rfocal_pvid, child_p_pvid] = GetPrimaryPVIdsOfEdge(edge_id);
      if (parent_rfocal_pvid != NoId) {
        branch_handler_.OptimizeBranchLength(edge_id, parent_rfocal_pvid, child_p_pvid,
                                             check_branch_convergence);
      }
    }
    // Update parent's PHatFocal PLV for new branch length.
    PopulateLeafwardPVForEdge(edge_id);
  }
}
void TPEvalEngineViaLikelihood::EvolvePPVUpEdge(const EdgeId rootward_edge_id,
const EdgeId leafward_edge_id) {
const auto focal = GetDAG().GetFocalClade(leafward_edge_id);
const PVId parent_phatfocal_pvid =
GetPVs().GetPVIndex(PLVTypeEnum::PPLVType(focal), rootward_edge_id);
const PVId child_p_pvid = GetPVs().GetPVIndex(PLVType::P, leafward_edge_id);
SetToEvolvedPV(parent_phatfocal_pvid, leafward_edge_id, child_p_pvid);
}
void TPEvalEngineViaLikelihood::EvolveRPVDownEdge(const EdgeId rootward_edge_id,
const EdgeId leafward_edge_id) {
const auto focal = GetDAG().GetFocalClade(leafward_edge_id);
const PVId parent_rfocal_pvid =
GetPVs().GetPVIndex(PLVTypeEnum::RPLVType(focal), rootward_edge_id);
const PVId child_rhat_pvid = GetPVs().GetPVIndex(PLVType::RHat, leafward_edge_id);
SetToEvolvedPV(child_rhat_pvid, leafward_edge_id, parent_rfocal_pvid);
}
// Return the (parent RFocal, child P) PV pair whose combination yields the
// likelihood across `edge_id`. For a rootsplit edge (no parent in the choice
// map), the stationary-distribution RHat of the first rootsplit edge stands in
// for the parent-side PV.
std::pair<PVId, PVId> TPEvalEngineViaLikelihood::GetPrimaryPVIdsOfEdge(
    const EdgeId edge_id) const {
  const auto &choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  PVId parent_rfocal_pvid{NoId};
  if (choices.parent == NoId) {
    EdgeId root_id = GetDAG().GetFirstRootsplitEdgeId();
    parent_rfocal_pvid = GetPVs().GetPVIndex(PLVType::RHat, root_id);
  } else {
    parent_rfocal_pvid = GetPVs().GetPVIndex(
        PLVTypeEnum::RPLVType(GetDAG().GetFocalClade(edge_id)), choices.parent);
  }
  PVId child_p_pvid = GetPVs().GetPVIndex(PLVType::P, edge_id);
  return std::make_pair(parent_rfocal_pvid, child_p_pvid);
}
// Collect all PV ids in the local neighborhood of `edge_id` — grandparent,
// parent, child, and grandchild slots — as dictated by the edge's choice map.
// NOTE(review): grandparent_rhat_ is indexed with choices.parent before the
// IsEdgeRoot check; confirm choices.parent is always a valid edge id here.
LocalPVIds TPEvalEngineViaLikelihood::GetLocalPVIdsOfEdge(const EdgeId edge_id) const {
  LocalPVIds pv_ids;
  const auto &choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  // Grandparent PVIds (if parent edge is not a rootsplit).
  pv_ids.grandparent_rhat_ = GetPVs().GetPVIndex(PLVType::RHat, choices.parent);
  if (!GetDAG().IsEdgeRoot(choices.parent)) {
    const auto &parent_choices =
        GetTPEngine().GetChoiceMap().GetEdgeChoice(choices.parent);
    const auto focal = GetDAG().GetFocalClade(choices.parent);
    const auto sister = Bitset::Opposite(focal);
    pv_ids.grandparent_rfocal_ =
        GetPVs().GetPVIndex(PLVTypeEnum::RPLVType(focal), parent_choices.parent);
    pv_ids.grandparent_rsister_ =
        GetPVs().GetPVIndex(PLVTypeEnum::RPLVType(sister), parent_choices.parent);
  }
  // Parent PVIds
  pv_ids.parent_p_ = GetPVs().GetPVIndex(PLVType::P, choices.parent);
  pv_ids.parent_phatfocal_ = GetPVs().GetPVIndex(
      PLVTypeEnum::PPLVType(GetDAG().GetFocalClade(edge_id)), choices.parent);
  pv_ids.parent_phatsister_ = GetPVs().GetPVIndex(
      PLVTypeEnum::PPLVType(GetDAG().GetSisterClade(edge_id)), choices.parent);
  pv_ids.parent_rhat_ = GetPVs().GetPVIndex(PLVType::RHat, choices.parent);
  pv_ids.parent_rfocal_ = GetPVs().GetPVIndex(
      PLVTypeEnum::RPLVType(GetDAG().GetFocalClade(edge_id)), choices.parent);
  pv_ids.parent_rsister_ = GetPVs().GetPVIndex(
      PLVTypeEnum::RPLVType(GetDAG().GetSisterClade(edge_id)), choices.parent);
  // Child PVIds
  pv_ids.child_p_ = GetPVs().GetPVIndex(PLVType::P, edge_id);
  pv_ids.child_phatleft_ = GetPVs().GetPVIndex(PLVType::PHatLeft, edge_id);
  pv_ids.child_phatright_ = GetPVs().GetPVIndex(PLVType::PHatRight, edge_id);
  pv_ids.child_rhat_ = GetPVs().GetPVIndex(PLVType::RHat, edge_id);
  pv_ids.child_rleft_ = GetPVs().GetPVIndex(PLVType::RLeft, edge_id);
  pv_ids.child_rright_ = GetPVs().GetPVIndex(PLVType::RRight, edge_id);
  // Grandchild PVIds
  pv_ids.sister_p_ = GetPVs().GetPVIndex(PLVType::P, choices.sister);
  pv_ids.leftchild_p_ = GetPVs().GetPVIndex(PLVType::P, choices.left_child);
  pv_ids.rightchild_p_ = GetPVs().GetPVIndex(PLVType::P, choices.right_child);
  return pv_ids;
}
// Translate pre-NNI local PV ids into the post-NNI layout. Only the clades
// permuted by the NNI move are affected: the sister/left-child/right-child P
// PVs and the corresponding R PVs are permuted through `clade_map`; all other
// PV ids carry over unchanged.
LocalPVIds TPEvalEngineViaLikelihood::RemapLocalPVIdsForPostNNI(
    const LocalPVIds &pre_pv_ids, const NNIOperation::NNICladeArray &clade_map) const {
  using NNIClade = NNIOperation::NNIClade;
  using NNICladeEnum = NNIOperation::NNICladeEnum;
  // Start from a copy; only the permuted slots are overwritten below.
  LocalPVIds post_pv_ids(pre_pv_ids);
  NNICladeEnum::Array<PVId> pre_id_map;
  // Permute the P PVs of the three movable clades.
  pre_id_map[clade_map[NNIClade::ParentSister]] = pre_pv_ids.sister_p_;
  pre_id_map[clade_map[NNIClade::ChildLeft]] = pre_pv_ids.leftchild_p_;
  pre_id_map[clade_map[NNIClade::ChildRight]] = pre_pv_ids.rightchild_p_;
  post_pv_ids.sister_p_ = pre_id_map[NNIClade::ParentSister];
  post_pv_ids.leftchild_p_ = pre_id_map[NNIClade::ChildLeft];
  post_pv_ids.rightchild_p_ = pre_id_map[NNIClade::ChildRight];
  // Permute the matching R PVs the same way (reusing the scratch array).
  pre_id_map[clade_map[NNIClade::ParentSister]] = pre_pv_ids.parent_rsister_;
  pre_id_map[clade_map[NNIClade::ChildLeft]] = pre_pv_ids.child_rleft_;
  pre_id_map[clade_map[NNIClade::ChildRight]] = pre_pv_ids.child_rright_;
  post_pv_ids.parent_rsister_ = pre_id_map[NNIClade::ParentSister];
  post_pv_ids.child_rleft_ = pre_id_map[NNIClade::ChildLeft];
  post_pv_ids.child_rright_ = pre_id_map[NNIClade::ChildRight];
  return post_pv_ids;
}
// Overwrite the destination PV with a copy of the source PV's values.
void TPEvalEngineViaLikelihood::TakePVValue(const PVId dest_id, const PVId src_id) {
  auto &dest_pv = GetPVs().GetPV(dest_id);
  const auto &src_pv = GetPVs().GetPV(src_id);
  dest_pv.array() = src_pv.array();
}
// Store the elementwise product of the two source PVs into the destination PV.
void TPEvalEngineViaLikelihood::MultiplyPVs(const PVId dest_id, const PVId src1_id,
                                            const PVId src2_id) {
  auto &dest_pv = GetPVs().GetPV(dest_id);
  const auto &lhs_pv = GetPVs().GetPV(src1_id);
  const auto &rhs_pv = GetPVs().GetPV(src2_id);
  dest_pv.array() = lhs_pv.array() * rhs_pv.array();
  // #462: Need to add rescaling to PVs.
}
// Compute per-pattern log-likelihoods across the branch stored for `dest_id`
// by evolving the child-side PV up the branch against the parent-side PV, and
// record them in row `dest_id` of the log-likelihood matrix.
void TPEvalEngineViaLikelihood::ComputeLikelihood(const EdgeId dest_id,
                                                  const PVId child_id,
                                                  const PVId parent_id) {
  SetTransitionMatrixToHaveBranchLength(branch_handler_(dest_id));
  PreparePerPatternLogLikelihoodsForEdge(parent_id, child_id);
  log_likelihoods_.row(dest_id.value_) = per_pattern_log_likelihoods_;
}
// dest <- P(t) * src: evolve the source PV along the branch length stored for
// `edge_id` and write the result into the destination PV.
void TPEvalEngineViaLikelihood::SetToEvolvedPV(const PVId dest_id, const EdgeId edge_id,
                                               const PVId src_id) {
  SetTransitionMatrixToHaveBranchLength(branch_handler_(edge_id));
  auto &dest_pv = GetPVs().GetPV(dest_id);
  dest_pv.array() = (transition_matrix_ * GetPVs().GetPV(src_id));
}
// dest <- dest o (P(t) * src): evolve the source PV along the branch length
// stored for `edge_id` and multiply it elementwise into the destination PV.
void TPEvalEngineViaLikelihood::MultiplyWithEvolvedPV(const PVId dest_id,
                                                      const EdgeId edge_id,
                                                      const PVId src_id) {
  SetTransitionMatrixToHaveBranchLength(branch_handler_(edge_id));
  GetPVs().GetPV(dest_id).array() *=
      (transition_matrix_ * GetPVs().GetPV(src_id)).array();
}
// Compute the log-likelihood of `edge_id` and its first derivative with
// respect to the branch length (used by gradient-based optimization).
DoublePair TPEvalEngineViaLikelihood::LogLikelihoodAndDerivative(const EdgeId edge_id) {
  const auto &[parent_pvid, child_pvid] = GetPrimaryPVIdsOfEdge(edge_id);
  SetTransitionAndDerivativeMatricesToHaveBranchLength(branch_handler_(edge_id));
  PreparePerPatternLogLikelihoodsForEdge(parent_pvid, child_pvid);
  // The prior is expressed using the current value of q_.
  // The phylogenetic component of the likelihood is weighted with the number of times
  // we see the site patterns.
  const double log_likelihood =
      per_pattern_log_likelihoods_.dot(GetTPEngine().GetSitePatternWeights());
  // The per-site likelihood derivative is calculated in the same way as the per-site
  // likelihood, but using the derivative matrix instead of the transition matrix.
  // We first prepare two useful vectors _without_ likelihood rescaling, because the
  // rescalings cancel out in the ratio below.
  PrepareUnrescaledPerPatternLikelihoodDerivatives(parent_pvid, child_pvid);
  PrepareUnrescaledPerPatternLikelihoods(parent_pvid, child_pvid);
  // If l_i is the per-site likelihood, the derivative of log(l_i) is the derivative
  // of l_i divided by l_i.
  per_pattern_likelihood_derivative_ratios_ =
      per_pattern_likelihood_derivatives_.array() / per_pattern_likelihoods_.array();
  const double log_likelihood_derivative =
      per_pattern_likelihood_derivative_ratios_.dot(
          GetTPEngine().GetSitePatternWeights());
  return {log_likelihood, log_likelihood_derivative};
}
// Compute the log-likelihood of `edge_id` together with its first and second
// derivatives with respect to the branch length (used by Newton-Raphson).
std::tuple<double, double, double>
TPEvalEngineViaLikelihood::LogLikelihoodAndFirstTwoDerivatives(const EdgeId edge_id) {
  const auto &[parent_pvid, child_pvid] = GetPrimaryPVIdsOfEdge(edge_id);
  SetTransitionAndDerivativeMatricesToHaveBranchLength(branch_handler_(edge_id));
  PreparePerPatternLogLikelihoodsForEdge(parent_pvid, child_pvid);
  const double log_likelihood =
      per_pattern_log_likelihoods_.dot(GetTPEngine().GetSitePatternWeights());
  // The per-site likelihood derivative is calculated in the same way as the per-site
  // likelihood, but using the derivative matrix instead of the transition matrix.
  // We first prepare two useful vectors _without_ likelihood rescaling, because the
  // rescalings cancel out in the ratio below.
  PrepareUnrescaledPerPatternLikelihoodDerivatives(parent_pvid, child_pvid);
  PrepareUnrescaledPerPatternLikelihoods(parent_pvid, child_pvid);
  // If l_i is the per-site likelihood, the derivative of log(l_i) is the derivative
  // of l_i divided by l_i.
  per_pattern_likelihood_derivative_ratios_ =
      per_pattern_likelihood_derivatives_.array() / per_pattern_likelihoods_.array();
  const double log_likelihood_gradient = per_pattern_likelihood_derivative_ratios_.dot(
      GetTPEngine().GetSitePatternWeights());
  // Second derivative is calculated the same way, but has an extra term due to
  // the product rule.
  // (log l)'' = (l'' * l - (l')^2) / l^2, computed per pattern below.
  PrepareUnrescaledPerPatternLikelihoodSecondDerivatives(parent_pvid, child_pvid);
  per_pattern_likelihood_second_derivative_ratios_ =
      (per_pattern_likelihood_second_derivatives_.array() *
           per_pattern_likelihoods_.array() -
       per_pattern_likelihood_derivatives_.array() *
           per_pattern_likelihood_derivatives_.array()) /
      (per_pattern_likelihoods_.array() * per_pattern_likelihoods_.array());
  const double log_likelihood_hessian =
      per_pattern_likelihood_second_derivative_ratios_.dot(
          GetTPEngine().GetSitePatternWeights());
  return std::make_tuple(log_likelihood, log_likelihood_gradient,
                         log_likelihood_hessian);
}
// Load transition_matrix_ with exp(t * Q) for branch length t, assembled from
// the substitution model's eigendecomposition E * diag(exp(t * lambda)) * E^-1.
void TPEvalEngineViaLikelihood::SetTransitionMatrixToHaveBranchLength(
    double branch_length) {
  auto diag = diagonal_matrix_.diagonal();
  diag = (branch_length * eigenvalues_).array().exp();
  transition_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
}
// Build the transition matrix exp(tQ) together with its first and second
// derivatives with respect to branch length t (Q exp(tQ) and Q^2 exp(tQ)),
// all assembled from the eigendecomposition of Q.
void TPEvalEngineViaLikelihood::SetTransitionAndDerivativeMatricesToHaveBranchLength(
    double branch_length) {
  // Cache exp(t * lambda_i); it is reused for all three matrices.
  diagonal_vector_ = (branch_length * eigenvalues_).array().exp();
  diagonal_matrix_.diagonal() = diagonal_vector_;
  transition_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
  // Now calculating derivative matrix
  diagonal_matrix_.diagonal() = eigenvalues_.array() * diagonal_vector_.array();
  derivative_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
  // Now calculating hessian matrix
  diagonal_matrix_.diagonal() =
      eigenvalues_.array() * eigenvalues_.array() * diagonal_vector_.array();
  hessian_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
}
// Load transition_matrix_ with the transpose of exp(t * Q), i.e.
// (E^-1)^T * diag(exp(t * lambda)) * E^T.
void TPEvalEngineViaLikelihood::SetTransitionMatrixToHaveBranchLengthAndTranspose(
    double branch_length) {
  auto diag = diagonal_matrix_.diagonal();
  diag = (branch_length * eigenvalues_).array().exp();
  transition_matrix_ =
      inverse_eigenmatrix_.transpose() * diagonal_matrix_ * eigenmatrix_.transpose();
}
// ** TPEvalEngineViaParsimony

// Construct a parsimony-based TP evaluation engine backed by a memory-mapped
// PV store sized for every edge (including leaf subsplits), one column per
// site pattern, with a 2.0 over-allocation (resizing) factor. Node and edge
// data are then grown to match the DAG (on_init = true).
TPEvalEngineViaParsimony::TPEvalEngineViaParsimony(TPEngine &tp_engine,
                                                   const std::string &mmap_path)
    : TPEvalEngine(tp_engine),
      parsimony_pvs_(mmap_path, GetDAG().EdgeCountWithLeafSubsplits(),
                     GetSitePattern().PatternCount(), 2.0) {
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
}
// Fully (re)build the parsimony engine's state from scratch: clear all PVs,
// seed the leaves from the site patterns, run both traversal passes, and
// compute the per-edge scores.
void TPEvalEngineViaParsimony::Initialize() {
  // Set all PVs to Zero
  ZeroPVs();
  // Populate Leaves with Site Patterns.
  PopulateLeafParsimonyPVsWithSitePatterns();
  // Populate rootward and leafward PVs.
  PopulatePVs();
  // Compute Scores.
  ComputeScores();
}
// Reset every parsimony PV of every edge to all zeros.
void TPEvalEngineViaParsimony::ZeroPVs() {
  const auto pv_count = GetPVs().GetCount();
  for (const auto pv_type : PSVTypeEnum::Iterator()) {
    for (EdgeId edge_id = 0; edge_id < pv_count; edge_id++) {
      GetPVs().GetPV(pv_type, edge_id).setZero();
    }
  }
}
// Fill all parsimony PVs with two sweeps over the DAG: a rootward pass for the
// P PVs followed by a leafward pass for the R PVs.
void TPEvalEngineViaParsimony::PopulatePVs() {
  PopulateRootwardPVs();  // Rootward pass (P PVs).
  PopulateLeafwardPVs();  // Leafward pass (R PVs).
}
// Fill rootward (P) parsimony PVs by visiting nodes from the leaves upward.
void TPEvalEngineViaParsimony::PopulateRootwardPVs() {
  for (const auto node_id : GetDAG().RootwardNodeTraversalTrace(false)) {
    PopulateRootwardParsimonyPVForNode(node_id);
  }
}
// Fill leafward (R) parsimony PVs by visiting nodes from the root downward.
void TPEvalEngineViaParsimony::PopulateLeafwardPVs() {
  for (const auto node_id : GetDAG().LeafwardNodeTraversalTrace(true)) {
    PopulateLeafwardParsimonyPVForNode(node_id);
  }
}
// The parsimony engine keeps no per-node state, so growing node data is a
// no-op; the parameters exist only to satisfy the common engine interface.
void TPEvalEngineViaParsimony::GrowNodeData(
    const size_t node_count, std::optional<const Reindexer> node_reindexer,
    std::optional<const size_t> explicit_alloc, const bool on_init) {
  // No node data to resize.
}
// Grow (and optionally reindex) all per-edge storage — the top-tree score
// vector and the parsimony PVs — to cover `edge_count` edges. New score slots
// are filled with -inf; when an edge reindexer is supplied, existing entries
// are remapped so they stay aligned with the DAG's edge ids.
void TPEvalEngineViaParsimony::GrowEdgeData(
    const size_t edge_count, std::optional<const Reindexer> edge_reindexer,
    std::optional<const size_t> explicit_alloc, const bool on_init) {
  // Build resizer for resizing data.
  Resizer resizer =
      Resizer(GetTPEngine().GetEdgeCount(), GetTPEngine().GetSpareEdgeCount(),
              GetTPEngine().GetAllocatedEdgeCount(), edge_count, std::nullopt,
              explicit_alloc, GetTPEngine().GetResizingFactor());
  resizer.ApplyResizeToEigenVector<EigenVectorXd, double>(GetTopTreeScores(),
                                                          DOUBLE_NEG_INF);
  GetPVs().Resize(resizer.GetNewCount(), resizer.GetNewAlloc(), resizer.GetNewSpare());
  // Reindex work space to realign with DAG.
  if (edge_reindexer.has_value()) {
    auto pv_reindexer = GetPVs().BuildPVReindexer(
        edge_reindexer.value(), resizer.GetOldCount(), resizer.GetNewCount());
    GetPVs().Reindex(pv_reindexer);
    Reindexer::ReindexInPlace<EigenVectorXd, double>(
        GetTopTreeScores(), edge_reindexer.value(), resizer.GetNewCount());
  }
}
// Ensure at least `new_node_spare_count` spare node slots exist in the TP
// engine, then bring the local node data up to the engine's node count.
void TPEvalEngineViaParsimony::GrowSpareNodeData(const size_t new_node_spare_count) {
  const bool needs_growth =
      new_node_spare_count > GetTPEngine().GetSpareNodeCount();
  if (needs_growth) {
    GetTPEngine().GrowSpareNodeData(new_node_spare_count);
  }
  GrowNodeData(GetTPEngine().GetNodeCount());
}
// Ensure at least `new_edge_spare_count` spare edge slots exist in the TP
// engine, then bring the local edge data up to the engine's edge count.
void TPEvalEngineViaParsimony::GrowSpareEdgeData(const size_t new_edge_spare_count) {
  const bool needs_growth =
      new_edge_spare_count > GetTPEngine().GetSpareEdgeCount();
  if (needs_growth) {
    GetTPEngine().GrowSpareEdgeData(new_edge_spare_count);
  }
  GrowEdgeData(GetTPEngine().GetEdgeCount());
}
// Resize engine data to match the current DAG after it has grown, remapping
// existing entries when reindexers are provided.
void TPEvalEngineViaParsimony::GrowEngineForDAG(
    std::optional<Reindexer> node_reindexer, std::optional<Reindexer> edge_reindexer) {
  const auto &host_dag = GetTPEngine().GetDAG();
  const size_t new_node_count = host_dag.NodeCount();
  const size_t new_edge_count = host_dag.EdgeCountWithLeafSubsplits();
  GrowNodeData(new_node_count, node_reindexer, std::nullopt, false);
  GrowEdgeData(new_edge_count, edge_reindexer, std::nullopt, false);
}
// Reserve working space for evaluating a set of adjacent NNIs. With
// via_reference, only spare (temporary) slots are grown — one region per NNI
// when use_unique_temps, otherwise a single shared region — plus one spare
// edge slot per NNI. Without via_reference, the engine is grown to cover the
// full graft DAG instead.
void TPEvalEngineViaParsimony::GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                                         const bool via_reference,
                                                         const bool use_unique_temps) {
  if (via_reference) {
    if (use_unique_temps) {
      GrowSpareNodeData(spare_nodes_per_nni_ * adjacent_nnis.size());
      GrowSpareEdgeData(spare_edges_per_nni_ * adjacent_nnis.size());
    } else {
      GrowSpareNodeData(spare_nodes_per_nni_);
      GrowSpareEdgeData(spare_edges_per_nni_);
    }
    GrowSpareEdgeData(adjacent_nnis.size());
  } else {
    GrowNodeData(GetGraftDAG().NodeCountWithoutDAGRoot());
    GrowEdgeData(GetGraftDAG().EdgeCountWithLeafSubsplits());
  }
}
// After a node pair (an accepted NNI) has been added to the DAG: seed the new
// edges' data from the corresponding pre-NNI edges, then repopulate the
// parsimony PVs over the whole DAG.
void TPEvalEngineViaParsimony::UpdateEngineAfterDAGAddNodePair(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    std::optional<size_t> new_tree_id) {
  // Copy over edge data.
  GetTPEngine().CopyOverEdgeDataFromPreNNIToPostNNI(
      post_nni, pre_nni,
      [this](const EdgeId src, const EdgeId dest) { CopyEdgeData(src, dest); },
      new_tree_id);
  // Populate PVs.
  PopulatePVs();
}
// Rebuild the parsimony engine after the DAG has been structurally modified.
// This implementation rebuilds everything from scratch rather than using the
// reindexers / NNI map, so those parameters are intentionally unused.
// Note: Initialize() already ends by calling ComputeScores(), so the separate
// ComputeScores() call the previous version made was fully redundant (it
// recomputed every edge score a second time) and has been removed.
void TPEvalEngineViaParsimony::UpdateEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  Initialize();
}
// Return the cached top-tree score for `edge_id`; no parsimony-specific
// behavior beyond the base-class lookup.
double TPEvalEngineViaParsimony::GetTopTreeScoreWithEdge(const EdgeId edge_id) const {
  return TPEvalEngine::GetTopTreeScoreWithEdge(edge_id);
}
// Score a proposed (not-yet-added) NNI under parsimony: map the pre-NNI's
// chosen adjacent edges onto the post-NNI topology via the clade map, build
// temporary Q/Pleft/Pright Sankoff partials in spare PV slots, and return the
// resulting parsimony score.
double TPEvalEngineViaParsimony::GetTopTreeScoreWithProposedNNI(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map) {
  using NNIClade = NNIOperation::NNIClade;
  using NNICladeEnum = NNIOperation::NNICladeEnum;
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits());
  // Node ids from pre-NNI in DAG.
  NNICladeEnum::Array<EdgeId> pre_id_map;
  NNICladeEnum::Array<EdgeId> post_id_map;
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
  // Create mapping between pre-NNI and post-NNI.
  const auto clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(pre_nni, post_nni);
  // PLV ids from pre-NNI in DAG.
  auto choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(pre_edge_id);
  pre_id_map[NNIClade::ParentFocal] = choices.parent;
  pre_id_map[NNIClade::ParentSister] = choices.sister;
  pre_id_map[NNIClade::ChildLeft] = choices.left_child;
  pre_id_map[NNIClade::ChildRight] = choices.right_child;
  // Use clade mapping to reference pre-NNI PVs for post-NNI PVs.
  for (const auto nni_clade : NNICladeEnum::Iterator()) {
    post_id_map[nni_clade] = pre_id_map[clade_map[nni_clade]];
  }
  // Get temp PVs for post-NNI PVs and edge lengths (3 spare slots per NNI).
  const PVId q_pvid = PVId(spare_offset * 3);
  const PVId pleft_pvid = PVId((spare_offset * 3) + 1);
  const PVId pright_pvid = PVId((spare_offset * 3) + 2);
  // Compute Pleft and Pright.
  for (size_t pattern_idx = 0; pattern_idx < GetSitePattern().PatternCount();
       pattern_idx++) {
    GetPVs().GetPV(pleft_pvid).col(pattern_idx) =
        ParentPartial(TotalPPartial(post_id_map[NNIClade::ChildLeft], pattern_idx));
    GetPVs().GetPV(pright_pvid).col(pattern_idx) =
        ParentPartial(TotalPPartial(post_id_map[NNIClade::ChildRight], pattern_idx));
  }
  // Compute Q.
  for (size_t pattern_idx = 0; pattern_idx < GetSitePattern().PatternCount();
       pattern_idx++) {
    auto partials_from_parent =
        ParentPartial(GetPVs()
                          .GetPV(PSVType::Q, post_id_map[NNIClade::ParentFocal])
                          .col(pattern_idx));
    // NOTE(review): each iteration of this loop overwrites the same q_pvid
    // column, so only the last child's (sister = left child) contribution
    // survives — verify whether Q should instead accumulate or be stored
    // per-child.
    for (const auto child_id :
         {post_id_map[NNIClade::ChildLeft], post_id_map[NNIClade::ChildRight]}) {
      EdgeId sister_id = ((child_id == post_id_map[NNIClade::ChildLeft])
                              ? post_id_map[NNIClade::ChildRight]
                              : post_id_map[NNIClade::ChildLeft]);
      auto partials_from_sister = ParentPartial(TotalPPartial(sister_id, pattern_idx));
      GetPVs().GetPV(q_pvid).col(pattern_idx) =
          partials_from_sister + partials_from_parent;
    }
  }
  // Compute total parsimony.
  double score = ParsimonyScore(q_pvid, pleft_pvid, pright_pvid);
  return score;
}
// Delegates edge-data copying to the base class.
void TPEvalEngineViaParsimony::CopyEdgeData(const EdgeId src_edge_id,
                                            const EdgeId dest_edge_id) {
  TPEvalEngine::CopyEdgeData(src_edge_id, dest_edge_id);
}
// Initialize the leaf-edge Sankoff P-partials from the site pattern alignment:
// an observed state gets cost 0, all other states get big_double_ (an
// effective infinity); gaps/ambiguities get an all-zero column.
void TPEvalEngineViaParsimony::PopulateLeafParsimonyPVsWithSitePatterns() {
  // first check that the psv_handler has been resized to deal with the leaf labels
  Assert(GetPVs().GetCount() >= GetSitePattern().TaxonCount(),
         "Error in SankoffHandler::GenerateLeafPartials: "
         "parsimony_pvs_ should be initialized to accomodate"
         "the number of leaf nodes in the GetSitePattern().");
  // Iterate over all leaf nodes to instantiate each with P partial values
  for (const auto node_id : GetDAG().GetLeafNodeIds()) {
    for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
      // Each rootward (parent-side) edge of the leaf receives the partials.
      for (const auto adj_node_id :
           GetDAG().GetDAGNode(node_id).GetNeighbors(Direction::Rootward, clade)) {
        const auto edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_id);
        SankoffPartial leaf_partials(state_count_, GetSitePattern().PatternCount());
        // set leaf node partial to have big_double_ infinity substitute
        leaf_partials.block(0, 0, state_count_, GetSitePattern().PatternCount())
            .fill(big_double_);
        // now fill in appropriate entries of the leaf-partial where non-infinite
        for (size_t pattern_idx = 0; pattern_idx < GetSitePattern().PatternCount();
             pattern_idx++) {
          auto site_val =
              GetSitePattern().GetPatternSymbol(node_id.value_, pattern_idx);
          if (site_val < state_count_) {
            // Observed state: zero cost for that state only.
            leaf_partials(site_val, pattern_idx) = 0.;
          } else if (site_val == state_count_) {
            // Leaves with gaps in sequence and ambiguous nucleotides are assigned
            // sankoff partial vector [0, 0, 0, 0] at the corresponding site.
            leaf_partials.col(pattern_idx).fill(0);
          } else {
            Failwith(
                "Error in SankoffHandler::GenerateLeafPartials: Invalid nucleotide "
                "state in sequence alignment.");
          }
        }
        GetPVs().GetPV(PSVType::PLeft, edge_id) = leaf_partials;
        GetPVs().GetPV(PSVType::PRight, edge_id).fill(0);
      }
    }
  }
}
void TPEvalEngineViaParsimony::PopulateRootwardParsimonyPVForNode(
const NodeId node_id) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
for (const auto adj_node_id :
GetDAG().GetDAGNode(node_id).GetNeighbors(Direction::Rootward, clade)) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_id);
// Populate edge PLV by accumulating parsimony up given edge.
PopulateRootwardParsimonyPVForEdge(edge_id);
}
}
}
void TPEvalEngineViaParsimony::PopulateRootwardParsimonyPVForEdge(
    const EdgeId edge_id) {
  // Pull parsimony partials up from the edge's chosen children (no-op when the
  // choice map has no children for this edge, e.g. at a leaf).
  const auto choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
  if (choices.left_child == NoId || choices.right_child == NoId) {
    return;
  }
  const EdgeId left_child_edge_id = choices.left_child;
  const EdgeId right_child_edge_id = choices.right_child;
  PopulateRootwardParsimonyPVForEdge(edge_id, left_child_edge_id,
                                     right_child_edge_id);
}
void TPEvalEngineViaParsimony::PopulateLeafwardParsimonyPVForNode(
const NodeId node_id) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
for (const auto adj_node_id :
GetDAG().GetDAGNode(node_id).GetNeighbors(Direction::Leafward, clade)) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(node_id, adj_node_id);
// Populate edge PLV by accumulating parsimony down given edge.
PopulateLeafwardParsimonyPVForEdge(edge_id);
}
}
}
void TPEvalEngineViaParsimony::PopulateLeafwardParsimonyPVForEdge(
const EdgeId edge_id) {
const auto choices = GetTPEngine().GetChoiceMap().GetEdgeChoice(edge_id);
if (choices.parent != NoId && choices.sister != NoId) {
// Evolve down parent.
const auto edge = GetDAG().GetDAGEdge(edge_id);
const EdgeId parent_edge_id = choices.parent;
const EdgeId sister_edge_id = choices.sister;
const EdgeId left_child_edge_id =
(edge.GetSubsplitClade() == SubsplitClade::Left) ? edge_id : sister_edge_id;
const EdgeId right_child_edge_id =
(edge.GetSubsplitClade() == SubsplitClade::Left) ? sister_edge_id : edge_id;
PopulateLeafwardParsimonyPVForEdge(parent_edge_id, left_child_edge_id,
right_child_edge_id);
}
}
void TPEvalEngineViaParsimony::ComputeScores(std::optional<EdgeIdVector> opt_edge_ids) {
for (EdgeId edge_id = 0; edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
GetTopTreeScores()[edge_id.value_] = ParsimonyScore(edge_id);
}
}
EigenVectorXd TPEvalEngineViaParsimony::ParentPartial(EigenVectorXd child_partials) {
  // Sankoff recurrence: for each parent state, take the minimum over child
  // states of (transition cost + child partial).
  Assert(child_partials.size() == state_count_,
         "child_partials in SankoffHandler::ParentPartial should have 4 states.");
  EigenVectorXd parent_partials(state_count_);
  parent_partials.setZero();
  for (size_t parent_state = 0; parent_state < state_count_; parent_state++) {
    // Track the running minimum directly instead of building a temp vector.
    double best = parsimony_cost_matrix_.GetCost(parent_state, 0) + child_partials[0];
    for (size_t child_state = 1; child_state < state_count_; child_state++) {
      const double candidate =
          parsimony_cost_matrix_.GetCost(parent_state, child_state) +
          child_partials[child_state];
      if (candidate < best) {
        best = candidate;
      }
    }
    parent_partials[parent_state] = best;
  }
  return parent_partials;
}
// Total P-partial for an edge at one site: sum of its PLeft and PRight
// Sankoff partial columns.
EigenVectorXd TPEvalEngineViaParsimony::TotalPPartial(const EdgeId edge_id,
                                                      const size_t site_idx) {
  return TotalPPartial(GetPVs().GetPVIndex(PSVType::PLeft, edge_id),
                       GetPVs().GetPVIndex(PSVType::PRight, edge_id), site_idx);
}
// Total P-partial from explicit PV ids: elementwise sum of the left and right
// partial columns at the given site.
EigenVectorXd TPEvalEngineViaParsimony::TotalPPartial(const PVId edge_pleft_pvid,
                                                      const PVId edge_pright_pvid,
                                                      const size_t site_idx) {
  return GetPVs().GetPV(edge_pleft_pvid).col(site_idx) +
         GetPVs().GetPV(edge_pright_pvid).col(site_idx);
}
// Rootward pass for one edge: rebuild the parent edge's PLeft/PRight partials
// from the total partials of its left and right child edges, per site.
void TPEvalEngineViaParsimony::PopulateRootwardParsimonyPVForEdge(
    const EdgeId parent_id, const EdgeId left_child_id, const EdgeId right_child_id) {
  for (size_t pattern_idx = 0; pattern_idx < GetSitePattern().PatternCount();
       pattern_idx++) {
    // Which child partial is in right or left doesn't actually matter because they
    // are summed when calculating q_partials.
    GetPVs().GetPV(PSVType::PLeft, parent_id).col(pattern_idx) =
        ParentPartial(TotalPPartial(left_child_id, pattern_idx));
    GetPVs().GetPV(PSVType::PRight, parent_id).col(pattern_idx) =
        ParentPartial(TotalPPartial(right_child_id, pattern_idx));
  }
}
// Leafward pass for one edge: each child's Q partial combines what comes down
// from the parent's Q with what comes up from the child's sister, per site.
void TPEvalEngineViaParsimony::PopulateLeafwardParsimonyPVForEdge(
    const EdgeId parent_id, const EdgeId left_child_id, const EdgeId right_child_id) {
  for (size_t pattern_idx = 0; pattern_idx < GetSitePattern().PatternCount();
       pattern_idx++) {
    auto partials_from_parent =
        ParentPartial(GetPVs().GetPV(PSVType::Q, parent_id).col(pattern_idx));
    for (const auto child_id : {left_child_id, right_child_id}) {
      // The sister is whichever of the two children is not the current child.
      EdgeId sister_id = ((child_id == left_child_id) ? right_child_id : left_child_id);
      auto partials_from_sister = ParentPartial(TotalPPartial(sister_id, pattern_idx));
      GetPVs().GetPV(PSVType::Q, child_id).col(pattern_idx) =
          partials_from_sister + partials_from_parent;
    }
  }
}
// Parsimony score at an edge, delegating to the PV-id overload using the
// edge's Q, PLeft, and PRight partial vectors.
double TPEvalEngineViaParsimony::ParsimonyScore(const EdgeId edge_id) {
  return ParsimonyScore(GetPVs().GetPVIndex(PSVType::Q, edge_id),
                        GetPVs().GetPVIndex(PSVType::PLeft, edge_id),
                        GetPVs().GetPVIndex(PSVType::PRight, edge_id));
}
// Total parsimony score: per site, combine below-edge (PLeft+PRight) and
// above-edge (Q) partials, take the minimum over states, and weight by the
// site-pattern count.
double TPEvalEngineViaParsimony::ParsimonyScore(const PVId edge_q_pvid,
                                                const PVId edge_pleft_pvid,
                                                const PVId edge_pright_pvid) {
  auto weights = GetSitePattern().GetWeights();
  double total_parsimony = 0.;
  for (size_t pattern = 0; pattern < GetSitePattern().PatternCount(); pattern++) {
    // Note: doing ParentPartial first for the left and right p_partials and then
    // adding them together will give the same minimum parsimony score, but doesn't
    // give correct Sankoff Partial vector for the new rooting
    auto total_tree =
        ParentPartial(TotalPPartial(edge_pleft_pvid, edge_pright_pvid, pattern));
    total_tree += ParentPartial(GetPVs().GetPV(edge_q_pvid).col(pattern));
    // If node_id is the root node, calculating the total_tree vector like so does not
    // yield the SankoffPartial of an actual rooting, but this will not change the
    // minimum value in the partial, so the root node can still be used to calculate
    // the parsimony score.
    total_parsimony +=
        *std::min_element(total_tree.begin(), total_tree.end()) * weights[pattern];
  }
  return total_parsimony;
}
| 74,953
|
C++
|
.cpp
| 1,506
| 42.962815
| 88
| 0.688278
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,019
|
driver.cpp
|
phylovi_bito/src/driver.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
// Based on
// https://www.gnu.org/software/bison/manual/html_node/Calc_002b_002b-Parsing-Driver.html#Calc_002b_002b-Parsing-Driver
#include "driver.hpp"
#include <fstream>
#include <iostream>
#include <memory>
#include <regex>
#include <unordered_map>
#include <utility>
#include "parser.hpp"
#include "taxon_name_munging.hpp"
#include "zlib_stream.hpp"
// Construct a driver with all parse state reset: no taxa, no latest tree, and
// lexer/parser tracing disabled.
Driver::Driver()
    : next_id_(0),
      sort_taxa_(false),
      taxa_complete_(false),
      trace_parsing_(0),
      trace_scanning_(false),
      latest_tree_(nullptr) {}
void Driver::Clear() {
  // Reset parse state so the driver can be reused for a fresh parse.
  // Note that sort_taxa_ is not reset here — presumably a caller-chosen setting
  // that persists across parses.
  taxa_.clear();
  branch_lengths_.clear();
  next_id_ = 0;
  taxa_complete_ = false;
  trace_parsing_ = 0;
  trace_scanning_ = false;
  latest_tree_ = nullptr;
}
// This parser will allow anything before the first '('.
// Parse one Newick tree per input line into a TreeCollection. Anything before
// the first '(' on a line is discarded; lines with no '(' are skipped.
TreeCollection Driver::ParseNewick(std::istream &in) {
  yy::parser parser_instance(*this);
  parser_instance.set_debug_level(trace_parsing_);
  std::string line;
  unsigned int line_number = 1;
  Tree::TreeVector trees;
  while (std::getline(in, line)) {
    // Set the Bison location line number properly so we get useful error
    // messages.
    location_.initialize(nullptr, line_number);
    line_number++;
    auto tree_start = line.find_first_of('(');
    if (!line.empty() && tree_start != std::string::npos) {
      // Erase any characters before the first '('.
      line.erase(0, tree_start);
      // If taxon map has not be initialized, we parse the first tree temporarily to get
      // the taxon names. We will re-parse this tree after assigning sorted taxon IDs.
      if (!taxa_complete_ && sort_taxa_) {
        ParseString(&parser_instance, line);
        SortTaxa();
      }
      trees.push_back(ParseString(&parser_instance, line));
    }
  }
  return TreeCollection(std::move(trees), this->TagTaxonMap());
}
// Parse Newick input, then strip quoting from the taxon names in the tag map.
TreeCollection Driver::ParseAndDequoteNewick(std::istream &in) {
  TreeCollection perhaps_quoted_trees = ParseNewick(in);
  return TreeCollection(
      std::move(perhaps_quoted_trees.trees_),
      TaxonNameMunging::DequoteTagStringMap(perhaps_quoted_trees.TagTaxonMap()));
}
TreeCollection Driver::ParseNewickFile(const std::string &fname) {
  // Parse a plain-text Newick file into a tree collection, dequoting taxon names.
  Clear();
  std::ifstream in(fname.c_str());
  if (in.fail()) {
    Failwith("Cannot open the File : " + fname);
  }
  return ParseAndDequoteNewick(in);
}
TreeCollection Driver::ParseNewickFileGZ(const std::string &fname) {
  // Parse a gzip-compressed Newick file by decompressing through a zlib
  // stream buffer.
  Clear();
  std::ifstream in_compressed(fname.c_str());
  if (in_compressed.fail()) {
    Failwith("Cannot open the File : " + fname);
  }
  zlib::ZStringBuf zbuf(in_compressed, 1024, 2048);
  std::istream in(&zbuf);
  return ParseAndDequoteNewick(in);
}
// Read one line from the stream into `line`, then lowercase it in place.
void GetLineAndConvertToLowerCase(std::istream &in, std::string &line) {
  std::getline(in, line);
  for (char &c : line) {
    c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
  }
}
TreeCollection Driver::ParseNexusFile(const std::string &fname) {
  // Open a plain-text Nexus file and hand it to the Nexus parser.
  std::ifstream in(fname.c_str());
  if (in.fail()) {
    throw std::runtime_error("Cannot open file.");
  }
  return ParseNexus(in);
}
TreeCollection Driver::ParseNexusFileGZ(const std::string &fname) {
  // Open a gzip-compressed Nexus file, decompress through a zlib stream
  // buffer, and hand it to the Nexus parser.
  std::ifstream in_compressed(fname.c_str());
  if (in_compressed.fail()) {
    throw std::runtime_error("Cannot open file.");
  }
  zlib::ZStringBuf zbuf(in_compressed, 1024, 2048);
  std::istream in(&zbuf);
  return ParseNexus(in);
}
// Parse a Nexus stream: verify the #NEXUS header, locate the trees block, read
// the translate table (short-name -> long-name), then parse the trees with
// short names and return a collection keyed by the long names.
TreeCollection Driver::ParseNexus(std::istream &in) {
  Clear();
  try {
    std::string line;
    std::getline(in, line);
    if (line != "#NEXUS") {
      throw std::runtime_error("Putative Nexus file doesn't begin with #NEXUS.");
    }
    // Scan forward (case-insensitively) to the start of the trees block.
    do {
      if (in.eof()) {
        throw std::runtime_error("Finished reading and couldn't find 'begin trees;'");
      }
      GetLineAndConvertToLowerCase(in, line);
    } while (line != "begin trees;");
    GetLineAndConvertToLowerCase(in, line);
    std::regex translate_start("^\\s*translate");
    if (!std::regex_match(line, translate_start)) {
      throw std::runtime_error("Missing translate block.");
    }
    std::getline(in, line);
    std::regex translate_item_regex(R"raw(^\s*(\d+)\s([^,;]*)[,;]?$)raw");
    std::regex lone_semicolon_regex(R"raw(\s*;$)raw");
    std::smatch match;
    auto previous_position = in.tellg();
    TagStringMap long_name_taxon_map;
    uint32_t leaf_id = 0;
    // Iterate through the translate table, assigning tags according to the order of
    // taxa in the block. So, the first taxon name gets leaf number 0, etc.
    while (std::regex_match(line, match, translate_item_regex)) {
      const auto short_name = match[1].str();
      const auto long_name = match[2].str();
      // We prepare taxa_ so that it can parse the short taxon names.
      SafeInsert(taxa_, short_name, leaf_id);
      // However, we keep the long names for the TagTaxonMap.
      SafeInsert(long_name_taxon_map, PackInts(leaf_id, 1), long_name);
      leaf_id++;
      // Semicolon marks the end of the translate block.
      // It appears at the end of a translation statement line in MrBayes.
      if (match[3].str() == ";") {
        break;
      }
      previous_position = in.tellg();
      std::getline(in, line);
      // BEAST has the ending semicolon on a line of its own.
      if (std::regex_match(line, match, lone_semicolon_regex)) {
        break;
      }
      if (in.eof()) {
        throw std::runtime_error("Encountered EOF while parsing translate block.");
      }
    }
    Assert(leaf_id > 0, "No taxa found in translate block!");
    taxa_complete_ = true;
    // Back up one line to hit the first tree.
    in.seekg(previous_position);
    // Now we make a new TagTaxonMap to replace the one with numbers in place of
    // taxon names.
    auto short_name_tree_collection = ParseNewick(in);
    // We're using the public member directly rather than the const accessor because we
    // want to move.
    return TreeCollection(std::move(short_name_tree_collection.trees_),
                          TaxonNameMunging::DequoteTagStringMap(long_name_taxon_map));
  } catch (const std::exception &exception) {
    Failwith(std::string("Problem parsing Nexus file:\n") + exception.what());
  }
}
// Parse a single Newick string with an existing parser instance, returning the
// resulting tree (stored by the parser actions in latest_tree_).
Tree Driver::ParseString(yy::parser *parser_instance, const std::string &str) {
  // Scan the string using the lexer into hidden state.
  this->ScanString(str);
  // Parse the scanned string.
  int return_code = (*parser_instance)();
  Assert(return_code == 0, "Parser had nonzero return value.");
  return *latest_tree_;
}
// Parse a single Newick string from scratch into a one-tree collection with
// dequoted taxon names.
TreeCollection Driver::ParseString(const std::string &str) {
  Clear();
  yy::parser parser_instance(*this);
  parser_instance.set_debug_level(trace_parsing_);
  Tree::TreeVector trees = {ParseString(&parser_instance, str)};
  return TreeCollection(trees,
                        TaxonNameMunging::DequoteTagStringMap(this->TagTaxonMap()));
}
TagStringMap Driver::TagTaxonMap() {
  // Build the tag -> taxon-name map from the parsed taxa.
  TagStringMap tag_taxon_map;
  for (const auto &[name, leaf_id] : taxa_) {
    // Each entry is a leaf, so the leaf count packed into the tag is 1.
    tag_taxon_map[PackInts(leaf_id, 1)] = name;
  }
  return tag_taxon_map;
}
// Install a name -> leaf-id map directly and mark the taxon set as complete,
// so subsequent parses reuse these ids instead of assigning new ones.
void Driver::SetTaxa(const std::map<std::string, uint32_t> taxa) {
  taxa_ = taxa;
  taxa_complete_ = true;
}
void Driver::SortTaxa() {
std::map<std::string, uint32_t> taxa;
uint32_t new_id = 0;
for (const auto &[name, old_id] : taxa_) {
std::ignore = old_id;
taxa[name] = new_id;
new_id++;
}
SetTaxa(taxa);
}
// Note that a number of Driver methods are implemented in scanner.ll.
| 7,552
|
C++
|
.cpp
| 207
| 32.164251
| 119
| 0.676314
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,020
|
doctest.cpp
|
phylovi_bito/src/doctest.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "include_doctest.hpp"
#include <string>
#include "rooted_sbn_instance.hpp"
#include "stick_breaking_transform.hpp"
#include "taxon_name_munging.hpp"
#include "unrooted_sbn_instance.hpp"
#include "subsplit_dag_node.hpp"
// NOTE: This file is automatically generated from `test/prep/doctest.py`. Don't edit!
// Exercises the three Node traversal orders (preorder, postorder, level-order)
// on a fixed ten-leaf topology, checking the exact sequence of node tag strings.
TEST_CASE("Node") {
  Driver driver;
  std::vector<std::string> trace;
  auto t =
      driver.ParseString("((((0_1,1_1),(2_1,3_1)),4_1),((5_1,(6_1,7_1)),(8_1,9_1)));")
          .Trees()[0];
  // preorder:
  t.Topology()->Preorder(
      [&trace](const Node* node) { trace.push_back(node->TagString()); });
  CHECK(std::vector<std::string>({"9_10", "4_5", "3_4", "1_2", "0_1", "1_1", "3_2",
                                  "2_1", "3_1", "4_1", "9_5", "7_3", "5_1", "7_2",
                                  "6_1", "7_1", "9_2", "8_1", "9_1"}) == trace);
  trace.clear();
  // postorder:
  t.Topology()->Postorder(
      [&trace](const Node* node) { trace.push_back(node->TagString()); });
  CHECK(std::vector<std::string>({"0_1", "1_1", "1_2", "2_1", "3_1", "3_2", "3_4",
                                  "4_1", "4_5", "5_1", "6_1", "7_1", "7_2", "7_3",
                                  "8_1", "9_1", "9_2", "9_5", "9_10"}) == trace);
  trace.clear();
  // levelorder:
  t.Topology()->LevelOrder(
      [&trace](const Node* node) { trace.push_back(node->TagString()); });
  CHECK(std::vector<std::string>({"9_10", "4_5", "9_5", "3_4", "4_1", "7_3", "9_2",
                                  "1_2", "3_2", "5_1", "7_2", "8_1", "9_1", "0_1",
                                  "1_1", "2_1", "6_1", "7_1"}) == trace);
  trace.clear();
}
| 1,832
|
C++
|
.cpp
| 39
| 38.871795
| 86
| 0.51065
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,021
|
tree.cpp
|
phylovi_bito/src/tree.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "tree.hpp"
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "node.hpp"
#include "sugar.hpp"
// Construct a tree from a topology and a tag -> branch-length map. Node ids
// are assigned by Polish(); branches absent from the map default to length 0.
Tree::Tree(const Node::NodePtr& topology, TagDoubleMap branch_lengths)
    : topology_(topology) {
  auto tag_id_map = topology->Polish();
  branch_lengths_ = std::vector<double>(topology->Id() + 1);
  for (const auto& [tag, id] : tag_id_map) {
    auto search = branch_lengths.find(tag);
    if (search != branch_lengths.end()) {
      Assert(id < branch_lengths_.size(),
             "branch_lengths of insufficient size in Tree::Tree.");
      branch_lengths_[id] = search->second;
    } else {
      // No entry for this tag: default the branch length to zero.
      branch_lengths_[id] = 0.;
    }
  }
}
Tree::Tree(const Node::NodePtr& topology, BranchLengthVector branch_lengths)
    : branch_lengths_(std::move(branch_lengths)), topology_(topology) {
  // The branch-length vector must contain exactly one entry per node id.
  const size_t node_count = topology->Id() + 1;
  if (node_count != branch_lengths_.size()) {
    Failwith(
        "Root id is too large relative to the branch_lengths size in "
        "Tree::Tree: " +
        std::to_string(node_count) + " vs " +
        std::to_string(branch_lengths_.size()));
  }
}
bool Tree::operator==(const Tree& other) const {
  // Trees are equal when both topology and branch lengths agree.
  const bool same_topology = (this->Topology() == other.Topology());
  const bool same_branch_lengths = (this->BranchLengths() == other.BranchLengths());
  return same_topology && same_branch_lengths;
}
// Make an independent copy: the topology is deep-copied; branch lengths are copied by value.
Tree Tree::DeepCopy() const { return Tree(Topology()->DeepCopy(), BranchLengths()); }
// Newick string including branch lengths, with optional node labels.
std::string Tree::Newick(const TagStringMapOption& node_labels) const {
  return Topology()->Newick(branch_lengths_, node_labels);
}
// Newick string without branch lengths, with optional node labels.
std::string Tree::NewickTopology(const TagStringMapOption& node_labels) const {
  return Topology()->Newick(std::nullopt, node_labels);
}
// Branch length of the edge above the given node, looked up by node id.
double Tree::BranchLength(const Node* node) const {
  Assert(node->Id() < branch_lengths_.size(),
         "Requested id is out of range in Tree::BranchLength.");
  return branch_lengths_[node->Id()];
}
// Build a tree over the given topology where every branch has length 1.
// Polish() is called first so node ids are consistent.
Tree Tree::UnitBranchLengthTreeOf(Node::NodePtr topology) {
  topology->Polish();
  return Tree(topology, BranchLengthVector(1 + topology->Id(), 1.));
}
// Build a unit-branch-length tree from a parent-id vector representation.
Tree Tree::OfParentIdVector(const std::vector<size_t>& ids) {
  auto topology = Node::OfParentIdVector(ids);
  return Tree(topology, BranchLengthVector(topology->Id() + 1, 1.));
}
Tree::TreeVector Tree::ExampleTrees() {
  // Build a unit-branch-length tree for each of the example topologies.
  TreeVector example_trees;
  for (const auto& example_topology : Node::ExampleTopologies()) {
    example_trees.push_back(UnitBranchLengthTreeOf(example_topology));
  }
  return example_trees;
}
void Tree::SlideRootPosition() {
  // Slide the root along its edge: fold the second root-child's branch length
  // into the first child's branch, then zero the second child's branch.
  const size_t fixed_node_id = Children()[1]->Id();
  const size_t root_child_id = Children()[0]->Id();
  branch_lengths_[root_child_id] += branch_lengths_[fixed_node_id];
  branch_lengths_[fixed_node_id] = 0.0;
}
| 2,790
|
C++
|
.cpp
| 74
| 34.283784
| 85
| 0.688009
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,022
|
reindexer.cpp
|
phylovi_bito/src/reindexer.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "reindexer.hpp"
Reindexer Reindexer::IdentityReindexer(const size_t size) {
  // Build the identity mapping: index i maps to i.
  Reindexer identity(size);
  auto &data = identity.GetData();
  std::iota(data.begin(), data.end(), 0);
  return identity;
}
// Check that the first `length` entries (default: all) form a permutation of
// [0, length): every mapped value in range and no value repeated.
bool Reindexer::IsValid(std::optional<size_t> length) const {
  const size_t reindexer_size = (length.has_value() ? length.value() : size());
  // NOTE(review): this compares the optional itself to size() (nullopt compares
  // less than any value, so the no-argument case trivially passes) — confirm
  // `reindexer_size <= size()` was the intent.
  Assert(length <= size(),
         "Length of reindexer cannot be larger than the vector containing it.");
  std::vector<bool> already_used(reindexer_size, false);
  for (size_t idx = 0; idx < reindexer_size; idx++) {
    if (GetNewIndexByOldIndex(idx) >= reindexer_size ||
        already_used[GetNewIndexByOldIndex(idx)]) {
      return false;
    }
    already_used[GetNewIndexByOldIndex(idx)] = true;
  }
  return true;
}
// ** Modification Operations
// Grow (or shrink) the reindexer. Any newly added positions are filled with
// the identity mapping (index i maps to i), matching IdentityReindexer's use
// of std::iota instead of a hand-rolled loop.
void Reindexer::Resize(size_t new_size) {
  const size_t old_size = GetData().size();
  GetData().resize(new_size);
  if (new_size > old_size) {
    std::iota(GetData().begin() + old_size, GetData().end(), old_size);
  }
}
// Gets the inverse of a given reindexer.
// Gets the inverse of a given reindexer: if this maps old index i to new
// index j, the result maps j back to i.
Reindexer Reindexer::InvertReindexer() const {
  Assert(IsValid(), "Reindexer must be valid in Reindexer::InvertedReindexer.");
  Reindexer inverted_reindexer(size());
  for (size_t idx = 0; idx < size(); idx++) {
    inverted_reindexer.SetReindex(GetNewIndexByOldIndex(idx), idx);
  }
  return inverted_reindexer;
}
// Return a reindexer of size()-1 with the given old index removed; all new
// indices above the removed one's image are shifted down by one so the result
// remains a permutation.
Reindexer Reindexer::RemoveOldIndex(const size_t remove_old_idx) const {
  Assert(IsValid(), "Reindexer must be valid in Reindexer::RemoveOldIndex.");
  Reindexer result_reindexer;
  result_reindexer.reserve(size() - 1);
  const size_t remove_new_idx = GetNewIndexByOldIndex(remove_old_idx);
  for (size_t old_idx = 0; old_idx < size(); old_idx++) {
    if (old_idx == remove_old_idx) {
      continue;
    }
    const size_t new_idx = GetNewIndexByOldIndex(old_idx);
    // Subtract one (bool arithmetic) for indices above the removed image.
    result_reindexer.AppendNewIndex(new_idx - (new_idx > remove_new_idx));
  }
  return result_reindexer;
}
Reindexer Reindexer::RemoveNewIndex(const size_t remove_new_idx) const {
  // Translate the new index to its old index and delegate to RemoveOldIndex.
  return RemoveOldIndex(GetOldIndexByNewIndex(remove_new_idx));
}
// Compose this reindexer with apply_reindexer (this first, then apply),
// padding this reindexer with identity entries if it is smaller.
// Note: mutates `this` by padding; the composed result is returned.
Reindexer Reindexer::ComposeWith(const Reindexer &apply_reindexer) {
  Assert(IsValid(), "base_reindexer not valid in Reindexer::ComposeWith.");
  Assert(apply_reindexer.IsValid(),
         "apply_reindexer not valid in Reindexer::ComposeWith.");
  Assert(size_t(size()) <= apply_reindexer.size(),
         "The base_reindexer cannot be larger than the reindexer it is being "
         "composed with.");
  Reindexer result_reindexer(apply_reindexer.size());
  // Pad base_reindexer if it needs to grow to accept apply_reindexer.
  for (size_t idx = size(); idx < apply_reindexer.size(); idx++) {
    AppendNewIndex();
  }
  // Reindex: result[old->new of this] = old->new of apply.
  Reindexer inverted_reindexer = InvertReindexer();
  for (size_t idx = 0; idx < apply_reindexer.size(); idx++) {
    result_reindexer.SetReindex(inverted_reindexer.GetNewIndexByOldIndex(idx),
                                apply_reindexer.GetNewIndexByOldIndex(idx));
  }
  return result_reindexer;
}
// Move the element mapped to old_id so it maps to new_id, shifting all
// intermediate mapped values by one to keep the reindexer a permutation.
void Reindexer::ReassignAndShift(const size_t old_id, const size_t new_id) {
  Assert(old_id < size() && new_id < size(),
         "The given ids must be within the bounds of the reindexer in "
         "Reindexer::ReassignAndShift.");
  Assert(IsValid(), "Reindexer must be valid in Reindexer::ReassignAndShift.");
  if (old_id == new_id) {
    return;
  }
  // Find position with value old_id.
  const size_t old_id_position = GetOldIndexByNewIndex(old_id);
  // Shift values in the range between new_id and old_id toward the vacated slot.
  if (old_id > new_id) {
    for (size_t &id : GetData()) {
      if (id < old_id && id >= new_id) {
        id++;
      }
    }
  } else {
    for (size_t &id : GetData()) {
      if (id > old_id && id <= new_id) {
        id--;
      }
    }
  }
  // Reassign old_id to new_id.
  SetReindex(old_id_position, new_id);
}
| 4,111
|
C++
|
.cpp
| 104
| 34.336538
| 81
| 0.66767
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,023
|
pybito.cpp
|
phylovi_bito/src/pybito.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wold-style-cast"
#include <pybind11/eigen.h>
#include <pybind11/iostream.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/stl_bind.h>
#include <pybind11/functional.h>
#pragma GCC diagnostic pop
#include <string>
#include "sugar_version.hpp"
#include "gp_instance.hpp"
#include "phylo_flags.hpp"
#include "rooted_gradient_transforms.hpp"
#include "rooted_sbn_instance.hpp"
#include "unrooted_sbn_instance.hpp"
namespace py = pybind11;
// This is how we can have Eigen objects be directly mutable from Python. See
// https://github.com/eacousineau/repro/blob/f4ba595d077af7363f501f6c85d3d2449219f04a/python/pybind11/custom_tests/test_tmp.cc#L16-L38
// Thanks to @eacousineau!
// Expose member pointer `pm` of class C as a Python property whose getter
// returns a reference, so the underlying object (e.g. an Eigen member) can be
// mutated in place from Python.
template <typename PyClass, typename C, typename D>
void def_read_write_mutable(PyClass &cls, const char *name, D C::*pm) {
  cls.def_property(
      name, [pm](C & self) -> auto & { return self.*pm; },
      [pm](C &self, const D &value) { self.*pm = value; });
}
// Helper for adding a single member-function definition to a pybind11 class:
// wraps the member pointer in a lambda and forwards the py::arg tuple.
template <typename PyClass, typename... PyArgTypes, typename CppClass, typename RetType,
          typename... ArgTypes>
void def_template(PyClass pyclass, const char *name, const char *description,
                  RetType (CppClass::*func)(ArgTypes...),
                  std::tuple<PyArgTypes...> pyargs) {
  // std::apply unpacks the pyargs tuple into the .def(...) call.
  std::apply(
      [&pyclass, &name, &description, &func](auto &&...pyargs) {
        pyclass.def(
            name,
            [func](CppClass &self, ArgTypes... args) { return (self.*func)(args...); },
            description, pyargs...);
      },
      pyargs);
}
// Define pyclass function for all function overloads (non-const methods):
// binds each (member-pointer, pyargs) pair under the same Python name,
// recursing through the parameter pack at compile time.
template <typename PyClass, typename... PyArgTypes, typename CppFunc,
          typename... OtherCppFuncs>
void def_overload(PyClass pyclass, const char *name, const char *description,
                  std::tuple<CppFunc, std::tuple<PyArgTypes...>> overload_def,
                  OtherCppFuncs... other_overloads) {
  // Add definition to class.
  auto &[func, pyargs] = overload_def;
  def_template(pyclass, name, description, func, pyargs);
  // Get next function from template list.
  if constexpr (sizeof...(OtherCppFuncs) > 0) {
    def_overload(pyclass, name, description, other_overloads...);
  }
}
// Use same function name, description, pyargs for multiple functions from
// multiple classes (non-const methods): binds each (pyclass, member-pointer)
// pair, recursing through the parameter pack at compile time.
template <typename PyClass, typename... PyArgTypes, typename CppFunc,
          typename... OtherClassDefs>
void def_multiclass(const char *name, const char *description,
                    std::tuple<PyArgTypes...> pyargs,
                    std::tuple<PyClass, CppFunc> class_def,
                    OtherClassDefs... other_defs) {
  // Add definition to class.
  auto &[pyclass, func] = class_def;
  def_template(pyclass, name, description, func, pyargs);
  // Get next function from template list.
  if constexpr (sizeof...(OtherClassDefs) > 0) {
    def_multiclass(name, description, pyargs, other_defs...);
  }
}
// In order to make vector<double>s available to numpy, we take two steps.
// First, we make them opaque to pybind11, so that it doesn't do its default
// conversion of STL types.
PYBIND11_MAKE_OPAQUE(std::vector<double>);
// MODULE
PYBIND11_MODULE(bito, m) {
m.doc() = R"raw(Python interface to bito.)raw";
// Second, we expose them as buffer objects so that we can use them
// as in-place numpy arrays with np.array(v, copy=False). See
// https://pybind11.readthedocs.io/en/stable/advanced/pycpp/numpy.html
py::class_<std::vector<double>>(m, "vector_double", "A wrapper for vector<double>.",
py::buffer_protocol())
.def_buffer([](std::vector<double> &v) -> py::buffer_info {
return py::buffer_info(v.data(), // Pointer to buffer
sizeof(double), // Size of one scalar
py::format_descriptor<double>::format(), // See docs
1, // Number of dimensions
{v.size()}, // Buffer dimensions
{sizeof(double)}); // Stride
});
m.def("git_commit", &Version::GetGitCommit, "Get git commit of version build.");
m.def("git_branch", &Version::GetGitBranch, "Get git branch of version build.");
m.def("git_tags", &Version::GetGitTags, "Get git tag(s) of version build.");
// CLASS
// RootedTree
py::class_<RootedTree>(m, "RootedTree", "A rooted tree with branch lengths.",
py::buffer_protocol())
.def("__eq__", [](const RootedTree &self,
const RootedTree &other) { return self == other; })
.def("compare_by_topology",
[](const RootedTree &self, const RootedTree &other) {
return self.Topology() == other.Topology();
})
.def(
"to_newick", [](const RootedTree &self) { return self.Newick(); },
"Output to Newick string with branch lengths.")
.def(
"to_newick_topology",
[](const RootedTree &self) { return self.NewickTopology(std::nullopt); },
"Output to Newick string without branch lengths.")
.def("parent_id_vector", &RootedTree::ParentIdVector)
.def("initialize_time_tree_using_height_ratios",
&RootedTree::InitializeTimeTreeUsingHeightRatios)
.def_static("example", &RootedTree::Example)
.def_static("of_parent_id_vector", &RootedTree::OfParentIdVector)
.def_readwrite("branch_lengths", &RootedTree::branch_lengths_)
.def_readwrite("height_ratios", &RootedTree::height_ratios_)
.def_readwrite("node_heights", &RootedTree::node_heights_)
.def_readwrite("node_bounds", &RootedTree::node_bounds_)
.def_readwrite("rates", &RootedTree::rates_)
.def(
"topology", [](const RootedTree &self) { return self.Topology(); },
py::return_value_policy::reference)
.def(
"id", [](const RootedTree &self) { return self.Topology()->Id(); },
"Unique node id within topology.")
.def(
"to_leaves", [](const RootedTree &self) { return self.Topology()->Leaves(); },
"Output node to leave bitset.")
.def(
"build_subsplit",
[](const RootedTree &self) { return self.Topology()->BuildSubsplit(); },
"Build subsplit node bitset of node.")
.def(
"build_pcsp",
[](const RootedTree &self, const size_t child_id) {
Assert(child_id < 2, "child_count must be 0 (left) or 1 (right).");
auto child_clade =
(child_id == 0) ? SubsplitClade::Left : SubsplitClade::Right;
return self.Topology()->BuildPCSP(child_clade);
},
"Build PCSP edge bitset of edge below node.")
.def(
"build_set_of_subsplits",
[](const RootedTree &self) { return self.Topology()->BuildSetOfSubsplits(); },
"Build set of all subsplit bitsets for all nodes in topology.")
.def(
"build_set_of_pcsps",
[](const RootedTree &self) { return self.Topology()->BuildSetOfPCSPs(); },
"Build set of all PCSP edge bitsets for all edges in topology.");
// CLASS
// RootedTreeCollection
  // Python binding for RootedTreeCollection: a container of rooted trees plus
  // a taxon-name map. Exposes erase/drop/newick helpers and the trees_ member.
  py::class_<RootedTreeCollection>(m, "RootedTreeCollection", R"raw(
A collection of rooted trees.
In addition to the methods, RootedTreeCollection also offers direct access to
the trees through the ``trees`` member variable.
)raw")
      .def(py::init<RootedTree::RootedTreeVector>(), "The empty constructor.")
      .def(py::init<RootedTree::RootedTreeVector, TagStringMap>(),
           "Constructor from a vector of trees and a tags->taxon names map.")
      .def(py::init<RootedTree::RootedTreeVector, const std::vector<std::string> &>(),
           "Constructor from a vector of trees and a vector of taxon names.")
      .def("erase", &RootedTreeCollection::Erase,
           "Erase the specified range from the current tree collection.")
      .def("drop_first", &RootedTreeCollection::DropFirst,
           "Drop the first ``fraction`` trees from the tree collection.",
           py::arg("fraction"))
      .def("newick", &RootedTreeCollection::Newick,
           "Get the current set of trees as a big Newick string.")
      // Direct (mutable) access to the underlying vector of trees.
      .def_readwrite("trees", &RootedTreeCollection::trees_);
// CLASS
// UnrootedTree
  // Python binding for UnrootedTree. Mirrors the RootedTree binding above:
  // Newick output, parent-id-vector round-tripping, and branch-length access.
  py::class_<UnrootedTree>(m, "UnrootedTree", "An unrooted tree with branch lengths.",
                           py::buffer_protocol())
      // Value equality, delegating to the C++ operator==.
      .def("__eq__", [](const UnrootedTree &self,
                        const UnrootedTree &other) { return self == other; })
      .def(
          "to_newick", [](const UnrootedTree &self) { return self.Newick(); },
          "Output to Newick string with branch lengths.")
      .def(
          "to_newick_topology",
          [](const UnrootedTree &self) { return self.NewickTopology(std::nullopt); },
          "Output to Newick string without branch lengths.")
      .def("parent_id_vector", &UnrootedTree::ParentIdVector)
      .def_static("of_parent_id_vector", &UnrootedTree::OfParentIdVector)
      .def_readwrite("branch_lengths", &UnrootedTree::branch_lengths_);
// CLASS
// UnrootedTreeCollection
  // Python binding for UnrootedTreeCollection; parallels RootedTreeCollection.
  py::class_<UnrootedTreeCollection>(m, "UnrootedTreeCollection", R"raw(
A collection of unrooted trees.
In addition to the methods, UnrootedTreeCollection also offers direct access to
the trees through the ``trees`` member variable.
)raw")
      .def(py::init<UnrootedTree::UnrootedTreeVector>(), "The empty constructor.")
      .def(py::init<UnrootedTree::UnrootedTreeVector, TagStringMap>(),
           "Constructor from a vector of trees and a tags->taxon names map.")
      .def(py::init<UnrootedTree::UnrootedTreeVector,
                    const std::vector<std::string> &>(),
           "Constructor from a vector of trees and a vector of taxon names.")
      .def("erase", &UnrootedTreeCollection::Erase,
           "Erase the specified range from the current tree collection.")
      .def("drop_first", &UnrootedTreeCollection::DropFirst,
           "Drop the first ``fraction`` trees from the tree collection.",
           py::arg("fraction"))
      .def("newick", &UnrootedTreeCollection::Newick,
           "Get the current set of trees as a big Newick string.")
      // Direct (mutable) access to the underlying vector of trees.
      .def_readwrite("trees", &UnrootedTreeCollection::trees_);
// PhyloGradient
  // Read-only binding for PhyloGradient: the log-likelihood and its gradient.
  py::class_<PhyloGradient>(m, "PhyloGradient", R"raw(A phylogenetic gradient.)raw")
      .def_readonly("log_likelihood", &PhyloGradient::log_likelihood_)
      .def_readonly("gradient", &PhyloGradient::gradient_);
// CLASS
// PSPIndexer
  // Binding for PSPIndexer; only the diagnostic Details() method is exposed.
  py::class_<PSPIndexer>(m, "PSPIndexer", "The primary split pair indexer.")
      .def("details", &PSPIndexer::Details);
// CLASS
// PhyloModelSpecification
  // Binding for PhyloModelSpecification: three strings naming the
  // substitution, site, and clock models.
  py::class_<PhyloModelSpecification>(m, "PhyloModelSpecification",
                                      R"raw(
Phylogenetic model specification.
This is how we specify phylogenetic models, with strings for the substitution
model, the site model, and the clock model.
)raw")
      .def(py::init<const std::string &, const std::string &, const std::string &>(),
           py::arg("substitution"), py::arg("site"), py::arg("clock"));
// ** SBNInstance variants
const char prepare_for_phylo_likelihood_docstring[] =
R"raw(
Prepare instance for phylogenetic likelihood computation.
See the ``bito.beagle_flags`` online documentation to learn about the allowable flags.
``use_tip_states`` tells BEAGLE if it should use tip states (versus tip partials).
Note that bito currently treats degenerate nucleotides as gaps irrespective of this setting.
``tree_count_option`` tells bito for how many trees you will be asking for the likelihood
or gradient at a time. If not specified, this is set to the number of trees currently loaded
into the instance. This allocates the correct number of slots in the phylogenetic model
parameter matrices, and it's up to the user to set those model parameters after calling
this function.
Note that this tree count need not be the same as the number of threads (and is typically bigger).
)raw";
const char process_loaded_trees_docstring[] = R"raw(
Process the trees currently stored in the instance.
Specifically, parse them and build the subsplit support and allocate (but do not train) the corresponding SBN
parameters.
)raw";
const char read_sbn_parameters_from_csv_docstring[] = R"raw(
Read SBN parameters from a CSV mapping a string representation of the GPCSP to its probability in linear (not
log) space.
Any GPCSPs that are not found in the supplied CSV will be assigned a probability of 0.
)raw";
// CLASS
// RootedSBNInstance
  // Register the Pre* base so pybind11 can use it as the parent class below.
  py::class_<PreRootedSBNInstance>(m, "PreRootedSBNInstance");
  py::class_<RootedSBNInstance, PreRootedSBNInstance> rooted_sbn_instance_class(
      m, "rooted_instance", R"raw(
A rooted SBN instance.
The intent of this class is primarily to support rooted time trees, however some functionality works without
dates.
If you are using time trees, you will need to assign tip dates using one of the available methods and then
initialize the time trees, either using branch lengths or per-tree height ratios.
If you don't do this, then trying to calculate a phylogenetic gradient will raise an exception.
)raw");
  // Method bindings for rooted_instance. The first section is intentionally
  // kept textually parallel with the UnrootedSBNInstance bindings below.
  rooted_sbn_instance_class
      .def(py::init<const std::string &>())
      // ** BEGIN DUPLICATED CODE BLOCK between this and UnrootedSBNInstance
      .def("get_phylo_model_params", &RootedSBNInstance::GetPhyloModelParams)
      .def("get_phylo_model_param_block_map",
           &RootedSBNInstance::GetPhyloModelParamBlockMap)
      .def(
          "prepare_for_phylo_likelihood", &RootedSBNInstance::PrepareForPhyloLikelihood,
          prepare_for_phylo_likelihood_docstring, py::arg("model_specification"),
          py::arg("thread_count"), py::arg("beagle_flags") = std::vector<BeagleFlags>(),
          py::arg("use_tip_states") = true, py::arg("tree_count_option") = std::nullopt)
      .def("resize_phylo_model_params", &RootedSBNInstance::ResizePhyloModelParams,
           "Resize phylo_model_params.", py::arg("tree_count_option") = std::nullopt)
      .def("load_duplicates_of_first_tree",
           &RootedSBNInstance::LoadDuplicatesOfFirstTree,
           "Replace all of the loaded trees with duplicates of the first tree.",
           py::arg("number_of_times"))
      .def("read_fasta_file", &RootedSBNInstance::ReadFastaFile,
           "Read a sequence alignment from a FASTA file.")
      .def("taxon_names", &RootedSBNInstance::TaxonNames,
           "Return a list of taxon names.")
      // ** Initialization and status
      .def("print_status", &RootedSBNInstance::PrintStatus,
           "Print information about the instance.")
      .def("tree_count", &RootedSBNInstance::TreeCount,
           "Return the number of trees that are currently stored in the "
           "instance.")
      // ** SBN-related items
      .def("process_loaded_trees", &RootedSBNInstance::ProcessLoadedTrees,
           process_loaded_trees_docstring)
      .def("train_simple_average", &RootedSBNInstance::TrainSimpleAverage,
           R"raw(
Train the SBN using the "simple average" estimator.
For rooted trees this training is simpler than in the unrooted case:
we simply take the normalized frequency of PCSPs.
)raw")
      .def("sbn_parameters_to_csv", &RootedSBNInstance::SBNParametersToCSV,
           R"raw(Write "pretty" formatted SBN parameters to a CSV.)raw")
      .def("read_sbn_parameters_from_csv", &RootedSBNInstance::ReadSBNParametersFromCSV,
           read_sbn_parameters_from_csv_docstring)
      .def("calculate_sbn_probabilities", &RootedSBNInstance::CalculateSBNProbabilities,
           R"raw(Calculate the SBN probabilities of the currently loaded trees.)raw")
      // ** END DUPLICATED CODE BLOCK between this and UnrootedSBNInstance
      .def("unconditional_subsplit_probabilities_to_csv",
           &RootedSBNInstance::UnconditionalSubsplitProbabilitiesToCSV,
           "Write out the overall probability of seeing each subsplit when we sample a "
           "tree from the SBN.")
      // ** Tip dates
      .def("set_dates_to_be_constant", &RootedSBNInstance::SetDatesToBeConstant,
           "Set tip dates to be constant.",
           py::arg("initialize_time_trees_using_branch_lengths"))
      .def("parse_dates_from_taxon_names", &RootedSBNInstance::ParseDatesFromTaxonNames,
           "Take dates to be the numbers after an underscore in the taxon names.",
           py::arg("initialize_time_trees_using_branch_lengths"))
      .def(
          "parse_dates_from_csv", &RootedSBNInstance::ParseDatesFromCSV,
          "Parse dates from a headerless 2-column CSV of quoted taxon names and dates.",
          py::arg("csv_path"), py::arg("initialize_time_trees_using_branch_lengths"))
      // ** Phylogenetic likelihood
      .def("log_det_jacobian_of_height_transform",
           &RootedSBNInstance::LogDetJacobianHeightTransform,
           "Calculate the log det jacobian of the node height transform.")
      .def("set_rescaling", &RootedSBNInstance::SetRescaling,
           "Set whether BEAGLE's likelihood rescaling is used.")
      // ** Phylogenetic gradients
      .def("gradient_log_det_jacobian_of_height_transform",
           &RootedSBNInstance::GradientLogDeterminantJacobian,
           "Obtain the log determinant of the gradient")
      // ** I/O
      .def("read_newick_file", &RootedSBNInstance::ReadNewickFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Newick file.")
      .def("read_nexus_file", &RootedSBNInstance::ReadNexusFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Nexus file.")
      // ** Member variables
      .def_readwrite("tree_collection", &RootedSBNInstance::tree_collection_);
  // Register the overload set for rooted phylo_gradients: one overload taking
  // an optional PhyloFlags object, plus template overloads taking flag names
  // (optionally paired with set-bits and/or values) as plain vectors.
  def_overload(
      rooted_sbn_instance_class, "phylo_gradients",
      "Calculate gradients of parameters for the current set of trees.",
      std::tuple(static_cast<std::vector<PhyloGradient> (RootedSBNInstance::*)(
                     std::optional<PhyloFlags>)>(&RootedSBNInstance::PhyloGradients),
                 std::tuple(py::arg("phylo_flags") = std::nullopt)),
      std::tuple(&RootedSBNInstance::PhyloGradients<StringVector>,
                 std::tuple(py::arg("flag_names"), py::arg("use_defaults") = true)),
      std::tuple(
          &RootedSBNInstance::PhyloGradients<StringBoolVector>,
          std::tuple(py::arg("flag_names_and_set"), py::arg("use_defaults") = true)),
      std::tuple(
          &RootedSBNInstance::PhyloGradients<StringDoubleVector>,
          std::tuple(py::arg("flag_names_and_values"), py::arg("use_defaults") = true)),
      std::tuple(&RootedSBNInstance::PhyloGradients<StringBoolDoubleVector>,
                 std::tuple(py::arg("flag_names_and_set_and_values"),
                            py::arg("use_defaults") = true)));
  // Register the overload set for rooted log_likelihoods; same flag-passing
  // variants as phylo_gradients above.
  def_overload(
      rooted_sbn_instance_class, "log_likelihoods",
      "Calculate log likelihoods for the current set of trees.",
      std::tuple(static_cast<std::vector<double> (RootedSBNInstance::*)(
                     std::optional<PhyloFlags>)>(&RootedSBNInstance::LogLikelihoods),
                 std::tuple(py::arg("phylo_flags") = std::nullopt)),
      std::tuple(&RootedSBNInstance::LogLikelihoods<StringVector>,
                 std::tuple(py::arg("flag_names"), py::arg("use_defaults") = true)),
      std::tuple(
          &RootedSBNInstance::LogLikelihoods<StringBoolVector>,
          std::tuple(py::arg("flag_names_and_set"), py::arg("use_defaults") = true)),
      std::tuple(
          &RootedSBNInstance::LogLikelihoods<StringDoubleVector>,
          std::tuple(py::arg("flag_names_and_values"), py::arg("use_defaults") = true)),
      std::tuple(&RootedSBNInstance::LogLikelihoods<StringBoolDoubleVector>,
                 std::tuple(py::arg("flag_names_and_set_and_values"),
                            py::arg("use_defaults") = true)));
// CLASS
// UnrootedSBNInstance
  // Register the Pre* base, then the unrooted instance class derived from it.
  py::class_<PreUnrootedSBNInstance>(m, "PreUnrootedSBNInstance");
  py::class_<UnrootedSBNInstance, PreUnrootedSBNInstance> unrooted_sbn_instance_class(
      m, "unrooted_instance", R"raw(An unrooted SBN instance.)raw");
  // Method bindings for unrooted_instance. The first section mirrors the
  // RootedSBNInstance bindings above; the rest is unrooted-only (EM training,
  // tree sampling, indexer representations, topology gradients).
  unrooted_sbn_instance_class
      .def(py::init<const std::string &>())
      // ** BEGIN DUPLICATED CODE BLOCK between this and RootedSBNInstance
      .def("get_phylo_model_params", &UnrootedSBNInstance::GetPhyloModelParams)
      .def("get_phylo_model_param_block_map",
           &UnrootedSBNInstance::GetPhyloModelParamBlockMap)
      .def(
          "prepare_for_phylo_likelihood",
          &UnrootedSBNInstance::PrepareForPhyloLikelihood,
          prepare_for_phylo_likelihood_docstring, py::arg("model_specification"),
          py::arg("thread_count"), py::arg("beagle_flags") = std::vector<BeagleFlags>(),
          py::arg("use_tip_states") = true, py::arg("tree_count_option") = std::nullopt)
      .def("resize_phylo_model_params", &UnrootedSBNInstance::ResizePhyloModelParams,
           "Resize phylo_model_params.", py::arg("tree_count_option") = std::nullopt)
      .def("load_duplicates_of_first_tree",
           &UnrootedSBNInstance::LoadDuplicatesOfFirstTree,
           "Replace all of the loaded trees with duplicates of the first tree.",
           py::arg("number_of_times"))
      .def("read_fasta_file", &UnrootedSBNInstance::ReadFastaFile,
           "Read a sequence alignment from a FASTA file.")
      .def("taxon_names", &UnrootedSBNInstance::TaxonNames,
           "Return a list of taxon names.")
      // ** Initialization and status
      .def("print_status", &UnrootedSBNInstance::PrintStatus,
           "Print information about the instance.")
      .def("tree_count", &UnrootedSBNInstance::TreeCount,
           "Return the number of trees that are currently stored in the "
           "instance.")
      // ** SBN-related items
      .def("process_loaded_trees", &UnrootedSBNInstance::ProcessLoadedTrees,
           process_loaded_trees_docstring)
      .def("train_simple_average", &UnrootedSBNInstance::TrainSimpleAverage,
           R"raw(
Train the SBN using the "simple average" estimator.
This is described in the "Maximum Lower Bound Estimates" section of the 2018
NeurIPS paper, and is later referred to as the "SBN-SA" estimator.
)raw")
      .def("sbn_parameters_to_csv", &UnrootedSBNInstance::SBNParametersToCSV,
           R"raw(Write "pretty" formatted SBN parameters to a CSV.)raw")
      .def("read_sbn_parameters_from_csv",
           &UnrootedSBNInstance::ReadSBNParametersFromCSV,
           read_sbn_parameters_from_csv_docstring)
      .def("calculate_sbn_probabilities",
           &UnrootedSBNInstance::CalculateSBNProbabilities,
           R"raw(Calculate the SBN probabilities of the currently loaded trees.)raw")
      // ** END DUPLICATED CODE BLOCK between this and RootedSBNInstance
      .def("train_expectation_maximization",
           &UnrootedSBNInstance::TrainExpectationMaximization,
           R"raw(
Train the SBN using the expectation-maximization estimator.
This is described in the "Expectation Maximization" section of the 2018
NeurIPS paper, and is later referred to as the "SBN-EM" estimator.
Here we can supply alpha, the absolute maxiumum number of iterations, and
a score-based termination criterion for EM. EM will stop if the scaled
score increase is less than the provided ``score_epsilon``.
)raw",
           py::arg("alpha"), py::arg("max_iter"), py::arg("score_epsilon") = 0.)
      .def("sample_trees", &UnrootedSBNInstance::SampleTrees,
           "Sample trees from the SBN and store them internally.", py::arg("count"))
      .def("make_indexer_representations",
           &UnrootedSBNInstance::MakeIndexerRepresentations,
           R"raw(
Make the indexer representation of each currently stored tree.
See the comment for ``IndexerRepresentationOf`` in ``sbn_maps.hpp`` to learn about what that means.
Note: any rootsplit or a PCSP that is not contained in the subsplit support is given an index equal
to the length of ``sbn_parameters``. No warning is given.
)raw")
      .def("make_psp_indexer_representations",
           &UnrootedSBNInstance::MakePSPIndexerRepresentations, R"raw(
Make the PSP indexer representation of each currently stored tree.
See the comments in ``psp_indexer.hpp`` to understand the layout.
)raw")
      .def("split_lengths", &UnrootedSBNInstance::SplitLengths,
           "Get the lengths of the current set of trees, indexed by splits.")
      .def("split_counters", &UnrootedSBNInstance::SplitCounters,
           "A testing method to count splits.")
      // ** Phylogenetic likelihood
      .def("set_rescaling", &UnrootedSBNInstance::SetRescaling,
           "Set whether BEAGLE's likelihood rescaling is used.")
      // ** Phylogenetic gradients
      // NOTE(review): "phylo_gradients" is bound here and again via
      // def_overload below — presumably intentional overload layering; verify.
      .def("phylo_gradients",
           static_cast<std::vector<PhyloGradient> (UnrootedSBNInstance::*)(
               std::optional<PhyloFlags>)>(&UnrootedSBNInstance::PhyloGradients),
           "Calculate gradients of parameters for the current set of trees.",
           py::arg("phylo_flags") = std::nullopt)
      .def("topology_gradients", &UnrootedSBNInstance::TopologyGradients,
           R"raw(Calculate gradients of SBN parameters for the current set of trees.
Should be called after sampling trees and setting branch lengths.)raw")
      // ** I/O
      .def("read_newick_file", &UnrootedSBNInstance::ReadNewickFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Newick file.")
      .def("read_nexus_file", &UnrootedSBNInstance::ReadNexusFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Nexus file.")
      // ** Member variables
      .def_readonly("psp_indexer", &UnrootedSBNInstance::psp_indexer_)
      .def_readwrite("tree_collection", &UnrootedSBNInstance::tree_collection_);
  // Expose sbn_parameters_ as a mutable read/write attribute.
  def_read_write_mutable(unrooted_sbn_instance_class, "sbn_parameters",
                         &UnrootedSBNInstance::sbn_parameters_);
  // Overload sets for unrooted phylo_gradients / log_likelihoods, mirroring
  // the rooted versions above: an optional-PhyloFlags overload plus template
  // overloads taking flag-name vectors (with optional set-bits and values).
  def_overload(
      unrooted_sbn_instance_class, "phylo_gradients",
      "Calculate gradients of parameters for the current set of trees.",
      std::tuple(static_cast<std::vector<PhyloGradient> (UnrootedSBNInstance::*)(
                     std::optional<PhyloFlags>)>(&UnrootedSBNInstance::PhyloGradients),
                 std::tuple(py::arg("phylo_flags") = std::nullopt)),
      std::tuple(&UnrootedSBNInstance::PhyloGradients<StringVector>,
                 std::tuple(py::arg("flag_names"), py::arg("use_defaults") = true)),
      std::tuple(
          &UnrootedSBNInstance::PhyloGradients<StringBoolVector>,
          std::tuple(py::arg("flag_names_and_set"), py::arg("use_defaults") = true)),
      std::tuple(
          &UnrootedSBNInstance::PhyloGradients<StringDoubleVector>,
          std::tuple(py::arg("flag_names_and_values"), py::arg("use_defaults") = true)),
      std::tuple(&UnrootedSBNInstance::PhyloGradients<StringBoolDoubleVector>,
                 std::tuple(py::arg("flag_names_and_set_and_values"),
                            py::arg("use_defaults") = true)));
  def_overload(
      unrooted_sbn_instance_class, "log_likelihoods",
      "Calculate log likelihoods for the current set of trees.",
      std::tuple(static_cast<std::vector<double> (UnrootedSBNInstance::*)(
                     std::optional<PhyloFlags>)>(&UnrootedSBNInstance::LogLikelihoods),
                 std::tuple(py::arg("phylo_flags") = std::nullopt)),
      std::tuple(&UnrootedSBNInstance::LogLikelihoods<StringVector>,
                 std::tuple(py::arg("flag_names"), py::arg("use_defaults") = true)),
      std::tuple(
          &UnrootedSBNInstance::LogLikelihoods<StringBoolVector>,
          std::tuple(py::arg("flag_names_and_set"), py::arg("use_defaults") = true)),
      std::tuple(
          &UnrootedSBNInstance::LogLikelihoods<StringDoubleVector>,
          std::tuple(py::arg("flag_names_and_values"), py::arg("use_defaults") = true)),
      std::tuple(&UnrootedSBNInstance::LogLikelihoods<StringBoolDoubleVector>,
                 std::tuple(py::arg("flag_names_and_set_and_values"),
                            py::arg("use_defaults") = true)));
// ** PhyloFlags -- for RootedSBNInstance and UnrootedSBNInstance
  // PhyloFlags helpers bound onto both instance classes at once.
  // NOTE(review): the unrooted rows below pass &PreRootedSBNInstance::...
  // member pointers (not PreUnrootedSBNInstance) — presumably both Pre* types
  // expose these members identically via a shared template/base; confirm this
  // is intentional rather than a copy/paste slip.
  def_multiclass(
      "init_phylo_flags", "Create a PhyloFlags object for instance.", std::tuple<>(),
      std::tuple(unrooted_sbn_instance_class, &PreRootedSBNInstance::MakePhyloFlags),
      std::tuple(rooted_sbn_instance_class, &PreRootedSBNInstance::MakePhyloFlags));
  def_multiclass("set_phylo_defaults", "Set whether to use flag defaults.",
                 std::tuple(py::arg("use_defaults") = true),
                 std::tuple(unrooted_sbn_instance_class,
                            &PreRootedSBNInstance::SetPhyloFlagDefaults),
                 std::tuple(rooted_sbn_instance_class,
                            &PreRootedSBNInstance::SetPhyloFlagDefaults));
  def_multiclass(
      "clear_phylo_flags", "Unset all flag settings.", std::tuple<>(),
      std::tuple(unrooted_sbn_instance_class, &PreRootedSBNInstance::ClearPhyloFlags),
      std::tuple(rooted_sbn_instance_class, &PreRootedSBNInstance::ClearPhyloFlags));
  // set_phylo_flag is bound per-class with the matching Pre* member pointer.
  unrooted_sbn_instance_class.def(
      "set_phylo_flag", &PreUnrootedSBNInstance::SetPhyloFlag,
      "Set function flag for given option.", py::arg("flag_name"),
      py::arg("set_to") = true, py::arg("set_value") = 1.0);
  rooted_sbn_instance_class.def("set_phylo_flag", &PreRootedSBNInstance::SetPhyloFlag,
                                "Set function flag for given option.",
                                py::arg("flag_name"), py::arg("set_to") = true,
                                py::arg("set_value") = 1.0);
// FUNCTIONS
  // Free module-level functions wrapping the RootedGradientTransforms helpers.
  m.def("ratio_gradient_of_height_gradient",
        &RootedGradientTransforms::RatioGradientOfHeightGradientEigen,
        "Obtain a ratio gradient from a height gradient.");
  m.def("log_det_jacobian_of_height_transform",
        &RootedGradientTransforms::LogDetJacobianHeightTransform,
        "Obtain the log determinant jacobian of a height transform.");
  m.def("gradient_log_det_jacobian_of_height_transform",
        &RootedGradientTransforms::GradientLogDeterminantJacobian,
        "Obtain the log determinant jacobian of the gradient");
// CLASS
// GPInstance
  // Python binding for GPInstance: the generalized-pruning driver object.
  // Sections: I/O and CSV export, estimation, GP likelihoods, DAG access, and
  // the various engines (GP, NNI, TP, tree engines).
  py::class_<GPInstance> gp_instance_class(m, "gp_instance",
                                           R"raw(A generalized pruning instance.)raw");
  gp_instance_class.def(py::init<const std::string &>())
      .def("print_status", &GPInstance::PrintStatus,
           "Print information about the instance.")
      .def("dag_summary_statistics", &GPInstance::DAGSummaryStatistics,
           "Return summary statistics about the DAG.")
      // NOTE(review): "make_dag" is bound again further down with a different
      // docstring — the second def re-registers the same member; presumably
      // redundant. Verify and drop one.
      .def("make_dag", &GPInstance::MakeDAG, "Build subsplit DAG.")
      .def("print_dag", &GPInstance::PrintDAG, "Print the subsplit DAG.")
      // ** I/O
      .def("read_newick_file", &GPInstance::ReadNewickFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Newick file.")
      .def("read_newick_file_gz", &GPInstance::ReadNewickFileGZ, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a gzip-ed Newick file.")
      .def("read_nexus_file", &GPInstance::ReadNexusFile, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a Nexus file.")
      .def("read_nexus_file_gz", &GPInstance::ReadNexusFileGZ, py::arg("path"),
           py::arg("sort_taxa") = true, "Read trees from a gzip-ed Nexus file.")
      .def("read_fasta_file", &GPInstance::ReadFastaFile,
           "Read a sequence alignment from a FASTA file.")
      .def("sbn_parameters_to_csv", &GPInstance::SBNParametersToCSV,
           R"raw(Write "pretty" formatted SBN parameters to a CSV.)raw")
      .def("sbn_prior_to_csv", &GPInstance::SBNPriorToCSV,
           R"raw(Write "pretty" formatted SBN parameters for the prior to a CSV.)raw")
      .def("branch_lengths_to_csv", &GPInstance::BranchLengthsToCSV,
           R"raw(Write "pretty" formatted branch lengths to a CSV.)raw")
      .def("per_gpcsp_llhs_to_csv", &GPInstance::PerGPCSPLogLikelihoodsToCSV,
           R"raw(Write "pretty" formatted per pcsp likelihoods to CSV.)raw")
      .def(
          "intermediate_bls_to_csv", &GPInstance::IntermediateBranchLengthsToCSV,
          R"raw(Write "pretty" formatted per pcsp branch lengths throughout optimization to CSV.)raw")
      .def(
          "intermediate_per_gpcsp_llhs_to_csv",
          &GPInstance::IntermediatePerGPCSPLogLikelihoodsToCSV,
          R"raw(Write "pretty" formatted per pcsp log likelihoods throughout optimization to CSV.)raw")
      .def("per_gpcsp_llh_surfaces_to_csv",
           &GPInstance::PerGPCSPLogLikelihoodSurfacesToCSV,
           R"raw(Write "pretty" formatted per pcsp log likelihood surfaces to CSV.)raw")
      .def(
          "tracked_optim_values_to_csv", &GPInstance::TrackedOptimizationValuesToCSV,
          R"raw(Write "pretty" formatted per pcsp branch lengths and llh values tracked from optimization to CSV.)raw")
      .def("export_trees", &GPInstance::ExportTrees,
           R"raw(Write out currently loaded trees to a Newick file
(using current GP branch lengths).)raw",
           py::arg("out_path"))
      .def("currently_loaded_trees_with_gp_branch_lengths",
           &GPInstance::CurrentlyLoadedTreesWithGPBranchLengths,
           "Collection of all rooted trees loaded into DAG.")
      .def("generate_complete_rooted_tree_collection",
           &GPInstance::GenerateCompleteRootedTreeCollection,
           "Generate collection of all rooted trees expressed in DAG.")
      .def(
          "export_all_generated_topologies", &GPInstance::ExportAllGeneratedTopologies,
          R"raw(Write out all topologies spanned by the current SBN DAG to a Newick file.
Doesn't require an Engine.)raw",
          py::arg("out_path"))
      .def("export_all_generated_trees", &GPInstance::ExportAllGeneratedTrees,
           R"raw(Write out all trees spanned by the current SBN DAG to a Newick file
(using current GP branch lengths). Requires an Engine.)raw",
           py::arg("out_path"))
      .def(
          "export_trees_with_a_pcsp", &GPInstance::ExportTreesWithAPCSP,
          R"raw(Write out trees with a given PCSP string to a Newick file (using current
GP branch lengths).)raw",
          py::arg("pcsp_string"), py::arg("newick_path"))
      .def("subsplit_dag_to_dot", &GPInstance::SubsplitDAGToDot,
           R"raw(Write the current subsplit DAG to a DOT format file.)raw")
      .def("get_branch_lengths", &GPInstance::GetBranchLengths,
           "Return branch lengths from the GPInstance.")
      .def(
          "build_edge_idx_to_pcsp_map",
          [](GPInstance &self) { return self.GetDAG().BuildInverseEdgeIndexer(); },
          "Build a map from DAG edge index to its corresponding PCSP bitset.")
      // ** Estimation
      .def("use_gradient_optimization", &GPInstance::UseGradientOptimization,
           "Use gradients for branch length optimization?",
           py::arg("use_gradients") = false)
      .def("hot_start_branch_lengths", &GPInstance::HotStartBranchLengths,
           "Use given trees to initialize branch lengths.")
      .def("gather_branch_lengths", &GPInstance::GatherBranchLengths,
           "Gather branch lengths into a map keyed by PCSP index for a given tree "
           "sample.")
      .def("calculate_hybrid_marginals", &GPInstance::CalculateHybridMarginals,
           "Calculate hybrid marginals.")
      .def("estimate_sbn_parameters", &GPInstance::EstimateSBNParameters,
           "Estimate the SBN parameters based on current branch lengths.")
      // NOTE(review): "hot_start_branch_length" (singular) binds the same
      // member as "hot_start_branch_lengths" above — apparently a deliberate
      // alias; confirm and document in the Python API if so.
      .def("hot_start_branch_length", &GPInstance::HotStartBranchLengths)
      .def("take_first_branch_length", &GPInstance::TakeFirstBranchLength)
      .def("estimate_branch_lengths", &GPInstance::EstimateBranchLengths,
           "Estimate branch lengths for the GPInstance.", py::arg("tol"),
           py::arg("max_iter"), py::arg("quiet") = false,
           py::arg("track_intermediate_iterations") = false,
           py::arg("optimization_method") = std::nullopt)
      .def("get_perpcsp_llh_surface", &GPInstance::GetPerGPCSPLogLikelihoodSurfaces,
           "Scan the likelihood surface for the pcsps in the GPInstance.",
           py::arg("steps"), py::arg("scale_min"), py::arg("scale_max"))
      .def("perturb_and_track_optimization_values",
           &GPInstance::PerturbAndTrackValuesFromOptimization,
           "Reinitiate optimization, perturbing one branch length at a time, and "
           "track branch length and per pcsp likelihoods.")
      // ** GP Likelihoods
      .def("populate_plvs", &GPInstance::PopulatePLVs, "Populate PLVs.")
      .def("compute_likelihoods", &GPInstance::ComputeLikelihoods,
           "Compute Likelihoods.")
      .def("get_per_pcsp_log_likelihoods", &GPInstance::GetPerPCSPLogLikelihoods,
           "Get Per-PCSP Log Likelihoods.")
      // ** DAG
      .def("make_dag", &GPInstance::MakeDAG, "Initialize Subsplit DAG.")
      .def(
          "get_dag", [](GPInstance &self) -> GPDAG * { return &self.GetDAG(); },
          py::return_value_policy::reference, "Get Subsplit DAG.")
      // ** DAG Engines
      .def("make_gp_engine", &GPInstance::MakeGPEngine, "Initialize GP Engine.",
           py::arg("rescaling_threshold") = GPEngine::default_rescaling_threshold_,
           py::arg("use_gradients") = false)
      .def(
          "get_gp_engine",
          [](GPInstance &self) -> GPEngine * { return &self.GetGPEngine(); },
          py::return_value_policy::reference, "Get GP Engine.")
      .def("make_nni_engine", &GPInstance::MakeNNIEngine, "Initialize NNI Engine.")
      .def(
          "get_nni_engine",
          [](GPInstance &self) -> NNIEngine * { return &self.GetNNIEngine(); },
          py::return_value_policy::reference, "Get Subsplit DAG.")
      .def("make_tp_engine", &GPInstance::MakeTPEngine, "Initialize TP Engine.")
      .def(
          "get_tp_engine",
          [](GPInstance &self) -> TPEngine * { return &self.GetTPEngine(); },
          py::return_value_policy::reference, "Get TP Engine.")
      .def("tp_engine_set_branch_lengths_by_taking_first",
           &GPInstance::TPEngineSetBranchLengthsByTakingFirst)
      .def("tp_engine_set_choice_map_by_taking_first",
           &GPInstance::TPEngineSetChoiceMapByTakingFirst,
           py::arg("use_subsplit_method") = true)
      // ** Tree Engines
      .def("get_likelihood_tree_engine", &GPInstance::GetLikelihoodTreeEngine,
           py::return_value_policy::reference)
      .def("get_parsimony_tree_engine", &GPInstance::GetParsimonyTreeEngine,
           py::return_value_policy::reference)
      // Builds a fresh FatBeagle (JC69 / constant sites / strict clock) per
      // call and evaluates the unrooted log likelihood of the given tree.
      .def("compute_tree_likelihood",
           [](const GPInstance &self, const RootedTree &tree) {
             auto beagle_pref_flags = BEAGLE_FLAG_VECTOR_SSE;
             PhyloModelSpecification model_spec{"JC69", "constant", "strict"};
             SitePattern site_pattern = self.MakeSitePattern();
             bool use_tip_states = true;
             auto tree_engine =
                 FatBeagle(model_spec, site_pattern, beagle_pref_flags, use_tip_states);
             return tree_engine.UnrootedLogLikelihood(tree);
           })
      // Runs Sankoff parsimony on the tree topology via a memory-mapped
      // scratch file derived from the instance's mmap path.
      .def("compute_tree_parsimony",
           [](const GPInstance &self, const RootedTree &tree) {
             auto site_pattern = self.MakeSitePattern();
             auto mmap_file_path = self.GetMMapFilePath() + ".sankoff";
             auto tree_engine = SankoffHandler(site_pattern, mmap_file_path);
             tree_engine.RunSankoff(tree.Topology());
             return tree_engine.ParsimonyScore();
           });
// ** DAGs
py::class_<GPDAG> dag_class(m, "dag", "Subsplit DAG for performing GPOperations.");
dag_class.def("__eq__", [](const GPDAG &self, const GPDAG &other) { self == other; })
.def("node_count", &GPDAG::NodeCount, "Get number of nodes contained in DAG.")
.def("edge_count", &GPDAG::EdgeCountWithLeafSubsplits,
"Get number of edges contained in DAG.")
.def("taxon_count", &GPDAG::TaxonCount, "Get number of taxa in DAG.")
.def("topology_count", &GPDAG::TopologyCount,
"Get number of unique topologies contained in DAG.")
.def("get_nni", &GPDAG::GetNNI, "Get NNI for the given DAG edge.")
.def("get_node_id",
[](const GPDAG &self, const Bitset &bitset) {
return self.GetDAGNodeId(bitset);
})
.def("get_edge_id", [](const GPDAG &self,
const Bitset &bitset) { return self.GetEdgeIdx(bitset); })
.def("get_edge_id", [](const GPDAG &self,
const NNIOperation &nni) { return self.GetEdgeIdx(nni); })
.def("get_taxon_map", &GPDAG::GetTaxonMap,
"Get map of taxon names contained in DAG.")
.def("build_set_of_node_bitsets", &GPDAG::BuildSetOfNodeBitsets,
"Build a set of node Subsplit bitsets contained in DAG.")
.def("build_set_of_edge_bitsets", &GPDAG::BuildSetOfEdgeBitsets,
"Build a set of edge PCSP bitsets contained in DAG.")
.def("contains_node",
[](const GPDAG &self, const Bitset &bitset) {
return self.ContainsNode(bitset);
})
.def("contains_edge",
[](const GPDAG &self, const Bitset &bitset) {
return self.ContainsEdge(bitset);
})
.def("contains_nni", &GPDAG::ContainsNNI)
.def("contains_tree", &GPDAG::ContainsTree, "Check whether DAG contains tree.",
py::arg("tree"), py::arg("is_quiet") = true)
.def("contains_topology", &GPDAG::ContainsTopology,
"Check whether DAG contains topology.")
.def("is_valid_add_node_pair", &GPDAG::IsValidAddNodePair,
"Checks whether a given parent/child subsplit pair is valid to be added to "
"the DAG.")
.def(
"add_node_pair",
[](GPDAG &self, const Bitset &parent, const Bitset &child) {
self.AddNodePair(parent, child);
},
"Add parent/child subsplit pair to DAG.")
.def("add_nodes", &GPDAG::AddNodes)
.def("add_edges", &GPDAG::AddEdges)
.def(
"fully_connect", [](GPDAG &self) { self.FullyConnect(); },
"Adds all valid edges with present nodes to the DAG.")
// ** I/O
.def("tree_to_newick_topology", &GPDAG::TreeToNewickTopology)
.def("tree_to_newick_tree", &GPDAG::TreeToNewickTree)
.def("topology_to_newick_topology", &GPDAG::TopologyToNewickTopology)
.def("generate_all_topologies", &GPDAG::GenerateAllTopologies)
.def("to_newick_of_all_topologies", &GPDAG::ToNewickOfAllTopologies)
.def("generate_covering_topologies", &GPDAG::GenerateCoveringTopologies)
.def("to_newick_of_covering_topologies", &GPDAG::ToNewickOfCoveringTopologies);
py::class_<GraftDAG> graft_dag_class(m, "graft_dag",
"Subsplit DAG for grafting nodes and edges.");
graft_dag_class
.def("compare_to_dag",
[](const GraftDAG &self, const GPDAG &other) { self.CompareToDAG(other); })
.def("graft_node_count", &GraftDAG::GraftNodeCount,
"Get number of graft nodes appended to DAG.")
.def("graft_edge_count", &GraftDAG::GraftEdgeCount,
"Get number of graft edges appended to DAG.")
.def("host_node_count", &GraftDAG::HostNodeCount,
"Get number of host nodes contained in DAG.")
.def("host_edge_count", &GraftDAG::HostEdgeCount,
"Get number of host edges contained in DAG.")
.def("get_host_dag", &GraftDAG::GetHostDAG, py::return_value_policy::reference,
"Get underlying Host DAG.")
.def("is_valid_add_node_pair", &GraftDAG::IsValidAddNodePair,
"Checks whether a given parent/child subsplit pair is valid to be added to "
"the DAG.")
.def(
"add_node_pair",
[](GraftDAG &self, const Bitset &parent, const Bitset &child) {
self.AddNodePair(parent, child);
},
"Add parent/child subsplit pair to DAG.");
// ** Engines for DAGs
py::class_<GPEngine> gp_engine_class(m, "gp_engine",
"An engine for computing Generalized Pruning.");
gp_engine_class.def("node_count", &GPEngine::GetNodeCount, "Get number of nodes.")
.def("plv_count", &GPEngine::GetPLVCount, "Get number of PLVs.")
.def("edge_count", &GPEngine::GetGPCSPCount, "Get number of edges.");
py::class_<TPEngine> tp_engine_class(m, "tp_engine",
"An engine for computing Top Pruning.");
tp_engine_class.def("node_count", &TPEngine::GetNodeCount, "Get number of nodes.")
.def("edge_count", &TPEngine::GetEdgeCount, "Get number of edges.")
.def("get_top_tree_score", &TPEngine::GetTopTreeScore)
.def(
"get_tree_source",
[](const TPEngine &self, const EdgeId edge_id) {
return self.GetTreeSource(edge_id);
},
"Get tree source of given edge")
.def("get_top_tree_with_edge", &TPEngine::GetTopTreeWithEdge,
"Output the top tree of tree containing given edge.")
.def("get_top_tree_likelihood_with_edge", &TPEngine::GetTopTreeLikelihood,
"Output the top tree likelihood containing given edge.")
.def("get_top_tree_parsimony_with_edge", &TPEngine::GetTopTreeParsimony,
"Output the top tree parsimony containing given edge.")
.def("get_top_tree_topology_with_edge", &TPEngine::GetTopTopologyWithEdge,
"Output the top tree of tree containing given edge.")
// ** Branch Length Optimization
.def("get_branch_lengths", [](TPEngine &self) { return self.GetBranchLengths(); })
.def("optimize_branch_lengths", &TPEngine::OptimizeBranchLengths,
py::arg("check_branch_convergence") = std::nullopt)
// ** Settings
.def("is_optimize_new_edges", &TPEngine::IsOptimizeNewEdges)
.def("set_optimize_new_edges", &TPEngine::SetOptimizeNewEdges)
.def("set_optimization_max_iteration", &TPEngine::SetOptimizationMaxIteration)
.def("get_optimization_max_iteration", &TPEngine::GetOptimizationMaxIteration)
.def("get_use_best_edge_map", &TPEngine::GetUseBestEdgeMap)
.def("set_use_best_edge_map", &TPEngine::SetUseBestEdgeMap)
.def("is_init_proposed_branch_lengths_with_dag",
&TPEngine::IsInitProposedBranchLengthsWithDAG)
.def("set_init_proposed_branch_lengths_with_dag",
&TPEngine::SetInitProposedBranchLengthsWithDAG)
.def("is_fix_proposed_branch_lengths_from_dag",
&TPEngine::IsFixProposedBranchLengthsFromDAG)
.def("set_fix_proposed_branch_lengths_from_dag",
&TPEngine::SetFixProposedBranchLengthsFromDAG)
// ** I/O
.def("build_map_from_pcsp_to_edge_choice_pcsps",
&TPEngine::BuildMapFromPCSPToEdgeChoicePCSPs)
.def("build_map_from_pcsp_to_pv_hashes", &TPEngine::BuildMapFromPCSPToPVHashes)
.def("build_map_from_pcsp_to_pv_values", &TPEngine::BuildMapFromPCSPToPVValues)
.def("build_map_from_pcsp_to_branch_length",
&TPEngine::BuildMapFromPCSPToBranchLength)
.def("build_map_from_pcsp_to_score", &TPEngine::BuildMapFromPCSPToScore)
.def("build_map_of_proposed_nnis_to_best_pre_nnis",
&TPEngine::BuildMapOfProposedNNIsToBestPreNNIs, py::arg("post_nnis"))
.def("build_map_of_proposed_nni_pcsps_to_best_pre_nni_pcsps",
&TPEngine::BuildMapOfProposedNNIPCSPsToBestPreNNIPCSPs, py::arg("post_nnis"),
py::arg("prev_edge_count") = std::nullopt,
py::arg("edge_reindexer") = std::nullopt)
.def("build_map_of_tree_id_to_top_topologies",
&TPEngine::BuildMapOfTreeIdToTopTopologies)
.def("to_newick_of_top_topologies", &TPEngine::ToNewickOfTopTopologies)
.def("to_newick_of_top_trees", &TPEngine::ToNewickOfTopTrees);
py::class_<TPChoiceMap> tp_choice_map_class(
m, "tp_choice_map", "An choice map for finding the top tree in TPEngine.");
tp_choice_map_class.def("__str__", &TPChoiceMap::ToString)
.def(
"edge_choice_to_string",
[](const TPChoiceMap &self, const EdgeId edge_id) {
return self.EdgeChoiceToString(edge_id);
},
"Output the edge choice map for a given edge in DAG.");
py::class_<NNIEngine> nni_engine_class(
m, "nni_engine", "An engine for computing NNI Systematic Search.");
nni_engine_class
// Access
.def(
"get_tp_engine",
[](const NNIEngine &self) -> const TPEngine * { return &self.GetTPEngine(); },
py::return_value_policy::reference, "Get TP Engine.")
.def(
"get_graft_dag", [](NNIEngine &self) { return self.GetGraftDAG(); },
py::return_value_policy::reference, "Get the Graft DAG.")
.def("get_branch_lengths", &NNIEngine::GetBranchLengths,
"Get DAG branch lengths.")
// NNI Sets
.def("adjacent_nnis", &NNIEngine::GetAdjacentNNIs, "Get NNIs adjacent to DAG.")
.def("new_adjacent_nnis", &NNIEngine::GetNewAdjacentNNIs,
"Get new NNIs adjacent to DAG.")
.def("accepted_nnis", &NNIEngine::GetAcceptedNNIs, "Get NNIs accepted into DAG.")
.def("rejected_nnis", &NNIEngine::GetRejectedNNIs, "Get NNIs rejected from DAG.")
.def("scored_nnis", &NNIEngine::GetScoredNNIs,
"Get Scored NNIs of current iteration.")
.def("past_scored_nnis", &NNIEngine::GetPastScoredNNIs,
"Get scores from NNIs from previous iterations.")
.def("nnis_to_rescore", &NNIEngine::GetNNIsToRescore,
"Get NNIs to be scored (or rescored) in current iteration.")
.def("nnis_to_reevaluate", &NNIEngine::GetNNIsToReevaluate,
"Get NNIs to be evaluated (or reevaluated) in current iteration.")
// Counts
.def("adjacent_nni_count", &NNIEngine::GetAdjacentNNICount,
"Get number of NNIs adjacent to DAG.")
.def("new_adjacent_nni_count", &NNIEngine::GetNewAdjacentNNICount,
"Get number of adjacent NNIs not seen in previous iterations.")
.def("accepted_nni_count", &NNIEngine::GetAcceptedNNICount,
"Get number of adjacent NNIs were accepted by the filter on current "
"iteration.")
.def("rejected_nni_count", &NNIEngine::GetRejectedNNICount,
"Get number of adjacent NNIs were rejected by the filter on current "
"iteration.")
.def("past_accepted_nni_count", &NNIEngine::GetPastAcceptedNNICount,
"Get number of adjacent NNIs were accepted by the filter on all previous "
"iterations.")
.def("past_rejected_nni_count", &NNIEngine::GetPastRejectedNNICount,
"Get number of adjacent NNIs were rejected by the filter on all previous "
"iterations.")
.def("scored_nni_count", &NNIEngine::GetScoredNNICount,
"Get number of current NNI scores.")
.def("iter_count", &NNIEngine::GetIterationCount,
"Get number of iterations of NNI search run.")
// Search primary routines
.def("run", &NNIEngine::Run, "Primary runner for NNI systematic search.",
py::arg("is_quiet") = true)
.def("run_init", &NNIEngine::RunInit, "Run initialization step of NNI search.",
py::arg("is_quiet") = true)
.def("run_main_loop", &NNIEngine::RunMainLoop, "Run main loop of NNI search.",
py::arg("is_quiet") = true)
.def("run_post_loop", &NNIEngine::RunPostLoop, "Run post loop of NNI search.",
py::arg("is_quiet") = true)
// Search subroutines
// Init
.def("reset_nni_data", &NNIEngine::ResetNNIData)
.def("sync_adjacent_nnis_with_dag", &NNIEngine::SyncAdjacentNNIsWithDAG,
py::arg("on_init") = false)
.def("prep_eval_engine", &NNIEngine::PrepEvalEngine)
.def("filter_init", &NNIEngine::FilterInit)
// Main Loop subroutines
.def("graft_adjacent_nnis_to_dag", &NNIEngine::GraftAdjacentNNIsToDAG,
py::arg("is_quiet") = true)
.def("filter_pre_score", &NNIEngine::FilterPreScore)
.def("filter_score_adjacent_nnis", &NNIEngine::FilterScoreAdjacentNNIs)
.def("filter_post_score", &NNIEngine::FilterPostScore)
.def("filter_evaluate_adjacent_nnis", &NNIEngine::FilterEvaluateAdjacentNNIs)
.def("remove_all_graft_nnis_from_dag", &NNIEngine::RemoveAllGraftedNNIsFromDAG)
.def("add_accepted_nnis_to_dag", &NNIEngine::AddAcceptedNNIsToDAG,
py::arg("is_quiet") = true)
// Post Loop subroutines
.def("update_rejected_nnis", &NNIEngine::UpdateRejectedNNIs)
.def("update_adjacent_nnis", &NNIEngine::UpdateAdjacentNNIs)
.def("update_scored_nnis", &NNIEngine::UpdateScoredNNIs)
.def("update_accepted_nnis", &NNIEngine::UpdateAcceptedNNIs)
// Filtering schemes
.def("set_no_filter", &NNIEngine::SetNoFilter,
"Set filter to either accept (True) or deny (False) all NNIs.",
py::arg("set_all_nni_to_accept"))
.def("set_gp_likelihood_cutoff_filtering_scheme",
&NNIEngine::SetGPLikelihoodCutoffFilteringScheme,
"Set filtering scheme to use Generalized Pruning based on constant score "
"cutoff.")
.def("set_tp_likelihood_cutoff_filtering_scheme",
&NNIEngine::SetTPLikelihoodCutoffFilteringScheme,
"Set filtering scheme to use Top Pruning with Likelihoods based on constant "
"score cutoff.")
.def("set_tp_parsimony_cutoff_filtering_scheme",
&NNIEngine::SetTPParsimonyCutoffFilteringScheme,
"Set filtering scheme to use Top Pruning with Parsimony based on constant "
"score cutoff.")
.def("set_gp_likelihood_drop_filtering_scheme",
&NNIEngine::SetGPLikelihoodDropFilteringScheme,
"Set filtering scheme to use Generalized Pruning based on based on drop "
"from best score.")
.def("set_tp_likelihood_drop_filtering_scheme",
&NNIEngine::SetTPLikelihoodDropFilteringScheme,
"Set filtering scheme to use Top Pruning with Likelihoods based on drop "
"from best score.")
.def("set_tp_parsimony_drop_filtering_scheme",
&NNIEngine::SetTPParsimonyDropFilteringScheme,
"Set filtering scheme to use Top Pruning with Parsimony based on drop from "
"best score.")
.def("set_top_k_score_filtering_scheme", &NNIEngine::SetTopKScoreFilteringScheme,
"Set filter scheme that accepts the top N best-scoring NNIs.",
py::arg("top_k"), py::arg("max_is_best") = true)
// Custom filters
.def("set_filter_init_function", &NNIEngine::SetFilterInitFunction)
.def("set_filter_pre_score_function", &NNIEngine::SetFilterPreScoreFunction)
.def("set_filter_score_loop_function", &NNIEngine::SetFilterScoreLoopFunction)
.def("set_filter_post_score_function", &NNIEngine::SetFilterPostScoreFunction)
.def("set_filter_evaluate_function", &NNIEngine::SetFilterEvaluateFunction)
.def("set_filter_evaluate_loop_function",
&NNIEngine::SetFilterEvaluateLoopFunction)
// Options
.def("set_include_rootsplits", &NNIEngine::SetIncludeRootsplitNNIs,
"Set whether to include rootsplits in adjacent NNIs")
.def("set_reevaluate_rejected_nnis", &NNIEngine::SetReevaluateRejectedNNIs,
"Set whether to re-evaluate NNIs rejected by a previous iteration.")
.def("set_rescore_rejected_nnis", &NNIEngine::SetRescoreRejectedNNIs,
"Set whether to re-score NNIs rejected by a previous iteration.")
// Scoring
.def("get_score_by_nni", &NNIEngine::GetScoreByNNI, "Get score by NNI.")
.def("get_score_by_edge", &NNIEngine::GetScoreByEdge, "Get score by EdgeId.");
py::class_<SankoffHandler> parsimony_engine_class(
m, "parsimony_tree_engine",
"An engine that computes parsimonies for tree topologies.");
parsimony_engine_class.def("compute_parsimony",
[](SankoffHandler &self, const RootedTree &tree) {
self.RunSankoff(tree.Topology());
return self.ParsimonyScore();
});
py::class_<FatBeagle> likelihood_engine_class(
m, "likelihood_tree_engine",
"An engine that computes likelihoods for tree topologies.");
likelihood_engine_class.def("compute_likelihood",
[](FatBeagle &self, const RootedTree &tree) {
return self.UnrootedLogLikelihood(tree);
});
// ** Node Topology
py::class_<Node::Topology> topology_class(
m, "node_topology", "A node in a node topology representing a tree.");
topology_class.def("__str__", [](const Node::Topology &self) { self->Leaves(); })
.def(
"id", [](const Node::Topology &self) { return self->Id(); },
"Unique node id within topology.")
.def(
"to_leaves", [](const Node::Topology &self) { return self->Leaves(); },
"Output node to leave bitset.")
.def(
"build_subsplit",
[](const Node::Topology &self) { return self->BuildSubsplit(); },
"Build subsplit node bitset of node.")
.def(
"build_pcsp",
[](const Node::Topology &self, const size_t child_id) {
Assert(child_id < 2, "child_count must be 0 (left) or 1 (right).");
auto child_clade =
(child_id == 0) ? SubsplitClade::Left : SubsplitClade::Right;
return self->BuildPCSP(child_clade);
},
"Build PCSP edge bitset of edge below node.")
.def(
"build_set_of_subsplits",
[](const Node::Topology &self) { return self->BuildSetOfSubsplits(); },
"Build a vector of all subsplit bitsets for all nodes in topology.")
.def(
"build_set_of_pcsps",
[](const Node::Topology &self) { return self->BuildSetOfPCSPs(); },
"Build vector of all PCSP edge bitsets for all edges in topology.")
.def(
"to_newick", [](const Node::Topology &self) { return self->Newick(); },
"Output to Newick string.");
// ** Bitsets, Subsplits, PCSPs, NNIs, etc
py::class_<Bitset> bitset_class(
m, "bitset", "A bitset representing the taxon membership of a Subsplit or PCSP.");
bitset_class.def(py::init<const std::string &>())
.def("__str__", &Bitset::ToString)
.def("__repr__", &Bitset::ToHashString)
.def("__eq__",
[](const Bitset &self, const Bitset &other) { return self == other; })
.def("__hash__", &Bitset::Hash)
.def("to_string", &Bitset::ToString)
.def("to_hash_string", &Bitset::ToHashString, py::arg("length") = 16)
.def("subsplit_to_hash_string", &Bitset::SubsplitToHashString,
py::arg("length") = 16)
.def("pcsp_to_hash_string", &Bitset::PCSPToHashString, py::arg("length") = 16)
.def("clade_get_count", &Bitset::Count)
.def("subsplit_get_clade",
[](const Bitset &self, const size_t i) {
SubsplitClade clade =
(i == 0) ? SubsplitClade::Left : SubsplitClade::Right;
return self.SubsplitGetClade(clade);
})
.def("subsplit_is_uca", &Bitset::SubsplitIsUCA)
.def("subsplit_is_rootsplit", &Bitset::SubsplitIsRootsplit)
.def("subsplit_is_leaf", &Bitset::SubsplitIsLeaf)
.def("subsplit_to_string", &Bitset::SubsplitToString,
"Output as Subsplit-style string.")
.def("pcsp_to_string", &Bitset::PCSPToString, "Output as PCSP-style string.")
.def("pcsp_get_parent_subsplit", &Bitset::PCSPGetParentSubsplit,
"Get parent subsplit from PCSP.")
.def("pcsp_get_child_subsplit", &Bitset::PCSPGetChildSubsplit,
"Get child subsplit from PCSP.");
m.def(
"subsplit",
[](const std::string &left_clade, const std::string &right_clade) {
return Bitset::Subsplit(left_clade, right_clade);
},
"A Subsplit Bitset constructed from two Bitset Clades.");
m.def(
"pcsp",
[](const Bitset &parent, const Bitset &child) {
return Bitset::PCSP(parent, child);
},
"A PCSP Bitset constructed from two Bitset Subsplits.");
py::class_<NodeId> node_id_class(m, "node_id",
"An ID representing a unique node within a DAG.");
node_id_class.def(py::init<const size_t>())
.def("__str__", [](const NodeId self) { return self.ToString(); })
.def("__eq__", [](const NodeId lhs, const NodeId rhs) { return lhs == rhs; })
.def("value", [](const NodeId &self) -> int { return self.value_; });
py::class_<EdgeId> edge_id_class(m, "edge_id",
"An ID representing a unique edge within a DAG.");
edge_id_class.def(py::init<const size_t>())
.def("__str__", [](const EdgeId self) { return self.ToString(); })
.def("__eq__", [](const EdgeId lhs, const EdgeId rhs) { return lhs == rhs; })
.def("value", [](const EdgeId &self) -> int { return self.value_; });
py::class_<TaxonId> taxon_id_class(m, "taxon_id",
"An ID representing a unique edge within a DAG.");
taxon_id_class.def(py::init<const size_t>())
.def("__str__", [](const TaxonId self) { return self.ToString(); })
.def("__eq__", [](const TaxonId lhs, const TaxonId rhs) { return lhs == rhs; })
.def("value", [](const TaxonId &self) -> int { return self.value_; });
py::class_<TreeId> tree_id_class(m, "tree_id", "An ID representing a unique tree.");
tree_id_class.def(py::init<const size_t>())
.def("__str__", [](const TreeId self) { return self.ToString(); })
.def("__eq__", [](const TreeId lhs, const TreeId rhs) { return lhs == rhs; })
.def("value", [](const TreeId &self) -> int { return self.value_; });
py::class_<NNIOperation> nni_op_class(
m, "nni_op",
"A proposed NNI Operation for the DAG. Repesents the PCSP to be added.");
nni_op_class.def(py::init<const std::string &, const std::string &>())
.def("__str__", &NNIOperation::ToString)
.def("__repr__", &NNIOperation::ToHashString)
.def("__eq__",
[](const NNIOperation &lhs, const NNIOperation &rhs) { return lhs == rhs; })
.def("__hash__", &NNIOperation::Hash)
.def("to_hash_string", &NNIOperation::ToHashString, py::arg("length") = 16)
.def("to_string", &NNIOperation::ToString)
.def("get_parent", &NNIOperation::GetParent, "Get parent Subsplit of PCSP.")
.def("get_child", &NNIOperation::GetChild, "Get child Subsplit of PCSP.")
.def("get_central_edge_pcsp", &NNIOperation::GetCentralEdgePCSP,
"Get central edge PCSP.")
.def("is_valid", &NNIOperation::IsValid,
"Checks that NNI Operation is a valid PCSP.");
// If you want to be sure to get all of the stdout and cerr messages, put your
// Python code in a context like so:
// `with bito.ostream_redirect(stdout=True, stderr=True):`
// https://pybind11.readthedocs.io/en/stable/advanced/pycpp/utilities.html#capturing-standard-output-from-ostream
py::add_ostream_redirect(m, "ostream_redirect");
// MODULE
py::module beagle_flags = m.def_submodule("beagle_flags",
R"raw(
Flags that can be passed to BEAGLE.
They are used in Python like ``beagle_flags.PROCESSOR_GPU``.
Note that we expose only a subset of the BEAGLE flags on purpose.
)raw");
py::enum_<BeagleFlags>(beagle_flags, "beagle_flag")
.value("PRECISION_SINGLE", BEAGLE_FLAG_PRECISION_SINGLE,
"Single precision computation")
.value("PRECISION_DOUBLE", BEAGLE_FLAG_PRECISION_DOUBLE,
"Double precision computation")
.value("COMPUTATION_SYNCH", BEAGLE_FLAG_COMPUTATION_SYNCH,
"Synchronous computation (blocking)")
.value("COMPUTATION_ASYNCH", BEAGLE_FLAG_COMPUTATION_ASYNCH,
"Asynchronous computation (non-blocking)")
.value("VECTOR_SSE", BEAGLE_FLAG_VECTOR_SSE, "SSE computation")
.value("VECTOR_NONE", BEAGLE_FLAG_VECTOR_NONE, "No vector computation")
.value("THREADING_CPP", BEAGLE_FLAG_THREADING_CPP, "C++11 threading")
.value("THREADING_OPENMP", BEAGLE_FLAG_THREADING_OPENMP, "OpenMP threading")
.value("THREADING_NONE", BEAGLE_FLAG_THREADING_NONE, "No threading (default)")
.value("PROCESSOR_CPU", BEAGLE_FLAG_PROCESSOR_CPU, "Use CPU as main processor")
.value("PROCESSOR_GPU", BEAGLE_FLAG_PROCESSOR_GPU, "Use GPU as main processor")
.value("FRAMEWORK_CUDA", BEAGLE_FLAG_FRAMEWORK_CUDA,
"Use CUDA implementation with GPU resources")
.value("FRAMEWORK_OPENCL", BEAGLE_FLAG_FRAMEWORK_OPENCL,
"Use OpenCL implementation with GPU resources")
.value("FRAMEWORK_CPU", BEAGLE_FLAG_FRAMEWORK_CPU, "Use CPU implementation")
.value("PARALLELOPS_STREAMS", BEAGLE_FLAG_PARALLELOPS_STREAMS,
"Operations in updatePartials may be assigned to separate device streams")
.value("PARALLELOPS_GRID", BEAGLE_FLAG_PARALLELOPS_GRID,
"Operations in updatePartials may be folded into single kernel launch "
"(necessary for partitions; typically performs better for problems with "
"fewer pattern sites)")
.export_values();
// ** Export Keys
auto ExportFlagsToModuleAttributes = [](py::module &module,
const PhyloFlagOptionSet &flag_set) {
for (const auto &[name, flag] : flag_set.GetAllNames()) {
module.attr(name.c_str()) = py::cast(std::string(flag));
}
};
auto ExportMapkeysToModuleAttributes = [](py::module &module,
const PhyloMapkeySet &mapkey_set) {
for (const auto &[name, mapkey] : mapkey_set.GetAllNames()) {
std::ignore = name;
module.attr(mapkey.GetName().c_str()) = py::cast(std::string(mapkey.GetKey()));
}
};
// * Export PhyloFlagOptions
py::module phylo_flags = m.def_submodule("phylo_flags",
R"raw(
Option flags for functions such as ``SBNInstance::phylo_gradient`` and ``SBNInstanct::log_likelihood``.
)raw");
ExportFlagsToModuleAttributes(phylo_flags, PhyloGradientFlagOptions::set_);
ExportFlagsToModuleAttributes(phylo_flags, LogLikelihoodFlagOptions::set_);
// * Export PhyloMapkeys
py::module phylo_model_mapkeys = m.def_submodule("phylo_model_mapkeys",
R"raw(
Dict keys for accessing the PhyloModel, returned by ``SBNInstance::get_phylo_model_param_block_map``.
)raw");
ExportMapkeysToModuleAttributes(phylo_model_mapkeys, PhyloModelMapkeys::set_);
py::module phylo_gradient_mapkeys = m.def_submodule("phylo_gradient_mapkeys",
R"raw(
Dict keys for accessing the GradientMap, returned by ``SBNInstance::phylo_gradient``.
)raw");
ExportMapkeysToModuleAttributes(phylo_gradient_mapkeys, PhyloGradientMapkeys::set_);
}
| 68,182
|
C++
|
.cpp
| 1,190
| 47.792437
| 134
| 0.647113
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,024
|
block_model.cpp
|
phylovi_bito/src/block_model.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "block_model.hpp"
#include "sugar.hpp"
// Accessor for this model's block specification (the map from parameter-block
// keys to coordinate ranges within a full parameter vector).
const BlockSpecification& BlockModel::GetBlockSpecification() const {
  return block_specification_;
}
// Return the segment of `param_vector` that corresponds to `key`, as laid out
// by the block specification. Pure delegation; the returned ref aliases
// `param_vector`'s storage.
EigenVectorXdRef BlockModel::ExtractSegment(EigenVectorXdRef param_vector,
                                            std::string key) const {
  return block_specification_.ExtractSegment(param_vector, key);
}
// Return the block of `param_matrix` that corresponds to `key`, as laid out
// by the block specification. Pure delegation; the returned ref aliases
// `param_matrix`'s storage.
EigenMatrixXdRef BlockModel::ExtractBlock(EigenMatrixXdRef param_matrix,
                                          std::string key) const {
  return block_specification_.ExtractBlock(param_matrix, key);
}
// Merge another BlockSpecification into ours under `sub_entire_key`
// (delegates to BlockSpecification::Append).
void BlockModel::Append(const std::string& sub_entire_key, BlockSpecification other) {
  block_specification_.Append(sub_entire_key, other);
}
| 838
|
C++
|
.cpp
| 18
| 40
| 86
| 0.726044
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,025
|
rooted_tree_collection.cpp
|
phylovi_bito/src/rooted_tree_collection.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "rooted_tree_collection.hpp"
#include "csv.hpp"
#include "taxon_name_munging.hpp"
// Build a rooted collection from an already-parsed pre-rooted collection plus
// a tag-to-date map. (The trailing ';' after the body is a harmless empty
// declaration.)
RootedTreeCollection::RootedTreeCollection(
    const PreRootedTreeCollection& pre_collection, const TagDateMap& tag_date_map)
    : PreRootedTreeCollection(pre_collection), tag_date_map_(tag_date_map){};
// Promote a plain TreeCollection to a RootedTreeCollection: each member tree
// is converted to a RootedTree via RootedTree's converting constructor, and
// the tag-taxon map is carried over unchanged.
RootedTreeCollection RootedTreeCollection::OfTreeCollection(
    const TreeCollection& trees) {
  TTreeVector promoted;
  promoted.reserve(trees.TreeCount());
  for (const auto& unrooted : trees.Trees()) {
    promoted.emplace_back(unrooted);
  }
  return RootedTreeCollection(std::move(promoted), trees.TagTaxonMap());
}
// Make a new collection containing `number_of_times` copies of our first
// tree, carrying over the current tag-date map.
RootedTreeCollection RootedTreeCollection::BuildCollectionByDuplicatingFirst(
    size_t number_of_times) {
  return RootedTreeCollection(
      PreRootedTreeCollection::BuildCollectionByDuplicatingFirst(number_of_times),
      tag_date_map_);
}
// Assign every taxon the same (constant) date, then push tip dates into the
// trees and optionally (re)initialize each tree as a time tree from its
// branch lengths.
void RootedTreeCollection::SetDatesToBeConstant(
    bool initialize_time_trees_using_branch_lengths) {
  tag_date_map_ = TaxonNameMunging::ConstantDatesForTagTaxonMap(TagTaxonMap());
  ProcessTreeDates(initialize_time_trees_using_branch_lengths);
}
// Extract dates embedded in the taxon names themselves, then push tip dates
// into the trees and optionally (re)initialize each tree as a time tree.
void RootedTreeCollection::ParseDatesFromTaxonNames(
    bool initialize_time_trees_using_branch_lengths) {
  tag_date_map_ = TaxonNameMunging::ParseDatesFromTagTaxonMap(TagTaxonMap());
  ProcessTreeDates(initialize_time_trees_using_branch_lengths);
}
// Read taxon dates from a CSV file, then push tip dates into the trees and
// optionally (re)initialize each tree as a time tree from its branch lengths.
void RootedTreeCollection::ParseDatesFromCSV(
    const std::string& csv_path, bool initialize_time_trees_using_branch_lengths) {
  ParseDatesFromCSVButDontInitializeTimeTrees(csv_path);
  ProcessTreeDates(initialize_time_trees_using_branch_lengths);
}
// Rebuild tag_date_map_ from a CSV of taxon-name -> date entries. Every taxon
// in the current collection must appear in the CSV (otherwise we Failwith).
// Dates are then shifted so they are relative to the maximum date.
void RootedTreeCollection::ParseDatesFromCSVButDontInitializeTimeTrees(
    const std::string& csv_path) {
  tag_date_map_.clear();
  const auto taxon_date_map = CSV::StringDoubleMapOfCSV(csv_path);
  for (const auto& [tag, taxon] : TagTaxonMap()) {
    const auto date_iter = taxon_date_map.find(taxon);
    if (date_iter == taxon_date_map.end()) {
      Failwith("Taxon " + taxon +  // NOLINT
               " found in current tree collection but not in " + csv_path);
    }
    SafeInsert(tag_date_map_, tag, date_iter->second);
  }
  TaxonNameMunging::MakeDatesRelativeToMaximum(tag_date_map_);
}
// Push the current tag-date map into every tree in the collection.
void RootedTreeCollection::SetTipDates() {
  for (auto& tree : trees_) {
    tree.SetTipDates(tag_date_map_);
  }
}
// Push tip dates into every tree; if requested, also initialize each tree's
// time-tree representation from its branch lengths.
void RootedTreeCollection::ProcessTreeDates(
    bool initialize_time_trees_using_branch_lengths) {
  SetTipDates();
  if (!initialize_time_trees_using_branch_lengths) {
    return;
  }
  for (auto& tree : trees_) {
    tree.InitializeTimeTreeUsingBranchLengths();
  }
}
| 2,726
|
C++
|
.cpp
| 67
| 37.208955
| 83
| 0.766616
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,026
|
unrooted_tree.cpp
|
phylovi_bito/src/unrooted_tree.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "unrooted_tree.hpp"
// Construct from a topology and a branch-length vector; the topology must
// have a trifurcation at its root (asserted).
UnrootedTree::UnrootedTree(const Node::NodePtr& topology,
                           BranchLengthVector branch_lengths)
    : Tree(topology, std::move(branch_lengths)) {
  AssertTopologyTrifurcatingInConstructor(topology);
}
// Construct from a topology and a tag -> branch-length map; the topology must
// have a trifurcation at its root (asserted).
UnrootedTree::UnrootedTree(const Node::NodePtr& topology, TagDoubleMap branch_lengths)
    : Tree(topology, std::move(branch_lengths)) {
  AssertTopologyTrifurcatingInConstructor(topology);
}
// Build a tree with every branch length set to 1 over the given topology.
// Note: calls Polish() on the input topology in place before sizing the
// branch vector by the root's id.
UnrootedTree UnrootedTree::UnitBranchLengthTreeOf(const Node::NodePtr& topology) {
  topology->Polish();
  return UnrootedTree(topology, BranchLengthVector(1 + topology->Id(), 1.));
}
// Build a unit-branch-length tree from a parent-id-vector encoding of the
// topology.
UnrootedTree UnrootedTree::OfParentIdVector(const std::vector<size_t>& ids) {
  const auto topology = Node::OfParentIdVector(ids);
  BranchLengthVector unit_lengths(1 + topology->Id(), 1.);
  return UnrootedTree(topology, std::move(unit_lengths));
}
// Convert this trifurcating tree into a bifurcating Tree: children 1 and 2
// are joined under a new internal node that reuses the old root's id, and
// child 0 becomes its sibling under a fresh root with id one past ours.
// Both newly introduced edges get zero branch length.
Tree UnrootedTree::Detrifurcate() const {
  Assert(Children().size() == 3,
         "UnrootedTree::Detrifurcate given a non-trifurcating tree.");
  auto branch_lengths = BranchLengths();
  auto our_id = Id();
  // New internal node over children 1 and 2, reusing the old root id; its
  // branch (index our_id) gets length zero.
  auto root12 = Node::Join(Children()[1], Children()[2], our_id);
  branch_lengths[our_id] = 0.;
  // Fresh root (id our_id + 1) over child 0 and the new internal node; its
  // edge entry is appended with length zero.
  auto rerooted_topology = Node::Join(Children()[0], root12, our_id + 1);
  branch_lengths.push_back(0.);
  return Tree(rerooted_topology, branch_lengths);
}
// Return a copy whose topology is deep-copied (nodes are not shared with this
// tree) and whose branch lengths are copied.
UnrootedTree UnrootedTree::DeepCopy() const {
  return UnrootedTree(Topology()->DeepCopy(), BranchLengths());
}
// Equality: topology and branch-length vector must both compare equal.
// NOTE(review): Topology() yields a Node::NodePtr; whether `==` here means
// pointer identity or structural equality depends on how NodePtr's operator==
// is defined — confirm the intended semantics.
bool UnrootedTree::operator==(const UnrootedTree& other) const {
  return (this->Topology() == other.Topology()) &&
         (this->BranchLengths() == other.BranchLengths());
}
// Shared constructor check: an UnrootedTree stores its topology with a
// trifurcation at the root, so reject any topology whose root does not have
// exactly three children.
// Fix: corrected the typo "trifucation" -> "trifurcation" in the assertion
// message.
void UnrootedTree::AssertTopologyTrifurcatingInConstructor(
    const Node::NodePtr& topology) {
  Assert(topology->Children().size() == 3,
         "Expected a tree with a trifurcation at the root in the constructor of "
         "UnrootedTree.");
}
| 1,968
|
C++
|
.cpp
| 44
| 41.045455
| 86
| 0.734726
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,027
|
pv_handler.cpp
|
phylovi_bito/src/pv_handler.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "pv_handler.hpp"
// ** Resize
// Grow the handler to hold new_element_count elements, with storage allocated
// for new_element_alloc elements plus the spare count (optionally updated via
// new_element_spare). Reallocates the mmapped backing store, re-subdivides it
// into per-PV matrices, zeroes the newly added PVs, and resizes the PV
// reindexer to the padded PV count.
// NOTE(review): the first Assert uses strict '<' while its message says
// "cannot exceed" (which would be '<='); confirm which is intended.
template <class PVTypeEnum, class DAGElementId>
void PartialVectorHandler<PVTypeEnum, DAGElementId>::Resize(
    const size_t new_element_count, const size_t new_element_alloc,
    std::optional<size_t> new_element_spare) {
  const size_t old_pv_count = GetPVCount();
  SetCount(new_element_count);
  if (new_element_spare.has_value()) {
    SetSpareCount(new_element_spare.value());
  }
  SetAllocatedCount(new_element_alloc + GetSpareCount());
  Assert(GetPaddedCount() < GetAllocatedCount(),
         "Padded count cannot exceed allocated count.");
  // Allocate mmapped data block.
  mmapped_master_pvs_.Resize(GetAllocatedPVCount() * pattern_count_);
  // Subdivide mmapped data in individual PVs.
  pvs_ = mmapped_master_pvs_.Subdivide(GetAllocatedPVCount());
  // Initialize new work space (only PVs added beyond the old count are
  // zeroed; pre-existing PVs keep their data).
  Assert((pvs_.back().rows() == MmappedNucleotidePLV::base_count_) &&
             (pvs_.back().cols() == static_cast<Eigen::Index>(pattern_count_)) &&
             (size_t(pvs_.size()) == GetAllocatedPVCount()),
         "Didn't get the right shape of PVs out of Subdivide.");
  for (size_t i = old_pv_count; i < GetPaddedPVCount(); i++) {
    pvs_.at(i).setZero();
  }
  pv_reindexer_.Resize(GetPaddedPVCount());
}
// Apply a PV reindexing. The reindexer is always folded into the accumulated
// remap; the data is additionally moved physically when remapping is disabled
// or the reindexer has grown past the configured ratio of its initial size.
template <class PVTypeEnum, class DAGElementId>
void PartialVectorHandler<PVTypeEnum, DAGElementId>::Reindex(
    const Reindexer pv_reindexer) {
  ReindexViaRemap(pv_reindexer);
  const bool remap_is_small =
      pv_reindexer.size() < (reindexer_init_size_ * reindex_ratio);
  if (!remap_is_small || !use_remapping_) {
    ReindexViaMoveCopy(pv_reindexer);
  }
}
// Physically rearrange the PV matrices according to the accumulated
// pv_reindexer_, then reset pv_reindexer_ to the identity (the data is now in
// its final positions). The two PVs just past GetPVCount() are passed as
// temporaries — presumably scratch space; confirm against
// Reindexer::ReindexInPlace.
// NOTE(review): the pv_reindexer parameter is unused here; the member
// pv_reindexer_ is applied instead (Reindex() runs ReindexViaRemap first).
template <class PVTypeEnum, class DAGElementId>
void PartialVectorHandler<PVTypeEnum, DAGElementId>::ReindexViaMoveCopy(
    const Reindexer pv_reindexer) {
  Reindexer::ReindexInPlace(pvs_, pv_reindexer_, GetPVCount(), GetPV(GetPVCount()),
                            GetPV(GetPVCount() + 1));
  pv_reindexer_ = Reindexer::IdentityReindexer(GetPaddedPVCount());
  reindexer_init_size_ = pv_reindexer_.size();
}
// Lazily reindex: compose the incoming reindexer into the stored
// pv_reindexer_ without moving any PV data (resizing the member to match the
// input before composing, and back to the padded PV count afterwards).
template <class PVTypeEnum, class DAGElementId>
void PartialVectorHandler<PVTypeEnum, DAGElementId>::ReindexViaRemap(
    const Reindexer pv_reindexer) {
  pv_reindexer_.Resize(pv_reindexer.size());
  pv_reindexer_ = pv_reindexer_.ComposeWith(pv_reindexer);
  pv_reindexer_.Resize(GetPaddedPVCount());
}
// Build the PV-level reindexer induced by a DAG-element-level reindexer when
// the element count grows from old_element_count to new_element_count. Each
// element owns pv_count_per_element_ PVs (one per PV type): PVs of
// pre-existing elements map through element_reindexer, while PVs for
// brand-new elements are handed out sequentially starting at the first fresh
// index. Side effect: updates element_count_ to new_element_count.
template <class PVTypeEnum, class DAGElementId>
Reindexer PartialVectorHandler<PVTypeEnum, DAGElementId>::BuildPVReindexer(
    const Reindexer& element_reindexer, const size_t old_element_count,
    const size_t new_element_count) {
  Assert(old_element_count <= new_element_count,
         "Cannot build a PVReindexer with a shrinking element count.");
  element_count_ = new_element_count;
  Reindexer pv_reindexer(new_element_count * pv_count_per_element_);
  // First PV index not occupied by a pre-existing element's PVs.
  PVId new_pvs_idx = PVId(old_element_count * pv_count_per_element_);
  for (size_t i = 0; i < new_element_count; i++) {
    const DAGElementId old_element_idx = DAGElementId(i);
    const DAGElementId new_element_idx =
        DAGElementId(element_reindexer.GetNewIndexByOldIndex(old_element_idx.value_));
    for (const auto pv_type : typename PVTypeEnum::Iterator()) {
      // Either get input pv_index from old pvs, or get new pv_index (new data is
      // irrelevant, so just get next available index).
      PVId old_pv_idx;
      if (old_element_idx < old_element_count) {
        old_pv_idx = GetPVIndex(pv_type, old_element_idx, old_element_count);
      } else {
        old_pv_idx = new_pvs_idx;
        new_pvs_idx++;
      }
      const PVId new_pv_idx = GetPVIndex(pv_type, new_element_idx, new_element_count);
      pv_reindexer.SetReindex(old_pv_idx.value_, new_pv_idx.value_);
    }
  }
  Assert(pv_reindexer.IsValid(GetPVCount()), "PV Reindexer is not valid.");
  return pv_reindexer;
}
// ** Explicit Instantiation
// Instantiate the handler for both PV type enums (PLV and PSV), keyed by
// either NodeId or EdgeId, so the definitions above can live in this .cpp.
template class PartialVectorHandler<PLVTypeEnum, NodeId>;
template class PartialVectorHandler<PLVTypeEnum, EdgeId>;
template class PartialVectorHandler<PSVTypeEnum, NodeId>;
template class PartialVectorHandler<PSVTypeEnum, EdgeId>;
| 4,151
|
C++
|
.cpp
| 89
| 42.325843
| 86
| 0.723976
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,028
|
fat_beagle.cpp
|
phylovi_bito/src/fat_beagle.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "fat_beagle.hpp"
#include <numeric>
#include <utility>
#include <vector>
#include "rooted_gradient_transforms.hpp"
// Build a FatBeagle for the given model specification and alignment pattern:
// creates a BEAGLE instance honoring the preferred flags, loads the tip data
// either as discrete states (use_tip_states = true) or as tip partials, and
// pushes the phylo model parameters into BEAGLE.
FatBeagle::FatBeagle(const PhyloModelSpecification &specification,
                     const SitePattern &site_pattern,
                     const FatBeagle::PackedBeagleFlags beagle_preference_flags,
                     bool use_tip_states)
    : phylo_model_(PhyloModel::OfSpecification(specification)),
      rescaling_(false),  // Note: rescaling_ set via the SetRescaling method.
      pattern_count_(static_cast<int>(site_pattern.PatternCount())),
      use_tip_states_(use_tip_states) {
  // CreateInstance reports both the instance handle and the flags actually
  // granted (which may differ from the preferences).
  std::tie(beagle_instance_, beagle_flags_) =
      CreateInstance(site_pattern, beagle_preference_flags);
  if (use_tip_states_) {
    SetTipStates(site_pattern);
  } else {
    SetTipPartials(site_pattern);
  }
  UpdatePhyloModelInBeagle();
}
// Tear down the underlying BEAGLE instance. A nonzero return from
// beagleFinalizeInstance is treated as unrecoverable: print and terminate
// (destructors must not throw).
// NOTE(review): the message goes to stdout (not stderr) and has no trailing
// newline — consider std::cerr << "...\n" in a follow-up.
FatBeagle::~FatBeagle() {
  auto finalize_result = beagleFinalizeInstance(beagle_instance_);
  if (finalize_result != 0) {
    std::cout << "beagleFinalizeInstance gave nonzero return value!";
    std::terminate();
  }
}
// Accessor for the block specification of the owned phylo model.
const BlockSpecification &FatBeagle::GetPhyloModelBlockSpecification() const {
  return phylo_model_->GetBlockSpecification();
}
// Set the phylo model's parameters from a flat vector and mirror the updated
// model into the BEAGLE instance.
void FatBeagle::SetParameters(const EigenVectorXdRef param_vector) {
  phylo_model_->SetParameters(param_vector);
  UpdatePhyloModelInBeagle();
}
// This is the "core" of the likelihood calculation, assuming that the tree is
// bifurcating.
//
// Resets BEAGLE's scale factors, schedules one partials-update operation per
// internal node (collected in post-order), refreshes the transition matrices
// from the branch lengths, runs the partials updates, and finally asks BEAGLE
// for the root log likelihood.
double FatBeagle::LogLikelihoodInternals(
    const Node::NodePtr topology, const std::vector<double> &branch_lengths) const {
  BeagleAccessories ba(beagle_instance_, rescaling_, topology);
  BeagleOperationVector operations;
  beagleResetScaleFactors(beagle_instance_, 0);
  topology->BinaryIdPostorder(
      [&operations, &ba](int node_id, int child0_id, int child1_id) {
        AddLowerPartialOperation(operations, ba, node_id, child0_id, child1_id);
      });
  UpdateBeagleTransitionMatrices(ba, branch_lengths, nullptr);
  beagleUpdatePartials(beagle_instance_,
                       operations.data(),  // array of operations to perform
                       static_cast<int>(operations.size()),
                       ba.cumulative_scale_index_[0]);
  double log_like = 0.;
  beagleCalculateRootLogLikelihoods(
      beagle_instance_, &ba.root_id_, ba.category_weight_index_.data(),
      ba.state_frequency_index_.data(), ba.cumulative_scale_index_.data(),
      ba.mysterious_count_, &log_like);
  return log_like;
}
// Log likelihood of an unrooted tree: make the topology bifurcating at the
// root first, then run the standard post-order likelihood computation.
// (The `flags` parameter is accepted for interface uniformity but unused.)
double FatBeagle::LogLikelihood(const UnrootedTree &tree,
                                std::optional<PhyloFlags> flags) const {
  const auto bifurcating_tree = tree.Detrifurcate();
  return LogLikelihoodInternals(bifurcating_tree.Topology(),
                                bifurcating_tree.BranchLengths());
}
// Log likelihood of a rooted tree treated as unrooted: branch lengths are used
// as-is, with no clock-rate scaling and no height-transform Jacobian term.
// (The `flags` parameter is currently unused.)
double FatBeagle::UnrootedLogLikelihood(const RootedTree &tree,
                                        std::optional<PhyloFlags> flags) const {
  return LogLikelihoodInternals(tree.Topology(), tree.BranchLengths());
}
// Log likelihood of a rooted tree under a clock model: each branch length is
// scaled by its per-branch clock rate before the likelihood computation.
// If the corresponding flag is set, adds the log-determinant of the
// node-height transform Jacobian.
// Fixes: the accumulator was initialized with a float literal (0.0f) for a
// double; the loop condition re-called tree.BranchLengths() every iteration;
// and `size() - 1` underflowed for an (anomalous) empty branch vector.
double FatBeagle::LogLikelihood(const RootedTree &tree,
                                std::optional<PhyloFlags> flags) const {
  // Scale time with the clock rates; the last entry (the root) is excluded.
  std::vector<double> branch_lengths = tree.BranchLengths();
  const std::vector<double> &rates = tree.GetRates();
  for (size_t i = 0; i + 1 < branch_lengths.size(); i++) {
    branch_lengths[i] *= rates[i];
  }
  double log_likelihood = LogLikelihoodInternals(tree.Topology(), branch_lengths);
  if (PhyloFlags::IsFlagSet(
          flags, LogLikelihoodFlagOptions::include_log_det_jacobian_likelihood_)) {
    log_likelihood += RootedGradientTransforms::LogDetJacobianHeightTransform(tree);
  }
  return log_likelihood;
}
// Build the per-category differential matrices: the substitution model's Q
// matrix is flattened (in Eigen storage order, via Map), replicated once per
// rate category, and each category's row is multiplied by its scaler (e.g.
// its category rate).
EigenMatrixXd BuildDifferentialMatrices(const SubstitutionModel &substitution_model,
                                        const EigenVectorXd &scalers) {
  size_t category_count = scalers.size();
  EigenMatrixXd Q = substitution_model.GetQMatrix();
  // View Q's storage as one long row vector so it can be replicated per
  // category without copying element-by-element.
  Eigen::Map<Eigen::RowVectorXd> mapQ(Q.data(), Q.size());
  EigenMatrixXd dQ = mapQ.replicate(category_count, 1);
  for (size_t k = 0; k < category_count; k++) {
    dQ.row(k) *= scalers[k];
  }
  return dQ;
}
// Compute the log likelihood and its gradient with respect to branch lengths,
// assuming a bifurcating topology. Post-order and pre-order partials are
// combined via beagleCalculateEdgeDerivatives, using the differential matrix
// dQ (one scaled copy of Q per rate category; see BuildDifferentialMatrices).
std::pair<double, std::vector<double>> FatBeagle::BranchGradientInternals(
    const Node::NodePtr topology, const std::vector<double> &branch_lengths,
    const EigenMatrixXd &dQ) const {
  beagleResetScaleFactors(beagle_instance_, 0);
  BeagleAccessories ba(beagle_instance_, rescaling_, topology);
  UpdateBeagleTransitionMatrices(ba, branch_lengths, nullptr);
  SetRootPreorderPartialsToStateFrequencies(ba);
  // Set differential matrices: a single buffer, shared by every edge.
  int derivative_matrix_idx = ba.node_count_ - 1;
  beagleSetDifferentialMatrix(beagle_instance_, derivative_matrix_idx, dQ.data());
  const auto derivative_matrix_indices =
      std::vector<int>(ba.node_count_ - 1, derivative_matrix_idx);
  // Calculate post-order partials
  BeagleOperationVector operations;
  topology->BinaryIdPostorder(
      [&operations, &ba](int node_id, int child0_id, int child1_id) {
        AddLowerPartialOperation(operations, ba, node_id, child0_id, child1_id);
      });
  beagleUpdatePartials(beagle_instance_, operations.data(),
                       static_cast<int>(operations.size()),
                       ba.cumulative_scale_index_[0]);  // cumulative scale index
  // Calculate pre-order partials.
  operations.clear();
  topology->TripleIdPreorderBifurcating(
      [&operations, &ba](int node_id, int sister_id, int parent_id) {
        AddUpperPartialOperation(operations, ba, node_id, sister_id, parent_id);
      });
  beagleUpdatePrePartials(beagle_instance_, operations.data(),
                          static_cast<int>(operations.size()),
                          BEAGLE_OP_NONE);  // cumulative scale index
  // Actually compute the gradient.
  std::vector<double> gradient(ba.node_count_, 0.);
  // Pre-order buffers are offset by node_count_ from their post-order
  // counterparts (cf. AddUpperPartialOperation).
  const auto pre_buffer_indices =
      BeagleAccessories::IotaVector(ba.node_count_ - 1, ba.node_count_);
  beagleCalculateEdgeDerivatives(
      beagle_instance_,
      ba.node_indices_.data(),           // list of post order buffer indices
      pre_buffer_indices.data(),         // list of pre order buffer indices
      derivative_matrix_indices.data(),  // differential Q matrix indices
      ba.category_weight_index_.data(),  // category weights indices
      ba.node_count_ - 1,                // number of edges
      nullptr,                           // derivative-per-site output array
      gradient.data(),  // sum of derivatives across sites output array
      nullptr);         // sum of squared derivatives output array
  // Also calculate the likelihood.
  double log_like = 0.;
  beagleCalculateRootLogLikelihoods(
      beagle_instance_, &ba.root_id_, ba.category_weight_index_.data(),
      ba.state_frequency_index_.data(), ba.cumulative_scale_index_.data(),
      ba.mysterious_count_, &log_like);
  return {log_like, gradient};
}
// Assert that the given FatBeagle pointer is non-null; returns it unchanged so
// the check can be chained inline by the Static* wrappers.
const FatBeagle *NullPtrAssert(const FatBeagle *fat_beagle) {
  Assert(fat_beagle != nullptr, "NULL FatBeagle pointer!");
  return fat_beagle;
}
// Static wrapper: null-check the instance and forward to
// LogLikelihood(UnrootedTree), passing flags through.
double FatBeagle::StaticUnrootedLogLikelihood(const FatBeagle *fat_beagle,
                                              const UnrootedTree &in_tree,
                                              std::optional<PhyloFlags> flags) {
  return NullPtrAssert(fat_beagle)->LogLikelihood(in_tree, flags);
}
// Static wrapper: null-check the instance and forward to
// UnrootedLogLikelihood (a rooted tree evaluated without clock scaling).
// Now forwards `flags` for consistency with the sibling wrappers; behavior is
// unchanged since UnrootedLogLikelihood currently ignores its flags argument.
double FatBeagle::StaticUnrootedLogLikelihoodOfRooted(const FatBeagle *fat_beagle,
                                                      const RootedTree &in_tree,
                                                      std::optional<PhyloFlags> flags) {
  return NullPtrAssert(fat_beagle)->UnrootedLogLikelihood(in_tree, flags);
}
// Static wrapper: null-check the instance and forward to
// LogLikelihood(RootedTree), passing flags through.
double FatBeagle::StaticRootedLogLikelihood(const FatBeagle *fat_beagle,
                                            const RootedTree &in_tree,
                                            std::optional<PhyloFlags> flags) {
  return NullPtrAssert(fat_beagle)->LogLikelihood(in_tree, flags);
}
// Static wrapper for the log-determinant of the node-height transform
// Jacobian. Note: both `fat_beagle` and `flags` are intentionally unused;
// the computation depends only on the tree.
double FatBeagle::StaticLogDetJacobianHeightTransform(const FatBeagle *fat_beagle,
                                                      const RootedTree &in_tree,
                                                      std::optional<PhyloFlags> flags) {
  return RootedGradientTransforms::LogDetJacobianHeightTransform(in_tree);
}
// Static wrapper: null-check the instance and forward to
// Gradient(UnrootedTree), passing flags through.
PhyloGradient FatBeagle::StaticUnrootedGradient(const FatBeagle *fat_beagle,
                                                const UnrootedTree &in_tree,
                                                std::optional<PhyloFlags> flags) {
  return NullPtrAssert(fat_beagle)->Gradient(in_tree, flags);
}
// Static wrapper: null-check the instance and forward to
// Gradient(RootedTree), passing flags through.
PhyloGradient FatBeagle::StaticRootedGradient(const FatBeagle *fat_beagle,
                                              const RootedTree &in_tree,
                                              std::optional<PhyloFlags> flags) {
  return NullPtrAssert(fat_beagle)->Gradient(in_tree, flags);
}
// Static wrapper for the gradient of the log-determinant Jacobian term.
// Note: both `fat_beagle` and `flags` are intentionally unused; the
// computation depends only on the tree.
DoubleVector FatBeagle::StaticGradientLogDeterminantJacobian(
    const FatBeagle *fat_beagle, const RootedTree &in_tree,
    std::optional<PhyloFlags> flags) {
  return RootedGradientTransforms::GradientLogDeterminantJacobian(in_tree);
}
// Create a BEAGLE instance sized for the given site pattern and this
// instance's phylo model, requiring manual scaling. Returns the instance id
// together with the flags BEAGLE actually granted; fails hard if neither a
// CPU nor a GPU resource was obtained.
std::pair<FatBeagle::BeagleInstance, FatBeagle::PackedBeagleFlags>
FatBeagle::CreateInstance(const SitePattern &site_pattern,
                          FatBeagle::PackedBeagleFlags beagle_preference_flags) {
  int taxon_count = static_cast<int>(site_pattern.SequenceCount());
  // Number of partial buffers to create (input):
  // taxon_count - 1 for lower partials (internal nodes only)
  // 2*taxon_count - 1 for upper partials (every node)
  int partials_buffer_count = 3 * taxon_count - 2;
  // When tips are supplied as partials (not compact states), each tip also
  // needs a partials buffer.
  if (!use_tip_states_) {
    partials_buffer_count += taxon_count;
  }
  // Number of compact state representation buffers to create -- for use with
  // setTipStates (input)
  int compact_buffer_count = (use_tip_states_ ? taxon_count : 0);
  // The number of states.
  int state_count =
      static_cast<int>(phylo_model_->GetSubstitutionModel()->GetStateCount());
  // Number of site patterns to be handled by the instance.
  int pattern_count = pattern_count_;
  // Number of eigen-decomposition buffers to allocate (input)
  int eigen_buffer_count = 1;
  // Number of transition matrix buffers (input) -- two per edge
  int matrix_buffer_count = 2 * (2 * taxon_count - 1);
  // Number of rate categories
  int category_count =
      static_cast<int>(phylo_model_->GetSiteModel()->GetCategoryCount());
  // Number of scaling buffers -- 1 buffer per partial buffer and 1 more
  // for accumulating scale factors in position 0.
  int scale_buffer_count = partials_buffer_count + 1;
  // List of potential resources on which this instance is allowed (input,
  // NULL implies no restriction
  int *allowed_resources = nullptr;
  // Length of resourceList list (input) -- not needed to use the default
  // hardware config
  int resource_count = 0;
  // Bit-flags indicating preferred implementation charactertistics, see
  // BeagleFlags (input)
  int requirement_flags = BEAGLE_FLAG_SCALING_MANUAL;
  BeagleInstanceDetails return_info;
  auto beagle_instance = beagleCreateInstance(
      taxon_count, partials_buffer_count, compact_buffer_count, state_count,
      pattern_count, eigen_buffer_count, matrix_buffer_count, category_count,
      scale_buffer_count, allowed_resources, resource_count, beagle_preference_flags,
      requirement_flags, &return_info);
  if (return_info.flags & (BEAGLE_FLAG_PROCESSOR_CPU | BEAGLE_FLAG_PROCESSOR_GPU)) {
    return {beagle_instance, return_info.flags};
  }  // else
  // NOTE: this relies on Failwith not returning (otherwise we'd fall off the
  // end of a non-void function).
  Failwith("Couldn't get a CPU or a GPU from BEAGLE.");
}
// Load compact tip state buffers into BEAGLE, one per taxon in pattern order,
// then set the per-pattern weights.
void FatBeagle::SetTipStates(const SitePattern &site_pattern) {
  int taxon_idx = 0;
  for (const auto &pattern : site_pattern.GetPatterns()) {
    beagleSetTipStates(beagle_instance_, taxon_idx, pattern.data());
    ++taxon_idx;
  }
  beagleSetPatternWeights(beagle_instance_, site_pattern.GetWeights().data());
}
// Load full tip partials into BEAGLE, one buffer per taxon, then set the
// per-pattern weights.
void FatBeagle::SetTipPartials(const SitePattern &site_pattern) {
  const size_t taxon_count = site_pattern.GetPatterns().size();
  for (size_t taxon_idx = 0; taxon_idx < taxon_count; ++taxon_idx) {
    beagleSetTipPartials(beagle_instance_, taxon_idx,
                         site_pattern.GetPartials(taxon_idx).data());
  }
  beagleSetPatternWeights(beagle_instance_, site_pattern.GetWeights().data());
}
// Push the site model's rate-category proportions (weights, category index 0)
// and category rates into BEAGLE.
void FatBeagle::UpdateSiteModelInBeagle() {
  const auto &site_model = phylo_model_->GetSiteModel();
  const auto &weights = site_model->GetCategoryProportions();
  const auto &rates = site_model->GetCategoryRates();
  beagleSetCategoryWeights(beagle_instance_, 0, weights.data());
  beagleSetCategoryRates(beagle_instance_, rates.data());
}
// Push the substitution model's equilibrium frequencies and its
// eigendecomposition (eigen index 0) into BEAGLE.
void FatBeagle::UpdateSubstitutionModelInBeagle() const {
  const auto &substitution_model = phylo_model_->GetSubstitutionModel();
  const EigenVectorXd &frequencies = substitution_model->GetFrequencies();
  const EigenMatrixXd &eigenvectors = substitution_model->GetEigenvectors();
  const EigenMatrixXd &inverse_eigenvectors =
      substitution_model->GetInverseEigenvectors();
  const EigenVectorXd &eigenvalues = substitution_model->GetEigenvalues();
  beagleSetStateFrequencies(beagle_instance_, 0, frequencies.data());
  beagleSetEigenDecomposition(beagle_instance_,
                              0,  // eigenIndex
                              eigenvectors.data(), inverse_eigenvectors.data(),
                              eigenvalues.data());
}
// Sync the full phylo model (site model + substitution model) into BEAGLE.
void FatBeagle::UpdatePhyloModelInBeagle() {
  // Issue #146: put in a clock model here.
  UpdateSiteModelInBeagle();
  UpdateSubstitutionModelInBeagle();
}
// If we pass nullptr as gradient_indices_ptr then we will not prepare for
// gradient calculation. Recomputes the transition probability matrices for
// every edge from the given branch lengths (eigen index 0).
void FatBeagle::UpdateBeagleTransitionMatrices(
    const BeagleAccessories &ba, const std::vector<double> &branch_lengths,
    const int *const gradient_indices_ptr) const {
  beagleUpdateTransitionMatrices(beagle_instance_,         // instance
                                 0,                        // eigenIndex
                                 ba.node_indices_.data(),  // probabilityIndices
                                 gradient_indices_ptr,     // firstDerivativeIndices
                                 nullptr,                  // secondDerivativeIndices
                                 branch_lengths.data(),    // edgeLengths
                                 ba.node_count_ - 1);      // count
}
// Initialize the root's pre-order partials buffer (at offset root_id +
// node_count) with the stationary frequencies, tiled once per pattern and per
// rate category; this seeds the pre-order traversal in gradient computation.
void FatBeagle::SetRootPreorderPartialsToStateFrequencies(
    const BeagleAccessories &ba) const {
  const EigenVectorXd &frequencies =
      phylo_model_->GetSubstitutionModel()->GetFrequencies();
  size_t category_count = phylo_model_->GetSiteModel()->GetCategoryCount();
  EigenVectorXd state_frequencies =
      frequencies.replicate(pattern_count_ * category_count, 1);
  beagleSetPartials(beagle_instance_, ba.root_id_ + ba.node_count_,
                    state_frequencies.data());
}
// Append the BEAGLE operation computing the post-order (lower) partials of
// node_id from its two children. Scaler buffers (when rescaling) are indexed
// per internal node, offset by 1 to leave position 0 for accumulation.
void FatBeagle::AddLowerPartialOperation(BeagleOperationVector &operations,
                                         const BeagleAccessories &ba, const int node_id,
                                         const int child0_id, const int child1_id) {
  const int destinationScaleWrite =
      ba.rescaling_ ? node_id - ba.taxon_count_ + 1 : BEAGLE_OP_NONE;
  // We can't emplace_back because BeagleOperation has no constructor.
  // The compiler should elide this though.
  operations.push_back({
      node_id,  // destinationPartials
      destinationScaleWrite, ba.destinationScaleRead_,
      child0_id,  // child1Partials;
      child0_id,  // child1TransitionMatrix;
      child1_id,  // child2Partials;
      child1_id   // child2TransitionMatrix;
  });
}
// Append the BEAGLE operation computing the pre-order (upper) partials of
// node_id from its parent's pre-order partial and its sibling's post-order
// partial. Pre-order buffers live at index node_id + node_count.
void FatBeagle::AddUpperPartialOperation(BeagleOperationVector &operations,
                                         const BeagleAccessories &ba, const int node_id,
                                         const int sister_id, const int parent_id) {
  // Scalers are indexed differently for the upper conditional
  // likelihood. They start at the number of internal nodes + 1 because
  // of the lower conditional likelihoods. Also, in this case the leaves
  // have scalers.
  const int destinationScaleWrite =
      ba.rescaling_ ? node_id + 1 + ba.internal_count_ : BEAGLE_OP_NONE;
  operations.push_back({
      node_id + ba.node_count_,  // dest pre-order partial of current node
      destinationScaleWrite, ba.destinationScaleRead_,
      parent_id + ba.node_count_,  // pre-order partial parent
      node_id,                     // matrices of current node
      sister_id,                   // post-order partial of sibling
      sister_id                    // matrices of sibling
  });
}
// Calculation of the clock-rate gradient via the chain rule:
// \partial{L}/\partial{r_i} = \partial{L}/\partial{b_i} \partial{b_i}/\partial{r_i}
// and for a strict clock \partial{L}/\partial{r} = \sum_i \partial{L}/\partial{r_i}.
std::vector<double> ClockGradient(const RootedTree &tree,
                                  const std::vector<double> &branch_gradient) {
  const auto edge_count = tree.Topology()->Id();
  // Per-branch chain rule: each branch length is rate * time, so the
  // derivative with respect to the rate is branch gradient times time.
  std::vector<double> per_branch_gradient(edge_count, 0);
  for (size_t edge_idx = 0; edge_idx < edge_count; edge_idx++) {
    per_branch_gradient[edge_idx] =
        branch_gradient[edge_idx] * tree.branch_lengths_[edge_idx];
  }
  if (tree.rate_count_ == 1) {
    // Strict clock: a single shared rate, so sum the contributions.
    return {std::accumulate(per_branch_gradient.cbegin(), per_branch_gradient.cend(),
                            0.0)};
  }
  if (tree.rate_count_ == tree.rates_.size()) {
    // One independent rate per branch.
    return per_branch_gradient;
  }
  Failwith(
      "The number of rates should be equal to 1 (i.e. strict clock) or equal to "
      "the number of branches.");
}
// Gradient of the log likelihood with respect to the (single) discrete site
// model parameter: the dot product of the per-edge unscaled category gradient
// with the branch lengths. The last branch-lengths entry (the root) is
// excluded, matching the edge count. Returns a one-element vector.
// Fixes: guards the empty-input case (previously `size() - 1` underflowed a
// size_t) and replaces the hand-rolled loop with std::inner_product.
std::vector<double> DiscreteSiteModelGradient(
    const std::vector<double> &branch_lengths,
    const std::vector<double> &unscaled_category_gradient) {
  if (branch_lengths.empty()) {
    // No edges, so the gradient contribution is zero.
    return {0.};
  }
  const size_t edge_count = branch_lengths.size() - 1;
  const double rate_gradient =
      std::inner_product(branch_lengths.begin(), branch_lengths.begin() + edge_count,
                         unscaled_category_gradient.begin(), 0.);
  return {rate_gradient};
}
// Convenience overload: finite-difference substitution-model gradient with no
// reparameterization (identity transform). See the transform-taking overload
// below for the actual computation.
template <typename TTree>
std::vector<double> FatBeagle::SubstitutionModelGradientFiniteDifference(
    FatBeagle::StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
    const TTree &tree, SubstitutionModel *subst_model, const std::string &parameter_key,
    EigenVectorXd param_vector, double delta, std::optional<PhyloFlags> flags) const {
  return SubstitutionModelGradientFiniteDifference(f, fat_beagle, tree, subst_model,
                                                   parameter_key, param_vector, delta,
                                                   IdentityTransform(), flags);
}
// Compute the gradient of f (a log probability function of the tree) with
// respect to the substitution model parameters named by `parameter_key`, by
// central finite differences of width `delta` taken in the unconstrained
// space defined by `transform`. The model and BEAGLE are restored to the
// original parameters before returning.
// Fix: the per-iteration restore previously reset only the reparameterized
// vector, leaving `param_vector` (and hence the model, and BEAGLE after the
// final sync) holding the last parameter's -delta perturbation. We now
// re-transform into param_vector before the restoring SetParameters call.
template <typename TTree>
std::vector<double> FatBeagle::SubstitutionModelGradientFiniteDifference(
    FatBeagle::StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
    const TTree &tree, SubstitutionModel *subst_model, const std::string &parameter_key,
    EigenVectorXd param_vector, double delta, const Transform &transform,
    std::optional<PhyloFlags> flags) const {
  auto [parameter_start, parameter_length] =
      subst_model->GetBlockSpecification().GetMap().at(parameter_key);
  EigenVectorXd parameters = param_vector.segment(parameter_start, parameter_length);
  // Differentiate in the unconstrained space given by the transform.
  EigenVectorXd parameters_reparameterized = transform.inverse(parameters);
  std::vector<double> gradient(parameters_reparameterized.size());
  for (Eigen::Index parameter_idx = 0;
       parameter_idx < parameters_reparameterized.size(); parameter_idx++) {
    double original_parameter_value = parameters_reparameterized[parameter_idx];
    // Evaluate at +delta.
    parameters_reparameterized[parameter_idx] += delta;
    param_vector.segment(parameter_start, parameter_length) =
        transform(parameters_reparameterized);
    subst_model->SetParameters(param_vector);
    UpdateSubstitutionModelInBeagle();
    double log_prob_plus = f(fat_beagle, tree, flags);
    // Evaluate at -delta.
    parameters_reparameterized[parameter_idx] = original_parameter_value - delta;
    param_vector.segment(parameter_start, parameter_length) =
        transform(parameters_reparameterized);
    subst_model->SetParameters(param_vector);
    UpdateSubstitutionModelInBeagle();
    double log_prob_minus = f(fat_beagle, tree, flags);
    // Central difference.
    gradient[parameter_idx] = (log_prob_plus - log_prob_minus) / (2. * delta);
    // Restore the original value in both the reparameterized vector and the
    // packed parameter vector before the next iteration.
    parameters_reparameterized[parameter_idx] = original_parameter_value;
    param_vector.segment(parameter_start, parameter_length) =
        transform(parameters_reparameterized);
    subst_model->SetParameters(param_vector);
  }
  // Re-sync BEAGLE with the (now fully restored) model parameters.
  UpdateSubstitutionModelInBeagle();
  return gradient;
}
// Compute finite-difference gradients of f with respect to the substitution
// model's frequencies and rates, optionally in stick-breaking coordinates.
// Returns {rates gradient, frequencies gradient}.
template <typename TTree>
DoubleVectorPair FatBeagle::SubstitutionModelGradient(
    FatBeagle::StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
    const TTree &tree, std::optional<PhyloFlags> flags) const {
  // Retrieve frequency and rate data from data map.
  auto subst_model = phylo_model_->GetSubstitutionModel();
  EigenVectorXd param_vector(subst_model->GetBlockSpecification().ParameterCount());
  auto subst_map = subst_model->GetBlockSpecification().GetMap();
  param_vector.segment(subst_map.at(SubstitutionModel::frequencies_key_).first,
                       subst_map.at(SubstitutionModel::frequencies_key_).second) =
      phylo_model_->GetSubstitutionModel()->GetFrequencies();
  param_vector.segment(subst_map.at(SubstitutionModel::rates_key_).first,
                       subst_map.at(SubstitutionModel::rates_key_).second) =
      phylo_model_->GetSubstitutionModel()->GetRates();
  // Set delta (the finite-difference step), overridable via flags.
  double delta = PhyloFlags::GetFlagValueIfSet(
      flags, PhyloGradientFlagOptions::set_gradient_delta_, 1.e-6);
  // Compute frequency gradients. The stick-breaking transform differentiates
  // in unconstrained coordinates (frequencies are simplex-constrained).
  std::vector<double> freqs_grad;
  if (PhyloFlags::IsFlagSet(flags,
                            PhyloGradientFlagOptions::use_stickbreaking_transform_)) {
    freqs_grad = SubstitutionModelGradientFiniteDifference(
        f, fat_beagle, tree, subst_model, SubstitutionModel::frequencies_key_,
        param_vector, delta, StickBreakingTransform());
  } else {
    freqs_grad = SubstitutionModelGradientFiniteDifference(
        f, fat_beagle, tree, subst_model, SubstitutionModel::frequencies_key_,
        param_vector, delta, IdentityTransform());
  }
  // Compute rate gradients
  std::vector<double> rates_grad;
  // Rates in the GTR model are constrained to sum to 1
  if ((subst_model->GetRates().size() == 6) &&
      (PhyloFlags::IsFlagSet(flags,
                             PhyloGradientFlagOptions::use_stickbreaking_transform_))) {
    rates_grad = SubstitutionModelGradientFiniteDifference(
        f, fat_beagle, tree, subst_model, SubstitutionModel::rates_key_, param_vector,
        delta, StickBreakingTransform());
  } else {
    rates_grad = SubstitutionModelGradientFiniteDifference(
        f, fat_beagle, tree, subst_model, SubstitutionModel::rates_key_, param_vector,
        delta, IdentityTransform());
  }
  // Compile results.
  return std::make_pair(rates_grad, freqs_grad);
}
// Gradient of the log likelihood for an unrooted tree with respect to branch
// lengths, plus (flag-controlled) substitution-model and site-model
// gradients. The tree is made bifurcating and the root slid before the
// branch-gradient computation.
PhyloGradient FatBeagle::Gradient(const UnrootedTree &in_tree,
                                  std::optional<PhyloFlags> flags) const {
  PhyloGradient phylo_gradient = PhyloGradient();
  auto tree = in_tree.Detrifurcate();
  tree.SlideRootPosition();
  // Differential matrices scaled by the category rates, for d/d(branch).
  EigenMatrixXd dQ =
      BuildDifferentialMatrices(*phylo_model_->GetSubstitutionModel(),
                                phylo_model_->GetSiteModel()->GetCategoryRates());
  auto [log_likelihood, branch_length_gradient] =
      BranchGradientInternals(tree.Topology(), tree.BranchLengths(), dQ);
  phylo_gradient.log_likelihood_ = log_likelihood;
  // Calculate Substitution Model Gradients.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::substitution_model_)) {
    if (phylo_model_->GetSubstitutionModel()->GetRates().size() > 0) {
      auto [rates_grad, freqs_grad] = SubstitutionModelGradient<UnrootedTree>(
          FatBeagle::StaticUnrootedLogLikelihood, this, in_tree);
      // Store the concatenated [rates, freqs] gradient as well as each part.
      auto model_grad = std::vector<double>();
      model_grad.insert(model_grad.end(), rates_grad.begin(), rates_grad.end());
      model_grad.insert(model_grad.end(), freqs_grad.begin(), freqs_grad.end());
      phylo_gradient[PhyloGradientMapkeys::substitution_model_] = model_grad;
      phylo_gradient[PhyloGradientMapkeys::substitution_model_rates_] = rates_grad;
      phylo_gradient[PhyloGradientMapkeys::substitution_model_frequencies_] =
          freqs_grad;
    }
  }
  // Calculate Site Model Gradients.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::site_model_)) {
    auto site_model = phylo_model_->GetSiteModel();
    size_t category_count = site_model->GetCategoryCount();
    if (category_count > 1) {
      // Re-run the branch gradient with rate-derivative scalers instead.
      EigenMatrixXd dQ =
          BuildDifferentialMatrices(*phylo_model_->GetSubstitutionModel(),
                                    phylo_model_->GetSiteModel()->GetRateGradient());
      auto unscaled_category_gradient =
          BranchGradientInternals(tree.Topology(), tree.BranchLengths(), dQ).second;
      phylo_gradient[PhyloGradientMapkeys::site_model_] =
          DiscreteSiteModelGradient(tree.BranchLengths(), unscaled_category_gradient);
    }
  }
  // We want the fixed node to have a zero gradient.
  branch_length_gradient[tree.Topology()->Children()[1]->Id()] = 0.;
  phylo_gradient[PhyloGradientMapkeys::branch_lengths_] = branch_length_gradient;
  return phylo_gradient;
}
// Gradient of the log likelihood for a rooted (clock) tree: branch-length
// gradient in rate-scaled time, plus flag-controlled substitution-model,
// site-model, ratio/root-height, and clock-rate gradients.
PhyloGradient FatBeagle::Gradient(const RootedTree &tree,
                                  std::optional<PhyloFlags> flags) const {
  PhyloGradient phylo_gradient = PhyloGradient();
  // Scale time with clock rate (the last entry, the root, is excluded).
  std::vector<double> branch_lengths = tree.BranchLengths();
  const std::vector<double> &rates = tree.GetRates();
  for (size_t i = 0; i < tree.BranchLengths().size() - 1; i++) {
    branch_lengths[i] *= rates[i];
  }
  // Calculate branch length gradient and log likelihood.
  EigenMatrixXd dQ =
      BuildDifferentialMatrices(*phylo_model_->GetSubstitutionModel(),
                                phylo_model_->GetSiteModel()->GetCategoryRates());
  auto [log_likelihood, branch_gradient] =
      BranchGradientInternals(tree.Topology(), branch_lengths, dQ);
  phylo_gradient.log_likelihood_ = log_likelihood;
  phylo_gradient[PhyloGradientMapkeys::branch_lengths_] = branch_gradient;
  // Calculate Substitution Model Gradients.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::substitution_model_)) {
    if (phylo_model_->GetSubstitutionModel()->GetRates().size() > 0) {
      auto [rates_grad, freqs_grad] = SubstitutionModelGradient<RootedTree>(
          FatBeagle::StaticRootedLogLikelihood, this, tree, flags);
      // Store the concatenated [rates, freqs] gradient as well as each part.
      auto model_grad = std::vector<double>();
      model_grad.insert(model_grad.end(), rates_grad.begin(), rates_grad.end());
      model_grad.insert(model_grad.end(), freqs_grad.begin(), freqs_grad.end());
      phylo_gradient[PhyloGradientMapkeys::substitution_model_] = model_grad;
      phylo_gradient[PhyloGradientMapkeys::substitution_model_rates_] = rates_grad;
      phylo_gradient[PhyloGradientMapkeys::substitution_model_frequencies_] =
          freqs_grad;
    }
  }
  // Calculate Site Model Parameter Gradient.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::site_model_)) {
    auto site_model = phylo_model_->GetSiteModel();
    size_t category_count = site_model->GetCategoryCount();
    if (category_count > 1) {
      // Re-run the branch gradient with rate-derivative scalers instead.
      EigenMatrixXd dQ =
          BuildDifferentialMatrices(*phylo_model_->GetSubstitutionModel(),
                                    phylo_model_->GetSiteModel()->GetRateGradient());
      auto unscaled_category_gradient =
          BranchGradientInternals(tree.Topology(), branch_lengths, dQ).second;
      phylo_gradient[PhyloGradientMapkeys::site_model_] =
          DiscreteSiteModelGradient(branch_lengths, unscaled_category_gradient);
    }
  }
  // Calculate the Ratio Gradient of Branch Gradient.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::ratios_root_height_)) {
    phylo_gradient[PhyloGradientMapkeys::ratios_root_height_] =
        RootedGradientTransforms::RatioGradientOfBranchGradient(tree, branch_gradient,
                                                                flags);
  }
  // Calculate the Clock Rate Gradient.
  if (PhyloFlags::IsFlagSet(flags, PhyloGradientFlagOptions::clock_model_)) {
    phylo_gradient[PhyloGradientMapkeys::clock_model_] =
        ClockGradient(tree, branch_gradient);
  }
  return phylo_gradient;
}
| 28,441
|
C++
|
.cpp
| 557
| 43.72711
| 88
| 0.688053
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,029
|
phylo_model.cpp
|
phylovi_bito/src/phylo_model.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "phylo_model.hpp"
// Construct a PhyloModel from its three component models, taking ownership,
// and register each component's parameter block under its own key so a single
// packed parameter vector can address all three.
PhyloModel::PhyloModel(std::unique_ptr<SubstitutionModel> substitution_model,
                       std::unique_ptr<SiteModel> site_model,
                       std::unique_ptr<ClockModel> clock_model)
    : BlockModel({}),
      substitution_model_(std::move(substitution_model)),
      site_model_(std::move(site_model)),
      clock_model_(std::move(clock_model)) {
  Append(entire_substitution_key_, substitution_model_->GetBlockSpecification());
  Append(entire_site_key_, site_model_->GetBlockSpecification());
  Append(entire_clock_key_, clock_model_->GetBlockSpecification());
}
// Factory: build a PhyloModel whose components are each constructed from the
// corresponding part of the specification.
std::unique_ptr<PhyloModel> PhyloModel::OfSpecification(
    const PhyloModelSpecification& specification) {
  return std::make_unique<PhyloModel>(
      SubstitutionModel::OfSpecification(specification.substitution_),
      SiteModel::OfSpecification(specification.site_),
      ClockModel::OfSpecification(specification.clock_));
}
// Set all component-model parameters from a packed vector: each component
// receives its own segment, located via the key registered in the ctor.
void PhyloModel::SetParameters(const EigenVectorXdRef param_vector) {
  substitution_model_->SetParameters(
      ExtractSegment(param_vector, entire_substitution_key_));
  site_model_->SetParameters(ExtractSegment(param_vector, entire_site_key_));
  clock_model_->SetParameters(ExtractSegment(param_vector, entire_clock_key_));
}
| 1,407
|
C++
|
.cpp
| 27
| 46.888889
| 81
| 0.74564
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,030
|
unrooted_sbn_instance.cpp
|
phylovi_bito/src/unrooted_sbn_instance.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "unrooted_sbn_instance.hpp"
#include <iostream>
#include <memory>
#include <unordered_set>
#include "eigen_sugar.hpp"
#include "numerical_utils.hpp"
// ** Building SBN-related items
// Train the SBN parameters by expectation maximization over this instance's
// topology counter. `alpha`, `max_iter`, and `score_epsilon` are forwarded to
// SBNProbability::ExpectationMaximization (presumably regularization, the
// iteration cap, and the convergence tolerance -- see that function).
// Returns the score trajectory / result vector produced by the EM routine.
EigenVectorXd UnrootedSBNInstance::TrainExpectationMaximization(double alpha,
                                                                size_t max_iter,
                                                                double score_epsilon) {
  // Requires that trees have been loaded and counted first.
  CheckTopologyCounter();
  auto indexer_representation_counter =
      sbn_support_.IndexerRepresentationCounterOf(topology_counter_);
  return SBNProbability::ExpectationMaximization(
      sbn_parameters_, indexer_representation_counter, sbn_support_.RootsplitCount(),
      sbn_support_.ParentToRange(), alpha, max_iter, score_epsilon);
}
// Sample a topology from the SBN; delegates with `false` (the boolean
// presumably selects rooted sampling -- see the base overload).
Node::NodePtr UnrootedSBNInstance::SampleTopology() const {
  return SampleTopology(false);
}
// Sample `count` unrooted topologies from the SBN, replacing the instance's
// tree collection; each tree gets zero-initialized branch lengths (one per
// edge). Requires at least 3 taxa.
// Fix: construct each tree in place via emplace_back's forwarded arguments
// instead of building a named UnrootedTree temporary and moving it.
void UnrootedSBNInstance::SampleTrees(size_t count) {
  CheckSBNSupportNonEmpty();
  auto taxon_count = sbn_support_.TaxonCount();
  Assert(taxon_count > 2,
         "SampleTrees: Can't sample an unrooted tree with less than 3 taxa.");
  // 2n-2 because trees are unrooted.
  auto edge_count = 2 * static_cast<int>(taxon_count) - 2;
  tree_collection_.trees_.clear();
  for (size_t i = 0; i < count; i++) {
    tree_collection_.trees_.emplace_back(
        SampleTopology(), std::vector<double>(static_cast<size_t>(edge_count)));
  }
}
std::vector<SizeVectorVector> UnrootedSBNInstance::MakePSPIndexerRepresentations()
const {
std::vector<SizeVectorVector> representations;
representations.reserve(tree_collection_.trees_.size());
for (const auto &tree : tree_collection_.trees_) {
representations.push_back(psp_indexer_.RepresentationOf(tree.Topology()));
}
return representations;
}
// Per-split branch lengths across the tree collection, via the PSP indexer.
DoubleVectorVector UnrootedSBNInstance::SplitLengths() const {
  return psp_indexer_.SplitLengths(tree_collection_);
}
// Convert a numeric indexer representation into human-readable string form
// using the support's reversed indexer.
StringSetVector UnrootedSBNInstance::StringIndexerRepresentationOf(
    const UnrootedIndexerRepresentation &indexer_representation) const {
  return UnrootedSBNMaps::StringIndexerRepresentationOf(
      sbn_support_.StringReversedIndexer(), indexer_representation);
}
// Convenience overload: index the topology first (using out_of_sample_index
// for splits outside the support), then stringify.
StringSetVector UnrootedSBNInstance::StringIndexerRepresentationOf(
    const Node::NodePtr &topology, size_t out_of_sample_index) const {
  return StringIndexerRepresentationOf(
      sbn_support_.IndexerRepresentationOf(topology, out_of_sample_index));
}
// This function is really just for testing-- it recomputes from scratch.
// Returns stringified rootsplit and PCSP counters for the tree collection.
std::pair<StringSizeMap, StringPCSPMap> UnrootedSBNInstance::SplitCounters() const {
  auto counter = tree_collection_.TopologyCounter();
  return {StringifyMap(UnrootedSBNMaps::RootsplitCounterOf(counter).Map()),
          SBNMaps::StringPCSPMapOf(UnrootedSBNMaps::PCSPCounterOf(counter))};
}
// ** I/O
// Parse a Newick file and replace this instance's tree collection with the
// result; `sort_taxa` is forwarded to the parser driver.
void UnrootedSBNInstance::ReadNewickFile(const std::string &fname,
                                         const bool sort_taxa) {
  Driver driver;
  driver.SetSortTaxa(sort_taxa);
  tree_collection_ =
      UnrootedTreeCollection::OfTreeCollection(driver.ParseNewickFile(fname));
}
// Parse a Nexus file and replace this instance's tree collection with the
// result; `sort_taxa` is forwarded to the parser driver.
void UnrootedSBNInstance::ReadNexusFile(const std::string &fname,
                                        const bool sort_taxa) {
  Driver driver;
  driver.SetSortTaxa(sort_taxa);
  tree_collection_ =
      UnrootedTreeCollection::OfTreeCollection(driver.ParseNexusFile(fname));
}
// ** Phylogenetic likelihood
// Log likelihood of every tree in the collection, computed by the engine.
// `external_flags` are merged with the instance's own flags first.
std::vector<double> UnrootedSBNInstance::LogLikelihoods(
    std::optional<PhyloFlags> external_flags) {
  auto flags = CollectPhyloFlags(external_flags);
  return GetEngine()->LogLikelihoods(tree_collection_, phylo_model_params_, rescaling_,
                                     flags);
}
// Flag-vector overload: build a PhyloFlags from the given vector form and
// delegate to the optional<PhyloFlags> overload above.
template <class VectorType>
std::vector<double> UnrootedSBNInstance::LogLikelihoods(const VectorType &flag_vec,
                                                        const bool is_run_defaults) {
  std::optional<PhyloFlags> flags = PhyloFlags(flag_vec, is_run_defaults);
  return LogLikelihoods(flags);
}
// Explicit instantiation for Pybind API.
template DoubleVector UnrootedSBNInstance::LogLikelihoods(const StringVector &,
                                                          const bool);
template DoubleVector UnrootedSBNInstance::LogLikelihoods(const StringBoolVector &,
                                                          const bool);
template DoubleVector UnrootedSBNInstance::LogLikelihoods(const StringDoubleVector &,
                                                          const bool);
template DoubleVector UnrootedSBNInstance::LogLikelihoods(
    const StringBoolDoubleVector &, const bool);
// Phylogenetic gradient of every tree in the collection, computed by the
// engine. `external_flags` are merged with the instance's own flags first.
std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    std::optional<PhyloFlags> external_flags) {
  auto flags = CollectPhyloFlags(external_flags);
  return GetEngine()->Gradients(tree_collection_, phylo_model_params_, rescaling_,
                                flags);
}
// Flag-vector overload: build a PhyloFlags from the given vector form and
// delegate to the optional<PhyloFlags> overload above.
template <class VectorType>
std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    const VectorType &flag_vec, const bool is_run_defaults) {
  std::optional<PhyloFlags> flags = PhyloFlags(flag_vec, is_run_defaults);
  return PhyloGradients(flags);
}
// Explicit instantiation for Pybind API.
template std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    const StringVector &, const bool);
template std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    const StringBoolVector &, const bool);
template std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    const StringDoubleVector &, const bool);
template std::vector<PhyloGradient> UnrootedSBNInstance::PhyloGradients(
    const StringBoolDoubleVector &, const bool);
// If `parent` is a subsplit in the SBN support, append its child-parameter
// index range to `range_vector`; otherwise do nothing.
void UnrootedSBNInstance::PushBackRangeForParentIfAvailable(
    const Bitset &parent, UnrootedSBNInstance::RangeVector &range_vector) {
  if (sbn_support_.ParentInSupport(parent)) {
    range_vector.push_back(sbn_support_.ParentToRangeAt(parent));
  }
}
// Retrieves range of subsplits for each s|t that appears in the tree
// given by rooted_representation. The first range always covers the
// rootsplits; for each subsplit, both it and its rotated form are looked up
// (only ranges actually present in the support are appended).
UnrootedSBNInstance::RangeVector UnrootedSBNInstance::GetSubsplitRanges(
    const SizeVector &rooted_representation) {
  RangeVector subsplit_ranges;
  // PROFILE: should we be reserving here?
  subsplit_ranges.emplace_back(0, sbn_support_.RootsplitCount());
  Bitset root = sbn_support_.RootsplitsAt(rooted_representation[0]);
  PushBackRangeForParentIfAvailable(root, subsplit_ranges);
  PushBackRangeForParentIfAvailable(root.SubsplitRotate(), subsplit_ranges);
  // Starting at 1 here because we took care of the rootsplit above (the 0th element).
  for (size_t i = 1; i < rooted_representation.size(); i++) {
    Bitset child = sbn_support_.IndexToChildAt(rooted_representation[i]);
    PushBackRangeForParentIfAvailable(child, subsplit_ranges);
    PushBackRangeForParentIfAvailable(child.SubsplitRotate(), subsplit_ranges);
  }
  return subsplit_ranges;
}
// This gives the gradient of log q at a specific unrooted topology.
// See eq:gradLogQ in the tex, and TopologyGradients for more information about
// normalized_sbn_parameters_in_log. That vector is a lazily-filled cache: NaN
// marks an entry as not-yet-computed, and this function fills in the
// normalized (log) SBN parameters for every subsplit range it touches.
EigenVectorXd UnrootedSBNInstance::GradientOfLogQ(
    EigenVectorXdRef normalized_sbn_parameters_in_log,
    const UnrootedIndexerRepresentation &indexer_representation) {
  EigenVectorXd grad_log_q = EigenVectorXd::Zero(sbn_parameters_.size());
  // Accumulates log q over all rootings of this unrooted topology.
  double log_q = DOUBLE_NEG_INF;
  for (const auto &rooted_representation : indexer_representation) {
    if (SBNProbability::IsInSBNSupport(rooted_representation, sbn_parameters_.size())) {
      auto subsplit_ranges = GetSubsplitRanges(rooted_representation);
      // Calculate entries in normalized_sbn_parameters_in_log as needed.
      for (const auto &[begin, end] : subsplit_ranges) {
        if (std::isnan(normalized_sbn_parameters_in_log[begin])) {
          // The entry hasn't been filled yet because it's NaN, so fill it.
          auto sbn_parameters_segment = sbn_parameters_.segment(begin, end - begin);
          double log_sum = sbn_parameters_segment.redux(NumericalUtils::LogAdd);
          // We should be extra careful of NaNs when we are using NaN as a sentinel.
          Assert(std::isfinite(log_sum),
                 "GradientOfLogQ encountered non-finite value during calculation.");
          normalized_sbn_parameters_in_log.segment(begin, end - begin) =
              sbn_parameters_segment.array() - log_sum;
        }
      }
      double log_probability_rooted_tree = SBNProbability::SumOf(
          normalized_sbn_parameters_in_log, rooted_representation, 0.0);
      double probability_rooted_tree = exp(log_probability_rooted_tree);
      // We need to look up the subsplits in the tree.
      // Set representation allows fast lookup.
      std::unordered_set<size_t> rooted_representation_as_set(
          rooted_representation.begin(), rooted_representation.end());
      // Now, we actually perform the eq:gradLogQ calculation.
      for (const auto &[begin, end] : subsplit_ranges) {
        for (size_t pcsp_idx = begin; pcsp_idx < end; pcsp_idx++) {
          auto indicator_subsplit_in_rooted_tree =
              static_cast<double>(rooted_representation_as_set.count(pcsp_idx) > 0);
          grad_log_q[pcsp_idx] += probability_rooted_tree *
                                  (indicator_subsplit_in_rooted_tree -
                                   exp(normalized_sbn_parameters_in_log[pcsp_idx]));
        }
      }
      log_q = NumericalUtils::LogAdd(log_q, log_probability_rooted_tree);
    }
  }
  // Divide by q (in log space) to turn the accumulated sum into grad(log q).
  grad_log_q.array() *= exp(-log_q);
  return grad_log_q;
}
// Accumulates the gradient of log q over every tree in the current tree
// collection, weighting each tree's contribution by its multiplicative
// factor (VIMCO-based or plain, depending on use_vimco).
EigenVectorXd UnrootedSBNInstance::TopologyGradients(const EigenVectorXdRef log_f,
                                                     bool use_vimco) {
  EigenVectorXd result = EigenVectorXd::Zero(sbn_parameters_.size());
  EigenVectorXd factors;
  if (use_vimco) {
    factors = CalculateVIMCOMultiplicativeFactors(log_f);
  } else {
    factors = CalculateMultiplicativeFactors(log_f);
  }
  // Cache of normalized SBN parameters in log space, shared across all trees
  // and filled lazily by GradientOfLogQ; NaN marks a not-yet-computed entry.
  EigenVectorXd log_normalized_cache =
      EigenVectorXd::Constant(sbn_parameters_.size(), DOUBLE_NAN);
  const size_t num_trees = tree_collection_.TreeCount();
  for (size_t tree_idx = 0; tree_idx < num_trees; tree_idx++) {
    const auto representation = sbn_support_.IndexerRepresentationOf(
        tree_collection_.GetTree(tree_idx).Topology());
    // PROFILE: does it matter that we are allocating another sbn_vector_ sized object?
    EigenVectorXd per_tree_grad =
        GradientOfLogQ(log_normalized_cache, representation);
    per_tree_grad.array() *= factors(tree_idx);
    result += per_tree_grad;
  }
  return result;
}
| 10,981
|
C++
|
.cpp
| 216
| 43.976852
| 88
| 0.717159
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,031
|
csv.cpp
|
phylovi_bito/src/csv.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "csv.hpp"
#include <fstream>
// Given a headerless 2-column CSV of quoted string keys then double values, this parses
// the CSV into a StringDoubleMap.
// Fails if the same key appears more than once in the file.
StringDoubleMap CSV::StringDoubleMapOfCSV(const std::string& in_path) {
  io::CSVReader<2, io::trim_chars<' ', '\t'>, io::double_quote_escape<',', '"'>> csv_in(
      in_path);
  std::string key;
  double value;
  StringDoubleMap string_double_map;
  while (csv_in.read_row(key, value)) {
    // insert's returned bool tells us whether the key was new, avoiding the
    // previous separate find-then-insert double lookup.
    const bool inserted = string_double_map.insert({key, value}).second;
    if (!inserted) {
      // Fixed: the message previously lacked a space ("...in <path>when...").
      Failwith("Key " + key + " found twice in " + in_path +  // NOLINT
               " when turning it into a map.");
    }
  }
  return string_double_map;
}
// Writes a StringDoubleVector as a headerless 2-column CSV to out_path, one
// "key,value" row per entry.
// Fails if the stream reports an unrecoverable write error.
void CSV::StringDoubleVectorToCSV(const StringDoubleVector& vect,
                                  const std::string& out_path) {
  std::ofstream out_stream(out_path);
  for (const auto& [s, value] : vect) {
    // '\n' instead of std::endl: avoids flushing the stream on every row.
    out_stream << s << "," << value << '\n';
  }
  // Flush once so write errors surface before the badbit check below.
  out_stream.flush();
  if (out_stream.bad()) {
    Failwith("Failure writing to " + out_path);
  }
  out_stream.close();
}
| 1,252
|
C++
|
.cpp
| 34
| 32.264706
| 88
| 0.64827
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,032
|
nni_evaluation_engine.cpp
|
phylovi_bito/src/nni_evaluation_engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "nni_evaluation_engine.hpp"
#include "nni_engine.hpp"
#include "tp_engine.hpp"
#include "gp_engine.hpp"
// ** NNIEvalEngine
// Constructs an evaluation engine bound to the given NNIEngine, caching
// pointers to that engine's DAG and graft DAG for later access.
NNIEvalEngine::NNIEvalEngine(NNIEngine &nni_engine)
    : nni_engine_(&nni_engine),
      dag_(&nni_engine.GetDAG()),
      graft_dag_(&nni_engine.GetGraftDAG()) {}
// Returns the cached score for the given NNI; asserts if the NNI has not been
// scored by this engine.
double NNIEvalEngine::GetScoreByNNI(const NNIOperation &nni) const {
  const auto &scored = GetScoredNNIs();
  const auto found = scored.find(nni);
  Assert(found != scored.end(), "NNI does not exist in NNI Evaluation Engine.");
  return found->second;
}
// Returns the cached score for the NNI corresponding to the given DAG edge.
double NNIEvalEngine::GetScoreByEdge(const EdgeId edge_id) const {
  return GetScoreByNNI(GetDAG().GetNNI(edge_id));
}
// Returns the largest score among all scored NNIs (-inf when none exist).
double NNIEvalEngine::GetMaxScore() const {
  double best = -INFINITY;
  for (const auto &[nni, score] : GetScoredNNIs()) {
    std::ignore = nni;
    best = (score > best) ? score : best;
  }
  return best;
}
// Returns the smallest score among all scored NNIs (+inf when none exist).
double NNIEvalEngine::GetMinScore() const {
  double worst = INFINITY;
  for (const auto &[nni, score] : GetScoredNNIs()) {
    std::ignore = nni;
    worst = (score < worst) ? score : worst;
  }
  return worst;
}
// ** NNIEvalEngineViaGP
// Constructs a GP-backed evaluation engine over the given NNIEngine, caching
// a pointer to the GPEngine that performs the likelihood computations.
NNIEvalEngineViaGP::NNIEvalEngineViaGP(NNIEngine &nni_engine, GPEngine &gp_engine)
    : NNIEvalEngine(nni_engine), gp_engine_(&gp_engine) {}
// Sizes the GP engine's PLV and GPCSP storage to match the current DAG.
void NNIEvalEngineViaGP::Init() {
  GetGPEngine().GrowPLVs(GetDAG().NodeCountWithoutDAGRoot());
  GetGPEngine().GrowGPCSPs(GetDAG().EdgeCountWithLeafSubsplits());
}
// Populates all PLVs and computes likelihoods for the whole DAG, preparing
// the engine for NNI scoring.
void NNIEvalEngineViaGP::Prep() {
  GetGPEngine().ProcessOperations(GetDAG().PopulatePLVs());
  GetGPEngine().ProcessOperations(GetDAG().ComputeLikelihoods());
}
void NNIEvalEngineViaGP::GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
std::optional<Reindexer> edge_reindexer) {
// Remove DAGRoot from node reindexing (for GPEngine).
const Reindexer node_reindexer_without_root =
node_reindexer.value().RemoveNewIndex(GetDAG().GetDAGRootNodeId().value_);
GetGPEngine().GrowPLVs(GetDAG().NodeCountWithoutDAGRoot(),
node_reindexer_without_root);
GetGPEngine().GrowGPCSPs(GetDAG().EdgeCountWithLeafSubsplits(), edge_reindexer);
}
// Grows GP engine storage to accommodate scoring of the adjacent NNIs.
// With via_reference, only spare (temporary) PLVs/GPCSPs are grown — one set
// per NNI when use_unique_temps, otherwise a single shared set. Without it,
// the engine is grown to cover the full graft DAG.
void NNIEvalEngineViaGP::GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                                   const bool via_reference,
                                                   const bool use_unique_temps) {
  if (!via_reference) {
    GetGPEngine().GrowPLVs(GetGraftDAG().NodeCountWithoutDAGRoot());
    GetGPEngine().GrowGPCSPs(GetGraftDAG().EdgeCountWithLeafSubsplits());
    return;
  }
  const size_t spare_plv_count = use_unique_temps
                                     ? GetSpareNodesPerNNI() * adjacent_nnis.size()
                                     : GetSpareNodesPerNNI();
  GetGPEngine().GrowSparePLVs(spare_plv_count);
  GetGPEngine().GrowSpareGPCSPs(adjacent_nnis.size());
}
// Resizes and reindexes the GP engine after NNIs have been added to the DAG:
// assigns default branch lengths to the new edges, optionally copies engine
// data from each pre-NNI to its post-NNI, refreshes the SBN priors,
// repopulates PLVs, optionally optimizes the new branch lengths, and
// recomputes likelihoods.
void NNIEvalEngineViaGP::UpdateEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &pre_nni_to_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  using namespace GPOperations;
  // prev_node_count is unused here but kept for interface stability.
  std::ignore = prev_node_count;
  auto &branch_handler = GetGPEngine().GetBranchLengthHandler();
  // (Removed: a std::set of new edge ids that was built but never read.)
  // Grow and Reindex GPEngine.
  GetGPEngine().GrowPLVs(node_reindexer.size());
  GetGPEngine().GrowGPCSPs(edge_reindexer.size());
  // Give every newly added edge the default branch length.
  for (size_t i = prev_edge_count; i < edge_reindexer.size(); i++) {
    const EdgeId edge_id = edge_reindexer.GetOldIndexByNewIndex(i);
    branch_handler(edge_id) = branch_handler.GetDefaultBranchLength();
  }
  // Copy over branch lengths from pre-NNI to post-NNI.
  if (copy_new_edges_) {
    for (const auto &[pre_nni, nni] : pre_nni_to_nni) {
      CopyGPEngineDataAfterAddingNNI(pre_nni, nni);
    }
  }
  // Update SBN Priors.
  auto sbn_prior = GetDAG().BuildUniformOnTopologicalSupportPrior();
  auto unconditional_node_probabilities =
      GetDAG().UnconditionalNodeProbabilities(sbn_prior);
  auto inverted_sbn_prior =
      GetDAG().InvertedGPCSPProbabilities(sbn_prior, unconditional_node_probabilities);
  GetGPEngine().InitializePriors(std::move(sbn_prior),
                                 std::move(unconditional_node_probabilities.segment(
                                     0, GetDAG().NodeCountWithoutDAGRoot())),
                                 std::move(inverted_sbn_prior));
  if (use_null_priors_) {
    GetGPEngine().SetNullPrior();
  }
  // Update PLVs.
  GetGPEngine().ProcessOperations(GetDAG().PopulatePLVs());
  // Optimize branch lengths.
  if (optimize_new_edges_) {
    BranchLengthOptimization();
  }
  GetGPEngine().ProcessOperations(GetDAG().ComputeLikelihoods());
}
// After an NNI has been added to the DAG, seeds the new NNI's engine data by
// copying from the corresponding pre-NNI: adjacent-edge GPCSP data via the
// clade map between the two NNIs, plus the central edge's branch length.
void NNIEvalEngineViaGP::CopyGPEngineDataAfterAddingNNI(const NNIOperation &pre_nni,
                                                        const NNIOperation &nni) {
  auto &branch_handler = GetGPEngine().GetBranchLengthHandler();
  // Copies GPCSP data for every edge adjacent to pre_node_id (in direction
  // `dir`, clade `clade`) onto the corresponding edge of post_node_id.
  // (Removed unused &branch_handler capture.)
  auto CopyBranchLengths = [this](const auto pre_node_id, const auto post_node_id,
                                  const Direction dir, const SubsplitClade clade) {
    for (const auto adj_node_id :
         GetDAG().GetDAGNode(pre_node_id).GetNeighbors(dir, clade)) {
      const auto pre_parent_id =
          (dir == Direction::Rootward) ? adj_node_id : pre_node_id;
      const auto post_parent_id =
          (dir == Direction::Rootward) ? adj_node_id : post_node_id;
      const auto pre_child_id =
          (dir == Direction::Rootward) ? pre_node_id : adj_node_id;
      const auto post_child_id =
          (dir == Direction::Rootward) ? post_node_id : adj_node_id;
      const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_parent_id, pre_child_id);
      const auto post_edge_id = GetDAG().GetEdgeIdx(post_parent_id, post_child_id);
      GetGPEngine().CopyGPCSPData(pre_edge_id, post_edge_id);
    }
  };
  // Build mapping according to the NNI swap.
  const auto post_parent_id = GetDAG().GetDAGNodeId(nni.GetParent());
  const auto post_child_id = GetDAG().GetDAGNodeId(nni.GetChild());
  const auto post_edge_id = GetDAG().GetEdgeIdx(post_parent_id, post_child_id);
  const auto pre_parent_id = GetDAG().GetDAGNodeId(pre_nni.GetParent());
  const auto pre_child_id = GetDAG().GetDAGNodeId(pre_nni.GetChild());
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_parent_id, pre_child_id);
  const auto pre_sister_clade = GetDAG().GetSisterClade(pre_edge_id);
  const auto clade_map = NNIOperation::BuildNNICladeMapFromPreNNIToNNI(nni, pre_nni);
  NNICladeEnum::Array<NodeId> post_node_ids;
  post_node_ids[clade_map[NNIClade::ParentFocal]] = post_parent_id;
  post_node_ids[clade_map[NNIClade::ParentSister]] = post_parent_id;
  post_node_ids[clade_map[NNIClade::ChildLeft]] = post_child_id;
  post_node_ids[clade_map[NNIClade::ChildRight]] = post_child_id;
  // (Removed unused locals `DoubleVector parents, sisters, children;`.)
  // Copy parent edges.
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    CopyBranchLengths(pre_parent_id, post_node_ids[NNIClade::ParentFocal],
                      Direction::Rootward, clade);
  }
  // Copy sister edges.
  CopyBranchLengths(pre_parent_id, post_node_ids[NNIClade::ParentSister],
                    Direction::Leafward, pre_sister_clade);
  // Copy central edge.
  branch_handler.Get(post_edge_id) = branch_handler.Get(pre_edge_id);
  // Copy left and right child edges.
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    const auto nni_clade =
        (clade == SubsplitClade::Left) ? NNIClade::ChildLeft : NNIClade::ChildRight;
    CopyBranchLengths(pre_child_id, post_node_ids[nni_clade], Direction::Leafward,
                      clade);
  }
}
// Scores every adjacent NNI by computing its GP likelihood (via-reference
// spare-PLV path); results are cached in the engine's scored-NNI map.
void NNIEvalEngineViaGP::ScoreAdjacentNNIs(const NNISet &adjacent_nnis) {
  ComputeAdjacentNNILikelihoods(adjacent_nnis, true);
}
// Scores an NNI that already exists inside the DAG by looking up its edge and
// reading the stored per-GPCSP log likelihood; asserts if the NNI is absent.
double NNIEvalEngineViaGP::ScoreInternalNNIByNNI(const NNIOperation &nni) const {
  Assert(GetDAG().ContainsNNI(nni), "DAG does not contain NNI.");
  return ScoreInternalNNIByEdge(GetDAG().GetEdgeIdx(nni));
}
// Reads the stored per-GPCSP log likelihood for an edge already in the DAG.
double NNIEvalEngineViaGP::ScoreInternalNNIByEdge(const EdgeId &edge_id) const {
  return GetGPEngine().GetPerGPCSPLogLikelihoods(edge_id.value_, 1)[0];
}
// Number of spare PLV node slots reserved per candidate NNI.
size_t NNIEvalEngineViaGP::GetSpareNodesPerNNI() const { return spare_nodes_per_nni_; }
// Number of spare GPCSP edge slots reserved per candidate NNI.
size_t NNIEvalEngineViaGP::GetSpareEdgesPerNNI() const { return spare_edges_per_nni_; }
// Computes (and caches) the GP likelihood of every adjacent NNI.
// via_reference selects whether the growth step reserves spare storage or
// grows the engine for the full graft DAG.
void NNIEvalEngineViaGP::ComputeAdjacentNNILikelihoods(const NNISet &adjacent_nnis,
                                                       const bool via_reference) {
  GrowGPEngineForAdjacentNNILikelihoods(adjacent_nnis, via_reference);
  // Each call stores its likelihood into GetScoredNNIs(); the returned
  // (likelihood, offset) pair is not needed here.
  // (Removed unused locals `scored_nnis` and `nni_count`.)
  const size_t offset = 0;
  for (const auto &nni : adjacent_nnis) {
    std::ignore = ComputeAdjacentNNILikelihood(nni, offset);
  }
}
std::pair<double, size_t> NNIEvalEngineViaGP::ComputeAdjacentNNILikelihood(
const NNIOperation &nni, const size_t offset) {
using namespace GPOperations;
GPOperationVector ops;
auto &pvs = GetGPEngine().GetPLVHandler();
const NNIOperation pre_nni = GetDAG().FindNNINeighborInDAG(nni);
// const std::set<NodeId> rootsplit_node_ids{GetDAG().GetRootsplitNodeIds().begin(),
// GetDAG().GetRootsplitNodeIds().end()};
std::set<NodeId> rootsplit_node_ids;
for (const auto node_id : GetDAG().GetRootsplitNodeIds()) {
rootsplit_node_ids.insert(node_id);
}
// Get temp PVs for adjacent NNI.
const auto pv_ids = GetTempAdjPVIds();
// Get mapped nodes and temp edges for adjacent NNI.
auto [node_ids, edge_ids] =
GetMappedAdjNodeIdsAndTempAdjEdgeIds(pre_nni, nni, IsCopyNewEdges());
size_t edge_count = edge_ids.parents.size() + edge_ids.sisters.size() +
edge_ids.leftchildren.size() + edge_ids.rightchildren.size() + 1;
size_t new_offset = edge_count;
GetGPEngine().GrowSpareGPCSPs(edge_count);
// Rootward Pass
auto UpdateLeftChildRootward = [&]() {
// Evolve up leftchild edge: leftchild_p -> child_phatleft.
size_t i = 0;
ops.push_back(ZeroPLV{pv_ids.child_phatleft.value_});
for (const auto adj_node_id : node_ids.grand_leftchildren) {
const PVId leftchild_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.child_phatleft.value_,
edge_ids.leftchildren[i].value_,
leftchild_p.value_});
i++;
}
};
auto UpdateRightChildRootward = [&]() {
// Evolve up rightchild edge: rightchild_p -> child_phatright.
size_t i = 0;
ops.push_back(ZeroPLV{pv_ids.child_phatright.value_});
for (const auto adj_node_id : node_ids.grand_rightchildren) {
const PVId rightchild_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.child_phatright.value_,
edge_ids.rightchildren[i].value_,
rightchild_p.value_});
i++;
}
};
auto UpdateCentralRootward = [&]() {
// child_p = child_phatleft \circ child_phatright..
ops.push_back(Multiply{pv_ids.child_p.value_, pv_ids.child_phatleft.value_,
pv_ids.child_phatright.value_});
// Evolve up central edge: child_p -> parent_phatfocal.
ops.push_back(ZeroPLV{pv_ids.parent_phatfocal.value_});
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.parent_phatfocal.value_,
edge_ids.central.value_,
pv_ids.child_p.value_});
};
auto UpdateSisterRootward = [&]() {
// Evolve up sister edge: sister_p -> parent_phatsister.
size_t i = 0;
ops.push_back(ZeroPLV{pv_ids.parent_phatsister.value_});
for (const auto adj_node_id : node_ids.grand_sisters) {
const PVId sister_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.parent_phatsister.value_,
edge_ids.sisters[i].value_,
sister_p.value_});
i++;
}
};
auto UpdateParentRootward = [&]() {
// parent_p = parent_phatfocal \circ parent_phatsister.
ops.push_back(Multiply{pv_ids.parent_p.value_, pv_ids.parent_phatfocal.value_,
pv_ids.parent_phatsister.value_});
};
auto NNIRootwardPass = [&]() {
UpdateLeftChildRootward();
UpdateRightChildRootward();
UpdateCentralRootward();
UpdateSisterRootward();
UpdateParentRootward();
};
// Leafward Pass
auto UpdateLeftChildLeafward = [&]() {
// child_rleft = child_rhat \circ child_rhatright.
ops.push_back(Multiply{pv_ids.child_rleft.value_, pv_ids.child_rhat.value_,
pv_ids.child_phatright.value_});
};
auto UpdateRightChildLeafward = [&]() {
// child_rright = child_rhat \circ child_rhatleft.
ops.push_back(Multiply{pv_ids.child_rright.value_, pv_ids.child_rhat.value_,
pv_ids.child_phatleft.value_});
};
auto UpdateCentralLeafward = [&]() {
// parent_rfocal = parent_rhat \circ parent_rhatsister.
ops.push_back(Multiply{pv_ids.parent_rfocal.value_, pv_ids.parent_rhat.value_,
pv_ids.parent_phatsister.value_});
// Evolve down central edge: parent_rfocal -> child_rhat.
ops.push_back(ZeroPLV{pv_ids.child_rhat.value_});
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.child_rhat.value_,
edge_ids.central.value_,
pv_ids.parent_rfocal.value_});
};
auto UpdateSisterLeafward = [&]() {
// parent_rsister = parent_rhat \circ parent_rhatfocal.
ops.push_back(Multiply{pv_ids.parent_rsister.value_, pv_ids.parent_rhat.value_,
pv_ids.parent_phatfocal.value_});
};
auto UpdateParentLeafward = [&]() {
// Evolve down parent edge: grandparent_rfocal -> parent_rhat.
ops.push_back(ZeroPLV{pv_ids.parent_rhat.value_});
size_t i = 0;
const bool is_dag_root = GetDAG().IsNodeRoot(node_ids.grand_parents[0]);
if (is_dag_root) {
ops.push_back(SetToStationaryDistribution{pv_ids.parent_rhat.value_,
edge_ids.parents[0].value_});
} else {
for (const auto adj_node_id : node_ids.grand_parents) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_ids.parent_focal);
const auto focal_clade = GetDAG().GetFocalClade(edge_id);
const PVId grandparent_rfocal =
pvs.GetPVIndex(PLVTypeEnum::RPLVType(focal_clade), adj_node_id);
ops.push_back(IncrementWithWeightedEvolvedPLV{pv_ids.parent_rhat.value_,
edge_ids.parents[i].value_,
grandparent_rfocal.value_});
i++;
}
}
};
auto NNILeafwardPass = [&]() {
UpdateParentLeafward();
UpdateCentralLeafward();
UpdateSisterLeafward();
UpdateLeftChildLeafward();
UpdateRightChildLeafward();
};
// Branch Length Optimization
auto OptimizeLeftChild = [&](bool do_update = true) {
size_t i = 0;
for (const auto adj_node_id : node_ids.grand_leftchildren) {
const PVId grand_leftchild_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(OptimizeBranchLength{pv_ids.child_rleft.value_,
grand_leftchild_p.value_,
edge_ids.leftchildren[i].value_});
i++;
}
if (do_update) UpdateLeftChildRootward();
};
auto OptimizeRightChild = [&](bool do_update = true) {
size_t i = 0;
for (const auto adj_node_id : node_ids.grand_rightchildren) {
const PVId grand_rightchild_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(OptimizeBranchLength{pv_ids.child_rright.value_,
grand_rightchild_p.value_,
edge_ids.rightchildren[i].value_});
i++;
}
if (do_update) UpdateRightChildRootward();
};
auto OptimizeCentral = [&](bool do_update = true) {
if (do_update) UpdateCentralLeafward();
ops.push_back(OptimizeBranchLength{pv_ids.parent_rfocal.value_,
pv_ids.child_p.value_, edge_ids.central.value_});
if (do_update) UpdateCentralRootward();
};
auto OptimizeSister = [&](bool do_update = true) {
if (do_update) UpdateSisterLeafward();
size_t i = 0;
for (const auto adj_node_id : node_ids.grand_sisters) {
const PVId grand_sister_p = pvs.GetPVIndex(PLVType::P, adj_node_id);
ops.push_back(OptimizeBranchLength{pv_ids.parent_rsister.value_,
grand_sister_p.value_,
edge_ids.sisters[i].value_});
i++;
}
if (do_update) UpdateSisterRootward();
};
auto OptimizeParent = [&](bool do_update = true) {
if (do_update) UpdateParentLeafward();
size_t i = 0;
if ((node_ids.grand_parents.size() == 1) &&
GetDAG().IsNodeRoot(node_ids.grand_parents[0])) {
return;
} else {
for (const auto adj_node_id : node_ids.grand_parents) {
const EdgeId edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_ids.parent_focal);
const auto focal_clade = GetDAG().GetFocalClade(edge_id);
const PVId grandparent_rfocal =
pvs.GetPVIndex(PLVTypeEnum::RPLVType(focal_clade), adj_node_id);
ops.push_back(OptimizeBranchLength{
grandparent_rfocal.value_,
pv_ids.parent_p.value_,
edge_ids.parents[i].value_,
});
i++;
}
}
if (do_update) UpdateParentRootward();
};
auto NNIBranchLengthOptimization = [&](bool do_update = true) {
OptimizeLeftChild(do_update);
OptimizeRightChild(do_update);
OptimizeSister(do_update);
OptimizeCentral(do_update);
OptimizeParent(do_update);
};
// Branch Length Optimization.
if (IsOptimizeNewEdges()) {
NNIRootwardPass();
NNILeafwardPass();
GetGPEngine().ProcessOperations(ops);
ops.clear();
NNIBranchLengthOptimization();
NNILeafwardPass();
for (size_t iter = 0; iter < GetOptimizationMaxIteration(); iter++) {
GetGPEngine().ProcessOperations(ops);
}
ops.clear();
}
// Compute likelihood of central edge.
NNIRootwardPass();
NNILeafwardPass();
ops.push_back(GPOperations::Likelihood{
edge_ids.central.value_, pv_ids.parent_rfocal.value_, pv_ids.child_p.value_});
GetGPEngine().ProcessOperations(ops);
ops.clear();
double likelihood =
GetGPEngine().GetPerGPCSPLogLikelihoods(edge_ids.central.value_, 1)[0];
GetScoredNNIs()[nni] = likelihood;
return {likelihood, new_offset};
}
// Gathers, for an NNI already in the DAG, the central nodes/edge plus all
// adjacent node ids and edge ids grouped by role (grandparents, grandsisters,
// left/right grandchildren).
NNIEvalEngineViaGP::AdjNodeAndEdgeIds NNIEvalEngineViaGP::GetAdjNodeAndEdgeIds(
    const NNIOperation &nni) const {
  AdjNodeIds node_ids;
  AdjEdgeIds edge_ids;
  Assert(GetDAG().ContainsNNI(nni), "Given NNI does not exist in DAG.");
  // Appends each neighbor of node_id (in direction `dir`, clade `clade`) and
  // its connecting edge to the given output vectors.
  const auto AddAdjNodesAndEdgesToVector =
      [this](NodeIdVector &adj_node_ids, EdgeIdVector &adj_edge_ids,
             const NodeId node_id, const Direction dir, const SubsplitClade clade) {
        for (const auto adj_node_id :
             GetDAG().GetDAGNode(node_id).GetNeighbors(dir, clade)) {
          // NOTE(review): this `return` (not `continue`) stops at the first
          // out-of-range neighbor id — presumably those are graft-only nodes
          // appended after the host DAG's; confirm intent.
          if (adj_node_id >= GetDAG().NodeCount()) return;
          const auto parent_id = (dir == Direction::Rootward) ? adj_node_id : node_id;
          const auto child_id = (dir == Direction::Rootward) ? node_id : adj_node_id;
          const auto edge_id = GetDAG().GetEdgeIdx(parent_id, child_id);
          adj_node_ids.push_back(adj_node_id);
          adj_edge_ids.push_back(edge_id);
        }
      };
  // The NNI's parent node serves both the focal and sister roles; the child
  // node serves both the left and right roles.
  node_ids.parent_focal = GetDAG().GetDAGNodeId(nni.GetParent());
  node_ids.parent_sister = node_ids.parent_focal;
  node_ids.child_left = GetDAG().GetDAGNodeId(nni.GetChild());
  node_ids.child_right = node_ids.child_left;
  edge_ids.central = GetDAG().GetEdgeIdx(node_ids.parent_focal, node_ids.child_left);
  const auto sister_clade = GetDAG().GetSisterClade(edge_ids.central);
  // Grandparents: rootward neighbors of the parent in both clades.
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    AddAdjNodesAndEdgesToVector(node_ids.grand_parents, edge_ids.parents,
                                node_ids.parent_focal, Direction::Rootward, clade);
  }
  // Grandsisters: leafward neighbors of the parent in the sister clade.
  AddAdjNodesAndEdgesToVector(node_ids.grand_sisters, edge_ids.sisters,
                              node_ids.parent_sister, Direction::Leafward,
                              sister_clade);
  // Grandchildren: leafward neighbors of the child, per clade.
  AddAdjNodesAndEdgesToVector(node_ids.grand_leftchildren, edge_ids.leftchildren,
                              node_ids.child_left, Direction::Leafward,
                              SubsplitClade::Left);
  AddAdjNodesAndEdgesToVector(node_ids.grand_rightchildren, edge_ids.rightchildren,
                              node_ids.child_right, Direction::Leafward,
                              SubsplitClade::Right);
  return {node_ids, edge_ids};
}
// For a candidate NNI not yet in the DAG, maps the adjacent node ids of its
// pre-NNI neighbor (which is in the DAG) onto the candidate's clade layout,
// assigns temp (spare) edge slots for all adjacent edges, and optionally
// copies the pre-NNI edges' data into those temp slots.
NNIEvalEngineViaGP::AdjNodeAndEdgeIds
NNIEvalEngineViaGP::GetMappedAdjNodeIdsAndTempAdjEdgeIds(
    const NNIOperation &pre_nni, const NNIOperation &nni,
    const bool copy_branch_lengths) {
  AdjNodeIds node_ids;
  // Get edges and nodes from pre-NNI.
  auto [pre_node_ids, pre_edge_ids] = GetAdjNodeAndEdgeIds(pre_nni);
  const auto clade_map = NNIOperation::BuildNNICladeMapFromPreNNIToNNI(nni, pre_nni);
  // Remap adj node ids from pre-NNI to post-NNI.
  NNICladeEnum::Array<NodeIdVector *> unmapped_node_ids, mapped_node_ids;
  unmapped_node_ids[NNIClade::ParentFocal] = &pre_node_ids.grand_parents;
  unmapped_node_ids[NNIClade::ParentSister] = &pre_node_ids.grand_sisters;
  unmapped_node_ids[NNIClade::ChildLeft] = &pre_node_ids.grand_leftchildren;
  unmapped_node_ids[NNIClade::ChildRight] = &pre_node_ids.grand_rightchildren;
  for (const auto nni_clade : NNICladeEnum::Iterator()) {
    mapped_node_ids[nni_clade] = unmapped_node_ids[clade_map[nni_clade]];
  }
  node_ids.grand_parents = *mapped_node_ids[NNIClade::ParentFocal];
  node_ids.grand_sisters = *mapped_node_ids[NNIClade::ParentSister];
  node_ids.grand_leftchildren = *mapped_node_ids[NNIClade::ChildLeft];
  node_ids.grand_rightchildren = *mapped_node_ids[NNIClade::ChildRight];
  // Remap central node ids from pre-NNI to post-NNI.
  NNICladeEnum::Array<NodeId> unmapped_node_id, mapped_node_id;
  unmapped_node_id[NNIClade::ParentFocal] = pre_node_ids.parent_focal;
  unmapped_node_id[NNIClade::ParentSister] = pre_node_ids.parent_sister;
  unmapped_node_id[NNIClade::ChildLeft] = pre_node_ids.child_left;
  unmapped_node_id[NNIClade::ChildRight] = pre_node_ids.child_right;
  for (const auto nni_clade : NNICladeEnum::Iterator()) {
    mapped_node_id[nni_clade] = unmapped_node_id[clade_map[nni_clade]];
  }
  node_ids.parent_focal = mapped_node_id[NNIClade::ParentFocal];
  node_ids.parent_sister = mapped_node_id[NNIClade::ParentSister];
  node_ids.child_left = mapped_node_id[NNIClade::ChildLeft];
  node_ids.child_right = mapped_node_id[NNIClade::ChildRight];
  // Remap edges from pre-NNI to post-NNI.
  AdjEdgeIds edge_ids;
  NNICladeEnum::Array<EdgeIdVector *> unmapped_edge_ids, mapped_edge_ids;
  unmapped_edge_ids[NNIClade::ParentFocal] = &pre_edge_ids.parents;
  unmapped_edge_ids[NNIClade::ParentSister] = &pre_edge_ids.sisters;
  unmapped_edge_ids[NNIClade::ChildLeft] = &pre_edge_ids.leftchildren;
  unmapped_edge_ids[NNIClade::ChildRight] = &pre_edge_ids.rightchildren;
  for (const auto nni_clade : NNICladeEnum::Iterator()) {
    mapped_edge_ids[nni_clade] = unmapped_edge_ids[clade_map[nni_clade]];
  }
  edge_ids.central = pre_edge_ids.central;
  edge_ids.parents = *mapped_edge_ids[NNIClade::ParentFocal];
  edge_ids.sisters = *mapped_edge_ids[NNIClade::ParentSister];
  edge_ids.leftchildren = *mapped_edge_ids[NNIClade::ChildLeft];
  edge_ids.rightchildren = *mapped_edge_ids[NNIClade::ChildRight];
  // Assign temp edges for post-NNI.
  AdjEdgeIds temp_edge_ids = GetTempAdjEdgeIds(node_ids);
  // Copy pre-NNI branches to post-NNI branches.
  auto CopyEdgeData = [&](const EdgeId pre_edge_id, const EdgeId post_edge_id) {
    GetGPEngine().CopyGPCSPData(pre_edge_id, post_edge_id);
  };
  // Fixed: take the vectors by const reference instead of by value to avoid
  // copying each EdgeIdVector per call.
  auto CopyEdgesData = [&](const EdgeIdVector &pre_edge_id_vec,
                           const EdgeIdVector &post_edge_id_vec) {
    for (size_t i = 0; i < pre_edge_id_vec.size(); i++) {
      CopyEdgeData(pre_edge_id_vec[i], post_edge_id_vec[i]);
    }
  };
  if (copy_branch_lengths) {
    CopyEdgeData(edge_ids.central, temp_edge_ids.central);
    CopyEdgesData(edge_ids.parents, temp_edge_ids.parents);
    CopyEdgesData(edge_ids.sisters, temp_edge_ids.sisters);
    CopyEdgesData(edge_ids.leftchildren, temp_edge_ids.leftchildren);
    CopyEdgesData(edge_ids.rightchildren, temp_edge_ids.rightchildren);
  }
  return {node_ids, temp_edge_ids};
}
// Reserves and assigns temporary (spare) GPCSP edge slots for evaluating a
// single NNI: one slot for the central edge plus one per adjacent edge.
NNIEvalEngineViaGP::AdjEdgeIds NNIEvalEngineViaGP::GetTempAdjEdgeIds(
    const AdjNodeIds &node_ids) {
  AdjEdgeIds edge_ids;
  // Grow spare edges if necessary.
  const size_t spare_edge_count =
      1 + node_ids.grand_parents.size() + node_ids.grand_sisters.size() +
      node_ids.grand_leftchildren.size() + node_ids.grand_rightchildren.size();
  GetGPEngine().GrowSpareGPCSPs(spare_edge_count);
  // Assign temp edges based on number of adjacent edges in pre-NNI.
  size_t edge_offset = 0;
  // Hands out the next `count` spare edge ids.
  // (Fixed: dropped the previously unused node_id/Direction parameters and
  // the inner vector that shadowed the outer `edge_ids`.)
  const auto GetNextSpareEdgeIds = [this, &edge_offset](const size_t count) {
    EdgeIdVector spare_ids;
    for (size_t i = 0; i < count; i++) {
      spare_ids.push_back(GetGPEngine().GetSpareGPCSPIndex(edge_offset));
      edge_offset++;
    }
    return spare_ids;
  };
  // Assign central edge to first temp index.
  edge_ids.central = GetGPEngine().GetSpareGPCSPIndex(edge_offset);
  edge_offset++;
  // Assign adjacent edges.
  edge_ids.parents = GetNextSpareEdgeIds(node_ids.grand_parents.size());
  edge_ids.sisters = GetNextSpareEdgeIds(node_ids.grand_sisters.size());
  edge_ids.leftchildren = GetNextSpareEdgeIds(node_ids.grand_leftchildren.size());
  edge_ids.rightchildren = GetNextSpareEdgeIds(node_ids.grand_rightchildren.size());
  return edge_ids;
}
// Reserves 2 * PLVTypeEnum::Count spare PLVs and hands out one per PV slot
// needed to evaluate a single NNI: six for the parent node, six for the child.
NNIEvalEngineViaGP::AdjPVIds NNIEvalEngineViaGP::GetTempAdjPVIds() {
  AdjPVIds pv_ids;
  // Grow PLV handler if necessary.
  GetGPEngine().GrowSparePLVs(2 * PLVTypeEnum::Count);
  size_t next_spare = 0;
  // Hands out the next available spare PV id.
  const auto NextPVId = [this, &next_spare]() {
    auto &pvs = GetGPEngine().GetPLVHandler();
    const PVId pv_id = pvs.GetSparePVIndex(PVId(next_spare));
    next_spare++;
    return pv_id;
  };
  // PLVs for the parent node of the NNI.
  pv_ids.parent_p = NextPVId();
  pv_ids.parent_phatfocal = NextPVId();
  pv_ids.parent_phatsister = NextPVId();
  pv_ids.parent_rhat = NextPVId();
  pv_ids.parent_rfocal = NextPVId();
  pv_ids.parent_rsister = NextPVId();
  // PLVs for the child node of the NNI.
  pv_ids.child_p = NextPVId();
  pv_ids.child_phatleft = NextPVId();
  pv_ids.child_phatright = NextPVId();
  pv_ids.child_rhat = NextPVId();
  pv_ids.child_rleft = NextPVId();
  pv_ids.child_rright = NextPVId();
  return pv_ids;
}
// Collects the ids of every DAG edge adjacent to the given NNI: the child's
// edges in both clades, the sister edges, the central edge, and the parent
// edges (excluding edges to the DAG root).
std::set<EdgeId> NNIEvalEngineViaGP::BuildSetOfEdgeIdsAdjacentToNNI(
    const NNIOperation &nni) const {
  std::set<EdgeId> edge_ids;
  const auto parent_id = GetDAG().GetDAGNodeId(nni.GetParent());
  const auto child_id = GetDAG().GetDAGNodeId(nni.GetChild());
  const auto central_edge_id = GetDAG().GetEdgeIdx(parent_id, child_id);
  const auto sister_clade = GetDAG().GetSisterClade(central_edge_id);
  // Collect left and right children (merged from two duplicated loops, using
  // the clade iterator as the parent loop below already does).
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    for (const auto adj_node_id :
         GetDAG().GetDAGNode(child_id).GetNeighbors(Direction::Leafward, clade)) {
      edge_ids.insert(GetDAG().GetEdgeIdx(child_id, adj_node_id));
    }
  }
  // Collect sisters.
  for (const auto adj_node_id :
       GetDAG().GetDAGNode(parent_id).GetNeighbors(Direction::Leafward, sister_clade)) {
    edge_ids.insert(GetDAG().GetEdgeIdx(parent_id, adj_node_id));
  }
  // Collect central.
  edge_ids.insert(central_edge_id);
  // Collect parents, skipping rootward neighbors that are the DAG root.
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    for (const auto adj_node_id :
         GetDAG().GetDAGNode(parent_id).GetNeighbors(Direction::Rootward, clade)) {
      if (GetDAG().IsNodeRoot(adj_node_id)) {
        continue;
      }
      edge_ids.insert(GetDAG().GetEdgeIdx(adj_node_id, parent_id));
    }
  }
  return edge_ids;
}
// Same adjacency as BuildSetOfEdgeIdsAdjacentToNNI, but expressed as the
// edges' PCSP bitsets.
std::set<Bitset> NNIEvalEngineViaGP::BuildSetOfPCSPsAdjacentToNNI(
    const NNIOperation &nni) const {
  std::set<Bitset> pcsps;
  for (const auto edge_id : BuildSetOfEdgeIdsAdjacentToNNI(nni)) {
    pcsps.insert(GetDAG().GetDAGEdgeBitset(edge_id));
  }
  return pcsps;
}
// Grows GP engine storage before computing adjacent-NNI likelihoods.
// With via_reference, only spare PLVs/GPCSPs are reserved — two PLVs per NNI
// when use_unique_temps, otherwise two shared PLVs; without it, the engine is
// grown to cover the full graft DAG.
void NNIEvalEngineViaGP::GrowGPEngineForAdjacentNNILikelihoods(
    const NNISet &adjacent_nnis, const bool via_reference,
    const bool use_unique_temps) {
  if (!via_reference) {
    GetGPEngine().GrowPLVs(GetGraftDAG().NodeCountWithoutDAGRoot());
    GetGPEngine().GrowGPCSPs(GetGraftDAG().EdgeCountWithLeafSubsplits());
    return;
  }
  const size_t spare_plv_count = use_unique_temps ? 2 * adjacent_nnis.size() : 2;
  GetGPEngine().GrowSparePLVs(spare_plv_count);
  GetGPEngine().GrowSpareGPCSPs(adjacent_nnis.size());
}
// Copies GP engine data (PLVs, node data, edge data) from a pre-NNI that is
// in the graft DAG to a post-NNI, using key-index maps to pair up the
// corresponding storage slots. Returns the {pre, post} key-index maps.
NNIEvalEngine::KeyIndexMapPair
NNIEvalEngineViaGP::PassGPEngineDataFromPreNNIToPostNNIViaCopy(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) {
  // Find data in pre-NNI.
  const auto pre_key_idx =
      GetNNIEngine().BuildKeyIndexMapForNNI(pre_nni, GetGraftDAG().NodeCount() - 1);
  // Find data in NNI.
  const auto &post_key_idx =
      GetNNIEngine().BuildKeyIndexMapForNNI(post_nni, GetGraftDAG().NodeCount() - 1);
  // Array for mapping from pre-NNI plvs to post-NNI plvs.
  const auto key_type_map =
      GetNNIEngine().BuildKeyIndexTypePairsFromPreNNIToPostNNI(pre_nni, post_nni);
  // Copy over pre-NNI plvs to NNI plvs.
  GetGPEngine().CopyPLVData(pre_key_idx[KeyIndex::Parent_RHat],
                            post_key_idx[KeyIndex::Parent_RHat]);
  // NOTE(review): the index order below (pre indexed by post_key_type, post
  // by pre_key_type) looks swapped relative to the names — presumably this is
  // how the type-pair map is oriented; confirm against
  // BuildKeyIndexTypePairsFromPreNNIToPostNNI.
  for (const auto &[pre_key_type, post_key_type] : key_type_map) {
    GetGPEngine().CopyPLVData(pre_key_idx[post_key_type], post_key_idx[pre_key_type]);
  }
  // Copy over associated node data.
  for (const auto id_type : {KeyIndex::Parent_Id, KeyIndex::Child_Id}) {
    const auto pre_node_id = NodeId(pre_key_idx[id_type]);
    const auto post_node_id = NodeId(post_key_idx[id_type]);
    GetGPEngine().CopyNodeData(pre_node_id, post_node_id);
  }
  // Copy over central edge data.
  for (const auto idx_type : {KeyIndex::Edge}) {
    const auto pre_edge_idx = EdgeId(pre_key_idx[idx_type]);
    const auto post_edge_idx = EdgeId(post_key_idx[idx_type]);
    GetGPEngine().CopyGPCSPData(pre_edge_idx, post_edge_idx);
  }
  // Copy over associated non-central edge data.
  // Gather common ancestors and descendents of Pre-NNI and Post-NNI.
  for (const auto key_index : {KeyIndex::Parent_Id, KeyIndex::Child_Id}) {
    const auto pre_node = GetGraftDAG().GetDAGNode(NodeId(pre_key_idx[key_index]));
    const auto post_node = GetGraftDAG().GetDAGNode(NodeId(post_key_idx[key_index]));
    for (const auto direction : {Direction::Rootward, Direction::Leafward}) {
      // Ignore parents of child node.
      if ((key_index == KeyIndex::Child_Id) && (direction == Direction::Rootward)) {
        continue;
      }
      for (const auto is_focal_clade : {true, false}) {
        // Ignore focal children of parent node.
        if ((key_index == KeyIndex::Parent_Id) && (direction == Direction::Leafward) &&
            is_focal_clade) {
          continue;
        }
        SubsplitClade prenni_clade = (is_focal_clade ? pre_nni.WhichCladeIsFocal()
                                                     : pre_nni.WhichCladeIsSister());
        for (const auto &adj_node_id : pre_node.GetNeighbors(direction, prenni_clade)) {
          // If edge from Pre-NNI also exists in Post-NNI, copy data over.
          if (GetGraftDAG().ContainsEdge(post_node.Id(), NodeId(adj_node_id))) {
            const auto pre_edge_idx =
                GetGraftDAG().GetEdgeIdx(pre_node.Id(), NodeId(adj_node_id));
            const auto post_edge_idx =
                GetGraftDAG().GetEdgeIdx(post_node.Id(), NodeId(adj_node_id));
            GetGPEngine().CopyGPCSPData(pre_edge_idx, post_edge_idx);
          }
        }
      }
    }
  }
  return {pre_key_idx, post_key_idx};
}
// Seed the proposed post-NNI's engine data by referencing its pre-NNI
// neighbor already in the DAG. Intermediate values go into spare (scratch)
// PLV/GPCSP slots: a unique scratch pair per NNI when `use_unique_temps`,
// otherwise one shared pair. Only the central edge's data is copied.
// Returns the (pre, post) key-index maps.
NNIEvalEngine::KeyIndexMapPair
NNIEvalEngineViaGP::PassGPEngineDataFromPreNNIToPostNNIViaReference(
    const NNIOperation &pre_nni, const NNIOperation &post_nni, const size_t nni_count,
    const bool use_unique_temps) {
  // Locate the pre-NNI's data in the engine.
  KeyIndexMap pre_key_idx = GetNNIEngine().BuildKeyIndexMapForNNI(
      pre_nni, GetDAG().NodeCountWithoutDAGRoot());
  // Derive the post-NNI's map by reference to the pre-NNI's.
  KeyIndexMap post_key_idx =
      GetNNIEngine().BuildKeyIndexMapForPostNNIViaReferencePreNNI(pre_nni, post_nni,
                                                                 pre_key_idx);
  // Scratch slots for intermediate values: consecutive pair, either unique to
  // this NNI or shared across all NNIs.
  const size_t scratch_begin = use_unique_temps ? (nni_count * 2) : 0;
  auto &plv_handler = GetGPEngine().GetPLVHandler();
  post_key_idx[KeyIndex::Parent_RFocal] =
      plv_handler.GetSparePVIndex(PVId(scratch_begin)).value_;
  post_key_idx[KeyIndex::Child_P] =
      plv_handler.GetSparePVIndex(PVId(scratch_begin + 1)).value_;
  post_key_idx[KeyIndex::Edge] = GetGPEngine().GetSpareGPCSPIndex(nni_count);
  // The central edge's data carries over from the pre-NNI.
  GetGPEngine().CopyGPCSPData(EdgeId(pre_key_idx[KeyIndex::Edge]),
                              EdgeId(post_key_idx[KeyIndex::Edge]));
  return {pre_key_idx, post_key_idx};
}
// Optimize all branch lengths: build the DAG-wide optimization schedule once
// and run it for the configured number of iterations.
void NNIEvalEngineViaGP::BranchLengthOptimization() {
  const auto optimize_ops = GetDAG().BranchLengthOptimization();
  const size_t max_iter = GetOptimizationMaxIteration();
  for (size_t iter = 0; iter < max_iter; iter++) {
    GetGPEngine().ProcessOperations(optimize_ops);
  }
}
// Optimize only the given edges' branch lengths. Each iteration runs the
// restricted optimization schedule, then refreshes all PLVs so the next
// iteration sees consistent partials.
void NNIEvalEngineViaGP::BranchLengthOptimization(
    const std::set<EdgeId> &edges_to_optimize) {
  const auto refresh_ops = GetDAG().PopulatePLVs();
  const auto optimize_ops = GetDAG().BranchLengthOptimization(edges_to_optimize);
  const size_t max_iter = GetOptimizationMaxIteration();
  for (size_t iter = 0; iter < max_iter; iter++) {
    GetGPEngine().ProcessOperations(optimize_ops);
    GetGPEngine().ProcessOperations(refresh_ops);
  }
}
// Optimize branch lengths for the edges adjacent to `nni` that are new to the
// DAG (`new_edge_ids`). Builds a bespoke operation schedule: per-edge
// optimization interleaved with local PLV maintenance, followed by rootward
// and leafward update passes over the NNI neighborhood; the schedule is then
// iterated together with a full PLV refresh up to the iteration cap.
void NNIEvalEngineViaGP::NNIBranchLengthOptimization(
    const NNIOperation &nni, const std::set<EdgeId> &new_edge_ids) {
  using namespace GPOperations;
  const auto adj_edge_ids = BuildSetOfEdgeIdsAdjacentToNNI(nni);
  // Get the edges that are new to the DAG.
  std::set<EdgeId> new_adj_edge_ids;
  for (const auto edge_id : adj_edge_ids) {
    if (new_edge_ids.find(edge_id) != new_edge_ids.end()) {
      new_adj_edge_ids.insert(edge_id);
    }
  }
  // Initial optimization.
  GPOperationVector init_ops;
  // Rebuild a node's PHatLeft, PHatRight, and P PLVs from its children's P
  // PLVs (skipped for leaves, which have no children).
  auto BuildOpForRootward = [this, &init_ops](const NodeId node_id) {
    auto node = GetDAG().GetDAGNode(node_id);
    auto &pvs = GetGPEngine().GetPLVHandler();
    if (GetDAG().IsNodeLeaf(node_id)) return;
    // Update PHatLeft.
    init_ops.push_back(ZeroPLV{pvs.GetPVIndex(PLVType::PHatLeft, node_id).value_});
    for (const auto adj_node_id :
         node.GetNeighbors(Direction::Leafward, SubsplitClade::Left)) {
      const auto edge = GetDAG().GetDAGEdge(GetDAG().GetEdgeIdx(node_id, adj_node_id));
      init_ops.push_back(IncrementWithWeightedEvolvedPLV{
          pvs.GetPVIndex(PLVType::PHatLeft, node_id).value_, edge.GetId().value_,
          pvs.GetPVIndex(PLVType::P, adj_node_id).value_});
    }
    // Update PHatRight.
    init_ops.push_back(ZeroPLV{pvs.GetPVIndex(PLVType::PHatRight, node_id).value_});
    for (const auto adj_node_id :
         node.GetNeighbors(Direction::Leafward, SubsplitClade::Right)) {
      const auto edge = GetDAG().GetDAGEdge(GetDAG().GetEdgeIdx(node_id, adj_node_id));
      init_ops.push_back(IncrementWithWeightedEvolvedPLV{
          pvs.GetPVIndex(PLVType::PHatRight, node_id).value_, edge.GetId().value_,
          pvs.GetPVIndex(PLVType::P, adj_node_id).value_});
    }
    // Update P: p(s) = phat(s_right) o phat(s_left).
    init_ops.push_back(Multiply{pvs.GetPVIndex(PLVType::P, node_id).value_,
                                pvs.GetPVIndex(PLVType::PHatRight, node_id).value_,
                                pvs.GetPVIndex(PLVType::PHatLeft, node_id).value_});
  };
  // Rebuild a node's RHat, RRight, and RLeft PLVs from its parents' R PLVs
  // (skipped for the root, which has no parents).
  auto BuildOpForLeafward = [this, &init_ops](const NodeId node_id) {
    auto node = GetDAG().GetDAGNode(node_id);
    auto &pvs = GetGPEngine().GetPLVHandler();
    if (GetDAG().IsNodeRoot(node_id)) return;
    // Update RHat.
    init_ops.push_back(ZeroPLV{pvs.GetPVIndex(PLVType::RHat, node_id).value_});
    for (const auto clade : SubsplitCladeEnum::Iterator()) {
      for (const auto adj_node_id : node.GetNeighbors(Direction::Rootward, clade)) {
        const auto edge =
            GetDAG().GetDAGEdge(GetDAG().GetEdgeIdx(adj_node_id, node_id));
        init_ops.push_back(IncrementWithWeightedEvolvedPLV{
            pvs.GetPVIndex(PLVType::RHat, node_id).value_, edge.GetId().value_,
            pvs.GetPVIndex(PLVTypeEnum::RPLVType(edge.GetSubsplitClade()), adj_node_id)
                .value_});
      }
    }
    // Update RRight: r(s_right) = rhat(s) o phat(s_left).
    // (Comment previously mislabeled this as the RLeft update.)
    init_ops.push_back(Multiply{pvs.GetPVIndex(PLVType::RRight, node_id).value_,
                                pvs.GetPVIndex(PLVType::RHat, node_id).value_,
                                pvs.GetPVIndex(PLVType::PHatLeft, node_id).value_});
    // Update RLeft: r(s_left) = rhat(s) o phat(s_right).
    // (Comment previously mislabeled this as the RRight update.)
    init_ops.push_back(Multiply{pvs.GetPVIndex(PLVType::RLeft, node_id).value_,
                                pvs.GetPVIndex(PLVType::RHat, node_id).value_,
                                pvs.GetPVIndex(PLVType::PHatRight, node_id).value_});
  };
  // Emit an OptimizeBranchLength op for `edge_id` if it is one of the new
  // adjacent edges; when `do_update`, sandwich it between refreshes of the
  // parent's PLVs so the optimization sees up-to-date partials.
  auto BuildOpForBranchLengthOptimization =
      [this, &init_ops, &new_adj_edge_ids, &BuildOpForRootward, &BuildOpForLeafward](
          const EdgeId edge_id, const bool do_update = true) {
        if (GetDAG().IsEdgeRoot(edge_id)) return;
        if (new_adj_edge_ids.find(edge_id) == new_adj_edge_ids.end()) return;
        const auto &edge = GetDAG().GetDAGEdge(edge_id);
        auto &pvs = GetGPEngine().GetPLVHandler();
        if (do_update) {
          BuildOpForRootward(edge.GetParent());
          BuildOpForLeafward(edge.GetParent());
        }
        PVId parent_rfocal = pvs.GetPVIndex(
            PLVTypeEnum::RPLVType(edge.GetSubsplitClade()), edge.GetParent());
        PVId child_p = pvs.GetPVIndex(PLVType::P, edge.GetChild());
        init_ops.push_back(
            OptimizeBranchLength{parent_rfocal.value_, child_p.value_, edge_id.value_});
        if (do_update) {
          BuildOpForRootward(edge.GetParent());
        }
      };
  const auto [node_ids, edge_ids] = GetAdjNodeAndEdgeIds(nni);
  std::ignore = node_ids;
  // Build Optimization Operations.
  // Order: left children, right children, sisters, central, parents.
  for (const auto edge_id : edge_ids.leftchildren) {
    BuildOpForBranchLengthOptimization(edge_id);
  }
  for (const auto edge_id : edge_ids.rightchildren) {
    BuildOpForBranchLengthOptimization(edge_id);
  }
  for (const auto edge_id : edge_ids.sisters) {
    BuildOpForBranchLengthOptimization(edge_id);
  }
  BuildOpForBranchLengthOptimization(edge_ids.central);
  for (const auto edge_id : edge_ids.parents) {
    BuildOpForBranchLengthOptimization(edge_id);
  }
  // Build update PLVs.
  // Rootward Pass: refresh each affected child node's P-side PLVs.
  for (const auto edge_id : edge_ids.leftchildren) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForRootward(edge.GetChild());
  }
  for (const auto edge_id : edge_ids.rightchildren) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForRootward(edge.GetChild());
  }
  for (const auto edge_id : edge_ids.sisters) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForRootward(edge.GetChild());
  }
  for (const auto edge_id : {edge_ids.central}) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForRootward(edge.GetChild());
  }
  for (const auto edge_id : edge_ids.parents) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForRootward(edge.GetChild());
  }
  // Leafward Pass: refresh each affected parent node's R-side PLVs, walking
  // in the reverse order of the rootward pass.
  for (const auto edge_id : edge_ids.parents) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForLeafward(edge.GetParent());
  }
  for (const auto edge_id : {edge_ids.central}) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForLeafward(edge.GetParent());
  }
  for (const auto edge_id : edge_ids.sisters) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForLeafward(edge.GetParent());
  }
  for (const auto edge_id : edge_ids.rightchildren) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForLeafward(edge.GetParent());
  }
  for (const auto edge_id : edge_ids.leftchildren) {
    const auto &edge = GetDAG().GetDAGEdge(edge_id);
    BuildOpForLeafward(edge.GetParent());
  }
  // Iterate: run the local schedule, then refresh all PLVs.
  for (size_t iter = 0; iter < GetOptimizationMaxIteration(); iter++) {
    GetGPEngine().ProcessOperations(init_ops);
    GetGPEngine().ProcessOperations(GetDAG().PopulatePLVs());
  }
}
void NNIEvalEngineViaGP::NNIBranchLengthOptimization(const NNIOperation &nni) {
using namespace GPOperations;
const auto adj_edge_ids = BuildSetOfEdgeIdsAdjacentToNNI(nni);
// Initial optimization.
GPOperationVector init_ops;
auto BuildGPOpForBranchLengthOptimization = [this, &init_ops](const EdgeId edge_id) {
if (GetDAG().IsEdgeRoot(edge_id)) return;
const auto &edge = GetDAG().GetDAGEdge(edge_id);
auto &pvs = GetGPEngine().GetPLVHandler();
PVId parent_rfocal = pvs.GetPVIndex(PLVTypeEnum::RPLVType(edge.GetSubsplitClade()),
edge.GetParent());
PVId child_p = pvs.GetPVIndex(PLVType::P, edge.GetChild());
init_ops.push_back(
OptimizeBranchLength{parent_rfocal.value_, child_p.value_, edge_id.value_});
};
const auto [node_ids, edge_ids] = GetAdjNodeAndEdgeIds(nni);
std::ignore = node_ids;
for (const auto edge_id : edge_ids.leftchildren) {
BuildGPOpForBranchLengthOptimization(edge_id);
}
for (const auto edge_id : edge_ids.rightchildren) {
BuildGPOpForBranchLengthOptimization(edge_id);
}
for (const auto edge_id : edge_ids.sisters) {
BuildGPOpForBranchLengthOptimization(edge_id);
}
BuildGPOpForBranchLengthOptimization(edge_ids.central);
for (const auto edge_id : edge_ids.parents) {
BuildGPOpForBranchLengthOptimization(edge_id);
}
GetGPEngine().ProcessOperations(init_ops);
// Optimize after updating entire DAG.
const auto full_ops = GetDAG().BranchLengthOptimization(adj_edge_ids);
for (size_t iter = 1; iter < GetOptimizationMaxIteration(); iter++) {
GetGPEngine().ProcessOperations(full_ops);
GetGPEngine().ProcessOperations(GetDAG().PopulatePLVs());
}
}
// ** NNIEvalEngineViaTP
// Initialize the underlying TP engine.
void NNIEvalEngineViaTP::Init() {
  GetTPEngine().Initialize();
}
// Prepare the TP engine: build its choice map first, then its scores.
void NNIEvalEngineViaTP::Prep() {
  auto &tp_engine = GetTPEngine();
  tp_engine.InitializeChoiceMap();
  tp_engine.InitializeScores();
}
// Resize TP engine storage to match the DAG's current node and edge counts,
// applying the given reindexers (if any) to relocate existing data.
void NNIEvalEngineViaTP::GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
                                          std::optional<Reindexer> edge_reindexer) {
  auto &tp_engine = GetTPEngine();
  tp_engine.GrowNodeData(GetDAG().NodeCount(), node_reindexer);
  tp_engine.GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), edge_reindexer);
}
// Grow TP engine storage to accommodate the adjacent NNIs. In reference mode
// only spare scratch space is needed (a unique slice per NNI when
// `use_unique_temps`, otherwise one shared slice); otherwise grow full
// storage to the grafted DAG's size.
void NNIEvalEngineViaTP::GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                                   const bool via_reference,
                                                   const bool use_unique_temps) {
  auto &tp_engine = GetTPEngine();
  if (!via_reference) {
    tp_engine.GrowNodeData(GetGraftDAG().NodeCountWithoutDAGRoot());
    tp_engine.GrowEdgeData(GetGraftDAG().EdgeCountWithLeafSubsplits());
    return;
  }
  const size_t nni_copies = use_unique_temps ? adjacent_nnis.size() : 1;
  tp_engine.GrowSpareNodeData(GetSpareNodesPerNNI() * nni_copies);
  tp_engine.GrowSpareEdgeData(GetSpareEdgesPerNNI() * nni_copies);
}
// Propagate a DAG modification into the TP engine, forwarding the NNI map and
// both reindexers so the engine can relocate its per-node/per-edge data.
void NNIEvalEngineViaTP::UpdateEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &pre_nni_to_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  GetTPEngine().UpdateAfterModifyingDAG(pre_nni_to_nni, prev_node_count,
                                        node_reindexer, prev_edge_count,
                                        edge_reindexer);
}
// Score every proposed adjacent NNI against its NNI neighbor already present
// in the DAG, recording results in the scored-NNI map.
void NNIEvalEngineViaTP::ScoreAdjacentNNIs(const NNISet &adjacent_nnis) {
  // Optionally precompute the map from proposed-NNI PCSPs to best pre-NNI edges.
  std::optional<BitsetEdgeIdMap> best_edge_map = std::nullopt;
  if (GetTPEngine().GetUseBestEdgeMap()) {
    best_edge_map =
        GetTPEngine().BuildMapOfProposedNNIPCSPsToBestPreNNIEdges(adjacent_nnis);
  }
  for (const auto &nni : adjacent_nnis) {
    const auto pre_nni = GetDAG().FindNNINeighborInDAG(nni);
    const auto score =
        GetTPEngine().GetTopTreeScoreWithProposedNNI(nni, pre_nni, 0, best_edge_map);
    GetScoredNNIs()[nni] = score;
  }
}
// Score an NNI that is already internal to the DAG via its central edge.
double NNIEvalEngineViaTP::ScoreInternalNNIByNNI(const NNIOperation &nni) const {
  Assert(GetDAG().ContainsNNI(nni), "DAG does not contain NNI.");
  return ScoreInternalNNIByEdge(GetDAG().GetEdgeIdx(nni));
}
// Score by edge: the TP engine tracks the top tree score per edge.
double NNIEvalEngineViaTP::ScoreInternalNNIByEdge(const EdgeId &edge_id) const {
  return GetTPEngine().GetTopTreeScore(edge_id);
}
// Number of spare (scratch) node slots required per proposed NNI.
size_t NNIEvalEngineViaTP::GetSpareNodesPerNNI() const {
  return GetTPEngine().GetSpareNodesPerNNI();
}
// Number of spare (scratch) edge slots required per proposed NNI.
size_t NNIEvalEngineViaTP::GetSpareEdgesPerNNI() const {
  // Fix: previously returned GetSpareNodesPerNNI() (copy-paste error), so the
  // edge scratch space was sized from the node count.
  return GetTPEngine().GetSpareEdgesPerNNI();
}
| 47,221
|
C++
|
.cpp
| 1,025
| 39.213659
| 88
| 0.674458
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,033
|
quartet_hybrid_request.cpp
|
phylovi_bito/src/quartet_hybrid_request.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "quartet_hybrid_request.hpp"
// A request is usable only when all four tip categories are populated.
bool QuartetHybridRequest::IsFullyFormed() const {
  if (rootward_tips_.empty()) return false;
  if (sister_tips_.empty()) return false;
  if (rotated_tips_.empty()) return false;
  return !sorted_tips_.empty();
}
// Render a tip as "(tip node <id>, plv <idx>, gpcsp <idx>)".
std::ostream& operator<<(std::ostream& os, QuartetTip const& plv_pcsp) {
  os << "(tip node " << plv_pcsp.tip_node_id_;
  os << ", plv " << plv_pcsp.plv_idx_;
  os << ", gpcsp " << plv_pcsp.gpcsp_idx_ << ")";
  return os;
}
// Render a tip vector as "[tip, tip, ]" — note every element, including the
// last, is followed by ", " (kept for output compatibility).
std::ostream& operator<<(std::ostream& os, QuartetTipVector const& plv_pcsp_vector) {
  os << "[";
  for (const auto& tip : plv_pcsp_vector) {
    os << tip << ", ";
  }
  os << "]";
  return os;
}
// Render the full request: central GPCSP followed by each tip category,
// one per line.
std::ostream& operator<<(std::ostream& os, QuartetHybridRequest const& request) {
  os << "[\n"
     << "\tcentral GPCSP: " << request.central_gpcsp_idx_ << "\n"
     << "\trootward tips: " << request.rootward_tips_ << "\n"
     << "\tsister tips: " << request.sister_tips_ << "\n"
     << "\trotated tips: " << request.rotated_tips_ << "\n"
     << "\tsorted tips: " << request.sorted_tips_ << "\n"
     << "]" << std::endl;
  return os;
}
| 1,209
|
C++
|
.cpp
| 30
| 37.466667
| 86
| 0.616695
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,034
|
gp_dag.cpp
|
phylovi_bito/src/gp_dag.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "gp_dag.hpp"
#include "numerical_utils.hpp"
using namespace GPOperations; // NOLINT
using PLVType = PLVTypeEnum::Type;
// Flatten a (PLV type, node) pair to a raw index into the engine's PLV storage.
size_t GPDAG::GetPLVIndex(PLVType plv_type, NodeId node_id) const {
  const auto pv_index =
      PLVNodeHandler::GetPVIndex(plv_type, node_id, NodeCountWithoutDAGRoot());
  return pv_index.value_;
}
// The R PLV update that corresponds to our rotation status.
// Build the R-PLV update matching the edge's rotation status:
//   left  edge: RLeft  = RHat o PHatRight
//   right edge: RRight = RHat o PHatLeft
GPOperation GPDAG::RUpdateOfRotated(NodeId node_id, bool is_edge_on_left) const {
  if (is_edge_on_left) {
    return Multiply{GetPLVIndex(PLVType::RLeft, node_id),
                    GetPLVIndex(PLVType::RHat, node_id),
                    GetPLVIndex(PLVType::PHatRight, node_id)};
  }
  return Multiply{GetPLVIndex(PLVType::RRight, node_id),
                  GetPLVIndex(PLVType::RHat, node_id),
                  GetPLVIndex(PLVType::PHatLeft, node_id)};
}
// After this traversal, we will have optimized branch lengths, but we cannot assume
// that all of the PLVs are in a valid state.
//
// Update the terminology in this function as part of #288.
// Build a branch-length-optimization schedule using a plain (non-tidy)
// depth-first traversal from the rootsplits. Per-edge optimization is
// interleaved with R-hat/P maintenance at each node; the op ordering is
// determined entirely by the traversal callbacks below.
GPOperationVector GPDAG::ApproximateBranchLengthOptimization() const {
  GPOperationVector operations;
  SubsplitDAG::DepthFirstWithAction(
      GetRootsplitNodeIds(),
      SubsplitDAGTraversalAction(
          // BeforeNode
          [this, &operations](NodeId node_id) {
            if (!GetDAGNode(node_id).IsRootsplit()) {
              // Update R-hat if we're not at the root.
              UpdateRHat(node_id, operations);
            }
          },
          // AfterNode
          [this, &operations](NodeId node_id) {
            // Make P the elementwise product ("o") of the two P PLVs for the
            // node-clades.
            operations.push_back(Multiply{GetPLVIndex(PLVType::P, node_id),
                                          GetPLVIndex(PLVType::PHatRight, node_id),
                                          GetPLVIndex(PLVType::PHatLeft, node_id)});
          },
          // BeforeNodeClade
          [this, &operations](NodeId node_id, bool is_edge_on_left) {
            const PLVType p_hat_plv_type =
                is_edge_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
            // Update the R PLV corresponding to our rotation status.
            operations.push_back(RUpdateOfRotated(node_id, is_edge_on_left));
            // Zero out the node-clade PLV so we can fill it as part of VisitEdge.
            operations.push_back(ZeroPLV{GetPLVIndex(p_hat_plv_type, node_id)});
          },
          // VisitEdge
          [this, &operations](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            // #310 this is temporary:
            // We do a full PLV population and then marginal likelihood calculation.
            // GPOperations::AppendGPOperations(operations, PopulatePLVs());
            // GPOperations::AppendGPOperations(operations, MarginalLikelihood());
            // Optimize each branch for a given node-clade and accumulate the resulting
            // P-hat PLVs in the parent node.
            OptimizeBranchLengthUpdatePHat(node_id, child_id, is_edge_on_left,
                                           operations);
          }));
  return operations;
}
// After this traversal, we will have optimized branch lengths, but we cannot assume
// that all of the PLVs are in a valid state.
//
// Update the terminology in this function as part of #288.
// Build a branch-length-optimization schedule over the whole DAG using a
// "tidy" depth-first traversal from the rootsplits. The tidy traversal
// distinguishes edges whose branch length is optimized (ModifyEdge) from
// edges whose P-hat merely needs refreshing (UpdateEdge).
GPOperationVector GPDAG::BranchLengthOptimization() {
  GPOperationVector operations;
  DepthFirstWithTidyAction(
      GetRootsplitNodeIds(),
      TidySubsplitDAGTraversalAction(
          // BeforeNode
          [this, &operations](NodeId node_id) {
            if (!GetDAGNode(node_id).IsRootsplit()) {
              // Update R-hat if we're not at the root.
              UpdateRHat(node_id, operations);
            }
          },
          // AfterNode
          [this, &operations](NodeId node_id) {
            // Make P the elementwise product ("o") of the two P PLVs for the
            // node-clades.
            operations.push_back(Multiply{GetPLVIndex(PLVType::P, node_id),
                                          GetPLVIndex(PLVType::PHatRight, node_id),
                                          GetPLVIndex(PLVType::PHatLeft, node_id)});
          },
          // BeforeNodeClade
          [this, &operations](NodeId node_id, bool is_edge_on_left) {
            const PLVType p_hat_plv_type =
                is_edge_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
            // Update the R PLV corresponding to our rotation status.
            operations.push_back(RUpdateOfRotated(node_id, is_edge_on_left));
            // Zero out the node-clade PLV so we can fill it as part of VisitEdge.
            operations.push_back(ZeroPLV{GetPLVIndex(p_hat_plv_type, node_id)});
          },
          // ModifyEdge
          [this, &operations](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            // Optimize each branch for a given node-clade and accumulate the resulting
            // P-hat PLVs in the parent node.
            OptimizeBranchLengthUpdatePHat(node_id, child_id, is_edge_on_left,
                                           operations);
          },
          // UpdateEdge
          [this, &operations](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            // Accumulate all P-hat PLVs in the parent node without optimization.
            // #321 I don't think we need this Likelihood call... just the update PHat.
            UpdatePHatComputeLikelihood(node_id, child_id, is_edge_on_left, operations);
          }));
  return operations;
}
// After this traversal, we will have optimized branch lengths, but we cannot assume
// that all of the PLVs are in a valid state.
//
// Update the terminology in this function as part of #288.
// Same tidy traversal as BranchLengthOptimization() above, but only edges in
// `edges_to_optimize` receive an OptimizeBranchLength op; all other edges
// still contribute their P-hat accumulation so PLVs stay consistent.
GPOperationVector GPDAG::BranchLengthOptimization(
    const std::set<EdgeId> &edges_to_optimize) {
  GPOperationVector operations;
  DepthFirstWithTidyAction(
      GetRootsplitNodeIds(),
      TidySubsplitDAGTraversalAction(
          // BeforeNode
          [this, &operations](NodeId node_id) {
            if (!GetDAGNode(node_id).IsRootsplit()) {
              // Update R-hat if we're not at the root.
              UpdateRHat(node_id, operations);
            }
          },
          // AfterNode
          [this, &operations](NodeId node_id) {
            // Make P the elementwise product ("o") of the two P PLVs for the
            // node-clades.
            operations.push_back(Multiply{GetPLVIndex(PLVType::P, node_id),
                                          GetPLVIndex(PLVType::PHatRight, node_id),
                                          GetPLVIndex(PLVType::PHatLeft, node_id)});
          },
          // BeforeNodeClade
          [this, &operations](NodeId node_id, bool is_edge_on_left) {
            const PLVType p_hat_plv_type =
                is_edge_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
            // Update the R PLV corresponding to our rotation status.
            operations.push_back(RUpdateOfRotated(node_id, is_edge_on_left));
            // Zero out the node-clade PLV so we can fill it as part of VisitEdge.
            operations.push_back(ZeroPLV{GetPLVIndex(p_hat_plv_type, node_id)});
          },
          // ModifyEdge
          [this, &operations, &edges_to_optimize](NodeId node_id, NodeId child_id,
                                                  bool is_edge_on_left) {
            // Optimize each branch for a given node-clade and accumulate the resulting
            // P-hat PLVs in the parent node.
            const auto edge_id = GetEdgeIdx(node_id, child_id);
            const bool do_optimize_branch_length =
                (edges_to_optimize.find(edge_id) != edges_to_optimize.end());
            OptimizeBranchLengthUpdatePHat(node_id, child_id, is_edge_on_left,
                                           operations, do_optimize_branch_length);
          },
          // UpdateEdge
          [this, &operations](NodeId node_id, NodeId child_id, bool is_edge_on_left) {
            // Accumulate all P-hat PLVs in the parent node without optimization.
            // #321 I don't think we need this Likelihood call... just the update PHat.
            UpdatePHatComputeLikelihood(node_id, child_id, is_edge_on_left, operations);
          }));
  return operations;
}
// Emit one Likelihood op per real DAG edge (parent R-PLV against child P-PLV),
// followed by the marginal-likelihood accumulation over the rootsplits.
GPOperationVector GPDAG::ComputeLikelihoods() const {
  GPOperationVector operations;
  IterateOverRealNodes([this, &operations](SubsplitDAGNode parent) {
    IterateOverLeafwardEdges(
        parent, [this, parent, &operations](const bool is_edge_on_left,
                                            SubsplitDAGNode child) {
          const auto edge_idx = GetEdgeIdx(parent.Id(), child.Id());
          operations.push_back(Likelihood{
              edge_idx.value_,
              GetPLVIndex(PLVTypeEnum::RPLVType(is_edge_on_left), parent.Id()),
              GetPLVIndex(PLVType::P, child.Id())});
        });
  });
  const auto marginal_ops = MarginalLikelihood();
  operations.insert(operations.end(), marginal_ops.begin(), marginal_ops.end());
  return operations;
}
// Leafward pass over the default traversal order.
GPOperationVector GPDAG::LeafwardPass() const {
  const auto visit_order = LeafwardNodeTraversalTrace(false);
  return LeafwardPass(visit_order);
}
// Reset the marginal-likelihood accumulator, then add one increment per
// rootsplit (its RHat and P PLVs plus the root-to-rootsplit edge).
GPOperationVector GPDAG::MarginalLikelihood() const {
  GPOperationVector operations;
  operations.push_back(GPOperations::ResetMarginalLikelihood{});
  for (const auto &rootsplit_id : GetRootsplitNodeIds()) {
    const NodeId rootsplit_node_id(rootsplit_id);
    operations.push_back(GPOperations::IncrementMarginalLikelihood{
        GetPLVIndex(PLVType::RHat, rootsplit_node_id),
        GetEdgeIdx(GetDAGRootNodeId(), rootsplit_node_id).value_,
        GetPLVIndex(PLVType::P, rootsplit_node_id)});
  }
  return operations;
}
// Rootward pass over the default traversal order.
GPOperationVector GPDAG::RootwardPass() const {
  const auto visit_order = RootwardNodeTraversalTrace(false);
  return RootwardPass(visit_order);
}
// Build SBN-parameter optimization operations: one UpdateSBNProbabilities per
// subsplit (and its rotation) whose child-edge range warrants optimization,
// plus a final update over the rootsplit range.
GPOperationVector GPDAG::OptimizeSBNParameters() const {
  GPOperationVector operations;
  // Fix: removed `visited_nodes`, an unordered_set that was declared but
  // never read or written.
  for (const auto &id : LeafwardNodeTraversalTrace(false)) {
    const auto node = GetDAGNode(id);
    OptimizeSBNParametersForASubsplit(node.GetBitset(), operations);
    OptimizeSBNParametersForASubsplit(node.GetBitset().SubsplitRotate(), operations);
  }
  operations.push_back(UpdateSBNProbabilities{0, RootsplitCount()});
  return operations;
}
// Zero every node's leafward-facing PLVs (RHat, RRight, RLeft).
GPOperationVector GPDAG::SetLeafwardZero() const {
  GPOperationVector operations;
  for (NodeId node_id = NodeId(0); node_id < NodeCountWithoutDAGRoot(); node_id++) {
    for (const auto plv_type : {PLVType::RHat, PLVType::RRight, PLVType::RLeft}) {
      operations.push_back(ZeroPLV{GetPLVIndex(plv_type, node_id)});
    }
  }
  return operations;
}
// Initialize each rootsplit's RHat PLV to the stationary distribution,
// weighted via its root-to-rootsplit edge.
GPOperationVector GPDAG::SetRhatToStationary() const {
  GPOperationVector operations;
  for (const auto &rootsplit_node_id : GetRootsplitNodeIds()) {
    const auto root_edge_idx = GetEdgeIdx(GetDAGRootNodeId(), rootsplit_node_id);
    operations.push_back(SetToStationaryDistribution{
        GetPLVIndex(PLVType::RHat, rootsplit_node_id), root_edge_idx.value_});
  }
  return operations;
}
// Zero every internal node's rootward-facing PLVs (P, PHatRight, PHatLeft);
// leaf nodes (ids below TaxonCount) are skipped.
GPOperationVector GPDAG::SetRootwardZero() const {
  GPOperationVector operations;
  for (NodeId node_id = NodeId(TaxonCount()); node_id < NodeCountWithoutDAGRoot();
       node_id++) {
    for (const auto plv_type : {PLVType::P, PLVType::PHatRight, PLVType::PHatLeft}) {
      operations.push_back(ZeroPLV{GetPLVIndex(plv_type, node_id)});
    }
  }
  return operations;
}
// Assemble the leafward compute pass over `visit_order`: accumulate each
// node's rhat from its parents, then form both clade R PLVs.
GPOperationVector GPDAG::LeafwardPass(const NodeIdVector &visit_order) const {
  GPOperationVector operations;
  for (const auto current_id : visit_order) {
    const auto current_node = GetDAGNode(current_id);
    // rhat(s) += \sum_t q(s|t) P'(s|t) r(t)
    AddRhatOperations(current_node, operations);
    // r(s_right) = rhat(s) \circ phat(s_left).
    operations.push_back(RUpdateOfRotated(current_id, false));
    // r(s_left) = rhat(s) \circ phat(s_right).
    operations.push_back(RUpdateOfRotated(current_id, true));
  }
  return operations;
}
// Assemble the rootward compute pass over `visit_order`: for each internal
// node, build both clade p-hats, then form P as their elementwise product.
GPOperationVector GPDAG::RootwardPass(const NodeIdVector &visit_order) const {
  GPOperationVector operations;
  for (const auto node_id : visit_order) {
    const auto node = GetDAGNode(node_id);
    if (!node.IsLeaf()) {
      // Build phat(s_right).
      AddPhatOperations(node, false, operations);
      // Build phat(s_left).
      AddPhatOperations(node, true, operations);
      // Multiply to get p(s) = phat(s_left) \circ phat(s_right).
      // Fix: use GetPLVIndex(PLVType::P, ...) as the destination instead of
      // the raw node id, matching every other PLV access in this file rather
      // than silently relying on P-type PLVs being laid out at offset zero.
      operations.push_back(Multiply{GetPLVIndex(PLVType::P, node_id),
                                    GetPLVIndex(PLVType::PHatRight, node_id),
                                    GetPLVIndex(PLVType::PHatLeft, node_id)});
    }
  }
  return operations;
}
// Build the full PLV-population schedule: zero both directions, seed the
// rootsplit rhats with the stationary distribution, then do the rootward and
// leafward compute passes, in that order.
GPOperationVector GPDAG::PopulatePLVs() const {
  GPOperationVector operations;
  for (const auto &stage : {SetRootwardZero(), SetLeafwardZero(),
                            SetRhatToStationary(), RootwardPass(), LeafwardPass()}) {
    GPOperations::AppendGPOperations(operations, stage);
  }
  return operations;
}
// Take in some new operations, determine an appropriate PrepForMarginalization for
// them, then append the PrepForMarginalization and the new operations to `operations`
// (in that order).
// Append `new_operations` to `operations`, preceded by the matching
// PrepForMarginalization op; does nothing for an empty batch.
void AppendOperationsAfterPrepForMarginalization(
    GPOperationVector &operations, const GPOperationVector &new_operations) {
  if (new_operations.empty()) {
    return;
  }
  operations.push_back(PrepForMarginalizationOfOperations(new_operations));
  for (const auto &op : new_operations) {
    operations.push_back(op);
  }
}
// Accumulate, into the node's clade p-hat PLV, one weighted-evolved-PLV
// increment per child on the given clade (prefixed by a marginalization prep).
void GPDAG::AddPhatOperations(SubsplitDAGNode node, bool is_edge_on_left,
                              GPOperationVector &operations) const {
  const NodeId parent_id = node.Id();
  const PVId dest_pvid = GetPLVIndex(PLVTypeEnum::PPLVType(is_edge_on_left), parent_id);
  GPOperationVector phat_ops;
  for (const auto &child_id : node.GetLeafward(is_edge_on_left)) {
    const EdgeId edge_id = GetEdgeIdx(parent_id, NodeId(child_id));
    phat_ops.push_back(IncrementWithWeightedEvolvedPLV{
        dest_pvid.value_, edge_id.value_, GetPLVIndex(PLVType::P, NodeId(child_id))});
  }
  AppendOperationsAfterPrepForMarginalization(operations, phat_ops);
}
// Accumulate, into the node's RHat PLV, one weighted-evolved-PLV increment
// per rootward edge, reading the parent's clade-appropriate R PLV.
void GPDAG::AddRhatOperations(SubsplitDAGNode node,
                              GPOperationVector &operations) const {
  GPOperationVector rhat_ops;
  IterateOverRootwardEdges(
      node, [this, node, &rhat_ops](const bool is_edge_on_left,
                                    SubsplitDAGNode parent_node) {
        const auto parent_id = parent_node.Id();
        rhat_ops.push_back(IncrementWithWeightedEvolvedPLV{
            GetPLVIndex(PLVType::RHat, node.Id()),
            GetEdgeIdx(parent_id, node.Id()).value_,
            GetPLVIndex(PLVTypeEnum::RPLVType(is_edge_on_left), parent_id)});
      });
  AppendOperationsAfterPrepForMarginalization(operations, rhat_ops);
}
// Append an UpdateSBNProbabilities op covering `subsplit`'s child-edge range,
// but only when the range holds more than one edge (a lone child edge has
// probability one, so there is nothing to optimize).
void GPDAG::OptimizeSBNParametersForASubsplit(const Bitset &subsplit,
                                              GPOperationVector &operations) const {
  if (parent_to_child_range_.count(subsplit) > 0) {
    const auto &[edge_begin, edge_end] = parent_to_child_range_.at(subsplit);
    // Fix: the subtraction was reversed (`edge_begin - edge_end`); with
    // unsigned index values that wraps around, so single-edge ranges were
    // also (incorrectly) scheduled for optimization.
    if (edge_end.value_ - edge_begin.value_ > 1) {
      operations.push_back(UpdateSBNProbabilities{edge_begin.value_, edge_end.value_});
    }
  }
}
// Rebuild the node's RHat PLV: zero it, then accumulate one increment per
// rootward edge (right-clade parents first, then left-clade), reading the
// parent's clade-appropriate R PLV.
void GPDAG::UpdateRHat(NodeId node_id, GPOperationVector &operations) const {
  operations.push_back(ZeroPLV{GetPLVIndex(PLVType::RHat, node_id)});
  const auto node = GetDAGNode(node_id);
  GPOperationVector increment_ops;
  for (const bool is_edge_on_left : {false, true}) {
    const PLVType parent_r_type = is_edge_on_left ? PLVType::RLeft : PLVType::RRight;
    for (auto parent_id : node.GetRootward(is_edge_on_left)) {
      increment_ops.push_back(IncrementWithWeightedEvolvedPLV{
          GetPLVIndex(PLVType::RHat, node_id), GetEdgeIdx(parent_id, node_id).value_,
          GetPLVIndex(parent_r_type, parent_id)});
    }
  }
  AppendOperationsAfterPrepForMarginalization(operations, increment_ops);
}
// #311 there's some work to be done here.
// There's a lot of common code between this function and the next.
// Also, the prep for marginalization isn't actually working correctly: we need to
// gather more operations first.
// For one (parent, child) edge: accumulate the child's evolved P into the
// parent's clade p-hat, then emit the edge's Likelihood op.
// #311 there's shared structure with OptimizeBranchLengthUpdatePHat below,
// and the marginalization prep still needs more operations gathered first.
void GPDAG::UpdatePHatComputeLikelihood(NodeId node_id, NodeId child_node_id,
                                        bool is_edge_on_left,
                                        GPOperationVector &operations) const {
  const auto edge_idx = GetEdgeIdx(node_id, child_node_id);
  const PLVType p_hat_type = is_edge_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
  GPOperationVector new_ops;
  new_ops.push_back(IncrementWithWeightedEvolvedPLV{
      GetPLVIndex(p_hat_type, node_id),
      edge_idx.value_,
      GetPLVIndex(PLVType::P, child_node_id),
  });
  new_ops.push_back(
      Likelihood{edge_idx.value_,
                 GetPLVIndex(PLVTypeEnum::RPLVType(is_edge_on_left), node_id),
                 GetPLVIndex(PLVType::P, child_node_id)});
  AppendOperationsAfterPrepForMarginalization(operations, new_ops);
}
// For one (parent, child) edge: optionally emit an OptimizeBranchLength op,
// then accumulate the child's evolved P into the parent's clade p-hat.
void GPDAG::OptimizeBranchLengthUpdatePHat(NodeId node_id, NodeId child_node_id,
                                           bool is_edge_on_left,
                                           GPOperationVector &operations,
                                           bool do_optimize_branch_length) const {
  const EdgeId edge_idx = GetEdgeIdx(node_id, child_node_id);
  if (do_optimize_branch_length) {
    operations.push_back(OptimizeBranchLength{
        GetPLVIndex(PLVType::P, child_node_id),
        GetPLVIndex(PLVTypeEnum::RPLVType(is_edge_on_left), node_id),
        edge_idx.value_});
  }
  const PLVType p_hat_type = is_edge_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
  GPOperationVector phat_ops;
  phat_ops.push_back(IncrementWithWeightedEvolvedPLV{
      GetPLVIndex(p_hat_type, node_id),
      edge_idx.value_,
      GetPLVIndex(PLVType::P, child_node_id),
  });
  AppendOperationsAfterPrepForMarginalization(operations, phat_ops);
}
// Assemble a quartet hybrid request for the (parent, child) edge:
//  - rootward tips: the parent's grandparents with their R-PLVs and the
//    connecting edge indices;
//  - sister tips: the parent's children on the non-focal clade (P-PLVs);
//  - rotated/sorted tips: the child's grandchildren, split by which clade
//    (left vs. right) each edge descends through (P-PLVs).
QuartetHybridRequest GPDAG::QuartetHybridRequestOf(NodeId parent_id,
                                                   bool is_focal_on_left,
                                                   NodeId child_id) const {
  QuartetTipVector rootward_tips;
  IterateOverRootwardEdgesAndParents(
      GetDAGNode(parent_id),
      [this, &rootward_tips](const EdgeId gpcsp_idx, const bool is_rootward_on_left,
                             const NodeId grandparent_id) {
        rootward_tips.emplace_back(
            grandparent_id.value_,
            GetPLVIndex(PLVTypeEnum::RPLVType(is_rootward_on_left), grandparent_id),
            gpcsp_idx.value_);
      });
  QuartetTipVector sister_tips;
  const auto &parent_node = GetDAGNode(parent_id);
  // Sisters live on the clade opposite the focal one.
  const bool is_sister_edge_on_left = !is_focal_on_left;
  IterateOverLeafwardEdges(
      parent_node, is_sister_edge_on_left,
      [this, &parent_node, &sister_tips](SubsplitDAGNode sister_node) {
        const auto sister_id = sister_node.Id();
        sister_tips.emplace_back(
            sister_id.value_, GetPLVIndex(PLVType::P, sister_id),
            GetEdgeIdx(parent_node.GetBitset(), sister_node.GetBitset()).value_);
      });
  QuartetTipVector rotated_tips;
  QuartetTipVector sorted_tips;
  IterateOverLeafwardEdgesAndChildren(
      GetDAGNode(child_id), [this, &rotated_tips, &sorted_tips](
                                const EdgeId gpcsp_idx, const bool is_leafward_on_left,
                                const NodeId grandchild_id) {
        if (is_leafward_on_left) {
          rotated_tips.emplace_back(grandchild_id.value_,
                                    GetPLVIndex(PLVType::P, grandchild_id),
                                    gpcsp_idx.value_);
        } else {
          sorted_tips.emplace_back(grandchild_id.value_,
                                   GetPLVIndex(PLVType::P, grandchild_id),
                                   gpcsp_idx.value_);
        }
      });
  return QuartetHybridRequest(GetEdgeIdx(parent_id, child_id).value_,
                              std::move(rootward_tips), std::move(sister_tips),
                              std::move(rotated_tips), std::move(sorted_tips));
}
| 21,093
|
C++
|
.cpp
| 426
| 39.723005
| 88
| 0.648122
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,035
|
nni_engine.cpp
|
phylovi_bito/src/nni_engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
#include "nni_engine.hpp"
#include "stopwatch.hpp"
using PLVType = PLVNodeHandler::PLVType;
// Constructs an NNI engine over the given DAG, building a graft DAG for
// temporary NNI evaluation and attaching the optional GP/TP evaluation
// engines when non-null pointers are supplied.
NNIEngine::NNIEngine(GPDAG &dag, std::optional<GPEngine *> gp_engine,
                     std::optional<TPEngine *> tp_engine)
    : dag_(dag), graft_dag_(std::make_unique<GraftDAG>(dag)) {
  // value_or(nullptr) collapses "no optional" and "null pointer" into one test.
  if (GPEngine *engine = gp_engine.value_or(nullptr); engine != nullptr) {
    MakeGPEvalEngine(engine);
  }
  if (TPEngine *engine = tp_engine.value_or(nullptr); engine != nullptr) {
    MakeTPEvalEngine(engine);
  }
}
// ** Access
// Returns the score associated with the given NNI. Checks the engine's cached
// scores first (current iteration, then past iterations) and falls back to
// querying the active NNI evaluation engine.
double NNIEngine::GetScoreByNNI(const NNIOperation &nni) const {
  // Check if score has already been stored in NNI Engine.
  const auto it_1 = GetScoredNNIs().find(nni);
  if (it_1 != GetScoredNNIs().end()) {
    return it_1->second;
  }
  const auto it_2 = GetPastScoredNNIs().find(nni);
  // Bugfix: it_2 comes from GetPastScoredNNIs(), so it must be compared
  // against that container's end() — the original compared against
  // GetScoredNNIs().end(), which is undefined behavior (iterators from two
  // different containers) and skips the past-score cache.
  if (it_2 != GetPastScoredNNIs().end()) {
    return it_2->second;
  }
  // Otherwise, check if score is in NNI Eval Engine.
  return GetEvalEngine().ScoreInternalNNIByNNI(nni);
}
double NNIEngine::GetScoreByEdge(const EdgeId edge_id) const {
// Check for score in NNI Eval Engine.
return GetEvalEngine().ScoreInternalNNIByEdge(edge_id);
}
// ** NNI Evaluation Engine
// Builds a GP-based evaluation engine around the given GPEngine, makes it the
// engine in use, and returns a reference to it.
NNIEvalEngineViaGP &NNIEngine::MakeGPEvalEngine(GPEngine *gp_engine) {
  Assert(gp_engine != nullptr, "Cannot MakeGPEvalEngine with nullptr.");
  auto new_engine = std::make_unique<NNIEvalEngineViaGP>(*this, *gp_engine);
  eval_engine_via_gp_ = std::move(new_engine);
  SelectGPEvalEngine();
  return GetGPEvalEngine();
}
// Builds a TP-based evaluation engine around the given TPEngine, makes its
// likelihood variant the engine in use, and returns a reference to it.
NNIEvalEngineViaTP &NNIEngine::MakeTPEvalEngine(TPEngine *tp_engine) {
  Assert(tp_engine != nullptr, "Cannot MakeTPEvalEngine with nullptr.");
  auto new_engine = std::make_unique<NNIEvalEngineViaTP>(*this, *tp_engine);
  eval_engine_via_tp_ = std::move(new_engine);
  SelectTPLikelihoodEvalEngine();
  return GetTPEvalEngine();
}
// Marks every evaluation-engine type as not-in-use.
void NNIEngine::ClearEvalEngineInUse() {
  for (const auto engine_type : NNIEvalEngineTypeEnum::Iterator()) {
    eval_engine_in_use_[engine_type] = false;
  }
}
// Dispatches to the engine-specific selector for the requested evaluation
// engine type; fails on an unrecognized type.
void NNIEngine::SelectEvalEngine(const NNIEvalEngineType eval_engine_type) {
  if (eval_engine_type == NNIEvalEngineType::GPEvalEngine) {
    SelectGPEvalEngine();
  } else if (eval_engine_type == NNIEvalEngineType::TPEvalEngineViaLikelihood) {
    SelectTPLikelihoodEvalEngine();
  } else if (eval_engine_type == NNIEvalEngineType::TPEvalEngineViaParsimony) {
    SelectTPParsimonyEvalEngine();
  } else {
    Failwith("Invalid NNIEvalEngineType.");
  }
}
// Makes the GP evaluation engine the single engine in use.
// Order matters: the in-use flags are cleared before the GP flag is set.
void NNIEngine::SelectGPEvalEngine() {
  Assert(HasGPEvalEngine(), "Must MakeGPEvalEngine before selecting it.");
  ClearEvalEngineInUse();
  eval_engine_in_use_[NNIEvalEngineType::GPEvalEngine] = true;
  eval_engine_ = &GetGPEvalEngine();
}
// Makes the TP evaluation engine (likelihood variant) the single engine in
// use, and points the TP engine itself at its likelihood sub-engine.
void NNIEngine::SelectTPLikelihoodEvalEngine() {
  Assert(HasTPEvalEngine() && GetTPEngine().HasLikelihoodEvalEngine(),
         "Must MakeTPEvalEngine with LikelihoodEvalEngine before selecting it.");
  ClearEvalEngineInUse();
  eval_engine_in_use_[NNIEvalEngineType::TPEvalEngineViaLikelihood] = true;
  GetTPEngine().SelectLikelihoodEvalEngine();
  eval_engine_ = &GetTPEvalEngine();
}
// Makes the TP evaluation engine (parsimony variant) the single engine in
// use, and points the TP engine itself at its parsimony sub-engine.
void NNIEngine::SelectTPParsimonyEvalEngine() {
  Assert(HasTPEvalEngine() && GetTPEngine().HasParsimonyEvalEngine(),
         "Must MakeTPEvalEngine with ParsimonyEvalEngine before selecting it.");
  ClearEvalEngineInUse();
  // Bugfix: the original read `eval_engine_in_use_[...];` — a discarded-value
  // expression that never set the flag (compare the likelihood variant), so
  // IsEvalEngineInUse() reported no engine in use after selecting parsimony.
  eval_engine_in_use_[NNIEvalEngineType::TPEvalEngineViaParsimony] = true;
  GetTPEngine().SelectParsimonyEvalEngine();
  eval_engine_ = &GetTPEvalEngine();
}
void NNIEngine::InitEvalEngine() {
if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
GetGPEvalEngine().Init();
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
GetTPEvalEngine().Init();
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
GetTPEvalEngine().Init();
}
}
void NNIEngine::PrepEvalEngine() {
if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
GetGPEvalEngine().Prep();
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
GetTPEvalEngine().Prep();
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
GetTPEvalEngine().Prep();
}
}
void NNIEngine::GrowEvalEngineForDAG(std::optional<Reindexer> node_reindexer,
std::optional<Reindexer> edge_reindexer) {
if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
GetGPEvalEngine().GrowEngineForDAG(node_reindexer, edge_reindexer);
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
GetTPEvalEngine().GrowEngineForDAG(node_reindexer, edge_reindexer);
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
GetTPEvalEngine().GrowEngineForDAG(node_reindexer, edge_reindexer);
}
}
void NNIEngine::GrowEvalEngineForAdjacentNNIs(const bool via_reference,
const bool use_unique_temps) {
if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
GetGPEvalEngine().GrowEngineForAdjacentNNIs(GetAdjacentNNIs(), via_reference,
use_unique_temps);
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
GetTPEvalEngine().GrowEngineForAdjacentNNIs(GetAdjacentNNIs(), via_reference,
use_unique_temps);
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
GetTPEvalEngine().GrowEngineForAdjacentNNIs(GetAdjacentNNIs(), via_reference,
use_unique_temps);
}
}
// Propagates a DAG modification (accepted NNIs added) into each in-use
// evaluation engine, remapping engine data via the given reindexers.
// - nni_to_pre_nni: maps each newly added NNI to the pre-existing NNI it was
//   derived from, so engines can seed data for the new nodes.
void NNIEngine::UpdateEvalEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
    // The GP engine does not track the DAG root node, so strip the root's
    // entry from the node reindexer before forwarding.
    auto node_reindexer_without_dag(node_reindexer);
    node_reindexer_without_dag =
        node_reindexer.RemoveNewIndex(GetDAG().GetDAGRootNodeId().value_);
    GetGPEvalEngine().UpdateEngineAfterModifyingDAG(nni_to_pre_nni, prev_node_count,
                                                    node_reindexer_without_dag,
                                                    prev_edge_count, edge_reindexer);
  }
  if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
    GetTPEvalEngine().UpdateEngineAfterModifyingDAG(nni_to_pre_nni, prev_node_count,
                                                    node_reindexer, prev_edge_count,
                                                    edge_reindexer);
  }
  if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
    GetTPEvalEngine().UpdateEngineAfterModifyingDAG(nni_to_pre_nni, prev_node_count,
                                                    node_reindexer, prev_edge_count,
                                                    edge_reindexer);
  }
}
// Returns the branch-length handler of whichever evaluation engine is in use;
// fails when no engine has been selected.
const DAGBranchHandler &NNIEngine::GetDAGBranchHandler() const {
  if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
    return GetGPEvalEngine().GetDAGBranchHandler();
  }
  const bool via_likelihood =
      IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood);
  const bool via_parsimony =
      IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony);
  if (via_likelihood || via_parsimony) {
    return GetTPEvalEngine().GetDAGBranchHandler();
  }
  Failwith("Invalid given EvalEngineType.");
}
// ** Runners
// Top-level NNI search driver: initializes once, then alternates the main
// loop (score/filter/commit NNIs) and the post loop (bookkeeping updates)
// until no adjacent NNIs remain. Stage timings go to stdout unless is_quiet.
void NNIEngine::Run(const bool is_quiet) {
  // When quiet, route all logging into a throwaway stringstream.
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  os << "RunInit(): ";
  // Initialize Once at the start of the loop
  RunInit(is_quiet);
  os << timer.Lap() << std::endl;
  // Loop until no more eligible NNIs are found.
  while (GetAdjacentNNICount() > 0) {
    os << "RunMainLoop(): ";
    RunMainLoop(is_quiet);
    os << timer.Lap() << std::endl;
    os << "RunPostLoop(): ";
    RunPostLoop(is_quiet);
    os << timer.Lap() << std::endl;
  }
}
void NNIEngine::RunInit(const bool is_quiet) {
std::stringstream dev_null;
std::ostream &os = (is_quiet ? dev_null : std::cout);
Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
// Initialize Adjacent NNIs based on starting state of DAG.
ResetNNIData();
os << "RunInit::ResetNNIData: " << timer.Lap() << std::endl;
SyncAdjacentNNIsWithDAG(true);
os << "RunInit::SyncAdjacentNNIsWithDAG: " << timer.Lap() << std::endl;
FilterInit();
os << "RunInit::FilterInit: " << timer.Lap() << std::endl;
}
// Runs one iteration of the NNI search: graft candidate NNIs onto the graft
// DAG, score them, filter to an accepted set, then commit accepted NNIs
// permanently. Stage timings go to stdout unless `is_quiet` is true.
void NNIEngine::RunMainLoop(const bool is_quiet) {
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  // (1) Add all adjacent NNIs to the GraftDAG.
  GraftAdjacentNNIsToDAG(is_quiet);
  os << "RunMainLoop::GraftAdjacentNNIsToDAG: " << timer.Lap() << std::endl;
  // (2) Compute each adjacent NNI score.
  FilterPreScore();
  os << "RunMainLoop::FilterPreScore: " << timer.Lap() << std::endl;
  // (2b) Optional per-NNI function.
  FilterScoreAdjacentNNIs();
  os << "RunMainLoop::FilterScoreAdjacentNNIs: " << timer.Lap() << std::endl;
  FilterPostScore();
  // Fix: log label previously read "FilterScoreUpdate", which matches no
  // stage; corrected to the function actually being timed.
  os << "RunMainLoop::FilterPostScore: " << timer.Lap() << std::endl;
  // (3) Select whether to accept or reject adjacent NNIs via filter.
  FilterEvaluateAdjacentNNIs();
  os << "RunMainLoop::FilterEvaluate: " << timer.Lap() << std::endl;
  // (4a) Remove adjacent NNIs from GraftDAG.
  RemoveAllGraftedNNIsFromDAG();
  // (4b) Add accepted NNIs permanently to DAG.
  AddAcceptedNNIsToDAG(is_quiet);
  os << "RunMainLoop::RemoveAndAddNNIs: " << timer.Lap() << std::endl;
  iter_count_++;
}
void NNIEngine::RunPostLoop(const bool is_quiet) {
std::stringstream dev_null;
std::ostream &os = (is_quiet ? dev_null : std::cout);
Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
// (5) Update NNI data.
// (5a) Update rejected NNIs.
UpdateRejectedNNIs();
os << "RunPostLoop::UpdateRejectedNNIs: " << timer.Lap() << std::endl;
// (5b) Update adjacent and new NNIs.
UpdateAdjacentNNIs();
os << "RunPostLoop::UpdateAdjacentNNIs: " << timer.Lap() << std::endl;
// (5c) Update scored NNIs.
UpdateScoredNNIs();
os << "RunPostLoop::UpdateScoredNNIs: " << timer.Lap() << std::endl;
// (5d) Update accepted NNIs.
UpdateAcceptedNNIs();
os << "RunPostLoop::UpdateAcceptedNNIs: " << timer.Lap() << std::endl;
}
// ** Filter Subroutines
// Runs the optional filter-initialization hook, if one has been set.
void NNIEngine::FilterInit() {
  if (!filter_init_fn_) {
    return;
  }
  filter_init_fn_(*this);
}
// Runs the optional pre-scoring hook, if one has been set.
void NNIEngine::FilterPreScore() {
  if (!filter_pre_score_fn_) {
    return;
  }
  filter_pre_score_fn_(*this);
}
// Applies the optional per-NNI scoring hook to each NNI queued for rescoring
// and records each resulting score.
void NNIEngine::FilterScoreAdjacentNNIs() {
  if (!filter_score_loop_fn_) {
    return;
  }
  for (const auto &nni : GetNNIsToRescore()) {
    AddNNIScore(nni, filter_score_loop_fn_(*this, nni));
  }
}
// Runs the optional post-scoring hook, then checks that every NNI queued for
// rescoring has been assigned a score.
void NNIEngine::FilterPostScore() {
  if (filter_post_score_fn_) {
    (filter_post_score_fn_)(*this);
  }
  // The newly sorted score set must contain one entry per NNI to rescore.
  Assert(new_sorted_scored_nnis_.size() == GetNNIsToRescore().size(),
         "After scoring NNIs, not all NNIs have been assigned scores.");
}
// Decides which scored NNIs to accept, via the batch evaluate hook and/or the
// per-NNI evaluate hook. At least one of the two hooks must be set.
void NNIEngine::FilterEvaluateAdjacentNNIs() {
  Assert(filter_evaluate_fn_ or filter_evaluate_loop_fn_,
         "Must assign a filter process function before running NNIEngine.");
  // Batch hook: receives all NNIs/scores and fills accepted_nnis_ directly.
  if (filter_evaluate_fn_) {
    (filter_evaluate_fn_)(*this, GetNNIsToReevaluate(), GetScoredNNIsToReevaluate(),
                          GetSortedScoredNNIsToReevaluate(), accepted_nnis_);
  }
  // Per-NNI hook: accept/reject each scored NNI individually; rejected NNIs
  // are recorded only when tracking is enabled.
  if (filter_evaluate_loop_fn_) {
    for (const auto &[nni, nni_score] : GetScoredNNIsToReevaluate()) {
      const bool accept_nni = (filter_evaluate_loop_fn_)(*this, nni, nni_score);
      if (accept_nni) {
        accepted_nnis_.insert(nni);
      } else if (track_rejected_nnis_) {
        rejected_nnis_.insert(nni);
      }
    }
  }
}
// Runs the optional hook invoked after accepted NNIs have been committed to
// the DAG, passing the DAG modification record and the NNI->pre-NNI map.
void NNIEngine::FilterPostModification(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni) {
  if (!filter_post_modification_fn_) {
    return;
  }
  filter_post_modification_fn_(*this, mods_, nni_to_pre_nni);
}
// ** Filter Setters
// Sets the hook run once before the search loop starts.
void NNIEngine::SetFilterInitFunction(StaticFilterInitFunction filter_init_fn) {
  filter_init_fn_ = filter_init_fn;
}
// Sets the hook run before per-NNI scoring in each iteration.
void NNIEngine::SetFilterPreScoreFunction(
    StaticFilterUpdateFunction filter_pre_update_fn) {
  filter_pre_score_fn_ = filter_pre_update_fn;
}
// Sets the per-NNI scoring hook (returns the score for a single NNI).
void NNIEngine::SetFilterScoreLoopFunction(
    StaticFilterScoreLoopFunction filter_score_loop_fn) {
  filter_score_loop_fn_ = filter_score_loop_fn;
}
// Sets the hook run after all NNIs have been scored.
void NNIEngine::SetFilterPostScoreFunction(
    StaticFilterUpdateFunction filter_post_score_fn) {
  filter_post_score_fn_ = filter_post_score_fn;
}
// Sets the batch accept/reject hook (sees all scored NNIs at once).
void NNIEngine::SetFilterEvaluateFunction(
    StaticFilterEvaluateFunction filter_evaluate_fn) {
  filter_evaluate_fn_ = filter_evaluate_fn;
}
// Sets the per-NNI accept/reject hook.
void NNIEngine::SetFilterEvaluateLoopFunction(
    StaticFilterEvaluateLoopFunction filter_evaluate_loop_fn) {
  filter_evaluate_loop_fn_ = filter_evaluate_loop_fn;
}
// Sets the hook run after accepted NNIs have been committed to the DAG.
void NNIEngine::SetFilterPostModificationFunction(
    StaticFilterModificationFunction filter_post_modification_fn) {
  filter_post_modification_fn_ = filter_post_modification_fn;
}
// ** Filtering Schemes
// Disables filtering: every adjacent NNI is accepted (or none, when
// accept_all_nnis is false).
void NNIEngine::SetNoFilter(const bool accept_all_nnis) {
  SetNoEvaluate(accept_all_nnis);
}
// Scheme: score via the GP engine; accept NNIs whose score >= score_cutoff.
// Note the fixed order: select the engine, then install the score hooks.
void NNIEngine::SetGPLikelihoodCutoffFilteringScheme(const double score_cutoff) {
  Assert(HasGPEvalEngine(), "Must MakeGPEvalEngine before using the filtering scheme.");
  SelectGPEvalEngine();
  SetScoreViaEvalEngine();
  SetEvaluateViaMinScoreCutoff(score_cutoff);
}
// Scheme: score via the TP likelihood engine; accept scores >= score_cutoff.
void NNIEngine::SetTPLikelihoodCutoffFilteringScheme(const double score_cutoff) {
  Assert(HasTPEvalEngine() && GetTPEngine().HasLikelihoodEvalEngine(),
         "Must MakeTPEvalEngine before using the filtering scheme.");
  SelectTPLikelihoodEvalEngine();
  SetScoreViaEvalEngine();
  SetEvaluateViaMinScoreCutoff(score_cutoff);
}
// Scheme: score via the TP parsimony engine; accept scores <= score_cutoff
// (parsimony: lower is better, hence the max-cutoff).
void NNIEngine::SetTPParsimonyCutoffFilteringScheme(const double score_cutoff) {
  Assert(HasTPEvalEngine() && GetTPEngine().HasParsimonyEvalEngine(),
         "Must MakeTPEvalEngine before using the filtering scheme.");
  SelectTPParsimonyEvalEngine();
  SetScoreViaEvalEngine();
  SetEvaluateViaMaxScoreCutoff(score_cutoff);
}
// Scheme: score via the GP engine; accept NNIs within score_cutoff of the
// best (max) score of the current batch. The threshold is recomputed in the
// post-score hook, after all scores are known.
void NNIEngine::SetGPLikelihoodDropFilteringScheme(const double score_cutoff) {
  Assert(HasGPEvalEngine(), "Must MakeGPEvalEngine before using the filtering scheme.");
  SelectGPEvalEngine();
  SetScoreViaEvalEngine();
  SetFilterPostScoreFunction([score_cutoff](NNIEngine &this_nni_engine) {
    double max = this_nni_engine.GetMaxScore();
    this_nni_engine.SetEvaluateViaMinScoreCutoff(max - score_cutoff);
  });
}
// Scheme: score via the TP likelihood engine; accept NNIs within score_cutoff
// of the best (max) score of the current batch.
void NNIEngine::SetTPLikelihoodDropFilteringScheme(const double score_cutoff) {
  Assert(HasTPEvalEngine() && GetTPEngine().HasLikelihoodEvalEngine(),
         "Must MakeTPEvalEngine before using the filtering scheme.");
  SelectTPLikelihoodEvalEngine();
  SetScoreViaEvalEngine();
  SetFilterPostScoreFunction([score_cutoff](NNIEngine &this_nni_engine) {
    double max = this_nni_engine.GetMaxScore();
    this_nni_engine.SetEvaluateViaMinScoreCutoff(max - score_cutoff);
  });
}
// Scheme: score via the TP parsimony engine; accept NNIs within score_cutoff
// of the best (min) score of the current batch (parsimony: lower is better).
void NNIEngine::SetTPParsimonyDropFilteringScheme(const double score_cutoff) {
  Assert(HasTPEvalEngine() && GetTPEngine().HasParsimonyEvalEngine(),
         "Must MakeTPEvalEngine before using the filtering scheme.");
  SelectTPParsimonyEvalEngine();
  SetScoreViaEvalEngine();
  SetFilterPostScoreFunction([score_cutoff](NNIEngine &this_nni_engine) {
    double min = this_nni_engine.GetMinScore();
    this_nni_engine.SetEvaluateViaMaxScoreCutoff(min + score_cutoff);
  });
}
// Scheme: accept only the top-k scoring NNIs of each batch; `max_is_best`
// selects whether high or low scores are best. Assumes a scoring scheme has
// already been installed.
void NNIEngine::SetTopKScoreFilteringScheme(const size_t k, const bool max_is_best) {
  SetFilterPostScoreFunction([k, max_is_best](NNIEngine &this_nni_engine) {
    auto score_cutoff =
        max_is_best ? this_nni_engine.GetMaxKScore(k) : this_nni_engine.GetMinKScore(k);
    if (max_is_best) {
      this_nni_engine.SetEvaluateViaMinScoreCutoff(score_cutoff);
    } else {
      this_nni_engine.SetEvaluateViaMaxScoreCutoff(score_cutoff);
    }
  });
}
// ** Filtering Scheme Helper Functions
// Installs a scoring hook that assigns the same constant score to every NNI
// (useful for no-op filtering or testing).
// Fix: the lambda's parameters were named but unused, triggering
// -Wunused-parameter; they are now unnamed.
void NNIEngine::SetScoreToConstant(const double value) {
  SetFilterScoreLoopFunction(
      [value](NNIEngine &, const NNIOperation &) { return value; });
}
// Installs the standard hook set that scores NNIs through the currently
// selected evaluation engine: prep the engine on init, bulk-score before the
// per-NNI loop, read each score back from the engine, and resize/update the
// engine after the DAG is modified.
void NNIEngine::SetScoreViaEvalEngine() {
  SetFilterInitFunction(
      [](NNIEngine &this_nni_engine) { this_nni_engine.PrepEvalEngine(); });
  SetFilterPreScoreFunction(
      [](NNIEngine &this_nni_engine) { this_nni_engine.ScoreAdjacentNNIs(); });
  // Per-NNI hook just looks up the score the engine computed in bulk.
  SetFilterScoreLoopFunction([](NNIEngine &this_nni_engine, const NNIOperation &nni) {
    const auto &eval_engine = this_nni_engine.GetEvalEngine();
    return eval_engine.GetScoredNNIs().find(nni)->second;
  });
  // After accepted NNIs are committed, grow and remap the engine to match.
  SetFilterPostModificationFunction(
      [](NNIEngine &this_nni_engine, const SubsplitDAG::ModificationResult &mods,
         const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni) {
        this_nni_engine.GrowEvalEngineForDAG(mods.node_reindexer, mods.edge_reindexer);
        this_nni_engine.UpdateEvalEngineAfterModifyingDAG(
            nni_to_pre_nni, mods.prv_node_count, mods.node_reindexer,
            mods.prv_edge_count, mods.edge_reindexer);
      });
}
// Installs an evaluate hook that either accepts every scored NNI (when
// set_all_nni_to_pass) or accepts none.
void NNIEngine::SetNoEvaluate(const bool set_all_nni_to_pass) {
  SetFilterEvaluateFunction(
      [set_all_nni_to_pass](NNIEngine &this_nni_engine, const NNISet &nnis_to_evaluate,
                            const NNIDoubleMap &scored_nnis,
                            const DoubleNNIPairSet &sorted_scored_nnis,
                            NNISet &accepted_nnis) {
        if (set_all_nni_to_pass) {
          // Walk scores best-first (reverse of the ascending sorted set).
          for (auto it = sorted_scored_nnis.rbegin(); it != sorted_scored_nnis.rend();
               it++) {
            const auto &[nni_score, nni] = *it;
            accepted_nnis.insert(nni);
          }
        }
      });
}
// Installs an evaluate hook that accepts exactly the given fixed set of NNIs,
// ignoring scores entirely.
// Fixes: unused lambda parameters are now unnamed (silences
// -Wunused-parameter), and the element-wise loop is replaced by std::set's
// range insert.
void NNIEngine::SetEvaluateViaSetOfNNIs(const std::set<NNIOperation> &nnis_to_accept) {
  SetFilterEvaluateFunction([nnis_to_accept](NNIEngine &, const NNISet &,
                                             const NNIDoubleMap &,
                                             const DoubleNNIPairSet &,
                                             NNISet &accepted_nnis) {
    accepted_nnis.insert(nnis_to_accept.begin(), nnis_to_accept.end());
  });
}
// Installs an evaluate hook that accepts every NNI whose score is at least
// score_cutoff (higher-is-better scoring, e.g. likelihood).
void NNIEngine::SetEvaluateViaMinScoreCutoff(const double score_cutoff) {
  SetFilterEvaluateFunction([score_cutoff](NNIEngine &this_nni_engine,
                                           const NNISet &nnis_to_evaluate,
                                           const NNIDoubleMap &scored_nnis,
                                           const DoubleNNIPairSet &sorted_scored_nnis,
                                           NNISet &accepted_nnis) {
    // Walk best-first (reverse of the ascending sorted set); stop at the
    // first score below the cutoff — all later ones are lower still.
    for (auto it = sorted_scored_nnis.rbegin(); it != sorted_scored_nnis.rend(); it++) {
      const auto &[nni_score, nni] = *it;
      if (nni_score < score_cutoff) {
        break;
      }
      accepted_nnis.insert(nni);
    }
  });
}
// Installs an evaluate hook that accepts every NNI whose score is at most
// score_cutoff (lower-is-better scoring, e.g. parsimony).
void NNIEngine::SetEvaluateViaMaxScoreCutoff(const double score_cutoff) {
  SetFilterEvaluateFunction([score_cutoff](NNIEngine &this_nni_engine,
                                           const NNISet &nnis_to_evaluate,
                                           const NNIDoubleMap &scored_nnis,
                                           const DoubleNNIPairSet &sorted_scored_nnis,
                                           NNISet &accepted_nnis) {
    // Walk in ascending order; stop at the first score above the cutoff —
    // all later ones are higher still.
    for (auto it = sorted_scored_nnis.begin(); it != sorted_scored_nnis.end(); it++) {
      const auto &[nni_score, nni] = *it;
      if (nni_score > score_cutoff) {
        break;
      }
      accepted_nnis.insert(nni);
    }
  });
}
void NNIEngine::ScoreAdjacentNNIs() {
if (IsEvalEngineInUse(NNIEvalEngineType::GPEvalEngine)) {
GetGPEvalEngine().ScoreAdjacentNNIs(GetNNIsToRescore());
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaLikelihood)) {
GetTPEvalEngine().ScoreAdjacentNNIs(GetNNIsToRescore());
}
if (IsEvalEngineInUse(NNIEvalEngineType::TPEvalEngineViaParsimony)) {
GetTPEvalEngine().ScoreAdjacentNNIs(GetNNIsToRescore());
}
}
// Returns the smallest score among NNIs awaiting re-evaluation, or +infinity
// when there are none.
double NNIEngine::GetMinScore() const {
  const auto &sorted = GetSortedScoredNNIsToReevaluate();
  if (sorted.empty()) {
    return INFINITY;
  }
  return sorted.begin()->first;
}
// Returns the largest score among NNIs awaiting re-evaluation, or -infinity
// when there are none.
double NNIEngine::GetMaxScore() const {
  const auto &sorted = GetSortedScoredNNIsToReevaluate();
  if (sorted.empty()) {
    return -INFINITY;
  }
  return sorted.rbegin()->first;
}
// Returns the k-th smallest score among NNIs awaiting re-evaluation (the
// largest score when fewer than k exist), or +infinity when there are none or
// k == 0.
double NNIEngine::GetMinKScore(const size_t k) const {
  const auto &sorted = GetSortedScoredNNIsToReevaluate();
  double kth_score = INFINITY;
  if (sorted.empty() or k == 0) {
    return kth_score;
  }
  size_t seen = 0;
  for (const auto &[nni_score, nni] : sorted) {
    kth_score = nni_score;
    if (++seen >= k) {
      break;
    }
  }
  return kth_score;
}
// Returns the k-th largest score among NNIs awaiting re-evaluation (the
// smallest score when fewer than k exist), or -infinity when there are none
// or k == 0.
double NNIEngine::GetMaxKScore(const size_t k) const {
  const auto &sorted = GetSortedScoredNNIsToReevaluate();
  double kth_score = -INFINITY;
  if (sorted.empty() or k == 0) {
    return kth_score;
  }
  size_t seen = 0;
  // Walk best-first via reverse iteration over the ascending sorted set.
  for (auto rit = sorted.rbegin(); rit != sorted.rend(); ++rit) {
    kth_score = rit->first;
    if (++seen >= k) {
      break;
    }
  }
  return kth_score;
}
std::set<NNIOperation> NNIEngine::GetMinKScoringNNIs(const size_t k) const {
const auto &scores = GetSortedScoredNNIsToReevaluate();
std::set<NNIOperation> nnis;
if (scores.empty() or k == 0) return nnis;
size_t count = 0;
for (auto it = scores.begin(); it != scores.end(); it++) {
auto &[nni_score, nni] = *it;
nnis.insert(nni);
if (++count >= k) break;
}
return nnis;
}
std::set<NNIOperation> NNIEngine::GetMaxKScoringNNIs(const size_t k) const {
const auto &scores = GetSortedScoredNNIsToReevaluate();
std::set<NNIOperation> nnis;
if (scores.empty() or k == 0) return nnis;
size_t count = 0;
for (auto it = scores.rbegin(); it != scores.rend(); it++) {
auto &[nni_score, nni] = *it;
nnis.insert(nni);
if (++count >= k) break;
}
return nnis;
}
// ** Key Indexing
// Maps an NNI clade to the KeyIndex of its PHat-style PLV: the parent's
// sister clade, or the child's left/right clade. Fails for clades that have
// no associated PHat PLV.
// Fix: removed the stray semicolon after the function body (flagged by
// -Wextra-semi at namespace scope).
NNIEngine::KeyIndex NNIEngine::NNICladeToPHatPLV(NNIClade clade_type) {
  switch (clade_type) {
    case NNIClade::ParentSister:
      return KeyIndex::Parent_PHatSister;
    case NNIClade::ChildLeft:
      return KeyIndex::Child_PHatLeft;
    case NNIClade::ChildRight:
      return KeyIndex::Child_PHatRight;
    default:
      Failwith("Given NNIClade has no associated KeyIndex.");
  }
}
// Builds the array of (pre-NNI KeyIndex, post-NNI KeyIndex) pairs describing
// which PHat PLV of the pre-NNI corresponds to which PHat PLV of the
// post-NNI, using the clade map between the two NNIs.
NNIEngine::KeyIndexPairArray NNIEngine::BuildKeyIndexTypePairsFromPreNNIToPostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) {
  // Find mapping from clades in pre-NNI to NNI.
  const auto nni_clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(pre_nni, post_nni);
  KeyIndexPairArray key_idx_pair_array;
  size_t nni_clade_count = 0;
  // One pair per clade that carries a PHat PLV (sister, child-left,
  // child-right); the focal clade has none.
  for (const auto pre_nni_clade_type :
       {NNIClade::ParentSister, NNIClade::ChildLeft, NNIClade::ChildRight}) {
    const auto post_nni_clade_type = nni_clade_map[pre_nni_clade_type];
    key_idx_pair_array[nni_clade_count] = {NNICladeToPHatPLV(pre_nni_clade_type),
                                           NNICladeToPHatPLV(post_nni_clade_type)};
    nni_clade_count++;
  }
  return key_idx_pair_array;
}
// Convenience overload: builds the key index map for `nni` resolving node and
// edge ids against the graft DAG (so grafted NNIs are covered too).
NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForNNI(
    const NNIOperation &nni, const size_t node_count) const {
  const auto &dag_with_grafts = GetGraftDAG();
  return NNIEngine::BuildKeyIndexMapForNNI(nni, dag_with_grafts, node_count);
}
// Convenience overload: builds the post-NNI key index map from the pre-NNI's
// map, resolving node and edge ids against the graft DAG.
NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForPostNNIViaReferencePreNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni,
    const NNIEngine::KeyIndexMap &pre_key_idx) const {
  const auto &dag_with_grafts = GetGraftDAG();
  return NNIEngine::BuildKeyIndexMapForPostNNIViaReferencePreNNI(
      pre_nni, post_nni, pre_key_idx, dag_with_grafts);
}
// Builds the full KeyIndexMap for an NNI already present in `dag`: the
// parent/child node ids, the focal edge id, and the PV indices of every PLV
// the NNI computation touches (parent RHat/RFocal/PHatSister; child
// P/PHatLeft/PHatRight). `node_count` sizes the PV index computation.
template <typename DAGType>
NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForNNI(const NNIOperation &nni,
                                                         const DAGType &dag,
                                                         const size_t node_count) {
  // Find NNI nodes.
  const auto parent_id = dag.GetDAGNodeId(nni.GetParent());
  const auto child_id = dag.GetDAGNodeId(nni.GetChild());
  const bool is_left_clade_sister = (nni.WhichCladeIsFocal() == SubsplitClade::Left);
  // Find key indices for NNI. Unset entries stay NoId.
  KeyIndexMap key_idx_map;
  key_idx_map.fill(NoId);
  key_idx_map[KeyIndex::Parent_Id] = parent_id.value_;
  key_idx_map[KeyIndex::Child_Id] = child_id.value_;
  key_idx_map[KeyIndex::Edge] = dag.GetEdgeIdx(parent_id, child_id).value_;
  key_idx_map[KeyIndex::Parent_RHat] =
      PLVNodeHandler::GetPVIndex(PLVType::RHat, parent_id, node_count).value_;
  // RFocal is the R-PLV on the focal clade; PHatSister the P-PLV on the other.
  key_idx_map[KeyIndex::Parent_RFocal] =
      PLVNodeHandler::GetPVIndex(PLVTypeEnum::RPLVType(!is_left_clade_sister),
                                 parent_id, node_count)
          .value_;
  key_idx_map[KeyIndex::Parent_PHatSister] =
      PLVNodeHandler::GetPVIndex(PLVTypeEnum::PPLVType(is_left_clade_sister), parent_id,
                                 node_count)
          .value_;
  key_idx_map[KeyIndex::Child_P] =
      PLVNodeHandler::GetPVIndex(PLVType::P, child_id, node_count).value_;
  key_idx_map[KeyIndex::Child_PHatLeft] =
      PLVNodeHandler::GetPVIndex(PLVType::PHatLeft, child_id, node_count).value_;
  key_idx_map[KeyIndex::Child_PHatRight] =
      PLVNodeHandler::GetPVIndex(PLVType::PHatRight, child_id, node_count).value_;
  return key_idx_map;
}
// Explicit Instantiation (for the host DAG and the graft DAG).
template NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForNNI(
    const NNIOperation &nni, const GPDAG &dag, const size_t node_count);
template NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForNNI(
    const NNIOperation &nni, const GraftDAG &dag, const size_t node_count);
// Builds the KeyIndexMap for `post_nni` by reusing PLV indices from the
// already-mapped reference `pre_nni`: node/edge ids are looked up in `dag`,
// while the PHat PLV entries are copied from the pre-NNI map according to the
// clade correspondence between the two NNIs.
template <typename DAGType>
NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForPostNNIViaReferencePreNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni,
    const NNIEngine::KeyIndexMap &pre_key_idx, const DAGType &dag) {
  // Unpopulated key indices will left as NoId.
  NodeId parent_id = dag.GetDAGNodeId(post_nni.GetParent());
  NodeId child_id = dag.GetDAGNodeId(post_nni.GetChild());
  KeyIndexMap post_key_idx;
  post_key_idx.fill(NoId);
  post_key_idx[KeyIndex::Parent_Id] = parent_id.value_;
  post_key_idx[KeyIndex::Child_Id] = child_id.value_;
  post_key_idx[KeyIndex::Edge] = dag.GetEdgeIdx(parent_id, child_id).value_;
  // Array for mapping from pre-NNI plvs to post-NNI plvs.
  const auto key_map = BuildKeyIndexTypePairsFromPreNNIToPostNNI(pre_nni, post_nni);
  // Set NNI plvs to their corresponding Pre-NNI plvs.
  post_key_idx[KeyIndex::Parent_RHat] = pre_key_idx[KeyIndex::Parent_RHat];
  for (const auto &[pre_key_type, post_key_type] : key_map) {
    post_key_idx[post_key_type] = pre_key_idx[pre_key_type];
  }
  return post_key_idx;
}
// Explicit Instantiation (for the host DAG and the graft DAG).
template NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForPostNNIViaReferencePreNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni,
    const NNIEngine::KeyIndexMap &pre_key_idx, const GPDAG &dag);
template NNIEngine::KeyIndexMap NNIEngine::BuildKeyIndexMapForPostNNIViaReferencePreNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni,
    const NNIEngine::KeyIndexMap &pre_key_idx, const GraftDAG &dag);
// ** DAG Maintenance
// Permanently adds every accepted NNI to the DAG: records which pre-existing
// NNI each accepted NNI was derived from, commits the node pairs (composing
// the resulting reindexers into mods_), clears any remaining grafts, and runs
// the post-modification hook. Timings go to stdout unless `is_quiet` is true.
void NNIEngine::AddAcceptedNNIsToDAG(const bool is_quiet) {
  std::stringstream dev_null;
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  // Initialize reindexers for remapping after adding nodes.
  mods_.Reinit(GetDAG());
  // Build map from NNI to pre-NNI: for each accepted NNI, find a neighboring
  // NNI that is already in the DAG (i.e. not itself an adjacent candidate).
  std::map<NNIOperation, NNIOperation> nni_to_pre_nni;
  for (const auto &nni : GetAcceptedNNIs()) {
    auto adj_nnis = GetDAG().FindAllNNINeighborsInDAG(nni);
    bool nni_found = false;
    for (const auto clade : SubsplitCladeEnum::Iterator()) {
      const auto adj_nni = adj_nnis[clade];
      if (adj_nni.has_value() &&
          (GetAdjacentNNIs().find(adj_nni.value()) == GetAdjacentNNIs().end())) {
        nni_to_pre_nni[nni] = adj_nni.value();
        nni_found = true;
      }
    }
    Assert(nni_found, "NNI not found to be adjacent to DAG.");
  }
  // Add NNI to DAG.
  os << "AddAcceptedNNIsToDAG::Prep: " << timer.Lap() << std::endl;
  for (const auto &nni : GetAcceptedNNIs()) {
    auto mods = GetDAG().AddNodePair(nni);
    // Compose each modification's reindexers into the running record.
    mods_ = mods_.ComposeWith(mods);
  }
  os << "AddAcceptedNNIsToDAG::AddAllAcceptedNNIs: " << timer.Lap() << std::endl;
  RemoveAllGraftedNNIsFromDAG();
  os << "AddAcceptedNNIsToDAG::RemoveGraftedNNIs: " << timer.Lap() << std::endl;
  FilterPostModification(nni_to_pre_nni);
  os << "AddAcceptedNNIsToDAG::FilterPostModification: " << timer.Lap() << std::endl;
}
// Grafts every NNI queued for (re)scoring onto the graft DAG as temporary
// node pairs, so they can be scored without permanently modifying the DAG.
// Fix: removed the dead local `nodes_to_add` (populated nowhere, never read).
// `is_quiet` is kept for signature parity with the other DAG maintenance
// routines; this function currently produces no output.
void NNIEngine::GraftAdjacentNNIsToDAG(const bool is_quiet) {
  (void)is_quiet;  // Silence unused-parameter warnings.
  for (const auto &nni : GetNNIsToRescore()) {
    GetGraftDAG().AddNodePair(nni);
  }
}
// Discards every grafted (temporary) NNI node pair from the graft DAG.
void NNIEngine::RemoveAllGraftedNNIsFromDAG() { GetGraftDAG().RemoveAllGrafts(); }
// ** NNI Maintenance
// Rebuilds the adjacent-NNI sets from scratch by scanning every internal edge
// of the DAG and collecting its neighboring NNIs. On non-initial runs, NNIs
// that were already accepted in past iterations are not re-flagged as new.
void NNIEngine::SyncAdjacentNNIsWithDAG(const bool on_init) {
  adjacent_nnis_.clear();
  new_adjacent_nnis_.clear();
  // Only real node pairs are viable NNIs.
  dag_.IterateOverRealNodes([this](SubsplitDAGNode node) {
    dag_.IterateOverParentAndChildAndLeafwardEdges(
        node, [this](const NodeId parent_id, const bool is_edge_on_left,
                     const NodeId child_id, const EdgeId edge_idx) {
          // Only internal node pairs are viable NNIs.
          const Bitset &parent_bitset = dag_.GetDAGNode(parent_id).GetBitset();
          const Bitset &child_bitset = dag_.GetDAGNode(child_id).GetBitset();
          if (!(parent_bitset.SubsplitIsUCA() || child_bitset.SubsplitIsLeaf())) {
            // Rootsplit edges are skipped unless rootsplit NNIs are enabled.
            if (GetIncludeRootsplitNNIs() || !parent_bitset.SubsplitIsRootsplit()) {
              SafeAddOutputNNIsToAdjacentNNIs(parent_bitset, child_bitset,
                                              is_edge_on_left);
            }
          }
        });
  });
  // If not on initial run, remove new NNIs that have already been seen.
  if (!on_init) {
    for (const auto &nni : GetPastAcceptedNNIs()) {
      new_adjacent_nnis_.erase(nni);
    }
  }
}
// Convenience overload: forwards the NNI's parent/child subsplit bitsets.
void NNIEngine::UpdateAdjacentNNIsAfterDAGAddNodePair(const NNIOperation &nni) {
  UpdateAdjacentNNIsAfterDAGAddNodePair(nni.parent_, nni.child_);
}
// Incrementally updates the adjacent-NNI sets after a node pair was added to
// the DAG: collects new NNI candidates from every edge around the new parent
// and child, then removes the just-added pair itself from the candidate set.
void NNIEngine::UpdateAdjacentNNIsAfterDAGAddNodePair(const Bitset &parent_bitset,
                                                      const Bitset &child_bitset) {
  const auto parent_id = dag_.GetDAGNodeId(parent_bitset);
  const auto child_id = dag_.GetDAGNodeId(child_bitset);
  // Every new edge added is a potential new NNI.
  // Iterate over the parent and child node of the new pair.
  for (const auto &node_id : {parent_id, child_id}) {
    // Get nodes adjacent to current node from both left and right edges.
    for (const bool is_edge_leafward : {true, false}) {
      // Get nodes adjacent to current node from both leafward and rootward
      // directions.
      for (const bool is_edge_on_left : {true, false}) {
        auto adjacent_node_ids = dag_.GetDAGNode(node_id).GetLeafwardOrRootward(
            is_edge_leafward, is_edge_on_left);
        // Rootsplit edges are skipped unless rootsplit NNIs are enabled.
        if (GetIncludeRootsplitNNIs() || !parent_bitset.SubsplitIsRootsplit()) {
          AddAllNNIsFromNodeVectorToAdjacentNNIs(node_id, adjacent_node_ids,
                                                 is_edge_on_left, is_edge_leafward);
        }
      }
    }
  }
  // Remove the pair that was just added to the DAG from NNI Set.
  NNIOperation new_nni = NNIOperation(parent_bitset, child_bitset);
  adjacent_nnis_.erase(new_nni);
}
// For each node in `adjacent_node_ids`, adds the NNIs of the edge between it
// and `node_id` to the adjacent-NNI sets. If the edges are leafward, node_id
// is the parent of every adjacent node; if rootward, node_id is the child.
// Refactor: the two near-identical per-direction loops are unified into one,
// with the parent/child roles selected per the edge direction.
void NNIEngine::AddAllNNIsFromNodeVectorToAdjacentNNIs(
    const NodeId node_id, const SizeVector &adjacent_node_ids,
    const bool is_edge_on_left, const bool is_edge_leafward) {
  const Bitset node_bitset = dag_.GetDAGNode(node_id).GetBitset();
  for (const auto &adjacent_node_id : adjacent_node_ids) {
    const Bitset adjacent_bitset =
        dag_.GetDAGNode(NodeId(adjacent_node_id)).GetBitset();
    const Bitset &parent_bitset = is_edge_leafward ? node_bitset : adjacent_bitset;
    const Bitset &child_bitset = is_edge_leafward ? adjacent_bitset : node_bitset;
    SafeAddOutputNNIsToAdjacentNNIs(parent_bitset, child_bitset, is_edge_on_left);
  }
}
// Adds the neighboring NNIs of the edge (parent, child) to the adjacent-NNI
// sets, skipping any NNI whose node pair and connecting edge already exist in
// the DAG. Silently ignores root/leaf edges, which have no NNIs.
void NNIEngine::SafeAddOutputNNIsToAdjacentNNIs(const Bitset &parent_bitset,
                                                const Bitset &child_bitset,
                                                const bool is_edge_on_left) {
  // Soft assert that parent is not the root and child is not a leaf.
  if (parent_bitset.SubsplitIsUCA() || child_bitset.SubsplitIsLeaf()) {
    return;
  }
  // Add NNI for right clade swap and left clade swap.
  const auto focal_clade =
      (is_edge_on_left) ? SubsplitClade::Left : SubsplitClade::Right;
  for (auto child_clade_swapped_with_sister : SubsplitCladeEnum::Iterator()) {
    bool is_in_dag = false;
    const auto new_nni = NNIOperation::GetNeighboringNNI(
        parent_bitset, child_bitset, child_clade_swapped_with_sister, focal_clade);
    // If DAG already contains output parent and child nodes, and an edge between
    // them, then don't add it to the adjacent_nnis.
    if (dag_.ContainsNode(new_nni.parent_) && dag_.ContainsNode(new_nni.child_)) {
      const auto parent_id = dag_.GetDAGNodeId(new_nni.parent_);
      const auto child_id = dag_.GetDAGNodeId(new_nni.child_);
      is_in_dag = dag_.ContainsEdge(parent_id, child_id);
    }
    if (!is_in_dag) {
      // Only flag as "new" the first time this NNI is encountered.
      if (adjacent_nnis_.find(new_nni) == adjacent_nnis_.end()) {
        adjacent_nnis_.insert(new_nni);
        new_adjacent_nnis_.insert(new_nni);
      }
    }
  }
}
// Records (or overwrites) the score for `nni` across all four score
// containers: the map and sorted set of all scored NNIs, and their "new"
// counterparts. Any previous score is removed first so the sorted sets never
// hold a stale (score, nni) pair.
// Fix: removed the stray semicolon after the function body (-Wextra-semi).
void NNIEngine::AddNNIScore(const NNIOperation &nni, const double score) {
  RemoveNNIScore(nni);
  scored_nnis_.insert({nni, score});
  new_scored_nnis_.insert({nni, score});
  sorted_scored_nnis_.insert({score, nni});
  new_sorted_scored_nnis_.insert({score, nni});
}
// Removes any stored score for `nni` from all four score containers; no-op
// when the NNI has no recorded score.
void NNIEngine::RemoveNNIScore(const NNIOperation &nni) {
  const auto found = scored_nnis_.find(nni);
  if (found == scored_nnis_.end()) {
    return;
  }
  const double old_score = found->second;
  // The sorted sets are keyed by (score, nni), so the old score is needed.
  sorted_scored_nnis_.erase({old_score, nni});
  new_sorted_scored_nnis_.erase({old_score, nni});
  scored_nnis_.erase(found);
  new_scored_nnis_.erase(nni);
}
// Refreshes the adjacent-NNI sets after accepted NNIs were committed:
// removes NNIs that are now part of the DAG (accepted NNIs and NNIs realized
// by newly added edges) along with their scores, then collects fresh NNI
// candidates around every node touched by an accepted NNI. Optionally
// re-flags out-of-date neighbors of the new NNIs for rescoring.
void NNIEngine::UpdateAdjacentNNIs() {
  new_adjacent_nnis_.clear();
  // Accepted NNIs are no longer candidates.
  for (const auto &nni : accepted_nnis_) {
    adjacent_nnis_.erase(nni);
    RemoveNNIScore(nni);
  }
  // NNIs realized by edges added during the modification are no longer
  // candidates either.
  for (const auto &edge_id : mods_.added_edge_idxs) {
    auto nni = GetDAG().GetNNI(edge_id);
    adjacent_nnis_.erase(nni);
    RemoveNNIScore(nni);
  }
  // Scan all edges around both endpoints of each accepted NNI for new
  // candidate NNIs.
  for (const auto &nni : GetAcceptedNNIs()) {
    const auto nni_edge_id = GetDAG().GetEdgeIdx(nni);
    const auto &nni_edge = GetDAG().GetDAGEdge(nni_edge_id);
    for (const auto node_id : {nni_edge.GetParent(), nni_edge.GetChild()}) {
      const auto &node = GetDAG().GetDAGNode(node_id);
      for (const auto dir : DirectionEnum::Iterator()) {
        for (const auto clade : SubsplitCladeEnum::Iterator()) {
          const auto node_view = node.GetNeighbors(dir, clade);
          for (auto it = node_view.begin(); it != node_view.end(); ++it) {
            const auto &edge = GetDAG().GetDAGEdge(it.GetEdgeId());
            SafeAddOutputNNIsToAdjacentNNIs(
                GetDAG().GetDAGNodeBitset(edge.GetParent()),
                GetDAG().GetDAGNodeBitset(edge.GetChild()),
                (edge.GetSubsplitClade() == SubsplitClade::Left));
          }
        }
      }
    }
  }
  if (rescore_old_nnis_adjacent_to_new_nnis_) {
    UpdateOutOfDateAdjacentNNIs();
  }
}
void NNIEngine::UpdateOutOfDateAdjacentNNIs() {
  // If there have been modifications surrounding current adjacent NNIs, re-label them
  // as new.
  auto &new_node_ids = mods_.added_node_ids;
  auto &new_edge_ids = mods_.added_edge_idxs;
  // Collect every node touched by the last round of DAG modifications: the
  // added nodes plus both endpoints of every added edge.
  std::set<NodeId> updated_node_ids(new_node_ids.begin(), new_node_ids.end());
  for (const auto edge_id : new_edge_ids) {
    const auto edge = GetDAG().GetDAGEdge(edge_id);
    updated_node_ids.insert(edge.GetParent());
    updated_node_ids.insert(edge.GetChild());
  }
  for (const auto &nni : GetNNIsToReevaluate()) {
    // If NNI is already new, skip.
    if (new_adjacent_nnis_.find(nni) != new_adjacent_nnis_.end()) {
      continue;
    }
    bool nni_found = false;
    for (const auto &nni_subsplit : {nni.GetParent(), nni.GetChild()}) {
      if (nni_found) {
        continue;
      }
      // First, if one of the nodes in the NNI are newly added, re-label as new.
      if (GetDAG().ContainsNode(nni_subsplit)) {
        const auto node_id = GetDAG().GetDAGNodeId(nni_subsplit);
        if (updated_node_ids.find(node_id) != updated_node_ids.end()) {
          new_adjacent_nnis_.insert(nni);
          // NOTE(review): this `continue` advances to the other subsplit
          // without setting nni_found, so the second subsplit is still
          // scanned after a match here. The set insert is idempotent, so this
          // is redundant work rather than a correctness issue -- confirm
          // whether `nni_found = true` / `break` was intended.
          continue;
        }
      }
      // Second, if any edges connecting to grandparent or grandchild nodes are new,
      // re-label as new.
      const auto [left_parent_ids, right_parent_ids] =
          GetDAG().FindParentNodeIdsViaMap(nni_subsplit);
      const auto [left_child_ids, right_child_ids] =
          GetDAG().FindChildNodeIdsViaMap(nni_subsplit);
      for (const auto &node_ids :
           {left_parent_ids, right_parent_ids, left_child_ids, right_child_ids}) {
        if (nni_found) break;
        for (const auto node_id : node_ids) {
          if (updated_node_ids.find(node_id) != updated_node_ids.end()) {
            new_adjacent_nnis_.insert(nni);
            nni_found = true;
            break;
          }
        }
      }
    }
  }
}
// Archives this sweep's rejected NNIs (when enabled) and clears the working
// rejected set. Accepted NNIs are excluded from the archive.
void NNIEngine::UpdateRejectedNNIs() {
  if (save_past_rejected_nnis_) {
    // Every new adjacent NNI that was not accepted counts as rejected.
    for (const auto &nni : new_adjacent_nnis_) {
      rejected_past_nnis_.insert(nni);
    }
    for (const auto &nni : accepted_nnis_) {
      rejected_past_nnis_.erase(nni);
    }
  }
  rejected_nnis_.clear();
}
// Archives current scores (when enabled, minus accepted NNIs) and clears the
// per-sweep "new" score containers.
void NNIEngine::UpdateScoredNNIs() {
  if (save_past_scored_nnis_) {
    for (const auto &scored : scored_nnis_) {
      scored_past_nnis_.insert(scored);
    }
    for (const auto &nni : accepted_nnis_) {
      scored_past_nnis_.erase(nni);
    }
  }
  new_scored_nnis_.clear();
  new_sorted_scored_nnis_.clear();
}
// Archives this sweep's accepted NNIs (when enabled), then clears the set.
void NNIEngine::UpdateAcceptedNNIs() {
  if (save_past_accepted_nnis_) {
    for (const auto &nni : accepted_nnis_) {
      accepted_past_nnis_.insert(nni);
    }
  }
  accepted_nnis_.clear();
}
// Clears the adjacency and accept/reject bookkeeping.
// NOTE(review): the scored-NNI containers (scored_nnis_, sorted_scored_nnis_
// and their "new"/"past" variants) are not cleared here -- confirm whether
// that is intentional or an omission.
void NNIEngine::ResetNNIData() {
  new_adjacent_nnis_.clear();
  adjacent_nnis_.clear();
  accepted_nnis_.clear();
  accepted_past_nnis_.clear();
  rejected_nnis_.clear();
  rejected_past_nnis_.clear();
}
| 39,153
|
C++
|
.cpp
| 921
| 36.676439
| 88
| 0.687864
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,037
|
psp_indexer.cpp
|
phylovi_bito/src/psp_indexer.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "psp_indexer.hpp"
#include <algorithm>
#include "sugar.hpp"
// Builds the PSP index. Rootsplits occupy indices [0, after_rootsplits_index_);
// then come the child subsplits of primary subsplit pairs. first_empty_index_
// (one past the last entry) is reserved as a "no such split" sentinel.
PSPIndexer::PSPIndexer(BitsetVector rootsplits, BitsetSizeMap in_indexer) {
  size_t index = 0;
  // First the rootsplits.
  for (const auto& rootsplit : rootsplits) {
    SafeInsert(indexer_, rootsplit, index);
    index++;
  }
  after_rootsplits_index_ = index;
  // Now onto the PCSPs.
  for (const auto& iter : in_indexer) {
    const auto& pcsp = iter.first;
    // The first condition allows us to skip the rootsplits. We only want the
    // PCSPs here. The second condition is because the "primary" part of Primary
    // Subsplit Pair means that the parent split is a rootsplit.
    if (iter.second >= rootsplits.size() && pcsp.PCSPIsParentRootsplit()) {
      SafeInsert(indexer_, pcsp.PCSPGetChildSubsplit(), index);
      index++;
    }
  }
  first_empty_index_ = index;
}
// Inverts the indexer: slot i holds the string form of the subsplit with
// index i, plus one trailing empty string for the sentinel index.
StringVector PSPIndexer::ToStringVector() const {
  std::vector<std::string> result(indexer_.size() + 1);
  for (const auto& [subsplit, idx] : indexer_) {
    result[idx] = subsplit.SubsplitToString();
  }
  // The extra entry at the end represents the split that doesn't exist.
  result.back() = "";
  return result;
}
// Returns three parallel vectors indexed by node id: rootsplit index, "down"
// PSP index, and "up" PSP index. Slots that are never assigned keep the
// sentinel value first_empty_index_.
SizeVectorVector PSPIndexer::RepresentationOf(const Node::NodePtr& topology) const {
  Assert(first_empty_index_ > 0, "This PSPIndexer is uninitialized.");
  SizeVector rootsplit_result(topology->Id(), first_empty_index_);
  SizeVector psp_result_down(topology->Id(), first_empty_index_);
  SizeVector psp_result_up(topology->Id(), first_empty_index_);
  // Look up the rootsplit index for the split induced by a node's leaf set.
  auto rootsplit_index = [&indexer = this->indexer_](const Node* node) {
    return indexer.at(Bitset::RootsplitSubsplitOfClade(node->Leaves()));
  };
  // Here we use the terminology in the 2019 ICLR paper (screenshotted in
  // https://github.com/phylovi/bito/issues/95) looking at the right-hand case
  // in blue. The primary subsplit pair has Z_1 and Z_2 splitting apart Z. Here
  // we use analogous notation.
  auto psp_index = [&indexer = this->indexer_](const Bitset& z1, const Bitset& z2) {
    return indexer.at(Bitset::Subsplit(z1, z2));
  };
  topology->TriplePreorder(
      // f_rootsplit
      [&rootsplit_result, &psp_result_up, &rootsplit_index, &psp_index](
          const Node* node0, const Node* node1, const Node* node2) {
        rootsplit_result[node0->Id()] = rootsplit_index(node0);
        psp_result_up[node0->Id()] = psp_index(node1->Leaves(), node2->Leaves());
      },
      // f_internal
      [&rootsplit_result, &psp_result_up, &psp_result_down, &rootsplit_index,
       &psp_index](const Node* node, const Node* sister, const Node* parent) {
        rootsplit_result[node->Id()] = rootsplit_index(node);
        psp_result_up[node->Id()] = psp_index(~parent->Leaves(), sister->Leaves());
        psp_result_down[parent->Id()] = psp_index(node->Leaves(), sister->Leaves());
      });
  return {rootsplit_result, psp_result_down, psp_result_up};
}
// Same as RepresentationOf, but with each index translated to its subsplit's
// string form via ToStringVector.
StringVectorVector PSPIndexer::StringRepresentationOf(
    const Node::NodePtr& topology) const {
  const StringVector names = ToStringVector();
  StringVectorVector result;
  for (const auto& index_row : RepresentationOf(topology)) {
    StringVector name_row;
    name_row.reserve(index_row.size());
    for (const auto index : index_row) {
      name_row.push_back(names.at(index));
    }
    result.push_back(std::move(name_row));
  }
  return result;
}
// Gathers, per rootsplit index, the branch lengths observed across all trees
// in the collection.
DoubleVectorVector PSPIndexer::SplitLengths(
    const UnrootedTreeCollection& tree_collection) const {
  DoubleVectorVector result(after_rootsplits_index_);
  const auto tree_count = tree_collection.TreeCount();
  for (size_t tree_idx = 0; tree_idx < tree_count; tree_idx++) {
    const auto& tree = tree_collection.GetTree(tree_idx);
    // The 0th part of the PSP representation is the rootsplit vector.
    const auto split_indices = RepresentationOf(tree.Topology())[0];
    const auto branch_lengths = tree.BranchLengths();
    for (size_t edge_idx = 0; edge_idx < split_indices.size(); edge_idx++) {
      result[split_indices[edge_idx]].push_back(branch_lengths[edge_idx]);
    }
  }
  return result;
}
| 4,316
|
C++
|
.cpp
| 94
| 41.776596
| 84
| 0.70954
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,038
|
site_pattern.cpp
|
phylovi_bito/src/site_pattern.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "site_pattern.hpp"
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "intpack.hpp"
#include "sugar.hpp"
// DNA assumption here.
// DNA assumption here: A/C/G/T map to states 0-3 (case-insensitive), while
// gap and degenerate-nucleotide symbols all collapse to state 4.
// (Degenerates are treated as gaps for now; see issue #162.)
CharIntMap SitePattern::GetSymbolTable() {
  CharIntMap table;
  const std::string upper_bases = "ACGT";
  const std::string lower_bases = "acgt";
  for (int state = 0; state < 4; state++) {
    table.insert({upper_bases[static_cast<size_t>(state)], state});
    table.insert({lower_bases[static_cast<size_t>(state)], state});
  }
  for (const char ambiguous : std::string("-NX?BDHKMRSUVWY")) {
    table.insert({ambiguous, 4});
  }
  return table;
}
// Looks up the state for symbol c; Failwith (non-returning error path, as
// used elsewhere in this codebase) if the symbol is unknown.
// Replaces the previous fixed-size char buffer + snprintf with std::string
// concatenation, matching the error-message style used in bitset.cpp.
int SitePattern::SymbolTableAt(const CharIntMap &symbol_table, char c) {
  auto search = symbol_table.find(c);
  if (search == symbol_table.end()) {
    Failwith("Symbol '" + std::string(1, c) + "' not known.");
  }
  return search->second;
}
// Translates each character of str to its state via the symbol table.
SymbolVector SitePattern::SymbolVectorOf(const CharIntMap &symbol_table,
                                         const std::string &str) {
  SymbolVector result;
  result.reserve(str.size());
  for (const char c : str) {
    result.push_back(SymbolTableAt(symbol_table, c));
  }
  return result;
}
// Hash functor for std::vector<int>, combining elements with the boost-style
// hash_combine constant 0x9e3779b9. Used as the pattern hasher in Compress().
struct IntVectorHasher {
  int operator()(const std::vector<int> &values) const {
    // Guard against empty input: the previous version read values[0]
    // unconditionally, which is undefined behavior for an empty vector.
    if (values.empty()) {
      return 0;
    }
    int hash = values[0];
    for (size_t i = 1; i < values.size(); i++) {
      hash ^= values[i] + 0x9e3779b9 + (hash << 6) + (hash >> 2);
    }
    return hash;
  }
};
// Collapses the alignment into its distinct site patterns (columns) with
// multiplicities, filling patterns_ (per-taxon symbol vectors) and weights_
// (one count per distinct pattern).
void SitePattern::Compress() {
  CharIntMap symbol_table = GetSymbolTable();
  size_t sequence_length = alignment_.Length();
  std::unordered_map<SymbolVector, double, IntVectorHasher> patterns;
  std::unordered_map<size_t, std::string> taxon_number_to_sequence;
  // Map each taxon's leaf id to its aligned sequence.
  for (const auto &[tag, taxon] : tag_taxon_map_) {
    const auto taxon_number = static_cast<size_t>(MaxLeafIDOfTag(tag));
    SafeInsert(taxon_number_to_sequence, taxon_number, alignment_.at(taxon));
  }
  // Count the occurrences of each distinct alignment column.
  for (size_t pos = 0; pos < sequence_length; pos++) {
    SymbolVector pattern(alignment_.SequenceCount());
    for (const auto &[taxon_number, sequence] : taxon_number_to_sequence) {
      const auto symbol_to_find = sequence[pos];
      pattern[taxon_number] = SymbolTableAt(symbol_table, symbol_to_find);
    }
    if (patterns.find(pattern) == patterns.end()) {
      SafeInsert(patterns, pattern, 1.);
    } else {
      patterns[pattern]++;
    }
  }
  // Collect the site patterns per taxon.
  for (const auto &iter_tag_taxon : tag_taxon_map_) {
    SymbolVector compressed_sequence;
    auto taxon_number = static_cast<size_t>(MaxLeafIDOfTag(iter_tag_taxon.first));
    for (const auto &iter_patterns : patterns) {
      compressed_sequence.push_back(iter_patterns.first[taxon_number]);
    }
    patterns_[taxon_number] = compressed_sequence;
  }
  // Collect the site weights. Iteration order matches the loop above because
  // the same unordered_map is traversed without intervening modification.
  for (const auto &iter : patterns) {
    weights_.push_back(iter.second);
  }
}
// Builds the partial likelihood vector for one sequence: for each pattern, a
// one-hot vector over the four DNA states, or all-ones for gap/ambiguous.
const std::vector<double> SitePattern::GetPartials(size_t sequence_idx) const {
  // DNA assumption here.
  const size_t state_count = 4;
  std::vector<double> partials(state_count * PatternCount(), 0.);
  for (size_t pattern_idx = 0; pattern_idx < PatternCount(); pattern_idx++) {
    const int symbol = patterns_[sequence_idx][pattern_idx];
    const size_t offset = pattern_idx * state_count;
    if (symbol < static_cast<int>(state_count)) {
      // Observed state: one-hot.
      partials[offset + symbol] = 1.0;
    } else {
      // Gap/ambiguous: every state equally compatible.
      for (size_t state_idx = 0; state_idx < state_count; state_idx++) {
        partials[offset + state_idx] = 1.0;
      }
    }
  }
  return partials;
}
| 3,760
|
C++
|
.cpp
| 117
| 27.435897
| 87
| 0.621659
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,039
|
bitset.cpp
|
phylovi_bito/src/bitset.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Note that the default constructor for std::vector<bool> is filled with false:
// https://stackoverflow.com/a/22984114/467327
#include "bitset.hpp"
#include <utility>
#include "sugar.hpp"
// Bitset::Bitset() : value_(0, false) {}
// Move-construct from an existing bool vector.
Bitset::Bitset(std::vector<bool> value) : value_(std::move(value)) {}
// n bits, all set to initial_value.
Bitset::Bitset(const size_t n, const bool initial_value) : value_(n, initial_value) {}
// Parse a string of '0'/'1' characters; any other character is an error.
Bitset::Bitset(const std::string str) : Bitset(str.length()) {
  for (size_t i = 0; i < value_.size(); i++) {
    if (str[i] == '0') {
      value_[i] = false;
    } else if (str[i] == '1') {
      value_[i] = true;
    } else {
      Failwith("String constructor for Bitset must use only 0s or 1s; found '" +
               std::string(1, str[i]) + "'.");
    }
  }
}
// n bits, with exactly the indices listed in bits_on set.
Bitset::Bitset(const SizeVector bits_on, const size_t n) : Bitset(n, false) {
  for (auto i : bits_on) {
    Assert(i < n, "Bitset SizeVector constructor has values out of range.");
    value_[i] = true;
  }
}
Bitset Bitset::EmptyBitset() { return Bitset(0, false); }
// ** std::bitset Interface Methods
bool Bitset::operator[](size_t i) const { return value_[i]; }
size_t Bitset::size() const { return value_.size(); }
void Bitset::set(size_t i, bool value) {
  Assert(i < value_.size(), "i out of range in Bitset::set.");
  value_[i] = value;
}
void Bitset::reset(size_t i) {
  Assert(i < value_.size(), "i out of range in Bitset::reset.");
  value_[i] = false;
}
void Bitset::flip() { value_.flip(); }
// Three-way compare: returns the sign of (a minus b) at the first differing
// bit position, or 0 if the bitsets are equal. Sizes must match.
int Bitset::Compare(const Bitset& bitset_a, const Bitset& bitset_b) {
  Assert(bitset_a.size() == bitset_b.size(),
         "Bitsets must be same size for Bitset::Compare.");
  for (size_t i = 0; i < bitset_a.size(); i++) {
    if (bitset_a[i] != bitset_b[i]) {
      return static_cast<int>(bitset_a[i]) - static_cast<int>(bitset_b[i]);
    }
  }
  return 0;
}
int Bitset::Compare(const Bitset& that_bitset) const {
  const Bitset& this_bitset = *this;
  return Compare(this_bitset, that_bitset);
}
// Comparison operators delegate to std::vector<bool>'s lexicographic ordering.
bool Bitset::operator==(const Bitset& other) const { return value_ == other.value_; }
bool Bitset::operator!=(const Bitset& other) const { return value_ != other.value_; }
bool Bitset::operator<(const Bitset& other) const { return value_ < other.value_; }
bool Bitset::operator<=(const Bitset& other) const { return value_ <= other.value_; }
bool Bitset::operator>(const Bitset& other) const { return value_ > other.value_; }
bool Bitset::operator>=(const Bitset& other) const { return value_ >= other.value_; }
// Bitwise AND: intersection of set bits. Sizes must match.
Bitset Bitset::operator&(const Bitset& other) const {
  Assert(value_.size() == other.size(), "Size mismatch in Bitset::operator&.");
  Bitset r(value_.size());
  for (size_t i = 0; i < value_.size(); i++) {
    if (value_[i] && other.value_[i]) {
      r.set(i);
    }
  }
  return r;
}
// Bitwise OR: union of set bits. Sizes must match.
Bitset Bitset::operator|(const Bitset& other) const {
  Assert(value_.size() == other.size(), "Size mismatch in Bitset::operator|.");
  Bitset r(value_.size());
  for (size_t i = 0; i < value_.size(); i++) {
    if (value_[i] || other.value_[i]) {
      r.set(i);
    }
  }
  return r;
}
// Bitwise XOR: symmetric difference of set bits. Sizes must match.
Bitset Bitset::operator^(const Bitset& other) const {
  Assert(value_.size() == other.size(), "Size mismatch in Bitset::operator^.");
  Bitset r(value_.size());
  for (size_t i = 0; i < value_.size(); i++) {
    if (value_[i] != other.value_[i]) {
      r.set(i);
    }
  }
  return r;
}
// Bitwise NOT: every bit flipped.
Bitset Bitset::operator~() const {
  Bitset r(value_);
  r.value_.flip();
  return r;
}
// Concatenation: this bitset's bits followed by other's.
Bitset Bitset::operator+(const Bitset& other) const {
  Bitset sum(value_.size() + other.size());
  sum.CopyFrom(*this, 0, false);
  sum.CopyFrom(other, value_.size(), false);
  return sum;
}
// In-place AND. Sizes must match.
void Bitset::operator&=(const Bitset& other) {
  Assert(value_.size() == other.size(), "Size mismatch in Bitset::operator&=.");
  for (size_t i = 0; i < value_.size(); i++) {
    value_[i] = value_[i] && other[i];
  }
}
// In-place OR. Sizes must match.
void Bitset::operator|=(const Bitset& other) {
  Assert(value_.size() == other.size(), "Size mismatch in Bitset::operator|=.");
  for (size_t i = 0; i < value_.size(); i++) {
    value_[i] = value_[i] || other[i];
  }
}
// Stream output via the '0'/'1' string representation.
std::ostream& operator<<(std::ostream& os, const Bitset& bitset) {
  os << bitset.ToString();
  return os;
}
// ** Bitset Methods
// Clear all bits to false.
void Bitset::Zero() { std::fill(value_.begin(), value_.end(), false); }
// Return a copy of the underlying bool vector.
std::vector<bool> Bitset::GetData() { return value_; }
size_t Bitset::Hash() const { return std::hash<std::vector<bool>>{}(value_); }
// '0'/'1' string representation, one character per bit.
std::string Bitset::ToString() const {
  std::string str;
  for (auto&& bit : value_) {
    str += (bit ? '1' : '0');
  }
  return str;
}
std::string Bitset::ToHashString(const size_t length) const {
  return HashToString(Hash(), length);
}
// Indices of the set bits, in increasing order.
std::vector<size_t> Bitset::ToVectorOfSetBits() const {
  std::vector<size_t> vec;
  for (size_t i = 0; i < size(); i++) {
    if (value_[i]) {
      vec.push_back(i);
    }
  }
  return vec;
}
// True iff every bit is set.
bool Bitset::All() const {
  for (auto&& bit : value_) {
    if (!bit) {
      return false;
    }
  }
  return true;
}
// True iff at least one bit is set.
bool Bitset::Any() const {
  for (auto&& bit : value_) {
    if (bit) {
      return true;
    }
  }
  return false;
}
bool Bitset::None() const { return !Any(); }
// True iff exactly one bit is set.
bool Bitset::IsSingleton() const { return SingletonOption().has_value(); }
// True iff the two bitsets share no set bits. Sizes must match.
bool Bitset::IsDisjoint(const Bitset& other) const {
  Assert(size() == other.size(), "Size mismatch in Bitset::IsDisjoint.");
  for (size_t i = 0; i < size(); i++) {
    if (value_[i] && other.value_[i]) {
      return false;
    }
  }
  return true;
}
// Flip all bits if the first bit is set.
void Bitset::Minorize() {
  Assert(!(value_.empty()), "Can't Bitset::Minorize an empty bitset.");
  if (value_[0]) {
    value_.flip();
  }
}
// Copy other's bits into this bitset starting at position begin, optionally
// negating them. The copy must fit within this bitset.
void Bitset::CopyFrom(const Bitset& other, size_t begin, bool flip) {
  Assert(begin + other.size() <= size(), "Can't fit copy in Bitset::CopyFrom.");
  if (flip) {
    for (size_t i = 0; i < other.size(); i++) {
      value_[i + begin] = !other[i];
    }
  } else {
    for (size_t i = 0; i < other.size(); i++) {
      value_[i + begin] = other[i];
    }
  }
}
// Returns the index of the single set bit if exactly one bit is set,
// std::nullopt otherwise (zero or multiple bits set).
std::optional<uint32_t> Bitset::SingletonOption() const {
  bool found_already = false;
  // Initialized to avoid a (benign) maybe-uninitialized read flagged by
  // compilers: the previous version left found_index uninitialized.
  uint32_t found_index = 0;
  for (uint32_t i = 0; i < size(); i++) {
    if (value_[i]) {
      if (found_already) {
        // We previously found an index, so this isn't a singleton.
        return std::nullopt;
      }
      found_already = true;
      found_index = i;
    }
  }
  if (found_already) {
    return found_index;
  }
  return std::nullopt;
}
// Number of set bits.
size_t Bitset::Count() const { return std::count(value_.begin(), value_.end(), true); }
// Comma-separated list of the set-bit indices, e.g. "0,2,5"; empty string if
// no bits are set.
std::string Bitset::ToVectorOfSetBitsAsString() const {
  std::string result;
  const char* separator = "";
  for (size_t idx = 0; idx < size(); idx++) {
    if (value_[idx]) {
      result += separator;
      result += std::to_string(idx);
      separator = ",";
    }
  }
  return result;
}
// ** SBN-related functions
// ** Clade / MultiClade functions
int Bitset::CladeCompare(const Bitset& bitset_a, const Bitset& bitset_b) {
  // Comparing by lexigraphical taxon representation is the precise opposite of
  // comparing by their binary representation. See header file for details.
  return (-1 * Bitset::Compare(bitset_a, bitset_b));
}
int Bitset::CladeCompare(const Bitset& other) const {
  const Bitset& bitset = *this;
  return CladeCompare(bitset, other);
}
// Width of each clade when the bitset is viewed as clade_count equal clades.
size_t Bitset::MultiCladeGetCladeSize(const size_t clade_count) const {
  Assert(size() % clade_count == 0,
         "Bitset::MultiCladeGetCladeSize: size isn't evenly divisible by clade_count.");
  return size() / clade_count;
}
// Extract the i-th of clade_count equal-width clades as its own bitset.
Bitset Bitset::MultiCladeGetClade(const size_t i, const size_t clade_count) const {
  Assert(i < clade_count, "Bitset::MultiCladeGetClade: index is too large.");
  size_t clade_size = MultiCladeGetCladeSize(clade_count);
  std::vector<bool> new_value(
      value_.begin() + static_cast<std::vector<bool>::difference_type>(i * clade_size),
      value_.begin() +
          static_cast<std::vector<bool>::difference_type>((i + 1) * clade_size));
  return Bitset(std::move(new_value));
}
// '0'/'1' representation with '|' separators between clades.
std::string Bitset::MultiCladeToString(const size_t clade_count) const {
  size_t clade_size = MultiCladeGetCladeSize(clade_count);
  std::string str;
  for (size_t i = 0; i < value_.size(); ++i) {
    str += (value_[i] ? '1' : '0');
    if ((i + 1) % clade_size == 0 && i + 1 < value_.size()) {
      // The next item will start a new clade, so add a separator.
      str += '|';
    }
  }
  return str;
}
// ** Subsplit functions
// Build a subsplit from two disjoint clades, ordering them canonically.
Bitset Bitset::Subsplit(const Bitset& clade_0, const Bitset& clade_1) {
  // This asserts that clades are disjoint and equal-sized.
  Assert(clade_0.IsDisjoint(clade_1),
         "SubsplitOfPair: given bitsets are not a valid clade pair.");
  return SubsplitFromUnorderedClades(clade_0, clade_1);
}
Bitset Bitset::Subsplit(const std::string clade_0, const std::string clade_1) {
  return Bitset::Subsplit(Bitset(clade_0), Bitset(clade_1));
}
Bitset Bitset::Subsplit(const SizeVector clade_0, const SizeVector clade_1,
                        const size_t n) {
  return Bitset::Subsplit(Bitset(clade_0, n), Bitset(clade_1, n));
}
// Concatenate the two clades with the CladeCompare-smaller one first.
Bitset Bitset::SubsplitFromUnorderedClades(const Bitset& clade_0,
                                           const Bitset& clade_1) {
  Assert(clade_0.size() == clade_1.size(),
         "Bitset::SubsplitOrderClades requires Bitsets be the same size.");
  return CladeCompare(clade_0, clade_1) < 0 ? clade_0 + clade_1 : clade_1 + clade_0;
}
// Three-way comparison of subsplits; only the sign of the result is
// meaningful. Ordering: (1) taxon count, (2) clade-union bitset,
// (3) the full subsplit bitset.
int Bitset::SubsplitCompare(const Bitset& subsplit_a, const Bitset& subsplit_b) {
  Assert(subsplit_a.size() == subsplit_b.size(),
         "Bitset::SubsplitCompare requires Bitsets be the same size.");
  // (1) Compare the number of taxa of the Subsplits.
  const auto count_a = subsplit_a.Count();
  const auto count_b = subsplit_b.Count();
  if (count_a != count_b) {
    // Count() returns size_t; the previous `count_a - count_b` wrapped to a
    // huge unsigned value when count_a < count_b before being narrowed to
    // int. Compare explicitly instead.
    return (count_a < count_b) ? -1 : 1;
  }
  // (2) Compare their respective union Bitsets.
  auto union_a = subsplit_a.SubsplitCladeUnion();
  auto union_b = subsplit_b.SubsplitCladeUnion();
  auto compare_union = Bitset::Compare(union_a, union_b);
  if (compare_union != 0) {
    return compare_union;
  }
  // (3) Compare the subsplit Bitsets.
  return Bitset::Compare(subsplit_a, subsplit_b);
}
// Member form: compare *this against subsplit_b.
int Bitset::SubsplitCompare(const Bitset& subsplit_b) const {
  const Bitset& subsplit_a = *this;
  return SubsplitCompare(subsplit_a, subsplit_b);
}
// Return the subsplit with its two clades exchanged.
Bitset Bitset::SubsplitRotate() const {
  Assert(size() % 2 == 0, "Bitset::SubsplitRotate requires an even-size bitset.");
  Bitset clade_0 = SubsplitGetClade(SubsplitClade::Left);
  Bitset clade_1 = SubsplitGetClade(SubsplitClade::Right);
  return clade_1 + clade_0;
}
// Return the subsplit with its two clades placed in canonical order.
Bitset Bitset::SubsplitSortClades() const {
  // Assert message corrected: it previously named SubsplitRotate (copy-paste).
  Assert(size() % 2 == 0, "Bitset::SubsplitSortClades requires an even-size bitset.");
  Bitset clade_0 = SubsplitGetClade(SubsplitClade::Left);
  Bitset clade_1 = SubsplitGetClade(SubsplitClade::Right);
  return SubsplitFromUnorderedClades(clade_0, clade_1);
}
std::string Bitset::SubsplitToString() const { return MultiCladeToString(2); }
// "left-indices|right-indices" representation of a subsplit.
std::string Bitset::SubsplitToVectorOfSetBitsAsString() const {
  std::string str;
  str += SubsplitGetClade(SubsplitClade::Left).ToVectorOfSetBitsAsString();
  str += "|";
  str += SubsplitGetClade(SubsplitClade::Right).ToVectorOfSetBitsAsString();
  return str;
}
std::string Bitset::SubsplitToHashString(const size_t length) const {
  std::stringstream ss;
  ss << "[" << ToHashString(length) << "::" << SubsplitCladeUnion().ToHashString(length)
     << "::" << SubsplitGetClade(SubsplitClade::Left).ToHashString(length) << "||"
     << SubsplitGetClade(SubsplitClade::Right).ToHashString(length) << "]";
  return ss.str();
}
size_t Bitset::SubsplitGetCladeSize() const {
  return MultiCladeGetCladeSize(SubsplitCladeCount);
}
Bitset Bitset::SubsplitGetClade(const size_t which_clade) const {
  return MultiCladeGetClade(which_clade, SubsplitCladeCount);
}
Bitset Bitset::SubsplitGetClade(const SubsplitClade which_clade) const {
  Assert(which_clade != SubsplitClade::Unspecified,
         "Cannot SubsplitGetClade for unspecified clade.");
  // #350 discuss this cast
  size_t which_clade_idx =
      static_cast<std::underlying_type<SubsplitClade>::type>(which_clade);
  return MultiCladeGetClade(which_clade_idx, SubsplitCladeCount);
}
bool Bitset::SubsplitIsLeaf() const {
  // A subsplit is a leaf if its right clade is empty and its left clade is a
  // singleton.
  bool is_left_clade_singleton = SubsplitGetClade(SubsplitClade::Left).IsSingleton();
  bool is_right_clade_empty = SubsplitGetClade(SubsplitClade::Right).None();
  return is_left_clade_singleton && is_right_clade_empty;
}
bool Bitset::SubsplitIsUCA() const {
  // A subsplit is a UCA if the left clade contains all taxa and the right clade
  // contains no taxa. If subsplit is valid, then we can assume the right clade is
  // empty.
  bool is_left_clade_full = SubsplitGetClade(SubsplitClade::Left).All();
  return is_left_clade_full;
}
bool Bitset::SubsplitIsRootsplit() const {
  // A subsplit is a rootsplit if the union of the clades contain all clades.
  // But is also not the UCA, meaning both clades are nonempty.
  bool is_union_of_clades_full = SubsplitCladeUnion().All();
  bool is_left_clade_nonempty = !SubsplitGetClade(SubsplitClade::Left).None();
  bool is_right_clade_nonempty = !SubsplitGetClade(SubsplitClade::Right).None();
  return is_union_of_clades_full && is_left_clade_nonempty && is_right_clade_nonempty;
}
// True when this subsplit's clade union equals the parent's left clade
// (and the subsplits differ).
bool Bitset::SubsplitIsLeftChildOf(const Bitset& parent) const {
  return (size() == parent.size()) &&
         (SubsplitCladeUnion() == parent.SubsplitGetClade(SubsplitClade::Left) &&
          (*this != parent));
}
// True when this subsplit's clade union equals the parent's right clade
// (and the subsplits differ).
bool Bitset::SubsplitIsRightChildOf(const Bitset& parent) const {
  return (size() == parent.size()) &&
         (SubsplitCladeUnion() == parent.SubsplitGetClade(SubsplitClade::Right) &&
          (*this != parent));
}
// The union (bitwise OR) of the subsplit's two clades.
Bitset Bitset::SubsplitCladeUnion() const {
  Assert(size() % SubsplitCladeCount == 0,
         "Size isn't 0 mod 2 in Bitset::SubsplitCladeUnion.");
  return SubsplitGetClade(SubsplitClade::Left) | SubsplitGetClade(SubsplitClade::Right);
}
// Determine which of the parent's clades the child descends from; Failwith
// if the child is not actually a child of the parent.
SubsplitClade Bitset::SubsplitIsChildOfWhichParentClade(const Bitset& parent,
                                                        const Bitset& child) {
  Assert(parent.size() == child.size(),
         "Bitset::SubsplitIsChildOfWhichParentClade() bitsets are different sizes.");
  Bitset child_union = child.SubsplitCladeUnion();
  // #350 why not use the left and right here?
  for (SubsplitClade clade : {SubsplitClade::Left, SubsplitClade::Right}) {
    if (child_union == parent.SubsplitGetClade(clade)) {
      return clade;
    }
  }
  // If it reaches the end, then it is not a parent.
  Failwith(
      "Bitset::SubsplitIsChildOfWhichParentClade(): given parent is not a parent of "
      "given child.");
}
bool Bitset::SubsplitIsParentChildPair(const Bitset& parent, const Bitset& child) {
  return child.SubsplitIsLeftChildOf(parent) || child.SubsplitIsRightChildOf(parent);
}
// True when every taxon of the descendant's clade union lies inside the given
// clade of the ancestor.
bool Bitset::SubsplitIsAncestorDescendantPair(const Bitset& ancestor,
                                              const Bitset& descendant,
                                              const SubsplitClade clade_type) {
  const auto ancestor_clade = ancestor.SubsplitGetClade(clade_type);
  const auto descendant_clade = descendant.SubsplitCladeUnion();
  return (~ancestor_clade & descendant_clade).None();
}
// True when either subsplit is the parent of the other.
bool Bitset::SubsplitIsAdjacent(const Bitset& subsplit_a, const Bitset& subsplit_b) {
  return SubsplitIsParentChildPair(subsplit_a, subsplit_b) ||
         SubsplitIsParentChildPair(subsplit_b, subsplit_a);
}
// A subsplit is valid when its two clades are disjoint.
bool Bitset::SubsplitIsValid() const {
  return SubsplitGetClade(SubsplitClade::Left)
      .IsDisjoint(SubsplitGetClade(SubsplitClade::Right));
}
// ** PCSP functions
// Build a PCSP bitset (sister|focal|right-child clades) from a parent
// subsplit and one of its child subsplits.
Bitset Bitset::PCSP(const Bitset& parent_subsplit, const Bitset& child_subsplit) {
  // Assert that:
  // - child_subsplit is either a right or left child of parent_subsplit.
  // - child_subsplit forms a valid subsplit.
  bool is_parent_valid = parent_subsplit.SubsplitIsValid();
  bool is_child_valid = child_subsplit.SubsplitIsValid();
  bool is_pair_valid = child_subsplit.SubsplitIsLeftChildOf(parent_subsplit) ||
                       child_subsplit.SubsplitIsRightChildOf(parent_subsplit);
  Assert(is_parent_valid && is_child_valid && is_pair_valid,
         "PCSP(): given bitsets are not a valid parent/child pair.");
  // Rotate the parent when the child hangs off its left clade, so the clade
  // the child descends from always lands in the focal position.
  if (child_subsplit.SubsplitIsLeftChildOf(parent_subsplit)) {
    return parent_subsplit.SubsplitRotate() +
           child_subsplit.SubsplitGetClade(SubsplitClade::Right);
  } else {
    return parent_subsplit + child_subsplit.SubsplitGetClade(SubsplitClade::Right);
  }
}
// Build a PCSP directly from its three equal-size clades.
Bitset Bitset::PCSP(const Bitset& sister_clade, const Bitset& focal_clade,
                    const Bitset& right_child_clade) {
  Assert(sister_clade.size() == focal_clade.size() &&
             focal_clade.size() == right_child_clade.size(),
         "PCSP(): all clades must be of equal size.");
  Bitset pcsp = sister_clade + focal_clade + right_child_clade;
  Assert(pcsp.PCSPIsValid(), "PCSP(): given clades form an invalid PCSP.");
  return pcsp;
}
Bitset Bitset::PCSP(const std::string sister_clade, const std::string focal_clade,
                    const std::string right_child_clade) {
  return PCSP(Bitset(sister_clade), Bitset(focal_clade), Bitset(right_child_clade));
}
// #350 I'd argue that if we are going to use SubsplitCladeCount (and I'm not sure about
// that) then we should use something like that here. Oh wait, there is a
// PCSPCladeCount.
size_t Bitset::PCSPGetCladeSize() const { return MultiCladeGetCladeSize(3); }
Bitset Bitset::PCSPGetClade(const size_t which_clade) const {
  return MultiCladeGetClade(which_clade, 3);
}
Bitset Bitset::PCSPGetClade(const PCSPClade which_clade) const {
  size_t which_clade_idx =
      static_cast<std::underlying_type<PCSPClade>::type>(which_clade);
  return MultiCladeGetClade(which_clade_idx, 3);
}
// Reconstruct the parent subsplit (sister, focal) of this PCSP.
Bitset Bitset::PCSPGetParentSubsplit() const {
  Bitset sister = PCSPGetClade(PCSPClade::Sister);
  Bitset focal = PCSPGetClade(PCSPClade::Focal);
  return Bitset::Subsplit(sister, focal);
}
// Reconstruct the child subsplit: the focal clade split into
// (focal minus right-child, right-child).
Bitset Bitset::PCSPGetChildSubsplit() const {
  Bitset focal = PCSPGetClade(PCSPClade::Focal);
  Bitset child_right = PCSPGetClade(PCSPClade::RightChild);
  Bitset child_left = focal & ~child_right;
  return Bitset::Subsplit(child_left, child_right);
}
std::string Bitset::PCSPToString() const { return MultiCladeToString(PCSPCladeCount); }
std::string Bitset::PCSPToHashString(const size_t length) const {
  std::stringstream ss;
  ss << PCSPGetParentSubsplit().SubsplitToHashString(length) << "_"
     << PCSPGetChildSubsplit().SubsplitToHashString(length);
  return ss.str();
}
// A PCSP is valid when its three clades are mutually consistent.
bool Bitset::PCSPIsValid() const {
  if (size() % PCSPCladeCount != 0) {
    return false;
  }
  Bitset sister = PCSPGetClade(PCSPClade::Sister);
  Bitset focal = PCSPGetClade(PCSPClade::Focal);
  Bitset child_right = PCSPGetClade(PCSPClade::RightChild);
  // The parent clades should be disjoint.
  if (!sister.IsDisjoint(focal)) {
    return false;
  }
  // The clade should split the focal clade of the parent,
  // so the taxa of child_left should be a subset of those of focal clade.
  if (!child_right.IsDisjoint(~focal)) {
    return false;
  }
  // Something has to be set in each clade.
  if (sister.None() || focal.None() || child_right.None()) {
    return false;
  }
  return true;
}
bool Bitset::PCSPChildIsLeaf() const {
  Assert(size() % PCSPCladeCount == 0,
         "Size isn't 0 mod 3 in Bitset::PCSPChildIsLeaf.");
  // If third clade of PCSP is empty, that means that the associated clade's right
  // subsplit is empty, so it is leaf.
  return PCSPGetClade(PCSPClade::RightChild).None();
}
// Rebuild the PCSP from its canonical parent and child subsplits.
Bitset Bitset::PCSPSortClades() const {
  Bitset parent = PCSPGetParentSubsplit();
  Bitset child = PCSPGetChildSubsplit();
  return Bitset::PCSP(parent, child);
}
bool Bitset::PCSPIsParentRootsplit() const {
  Assert(size() % PCSPCladeCount == 0,
         "Size isn't 0 mod 3 in Bitset::PCSPIsParentRootsplit.");
  return PCSPGetParentSubsplit().SubsplitIsRootsplit();
}
// Taxon counts of the two clades of this PCSP's child subsplit.
SizePair Bitset::PCSPGetChildSubsplitTaxonCounts() const {
  auto clade_size = PCSPGetCladeSize();
  auto total_clade_taxon_count =
      std::count(value_.begin() + clade_size,
                 value_.begin() + SubsplitCladeCount * clade_size, true);
  auto clade0_taxon_count =
      std::count(value_.begin() + SubsplitCladeCount * clade_size, value_.end(), true);
  Assert(clade0_taxon_count < total_clade_taxon_count,
         "PCSPGetChildSubsplitTaxonCounts: not a proper PCSP bitset.");
  return {static_cast<size_t>(clade0_taxon_count),
          static_cast<size_t>(total_clade_taxon_count - clade0_taxon_count)};
}
// True when parent_pcsp's child subsplit is child_pcsp's parent subsplit.
bool Bitset::PCSPIsParentChildPair(const Bitset& parent_pcsp,
                                   const Bitset& child_pcsp) {
  return parent_pcsp.PCSPGetChildSubsplit() == child_pcsp.PCSPGetParentSubsplit();
}
Bitset Bitset::Singleton(size_t n, size_t which_on) {
Assert(which_on < n,
"Bitset::Singleton(): selected 'on' bit is out of range for bitset size.");
Bitset singleton(n);
singleton.set(which_on);
return singleton;
}
Bitset Bitset::LeafSubsplitOfNonemptyClade(const Bitset& nonempty_clade) {
// Leaf pairs a nonempty left clade and an empty right clade.
Bitset leaf_subsplit =
Bitset::Subsplit(nonempty_clade, Bitset(nonempty_clade.size(), false));
return leaf_subsplit;
}
void AssertSubsplitIsLeafAdjacent(const Bitset& subsplit) {
  // A subsplit can sit directly above a leaf only when its left clade is
  // nonempty and its right clade contains exactly one taxon.
  const bool left_is_nonempty =
      subsplit.SubsplitGetClade(SubsplitClade::Left).Any();
  const bool right_is_singleton =
      subsplit.SubsplitGetClade(SubsplitClade::Right).IsSingleton();
  Assert(left_is_nonempty && right_is_singleton,
         "Assertion SisterAndLeafSubsplit failed: we want the left-hand clade of the "
         "subsplit be non-empty and the right-hand clade be a singleton.");
}
Bitset Bitset::LeafSubsplitOfParentSubsplit(const Bitset& parent_subsplit) {
  AssertSubsplitIsLeafAdjacent(parent_subsplit);
  // The leaf's nonzero clade is the parent's right-hand (singleton) clade.
  const Bitset leaf_clade =
      parent_subsplit.SubsplitGetClade(SubsplitClade::Right);
  return LeafSubsplitOfNonemptyClade(leaf_clade);
}
Bitset Bitset::PCSPFromRightParentCladeToLeaf(const Bitset& parent_subsplit) {
  AssertSubsplitIsLeafAdjacent(parent_subsplit);
  const auto taxon_count = parent_subsplit.SubsplitGetCladeSize();
  // A PCSP has three clades of taxon_count bits each; start with all zeros.
  Bitset leaf(3 * taxon_count);
  // Put the nonzero contents on the left of the leaf subsplit: copy the
  // two-clade parent subsplit into the first two clades, leaving the third
  // (child) clade empty.
  leaf.CopyFrom(parent_subsplit, 0, false);
  return leaf;
}
Bitset Bitset::UCASubsplitOfTaxonCount(const size_t taxon_count) {
  // The UCA ("universal common ancestor") subsplit has an all-ones left clade
  // and an all-zeros right clade.
  const Bitset zeros(taxon_count);
  const Bitset ones = ~zeros;
  return ones + zeros;
}
Bitset Bitset::RootsplitSubsplitOfClade(const Bitset& clade) {
  // Canonicalize the clade via Minorize, then pair it with its complement to
  // form the rootsplit subsplit.
  Bitset minor_clade = clade;
  minor_clade.Minorize();
  return ~minor_clade + minor_clade;
}
Bitset Bitset::PCSPFromUCAToRootsplit(const Bitset& rootsplit) {
  Assert(rootsplit.SubsplitIsRootsplit(),
         "Given subsplit is not rootsplit in Bitset::PCSPFromUCAToRootsplit.");
  // A subsplit holds two clades, so the taxon count is half its size.
  const size_t taxon_count = rootsplit.size() / 2;
  return PCSP(UCASubsplitOfTaxonCount(taxon_count), rootsplit);
}
Bitset Bitset::Remap(const Bitset& bitset, const SizeOptionVector& idx_table) {
  // Build a new bitset where position i takes the value of bitset at
  // idx_table[i]; positions with no mapping stay false.
  Bitset remapped(idx_table.size(), false);
  size_t new_idx = 0;
  for (const auto& maybe_old_idx : idx_table) {
    if (maybe_old_idx.has_value() && bitset[*maybe_old_idx]) {
      remapped.set(new_idx, true);
    }
    ++new_idx;
  }
  return remapped;
}
| 23,599
|
C++
|
.cpp
| 587
| 36.352641
| 88
| 0.691868
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,040
|
gp_engine.cpp
|
phylovi_bito/src/gp_engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "gp_engine.hpp"
#include "sugar.hpp"
#include "sbn_maps.hpp"
// Constructs a GP engine over the given site pattern, sized for node_count
// nodes and gpcsp_count PCSPs, with PLVs backed by a memory-mapped file.
GPEngine::GPEngine(SitePattern site_pattern, size_t node_count, size_t gpcsp_count,
                   const std::string& mmap_file_path, double rescaling_threshold,
                   EigenVectorXd sbn_prior,
                   EigenVectorXd unconditional_node_probabilities,
                   EigenVectorXd inverted_sbn_prior, bool use_gradients)
    : site_pattern_(std::move(site_pattern)),
      rescaling_threshold_(rescaling_threshold),
      // Cached so rescaling corrections can be applied in log space.
      log_rescaling_threshold_(log(rescaling_threshold)),
      plv_handler_(mmap_file_path, 0, site_pattern_.PatternCount(), resizing_factor_),
      unconditional_node_probabilities_(std::move(unconditional_node_probabilities)),
      branch_handler_(0),
      q_(std::move(sbn_prior)),
      inverted_sbn_prior_(std::move(inverted_sbn_prior)) {
  // Initialize site pattern-based data
  auto weights = site_pattern_.GetWeights();
  site_pattern_weights_ = EigenVectorXdOfStdVectorDouble(weights);
  log_marginal_likelihood_.resize(site_pattern_.PatternCount());
  // -inf is log(0): the marginal likelihood accumulator starts empty.
  log_marginal_likelihood_.setConstant(DOUBLE_NEG_INF);
  // Initialize node-based data
  GrowPLVs(node_count, std::nullopt, std::nullopt, true);
  InitializePLVsWithSitePatterns();
  // Initialize edge-based data
  branch_handler_.SetCount(GetGPCSPCount());
  branch_handler_.SetSpareCount(GetSpareGPCSPCount());
  GrowGPCSPs(gpcsp_count, std::nullopt, std::nullopt, true);
  // Initialize PLV temporaries. All four share the shape of PLV 0.
  quartet_root_plv_ = GetPLV(PVId(0));
  quartet_root_plv_.setZero();
  quartet_r_s_plv_ = quartet_root_plv_;
  quartet_q_s_plv_ = quartet_root_plv_;
  quartet_r_sorted_plv_ = quartet_root_plv_;
  InitializeBranchLengthHandler();
  UseGradientOptimization(use_gradients);
}
void GPEngine::InitializePriors(EigenVectorXd sbn_prior,
                                EigenVectorXd unconditional_node_probabilities,
                                EigenVectorXd inverted_sbn_prior) {
  // Install the SBN prior, per-node probabilities, and inverted prior into the
  // leading (non-padded) segments of the engine's data vectors.
  const size_t node_count = GetNodeCount();
  const size_t gpcsp_count = GetGPCSPCount();
  Assert(size_t(unconditional_node_probabilities.size()) == node_count,
         "unconditional_node_probabilities is wrong size for GPEngine.");
  Assert(size_t(sbn_prior.size()) == gpcsp_count,
         "sbn_prior is wrong size for GPEngine.");
  Assert(size_t(inverted_sbn_prior.size()) == gpcsp_count,
         "inverted_sbn_prior is wrong size for GPEngine.");
  unconditional_node_probabilities_.segment(0, node_count) =
      unconditional_node_probabilities;
  q_.segment(0, gpcsp_count) = sbn_prior;
  inverted_sbn_prior_.segment(0, gpcsp_count) = inverted_sbn_prior;
}
void GPEngine::SetNullPrior() { q_.setConstant(1.0); }
// ** Resize and Reindex
// Grows per-node PLV storage (and node-indexed data) to cover new_node_count
// nodes, optionally with an explicit allocation size, then reindexes the work
// space to realign with the DAG.
void GPEngine::GrowPLVs(const size_t new_node_count,
                        std::optional<const Reindexer> node_reindexer,
                        std::optional<const size_t> explicit_alloc,
                        const bool on_init) {
  const size_t old_node_count = GetNodeCount();
  const size_t old_plv_count = GetPLVCount();
  SetNodeCount(new_node_count);
  // Reallocate more space if needed.
  if ((GetPaddedNodeCount() > GetAllocatedNodeCount()) || explicit_alloc.has_value()) {
    // Over-allocate by resizing_factor_ to amortize future growth.
    SetAllocatedNodeCount(
        size_t(ceil(double(GetPaddedNodeCount()) * resizing_factor_)));
    if (explicit_alloc.has_value()) {
      Assert(explicit_alloc.value() >= GetNodeCount(),
             "Attempted to reallocate space smaller than node_count.");
      SetAllocatedNodeCount(explicit_alloc.value() + GetSpareNodeCount());
    }
    plv_handler_.Resize(new_node_count, GetAllocatedNodeCount());
    rescaling_counts_.conservativeResize(GetAllocatedPLVCount());
    unconditional_node_probabilities_.conservativeResize(GetAllocatedNodeCount());
  }
  // Resize to fit without deallocating unused memory.
  rescaling_counts_.conservativeResize(GetPaddedPLVCount());
  if (on_init) {
    // During construction, size to the exact node count (no padded region yet).
    unconditional_node_probabilities_.conservativeResize(GetNodeCount());
  } else {
    unconditional_node_probabilities_.conservativeResize(GetPaddedNodeCount());
  }
  // Initialize new work space.
  Assert((GetPLVs().back().rows() == MmappedNucleotidePLV::base_count_) &&
             (GetPLVs().back().cols() ==
              static_cast<Eigen::Index>(site_pattern_.PatternCount())) &&
             (size_t(GetPLVs().size()) == GetAllocatedPLVCount()),
         "Didn't get the right shape of PLVs out of Subdivide.");
  // Zero out all newly added PLVs and their rescaling counts.
  for (PVId pv_id = PVId(old_plv_count); pv_id < GetPaddedPLVCount(); pv_id++) {
    rescaling_counts_[pv_id.value_] = 0;
    GetPLV(pv_id).setZero();
  }
  for (NodeId node_id = NodeId(old_node_count); node_id < GetNodeCount(); node_id++) {
    // Outside of construction, new nodes get a neutral probability of 1.
    // (NOTE(review): on_init is loop-invariant; the check could be hoisted.)
    if (!on_init) {
      unconditional_node_probabilities_[node_id.value_] = 1.;
    }
  }
  // Reindex work space to realign with DAG.
  if (node_reindexer.has_value()) {
    ReindexPLVs(node_reindexer.value(), old_node_count);
  }
}
// Grows per-GPCSP (edge) data to cover new_gpcsp_count entries, optionally
// with an explicit allocation size, then reindexes to realign with the DAG.
void GPEngine::GrowGPCSPs(const size_t new_gpcsp_count,
                          std::optional<const Reindexer> gpcsp_reindexer,
                          std::optional<const size_t> explicit_alloc,
                          const bool on_init) {
  const size_t old_gpcsp_count = GetGPCSPCount();
  SetGPCSPCount(new_gpcsp_count);
  branch_handler_.Resize(new_gpcsp_count, std::nullopt, explicit_alloc,
                         gpcsp_reindexer);
  // Reallocate more space if needed.
  if ((GetPaddedGPCSPCount() > GetAllocatedGPCSPCount()) ||
      explicit_alloc.has_value()) {
    // Over-allocate by resizing_factor_ to amortize future growth.
    SetAllocatedGPCSPCount(
        size_t(ceil(double(GetPaddedGPCSPCount()) * resizing_factor_)));
    if (explicit_alloc.has_value()) {
      // Bug fix: this previously compared against GetNodeCount(), which is
      // unrelated to GPCSP storage (copy-paste from GrowPLVs).
      Assert(explicit_alloc.value() >= GetGPCSPCount(),
             "Attempted to reallocate space smaller than gpcsp_count.");
      SetAllocatedGPCSPCount(explicit_alloc.value() + GetSpareGPCSPCount());
    }
    hybrid_marginal_log_likelihoods_.conservativeResize(GetAllocatedGPCSPCount());
    log_likelihoods_.conservativeResize(GetAllocatedGPCSPCount(),
                                        site_pattern_.PatternCount());
    q_.conservativeResize(GetAllocatedGPCSPCount());
    inverted_sbn_prior_.conservativeResize(GetAllocatedGPCSPCount());
  }
  // Resize to fit without deallocating unused memory.
  hybrid_marginal_log_likelihoods_.conservativeResize(GetPaddedGPCSPCount());
  log_likelihoods_.conservativeResize(GetPaddedGPCSPCount(),
                                      site_pattern_.PatternCount());
  if (on_init) {
    // During construction, size the priors to the exact (non-padded) count;
    // their contents are presumably installed separately (see
    // InitializePriors) — TODO confirm.
    q_.conservativeResize(GetGPCSPCount());
    inverted_sbn_prior_.conservativeResize(GetGPCSPCount());
  } else {
    q_.conservativeResize(GetPaddedGPCSPCount());
    inverted_sbn_prior_.conservativeResize(GetPaddedGPCSPCount());
  }
  // Initialize new work space: hybrid marginals start at log(0) = -inf.
  for (size_t i = old_gpcsp_count; i < GetPaddedGPCSPCount(); i++) {
    hybrid_marginal_log_likelihoods_[i] = DOUBLE_NEG_INF;
  }
  // Outside of construction, new entries get neutral (uninformative) priors.
  // (Replaces the original empty `if (on_init) {} else {...}` construct.)
  if (!on_init) {
    for (size_t i = old_gpcsp_count; i < GetGPCSPCount(); i++) {
      q_[i] = 1.;
      inverted_sbn_prior_[i] = 1.;
    }
  }
  // Reindex work space to realign with DAG.
  if (gpcsp_reindexer.has_value()) {
    ReindexGPCSPs(gpcsp_reindexer.value(), old_gpcsp_count);
  }
}
// Reorders all PLV- and node-indexed work space according to node_reindexer,
// so the engine's data lines up with the DAG's new node numbering.
void GPEngine::ReindexPLVs(const Reindexer& node_reindexer,
                           const size_t old_node_count) {
  Assert(node_reindexer.size() == GetNodeCount(),
         "Node Reindexer is the wrong size for GPEngine.");
  Assert(node_reindexer.IsValid(GetNodeCount()), "Node Reindexer is not valid.");
  // Expand node_reindexer into plv_reindexer (each node owns several PLVs).
  Reindexer plv_reindexer =
      plv_handler_.BuildPVReindexer(node_reindexer, old_node_count, GetNodeCount());
  // Reindex data vectors: PLVs and their rescaling counts follow the PLV
  // reindexer; node-indexed data follows the node reindexer directly.
  plv_handler_.Reindex(plv_reindexer);
  Reindexer::ReindexInPlace<EigenVectorXi, int>(rescaling_counts_, plv_reindexer,
                                                GetPLVCount());
  Reindexer::ReindexInPlace<EigenVectorXd, double>(unconditional_node_probabilities_,
                                                   node_reindexer, GetNodeCount());
}
void GPEngine::ReindexGPCSPs(const Reindexer& gpcsp_reindexer,
const size_t old_gpcsp_count) {
Assert(gpcsp_reindexer.size() == GetGPCSPCount(),
"GPCSP Reindexer is the wrong size for GPEngine.");
Assert(gpcsp_reindexer.IsValid(GetGPCSPCount()),
"GPCSP Reindexer is not valid for GPEngine size.");
// Reindex data vectors.
Reindexer::ReindexInPlace<EigenVectorXd, double>(hybrid_marginal_log_likelihoods_,
gpcsp_reindexer, GetGPCSPCount());
Reindexer::ReindexInPlace<EigenVectorXd, double>(q_, gpcsp_reindexer,
GetGPCSPCount());
Reindexer::ReindexInPlace<EigenVectorXd, double>(inverted_sbn_prior_, gpcsp_reindexer,
GetGPCSPCount());
}
void GPEngine::GrowSparePLVs(const size_t new_node_spare_count) {
  // Only ever grow the spare (scratch) node capacity; never shrink it.
  if (new_node_spare_count <= GetSpareNodeCount()) {
    return;
  }
  SetSpareNodeCount(new_node_spare_count);
  GrowPLVs(GetNodeCount());
}
void GPEngine::GrowSpareGPCSPs(const size_t new_gpcsp_spare_count) {
  // Only ever grow the spare (scratch) GPCSP capacity; never shrink it.
  if (new_gpcsp_spare_count <= GetSpareGPCSPCount()) {
    return;
  }
  SetSpareGPCSPCount(new_gpcsp_spare_count);
  branch_handler_.SetSpareCount(new_gpcsp_spare_count);
  GrowGPCSPs(GetGPCSPCount());
}
// ** GPOperations
void GPEngine::operator()(const GPOperations::ZeroPLV& op) {
  // A zeroed PLV carries no rescaling bookkeeping.
  rescaling_counts_(op.dest_) = 0;
  GetPLV(PVId(op.dest_)).setZero();
}
// Fills the destination PLV with the stationary distribution, pre-scaled by
// the rootsplit's SBN probability q_.
void GPEngine::operator()(const GPOperations::SetToStationaryDistribution& op) {
  auto& plv = GetPLV(PVId(op.dest_));
  for (Eigen::Index row_idx = 0; row_idx < plv.rows(); ++row_idx) {
    // Multiplication by q_ avoids special treatment of the rhat vector for the
    // rootsplits.
    plv.row(row_idx).array() =
        q_(op.root_gpcsp_idx_) * stationary_distribution_(row_idx);
  }
  rescaling_counts_(op.dest_) = 0;
}
// Accumulates into dest_: q(gpcsp) * P(branch_length) * src_, adjusting for
// any rescaling-count mismatch between src_ and dest_.
void GPEngine::operator()(const GPOperations::IncrementWithWeightedEvolvedPLV& op) {
  const auto branch_length = branch_handler_(EdgeId(op.gpcsp_));
  SetTransitionMatrixToHaveBranchLength(branch_length);
  // We assume that we've done a PrepForMarginalization operation, and thus the
  // rescaling count for op.dest_ is the minimum of the rescaling counts among the
  // op.src_s. Thus this should be non-negative:
  const int rescaling_difference =
      rescaling_counts_(op.src_) - rescaling_counts_(op.dest_);
  Assert(rescaling_difference >= 0,
         "dest_ rescaling too large in IncrementWithWeightedEvolvedPLV");
  const double rescaling_factor =
      rescaling_difference == 0
          ? 1.
          : pow(rescaling_threshold_, static_cast<double>(rescaling_difference));
  // We are going to have evidence of reduced-precision arithmetic here because we are
  // adding together things of radically different rescaling amounts. This appears
  // unavoidable without special-purpose truncation code, which doesn't seem
  // worthwhile.
  GetPLV(PVId(op.dest_)) +=
      rescaling_factor * q_(op.gpcsp_) * transition_matrix_ * GetPLV(PVId(op.src_));
}
void GPEngine::operator()(const GPOperations::ResetMarginalLikelihood& op) {  // NOLINT
  // The operation carries no payload; just clear the accumulated marginal.
  ResetLogMarginalLikelihood();
}
// Folds one rootsplit's contribution into the per-site log marginal
// likelihood and records its conditional per-rootsplit likelihood row.
void GPEngine::operator()(const GPOperations::IncrementMarginalLikelihood& op) {
  Assert(rescaling_counts_(op.stationary_times_prior_) == 0,
         "Surprise! Rescaled stationary distribution in IncrementMarginalLikelihood");
  // This operation does two things: increment the overall per-site log marginal
  // likelihood, and also set the conditional per-rootsplit marginal likelihood.
  //
  // We first calculate the unconditional contribution of the rootsplit to the overall
  // per-site marginal likelihood. It's an unconditional contribution because our
  // stationary distribution incorporates the prior on rootsplits.
  log_likelihoods_.row(op.rootsplit_) =
      (GetPLV(PVId(op.stationary_times_prior_)).transpose() * GetPLV(PVId(op.p_)))
          .diagonal()
          .array()
          .log() +
      LogRescalingFor(op.p_);
  // We can then increment the overall per-site marginal likelihood.
  log_marginal_likelihood_ = NumericalUtils::LogAddVectors(
      log_marginal_likelihood_, log_likelihoods_.row(op.rootsplit_));
  // However, we want the row in log_likelihoods_ to be the marginal likelihood
  // *conditional* on that rootsplit, so we log-divide by the rootsplit's probability.
  log_likelihoods_.row(op.rootsplit_).array() -= log(q_[op.rootsplit_]);
}
// Element-wise product of two PLVs into dest_. Rescaling exponents are
// additive under multiplication, so dest_'s count is the sum of the sources'.
void GPEngine::operator()(const GPOperations::Multiply& op) {
  GetPLV(PVId(op.dest_)).array() =
      GetPLV(PVId(op.src1_)).array() * GetPLV(PVId(op.src2_)).array();
  rescaling_counts_(op.dest_) =
      rescaling_counts_(op.src1_) + rescaling_counts_(op.src2_);
  AssertPLVIsFinite(op.dest_, "Multiply dest_ is not finite");
  RescalePLVIfNeeded(op.dest_);
}
// Computes the per-pattern log likelihoods across edge op.dest_ at its current
// branch length, storing them in that edge's row of log_likelihoods_.
void GPEngine::operator()(const GPOperations::Likelihood& op) {
  SetTransitionMatrixToHaveBranchLength(branch_handler_(EdgeId(op.dest_)));
  PreparePerPatternLogLikelihoodsForGPCSP(op.parent_, op.child_);
  log_likelihoods_.row(op.dest_) = per_pattern_log_likelihoods_;
}
void GPEngine::operator()(const GPOperations::OptimizeBranchLength& op) {
  // Delegate to the named optimization routine (dropping the redundant
  // `return` of a void expression).
  OptimizeBranchLength(op);
}
// Normalize an unnormalized log posterior: subtract the log-sum (so it sums to
// one in linear space) and exponentiate.
EigenVectorXd NormalizedPosteriorOfLogUnnormalized(
    EigenVectorXd log_unnormalized_posterior) {
  log_unnormalized_posterior.array() -=
      NumericalUtils::LogSum(log_unnormalized_posterior);
  return log_unnormalized_posterior.array().exp();
}
// Updates q_ over the PCSP support [op.start_, op.stop_) to the normalized
// posterior (likelihood times prior), preferring hybrid marginals when every
// entry in the range has been filled in.
void GPEngine::operator()(const GPOperations::UpdateSBNProbabilities& op) {
  const size_t range_length = op.stop_ - op.start_;
  if (range_length == 1) {
    // A singleton support gets probability one.
    q_(op.start_) = 1.;
  } else {
    EigenVectorXd log_likelihoods;
    EigenConstVectorXdRef our_hybrid_log_likelihoods =
        hybrid_marginal_log_likelihoods_.segment(op.start_, range_length);
    // Hybrid marginals are initialized to -inf; only use them if all entries
    // in this range have been computed.
    if (our_hybrid_log_likelihoods.minCoeff() > DOUBLE_NEG_INF) {
      log_likelihoods = our_hybrid_log_likelihoods;
    } else {
      log_likelihoods = GetPerGPCSPLogLikelihoods(op.start_, range_length);
    }
    // Current q_ values serve as the prior; combine in log space and normalize.
    EigenVectorXd log_prior = q_.segment(op.start_, range_length).array().log();
    q_.segment(op.start_, range_length) =
        NormalizedPosteriorOfLogUnnormalized(log_likelihoods + log_prior);
  }
}
void GPEngine::operator()(const GPOperations::PrepForMarginalization& op) {
  // The destination PLV will accumulate contributions from all the sources, so
  // it takes the smallest rescaling count among them (see
  // IncrementWithWeightedEvolvedPLV, which relies on this invariant).
  Assert(!op.src_vector_.empty(), "Empty src_vector in PrepForMarginalization");
  int min_rescaling_count = rescaling_counts_(op.src_vector_.front());
  for (const size_t src_idx : op.src_vector_) {
    min_rescaling_count = std::min(min_rescaling_count, rescaling_counts_(src_idx));
  }
  rescaling_counts_(op.dest_) = min_rescaling_count;
}
void GPEngine::ProcessOperations(GPOperationVector operations) {
  // Dispatch each operation variant to the matching operator() overload.
  for (const auto& op : operations) {
    std::visit(*this, op);
  }
}
// Builds the transition matrix P(t) = V exp(t*Lambda) V^-1 for branch length t
// from the cached eigendecomposition of the rate matrix.
void GPEngine::SetTransitionMatrixToHaveBranchLength(double branch_length) {
  diagonal_matrix_.diagonal() = (branch_length * eigenvalues_).array().exp();
  transition_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
}
// Builds P(t) plus its first and second derivatives with respect to t: each
// derivative multiplies the diagonal by one extra factor of the eigenvalues.
void GPEngine::SetTransitionAndDerivativeMatricesToHaveBranchLength(
    double branch_length) {
  diagonal_vector_ = (branch_length * eigenvalues_).array().exp();
  diagonal_matrix_.diagonal() = diagonal_vector_;
  transition_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
  // Now calculating derivative matrix: P'(t) = V (Lambda exp(t*Lambda)) V^-1.
  diagonal_matrix_.diagonal() = eigenvalues_.array() * diagonal_vector_.array();
  derivative_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
  // Now calculating hessian matrix: P''(t) = V (Lambda^2 exp(t*Lambda)) V^-1.
  diagonal_matrix_.diagonal() =
      eigenvalues_.array() * eigenvalues_.array() * diagonal_vector_.array();
  hessian_matrix_ = eigenmatrix_ * diagonal_matrix_ * inverse_eigenmatrix_;
}
// Builds the transpose of P(t) directly: (V D V^-1)^T = V^-T D V^T.
void GPEngine::SetTransitionMatrixToHaveBranchLengthAndTranspose(double branch_length) {
  diagonal_matrix_.diagonal() = (branch_length * eigenvalues_).array().exp();
  transition_matrix_ =
      inverse_eigenmatrix_.transpose() * diagonal_matrix_ * eigenmatrix_.transpose();
}
// Overwrites the live (non-padded) branch length range with the given vector.
void GPEngine::SetBranchLengths(EigenVectorXd branch_lengths) {
  Assert(size_t(branch_lengths.size()) == GetGPCSPCount(),
         "Size mismatch in GPEngine::SetBranchLengths.");
  branch_handler_.GetBranchLengths().GetData().segment(0, GetGPCSPCount()) =
      branch_lengths;
};
// Sets every branch length (including padded entries) to the given constant.
void GPEngine::SetBranchLengthsToConstant(double branch_length) {
  branch_handler_.GetBranchLengths().GetData().setConstant(branch_length);
};
// Sets every branch length to the branch handler's default value.
void GPEngine::SetBranchLengthsToDefault() {
  branch_handler_.GetBranchLengths().GetData().setConstant(
      branch_handler_.GetDefaultBranchLength());
};
// Resets the per-site log marginal accumulator to -inf (log of zero).
void GPEngine::ResetLogMarginalLikelihood() {
  log_marginal_likelihood_.setConstant(DOUBLE_NEG_INF);
}
void GPEngine::CopyNodeData(const NodeId src_node_idx, const NodeId dest_node_idx) {
  // Copies the per-node data (currently the unconditional node probability)
  // from one node slot to another.
  Assert(
      (src_node_idx < GetPaddedNodeCount()) && (dest_node_idx < GetPaddedNodeCount()),
      "Cannot copy node data with src or dest index out-of-range.");
  const double src_probability =
      unconditional_node_probabilities_[src_node_idx.value_];
  unconditional_node_probabilities_[dest_node_idx.value_] = src_probability;
}
// Copies a PLV together with its rescaling count so the pair stays consistent.
void GPEngine::CopyPLVData(const size_t src_plv_idx, const size_t dest_plv_idx) {
  Assert((src_plv_idx < GetPaddedPLVCount()) && (dest_plv_idx < GetPaddedPLVCount()),
         "Cannot copy PLV data with src or dest index out-of-range.");
  GetPLV(PVId(dest_plv_idx)) = GetPLV(PVId(src_plv_idx));
  rescaling_counts_[dest_plv_idx] = rescaling_counts_[src_plv_idx];
}
// Copies all per-GPCSP data (branch length, SBN probability, inverted prior)
// from one edge slot to another.
void GPEngine::CopyGPCSPData(const EdgeId src_gpcsp_idx, const EdgeId dest_gpcsp_idx) {
  // Bug fix: the assert message previously said "PLV data" (copy-paste from
  // CopyPLVData); this function copies GPCSP data.
  Assert((src_gpcsp_idx < GetPaddedGPCSPCount()) &&
             (dest_gpcsp_idx < GetPaddedGPCSPCount()),
         "Cannot copy GPCSP data with src or dest index out-of-range.");
  branch_handler_(dest_gpcsp_idx) = branch_handler_(src_gpcsp_idx);
  q_[dest_gpcsp_idx.value_] = q_[src_gpcsp_idx.value_];
  inverted_sbn_prior_[dest_gpcsp_idx.value_] =
      inverted_sbn_prior_[src_gpcsp_idx.value_];
}
// ** Access
// Total log marginal likelihood: per-pattern values weighted by pattern counts.
double GPEngine::GetLogMarginalLikelihood() const {
  return (log_marginal_likelihood_.array() * site_pattern_weights_.array()).sum();
}
// Returns a copy of the live (non-padded) branch length vector.
EigenVectorXd GPEngine::GetBranchLengths() const {
  return branch_handler_.GetBranchLengths().GetData().segment(0, GetGPCSPCount());
};
// Returns a copy of branch lengths in [start, start + length); the range may
// extend into the padded region but not past it.
EigenVectorXd GPEngine::GetBranchLengths(const size_t start,
                                         const size_t length) const {
  Assert(start + length <= GetPaddedGPCSPCount(),
         "Requested range of BranchLengths is out-of-range.");
  return branch_handler_.GetBranchLengths().GetData().segment(start, length);
};
// Returns branch lengths from the spare (scratch) GPCSP region.
EigenVectorXd GPEngine::GetSpareBranchLengths(const size_t start,
                                              const size_t length) const {
  return GetBranchLengths(GetSpareGPCSPIndex(start), length);
}
// Returns the per-edge branch length differences tracked by the branch handler.
EigenVectorXd GPEngine::GetBranchLengthDifferences() const {
  return branch_handler_.GetBranchDifferences().GetData().segment(0, GetGPCSPCount());
};
// Per-GPCSP log likelihoods: each edge's per-pattern log likelihoods weighted
// by the site pattern counts.
EigenVectorXd GPEngine::GetPerGPCSPLogLikelihoods() const {
  return log_likelihoods_.block(0, 0, GetGPCSPCount(), log_likelihoods_.cols()) *
         site_pattern_weights_;
};
// Range version of GetPerGPCSPLogLikelihoods over [start, start + length).
EigenVectorXd GPEngine::GetPerGPCSPLogLikelihoods(const size_t start,
                                                  const size_t length) const {
  Assert(start + length <= GetPaddedGPCSPCount(),
         "Requested range of PerGPCSPLogLikelihoods is out-of-range.");
  return log_likelihoods_.block(start, 0, length, log_likelihoods_.cols()) *
         site_pattern_weights_;
};
// Per-GPCSP log likelihoods from the spare (scratch) GPCSP region.
EigenVectorXd GPEngine::GetSparePerGPCSPLogLikelihoods(const size_t start,
                                                       const size_t length) const {
  return GetPerGPCSPLogLikelihoods(GetSpareGPCSPIndex(start), length);
}
// Adds each GPCSP's log prior contribution (scaled by the number of sites) to
// its log likelihood.
EigenVectorXd GPEngine::GetPerGPCSPComponentsOfFullLogMarginal() const {
  return GetPerGPCSPLogLikelihoods().array() +
         static_cast<double>(site_pattern_.SiteCount()) * q_.array().log();
};
// View of the live (non-padded) per-GPCSP-by-pattern log likelihood matrix.
EigenConstMatrixXdRef GPEngine::GetLogLikelihoodMatrix() const {
  return log_likelihoods_.block(0, 0, GetGPCSPCount(), log_likelihoods_.cols());
};
// View of the hybrid marginal log likelihoods (entries default to -inf).
EigenConstVectorXdRef GPEngine::GetHybridMarginals() const {
  return hybrid_marginal_log_likelihoods_;
};
EigenConstVectorXdRef GPEngine::GetSBNParameters() const { return q_; };
// Unpacks the operation and forwards to the index-based overload.
DoublePair GPEngine::LogLikelihoodAndDerivative(
    const GPOperations::OptimizeBranchLength& op) {
  return LogLikelihoodAndDerivative(op.gpcsp_, op.rootward_, op.leafward_);
}
// Returns (log likelihood, d/dt log likelihood) for the given edge at its
// current branch length, using the rootward and leafward PLVs.
DoublePair GPEngine::LogLikelihoodAndDerivative(const size_t gpcsp,
                                                const size_t rootward,
                                                const size_t leafward) {
  SetTransitionAndDerivativeMatricesToHaveBranchLength(branch_handler_(EdgeId(gpcsp)));
  PreparePerPatternLogLikelihoodsForGPCSP(rootward, leafward);
  // The prior is expressed using the current value of q_.
  // The phylogenetic component of the likelihood is weighted with the number of times
  // we see the site patterns.
  const double log_likelihood = per_pattern_log_likelihoods_.dot(site_pattern_weights_);
  // The per-site likelihood derivative is calculated in the same way as the per-site
  // likelihood, but using the derivative matrix instead of the transition matrix.
  // We first prepare two useful vectors _without_ likelihood rescaling, because the
  // rescalings cancel out in the ratio below.
  PrepareUnrescaledPerPatternLikelihoodDerivatives(rootward, leafward);
  PrepareUnrescaledPerPatternLikelihoods(rootward, leafward);
  // If l_i is the per-site likelihood, the derivative of log(l_i) is the derivative
  // of l_i divided by l_i.
  per_pattern_likelihood_derivative_ratios_ =
      per_pattern_likelihood_derivatives_.array() / per_pattern_likelihoods_.array();
  const double log_likelihood_derivative =
      per_pattern_likelihood_derivative_ratios_.dot(site_pattern_weights_);
  return {log_likelihood, log_likelihood_derivative};
}
// Unpacks the operation and forwards to the index-based overload.
std::tuple<double, double, double> GPEngine::LogLikelihoodAndFirstTwoDerivatives(
    const GPOperations::OptimizeBranchLength& op) {
  return LogLikelihoodAndFirstTwoDerivatives(op.gpcsp_, op.rootward_, op.leafward_);
}
// Returns (log likelihood, first derivative, second derivative) with respect
// to the branch length of the given edge, at its current branch length.
std::tuple<double, double, double> GPEngine::LogLikelihoodAndFirstTwoDerivatives(
    const size_t gpcsp, const size_t rootward, const size_t leafward) {
  SetTransitionAndDerivativeMatricesToHaveBranchLength(branch_handler_(EdgeId(gpcsp)));
  PreparePerPatternLogLikelihoodsForGPCSP(rootward, leafward);
  const double log_likelihood = per_pattern_log_likelihoods_.dot(site_pattern_weights_);
  // The per-site likelihood derivative is calculated in the same way as the per-site
  // likelihood, but using the derivative matrix instead of the transition matrix.
  // We first prepare two useful vectors _without_ likelihood rescaling, because the
  // rescalings cancel out in the ratio below.
  PrepareUnrescaledPerPatternLikelihoodDerivatives(rootward, leafward);
  PrepareUnrescaledPerPatternLikelihoods(rootward, leafward);
  // If l_i is the per-site likelihood, the derivative of log(l_i) is the derivative
  // of l_i divided by l_i.
  per_pattern_likelihood_derivative_ratios_ =
      per_pattern_likelihood_derivatives_.array() / per_pattern_likelihoods_.array();
  const double log_likelihood_gradient =
      per_pattern_likelihood_derivative_ratios_.dot(site_pattern_weights_);
  // Second derivative is calculated the same way, but has an extra term due to
  // the product rule: (log l)'' = (l'' l - (l')^2) / l^2.
  PrepareUnrescaledPerPatternLikelihoodSecondDerivatives(rootward, leafward);
  per_pattern_likelihood_second_derivative_ratios_ =
      (per_pattern_likelihood_second_derivatives_.array() *
           per_pattern_likelihoods_.array() -
       per_pattern_likelihood_derivatives_.array() *
           per_pattern_likelihood_derivatives_.array()) /
      (per_pattern_likelihoods_.array() * per_pattern_likelihoods_.array());
  const double log_likelihood_hessian =
      per_pattern_likelihood_second_derivative_ratios_.dot(site_pattern_weights_);
  return std::make_tuple(log_likelihood, log_likelihood_gradient,
                         log_likelihood_hessian);
}
// Seeds the leaf PLVs from the alignment: one PLV per taxon, one column per
// site pattern.
void GPEngine::InitializePLVsWithSitePatterns() {
  for (auto& plv : GetPLVs()) {
    plv.setZero();
  }
  NodeId taxon_idx = 0;
  for (const auto& pattern : site_pattern_.GetPatterns()) {
    size_t site_idx = 0;
    for (const int symbol : pattern) {
      Assert(symbol >= 0, "Negative symbol!");
      if (symbol == MmappedNucleotidePLV::base_count_) { // Gap character.
        // Gaps are uninformative: every base gets likelihood 1.
        GetPLV(PVId(taxon_idx.value_)).col(site_idx).setConstant(1.);
      } else if (symbol < MmappedNucleotidePLV::base_count_) {
        // Observed base: an indicator vector for that base.
        GetPLV(PVId(taxon_idx.value_))(symbol, site_idx) = 1.;
      }
      // Symbols above base_count_ are silently left as all-zero columns.
      site_idx++;
    }
    taxon_idx++;
  }
}
void GPEngine::RescalePLV(size_t plv_idx, int rescaling_count) {
  // Divide the PLV by rescaling_threshold_^rescaling_count and record that
  // factor in the PLV's rescaling count.
  Assert(rescaling_count >= 0, "Negative rescaling count in RescalePLV.");
  if (rescaling_count == 0) {
    return;  // Nothing to do.
  }
  const double rescaling_factor =
      pow(rescaling_threshold_, static_cast<double>(rescaling_count));
  GetPLV(PVId(plv_idx)) /= rescaling_factor;
  rescaling_counts_(plv_idx) += rescaling_count;
}
// Fails with the given message if any entry of the PLV is NaN or infinite.
void GPEngine::AssertPLVIsFinite(size_t plv_idx, const std::string& message) const {
  Assert(GetPLV(PVId(plv_idx)).array().isFinite().all(), message);
}
std::pair<double, double> GPEngine::PLVMinMax(size_t plv_idx) const {
  // Smallest and largest entries of the PLV, used for rescaling decisions.
  const auto& plv = GetPLV(PVId(plv_idx));
  return {plv.minCoeff(), plv.maxCoeff()};
}
// Rescales the PLV when its entries have all become tiny, to keep them in a
// numerically safe range.
void GPEngine::RescalePLVIfNeeded(size_t plv_idx) {
  auto [min_entry, max_entry] = PLVMinMax(plv_idx);
  Assert(min_entry >= 0., "PLV with negative entry (" + std::to_string(min_entry) +
                              ") passed to RescalePLVIfNeeded");
  if (max_entry == 0) {
    return;
  }
  // else
  // Count how many threshold factors fit under max_entry. This loop assumes
  // rescaling_threshold_ < 1 so that dividing grows max_entry — TODO confirm.
  int rescaling_count = 0;
  while (max_entry < rescaling_threshold_) {
    max_entry /= rescaling_threshold_;
    rescaling_count++;
  }
  RescalePLV(plv_idx, rescaling_count);
}
// Log-space correction for a PLV's accumulated rescaling:
// rescaling_count * log(rescaling_threshold_).
double GPEngine::LogRescalingFor(size_t plv_idx) {
  return static_cast<double>(rescaling_counts_(plv_idx)) * log_rescaling_threshold_;
}
// Registers the objective-function callbacks used by each branch length
// optimization method. Optimizers that work in log branch length space receive
// gradients transformed via the chain rule (d/dy f(e^y) = e^y f'(e^y)).
void GPEngine::InitializeBranchLengthHandler() {
  // Set Nongradient Brent: negative log likelihood as a function of
  // log branch length.
  DAGBranchHandler::NegLogLikelihoodFunc brent_nongrad_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        SetTransitionMatrixToHaveBranchLength(exp(log_branch_length));
        PreparePerPatternLogLikelihoodsForGPCSP(parent_id.value_, child_id.value_);
        return -per_pattern_log_likelihoods_.dot(site_pattern_weights_);
      };
  branch_handler_.SetBrentFunc(brent_nongrad_func);
  // Set Gradient Brent: negated value and log-space derivative.
  DAGBranchHandler::NegLogLikelihoodAndDerivativeFunc brent_grad_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        double branch_length = exp(log_branch_length);
        // Store the candidate branch length so the likelihood routine uses it.
        branch_handler_(edge_id) = branch_length;
        auto [log_likelihood, log_likelihood_derivative] =
            this->LogLikelihoodAndDerivative(edge_id.value_, parent_id.value_,
                                             child_id.value_);
        return std::make_pair(-log_likelihood,
                              -branch_length * log_likelihood_derivative);
      };
  branch_handler_.SetBrentWithGradientFunc(brent_grad_func);
  // Set Gradient Ascent: works directly in branch length space.
  DAGBranchHandler::LogLikelihoodAndDerivativeFunc grad_ascent_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double branch_length) {
        branch_handler_(edge_id) = branch_length;
        return this->LogLikelihoodAndDerivative(edge_id.value_, parent_id.value_,
                                                child_id.value_);
      };
  branch_handler_.SetGradientAscentFunc(grad_ascent_func);
  // Set Logspace Gradient Ascent.
  // NOTE(review): identical body to grad_ascent_func; presumably the handler
  // applies the log transform internally — confirm against DAGBranchHandler.
  DAGBranchHandler::LogLikelihoodAndDerivativeFunc logspace_grad_ascent_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double branch_length) {
        branch_handler_(edge_id) = branch_length;
        return this->LogLikelihoodAndDerivative(edge_id.value_, parent_id.value_,
                                                child_id.value_);
      };
  branch_handler_.SetLogSpaceGradientAscentFunc(logspace_grad_ascent_func);
  // Set Newton-Raphson: value plus first and second log-space derivatives.
  DAGBranchHandler::LogLikelihoodAndFirstTwoDerivativesFunc newton_raphson_func =
      [this](EdgeId edge_id, PVId parent_id, PVId child_id, double log_branch_length) {
        double x = exp(log_branch_length);
        branch_handler_(edge_id) = x;
        auto [f_x, f_prime_x, f_double_prime_x] =
            this->LogLikelihoodAndFirstTwoDerivatives(edge_id.value_, parent_id.value_,
                                                      child_id.value_);
        // x = exp(y) --> f'(exp(y)) = exp(y) * f'(exp(y)) = x * f'(x)
        double f_prime_y = x * f_prime_x;
        double f_double_prime_y = f_prime_y + std::pow(x, 2) * f_double_prime_x;
        return std::make_tuple(f_x, f_prime_y, f_double_prime_y);
      };
  branch_handler_.SetNewtonRaphsonFunc(newton_raphson_func);
}
// Forwards the chosen branch length optimization method to the handler.
void GPEngine::SetOptimizationMethod(const OptimizationMethod method) {
  branch_handler_.SetOptimizationMethod(method);
}
void GPEngine::UseGradientOptimization(const bool use_gradients) {
  // Choose between the gradient-aware and derivative-free Brent optimizers.
  if (use_gradients) {
    branch_handler_.SetOptimizationMethod(
        OptimizationMethod::BrentOptimizationWithGradients);
  } else {
    branch_handler_.SetOptimizationMethod(OptimizationMethod::BrentOptimization);
  }
}
// Optimizes one edge's branch length via the handler; after the first pass,
// the previous estimate is reused as a warm start.
void GPEngine::OptimizeBranchLength(const GPOperations::OptimizeBranchLength& op) {
  branch_handler_.OptimizeBranchLength(EdgeId(op.gpcsp_), PVId(op.rootward_),
                                       PVId(op.leafward_), !IsFirstOptimization());
}
// Forwards the optimization convergence precision to the branch handler.
void GPEngine::SetSignificantDigitsForOptimization(int significant_digits) {
  branch_handler_.SetSignificantDigitsForOptimization(significant_digits);
}
// Initializes each PCSP's branch length to the mean branch length observed for
// it across the tree collection, or the default for unobserved PCSPs.
void GPEngine::HotStartBranchLengths(const RootedTreeCollection& tree_collection,
                                     const BitsetSizeMap& indexer) {
  size_t unique_gpcsp_count = branch_handler_.size();
  branch_handler_.GetBranchLengths().GetData().setZero();
  EigenVectorXi observed_gpcsp_counts = EigenVectorXi::Zero(unique_gpcsp_count);
  // Set the branch length vector to be the total of the branch lengths for each PCSP,
  // and count the number of times we have seen each PCSP (into gpcsp_counts).
  auto tally_branch_handler_and_gpcsp_count =
      [&observed_gpcsp_counts, this](EdgeId gpcsp_idx, const Bitset& bitset,
                                     const RootedTree& tree, const size_t tree_id,
                                     const Node* focal_node) {
        branch_handler_(gpcsp_idx) += tree.BranchLength(focal_node);
        observed_gpcsp_counts(gpcsp_idx.value_)++;
      };
  RootedSBNMaps::FunctionOverRootedTreeCollection(tally_branch_handler_and_gpcsp_count,
                                                  tree_collection, indexer,
                                                  branch_handler_.size());
  for (EdgeId gpcsp_idx = 0; gpcsp_idx.value_ < unique_gpcsp_count;
       gpcsp_idx.value_++) {
    if (observed_gpcsp_counts(gpcsp_idx.value_) == 0) {
      // Never observed: fall back to the default branch length.
      branch_handler_(gpcsp_idx) = branch_handler_.GetDefaultBranchLength();
    } else {
      // Normalize the branch length total using the counts to get a mean branch
      // length.
      branch_handler_(gpcsp_idx) /=
          static_cast<double>(observed_gpcsp_counts(gpcsp_idx.value_));
    }
  }
}
SizeDoubleVectorMap GPEngine::GatherBranchLengths(
    const RootedTreeCollection& tree_collection, const BitsetSizeMap& indexer) {
  // Collect, for each PCSP index, every branch length observed for it across
  // the tree collection.
  SizeDoubleVectorMap branch_lengths_by_gpcsp;
  auto record_branch_length = [&branch_lengths_by_gpcsp](
                                  EdgeId gpcsp_idx, const Bitset& bitset,
                                  const RootedTree& tree, const size_t tree_id,
                                  const Node* focal_node) {
    branch_lengths_by_gpcsp[gpcsp_idx.value_].push_back(
        tree.BranchLength(focal_node));
  };
  RootedSBNMaps::FunctionOverRootedTreeCollection(
      record_branch_length, tree_collection, indexer, branch_handler_.size());
  return branch_lengths_by_gpcsp;
}
// Sets each PCSP's branch length to the first branch length observed for it in
// the tree collection, falling back to the default for unobserved PCSPs.
void GPEngine::TakeFirstBranchLength(const RootedTreeCollection& tree_collection,
                                     const BitsetSizeMap& indexer) {
  size_t unique_gpcsp_count = branch_handler_.size();
  branch_handler_.GetBranchLengths().GetData().setZero();
  EigenVectorXi observed_gpcsp_counts = EigenVectorXi::Zero(unique_gpcsp_count);
  // Set the branch length vector to be the first encountered branch length for each
  // PCSP, and mark when we have seen each PCSP (into observed_gpcsp_counts).
  auto set_first_branch_length_and_increment_gpcsp_count =
      [&observed_gpcsp_counts, this](EdgeId gpcsp_idx, const Bitset& bitset,
                                     const RootedTree& tree, const size_t tree_id,
                                     const Node* focal_node) {
        if (observed_gpcsp_counts(gpcsp_idx.value_) == 0) {
          branch_handler_(gpcsp_idx) = tree.BranchLength(focal_node);
          observed_gpcsp_counts(gpcsp_idx.value_)++;
        }
      };
  RootedSBNMaps::FunctionOverRootedTreeCollection(
      set_first_branch_length_and_increment_gpcsp_count, tree_collection, indexer,
      branch_handler_.size());
  // If a branch length was not set above, set it to the default length.
  // Consistency fix: compare via .value_ (as in HotStartBranchLengths) instead
  // of relying on an EdgeId-to-size_t comparison operator.
  for (EdgeId gpcsp_idx = 0; gpcsp_idx.value_ < unique_gpcsp_count;
       gpcsp_idx.value_++) {
    if (observed_gpcsp_counts(gpcsp_idx.value_) == 0) {
      branch_handler_(gpcsp_idx) = branch_handler_.GetDefaultBranchLength();
    }
  }
}
// Compute hybrid log likelihoods for every (rootward, sister, rotated, sorted)
// tip combination in the quartet request. Each entry combines a non-sequence
// SBN probability term with a sequence-based log likelihood evaluated via the
// scratch PLVs (quartet_*_plv_). NOTE(review): this mutates member scratch
// state (transition_matrix_, quartet_*_plv_, per_pattern_log_likelihoods_), so
// it is not safe to call concurrently — confirm against callers.
EigenVectorXd GPEngine::CalculateQuartetHybridLikelihoods(
    const QuartetHybridRequest& request) {
  // Rescaled PLVs are not supported here; fail fast if any input is rescaled.
  auto CheckRescaling = [this](size_t plv_idx) {
    Assert(rescaling_counts_[plv_idx] == 0,
           "Rescaling not implemented in CalculateQuartetHybridLikelihoods.");
  };
  std::vector<double> result;
  for (const auto& rootward_tip : request.rootward_tips_) {
    CheckRescaling(rootward_tip.plv_idx_);
    const double rootward_tip_prior =
        unconditional_node_probabilities_[rootward_tip.tip_node_id_];
    const double log_rootward_tip_prior = log(rootward_tip_prior);
    // #328 note that for the general case we should transpose the transition matrix
    // when coming down the tree.
    SetTransitionMatrixToHaveBranchLength(
        branch_handler_(EdgeId(rootward_tip.gpcsp_idx_)));
    quartet_root_plv_ = transition_matrix_ * GetPLV(PVId(rootward_tip.plv_idx_));
    for (const auto& sister_tip : request.sister_tips_) {
      CheckRescaling(sister_tip.plv_idx_);
      // Form the PLV on the root side of the central edge.
      SetTransitionMatrixToHaveBranchLength(
          branch_handler_(EdgeId(sister_tip.gpcsp_idx_)));
      quartet_r_s_plv_.array() =
          quartet_root_plv_.array() *
          (transition_matrix_ * GetPLV(PVId(sister_tip.plv_idx_))).array();
      // Advance it along the edge.
      SetTransitionMatrixToHaveBranchLength(
          branch_handler_(EdgeId(request.central_gpcsp_idx_)));
      quartet_q_s_plv_ = transition_matrix_ * quartet_r_s_plv_;
      for (const auto& rotated_tip : request.rotated_tips_) {
        CheckRescaling(rotated_tip.plv_idx_);
        // Form the PLV on the root side of the sorted edge.
        SetTransitionMatrixToHaveBranchLength(
            branch_handler_(EdgeId(rotated_tip.gpcsp_idx_)));
        quartet_r_sorted_plv_.array() =
            quartet_q_s_plv_.array() *
            (transition_matrix_ * GetPLV(PVId(rotated_tip.plv_idx_))).array();
        for (const auto& sorted_tip : request.sorted_tips_) {
          CheckRescaling(sorted_tip.plv_idx_);
          // P(sigma_{ijkl} | \eta)
          const double non_sequence_based_log_probability = log(
              inverted_sbn_prior_[rootward_tip.gpcsp_idx_] * q_[sister_tip.gpcsp_idx_] *
              q_[rotated_tip.gpcsp_idx_] * q_[sorted_tip.gpcsp_idx_]);
          // Now calculate the sequence-based likelihood.
          SetTransitionMatrixToHaveBranchLength(
              branch_handler_(EdgeId(sorted_tip.gpcsp_idx_)));
          per_pattern_log_likelihoods_ =
              (quartet_r_sorted_plv_.transpose() * transition_matrix_ *
               GetPLV(PVId(sorted_tip.plv_idx_)))
                  .diagonal()
                  .array()
                  .log();
          // Remove the rootward tip prior contribution from each site pattern.
          per_pattern_log_likelihoods_.array() -= log_rootward_tip_prior;
          // Weighted sum over site patterns plus the SBN term.
          result.push_back(non_sequence_based_log_probability +
                           per_pattern_log_likelihoods_.dot(site_pattern_weights_));
        }
      }
    }
  }
  return EigenVectorXdOfStdVectorDouble(result);
}
void GPEngine::ProcessQuartetHybridRequest(const QuartetHybridRequest& request) {
if (request.IsFullyFormed()) {
EigenVectorXd hybrid_log_likelihoods = CalculateQuartetHybridLikelihoods(request);
hybrid_marginal_log_likelihoods_[request.central_gpcsp_idx_] =
NumericalUtils::LogSum(hybrid_log_likelihoods);
}
}
// ** I/O
// Pretty-print a single PLV; delegates formatting to the PLV handler.
std::string GPEngine::PLVToString(const PVId plv_idx) const {
  return plv_handler_.ToString(plv_idx);
}
std::string GPEngine::LogLikelihoodMatrixToString() const {
std::stringstream out;
for (Eigen::Index i = 0; i < log_likelihoods_.rows(); i++) {
for (Eigen::Index j = 0; j < log_likelihoods_.cols(); j++) {
out << "[" << i << "," << j << "]: " << log_likelihoods_(i, j) << "\t";
}
out << std::endl;
}
return out.str();
}
| 37,668
|
C++
|
.cpp
| 751
| 43.483356
| 88
| 0.695181
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,041
|
alignment.cpp
|
phylovi_bito/src/alignment.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "alignment.hpp"
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
// Return the set of taxon names present in the alignment.
std::set<std::string> Alignment::GetNames() const {
  std::set<std::string> names;
  for (const auto &entry : data_) {
    names.insert(entry.first);
  }
  return names;
}
// Length (in sites) of the sequences; asserts the alignment is non-empty.
size_t Alignment::Length() const {
  Assert(SequenceCount() > 0,
         "Must have sequences in an alignment to ask for a Length.");
  const auto &first_entry = *data_.begin();
  return first_entry.second.size();
}
bool Alignment::IsValid() const {
if (data_.empty()) {
return false;
}
auto length = Length();
auto is_same_length = [length](const auto &datum) {
return (length == datum.second.size());
};
return std::all_of(data_.cbegin(), data_.cend(), is_same_length);
}
// Look up the sequence for a taxon, failing with a descriptive message when
// the taxon is absent.
const std::string &Alignment::at(const std::string &taxon) const {
  auto entry = data_.find(taxon);
  if (entry == data_.end()) {
    Failwith("Taxon '" + taxon + "' not found in alignment.");
  }
  return entry->second;
}
// An edited version of
// https://stackoverflow.com/questions/35251635/fasta-reader-written-in-c
// which seems like it was originally taken from
// http://rosettacode.org/wiki/FASTA_format#C.2B.2B
// Parse a FASTA file into an Alignment, failing if the file cannot be opened
// or if the parsed sequences do not all share the same length.
Alignment Alignment::ReadFasta(const std::string &fname) {
  std::ifstream input(fname);
  if (!input.good()) {
    Failwith("Could not open '" + fname + "'");
  }
  StringStringMap data;
  // Commit the (taxon, sequence) record accumulated so far, if any.
  auto commit_record = [&data](const std::string &taxon,
                               const std::string &sequence) {
    if (!taxon.empty()) {
      SafeInsert(data, taxon, sequence);
    }
  };
  std::string line;
  std::string taxon;
  std::string sequence;
  while (std::getline(input, line)) {
    if (line.empty()) {
      continue;
    }
    if (line[0] == '>') {
      // Header line: flush the previous record and start a new one.
      commit_record(taxon, sequence);
      taxon = line.substr(1);
      sequence.clear();
    } else {
      sequence += line;
    }
  }
  // Flush the final record.
  commit_record(taxon, sequence);
  Alignment alignment(data);
  if (!alignment.IsValid()) {
    Failwith("Sequences of the alignment are not all the same length.");
  }
  return alignment;
}
// Build a new alignment containing only the given site column (one character
// per taxon).
Alignment Alignment::ExtractSingleColumnAlignment(size_t which_column) const {
  Assert(which_column < Length(),
         "Alignment::ExtractSingleColumnAlignment: Given column is longer than "
         "sequence length.");
  StringStringMap single_column_map;
  for (const auto &entry : data_) {
    SafeInsert(single_column_map, entry.first, entry.second.substr(which_column, 1));
  }
  return Alignment(single_column_map);
}
| 2,578
|
C++
|
.cpp
| 84
| 27.27381
| 80
| 0.673371
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,042
|
sbn_support.cpp
|
phylovi_bito/src/sbn_support.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "sbn_support.hpp"
// Invert indexer_ into a vector of human-readable PCSP strings, positioned by
// index.
StringVector SBNSupport::PrettyIndexer() const {
  StringVector pretty(indexer_.size());
  for (const auto& entry : indexer_) {
    pretty[entry.second] = entry.first.PCSPToString();
  }
  return pretty;
}
// Print each indexer entry as "<index>\t<pcsp string>", one per line.
void SBNSupport::PrettyPrintIndexer() const {
  const auto pretty_representation = PrettyIndexer();
  size_t position = 0;
  for (const auto& pcsp_string : pretty_representation) {
    std::cout << position << "\t" << pcsp_string << std::endl;
    ++position;
  }
}
// Return indexer_ and parent_to_child_range_ converted into string-keyed maps.
std::tuple<StringSizeMap, StringSizePairMap> SBNSupport::GetIndexers() const {
auto str_indexer = StringifyMap(indexer_);
auto str_parent_to_range = StringifyMap(parent_to_child_range_);
std::string rootsplit("DAG Root Node");
SafeInsert(str_parent_to_range, rootsplit, {0, rootsplits_.size()});
return {str_indexer, str_parent_to_range};
}
// Get the indexer, but reversed and with bitsets appropriately converted to
// strings.
StringVector SBNSupport::StringReversedIndexer() const {
  // This computation is exactly PrettyIndexer: invert indexer_ into a vector
  // of PCSP strings. Delegate so the two implementations cannot drift apart.
  return PrettyIndexer();
}
// Normalize SBN parameters in log space so each conditional distribution (the
// rootsplits, then each parent-to-child range) sums to one; the actual work is
// delegated to SBNProbability.
void SBNSupport::ProbabilityNormalizeSBNParametersInLog(
    EigenVectorXdRef sbn_parameters) const {
  SBNProbability::ProbabilityNormalizeParamsInLog(sbn_parameters, rootsplits_.size(),
                                                  parent_to_child_range_);
}
| 1,642
|
C++
|
.cpp
| 38
| 39.421053
| 85
| 0.728411
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,043
|
mersenne_twister.cpp
|
phylovi_bito/src/mersenne_twister.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "mersenne_twister.hpp"
// Process-wide entropy source used only to seed the generator below.
std::random_device MersenneTwister::random_device_;
// Shared Mersenne Twister engine, seeded once at static-initialization time.
std::mt19937 MersenneTwister::random_generator_(MersenneTwister::random_device_());
| 292
|
C++
|
.cpp
| 5
| 57
| 83
| 0.803509
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,044
|
dag_branch_handler.cpp
|
phylovi_bito/src/dag_branch_handler.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "dag_branch_handler.hpp"
// ** Constructors
// Construct a handler for a fixed number of edges, optionally choosing the
// branch length optimization method up front.
DAGBranchHandler::DAGBranchHandler(const size_t count,
                                   std::optional<OptimizationMethod> method)
    : branch_lengths_(count), differences_(count) {
  if (method.has_value()) {
    SetOptimizationMethod(method.value());
  }
  // Fill both per-edge vectors with their configured defaults.
  branch_lengths_.SetDefaultValue(init_default_branch_length_);
  branch_lengths_.FillWithDefault();
  differences_.SetDefaultValue(init_default_difference_);
  differences_.FillWithDefault();
}
// Construct a handler sized to (and keeping a pointer to) a DAG.
DAGBranchHandler::DAGBranchHandler(GPDAG& dag, std::optional<OptimizationMethod> method)
    : branch_lengths_(dag), differences_(dag), dag_(&dag) {
  if (method.has_value()) {
    SetOptimizationMethod(method.value());
  }
  // Same default-fill as the count-based constructor.
  branch_lengths_.SetDefaultValue(init_default_branch_length_);
  branch_lengths_.FillWithDefault();
  differences_.SetDefaultValue(init_default_difference_);
  differences_.FillWithDefault();
}
// ** Comparators
// Three-way comparison: first by edge count, then lexicographically by branch
// length. Returns negative / zero / positive, strcmp-style.
int DAGBranchHandler::Compare(const DAGBranchHandler& lhs,
                              const DAGBranchHandler& rhs) {
  if (lhs.size() != rhs.size()) {
    // Compare sizes directly rather than subtracting: size() is unsigned, so
    // lhs.size() - rhs.size() would underflow whenever lhs is smaller.
    return (lhs.size() > rhs.size()) ? 1 : -1;
  }
  for (EdgeId edge_id{0}; edge_id < lhs.size(); edge_id++) {
    if (lhs.Get(edge_id) != rhs.Get(edge_id)) {
      // Return the sign of the difference. The previous code returned the raw
      // double difference narrowed to int, which truncated any branch length
      // difference smaller than 1.0 to 0 — making unequal handlers compare
      // (and test via operator==) as equal.
      return (lhs.Get(edge_id) > rhs.Get(edge_id)) ? 1 : -1;
    }
  }
  return 0;
}
// Two handlers are equal when Compare reports no difference (same size and
// identical branch length at every edge).
bool operator==(const DAGBranchHandler& lhs, const DAGBranchHandler& rhs) {
  return DAGBranchHandler::Compare(lhs, rhs) == 0;
}
// ** Branch Length Map
// Map each edge's PCSP bitset in the DAG to its current branch length.
DAGBranchHandler::BranchLengthMap DAGBranchHandler::BuildBranchLengthMap(
    const GPDAG& dag) const {
  BranchLengthMap pcsp_to_branch_length;
  for (EdgeId edge_id = 0; edge_id < dag.EdgeCountWithLeafSubsplits(); edge_id++) {
    pcsp_to_branch_length[dag.GetDAGEdgeBitset(edge_id)] = Get(edge_id);
  }
  return pcsp_to_branch_length;
}
// Write each PCSP's branch length from the map into the handler, resolving
// PCSPs to edge ids through the given DAG.
void DAGBranchHandler::ApplyBranchLengthMap(
    const DAGBranchHandler::BranchLengthMap& branch_length_map, const GPDAG& dag) {
  for (const auto& entry : branch_length_map) {
    Get(dag.GetEdgeIdx(entry.first)) = entry.second;
  }
}
// ** Static Functions
// Build a RootedTree over `topology`, filling each node's branch length by
// resolving the node's (parent subsplit, child subsplit) pair to a DAG edge
// and reading that edge's length from `dag_branch_handler`.
RootedTree DAGBranchHandler::BuildTreeWithBranchLengthsFromTopology(
    const GPDAG& dag, const DAGBranchHandler& dag_branch_handler,
    const Node::Topology& topology) {
  // One slot per node of a binary tree with LeafCount() leaves.
  Tree::BranchLengthVector tree_branch_lengths(2 * topology->LeafCount() - 1, 0.0);
  topology->RootedPCSPPreorder(
      [&dag, &dag_branch_handler, &tree_branch_lengths](
          const Node* sister, const Node* focal, const Node* child_left,
          const Node* child_right) {
        Bitset parent_subsplit = Bitset::Subsplit(sister->Leaves(), focal->Leaves());
        Bitset child_subsplit =
            Bitset::Subsplit(child_left->Leaves(), child_right->Leaves());
        EdgeId focal_edge_id = dag.GetEdgeIdx(parent_subsplit, child_subsplit);
        tree_branch_lengths[focal->Id()] = dag_branch_handler(focal_edge_id);
        // If adjacent nodes go to leaves.
        if (sister->IsLeaf()) {
          Bitset subsplit = Bitset::LeafSubsplitOfNonemptyClade(sister->Leaves());
          EdgeId edge_id = dag.GetEdgeIdx(parent_subsplit, subsplit);
          tree_branch_lengths[sister->Id()] = dag_branch_handler(edge_id);
        }
        if (child_left->IsLeaf()) {
          Bitset subsplit = Bitset::LeafSubsplitOfNonemptyClade(child_left->Leaves());
          EdgeId edge_id = dag.GetEdgeIdx(child_subsplit, subsplit);
          tree_branch_lengths[child_left->Id()] = dag_branch_handler(edge_id);
        }
        if (child_right->IsLeaf()) {
          Bitset subsplit = Bitset::LeafSubsplitOfNonemptyClade(child_right->Leaves());
          EdgeId edge_id = dag.GetEdgeIdx(child_subsplit, subsplit);
          tree_branch_lengths[child_right->Id()] = dag_branch_handler(edge_id);
        }
      },
      false);
  return RootedTree(topology, std::move(tree_branch_lengths));
}
// Copy branch lengths from src to dest, matching edges between the two DAGs
// by their PCSP bitsets.
void DAGBranchHandler::CopyOverBranchLengths(const DAGBranchHandler& src,
                                             DAGBranchHandler& dest) {
  const auto& src_dag = src.GetDAG();
  const auto& dest_dag = dest.GetDAG();
  for (EdgeId dest_id = 0; dest_id < dest_dag.EdgeCountWithLeafSubsplits();
       dest_id++) {
    const auto& pcsp = dest_dag.GetDAGEdgeBitset(dest_id);
    dest.Get(dest_id) = src.Get(src_dag.GetEdgeIdx(pcsp));
  }
}
// ** Optimization
// Optimize the branch length of a single edge, dispatching to the configured
// optimization method. When check_branch_convergence is set, edges whose last
// update moved less than the difference threshold are skipped.
void DAGBranchHandler::OptimizeBranchLength(const EdgeId edge_id, const PVId parent_id,
                                            const PVId child_id,
                                            const bool check_branch_convergence) {
  // Branch convergence test.
  if (check_branch_convergence &&
      (differences_(edge_id) < branch_length_difference_threshold_)) {
    return;
  }
  switch (optimization_method_) {
    case OptimizationMethod::BrentOptimization:
      return BrentOptimization(edge_id, parent_id, child_id);
    case OptimizationMethod::BrentOptimizationWithGradients:
      return BrentOptimizationWithGradients(edge_id, parent_id, child_id);
    case OptimizationMethod::GradientAscentOptimization:
      return GradientAscentOptimization(edge_id, parent_id, child_id);
    case OptimizationMethod::LogSpaceGradientAscentOptimization:
      return LogSpaceGradientAscentOptimization(edge_id, parent_id, child_id);
    case OptimizationMethod::NewtonOptimization:
      return NewtonRaphsonOptimization(edge_id, parent_id, child_id);
    default:
      Failwith("DAGBranchHandler::Optimization(): Invalid OptimizationMethod given.");
  }
}
// ** Branch
// Derivative-free Brent minimization of the negative log likelihood over the
// log branch length; updates branch_lengths_(edge_id) and records the
// absolute change in differences_(edge_id).
void DAGBranchHandler::BrentOptimization(const EdgeId edge_id, const PVId parent_id,
                                         const PVId child_id) {
  Assert(brent_nongrad_func_ != nullptr,
         "EvalBranchFunction must be assigned before calling Brent.");
  // Evaluate branch length function.
  Optimization::NegFunc<double> negative_log_likelihood =
      [this, edge_id, parent_id, child_id](double log_branch_length) {
        return brent_nongrad_func_(edge_id, parent_id, child_id, log_branch_length);
      };
  // Capture initial.
  double current_log_branch_length = log(branch_lengths_(edge_id));
  double current_neg_log_likelihood =
      negative_log_likelihood(current_log_branch_length);
  // Optimize branch length.
  const auto [log_branch_length, neg_log_likelihood] = Optimization::BrentMinimize(
      negative_log_likelihood, current_log_branch_length, min_log_branch_length_,
      max_log_branch_length_, significant_digits_for_optimization_,
      max_iter_for_optimization_, step_size_for_log_space_optimization_);
  // Numerical optimization sometimes yields new nllk > current nllk.
  // In this case, we reset the branch length to the previous value.
  if (neg_log_likelihood > current_neg_log_likelihood) {
    branch_lengths_(edge_id) = exp(current_log_branch_length);
  } else {
    branch_lengths_(edge_id) = exp(log_branch_length);
  }
  differences_(edge_id) =
      abs(exp(current_log_branch_length) - branch_lengths_(edge_id));
}
// Gradient-assisted Brent minimization of the negative log likelihood over
// the log branch length; same update/rollback behavior as BrentOptimization.
void DAGBranchHandler::BrentOptimizationWithGradients(const EdgeId edge_id,
                                                      const PVId parent_id,
                                                      const PVId child_id) {
  Assert(brent_grad_func_ != nullptr,
         "EvalBranchFunction must be assigned before calling BrentWithGradients.");
  // Evaluate branch length function.
  Optimization::NegFuncAndDerivative<double> negative_log_likelihood_and_derivative =
      [this, edge_id, parent_id, child_id](double log_branch_length) {
        return this->brent_grad_func_(edge_id, parent_id, child_id, log_branch_length);
      };
  // Convert and capture initial branch lenth to log space.
  double current_log_branch_length = log(branch_lengths_(edge_id));
  double current_neg_log_likelihood =
      negative_log_likelihood_and_derivative(current_log_branch_length).first;
  // Optimize branch length.
  const auto [log_branch_length, neg_log_likelihood] =
      Optimization::BrentMinimizeWithGradients(
          negative_log_likelihood_and_derivative, current_log_branch_length,
          min_log_branch_length_, max_log_branch_length_,
          significant_digits_for_optimization_, max_iter_for_optimization_,
          step_size_for_log_space_optimization_);
  // Numerical optimization sometimes yields new nllk > current nllk.
  // In this case, we reset the branch length to the previous value.
  if (neg_log_likelihood > current_neg_log_likelihood) {
    branch_lengths_(edge_id) = exp(current_log_branch_length);
  } else {
    branch_lengths_(edge_id) = exp(log_branch_length);
  }
  differences_(edge_id) =
      abs(exp(current_log_branch_length) - branch_lengths_(edge_id));
}
// Gradient ascent on the log likelihood, operating on the branch length in
// linear space.
void DAGBranchHandler::GradientAscentOptimization(const EdgeId edge_id,
                                                  const PVId parent_id,
                                                  const PVId child_id) {
  Assert(gradient_ascent_func_ != nullptr,
         "EvalBranchFunction must be assigned before calling GradientAscent.");
  // Evaluate branch length function.
  Optimization::FuncAndDerivative<double> log_likelihood_and_derivative =
      [this, edge_id, parent_id, child_id](double log_branch_length) {
        return this->gradient_ascent_func_(edge_id, parent_id, child_id,
                                           log_branch_length);
      };
  // Capture initial.
  double current_branch_length = branch_lengths_(edge_id);
  // Optimize branch length.
  // NOTE(review): min_log_branch_length_ (a log-space bound) is passed here,
  // while the log-space variant below passes exp(min_log_branch_length_) —
  // confirm which space Optimization::GradientAscent expects its minimum in.
  const auto branch_length = Optimization::GradientAscent(
      log_likelihood_and_derivative, branch_lengths_(edge_id),
      significant_digits_for_optimization_, step_size_for_optimization_,
      min_log_branch_length_, max_iter_for_optimization_);
  // Capture result.
  branch_lengths_(edge_id) = branch_length;
  differences_(edge_id) = abs(current_branch_length - branch_lengths_(edge_id));
}
// Gradient ascent on the log likelihood, performed in log branch length space.
void DAGBranchHandler::LogSpaceGradientAscentOptimization(const EdgeId edge_id,
                                                          const PVId parent_id,
                                                          const PVId child_id) {
  Assert(logspace_gradient_ascent_func_ != nullptr,
         "EvalBranchFunction must be assigned before calling LogSpaceGradientAscent.");
  // Evaluate branch length function.
  Optimization::FuncAndDerivative<double> log_likelihood_and_derivative =
      [this, edge_id, parent_id, child_id](double log_branch_length) {
        return this->logspace_gradient_ascent_func_(edge_id, parent_id, child_id,
                                                    log_branch_length);
      };
  // Capture initial.
  double current_branch_length = branch_lengths_(edge_id);
  // Optimize branch length.
  const auto branch_length = Optimization::LogSpaceGradientAscent(
      log_likelihood_and_derivative, branch_lengths_(edge_id),
      significant_digits_for_optimization_, step_size_for_log_space_optimization_,
      exp(min_log_branch_length_), max_iter_for_optimization_);
  // Capture result.
  branch_lengths_(edge_id) = branch_length;
  differences_(edge_id) = abs(current_branch_length - branch_lengths_(edge_id));
}
// Newton-Raphson optimization over the log branch length, using the first two
// derivatives of the log likelihood.
void DAGBranchHandler::NewtonRaphsonOptimization(const EdgeId edge_id,
                                                 const PVId parent_id,
                                                 const PVId child_id) {
  Assert(newton_raphson_func_ != nullptr,
         "EvalBranchFunction must be assigned before calling NewtonRaphson.");
  // Evaluate branch length function.
  Optimization::FuncAndFirstTwoDerivatives<double>
      log_likelihood_and_first_two_derivatives =
          [this, edge_id, parent_id, child_id](double log_branch_length) {
            return this->newton_raphson_func_(edge_id, parent_id, child_id,
                                              log_branch_length);
          };
  // Capture initial.
  double current_branch_length = branch_lengths_(edge_id);
  double current_log_branch_length = log(branch_lengths_(edge_id));
  // Optimize branch length.
  const auto log_branch_length = Optimization::NewtonRaphsonOptimization(
      log_likelihood_and_first_two_derivatives, current_log_branch_length,
      significant_digits_for_optimization_, denominator_tolerance_for_newton_,
      min_log_branch_length_, max_log_branch_length_, max_iter_for_optimization_);
  // Capture result.
  branch_lengths_(edge_id) = exp(log_branch_length);
  differences_(edge_id) = abs(current_branch_length - branch_lengths_(edge_id));
}
| 12,724
|
C++
|
.cpp
| 255
| 42.160784
| 88
| 0.680971
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,045
|
phylo_flags.cpp
|
phylovi_bito/src/phylo_flags.cpp
|
// Copyright 2019-2021 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "phylo_flags.hpp"
// ** Phylo Mapkey
// A mapkey pairs a human-readable name with its lookup key string.
PhyloMapkey::PhyloMapkey(const std::string &name, const std::string &key)
    : name_(name), key_(key){};
// Three-way comparison on the key string only (names do not participate).
int PhyloMapkey::Compare(const PhyloMapkey &mapkey_a, const PhyloMapkey &mapkey_b) {
  // If mapkey_names are equal, so are the flag.
  if (mapkey_a.key_ == mapkey_b.key_) {
    return 0;
  }
  return (mapkey_a.key_ > mapkey_b.key_) ? 1 : -1;
};
// Comparison operators, all defined in terms of Compare.
bool PhyloMapkey::operator==(const PhyloMapkey &other) const {
  return Compare(*this, other) == 0;
};
bool operator==(const PhyloMapkey &lhs, const PhyloMapkey &rhs) {
  return PhyloMapkey::Compare(lhs, rhs) == 0;
};
bool PhyloMapkey::operator<(const PhyloMapkey &other) const {
  return Compare(*this, other) < 0;
};
bool operator<(const PhyloMapkey &lhs, const PhyloMapkey &rhs) {
  return PhyloMapkey::Compare(lhs, rhs) < 0;
};
// Compare against String
bool PhyloMapkey::operator==(const std::string &other_name) const {
  return key_ == other_name;
};
bool PhyloMapkey::operator<(const std::string &other_name) const {
  return key_ < other_name;
};
// ** Phylo Mapkey Set
// Build a set from a list of mapkeys; duplicates trigger the AddMapkey assert.
PhyloMapkeySet::PhyloMapkeySet(const std::string &name,
                               const std::vector<PhyloMapkey> &mapkeys)
    : name_(name) {
  for (const auto &mapkey : mapkeys) {
    AddMapkey(mapkey);
  }
};
// Insert a mapkey, keyed by its name; asserts uniqueness unless overwrite.
void PhyloMapkeySet::AddMapkey(const PhyloMapkey &mapkey, const bool overwrite) {
  if (!overwrite) {
    Assert(!ContainsMapkey(mapkey),
           "Attempted to insert Mapkey that already exists in MapkeySet, or non-unique "
           "flag name: " +
               mapkey.GetKey());
  }
  all_mapkeys_.insert(std::make_pair(mapkey.GetName(), mapkey));
};
// Membership test by mapkey name (matching AddMapkey's key choice).
bool PhyloMapkeySet::ContainsMapkey(const PhyloMapkey &mapkey) {
  return all_mapkeys_.find(mapkey.GetName()) != all_mapkeys_.end();
};
// One "name | key" line per mapkey.
std::string PhyloMapkeySet::ToString() const {
  std::stringstream str;
  for (const auto &[name, mapkey] : all_mapkeys_) {
    std::ignore = name;
    str << mapkey.GetName() << " | " << mapkey.GetKey() << std::endl;
  }
  return str.str();
};
// ** Phylo FlagOption
// Default option: no type, no data, not set under either default regime.
PhyloFlagOption::PhyloFlagOption()
    : is_set_when_running_defaults_(false),
      is_set_when_not_running_defaults_(false),
      flag_type_(FlagType::None),
      data_type_(DataType::None) {}
// Fully-specified option constructor.
PhyloFlagOption::PhyloFlagOption(const std::string &name, const std::string &flag,
                                 const FlagType flag_type, const DataType data_type,
                                 const bool is_set_when_running_defaults,
                                 const bool is_set_when_not_running_defaults)
    : name_(name),
      flag_(flag),
      is_set_when_running_defaults_(is_set_when_running_defaults),
      is_set_when_not_running_defaults_(is_set_when_not_running_defaults),
      flag_type_(flag_type),
      data_type_(data_type),
      child_flags_() {}
// Factory for a boolean (presence-only) option.
PhyloFlagOption PhyloFlagOption::BooleanOption(
    const std::string &name, const std::string &flag,
    const bool is_set_when_running_defaults,
    const bool is_set_when_not_running_defaults) {
  return {name,
          flag,
          FlagType::Boolean,
          DataType::None,
          is_set_when_running_defaults,
          is_set_when_not_running_defaults};
}
// Factory for an option that carries an associated value.
PhyloFlagOption PhyloFlagOption::SetValueOption(const std::string &name,
                                                const std::string &flag,
                                                const DataType data_type) {
  return {name, flag, FlagType::SetValue, data_type, false, false};
}
// Child flags are stored by flag string; setting this option also sets them.
void PhyloFlagOption::AddChild(const PhyloFlagOption &child) { AddChild(child.flag_); }
void PhyloFlagOption::AddChild(const std::string child_flag) {
  child_flags_.push_back(child_flag);
}
std::string PhyloFlagOption::ToString() const {
  std::stringstream str;
  str << "{ " << name_ << ": " << flag_ << " }";
  return str.str();
}
// Three-way comparison on the flag string only.
int PhyloFlagOption::Compare(const PhyloFlagOption &flag_a,
                             const PhyloFlagOption &flag_b) {
  // If flag_names are equal, so are the flag.
  if (flag_a.flag_ == flag_b.flag_) {
    return 0;
  }
  return (flag_a.flag_ > flag_b.flag_) ? 1 : -1;
}
// Comparison operators, all defined in terms of Compare.
bool PhyloFlagOption::operator==(const PhyloFlagOption &other) {
  return Compare(*this, other) == 0;
}
bool operator==(const PhyloFlagOption &lhs, const PhyloFlagOption &rhs) {
  return PhyloFlagOption::Compare(lhs, rhs) == 0;
}
bool PhyloFlagOption::operator<(const PhyloFlagOption &other) {
  return Compare(*this, other) < 0;
}
bool operator<(const PhyloFlagOption &lhs, const PhyloFlagOption &rhs) {
  return PhyloFlagOption::Compare(lhs, rhs) < 0;
}
// Compare against a raw flag string.
bool PhyloFlagOption::operator==(const std::string &other_name) {
  return flag_ == other_name;
}
bool PhyloFlagOption::operator<(const std::string &other_name) {
  return flag_ < other_name;
}
// ** Phylo FlagOption Set
// Every optionset carries the master run_defaults_ option.
PhyloFlagOptionSet::PhyloFlagOptionSet(const std::string &name) : name_(name) {
  AddFlagOption(MasterFlagOptions::run_defaults_);
}
// Build from a list of options, then append run_defaults_.
PhyloFlagOptionSet::PhyloFlagOptionSet(const std::string &name,
                                       const std::vector<PhyloFlagOption> &options)
    : name_(name) {
  for (const auto &option : options) {
    AddFlagOption(option);
  }
  AddFlagOption(MasterFlagOptions::run_defaults_);
}
// As above, but also register this set as a child of the given parent set.
PhyloFlagOptionSet::PhyloFlagOptionSet(const std::string &name,
                                       const std::vector<PhyloFlagOption> &options,
                                       PhyloFlagOptionSet &parent_optionset)
    : name_(name) {
  for (const auto &option : options) {
    AddFlagOption(option);
  }
  AddFlagOption(MasterFlagOptions::run_defaults_);
  parent_optionset.AddSubPhyloFlagOptionSet(*this);
}
// Insert an option, keyed by its flag string; asserts uniqueness unless
// overwrite is requested.
void PhyloFlagOptionSet::AddFlagOption(const PhyloFlagOption &option,
                                       const bool overwrite) {
  if (!overwrite) {
    Assert(!ContainsFlagOption(option),
           "Attempted to add FlagOption that already exists in FlagOptionSet, or "
           "non-unique flag name: " +
               option.GetFlag());
  }
  all_options_.insert(std::make_pair(option.GetFlag(), option));
}
bool PhyloFlagOptionSet::ContainsFlagOption(const PhyloFlagOption &option) {
return all_options_.find(option.GetName()) != all_options_.end();
}
std::optional<const PhyloFlagOption> PhyloFlagOptionSet::FindFlagOptionByName(
const std::string &flag_name) const {
// Find if exists in given optionset.
if (all_options_.find(flag_name) != all_options_.end()) {
return all_options_.at(flag_name);
}
// Find if exists in any child optionsets.
for (const auto &[name, sub_optionset] : sub_optionsets_) {
std::ignore = name;
auto sub_option = sub_optionset->FindFlagOptionByName(flag_name);
if (sub_option.has_value()) {
return sub_option;
}
}
return std::nullopt;
}
// Register a child optionset under its name; asserts the name is unused
// unless overwrite is requested. Stores a non-owning pointer.
void PhyloFlagOptionSet::AddSubPhyloFlagOptionSet(PhyloFlagOptionSet &sub_optionset,
                                                  const bool overwrite) {
  if (!overwrite) {
    Assert(sub_optionsets_.find(sub_optionset.GetName()) == sub_optionsets_.end(),
           "Attempted to add a PhyloFlagOptionSet under a pre-existing name: " +
               sub_optionset.GetName());
  }
  sub_optionsets_.insert(std::make_pair(sub_optionset.GetName(), &sub_optionset));
}
// Find the child optionset registered under the given name, if any.
std::optional<PhyloFlagOptionSet *> PhyloFlagOptionSet::FindSubPhyloFlagOptionSet(
    const std::string name) const {
  auto sub_optionset = sub_optionsets_.find(name);
  if (sub_optionset == sub_optionsets_.end()) {
    return std::nullopt;
  }
  // Reuse the iterator rather than performing a second lookup via at().
  return sub_optionset->second;
}
// Collect (name, flag) pairs for every option in this set and, recursively,
// in all child optionsets.
StringPairVector PhyloFlagOptionSet::GetAllNames(
    std::optional<StringPairVector> flag_vec) const {
  if (!flag_vec.has_value()) {
    flag_vec = StringPairVector();
  }
  for (const auto &[name, flag] : GetOptions()) {
    std::ignore = name;
    flag_vec->push_back({flag.GetName(), flag.GetFlag()});
  }
  for (const auto &[name, sub_optionset] : GetSubOptionsets()) {
    std::ignore = name;
    // flag_vec is an optional passed by value, so capture the returned,
    // extended vector; previously the recursive call's result was discarded
    // and sub-optionset names were silently dropped from the output.
    flag_vec = sub_optionset->GetAllNames(flag_vec);
  }
  return *flag_vec;
}
// Render the set's name followed by one "name | flag | children" line per
// option.
std::string PhyloFlagOptionSet::ToString() const {
  std::stringstream out;
  out << "NAME:" << GetName() << std::endl;
  out << "FLAGS:" << std::endl;
  for (const auto &entry : all_options_) {
    const auto &option = entry.second;
    out << option.GetName() << " | " << option.GetFlag() << " | "
        << option.GetChildFlags() << std::endl;
  }
  return out.str();
}
// ** Phylo Flags
// Remove all explicitly-set flags.
void PhyloFlags::ClearFlags() { explicit_flags_.clear(); }
// Merge another PhyloFlags object's explicit flags into this one. With
// overwrite, the other's settings win; otherwise they only supplement flags
// not already set here.
void PhyloFlags::AddPhyloFlags(const std::optional<PhyloFlags> other_flags,
                               const bool overwrite) {
  if (other_flags.has_value()) {
    for (const auto &[name, bool_data] : other_flags->GetFlagMap()) {
      const auto &[set, data] = bool_data;
      // determines if other_flags will not overwrite flags, just supplements it.
      if (overwrite || (!IsFlagInMap(name))) {
        if (data.has_value()) {
          SetFlag(name, set, *data);
        } else {
          SetFlag(name, set);
        }
      }
    }
  }
}
// Set a flag (and recursively its child flags), and detect the special
// run_defaults_ flag. NOTE(review): the recursive call passes a child flag
// string — presumably resolved by a string-keyed SetFlag overload declared in
// the header; confirm.
void PhyloFlags::SetFlag(const PhyloFlagOption &flag, const bool is_set,
                         const double value) {
  // Add given flag.
  AddFlagToMap(flag, is_set, value);
  // Add all child flags of given flag.
  for (const auto &child_flag : flag.GetChildFlags()) {
    SetFlag(child_flag, value);
  }
  // If flag being set is the special run_defaults_ flag.
  if (MasterFlagOptions::run_defaults_.GetName() == flag.GetName()) {
    SetRunDefaultsFlag(true);
  }
}
// Convenience overload: set the flag to true with an associated value.
void PhyloFlags::SetFlag(const PhyloFlagOption &flag, const double value) {
  SetFlag(flag, true, value);
}
// Record a flag's (set, value) state in the explicit-flags map.
void PhyloFlags::AddFlagToMap(const PhyloFlagOption &flag, const bool set,
                              const double value) {
  // Use insert_or_assign rather than insert: std::map::insert is a no-op when
  // the key already exists, which silently broke re-setting a flag (e.g. the
  // overwrite path in AddPhyloFlags never actually updated existing entries).
  explicit_flags_.insert_or_assign(flag.GetFlag(), std::make_pair(set, value));
}
// Toggle / query the special run-defaults mode.
void PhyloFlags::SetRunDefaultsFlag(bool is_set) { is_run_defaults_ = is_set; }
bool PhyloFlags::IsRunDefaultsSet() const { return is_run_defaults_; }
// Has this flag been explicitly set (in either direction)?
bool PhyloFlags::IsFlagInMap(const PhyloFlagOption &flag) const {
  return IsFlagInMap(flag.GetFlag());
}
bool PhyloFlags::IsFlagInMap(const std::string &flag_name) const {
  return (explicit_flags_.find(flag_name) != explicit_flags_.end());
}
// Fetch a flag's associated value; only valid for SetValue-type options.
std::optional<double> PhyloFlags::GetFlagValue(const PhyloFlagOption &flag) const {
  Assert(flag.GetFlagType() == PhyloFlagOption::FlagType::SetValue,
         "Requested FlagOption value from flag type that does not store associated "
         "value.");
  return GetFlagValue(flag.GetFlag());
}
// String-keyed variant: nullopt when the flag was never explicitly set.
std::optional<double> PhyloFlags::GetFlagValue(const std::string &flag_name) const {
  if (IsFlagInMap(flag_name)) {
    const auto &[set, value] = explicit_flags_.at(flag_name);
    std::ignore = set;
    return value;
  }
  return std::nullopt;
}
// Returns the value of the flag if set, otherwise returns default value.
double PhyloFlags::GetFlagValueIfSet(const std::string &flag_name,
                                     const double default_value) const {
  return GetFlagValue(flag_name).value_or(default_value);
}
double PhyloFlags::GetFlagValueIfSet(const PhyloFlagOption &flag,
                                     const double default_value) const {
  return GetFlagValueIfSet(flag.GetFlag(), default_value);
}
// Static convenience: when no flags object was passed at all, just use the
// default value.
double PhyloFlags::GetFlagValueIfSet(const std::optional<PhyloFlags> phylo_flags,
                                     const PhyloFlagOption &flag,
                                     double default_value) {
  if (!phylo_flags.has_value()) {
    return default_value;
  }
  return phylo_flags->GetFlagValueIfSet(flag, default_value);
}
// Read-only access to the explicit-flag map and the backing optionset.
const PhyloFlags::FlagMap &PhyloFlags::GetFlagMap() const { return explicit_flags_; }
const PhyloFlagOptionSet &PhyloFlags::GetFlagOptionSet() const { return *optionset_; }
// Render the run-defaults state and each explicitly-set flag as
// "(flag: is_set)" entries inside braces.
std::string PhyloFlags::ToString() const {
  std::ostringstream out;
  out << "{ ";
  out << "(DEFAULT: " << IsRunDefaultsSet() << "), ";
  for (const auto &entry : explicit_flags_) {
    out << "(" << entry.first << ": " << entry.second.first << "), ";
  }
  out << "}";
  return out.str();
}
// Resolve whether a flag is effectively on, in priority order: explicit
// setting, then run-defaults behavior, then the flag's own fallback behavior.
bool PhyloFlags::IsFlagSet(const PhyloFlagOption &flag) const {
  // (1) Priority is given to explicitly set options.
  if (IsFlagInMap(flag)) {
    const auto &[set, value] = GetFlagMap().at(flag.GetFlag());
    std::ignore = value;
    return set;
  }
  // (2) If is_run_default_ option is set, use given individual flag's defined default
  // behavior.
  if (is_run_defaults_) {
    return flag.IsSetWhenRunningDefaults();
  }
  // (3) Otherwise, use flag type-based's default behavior.
  return flag.IsSetWhenNotRunningDefaults();
}
bool PhyloFlags::IsFlagNotSet(const PhyloFlagOption &flag) const {
  return !IsFlagSet(flag);
}
// Static variants that also handle the "no flags object passed" case.
bool PhyloFlags::IsFlagSet(const std::optional<PhyloFlags> phylo_flags,
                           const PhyloFlagOption &flag) {
  // (1) If user has not passed any flags, then fall back to default behavior.
  if (!phylo_flags.has_value()) {
    return flag.IsSetWhenRunningDefaults();
  }
  // (2) If user passed flags, then check if option is set.
  return phylo_flags->IsFlagSet(flag);
}
bool PhyloFlags::IsFlagNotSet(const std::optional<PhyloFlags> phylo_flags,
                              const PhyloFlagOption &flag) {
  return !PhyloFlags::IsFlagSet(phylo_flags, flag);
}
| 13,448
|
C++
|
.cpp
| 346
| 33.352601
| 88
| 0.667024
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,046
|
sbn_probability.cpp
|
phylovi_bito/src/sbn_probability.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "sbn_probability.hpp"
#include <algorithm>
#include <cmath>
#include <iostream>
#include <numeric>
#include <utility>
#include <vector>
#include "ProgressBar.hpp"
#include "numerical_utils.hpp"
// Increment all entries from an index vector by a log(value).
void IncrementByInLog(EigenVectorXdRef vec, const SizeVector& indices, double value) {
  // Log-space accumulation: vec[idx] <- log(exp(vec[idx]) + exp(value)).
  for (size_t i = 0; i < indices.size(); ++i) {
    const auto idx = indices[i];
    vec[idx] = NumericalUtils::LogAdd(vec[idx], value);
  }
}
// Increment all entries from an index vector vector by a log(value).
void IncrementByInLog(EigenVectorXdRef vec, const SizeVectorVector& index_vector_vector,
                      double value) {
  // Apply the single-index-vector overload to each inner vector in turn.
  for (size_t i = 0; i < index_vector_vector.size(); ++i) {
    IncrementByInLog(vec, index_vector_vector[i], value);
  }
}
// Increment all entries from an index vector of length k by a value taken from a vector
// of values of length k.
void IncrementByInLog(EigenVectorXdRef vec, const SizeVector& indices,
                      const EigenConstVectorXdRef values) {
  Assert(static_cast<Eigen::Index>(indices.size()) == values.size(),
         "Indices and values don't have matching size.");
  // Pairwise log-space add: vec[indices[i]] <- LogAdd(vec[indices[i]], values[i]).
  for (Eigen::Index i = 0; i < values.size(); ++i) {
    vec[indices[i]] = NumericalUtils::LogAdd(vec[indices[i]], values[i]);
  }
}
// Repeat the previous increment operation across a vector of index vectors with a fixed
// vector of values.
void IncrementByInLog(EigenVectorXdRef vec, const SizeVectorVector& index_vector_vector,
                      const EigenConstVectorXdRef values) {
  Assert(static_cast<Eigen::Index>(index_vector_vector.size()) == values.size(),
         "Indices and values don't have matching size.");
  // Each inner index vector is incremented (in log space) by its matching value.
  for (Eigen::Index i = 0; i < values.size(); ++i) {
    IncrementByInLog(vec, index_vector_vector[i], values[i]);
  }
}
// Increment all entries from an index vector by a value.
void IncrementBy(EigenVectorXdRef vec, const SizeVector& indices, double value) {
  // Plain (non-log) accumulation over the listed entries.
  for (size_t i = 0; i < indices.size(); ++i) {
    vec[indices[i]] += value;
  }
}
// Increment all entries from an index vector vector by a value.
void IncrementBy(EigenVectorXdRef vec, const SizeVectorVector& index_vector_vector,
                 double value) {
  // Delegate to the single-index-vector overload per inner vector.
  for (size_t i = 0; i < index_vector_vector.size(); ++i) {
    IncrementBy(vec, index_vector_vector[i], value);
  }
}
// Increment all entries from an index vector of length k by a value taken from a vector
// of values of length k.
void IncrementBy(EigenVectorXdRef vec, const SizeVector& indices,
                 const EigenConstVectorXdRef values) {
  Assert(static_cast<Eigen::Index>(indices.size()) == values.size(),
         "Indices and values don't have matching size.");
  // Pairwise add: vec[indices[i]] += values[i].
  for (Eigen::Index i = 0; i < values.size(); ++i) {
    vec[indices[i]] += values[i];
  }
}
// Repeat the previous increment operation across a vector of index vectors with a fixed
// vector of values.
void IncrementBy(EigenVectorXdRef vec, const SizeVectorVector& index_vector_vector,
                 const EigenConstVectorXdRef values) {
  Assert(static_cast<Eigen::Index>(index_vector_vector.size()) == values.size(),
         "Indices and values don't have matching size.");
  // Each inner index vector is incremented by its matching value.
  for (Eigen::Index i = 0; i < values.size(); ++i) {
    IncrementBy(vec, index_vector_vector[i], values[i]);
  }
}
// Take the product of the entries of vec in indices times starting_value.
double ProductOf(const EigenConstVectorXdRef vec, const SizeVector& indices,
const double starting_value) {
double result = starting_value;
for (const auto& idx : indices) {
result *= vec[idx];
}
return result;
}
// Take the sum of the entries of vec in indices plus starting_value.
double SBNProbability::SumOf(const EigenConstVectorXdRef vec, const SizeVector& indices,
const double starting_value) {
double result = starting_value;
for (const auto& idx : indices) {
result += vec[idx];
}
return result;
}
// Probability-normalize a range of values in a vector.
void ProbabilityNormalizeRange(EigenVectorXdRef vec, std::pair<size_t, size_t> range) {
  auto [start_idx, end_idx] = range;
  // Divide the half-open segment [start_idx, end_idx) by its sum so it sums to 1.
  auto segment = vec.segment(start_idx, end_idx - start_idx);
  segment /= segment.sum();
}
// We assume that vec is laid out like sbn_parameters (see top).
void ProbabilityNormalizeParams(EigenVectorXdRef vec, size_t rootsplit_count,
                                const BitsetSizePairMap& parent_to_range) {
  // Normalize the rootsplit block first, then each parent's PCSP block
  // independently.
  ProbabilityNormalizeRange(vec, {0, rootsplit_count});
  for (const auto& [_, range] : parent_to_range) {
    std::ignore = _;
    ProbabilityNormalizeRange(vec, range);
  }
}
// Normalize such that values become logs of probabilities.
void SBNProbability::ProbabilityNormalizeRangeInLog(EigenVectorXdRef vec,
                                                    std::pair<size_t, size_t> range) {
  auto [start_idx, end_idx] = range;
  // Normalize in log space so the segment's exponentiated values sum to 1.
  auto segment = vec.segment(start_idx, end_idx - start_idx);
  NumericalUtils::ProbabilityNormalizeInLog(segment);
}
// We assume that vec is laid out like sbn_parameters (see top).
void SBNProbability::ProbabilityNormalizeParamsInLog(
    EigenVectorXdRef vec, size_t rootsplit_count,
    const BitsetSizePairMap& parent_to_range) {
  // Log-space analogue of ProbabilityNormalizeParams: rootsplit block, then each
  // parent's PCSP block.
  ProbabilityNormalizeRangeInLog(vec, {0, rootsplit_count});
  for (const auto& [_, range] : parent_to_range) {
    std::ignore = _;
    ProbabilityNormalizeRangeInLog(vec, range);
  }
}
// Set the provided counts vector to be the counts of the rootsplits and PCSPs provided
// in the input.
void SetCounts(
    EigenVectorXdRef counts,
    const UnrootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range) {
  // These two parameters are unused here but kept for signature parity with
  // SetLogCounts; explicitly discard them to silence unused-parameter warnings
  // (matching the std::ignore convention used elsewhere in this file).
  std::ignore = rootsplit_count;
  std::ignore = parent_to_range;
  // Accumulate the (linear-space) topology counts into the counts vector.
  counts.setZero();
  for (const auto& [indexer_representation, int_topology_count] :
       indexer_representation_counter) {
    const auto topology_count = static_cast<double>(int_topology_count);
    IncrementBy(counts, indexer_representation, topology_count);
  }
}
// Set the provided counts vector to be the log of the counts of the rootsplits and
// PCSPs provided in the input.
// Note code duplication with the override below for
// UnrootedIndexerRepresentationCounter.
// We could refactor with templates, but then this whole file would have to go in the
// header.
void SetLogCounts(
    EigenVectorXdRef counts,
    const RootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range) {
  // Start from log(0) so log-space accumulation (LogAdd) works correctly.
  counts.fill(DOUBLE_NEG_INF);
  for (const auto& [indexer_representation, int_topology_count] :
       indexer_representation_counter) {
    const auto log_topology_count = log(static_cast<double>(int_topology_count));
    IncrementByInLog(counts, indexer_representation, log_topology_count);
  }
}
// Note code duplication with the override below for
// UnrootedIndexerRepresentationCounter.
void SBNProbability::SimpleAverage(
    EigenVectorXdRef sbn_parameters,
    const RootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range) {
  // The (unnormalized, log-space) simple-average estimate is just the log counts.
  SetLogCounts(sbn_parameters, indexer_representation_counter, rootsplit_count,
               parent_to_range);
}
// Set the provided counts vector to be the log of the counts of the rootsplits and
// PCSPs provided in the input.
// Note code duplication with the override above for RootedIndexerRepresentationCounter.
void SetLogCounts(
    EigenVectorXdRef counts,
    const UnrootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range) {
  // Start from log(0) so log-space accumulation (LogAdd) works correctly.
  counts.fill(DOUBLE_NEG_INF);
  for (const auto& [indexer_representation, int_topology_count] :
       indexer_representation_counter) {
    const auto log_topology_count = log(static_cast<double>(int_topology_count));
    IncrementByInLog(counts, indexer_representation, log_topology_count);
  }
}
// Note code duplication with the override above for RootedIndexerRepresentationCounter.
void SBNProbability::SimpleAverage(
    EigenVectorXdRef sbn_parameters,
    const UnrootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range) {
  // The (unnormalized, log-space) simple-average estimate is just the log counts.
  SetLogCounts(sbn_parameters, indexer_representation_counter, rootsplit_count,
               parent_to_range);
}
// All references to equations, etc, are to the 2018 NeurIPS paper.
// However, if you are doing a detailed read see doc/tex, because our definition of
// score differs from that in the NeurIPS paper, and also for details of how the prior
// calculation works.
// Fit SBN parameters by (optionally alpha-regularized) EM. sbn_parameters is
// updated in place, kept normalized in log space; returns the per-iteration
// score history (marginal log likelihood of the training trees, possibly
// truncated at convergence).
EigenVectorXd SBNProbability::ExpectationMaximization(
    EigenVectorXdRef sbn_parameters,
    const UnrootedIndexerRepresentationCounter& indexer_representation_counter,
    size_t rootsplit_count, const BitsetSizePairMap& parent_to_range, double alpha,
    size_t max_iter, double score_epsilon) {
  Assert(!indexer_representation_counter.empty(),
         "Empty indexer_representation_counter.");
  auto edge_count = indexer_representation_counter[0].first.size();
  // The \bar{m} vectors (Algorithm 1) in log space.
  // They are packed into a single vector as sbn_parameters is.
  EigenVectorXd log_m_bar(sbn_parameters.size());
  // The q weight of a rootsplit is the probability of each rooting given the current
  // SBN parameters.
  EigenVectorXd log_q_weights(edge_count);
  // The \tilde{m} vectors (p.6): the counts vector before normalization to get the
  // SimpleAverage estimate. If alpha is nonzero log_m_tilde gets scaled by it below.
  EigenVectorXd log_m_tilde(sbn_parameters.size());
  SetLogCounts(log_m_tilde, indexer_representation_counter, rootsplit_count,
               parent_to_range);
  // m_tilde is the counts, but marginalized over a uniform distribution on the rooting
  // edge. Thus we take the total counts and then divide by the edge count.
  log_m_tilde = log_m_tilde.array() - log(static_cast<double>(edge_count));
  // The normalized version of m_tilde is the SA estimate, which is our starting point.
  sbn_parameters = log_m_tilde;
  // We need to ensure sbn_parameters is normalized as we are computing log P(S_1, T^u)
  // repeatedly.
  ProbabilityNormalizeParamsInLog(sbn_parameters, rootsplit_count, parent_to_range);
  // We need an exponentiated version of log_m_tilde for the score calculation if alpha
  // is nonzero.
  EigenVectorXd m_tilde_for_positive_alpha;
  if (alpha > 0.) {
    // For the regularized case, we always need log(alpha) + log_m_tilde so we store
    // this in log_m_tilde.
    log_m_tilde = log_m_tilde.array() + log(alpha);
    // We also need exp(log_m_tilde) = \alpha * tilde{m}_{s|t} for the regularized EM
    // algorithm.
    m_tilde_for_positive_alpha = log_m_tilde.array().exp();
  }
  // Our score is the marginal log likelihood of the training collection of trees (see
  // doc/tex).
  EigenVectorXd score_history = EigenVectorXd::Zero(max_iter);
  // Do the specified number of EM loops.
  ProgressBar progress_bar(max_iter);
  for (size_t em_idx = 0; em_idx < max_iter; ++em_idx) {
    log_m_bar.setConstant(DOUBLE_NEG_INF);
    // Loop over topologies (as manifested by their indexer representations).
    for (const auto& [indexer_representation, int_topology_count] :
         indexer_representation_counter) {
      // The number of times this topology was seen in the counter.
      const auto topology_count = static_cast<double>(int_topology_count);
      // Calculate the q weights for this topology.
      log_q_weights.setConstant(DOUBLE_NEG_INF);
      Assert(indexer_representation.size() == edge_count,
             "Indexer representation length is not constant.");
      // Loop over the various rooting positions of this topology, using log_q_weights
      // to store the probability of the tree in the various rootings (we will normalize
      // it later).
      for (size_t rooting_position = 0; rooting_position < edge_count;
           ++rooting_position) {
        const RootedIndexerRepresentation& rooted_representation =
            indexer_representation[rooting_position];
        // Calculate the SBN probability of this topology rooted at this position.
        double log_p_rooted_topology = SumOf(sbn_parameters, rooted_representation, 0.);
        // SHJ: Sometimes overflow is reported, sometimes it's underflow...
        if (fetestexcept(FE_OVER_AND_UNDER_FLOW_EXCEPT)) {
          log_q_weights[rooting_position] = DOUBLE_MINIMUM;
          feclearexcept(FE_OVER_AND_UNDER_FLOW_EXCEPT);
        } else {
          log_q_weights[rooting_position] = log_p_rooted_topology;
        }
      }  // End of looping over rooting positions.
      double log_p_unrooted_topology = NumericalUtils::LogSum(log_q_weights);
      score_history[em_idx] += topology_count * log_p_unrooted_topology;
      // Normalize q_weights to achieve the E-step of Algorithm 1.
      // For the increment step (M-step of Algorithm 1) we want a full topology
      // count rather than just the unique count. So we multiply the q_weights by the
      // topology count (in log space, it becomes summation rather than multiplication).
      log_q_weights =
          log_q_weights.array() + (-log_p_unrooted_topology + log(topology_count));
      // Increment the SBN-parameters-to-be by the q-weighted counts.
      IncrementByInLog(log_m_bar, indexer_representation, log_q_weights);
    }  // End of looping over topologies.
    // Store the proper value in sbn_parameters.
    sbn_parameters = (alpha > 0.)
                         ? NumericalUtils::LogAddVectors(log_m_bar, log_m_tilde)
                         : log_m_bar;
    // We normalize sbn_parameters right away to ensure that it is always normalized.
    ProbabilityNormalizeParamsInLog(sbn_parameters, rootsplit_count, parent_to_range);
    if (alpha > 0.) {
      // Last line of the section on EM in doc/tex.
      score_history[em_idx] += m_tilde_for_positive_alpha.dot(sbn_parameters);
    }
    // Return if we've converged according to score.
    if (em_idx > 0) {
      double scaled_score_improvement =
          (score_history[em_idx] - score_history[em_idx - 1]) /
          fabs(score_history[em_idx - 1]);
      // To monitor correctness of EM, we check to ensure that the score is
      // monotonically increasing (modulo numerical instability).
      // SHJ: -EPS is too small, I noticed the assertion failure for
      // scaled_score_improvement of -6e-16. Using ERR_TOLERANCE.
      Assert(scaled_score_improvement > -ERR_TOLERANCE, "Score function decreased.");
      if (fabs(scaled_score_improvement) < score_epsilon) {
        std::cout << "EM converged according to normalized score improvement < "
                  << score_epsilon << "." << std::endl;
        score_history.resize(em_idx + 1);
        break;
      }
    }
    ++progress_bar;
    progress_bar.display();
  }  // End of EM loop.
  progress_bar.done();
  NumericalUtils::ReportFloatingPointEnvironmentExceptions("|After EM|");
  return score_history;
}
bool SBNProbability::IsInSBNSupport(
const RootedIndexerRepresentation& rooted_representation,
size_t out_of_support_sentinel_value) {
for (size_t idx : rooted_representation) {
// Our convention is that out_of_support_sentinel_value is one more than the maximum
// allowed PCSP index, so here we check the index is reasonable.
Assert(idx <= out_of_support_sentinel_value,
"Rooted tree index is greater than maximum permitted.");
if (idx == out_of_support_sentinel_value) {
return false;
}
}
return true;
};
double SBNProbability::ProbabilityOfSingle(
    const EigenConstVectorXdRef sbn_parameters,
    const RootedIndexerRepresentation& rooted_representation) {
  const size_t sbn_parameter_count = sbn_parameters.size();
  // Trees outside the SBN support have probability zero.
  if (!IsInSBNSupport(rooted_representation, sbn_parameter_count)) {
    return 0.;
  }
  // Otherwise, exponentiate the sum of log-parameters along the representation.
  return exp(SumOf(sbn_parameters, rooted_representation, 0.));
}
// Probability of an unrooted topology: sum (in log space) over all rootings,
// treating out-of-support rootings as probability zero.
double SBNProbability::ProbabilityOfSingle(
    const EigenConstVectorXdRef sbn_parameters,
    const UnrootedIndexerRepresentation& indexer_representation) {
  size_t sbn_parameter_count = sbn_parameters.size();
  double log_total_probability = DOUBLE_NEG_INF;
  for (const auto& rooted_representation : indexer_representation) {
    log_total_probability = NumericalUtils::LogAdd(
        log_total_probability,
        IsInSBNSupport(rooted_representation, sbn_parameter_count)
            ? SumOf(sbn_parameters, rooted_representation, 0.)
            : DOUBLE_NEG_INF);
  }
  return exp(log_total_probability);
}
// Vector of per-topology probabilities for a collection of rooted topologies.
EigenVectorXd SBNProbability::ProbabilityOfCollection(
    const EigenConstVectorXdRef sbn_parameters,
    const std::vector<RootedIndexerRepresentation>& indexer_representations) {
  // Lambdas aren't std:functions, so we make a std::function here.
  std::function<double(const RootedIndexerRepresentation&)> f =
      [sbn_parameters](const RootedIndexerRepresentation& indexer_representation) {
        return ProbabilityOfSingle(sbn_parameters, indexer_representation);
      };
  return EigenVectorXdOfStdVectorT(indexer_representations, f);
}
// Vector of per-topology probabilities for a collection of unrooted topologies.
EigenVectorXd SBNProbability::ProbabilityOfCollection(
    const EigenConstVectorXdRef sbn_parameters,
    const std::vector<UnrootedIndexerRepresentation>& indexer_representations) {
  std::function<double(const UnrootedIndexerRepresentation&)> f =
      [sbn_parameters](const UnrootedIndexerRepresentation& indexer_representation) {
        return ProbabilityOfSingle(sbn_parameters, indexer_representation);
      };
  return EigenVectorXdOfStdVectorT(indexer_representations, f);
}
| 17,662
|
C++
|
.cpp
| 363
| 43.661157
| 88
| 0.725725
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,047
|
gp_operation.cpp
|
phylovi_bito/src/gp_operation.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "gp_operation.hpp"
// Build the marginalization-preparation summary by visiting every operation.
GPOperations::PrepForMarginalization GPOperations::PrepForMarginalizationOfOperations(
    const GPOperationVector& operations) {
  return PrepForMarginalizationVisitor(operations).ToPrepForMarginalization();
}
// Stream a single operation; std::visit dispatches on the active alternative.
std::ostream& operator<<(std::ostream& os, GPOperation const& operation) {
  std::visit(GPOperationOstream{os}, operation);
  return os;
}
// Stream a vector of operations as a bracketed, one-per-line, comma-separated list.
std::ostream& operator<<(std::ostream& os, GPOperationVector const& operation_vector) {
  os << "[" << std::endl;
  for (const auto& operation : operation_vector) {
    os << "  " << operation << "," << std::endl;
  }
  os << "]" << std::endl;
  return os;
}
// Move-append new_operations onto operations; new_operations is left in a
// valid but unspecified (moved-from) state.
void GPOperations::AppendGPOperations(GPOperationVector& operations,
                                      GPOperationVector&& new_operations) {
  std::move(new_operations.begin(), new_operations.end(),
            std::back_inserter(operations));
}
| 1,014
|
C++
|
.cpp
| 24
| 37.875
| 87
| 0.711675
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,048
|
subsplit_dag_nni.cpp
|
phylovi_bito/src/subsplit_dag_nni.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "subsplit_dag_nni.hpp"
#include "bitset.hpp"
#include "subsplit_dag.hpp"
// ** NNIOperation Methods
int NNIOperation::Compare(const NNIOperation &nni_a, const NNIOperation &nni_b) {
  // Three-way comparison: order primarily by parent subsplit, then break ties
  // on the child subsplit.
  const int parent_comparison = Bitset::Compare(nni_a.parent_, nni_b.parent_);
  if (parent_comparison != 0) {
    return parent_comparison;
  }
  return Bitset::Compare(nni_a.child_, nni_b.child_);
}
int NNIOperation::Compare(const NNIOperation &nni_b) const {
const NNIOperation &nni_a = *this;
return Compare(nni_a, nni_b);
}
// Relational operators, all defined in terms of the three-way Compare.
bool operator<(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) < 0;
}
bool operator>(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) > 0;
}
bool operator==(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) == 0;
}
bool operator!=(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) != 0;
}
// Build the NNI resulting from swapping one child clade with the sister clade.
// which_child_of_parent selects which parent clade the child hangs under;
// swap_which_child_clade_with_sister selects which child clade trades places.
NNIOperation NNIOperation::NNIOperationFromNeighboringSubsplits(
    const Bitset parent_in, const Bitset child_in,
    const bool swap_which_child_clade_with_sister, const bool which_child_of_parent) {
  // Input: Parent(X,YZ) -> Child(Y,Z).
  Bitset X = parent_in.SubsplitGetClade(!which_child_of_parent);
  // "Y" clade can be chosen arbitrarily from (Y,Z), so "Y" is chosen based on which
  // we want to swap with "X".
  Bitset Y = child_in.SubsplitGetClade(swap_which_child_clade_with_sister);
  Bitset Z = child_in.SubsplitGetClade(!swap_which_child_clade_with_sister);
  // Output: Parent(Y,XZ) -> Child(X,Z).
  Bitset parent_out = Bitset::Subsplit(Y, X | Z);
  Bitset child_out = Bitset::Subsplit(X, Z);
  return NNIOperation(parent_out, child_out);
}
// Convenience overload: infer which parent clade the child hangs under, then
// delegate to the four-argument version.
NNIOperation NNIOperation::NNIOperationFromNeighboringSubsplits(
    const Bitset parent_in, const Bitset child_in,
    const bool swap_which_child_clade_with_sister) {
  bool which_clade_of_parent =
      Bitset::SubsplitIsChildOfWhichParentClade(parent_in, child_in);
  return NNIOperationFromNeighboringSubsplits(
      parent_in, child_in, swap_which_child_clade_with_sister, which_clade_of_parent);
}
// ** SetOfNNIs Methods
// Equality of NNI sets is equality of the underlying ordered sets.
bool operator==(const SetOfNNIs &lhs, const SetOfNNIs &rhs) {
  return lhs.set_ == rhs.set_;
}
bool operator!=(const SetOfNNIs &lhs, const SetOfNNIs &rhs) {
  return lhs.set_ != rhs.set_;
}
// Insert/Erase by NNI, or by (parent, child) subsplit pair for convenience.
void SetOfNNIs::Insert(NNIOperation nni_op) { set_.insert(nni_op); };
void SetOfNNIs::Insert(Bitset parent, Bitset child) {
  Insert(NNIOperation(parent, child));
}
void SetOfNNIs::Erase(NNIOperation nni_op) { set_.erase(nni_op); };
void SetOfNNIs::Erase(Bitset parent, Bitset child) {
  Erase(NNIOperation(parent, child));
}
void SetOfNNIs::Clear() { set_.clear(); }
size_t SetOfNNIs::GetSize() const { return set_.size(); }
// ** NNIEvaluationEngine Methods
// Rebuild the NNI set from scratch so it reflects exactly the NNIs adjacent to
// the current DAG (skipping the root/UCA and leaf subsplits, which cannot take
// part in an NNI).
void SyncSetOfNNIsWithDAG(SetOfNNIs &set_of_nnis, const SubsplitDAG &dag) {
  set_of_nnis.Clear();
  // Only real node pairs are viable NNIs.
  dag.IterateOverRealNodes([&set_of_nnis, &dag](SubsplitDAGNode node) {
    dag.IterateOverParentAndChildAndLeafwardEdges(
        node, [&set_of_nnis, &dag](const size_t parent_id, const bool is_rotated,
                                   const size_t child_id, const size_t edge_idx) {
          // Only internal node pairs are viable NNIs.
          Bitset parent_bitset = dag.GetDAGNode(parent_id).GetBitset();
          Bitset child_bitset = dag.GetDAGNode(child_id).GetBitset();
          if (!(parent_bitset.SubsplitIsUCA() || child_bitset.SubsplitIsLeaf())) {
            SafeAddOutputNNIsToSetOfNNIs(set_of_nnis, dag, parent_bitset, child_bitset,
                                         is_rotated);
          }
        });
  });
}
// Incrementally update the NNI set after the (parent, child) node pair has been
// added to the DAG: scan all edges adjacent to either new node for candidate
// NNIs, then drop the pair itself since it is now part of the DAG.
void UpdateSetOfNNIsAfterDAGAddNodePair(SetOfNNIs &set_of_nnis, const SubsplitDAG &dag,
                                        const Bitset &parent_bitset,
                                        const Bitset &child_bitset) {
  size_t parent_id = dag.GetDAGNodeId(parent_bitset);
  size_t child_id = dag.GetDAGNodeId(child_bitset);
  // Every new edge added is a potential new NNI.
  // Iterate over the parent and child node of the new pair.
  for (const size_t &node_id : {parent_id, child_id}) {
    // Get nodes adjacent to current node from both sorted and rotated edges.
    for (const bool is_edge_leafward : {true, false}) {
      // Get nodes adjacent to current node from both leafward and rootward directions.
      for (const bool is_edge_rotated : {true, false}) {
        SizeVector adjacent_node_ids = dag.GetDAGNode(node_id).GetLeafwardOrRootward(
            is_edge_leafward, is_edge_rotated);
        AddAllNNIsFromNodeVectorToSetOfNNIs(set_of_nnis, dag, node_id,
                                            adjacent_node_ids, is_edge_rotated,
                                            is_edge_leafward);
      }
    }
  }
  // Remove the pair that was just added to the DAG from NNI Set.
  NNIOperation new_nni = NNIOperation(parent_bitset, child_bitset);
  set_of_nnis.Erase(new_nni);
}
// For each node adjacent to node_id, form the (parent, child) pair implied by
// the edge direction and feed it to SafeAddOutputNNIsToSetOfNNIs.
void AddAllNNIsFromNodeVectorToSetOfNNIs(SetOfNNIs &set_of_nnis, const SubsplitDAG &dag,
                                         const size_t &node_id,
                                         const SizeVector &adjacent_node_ids,
                                         const bool is_edge_rotated,
                                         const bool is_edge_leafward) {
  Bitset node_bitset = dag.GetDAGNode(node_id).GetBitset();
  // Determine whether node_id corresponds to parent or child of the pair.
  // Add every edge's NNI to NNI Set.
  // If edges are leafward, node_id is the parent to all vector nodes.
  // If edges are rootward, node_id is the child to all vector nodes.
  if (is_edge_leafward) {
    const Bitset &parent_bitset = node_bitset;
    for (const auto &adjacent_node_id : adjacent_node_ids) {
      const Bitset child_bitset = dag.GetDAGNode(adjacent_node_id).GetBitset();
      SafeAddOutputNNIsToSetOfNNIs(set_of_nnis, dag, parent_bitset, child_bitset,
                                   is_edge_rotated);
    }
  } else {
    const Bitset &child_bitset = node_bitset;
    for (const auto &adjacent_node_id : adjacent_node_ids) {
      const Bitset parent_bitset = dag.GetDAGNode(adjacent_node_id).GetBitset();
      SafeAddOutputNNIsToSetOfNNIs(set_of_nnis, dag, parent_bitset, child_bitset,
                                   is_edge_rotated);
    }
  }
}
// Generate the two candidate NNIs (sorted-clade swap and rotated-clade swap)
// from an existing DAG edge, and insert each candidate that is not already an
// edge of the DAG.
void SafeAddOutputNNIsToSetOfNNIs(SetOfNNIs &set_of_nnis, const SubsplitDAG &dag,
                                  const Bitset &parent_bitset,
                                  const Bitset &child_bitset,
                                  const bool is_edge_rotated) {
  // Soft assert that parent is not the root and child is not a leaf.
  if (parent_bitset.SubsplitIsUCA() || child_bitset.SubsplitIsLeaf()) {
    return;
  }
  // Input pair is in the DAG, so remove it from the Set if it exists.
  set_of_nnis.Erase(parent_bitset, child_bitset);
  // Add NNI for sorted clade swap and rotated clade swap.
  for (bool is_swap_with_sorted_child : {true, false}) {
    bool is_in_dag = false;
    const auto new_nni = NNIOperation::NNIOperationFromNeighboringSubsplits(
        parent_bitset, child_bitset, is_swap_with_sorted_child, !is_edge_rotated);
    if (dag.ContainsNode(new_nni.parent_) && dag.ContainsNode(new_nni.child_)) {
      const size_t parent_id = dag.GetDAGNodeId(new_nni.parent_);
      const size_t child_id = dag.GetDAGNodeId(new_nni.child_);
      is_in_dag = dag.ContainsEdge(parent_id, child_id);
    }
    if (is_in_dag == false) {
      set_of_nnis.Insert(new_nni);
    }
  }
}
| 7,783
|
C++
|
.cpp
| 162
| 40.919753
| 88
| 0.669912
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,049
|
rooted_sbn_instance.cpp
|
phylovi_bito/src/rooted_sbn_instance.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "rooted_sbn_instance.hpp"
#include "subsplit_dag.hpp"
// Convert an indexer representation into its human-readable string form using
// this instance's reversed indexer.
StringSet RootedSBNInstance::StringIndexerRepresentationOf(
    const RootedIndexerRepresentation &indexer_representation) const {
  return RootedSBNMaps::StringIndexerRepresentationOf(
      sbn_support_.StringReversedIndexer(), indexer_representation);
}
// Overload taking a topology: first build its indexer representation, then
// delegate to the representation-based overload.
StringSet RootedSBNInstance::StringIndexerRepresentationOf(
    const Node::NodePtr &topology, size_t out_of_sample_index) const {
  return StringIndexerRepresentationOf(
      sbn_support_.IndexerRepresentationOf(topology, out_of_sample_index));
}
// Compute per-subsplit unconditional probabilities over a DAG built from the
// loaded trees. Fails if no trees have been loaded.
BitsetDoubleMap RootedSBNInstance::UnconditionalSubsplitProbabilities() const {
  if (tree_collection_.TreeCount() == 0) {
    Failwith(
        "Please load some trees into your RootedSBNInstance before trying to calculate "
        "UnconditionalSubsplitProbabilities.");
  }
  const SubsplitDAG dag(tree_collection_);
  EigenVectorXd sbn_parameters = NormalizedSBNParameters();
  // Expand sbn_parameters to include fake subsplits.
  Assert(size_t(sbn_parameters.size()) == dag.EdgeCount(), "GPCSP count mismatch.");
  sbn_parameters.conservativeResize(dag.EdgeCountWithLeafSubsplits());
  // Leaf-subsplit entries get probability one (they are deterministic).
  sbn_parameters
      .segment(dag.EdgeCount(), dag.EdgeCountWithLeafSubsplits() - dag.EdgeCount())
      .setOnes();
  return dag.UnconditionalSubsplitProbabilities(sbn_parameters);
}
// Write the unconditional subsplit probabilities to a CSV file at csv_path.
void RootedSBNInstance::UnconditionalSubsplitProbabilitiesToCSV(
    const std::string &csv_path) const {
  CSV::StringDoubleVectorToCSV(
      SBNMaps::StringDoubleVectorOf(UnconditionalSubsplitProbabilities()), csv_path);
}
// Per-tree log likelihoods; external_flags (if any) are merged with the
// instance's own flags before being passed to the engine.
std::vector<double> RootedSBNInstance::LogLikelihoods(
    std::optional<PhyloFlags> external_flags) {
  auto flags = CollectPhyloFlags(external_flags);
  return GetEngine()->LogLikelihoods(tree_collection_, phylo_model_params_, rescaling_,
                                     flags);
}
// Flag-vector convenience overload: wrap the vector in a PhyloFlags and call
// the optional-flags version above.
template <class VectorType>
std::vector<double> RootedSBNInstance::LogLikelihoods(const VectorType &flag_vec,
                                                      const bool is_run_defaults) {
  PhyloFlags external_flags = PhyloFlags(flag_vec, is_run_defaults);
  return LogLikelihoods(external_flags);
};
// Explicit instantiation for Pybito.
template DoubleVector RootedSBNInstance::LogLikelihoods(const StringVector &,
                                                        const bool);
template DoubleVector RootedSBNInstance::LogLikelihoods(const StringBoolVector &,
                                                        const bool);
template DoubleVector RootedSBNInstance::LogLikelihoods(const StringDoubleVector &,
                                                        const bool);
template DoubleVector RootedSBNInstance::LogLikelihoods(const StringBoolDoubleVector &,
                                                        const bool);
// Per-tree log likelihoods treating the trees as unrooted.
std::vector<double> RootedSBNInstance::UnrootedLogLikelihoods() {
  return GetEngine()->UnrootedLogLikelihoods(tree_collection_, phylo_model_params_,
                                             rescaling_);
}
// Per-tree log-determinant of the Jacobian of the node-height transform.
std::vector<double> RootedSBNInstance::LogDetJacobianHeightTransform() {
  return GetEngine()->LogDetJacobianHeightTransform(tree_collection_,
                                                    phylo_model_params_, rescaling_);
}
// Per-tree phylogenetic gradients; external_flags (if any) are merged with the
// instance's own flags before being passed to the engine.
std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    std::optional<PhyloFlags> external_flags) {
  auto flags = CollectPhyloFlags(external_flags);
  return GetEngine()->Gradients(tree_collection_, phylo_model_params_, rescaling_,
                                flags);
}
// Flag-vector convenience overload: wrap the vector in a PhyloFlags and call
// the optional-flags version above.
template <class VectorType>
std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    const VectorType &flag_vec, const bool is_run_defaults) {
  PhyloFlags external_flags = PhyloFlags(flag_vec, is_run_defaults);
  return PhyloGradients(external_flags);
};
// Explicit templates for Pybind API.
template std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    const StringVector &, const bool);
template std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    const StringBoolVector &, const bool);
template std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    const StringDoubleVector &, const bool);
template std::vector<PhyloGradient> RootedSBNInstance::PhyloGradients(
    const StringBoolDoubleVector &, const bool);
// Per-tree gradient of the log-determinant Jacobian, delegated to the engine.
std::vector<DoubleVector> RootedSBNInstance::GradientLogDeterminantJacobian() {
  return GetEngine()->GradientLogDeterminantJacobian(tree_collection_,
                                                     phylo_model_params_, rescaling_);
}
// Parse a Newick file into this instance's rooted tree collection, optionally
// sorting taxa during parsing.
void RootedSBNInstance::ReadNewickFile(const std::string &fname, const bool sort_taxa) {
  Driver driver;
  driver.SetSortTaxa(sort_taxa);
  tree_collection_ =
      RootedTreeCollection::OfTreeCollection(driver.ParseNewickFile(fname));
}
// Parse a Nexus file into this instance's rooted tree collection, optionally
// sorting taxa during parsing.
void RootedSBNInstance::ReadNexusFile(const std::string &fname, const bool sort_taxa) {
  Driver driver;
  driver.SetSortTaxa(sort_taxa);
  tree_collection_ =
      RootedTreeCollection::OfTreeCollection(driver.ParseNexusFile(fname));
}
// Tip-date handling: each of these delegates to the tree collection, with the
// flag controlling whether time trees are initialized from branch lengths.
void RootedSBNInstance::SetDatesToBeConstant(
    bool initialize_time_trees_using_branch_lengths) {
  tree_collection_.SetDatesToBeConstant(initialize_time_trees_using_branch_lengths);
}
void RootedSBNInstance::ParseDatesFromTaxonNames(
    bool initialize_time_trees_using_branch_lengths) {
  tree_collection_.ParseDatesFromTaxonNames(initialize_time_trees_using_branch_lengths);
}
void RootedSBNInstance::ParseDatesFromCSV(
    const std::string &csv_path, bool initialize_time_trees_using_branch_lengths) {
  tree_collection_.ParseDatesFromCSV(csv_path,
                                     initialize_time_trees_using_branch_lengths);
}
| 5,800
|
C++
|
.cpp
| 114
| 43.473684
| 88
| 0.730946
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,050
|
tp_engine.cpp
|
phylovi_bito/src/tp_engine.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
#include "tp_engine.hpp"
#include "tp_evaluation_engine.hpp"
#include "gp_engine.hpp"
#include "sbn_maps.hpp"
#include "optimization.hpp"
#include "stopwatch.hpp"
// Construct a TPEngine over a DAG and site pattern. Optionally memory-maps
// likelihood/parsimony evaluation engines and seeds the tree source from a
// tree collection; otherwise all edges get tree id 1.
TPEngine::TPEngine(GPDAG &dag, SitePattern &site_pattern,
                   std::optional<std::string> mmap_likelihood_path,
                   std::optional<std::string> mmap_parsimony_path,
                   std::optional<const RootedTreeCollection> tree_collection,
                   std::optional<const BitsetSizeMap> edge_indexer)
    : choice_map_(dag), site_pattern_(site_pattern), dag_(&dag) {
  // No evaluation engines are active until they are made below.
  for (const auto eval_engine : TPEvalEngineTypeEnum::Iterator()) {
    eval_engine_in_use_[eval_engine] = false;
  }
  // Initialize site pattern-based data.
  auto weights = site_pattern_.GetWeights();
  site_pattern_weights_ = EigenVectorXdOfStdVectorDouble(weights);
  // Initialize node and edge data.
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
  // Initialize Tree Source.
  if (tree_collection.has_value() && edge_indexer.has_value()) {
    SetTreeSourceByTakingFirst(tree_collection.value(), edge_indexer.value());
  } else {
    std::fill(GetTreeSource().begin(), GetTreeSource().end(), TreeId{1});
  }
  // Initialize Choice Map.
  InitializeChoiceMap();
  // Initialize Eval Engines
  if (mmap_likelihood_path.has_value()) {
    MakeLikelihoodEvalEngine(mmap_likelihood_path.value());
  }
  if (mmap_parsimony_path.has_value()) {
    MakeParsimonyEvalEngine(mmap_parsimony_path.value());
  }
  // Initialize node and edge data for eval engines.
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
  InitializeScores();
}
// ** Comparators
// Structural comparison of two TPEngines. Checks, in order: the DAGs
// themselves, the node/edge id correspondence between the two DAGs, every
// edge's choice map (translated through the edge id map), and — when both
// engines have likelihood eval engines — their branch lengths.
// Returns 0 when equivalent, nonzero otherwise. Diagnostics are written to
// stdout unless is_quiet is true.
int TPEngine::Compare(const TPEngine &lhs, const TPEngine &rhs, const bool is_quiet) {
  std::stringstream dev_null;
  auto &os = is_quiet ? dev_null : std::cout;
  // DAGs must match before any per-edge comparison is meaningful.
  if (lhs.GetDAG() != rhs.GetDAG()) {
    os << "TPEngine::Compare: DAGs are not equal." << std::endl;
    return SubsplitDAG::Compare(lhs.GetDAG(), rhs.GetDAG(), is_quiet);
  }
  const auto node_map =
      SubsplitDAG::BuildNodeIdMapBetweenDAGs(lhs.GetDAG(), rhs.GetDAG());
  if (node_map.size() != lhs.GetDAG().NodeCount()) {
    os << "TPEngine::Compare: node_maps do not cover entire node set: "
       << node_map.size() << " " << lhs.GetDAG().NodeCount() << " "
       << rhs.GetDAG().NodeCount() << std::endl;
    return node_map.size() - lhs.GetDAG().NodeCount();
  }
  const auto edge_map =
      SubsplitDAG::BuildEdgeIdMapBetweenDAGs(lhs.GetDAG(), rhs.GetDAG());
  if (edge_map.size() != lhs.GetDAG().EdgeCountWithLeafSubsplits()) {
    os << "TPEngine::Compare: edge_maps do not cover entire edge set: "
       << edge_map.size() << " " << lhs.GetDAG().EdgeCountWithLeafSubsplits() << " "
       << rhs.GetDAG().EdgeCountWithLeafSubsplits() << std::endl;
    return edge_map.size() - lhs.GetDAG().EdgeCountWithLeafSubsplits();
  }
  // Remap an lhs edge choice into rhs edge ids so the two can be compared.
  auto TranslateEdgeChoice =
      [&edge_map](const TPChoiceMap::EdgeChoice &pre_edge_choice) {
        TPChoiceMap::EdgeChoice edge_choice(pre_edge_choice);
        if (pre_edge_choice.parent != NoId) {
          edge_choice.parent = edge_map.find(pre_edge_choice.parent)->second;
        }
        if (pre_edge_choice.sister != NoId) {
          edge_choice.sister = edge_map.find(pre_edge_choice.sister)->second;
        }
        if (pre_edge_choice.left_child != NoId) {
          edge_choice.left_child = edge_map.find(pre_edge_choice.left_child)->second;
        }
        if (pre_edge_choice.right_child != NoId) {
          edge_choice.right_child = edge_map.find(pre_edge_choice.right_child)->second;
        }
        return edge_choice;
      };
  // Returns 0 when the two choices agree; otherwise a signed id difference.
  auto CompareEdgeChoice = [](const TPChoiceMap::EdgeChoice &lhs,
                              const TPChoiceMap::EdgeChoice &rhs) {
    if (lhs.parent != rhs.parent) {
      return int(lhs.parent.value_ - rhs.parent.value_);
    }
    if (lhs.sister != rhs.sister) {
      return int(lhs.sister.value_ - rhs.sister.value_);
    }
    // Bugfix: compare right child against rhs.right_child (was mistakenly
    // compared against rhs.left_child).
    if ((lhs.left_child != rhs.left_child) or (lhs.right_child != rhs.right_child)) {
      // Right and left child can be swapped due to taxon bitset ordering.
      if ((lhs.left_child == rhs.right_child) and (lhs.right_child == rhs.left_child)) {
        return 0;
      }
      if (lhs.left_child != rhs.left_child) {
        return int(lhs.left_child.value_ - rhs.left_child.value_);
      }
      return int(lhs.right_child.value_ - rhs.right_child.value_);
    }
    return 0;
  };
  int final_diff = 0;
  for (const auto [lhs_edge_id, rhs_edge_id] : edge_map) {
    const auto &lhs_choice = lhs.GetChoiceMap(lhs_edge_id);
    const auto &rhs_choice = rhs.GetChoiceMap(rhs_edge_id);
    auto trans_choice = TranslateEdgeChoice(lhs_choice);
    auto choice_diff = CompareEdgeChoice(trans_choice, rhs_choice);
    if (choice_diff != 0) {
      // Dump both choice maps plus the tree sources of all neighboring edges
      // to help diagnose where the engines diverged.
      os << "TPEngine::Compare: edge_choice does not match at -- " << lhs_edge_id << " "
         << rhs_edge_id << std::endl;
      os << "lhs_choice_map: " << lhs_edge_id << " " << lhs_choice << std::endl;
      os << "trans_choice_map: " << lhs_edge_id << " " << trans_choice << std::endl;
      os << "rhs_choice_map: " << rhs_edge_id << " " << rhs_choice << std::endl;
      for (const auto dir : DirectionEnum::Iterator()) {
        auto &hs = lhs;
        auto &hs2 = rhs;
        auto edge_id = lhs_edge_id;
        const auto node_id = (dir == Direction::Leafward)
                                 ? hs.GetDAG().GetDAGEdge(edge_id).GetChild()
                                 : hs.GetDAG().GetDAGEdge(edge_id).GetParent();
        const auto &node = hs.GetDAG().GetDAGNode(node_id);
        for (const auto clade : SubsplitCladeEnum::Iterator()) {
          const auto &adj_node_ids = node.GetNeighbors(dir, clade);
          os << "hs::" << DirectionEnum::ToString(dir) << ":"
             << SubsplitCladeEnum::ToString(clade) << ": ";
          for (const auto adj_node_id : adj_node_ids) {
            // NOTE: intentionally shadows the outer edge_id with the
            // neighboring edge's id for the duration of this print.
            auto edge_id = (dir == Direction::Leafward)
                               ? hs.GetDAG().GetEdgeIdx(node.Id(), adj_node_id)
                               : hs.GetDAG().GetEdgeIdx(adj_node_id, node.Id());
            os << "Edge" << edge_id << "->"
               << "Tree" << hs.GetTreeSource(edge_id) << " => ";
            os << "Edge" << edge_map.find(edge_id)->second << "->"
               << "Tree" << hs2.GetTreeSource(edge_map.find(edge_id)->second) << ", ";
          }
          os << std::endl;
        }
      }
      final_diff = choice_diff;
    }
  }
  // Compare branch lengths only when both engines carry a likelihood engine.
  if (lhs.IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine) &&
      rhs.IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    auto branch_diff =
        DAGBranchHandler::Compare(lhs.GetLikelihoodEvalEngine().GetDAGBranchHandler(),
                                  rhs.GetLikelihoodEvalEngine().GetDAGBranchHandler());
    if (branch_diff != 0) {
      os << "TPEngine::Compare: Branch lengths do not match" << std::endl;
      final_diff = branch_diff;
    }
  }
  return final_diff;
}
// Two engines are equal exactly when a full (quiet) comparison reports no
// difference.
bool operator==(const TPEngine &lhs, const TPEngine &rhs) {
  const int diff = TPEngine::Compare(lhs, rhs);
  return diff == 0;
}
// ** Settings
double TPEngine::GetResizingFactor() const { return resizing_factor_; }
bool TPEngine::GetUseBestEdgeMap() const { return do_use_best_edge_map_; }
// Enables or disables use of the best-edge map.
void TPEngine::SetUseBestEdgeMap(bool do_use_best_edge_map) {
  do_use_best_edge_map_ = do_use_best_edge_map;
}
// Reports whether newly-added edges get branch length optimization.
// NOTE(review): forwards the engine's result through a size_t return —
// kept as-is for interface compatibility.
size_t TPEngine::IsOptimizeNewEdges() const {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  return GetLikelihoodEvalEngine().IsOptimizeNewEdges();
}
// Sets whether newly-added edges get branch length optimization.
void TPEngine::SetOptimizeNewEdges(const bool do_optimize_new_edges) {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  GetLikelihoodEvalEngine().SetOptimizeNewEdges(do_optimize_new_edges);
}
// Returns the maximum number of branch length optimization iterations.
size_t TPEngine::GetOptimizationMaxIteration() const {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  return GetLikelihoodEvalEngine().GetOptimizationMaxIteration();
}
// Sets the maximum number of branch length optimization iterations.
void TPEngine::SetOptimizationMaxIteration(const size_t optimize_max_iter) {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  GetLikelihoodEvalEngine().SetOptimizationMaxIteration(optimize_max_iter);
}
// Whether proposed-NNI branch lengths are initialized from the DAG.
bool TPEngine::IsInitProposedBranchLengthsWithDAG() const {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  return GetLikelihoodEvalEngine().IsInitProposedBranchLengthsWithDAG();
}
// Sets whether proposed-NNI branch lengths are initialized from the DAG.
void TPEngine::SetInitProposedBranchLengthsWithDAG(
    const bool do_init_proposed_branch_lengths_with_dag) {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  GetLikelihoodEvalEngine().SetInitProposedBranchLengthsWithDAG(
      do_init_proposed_branch_lengths_with_dag);
}
// Whether proposed-NNI branch lengths are held fixed at their DAG values.
bool TPEngine::IsFixProposedBranchLengthsFromDAG() const {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  return GetLikelihoodEvalEngine().IsFixProposedBranchLengthsFromDAG();
}
// Sets whether proposed-NNI branch lengths are held fixed at their DAG values.
void TPEngine::SetFixProposedBranchLengthsFromDAG(
    const bool do_fix_proposed_branch_lengths_from_dag) {
  // Bugfix (message only): assert text was missing the word "before".
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before modifying settings.");
  GetLikelihoodEvalEngine().SetFixProposedBranchLengthsFromDAG(
      do_fix_proposed_branch_lengths_from_dag);
}
// ** Maintenance
// (Re)initializes node data, edge data, and the choice map from the current
// DAG. Call order matters: the choice map relies on edge data being sized.
void TPEngine::Initialize() {
  // Initialize node-based data
  GrowNodeData(GetDAG().NodeCount(), std::nullopt, std::nullopt, true);
  // Initialize edge-based data
  GrowEdgeData(GetDAG().EdgeCountWithLeafSubsplits(), std::nullopt, std::nullopt, true);
  // Initialize the choice map for every edge.
  InitializeChoiceMap();
}
// Refreshes engine state after NNIs have been added to the DAG: updates the
// choice map, then each active eval engine. Per-phase timings are written to
// stdout unless is_quiet is true.
void TPEngine::UpdateAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer, bool is_quiet) {
  std::stringstream dev_null;
  // Bugfix: honor the is_quiet parameter. It was previously shadowed by a
  // hardcoded local (is_quiet_ = true) that always suppressed output.
  std::ostream &os = (is_quiet ? dev_null : std::cout);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  UpdateChoiceMapAfterModifyingDAG(nni_to_pre_nni, prev_node_count, node_reindexer,
                                   prev_edge_count, edge_reindexer);
  os << "UpdateAfterModifying::ChoiceMap: " << timer.Lap() << std::endl;
  if (IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    GetLikelihoodEvalEngine().UpdateEngineAfterModifyingDAG(
        nni_to_pre_nni, prev_node_count, node_reindexer, prev_edge_count,
        edge_reindexer);
    os << "UpdateAfterModifying::LikelihoodEvalEngine: " << timer.Lap() << std::endl;
  }
  if (IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine)) {
    GetParsimonyEvalEngine().UpdateEngineAfterModifyingDAG(
        nni_to_pre_nni, prev_node_count, node_reindexer, prev_edge_count,
        edge_reindexer);
    os << "UpdateAfterModifying::ParsimonyEvalEngine: " << timer.Lap() << std::endl;
  }
}
// Grows node-indexed storage to new_node_count. Forwards growth to active eval
// engines first, then updates this engine's counts, reallocating with
// resizing_factor_ headroom (or an explicit allocation) when needed, and
// finally reindexes node data if a reindexer is supplied.
void TPEngine::GrowNodeData(const size_t new_node_count,
                            std::optional<const Reindexer> node_reindexer,
                            std::optional<const size_t> explicit_alloc,
                            const bool on_init) {
  if (IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    GetLikelihoodEvalEngine().GrowNodeData(new_node_count, node_reindexer,
                                           explicit_alloc, on_init);
  }
  if (IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine)) {
    GetParsimonyEvalEngine().GrowNodeData(new_node_count, node_reindexer,
                                          explicit_alloc, on_init);
  }
  const size_t old_node_count = GetNodeCount();
  SetNodeCount(new_node_count);
  // Reallocate more space if needed.
  if ((GetPaddedNodeCount() > GetAllocatedNodeCount()) || explicit_alloc.has_value()) {
    SetAllocatedNodeCount(
        size_t(ceil(double(GetPaddedNodeCount()) * resizing_factor_)));
    if (explicit_alloc.has_value()) {
      Assert(explicit_alloc.value() >= GetNodeCount(),
             "Attempted to reallocate space smaller than node_count.");
      SetAllocatedNodeCount(explicit_alloc.value() + GetSpareNodeCount());
    }
  }
  // Reindex work space to realign with DAG.
  if (node_reindexer.has_value()) {
    ReindexNodeData(node_reindexer.value(), old_node_count);
  }
}
// Grows edge-indexed storage to new_edge_count. Grows the choice map and any
// active eval engines, updates counts (reallocating with resizing_factor_
// headroom or an explicit allocation when needed), default-initializes tree
// sources for the newly added edges, and reindexes if a reindexer is supplied.
void TPEngine::GrowEdgeData(const size_t new_edge_count,
                            std::optional<const Reindexer> edge_reindexer,
                            std::optional<const size_t> explicit_alloc,
                            const bool on_init) {
  // Update edge in choice map.
  GetChoiceMap().GrowEdgeData(new_edge_count, edge_reindexer, explicit_alloc, on_init);
  // Update edges in eval engine.
  if (IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    GetLikelihoodEvalEngine().GrowEdgeData(new_edge_count, edge_reindexer,
                                           explicit_alloc, on_init);
  }
  if (IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine)) {
    GetParsimonyEvalEngine().GrowEdgeData(new_edge_count, edge_reindexer,
                                          explicit_alloc, on_init);
  }
  const size_t old_edge_count = GetEdgeCount();
  SetEdgeCount(new_edge_count);
  // Reallocate more space if needed.
  if ((GetPaddedEdgeCount() > GetAllocatedEdgeCount()) || explicit_alloc.has_value()) {
    SetAllocatedEdgeCount(
        size_t(ceil(double(GetPaddedEdgeCount()) * resizing_factor_)));
    if (explicit_alloc.has_value()) {
      // Bugfix: validate against the edge count; this previously checked
      // GetNodeCount(), a copy-paste from GrowNodeData.
      Assert(explicit_alloc.value() >= GetEdgeCount(),
             "Attempted to reallocate space smaller than edge_count.");
      SetAllocatedEdgeCount(explicit_alloc.value() + GetSpareEdgeCount());
    }
    GetTreeSource().reserve(GetAllocatedEdgeCount());
  }
  GetTreeSource().resize(GetPaddedEdgeCount());
  // Initialize data: new edges carry an invalid tree source until assigned.
  tree_counter_++;
  for (EdgeId i(old_edge_count); i < new_edge_count; i++) {
    GetTreeSource(i) = TreeId(NoId);
  }
  // Reindex work space to realign with DAG.
  if (edge_reindexer.has_value()) {
    ReindexEdgeData(edge_reindexer.value(), old_edge_count);
  }
}
// Validates the node reindexer. TPEngine holds no node-indexed data of its own
// (eval engines reindex theirs inside GrowNodeData), so nothing is remapped
// here; old_node_count is currently unused.
void TPEngine::ReindexNodeData(const Reindexer &node_reindexer,
                               const size_t old_node_count) {
  Assert(node_reindexer.size() == GetNodeCount(),
         "Node Reindexer is the wrong size for TPEngine.");
  Assert(node_reindexer.IsValid(GetNodeCount()), "Node Reindexer is not valid.");
}
// Validates the edge reindexer and remaps the per-edge tree source vector to
// the new edge ids; old_edge_count is currently unused.
void TPEngine::ReindexEdgeData(const Reindexer &edge_reindexer,
                               const size_t old_edge_count) {
  Assert(edge_reindexer.size() == GetEdgeCount(),
         "Edge Reindexer is the wrong size for TPEngine.");
  Assert(edge_reindexer.IsValid(GetEdgeCount()),
         "Edge Reindexer is not valid for TPEngine size.");
  GetTreeSource() = Reindexer::Reindex(GetTreeSource(), edge_reindexer, GetEdgeCount());
}
// Ensures at least new_node_spare_count spare node slots, regrowing node data
// when the requested spare capacity exceeds the current one.
void TPEngine::GrowSpareNodeData(const size_t new_node_spare_count) {
  if (new_node_spare_count <= GetSpareNodeCount()) {
    return;  // Already have enough spare capacity.
  }
  SetSpareNodeCount(new_node_spare_count);
  GrowNodeData(GetNodeCount());
}
// Ensures at least new_edge_spare_count spare edge slots, regrowing edge data
// when the requested spare capacity exceeds the current one.
void TPEngine::GrowSpareEdgeData(const size_t new_edge_spare_count) {
  if (new_edge_spare_count <= GetSpareEdgeCount()) {
    return;  // Already have enough spare capacity.
  }
  SetSpareEdgeCount(new_edge_spare_count);
  GrowEdgeData(GetEdgeCount());
}
// Copies per-edge data (via copy_data_func) from a pre-NNI's edges onto the
// corresponding edges of its post-NNI: the central edge, the edges adjacent to
// the parent (rootward and sister side), and the child edges, accounting for
// the left/right child swap the NNI may have performed. new_tree_id defaults
// to the next tree id and bumps input_tree_count_.
void TPEngine::CopyOverEdgeDataFromPreNNIToPostNNI(const NNIOperation &post_nni,
                                                   const NNIOperation &pre_nni,
                                                   CopyEdgeDataFunc copy_data_func,
                                                   std::optional<size_t> new_tree_id) {
  new_tree_id =
      (new_tree_id.has_value()) ? new_tree_id.value() : GetNextTreeId().value_;
  input_tree_count_ = new_tree_id.value() + 1;
  // Copy data for every edge shared between pre_node and post_node neighbors
  // on the given (direction, clade) side.
  auto CopyBranchLengthFromCommonAdjacentNodes =
      [this, &copy_data_func, new_tree_id](
          const NodeId pre_node_id, const NodeId post_node_id,
          const Direction direction, const SubsplitClade clade) {
        const auto pre_node = GetDAG().GetDAGNode(pre_node_id);
        for (const auto parent_id : pre_node.GetNeighbors(direction, clade)) {
          const auto pre_edge_id = GetDAG().GetEdgeIdx(parent_id, pre_node_id);
          const auto post_edge_id = GetDAG().GetEdgeIdx(parent_id, post_node_id);
          copy_data_func(pre_edge_id, post_edge_id);
          // GetTreeSource(post_edge_id) = TreeId(new_tree_id.value());
        }
      };
  const auto pre_parent_id = GetDAG().GetDAGNodeId(pre_nni.GetParent());
  const auto pre_child_id = GetDAG().GetDAGNodeId(pre_nni.GetChild());
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_parent_id, pre_child_id);
  const auto pre_edge = GetDAG().GetDAGEdge(pre_edge_id);
  const auto post_parent_id = GetDAG().GetDAGNodeId(post_nni.GetParent());
  const auto post_child_id = GetDAG().GetDAGNodeId(post_nni.GetChild());
  const auto post_edge_id = GetDAG().GetEdgeIdx(post_parent_id, post_child_id);
  // Copy over central edge.
  copy_data_func(pre_edge_id, post_edge_id);
  // Copy over parent and sister edges.
  CopyBranchLengthFromCommonAdjacentNodes(pre_parent_id, post_parent_id,
                                          Direction::Rootward, SubsplitClade::Left);
  CopyBranchLengthFromCommonAdjacentNodes(pre_parent_id, post_parent_id,
                                          Direction::Rootward, SubsplitClade::Right);
  // The pre-NNI sister clade becomes a child clade of the post-NNI child node.
  CopyBranchLengthFromCommonAdjacentNodes(
      pre_parent_id, post_child_id, Direction::Leafward,
      Bitset::Opposite(pre_edge.GetSubsplitClade()));
  // Copy over left and right edges.
  NodeId post_leftchild_id;
  NodeId post_rightchild_id;
  if (pre_nni.GetSisterClade() == post_nni.GetLeftChildClade()) {
    // If post_nni swapped pre_nni sister with pre_nni left child.
    post_leftchild_id = post_parent_id;
    post_rightchild_id = post_child_id;
  } else {
    // If post_nni swapped pre_nni sister swapped with pre_nni right child.
    post_leftchild_id = post_child_id;
    post_rightchild_id = post_parent_id;
  }
  CopyBranchLengthFromCommonAdjacentNodes(pre_child_id, post_leftchild_id,
                                          Direction::Leafward, SubsplitClade::Left);
  CopyBranchLengthFromCommonAdjacentNodes(pre_child_id, post_rightchild_id,
                                          Direction::Leafward, SubsplitClade::Right);
}
// ** Choice Map
// Populates every edge's choice map from the current tree-source priorities.
void TPEngine::InitializeChoiceMap() {
  EdgeId edge_id(0);
  while (edge_id < GetEdgeCount()) {
    UpdateEdgeChoiceByTakingHighestPriorityTree(edge_id);
    edge_id++;
  }
}
// Updates the choice map and tree sources after NNIs were added to the DAG.
// For each new edge: resets its choice and assigns a placeholder tree source;
// central NNI edges get choices remapped from their pre-NNI, other new edges
// are initialized from the highest-priority neighboring tree; finally, the
// choices of adjacent new edges are pointed back at the new central edges.
// NOTE(review): GetLikelihoodEvalEngine() is used unconditionally below, while
// later code guards on IsEvalEngineInUse — presumably this method is only
// called with a likelihood engine present; confirm against callers.
void TPEngine::UpdateChoiceMapAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  // NOTE(review): these timers are constructed but never read in this function.
  Stopwatch full_timer(true, Stopwatch::TimeScale::SecondScale);
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  const size_t new_edge_count = edge_reindexer.size();
  // Tree counter for assigning a unique tree id for each NNI added to the DAG.
  tree_counter_++;
  const TreeId min_tree_id(tree_counter_);
  const TreeId max_tree_id(tree_counter_ + nni_to_pre_nni.size());
  TreeId nni_tree_id(tree_counter_);
  // Edges that have been newly added to the DAG.
  std::set<EdgeId> new_edges, nni_edges;
  for (size_t i = prev_edge_count; i < new_edge_count; i++) {
    const EdgeId edge_id = EdgeId(edge_reindexer.GetNewIndexByOldIndex(i));
    new_edges.insert(edge_id);
  }
  // Edges that need their choicemaps initialized.
  std::set<EdgeId> edges_to_init_choicemap(new_edges);
  // Initialize all new tree sources to default tree source.
  for (const auto edge_id : new_edges) {
    GetTreeSource(edge_id) = max_tree_id;
    GetChoiceMap().ResetEdgeChoice(edge_id);
    GetLikelihoodEvalEngine().GetDAGBranchHandler()(edge_id) =
        GetLikelihoodEvalEngine().GetDAGBranchHandler().GetDefaultBranchLength();
  }
  // Build map of best reference edges to update.
  NNISet nnis;
  for (const auto &[post_nni, pre_nni] : nni_to_pre_nni) {
    std::ignore = pre_nni;
    nnis.insert(post_nni);
  }
  const auto best_pcsp_edge_map = BuildMapOfProposedNNIPCSPsToBestPreNNIEdges(
      nnis, prev_edge_count, edge_reindexer);
  std::unordered_map<EdgeId, EdgeId> best_edge_map;
  for (const auto &[pcsp, pre_edge_id] : best_pcsp_edge_map) {
    // Diagnostic only: a missing PCSP indicates an inconsistent edge map.
    if (!GetDAG().ContainsEdge(pcsp)) {
      std::cerr << "PCSP not found in DAG: " << pcsp.PCSPToString() << std::endl;
      std::cerr << "NNIs: " << std::endl;
      for (const auto &[post_nni, pre_nni] : nni_to_pre_nni) {
        std::cerr << post_nni.GetCentralEdgePCSP().PCSPToString() << std::endl;
      }
    }
    const auto post_edge_id = GetDAG().GetEdgeIdx(pcsp);
    best_edge_map[post_edge_id] = pre_edge_id;
  }
  // Update branch lengths from best reference edges.
  if (IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    auto &branch_handler = GetLikelihoodEvalEngine().GetDAGBranchHandler();
    for (const auto &[post_edge_id, pre_edge_id] : best_edge_map) {
      branch_handler(post_edge_id) = branch_handler(pre_edge_id);
    }
  }
  // Iterate over newly added NNIs to initialize choice map and tree source.
  for (const auto &[post_nni, pre_nni] : nni_to_pre_nni) {
    std::ignore = pre_nni;
    // Get edge mapping from pre-NNI to post-NNI.
    const auto post_edge_id = GetDAG().GetEdgeIdx(post_nni);
    nni_edges.insert(post_edge_id);
    edges_to_init_choicemap.erase(post_edge_id);
    const auto mapped_post_choice =
        GetRemappedEdgeChoiceFromPreNNIToPostNNI(pre_nni, post_nni);
    // Use edge mapping to update each edge: keep the highest-priority
    // (lowest-id) tree source seen so far.
    auto UpdateEdge = [this, &nni_tree_id, &new_edges](const EdgeId post_edge_id) {
      // Update tree source.
      if (GetTreeSource(post_edge_id) > nni_tree_id) {
        GetTreeSource(post_edge_id) = nni_tree_id;
      }
    };
    UpdateEdge(post_edge_id);
    UpdateEdge(mapped_post_choice.parent);
    UpdateEdge(mapped_post_choice.sister);
    UpdateEdge(mapped_post_choice.left_child);
    UpdateEdge(mapped_post_choice.right_child);
    // Use edge mapping to update each choice map.
    GetChoiceMap(post_edge_id) = mapped_post_choice;
    // Increment source tree priority.
    nni_tree_id++;
    tree_counter_++;
  }
  // For non-central edges, initialize their choicemap.
  for (const auto edge_id : edges_to_init_choicemap) {
    UpdateEdgeChoiceByTakingHighestPriorityTree(edge_id);
    GetTreeSource(edge_id) = nni_tree_id;
    // Increment source tree priority.
    nni_tree_id++;
    tree_counter_++;
  }
  // Update adjacent edge choice.
  for (const auto &[post_nni, pre_nni] : nni_to_pre_nni) {
    std::ignore = pre_nni;
    // Get edge mapping from pre-NNI to post-NNI.
    const auto post_edge_id = GetDAG().GetEdgeIdx(post_nni);
    // Update given choice with given adj_edge_id if edge is new.
    auto UpdateChoice = [this, &new_edges](EdgeId choice_edge_id,
                                           EdgeAdjacent edge_type, EdgeId adj_edge_id) {
      if (new_edges.find(choice_edge_id) != new_edges.end()) {
        GetChoiceMap().SetEdgeChoice(choice_edge_id, edge_type, adj_edge_id);
      }
    };
    const auto &choice = GetChoiceMap(post_edge_id);
    const auto focal_clade = GetDAG().GetFocalClade(post_edge_id);
    // The central edge is its parent's left or right child depending on clade.
    if (focal_clade == SubsplitClade::Left) {
      UpdateChoice(choice.parent, EdgeAdjacent::LeftChild, post_edge_id);
    } else {
      UpdateChoice(choice.parent, EdgeAdjacent::RightChild, post_edge_id);
    }
    UpdateChoice(choice.sister, EdgeAdjacent::Sister, post_edge_id);
    UpdateChoice(choice.left_child, EdgeAdjacent::Parent, post_edge_id);
    UpdateChoice(choice.right_child, EdgeAdjacent::Parent, post_edge_id);
  }
}
// Fills in the edge's choice map by taking the *first* neighboring edge found
// in each adjacent slot (parent, sister, left child, right child), with no
// regard for tree priority. Slots with no neighbor are left as EdgeId(NoId).
void TPEngine::UpdateEdgeChoiceByTakingFirstTree(const EdgeId edge_id) {
  const auto edge = GetDAG().GetDAGEdge(EdgeId(edge_id));
  GetChoiceMap().ResetEdgeChoice(edge_id);
  // Returns the first adjacent edge on the given (direction, clade) side of
  // node_id, or NoId when there is none (the loop returns on first neighbor).
  auto GetFirstEdgeId = [this](const NodeId node_id, const Direction direction,
                               const SubsplitClade clade) {
    auto adj_edge_id = EdgeId(NoId);
    const auto &node = GetDAG().GetDAGNode(node_id);
    for (const auto adj_node_id : node.GetNeighbors(direction, clade)) {
      const auto parent_node_id =
          (direction == Direction::Rootward) ? adj_node_id : node_id;
      const auto child_node_id =
          (direction == Direction::Rootward) ? node_id : adj_node_id;
      adj_edge_id = GetDAG().GetEdgeIdx(parent_node_id, child_node_id);
      return adj_edge_id;
    }
    return adj_edge_id;
  };
  // Select parent: try the left clade first, then the right.
  auto first_edge_id = EdgeId(NoId);
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    first_edge_id = GetFirstEdgeId(edge.GetParent(), Direction::Rootward, clade);
    if (first_edge_id != NoId) {
      break;
    }
  }
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::Parent, first_edge_id);
  // Select sister: the parent's leafward neighbors on the opposite clade.
  first_edge_id = GetFirstEdgeId(edge.GetParent(), Direction::Leafward,
                                 Bitset::Opposite(edge.GetSubsplitClade()));
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::Sister, first_edge_id);
  // Select left child.
  first_edge_id =
      GetFirstEdgeId(edge.GetChild(), Direction::Leafward, SubsplitClade::Left);
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::LeftChild, first_edge_id);
  // Select right child.
  first_edge_id =
      GetFirstEdgeId(edge.GetChild(), Direction::Leafward, SubsplitClade::Right);
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::RightChild, first_edge_id);
}
// Fills in the edge's choice map by selecting, in each adjacent slot (parent,
// sister, left child, right child), the neighboring edge whose tree source has
// the highest priority — i.e. the numerically smallest TreeId.
void TPEngine::UpdateEdgeChoiceByTakingHighestPriorityTree(const EdgeId edge_id) {
  const auto edge = GetDAG().GetDAGEdge(EdgeId(edge_id));
  // GetChoiceMap().ResetEdgeChoice(edge_id);
  // Returns the adjacent edge on (direction, clade) of node_id whose tree
  // source id is smallest; optionally reports that tree id via opt_tree_id.
  auto GetBestEdgeIdByHighestPriorityTree = [this, edge_id](
                                                const NodeId node_id,
                                                const Direction direction,
                                                const SubsplitClade clade,
                                                TreeId *opt_tree_id = nullptr) {
    auto best_tree_id = TreeId(NoId);
    auto best_edge_id = EdgeId(NoId);
    if (!GetDAG().ContainsNode(node_id)) {
      std::cerr << "ERROR: Encountered invalid node id during edge choice update: Edge"
                << edge_id << " Node" << node_id << std::endl;
    }
    const auto &node = GetDAG().GetDAGNode(node_id);
    bool has_first_edge = false;
    for (const auto adj_node_id : node.GetNeighbors(direction, clade)) {
      const auto parent_node_id =
          (direction == Direction::Rootward) ? adj_node_id : node_id;
      const auto child_node_id =
          (direction == Direction::Rootward) ? node_id : adj_node_id;
      const auto adj_edge_id = GetDAG().GetEdgeIdx(parent_node_id, child_node_id);
      auto adj_tree_id = GetTreeSource()[adj_edge_id.value_];
      if ((best_tree_id > adj_tree_id) || !has_first_edge) {
        best_tree_id = adj_tree_id;
        best_edge_id = adj_edge_id;
        has_first_edge = true;
      }
    }
    if (opt_tree_id) {
      *opt_tree_id = best_tree_id;
    }
    return best_edge_id;
  };
  auto best_edge_id = EdgeId(NoId);
  auto best_tree_id = TreeId(NoId);
  // Select parent: best candidate across both rootward clades.
  for (const auto clade : SubsplitCladeEnum::Iterator()) {
    TreeId clade_tree_id = TreeId(NoId);
    const auto clade_edge_id = GetBestEdgeIdByHighestPriorityTree(
        edge.GetParent(), Direction::Rootward, clade, &clade_tree_id);
    if ((best_edge_id == NoId) || (best_tree_id > clade_tree_id)) {
      best_edge_id = clade_edge_id;
      best_tree_id = clade_tree_id;
    }
  }
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::Parent, best_edge_id);
  // Select sister: the parent's leafward neighbors on the opposite clade.
  best_edge_id = GetBestEdgeIdByHighestPriorityTree(
      edge.GetParent(), Direction::Leafward, Bitset::Opposite(edge.GetSubsplitClade()));
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::Sister, best_edge_id);
  // Select left child.
  best_edge_id = GetBestEdgeIdByHighestPriorityTree(
      edge.GetChild(), Direction::Leafward, SubsplitClade::Left);
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::LeftChild, best_edge_id);
  // Select right child.
  best_edge_id = GetBestEdgeIdByHighestPriorityTree(
      edge.GetChild(), Direction::Leafward, SubsplitClade::Right);
  GetChoiceMap().SetEdgeChoice(edge_id, EdgeAdjacent::RightChild, best_edge_id);
}
void TPEngine::UpdateEdgeChoiceByTakingHighestScoringTree(const EdgeId edge_id) {}
// Assigns each DAG edge a tree source: the (1-based) id of the *first* tree in
// the collection containing that edge. Edges in no tree keep the sentinel
// tree_id_max. Rootsplit edges inherit the best (smallest) source among their
// children, since they are not covered by the edge indexer traversal.
void TPEngine::SetTreeSourceByTakingFirst(const RootedTreeCollection &tree_collection,
                                          const BitsetSizeMap &edge_indexer) {
  input_tree_count_ = tree_collection.TreeCount();
  tree_counter_ = input_tree_count_ + 1;
  const auto tree_id_max = TreeId(input_tree_count_ + 1);
  GetTreeSource().resize(GetEdgeCount(), tree_id_max);
  std::fill(GetTreeSource().begin(), GetTreeSource().end(), tree_id_max);
  // Set tree source map for each edge in DAG. Unused parameters are required
  // by the FunctionOverRootedTreeCollection callback signature.
  auto set_tree_source = [tree_id_max, this](
                             const EdgeId edge_id, const Bitset &edge_bitset,
                             const RootedTree &tree, const size_t tree_id,
                             const Node *focal_node) {
    // Only assign on first sighting — earlier trees take precedence.
    if (GetTreeSource(edge_id) == tree_id_max) {
      GetTreeSource(edge_id) = TreeId(tree_id + 1);
    }
  };
  RootedSBNMaps::FunctionOverRootedTreeCollection(set_tree_source, tree_collection,
                                                  edge_indexer, NoId);
  // Set tree source map for rootsplit edges from the best tree.
  const auto root_node = GetDAG().GetDAGNode(GetDAG().GetDAGRootNodeId());
  for (const auto rootsplit_node_id : GetDAG().GetRootsplitNodeIds()) {
    const auto rootsplit_node = GetDAG().GetDAGNode(rootsplit_node_id);
    const auto rootsplit_edge_id =
        GetDAG().GetEdgeIdx(root_node.Id(), rootsplit_node.Id());
    TreeId best_tree_source = tree_id_max;
    GetDAG().IterateOverLeafwardEdges(
        rootsplit_node, [this, &rootsplit_node, &rootsplit_edge_id, &best_tree_source](
                            bool is_edge_on_left, SubsplitDAGNode child_node) {
          const auto edge_id =
              GetDAG().GetEdgeIdx(rootsplit_node.Id(), child_node.Id());
          if (best_tree_source > GetTreeSource(edge_id)) {
            best_tree_source = GetTreeSource()[edge_id.value_];
            GetTreeSource(rootsplit_edge_id) = best_tree_source;
          }
        });
  }
}
// Initializes tree sources from the collection (first occurrence wins), then
// builds the choice map by one of two heuristics: the subsplit method chooses
// each edge's neighbors from tree-source priorities; the PCSP method walks
// each source tree's topology and records the actual adjacent edges
// (grandparent, sister, grandchildren) around every edge the tree sourced.
void TPEngine::SetChoiceMapByTakingFirst(const RootedTreeCollection &tree_collection,
                                         const BitsetSizeMap &edge_indexer,
                                         const bool use_subsplit_method) {
  // First, update tree sources over tree by taking first occurance of tree.
  SetTreeSourceByTakingFirst(tree_collection, edge_indexer);
  // Use Subsplit Heuristic.
  if (use_subsplit_method) {
    // Assign choice map by choosing adjacent edges from tree source.
    for (EdgeId edge_id = 0; edge_id < GetEdgeCount(); edge_id++) {
      UpdateEdgeChoiceByTakingHighestPriorityTree(edge_id);
    }
  }
  // Use PCSP Heuristic.
  else {
    // Build maps so we can traverse trees rootward.
    std::unordered_map<size_t, std::unordered_map<size_t, const Node *>>
        all_parent_maps;
    size_t tree_id = 0;
    for (const auto &tree : tree_collection) {
      auto parent_map = tree.Topology()->BuildParentNodeMap();
      all_parent_maps[tree_id] = parent_map;
      tree_id++;
    }
    // Records the choice map of the central (parent, child) edge from the
    // surrounding tree nodes; grandparent/grandchildren may be null at the
    // root/leaves respectively.
    auto SetEdgeChoice = [this](const Node *grandparent, const Node *parent,
                                const Node *sister, const Node *child,
                                const Node *grandchild0, const Node *grandchild1) {
      // Find central edge and get
      const auto parent_node_id = GetDAG().GetDAGNodeId(parent->BuildSubsplit());
      const auto child_node_id = GetDAG().GetDAGNodeId(child->BuildSubsplit());
      const auto central_edge_id = GetDAG().GetEdgeIdx(parent_node_id, child_node_id);
      auto &edge_choice = GetChoiceMap(central_edge_id);
      // Assign sister edge.
      const auto sister_node_id = GetDAG().GetDAGNodeId(sister->BuildSubsplit());
      const auto sister_edge_id = GetDAG().GetEdgeIdx(parent_node_id, sister_node_id);
      edge_choice.sister = sister_edge_id;
      // Assign parent edge if parent node is not root.
      if (grandparent) {
        const auto grandparent_node_id =
            GetDAG().GetDAGNodeId(grandparent->BuildSubsplit());
        const auto parent_edge_id =
            GetDAG().GetEdgeIdx(grandparent_node_id, parent_node_id);
        edge_choice.parent = parent_edge_id;
      }
      // Assign child edges if child node is not leaf.
      if (grandchild0) {
        const auto grandchild0_node_id =
            GetDAG().GetDAGNodeId(grandchild0->BuildSubsplit());
        const auto grandchild1_node_id =
            GetDAG().GetDAGNodeId(grandchild1->BuildSubsplit());
        const auto child0_edge_id =
            GetDAG().GetEdgeIdx(child_node_id, grandchild0_node_id);
        const auto child1_edge_id =
            GetDAG().GetEdgeIdx(child_node_id, grandchild1_node_id);
        // Orient the two grandchild edges into left/right by focal clade.
        const auto clade0 = GetDAG().GetFocalClade(child0_edge_id);
        edge_choice.left_child =
            (clade0 == SubsplitClade::Left) ? child0_edge_id : child1_edge_id;
        edge_choice.right_child =
            (clade0 == SubsplitClade::Left) ? child1_edge_id : child0_edge_id;
      }
    };
    auto FuncOnNeighboringNodes = [this, &all_parent_maps, &SetEdgeChoice](
                                      const EdgeId edge_id, const Bitset &edge_bitset,
                                      const RootedTree &tree, const size_t tree_id,
                                      const Node *node) {
      // Only set edge choices if this is the tree source assigned to edge.
      const Node *grandparent_node = nullptr;
      const Node *parent_node = nullptr;
      const Node *sister_node = nullptr;
      const Node *child_node = node;
      const Node *grandchild0_node = nullptr;
      const Node *grandchild1_node = nullptr;
      auto &parent_map = all_parent_maps[tree_id];
      // Only continue if tree source of central edge is from tree.
      if (GetTreeSource(edge_id) != tree_id + 1) {
        return;
      }
      // Find parent node. Ignore current node if child is a root node.
      if (parent_map.find(child_node->Id()) == parent_map.end()) {
        return;
      }
      parent_node = parent_map[child_node->Id()];
      // Find grandparent node if exists.
      if (parent_map.find(parent_node->Id()) != parent_map.end()) {
        grandparent_node = parent_map[parent_node->Id()];
      }
      // Find sister node: the parent's other child.
      for (const auto &childx_node : parent_node->Children()) {
        if (childx_node->Id() != child_node->Id()) {
          sister_node = childx_node.get();
        }
      }
      // Find grandchild nodes.
      grandchild0_node = child_node->Children()[0].get();
      grandchild1_node = child_node->Children()[1].get();
      SetEdgeChoice(grandparent_node, parent_node, sister_node, child_node,
                    grandchild0_node, grandchild1_node);
    };
    RootedSBNMaps::FunctionOverRootedTreeCollection(
        FuncOnNeighboringNodes, tree_collection, edge_indexer, NoId);
  }
}
// ** Proposed NNIs
// Among the NNIs neighboring the given NNI that already exist in the DAG,
// returns the one whose central edge has the highest-priority (smallest)
// tree source. Asserts at least one neighbor exists; warns on stderr when
// both neighbors tie, since the choice is then ambiguous.
NNIOperation TPEngine::FindHighestPriorityNeighborNNIInDAG(
    const NNIOperation &nni) const {
  // Select pre-NNI by taking one with the highest priority in ChoiceMap.
  SubsplitCladeEnum::Array<TreeId> tree_ids;
  TreeId best_tree_id = TreeId(NoId);
  NNIOperation best_pre_nni;
  auto pre_nnis = GetDAG().FindAllNNINeighborsInDAG(nni);
  for (auto clade : SubsplitCladeEnum::Iterator()) {
    const auto &pre_nni = pre_nnis[clade];
    if (!pre_nni.has_value()) continue;
    const auto edge_id = GetDAG().GetEdgeIdx(pre_nni.value());
    const auto tree_id = GetTreeSource(edge_id);
    tree_ids[clade] = tree_id;
    // Lower TreeId means higher priority.
    if ((best_tree_id == NoId) or (tree_id < best_tree_id)) {
      best_tree_id = tree_id;
      best_pre_nni = pre_nni.value();
    }
  }
  Assert(pre_nnis[SubsplitClade::Left].has_value() or
             pre_nnis[SubsplitClade::Right].has_value(),
         "DAG does not contain a neighboring NNI to given NNI.");
  if (pre_nnis[SubsplitClade::Left].has_value() and
      pre_nnis[SubsplitClade::Right].has_value()) {
    if (tree_ids[SubsplitClade::Left] == tree_ids[SubsplitClade::Right]) {
      std::cerr
          << "WARNING: Best pre-NNI is ambiguous. Both pre-NNIs have equal priority."
          << std::endl;
    }
  }
  return best_pre_nni;
}
// For the given node, returns the (parent, left child, right child) edge ids
// whose tree sources have the highest priority (smallest TreeId) among that
// node's adjacent edges. Slots without a qualifying edge come back as NoId.
std::tuple<EdgeId, EdgeId, EdgeId> TPEngine::FindHighestPriorityAdjacentNodeId(
    const NodeId node_id) const {
  // Scans one (direction, clade) side of a node, skipping leaf-subsplit edges
  // beyond the DAG's edge count, and returns the best {tree_id, edge_id} pair.
  auto GetHighestPriorityAdjacentNodeId =
      [this](const SubsplitDAGNode &node, Direction dir,
             SubsplitClade clade) -> std::pair<TreeId, EdgeId> {
    TreeId best_tree_id = TreeId(NoId);
    EdgeId best_edge_id = EdgeId(NoId);
    auto view = node.GetNeighbors(dir, clade);
    for (auto it = view.begin(); it != view.end(); ++it) {
      auto adj_edge_id = it.GetEdgeId();
      if (adj_edge_id >= GetDAG().EdgeCountWithLeafSubsplits()) continue;
      auto adj_tree_id = GetTreeSource(adj_edge_id);
      if ((best_tree_id == NoId) or (best_tree_id > adj_tree_id)) {
        best_tree_id = adj_tree_id;
        best_edge_id = adj_edge_id;
      }
    }
    return {best_tree_id, best_edge_id};
  };
  const auto node = GetDAG().GetDAGNode(node_id);
  // Find parent: pick the better of the two rootward clades.
  EdgeId parent_edge_id;
  auto [left_parent_tree_id, left_parent_edge_id] =
      GetHighestPriorityAdjacentNodeId(node, Direction::Rootward, SubsplitClade::Left);
  auto [right_parent_tree_id, right_parent_edge_id] =
      GetHighestPriorityAdjacentNodeId(node, Direction::Rootward, SubsplitClade::Right);
  if ((left_parent_tree_id != NoId) and (left_parent_tree_id < right_parent_tree_id)) {
    parent_edge_id = left_parent_edge_id;
  } else {
    parent_edge_id = right_parent_edge_id;
  }
  // Find left child. (Tree ids of the child lookups are currently unused.)
  auto [left_child_tree_id, left_child_edge_id] =
      GetHighestPriorityAdjacentNodeId(node, Direction::Leafward, SubsplitClade::Left);
  // Find right child.
  auto [right_child_tree_id, right_child_edge_id] =
      GetHighestPriorityAdjacentNodeId(node, Direction::Leafward, SubsplitClade::Right);
  return {parent_edge_id, left_child_edge_id, right_child_edge_id};
}
std::unordered_map<EdgeId, EdgeId> TPEngine::BuildAdjacentEdgeMapFromPostNNIToPreNNI(
const NNIOperation &pre_nni, const NNIOperation &post_nni) const {
using NNIClade = NNIOperation::NNIClade;
using Adj = std::pair<NodeId, Direction>;
using AdjMap = NNIOperation::NNICladeEnum::Array<Adj>;
std::unordered_map<EdgeId, EdgeId> edge_map;
const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
const auto &pre_edge = GetDAG().GetDAGEdge(pre_edge_id);
const auto post_edge_id = GetDAG().GetEdgeIdx(post_nni);
const auto &post_edge = GetDAG().GetDAGEdge(post_edge_id);
AdjMap pre_adj_map_1, pre_adj_map_2, post_adj_map;
// Base maps for adjacent edges.
for (const auto &edge_id : {pre_edge_id, post_edge_id}) {
const auto &edge = (edge_id == pre_edge_id) ? pre_edge : post_edge;
auto &adj_map = (edge_id == pre_edge_id) ? pre_adj_map_1 : post_adj_map;
adj_map[NNIClade::ParentFocal] = {edge.GetParent(), Direction::Rootward};
adj_map[NNIClade::ParentSister] = {edge.GetParent(), Direction::Leafward};
adj_map[NNIClade::ChildLeft] = {edge.GetChild(), Direction::Leafward};
adj_map[NNIClade::ChildRight] = {edge.GetChild(), Direction::Leafward};
}
// Map central edge.
edge_map[post_edge_id] = pre_edge_id;
// Map adjacent edges.
for (const auto post_node_dir : {Direction::Rootward, Direction::Leafward}) {
const auto post_node_id = (post_node_dir == Direction::Rootward)
? post_edge.GetParent()
: post_edge.GetChild();
const auto &post_node = GetDAG().GetDAGNode(post_node_id);
for (const auto pre_node_dir : {Direction::Rootward, Direction::Leafward}) {
const auto pre_node_id = (pre_node_dir == Direction::Rootward)
? pre_edge.GetParent()
: pre_edge.GetChild();
// Iterate over all edges adjacent to post-NNI nodes.
for (const auto adj_node_dir : {Direction::Rootward, Direction::Leafward}) {
for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
for (const auto adj_node_id : post_node.GetNeighbors(adj_node_dir, clade)) {
// Get adjacent post-NNI edge id.
const auto post_parent_node_id =
(adj_node_dir == Direction::Rootward) ? adj_node_id : post_node_id;
const auto post_child_node_id =
(adj_node_dir == Direction::Rootward) ? post_node_id : adj_node_id;
const auto adj_post_edge_id =
GetDAG().GetEdgeIdx(post_parent_node_id, post_child_node_id);
// Get matching adjacent pre-NNI edge id, if a match exists.
const auto pre_parent_node_id =
(adj_node_dir == Direction::Rootward) ? adj_node_id : pre_node_id;
const auto pre_child_node_id =
(adj_node_dir == Direction::Rootward) ? pre_node_id : adj_node_id;
// If match exists, add it to the map.
if (GetDAG().ContainsEdge(pre_parent_node_id, pre_child_node_id)) {
const auto adj_pre_edge_id =
GetDAG().GetEdgeIdx(pre_parent_node_id, pre_child_node_id);
edge_map[adj_post_edge_id] = adj_pre_edge_id;
}
}
}
}
}
}
return edge_map;
}
// Rearranges a pre-NNI edge choice into the post-NNI arrangement: the pre-NNI
// choices are laid out by clade, then read back through `clade_map` so each
// post-NNI slot receives the choice of the pre-NNI clade it corresponds to.
TPChoiceMap::EdgeChoice TPEngine::RemapEdgeChoiceFromPreNNIToPostNNI(
    const TPChoiceMap::EdgeChoice &choice_in,
    const NNIOperation::NNICladeArray &clade_map) const {
  using NNIClade = NNIOperation::NNIClade;
  using NNICladeEnum = NNIOperation::NNICladeEnum;
  NNICladeEnum::Array<EdgeId> by_clade;
  by_clade[NNIClade::ParentFocal] = choice_in.parent;
  by_clade[NNIClade::ParentSister] = choice_in.sister;
  by_clade[NNIClade::ChildLeft] = choice_in.left_child;
  by_clade[NNIClade::ChildRight] = choice_in.right_child;
  // Look up the pre-NNI choice that lands in the given post-NNI clade.
  auto remapped = [&by_clade, &clade_map](const NNIClade clade) {
    return by_clade[clade_map[clade]];
  };
  TPChoiceMap::EdgeChoice choice_out;
  choice_out.parent = remapped(NNIClade::ParentFocal);
  choice_out.sister = remapped(NNIClade::ParentSister);
  choice_out.left_child = remapped(NNIClade::ChildLeft);
  choice_out.right_child = remapped(NNIClade::ChildRight);
  return choice_out;
}
// Builds an edge choice for the post-NNI by remapping the pre-NNI's stored
// choice through the clade correspondence between the two NNIs, then resolving
// the chosen nodes back to edges incident to the post-NNI's parent/child
// nodes.  Entries resolve to EdgeId(NoId) when the underlying node id is
// missing.  Both NNIs must already exist in the DAG.
TPChoiceMap::EdgeChoice TPEngine::GetRemappedEdgeChoiceFromPreNNIToPostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) const {
  TPChoiceMap::EdgeChoice mapped_choice;
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
  const auto post_edge_id = GetDAG().GetEdgeIdx(post_nni);
  const auto &post_edge = GetDAG().GetDAGEdge(post_edge_id);
  const auto pre_choice = GetChoiceMap().GetEdgeChoice(pre_edge_id);
  // Clade correspondence mapping post-NNI clades onto pre-NNI clades.
  const auto clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(post_nni, pre_nni);
  const auto post_choice = RemapEdgeChoiceFromPreNNIToPostNNI(pre_choice, clade_map);
  const auto node_choice = GetChoiceMap().GetEdgeChoiceNodeIds(post_choice);
  // Find common nodes between pre-NNI and post-NNI. Then use them to
  // find edges that go to the common nodes in post-NNI.
  auto GetEdgeId = [this](const NodeId parent_node_id, const NodeId child_node_id) {
    // Propagate "no choice" rather than querying the DAG with an invalid id.
    if ((parent_node_id == NoId) || (child_node_id == NoId)) {
      return EdgeId(NoId);
    }
    return GetDAG().GetEdgeIdx(parent_node_id, child_node_id);
  };
  mapped_choice.parent = GetEdgeId(node_choice.parent, post_edge.GetParent());
  mapped_choice.sister = GetEdgeId(post_edge.GetParent(), node_choice.sister);
  mapped_choice.left_child = GetEdgeId(post_edge.GetChild(), node_choice.left_child);
  mapped_choice.right_child = GetEdgeId(post_edge.GetChild(), node_choice.right_child);
  return mapped_choice;
}
// Estimates a branch length for the (possibly hypothetical) edge between
// parent_node_id and child_node_id.  If that edge already exists in the DAG
// and predates the most recent modification, its stored length is returned
// directly; otherwise the average length of all "old" edges adjacent to
// either endpoint is returned, falling back to the handler's default length
// when no such edge exists.
// NOTE(review): prev_node_count and node_reindexer are accepted but never
// read -- presumably kept for signature symmetry with related update
// methods; confirm.
double TPEngine::GetAvgLengthOfAdjEdges(
    const NodeId parent_node_id, const NodeId child_node_id,
    const std::optional<size_t> prev_node_count,
    const std::optional<Reindexer> node_reindexer,
    const std::optional<size_t> prev_edge_count,
    const std::optional<Reindexer> edge_reindexer) const {
  double total_branch_length = 0.0;
  double branch_count = 0;
  // Checks if edge was in previous version of DAG.  With no prev_edge_count
  // every edge counts as old.
  auto IsEdgeOld = [this, &prev_edge_count, &edge_reindexer](const EdgeId edge_id) {
    if (!prev_edge_count.has_value()) {
      return true;
    }
    if (edge_reindexer->GetOldIndexByNewIndex(edge_id.value_) <
        prev_edge_count.value()) {
      return true;
    }
    return false;
  };
  // If an old edge exists, then use that branch length.
  if (GetDAG().ContainsEdge(parent_node_id, child_node_id)) {
    const auto edge_id = GetDAG().GetEdgeIdx(parent_node_id, child_node_id);
    if (IsEdgeOld(edge_id)) {
      return GetLikelihoodEvalEngine().GetDAGBranchHandler()(edge_id);
    }
  }
  // Otherwise, iterate over alternate edges from parent and child and take average of
  // all neighboring branches.
  for (const auto node_dir : {Direction::Rootward, Direction::Leafward}) {
    const auto node_id =
        (node_dir == Direction::Rootward) ? parent_node_id : child_node_id;
    const auto other_node_id =
        (node_dir == Direction::Rootward) ? child_node_id : parent_node_id;
    // Either endpoint may be a proposed node not yet in the DAG.
    if (!GetDAG().ContainsNode(node_id)) {
      continue;
    }
    const auto &node = GetDAG().GetDAGNode(node_id);
    // Look away from the candidate edge: leafward from the parent, rootward
    // from the child.
    const auto adj_node_dir =
        (node_dir == Direction::Rootward) ? Direction::Leafward : Direction::Rootward;
    for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
      for (const auto adj_node_id : node.GetNeighbors(adj_node_dir, clade)) {
        // Skip the candidate edge itself.
        if (adj_node_id == other_node_id) {
          continue;
        }
        const auto parent_id =
            (adj_node_dir == Direction::Rootward) ? adj_node_id : node_id;
        const auto child_id =
            (adj_node_dir == Direction::Rootward) ? node_id : adj_node_id;
        const auto edge_id = GetDAG().GetEdgeIdx(parent_id, child_id);
        if (IsEdgeOld(edge_id)) {
          branch_count += 1;
          total_branch_length +=
              GetLikelihoodEvalEngine().GetDAGBranchHandler()(edge_id);
        }
      }
    }
  }
  // No usable neighboring branches: fall back to the default length.
  if (branch_count == 0) {
    return GetLikelihoodEvalEngine().GetDAGBranchHandler().GetDefaultBranchLength();
  }
  return total_branch_length / branch_count;
}
// For every proposed post-NNI, record its best (highest-priority) neighboring
// pre-NNI already present in the DAG.
NNINNIMap TPEngine::BuildMapOfProposedNNIsToBestPreNNIs(const NNISet &post_nnis) const {
  NNINNIMap best_pre_nnis;
  for (const auto &proposed_nni : post_nnis) {
    best_pre_nnis.insert(
        {proposed_nni, FindHighestPriorityNeighborNNIInDAG(proposed_nni)});
  }
  return best_pre_nnis;
}
// For every PCSP adjacent to any proposed post-NNI, picks the best reference
// edge among the corresponding pre-NNI edges.  A PCSP that already exists in
// the DAG (and predates the current iteration) references itself with top
// priority (TreeId(0)); otherwise the pre-NNI edge with the smallest source
// tree id wins.  The same PCSP may be produced by multiple post-NNIs, hence
// the tie-breaking.
BitsetEdgeIdMap TPEngine::BuildMapOfProposedNNIPCSPsToBestPreNNIEdges(
    const NNISet &post_nnis, std::optional<const size_t> prev_edge_count,
    std::optional<const Reindexer> edge_reindexer) const {
  // Stores the best pre-NNI reference edge.
  std::unordered_map<Bitset, EdgeId> best_edge_ids;
  // Stores the best edge's tree id.
  std::unordered_map<Bitset, TreeId> best_tree_ids;
  // Checks if edge was added by most recent iteration.  With no
  // prev_edge_count every edge counts as old.
  auto IsEdgeOld = [this, &prev_edge_count, &edge_reindexer](const EdgeId edge_id) {
    if (!prev_edge_count.has_value()) {
      return true;
    }
    if (edge_reindexer->GetOldIndexByNewIndex(edge_id.value_) <
        prev_edge_count.value()) {
      return true;
    }
    return false;
  };
  // Update map if better edge is found.
  auto AssignBestReferenceEdge = [this, &best_edge_ids, &best_tree_ids, &IsEdgeOld](
                                     const Bitset &pcsp,
                                     const EdgeId proposed_reference_edge_id) {
    // If pcsp is in DAG and was not added in current iteration, reference itself with
    // the highest priority,
    if (GetDAG().ContainsEdge(pcsp)) {
      const auto edge_id = GetDAG().GetEdgeIdx(pcsp);
      if (IsEdgeOld(edge_id)) {
        best_edge_ids[pcsp] = edge_id;
        best_tree_ids[pcsp] = TreeId(0);
      }
    }
    // Otherwise, if edge_id has not been seen yet or current reference edge has a
    // higher tree_id, then update.
    // (Note: this check follows the self-reference case above, which records
    // TreeId(0) so a proposed edge cannot displace it here.)
    if ((best_edge_ids.find(pcsp) == best_edge_ids.end()) ||
        (best_tree_ids[pcsp] > GetTreeSource(proposed_reference_edge_id))) {
      best_edge_ids[pcsp] = proposed_reference_edge_id;
      best_tree_ids[pcsp] = GetTreeSource(proposed_reference_edge_id);
    }
  };
  // Iterate over NNIs to find best branches.
  for (const auto &post_nni : post_nnis) {
    // Get pre_nni and build map between them.
    const auto pre_nni = FindHighestPriorityNeighborNNIInDAG(post_nni);
    // For each edge, build post-PCSP by taking the pre-NNIs choicemap PCSPs, then
    // join them with the post-NNI parent or child PCSPs. Finally, assigns the best
    // edge. Because the same post-PCSP can possibly come from multiple post-NNIs, we
    // take the edge with the highest tree priority.
    const auto pcsps = BuildAdjacentPCSPsFromPreNNIToPostNNI(pre_nni, post_nni);
    // Parent edge.
    const auto &[parent_pcsp, parent_edgeid] = pcsps.parent;
    AssignBestReferenceEdge(parent_pcsp, parent_edgeid);
    // Sister edge.
    const auto &[sister_pcsp, sister_edgeid] = pcsps.sister;
    AssignBestReferenceEdge(sister_pcsp, sister_edgeid);
    // Central edge.
    const auto &[central_pcsp, central_edgeid] = pcsps.focal;
    AssignBestReferenceEdge(central_pcsp, central_edgeid);
    // LeftChild edge.
    const auto &[leftchild_pcsp, leftchild_edgeid] = pcsps.left_child;
    AssignBestReferenceEdge(leftchild_pcsp, leftchild_edgeid);
    // RightChild edge.
    const auto &[rightchild_pcsp, rightchild_edgeid] = pcsps.right_child;
    AssignBestReferenceEdge(rightchild_pcsp, rightchild_edgeid);
  }
  return best_edge_ids;
}
// Translates the PCSP -> reference-EdgeId map into a PCSP -> PCSP map by
// resolving each reference edge id to its PCSP bitset in the DAG.
BitsetBitsetMap TPEngine::BuildMapOfProposedNNIPCSPsToBestPreNNIPCSPs(
    const NNISet &post_nnis, std::optional<const size_t> prev_edge_count,
    std::optional<const Reindexer> edge_reindexer) const {
  const auto pcsp_to_edge = BuildMapOfProposedNNIPCSPsToBestPreNNIEdges(
      post_nnis, prev_edge_count, edge_reindexer);
  BitsetBitsetMap pcsp_to_pcsp;
  for (const auto &[post_pcsp, pre_edge_id] : pcsp_to_edge) {
    pcsp_to_pcsp.insert({post_pcsp, GetDAG().GetDAGEdgeBitset(pre_edge_id)});
  }
  return pcsp_to_pcsp;
}
// Constructs the five PCSPs that would surround the post-NNI's central edge
// (parent, sister, central, left child, right child), each paired with the
// pre-NNI edge that serves as its reference.  The adjacent subsplits are
// taken from the pre-NNI's choice map, remapped into the post-NNI clade
// arrangement.
NNIAdjBitsetEdgeIdMap TPEngine::BuildAdjacentPCSPsFromPreNNIToPostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) const {
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
  // Clade correspondence mapping post-NNI clades onto pre-NNI clades.
  const auto rev_clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(post_nni, pre_nni);
  // Get edge choice map from pre-NNI in DAG, then remap according to post-NNI.
  const auto pre_choice = GetChoiceMap(pre_edge_id);
  const auto mapped_pre_choice =
      RemapEdgeChoiceFromPreNNIToPostNNI(pre_choice, rev_clade_map);
  const auto adj_node_ids = GetChoiceMap().GetEdgeChoiceNodeIds(mapped_pre_choice);
  // We should not reach this state.
  if ((adj_node_ids.parent == NoId) and (adj_node_ids.sister == NoId) and
      (adj_node_ids.left_child == NoId) and (adj_node_ids.right_child == NoId)) {
    std::cerr << "WARNING: Pre-NNI has an invalid choice map -- " << post_nni
              << std::endl;
  }
  // Parent pcsp.
  const auto &parent_subsplit = GetDAG().GetDAGNodeBitset(adj_node_ids.parent);
  const auto parent_pcsp = Bitset::PCSP(parent_subsplit, post_nni.GetParent());
  // Sister pcsp.
  const auto &sister_subsplit = GetDAG().GetDAGNodeBitset(adj_node_ids.sister);
  const auto sister_pcsp = Bitset::PCSP(post_nni.GetParent(), sister_subsplit);
  // Central pcsp.
  const auto central_pcsp = Bitset::PCSP(post_nni.GetParent(), post_nni.GetChild());
  // Left Child pcsp.
  const auto &leftchild_subsplit = GetDAG().GetDAGNodeBitset(adj_node_ids.left_child);
  const auto leftchild_pcsp = Bitset::PCSP(post_nni.GetChild(), leftchild_subsplit);
  // Right Child pcsp.
  const auto &rightchild_subsplit = GetDAG().GetDAGNodeBitset(adj_node_ids.right_child);
  const auto rightchild_pcsp = Bitset::PCSP(post_nni.GetChild(), rightchild_subsplit);
  // Pair each PCSP with its reference edge; the central PCSP references the
  // pre-NNI's central edge itself.
  NNIAdjBitsetEdgeIdMap adj_pcsps{{parent_pcsp, mapped_pre_choice.parent},
                                  {sister_pcsp, mapped_pre_choice.sister},
                                  {central_pcsp, pre_edge_id},
                                  {leftchild_pcsp, mapped_pre_choice.left_child},
                                  {rightchild_pcsp, mapped_pre_choice.right_child}};
  return adj_pcsps;
}
// For each of the four NNI clades, builds a pair of node ids drawn from the
// pre-NNI's choice map: {node chosen for this clade, node chosen for the
// clade it corresponds to under the pre->post clade map}.
TPChoiceMap::EdgeChoiceNodeIdMap TPEngine::BuildAdjacentNodeIdMapFromPreNNIToPostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) const {
  TPChoiceMap::EdgeChoiceNodeIdMap node_id_map;
  const auto pre_edge_id = GetDAG().GetEdgeIdx(pre_nni);
  // Clade correspondence from pre-NNI clades to post-NNI clades.
  const auto clade_map =
      NNIOperation::BuildNNICladeMapFromPreNNIToNNI(pre_nni, post_nni);
  // Node ids chosen for the pre-NNI's central edge, indexed by clade.
  const auto node_ids = GetChoiceMap().GetEdgeChoiceNodeIds(pre_edge_id);
  node_id_map.parent = {node_ids[NNIClade::ParentFocal],
                        node_ids[clade_map[NNIClade::ParentFocal]]};
  node_id_map.sister = {node_ids[NNIClade::ParentSister],
                        node_ids[clade_map[NNIClade::ParentSister]]};
  node_id_map.left_child = {node_ids[NNIClade::ChildLeft],
                            node_ids[clade_map[NNIClade::ChildLeft]]};
  node_id_map.right_child = {node_ids[NNIClade::ChildRight],
                             node_ids[clade_map[NNIClade::ChildRight]]};
  return node_id_map;
}
// Builds, for each adjacent position (parent/sister/left child/right child),
// a {pre-NNI PCSP, post-NNI PCSP} pair by joining the adjacent subsplits from
// the pre-NNI's choice map with the pre- and post-NNI parent/child subsplits.
// NOTE(review): both the pre-* and post-* subsplits below read node_ids.*.first;
// since BuildAdjacentNodeIdMapFromPreNNIToPostNNI returns {pre, remapped}
// pairs, the post-side lookups may have been intended to use .second --
// confirm intended behavior.
TPChoiceMap::EdgeChoicePCSPMap TPEngine::BuildAdjacentPCSPMapFromPreNNIToPostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) const {
  // Initialize all pairs to empty bitsets.
  TPChoiceMap::EdgeChoicePCSPMap pcsps{{Bitset::EmptyBitset(), Bitset::EmptyBitset()},
                                       {Bitset::EmptyBitset(), Bitset::EmptyBitset()},
                                       {Bitset::EmptyBitset(), Bitset::EmptyBitset()},
                                       {Bitset::EmptyBitset(), Bitset::EmptyBitset()}};
  const auto node_ids = BuildAdjacentNodeIdMapFromPreNNIToPostNNI(pre_nni, post_nni);
  // Post-NNI side (stored in the .second slot of each pair).
  const auto post_parent = post_nni.GetParent();
  const auto post_child = post_nni.GetChild();
  const auto post_grandparent = GetDAG().GetDAGNodeBitset(node_ids.parent.first);
  pcsps.parent.second = Bitset::PCSP(post_grandparent, post_parent);
  const auto post_sister = GetDAG().GetDAGNodeBitset(node_ids.sister.first);
  pcsps.sister.second = Bitset::PCSP(post_parent, post_sister);
  const auto post_left_grandchild =
      GetDAG().GetDAGNodeBitset(node_ids.left_child.first);
  pcsps.left_child.second = Bitset::PCSP(post_child, post_left_grandchild);
  const auto post_right_grandchild =
      GetDAG().GetDAGNodeBitset(node_ids.right_child.first);
  pcsps.right_child.second = Bitset::PCSP(post_child, post_right_grandchild);
  // Pre-NNI side (stored in the .first slot of each pair).
  const auto pre_parent = pre_nni.GetParent();
  const auto pre_child = pre_nni.GetChild();
  const auto pre_grandparent = GetDAG().GetDAGNodeBitset(node_ids.parent.first);
  pcsps.parent.first = Bitset::PCSP(pre_grandparent, pre_parent);
  const auto pre_sister = GetDAG().GetDAGNodeBitset(node_ids.sister.first);
  pcsps.sister.first = Bitset::PCSP(pre_parent, pre_sister);
  const auto pre_left_grandchild = GetDAG().GetDAGNodeBitset(node_ids.left_child.first);
  pcsps.left_child.first = Bitset::PCSP(pre_child, pre_left_grandchild);
  const auto pre_right_grandchild =
      GetDAG().GetDAGNodeBitset(node_ids.right_child.first);
  pcsps.right_child.first = Bitset::PCSP(pre_child, pre_right_grandchild);
  return pcsps;
}
// Delegates to the choice map, which knows the chosen adjacent PCSPs for
// every edge.
TPEngine::PCSPToPCSPsMap TPEngine::BuildMapFromPCSPToEdgeChoicePCSPs() const {
  const auto &choice_map = GetChoiceMap();
  return choice_map.BuildPCSPMap();
}
// Maps each edge's PCSP bitset to the hash strings of all of its partial
// vectors.  The PVs live in the likelihood engine, so it must be in use.
TPEngine::PCSPToPVHashesMap TPEngine::BuildMapFromPCSPToPVHashes() const {
  if (!eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine]) {
    Failwith("ERROR: must use likelihood_eval_engine.");
  }
  auto &pvs = GetLikelihoodEvalEngine().GetPVs();
  PCSPToPVHashesMap pcsp_pvhash_map;
  for (EdgeId edge_id{0}; edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    // Collect one hash per PV type for this edge.
    std::vector<std::string> pv_hashes;
    for (const auto pv_type : PLVTypeEnum::Iterator()) {
      pv_hashes.push_back(pvs.ToHashString(pvs.GetPVIndex(pv_type, edge_id), 5));
    }
    pcsp_pvhash_map[GetDAG().GetDAGEdgeBitset(edge_id)] = std::move(pv_hashes);
  }
  return pcsp_pvhash_map;
}
// Maps each edge's PCSP bitset to the raw values of all of its partial
// vectors.  The PVs live in the likelihood engine, so it must be in use.
TPEngine::PCSPToPVValuesMap TPEngine::BuildMapFromPCSPToPVValues() const {
  if (!eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine]) {
    Failwith("ERROR: must use likelihood_eval_engine.");
  }
  const auto &pvs = GetLikelihoodEvalEngine().GetPVs();
  PCSPToPVValuesMap pcsp_pvval_map;
  for (EdgeId edge_id{0}; edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    // Collect one value vector per PV type for this edge.
    std::vector<DoubleVector> pv_values;
    for (const auto pv_type : PLVTypeEnum::Iterator()) {
      pv_values.push_back(pvs.ToDoubleVector(pvs.GetPVIndex(pv_type, edge_id)));
    }
    pcsp_pvval_map[GetDAG().GetDAGEdgeBitset(edge_id)] = std::move(pv_values);
  }
  return pcsp_pvval_map;
}
// Maps each edge's PCSP bitset to its current branch length.  Branch lengths
// are held by the likelihood engine's branch handler, so it must be in use.
TPEngine::PCSPToBranchLengthMap TPEngine::BuildMapFromPCSPToBranchLength() const {
  if (!eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine]) {
    Failwith("ERROR: must use likelihood_eval_engine.");
  }
  PCSPToBranchLengthMap pcsp_bl_map;
  const auto &bl_handler = GetLikelihoodEvalEngine().GetDAGBranchHandler();
  for (EdgeId edge_id{0}; edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    pcsp_bl_map[GetDAG().GetDAGEdgeBitset(edge_id)] = bl_handler.Get(edge_id);
  }
  return pcsp_bl_map;
}
// Maps each edge's PCSP bitset to its top-tree score, optionally recomputing
// scores first.  Scores come from the likelihood engine, so it must be in use.
TPEngine::PCSPToScoreMap TPEngine::BuildMapFromPCSPToScore(
    const bool recompute_scores) {
  if (!eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine]) {
    Failwith("ERROR: must use likelihood_eval_engine.");
  }
  // Refresh the scores before reading them out, if requested.
  if (recompute_scores) {
    GetLikelihoodEvalEngine().ComputeScores();
  }
  PCSPToScoreMap pcsp_score_map;
  for (EdgeId edge_id{0}; edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    pcsp_score_map[GetDAG().GetDAGEdgeBitset(edge_id)] = GetTopTreeScore(edge_id);
  }
  return pcsp_score_map;
}
// ** TP Evaluation Engine
// Constructs the likelihood eval engine (backed by the given mmap file),
// makes it the active engine, and flags it as in use.
void TPEngine::MakeLikelihoodEvalEngine(const std::string &mmap_likelihood_path) {
  auto engine =
      std::make_unique<TPEvalEngineViaLikelihood>(*this, mmap_likelihood_path);
  likelihood_engine_ = std::move(engine);
  eval_engine_ = likelihood_engine_.get();
  eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine] = true;
}
// Constructs the parsimony eval engine (backed by the given mmap file),
// makes it the active engine, and flags it as in use.
void TPEngine::MakeParsimonyEvalEngine(const std::string &mmap_parsimony_path) {
  auto engine =
      std::make_unique<TPEvalEngineViaParsimony>(*this, mmap_parsimony_path);
  parsimony_engine_ = std::move(engine);
  eval_engine_ = parsimony_engine_.get();
  eval_engine_in_use_[TPEvalEngineType::ParsimonyEvalEngine] = true;
}
// Marks every eval engine type as not in use.
void TPEngine::ClearEvalEngineInUse() {
  for (const auto engine_type : TPEvalEngineTypeEnum::Iterator()) {
    eval_engine_in_use_[engine_type] = false;
  }
}
// Dispatches to the dedicated selector for the requested engine type.
void TPEngine::SelectEvalEngine(const TPEvalEngineType eval_engine_type) {
  if (eval_engine_type == TPEvalEngineType::LikelihoodEvalEngine) {
    SelectLikelihoodEvalEngine();
  } else if (eval_engine_type == TPEvalEngineType::ParsimonyEvalEngine) {
    SelectParsimonyEvalEngine();
  } else {
    Failwith("Invalid TPEvalEngineType.");
  }
}
// Makes the likelihood engine the single active eval engine.  It must have
// been built (MakeLikelihoodEvalEngine) beforehand.
void TPEngine::SelectLikelihoodEvalEngine() {
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before selecting it.");
  ClearEvalEngineInUse();
  eval_engine_ = likelihood_engine_.get();
  eval_engine_in_use_[TPEvalEngineType::LikelihoodEvalEngine] = true;
}
// Makes the parsimony engine the single active eval engine.  It must have
// been built (MakeParsimonyEvalEngine) beforehand.
void TPEngine::SelectParsimonyEvalEngine() {
  // Fix: the assert message previously misspelled "MakeParimonyEvalEngine".
  Assert(IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine),
         "Must MakeParsimonyEvalEngine before selecting it.");
  ClearEvalEngineInUse();
  eval_engine_in_use_[TPEvalEngineType::ParsimonyEvalEngine] = true;
  eval_engine_ = parsimony_engine_.get();
}
// After the DAG has been modified, brings the engine state back in sync:
// first updates the choice map, then updates each eval engine that is in
// use.  The choice map update happens before the engine updates.
void TPEngine::UpdateEvalEngineAfterModifyingDAG(
    const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
    const size_t prev_node_count, const Reindexer &node_reindexer,
    const size_t prev_edge_count, const Reindexer &edge_reindexer) {
  UpdateChoiceMapAfterModifyingDAG(nni_to_pre_nni, prev_node_count, node_reindexer,
                                   prev_edge_count, edge_reindexer);
  if (IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine)) {
    GetLikelihoodEvalEngine().UpdateEngineAfterModifyingDAG(
        nni_to_pre_nni, prev_node_count, node_reindexer, prev_edge_count,
        edge_reindexer);
  }
  if (IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine)) {
    GetParsimonyEvalEngine().UpdateEngineAfterModifyingDAG(
        nni_to_pre_nni, prev_node_count, node_reindexer, prev_edge_count,
        edge_reindexer);
  }
}
// ** Branch Lengths
void TPEngine::SetBranchLengthsToDefault() {
auto &dag_branch_lengths = GetLikelihoodEvalEngine().GetDAGBranchHandler();
for (EdgeId edge_idx = 0; edge_idx < GetEdgeCount(); edge_idx++) {
dag_branch_lengths(edge_idx) = dag_branch_lengths.GetDefaultBranchLength();
}
}
// Sets each DAG edge's branch length from the first tree in the collection
// that contains that edge; subsequent occurrences are ignored.  When
// set_uninitialized_to_default is true, every edge is first reset to the
// default length so edges absent from the collection still get a value.
void TPEngine::SetBranchLengthsByTakingFirst(
    const RootedTreeCollection &tree_collection, const BitsetSizeMap &edge_indexer,
    const bool set_uninitialized_to_default) {
  auto &dag_branch_lengths = GetLikelihoodEvalEngine().GetDAGBranchHandler();
  if (set_uninitialized_to_default) {
    SetBranchLengthsToDefault();
  }
  // Unique edges in collection should be the same as the number of total edges in
  // DAG created from collection.
  EigenVectorXi observed_edge_counts = EigenVectorXi::Zero(GetEdgeCount());
  // Set branch lengths on first occurance.
  // Callback invoked once per (tree, edge) pair by
  // FunctionOverRootedTreeCollection; only the first sighting of each edge
  // writes its length.  (edge_bitset and tree_id are part of the callback
  // signature but unused here.)
  auto set_first_branch_length = [this, &observed_edge_counts, &dag_branch_lengths](
                                     const EdgeId edge_idx, const Bitset &edge_bitset,
                                     const RootedTree &tree, const size_t tree_id,
                                     const Node *focal_node) {
    if (observed_edge_counts(edge_idx.value_) == 0) {
      dag_branch_lengths(edge_idx) = tree.BranchLength(focal_node);
      observed_edge_counts(edge_idx.value_)++;
    }
  };
  RootedSBNMaps::FunctionOverRootedTreeCollection(
      set_first_branch_length, tree_collection, edge_indexer,
      dag_branch_lengths.GetBranchLengthData().size());
}
// Delegates branch length optimization to the likelihood eval engine.
void TPEngine::OptimizeBranchLengths(std::optional<bool> check_branch_convergence) {
  auto &engine = GetLikelihoodEvalEngine();
  engine.BranchLengthOptimization(check_branch_convergence);
}
// ** Scoring
// Initializes whichever eval engines are currently in use.
void TPEngine::InitializeScores() {
  const bool use_likelihood =
      IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine);
  const bool use_parsimony = IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine);
  if (use_likelihood) {
    GetLikelihoodEvalEngine().Initialize();
  }
  if (use_parsimony) {
    GetParsimonyEvalEngine().Initialize();
  }
}
// Computes scores on whichever eval engines are currently in use.
void TPEngine::ComputeScores() {
  const bool use_likelihood =
      IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine);
  const bool use_parsimony = IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine);
  if (use_likelihood) {
    GetLikelihoodEvalEngine().ComputeScores();
  }
  if (use_parsimony) {
    GetParsimonyEvalEngine().ComputeScores();
  }
}
// Propagates a node-pair addition to each eval engine that is in use.
void TPEngine::UpdateScoresAfterDAGAddNodePair(const NNIOperation &post_nni,
                                               const NNIOperation &pre_nni,
                                               std::optional<size_t> new_tree_id) {
  const bool use_likelihood =
      IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine);
  const bool use_parsimony = IsEvalEngineInUse(TPEvalEngineType::ParsimonyEvalEngine);
  if (use_likelihood) {
    GetLikelihoodEvalEngine().UpdateEngineAfterDAGAddNodePair(post_nni, pre_nni,
                                                              new_tree_id);
  }
  if (use_parsimony) {
    GetParsimonyEvalEngine().UpdateEngineAfterDAGAddNodePair(post_nni, pre_nni,
                                                             new_tree_id);
  }
}
// Scores the top tree that would pass through a proposed NNI.  The
// caller-supplied pre-NNI is deliberately ignored: the best pre-NNI is
// re-derived from the DAG so scoring always uses the highest-priority one.
double TPEngine::GetTopTreeScoreWithProposedNNI(
    const NNIOperation &post_nni, const NNIOperation &pre_nni,
    const size_t spare_offset, std::optional<BitsetEdgeIdMap> best_edge_map_opt) {
  std::ignore = pre_nni;
  const auto best_pre_nni = FindHighestPriorityNeighborNNIInDAG(post_nni);
  auto &engine = GetEvalEngine();
  return engine.GetTopTreeScoreWithProposedNNI(post_nni, best_pre_nni, spare_offset,
                                               best_edge_map_opt);
}
// Finds the adjacent edge (in the given direction, over both clades) with the
// smallest (highest-priority) source tree id; EdgeId(NoId) when the node has
// no neighbors in that direction.
// Refactor: delegates to the per-clade overload instead of duplicating its
// scan loop; ties between clades resolve to the Left clade, matching the
// previous left-to-right iteration order.
EdgeId TPEngine::FindHighestPriorityEdgeAdjacentToNode(
    const NodeId node_id, const Direction direction) const {
  EdgeId best_edge_id = EdgeId(NoId);
  auto best_tree_source = GetNextTreeId();
  for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
    const auto edge_id =
        FindHighestPriorityEdgeAdjacentToNode(node_id, direction, clade);
    // No neighbor in this clade.
    if (edge_id == NoId) {
      continue;
    }
    const auto tree_source = GetTreeSource(edge_id);
    if (best_tree_source > tree_source) {
      best_tree_source = tree_source;
      best_edge_id = edge_id;
    }
  }
  return best_edge_id;
}
// Finds, among the node's neighbors in the given direction and clade, the
// adjacent edge with the smallest (highest-priority) source tree id;
// EdgeId(NoId) when there are no such neighbors.
EdgeId TPEngine::FindHighestPriorityEdgeAdjacentToNode(
    const NodeId node_id, const Direction direction, const SubsplitClade clade) const {
  const auto node = GetDAG().GetDAGNode(node_id);
  auto best_source = GetNextTreeId();
  EdgeId best_edge = EdgeId(NoId);
  for (const auto adj_node_id : node.GetNeighbors(direction, clade)) {
    const auto edge_id = GetDAG().GetEdgeIdx(adj_node_id, node_id);
    const auto source = GetTreeSource(edge_id);
    if (source < best_source) {
      best_source = source;
      best_edge = edge_id;
    }
  }
  return best_edge;
}
// ** Tree/Topology Builder
// Extracts the top tree's topology passing through the given edge; the
// choice map fully determines it.
Node::Topology TPEngine::GetTopTopologyWithEdge(const EdgeId edge_id) const {
  return GetChoiceMap().ExtractTopology(edge_id);
}
// Builds the top tree passing through the given edge.  Branch lengths come
// from the likelihood engine, so it must be in use.
RootedTree TPEngine::GetTopTreeWithEdge(const EdgeId edge_id) const {
  Assert(IsEvalEngineInUse(TPEvalEngineType::LikelihoodEvalEngine),
         "Must MakeLikelihoodEvalEngine before getting top tree.");
  return BuildTreeFromTopologyInDAG(GetTopTopologyWithEdge(edge_id));
}
// Attaches the current branch lengths to the given topology via the DAG.
RootedTree TPEngine::BuildTreeFromTopologyInDAG(const Node::Topology &topology) const {
  const auto &branch_lengths = GetDAGBranchHandler().GetBranchLengthData();
  return GetDAG().BuildTreeFromTopology(topology, branch_lengths);
}
// Walks the topology and collects, for every internal node, the DAG edges
// given by both child-clade PCSPs; together these edges represent the tree.
std::set<EdgeId> TPEngine::BuildSetOfEdgesRepresentingTopology(
    const Node::Topology &topology) const {
  std::set<EdgeId> edge_ids;
  topology->Preorder([this, &edge_ids](const Node *node) {
    // Leaves have no child edges.
    if (node->IsLeaf()) {
      return;
    }
    for (const auto clade : {SubsplitClade::Left, SubsplitClade::Right}) {
      edge_ids.insert(GetDAG().GetEdgeIdx(node->BuildPCSP(clade)));
    }
  });
  return edge_ids;
}
// Collects the set of source tree ids covering the given edge-set
// representation of a tree.  The edge set must be non-empty.
std::set<TreeId> TPEngine::FindTreeIdsInTreeEdgeVector(
    const std::set<EdgeId> edge_ids) const {
  Assert(!edge_ids.empty(), "EdgeVector representation of tree cannot be empty.");
  std::set<TreeId> tree_ids;
  for (const auto edge_id : edge_ids) {
    // Consistency: use the EdgeId accessor like the rest of the class
    // (equivalent to indexing the raw tree-source vector by edge_id.value_).
    tree_ids.insert(GetTreeSource(edge_id));
  }
  return tree_ids;
}
// Covers the DAG with top topologies: sweeps edges in id order, and for each
// edge not yet claimed by a previous topology, extracts its top topology and
// records it with the set of edges it newly claims.  Rootsplit edges are
// marked visited up front so they never seed a topology.  Fails if the
// resulting topologies do not cover every edge.
EdgeIdTopologyMap TPEngine::BuildMapOfEdgeIdToTopTopologies() const {
  EdgeIdTopologyMap topology_map;
  BoolVector visited_edges(GetEdgeCount(), false);
  // Ignore rootsplit edges.
  for (const auto edge_id : GetDAG().GetRootsplitEdgeIds()) {
    visited_edges[edge_id.value_] = true;
  }
  // Build trees as we encounter edges not yet assigned to a tree.
  for (EdgeId edge_id(0); edge_id < GetDAG().EdgeCountWithLeafSubsplits(); edge_id++) {
    if (visited_edges[edge_id.value_]) {
      continue;
    }
    const auto top_topology = GetTopTopologyWithEdge(edge_id);
    const auto tree_visited_edges = BuildSetOfEdgesRepresentingTopology(top_topology);
    std::set<EdgeId> new_visited_edges;
    // For edges contained in the tree, add them if edge has not already been assigned
    // to previous tree.
    // (Note: this inner edge_id shadows the loop variable above.)
    for (const auto edge_id : tree_visited_edges) {
      if (!visited_edges[edge_id.value_]) {
        new_visited_edges.insert(edge_id);
      }
      visited_edges[edge_id.value_] = true;
    }
    topology_map.push_back({new_visited_edges, top_topology});
  }
  // Sanity check: every edge must be claimed by some topology.
  if (!std::all_of(visited_edges.begin(), visited_edges.end(),
                   [](bool n) { return n; })) {
    Failwith("Top Topologies does not cover entire DAG.");
  }
  return topology_map;
}
// Groups the top topologies by their highest-priority (minimum) source
// tree id.
TreeIdTopologyMap TPEngine::BuildMapOfTreeIdToTopTopologies() const {
  TreeIdTopologyMap topology_map;
  const auto edge_topology_map = BuildMapOfEdgeIdToTopTopologies();
  for (const auto &[edge_ids, topology] : edge_topology_map) {
    const auto tree_ids = FindTreeIdsInTreeEdgeVector(edge_ids);
    // std::set is ordered, so begin() is the minimum element.
    const auto tree_id = *tree_ids.begin();
    // operator[] value-initializes an empty vector on first access, so the
    // previous explicit find-then-insert existence check was redundant.
    topology_map[tree_id].push_back(topology);
  }
  return topology_map;
}
// Builds a RootedTree (topology plus current branch lengths) for each top
// topology, grouped by source tree id.
// Perf: iterate topologies by reference and push the freshly-built tree as an
// rvalue; the previous version copied each topology and each built tree.
TreeIdTreeMap TPEngine::BuildMapOfTreeIdToTopTrees() const {
  TreeIdTreeMap tree_map;
  const auto topology_map = BuildMapOfTreeIdToTopTopologies();
  for (const auto &[tree_id, topology_vec] : topology_map) {
    auto &trees = tree_map[tree_id];
    trees.reserve(topology_vec.size());
    for (const auto &topology : topology_vec) {
      trees.push_back(BuildTreeFromTopologyInDAG(topology));
    }
  }
  return tree_map;
}
// Emits one Newick line per top topology, ordered by tree id.
// Perf: a single map lookup per tree id (previously find was called twice),
// and topologies are iterated by reference instead of copied.
std::string TPEngine::ToNewickOfTopTopologies() const {
  std::stringstream str;
  const auto topology_map = BuildMapOfTreeIdToTopTopologies();
  for (TreeId tree_id(0); tree_id < GetMaxTreeId(); tree_id++) {
    const auto it = topology_map.find(tree_id);
    if (it == topology_map.end()) continue;
    for (const auto &topology : it->second) {
      str << topology->Newick(std::nullopt, GetDAG().GetTagTaxonMap()) << std::endl;
    }
  }
  return str.str();
}
// Emits one Newick line per top tree (with branch lengths), ordered by
// tree id.
// Perf: a single map lookup per tree id (previously find was called twice),
// and trees are iterated by reference — the old loop copied a full RootedTree
// every iteration.
std::string TPEngine::ToNewickOfTopTrees() const {
  std::stringstream str;
  const auto tree_map = BuildMapOfTreeIdToTopTrees();
  for (TreeId tree_id(0); tree_id < GetMaxTreeId(); tree_id++) {
    const auto it = tree_map.find(tree_id);
    if (it == tree_map.end()) continue;
    for (const auto &tree : it->second) {
      str << tree.Newick(GetDAG().GetTagTaxonMap()) << std::endl;
    }
  }
  return str.str();
}
// Builds the five PCSPs surrounding a proposed NNI's central edge: the edge
// above the parent, the sister and focal edges under the parent, and the two
// child edges under the child, using the supplied adjacent node ids.
TPChoiceMap::EdgeChoicePCSPs TPEngine::BuildAdjacentPCSPsToProposedNNI(
    const NNIOperation &nni, const TPChoiceMap::EdgeChoiceNodeIds &adj_node_ids) const {
  const auto &parent = nni.GetParent();
  const auto &child = nni.GetChild();
  // Look up the subsplit bitset for a DAG node id.
  auto subsplit_of = [this](const NodeId node_id) -> const Bitset & {
    return GetDAG().GetDAGNodeBitset(node_id);
  };
  // Field order: parent, sister, focal, left_child, right_child.
  return TPChoiceMap::EdgeChoicePCSPs{
      Bitset::PCSP(subsplit_of(adj_node_ids.parent), parent),
      Bitset::PCSP(parent, subsplit_of(adj_node_ids.sister)),
      Bitset::PCSP(parent, child),
      Bitset::PCSP(child, subsplit_of(adj_node_ids.left_child)),
      Bitset::PCSP(child, subsplit_of(adj_node_ids.right_child))};
}
// ** I/O
// Renders the given likelihood partial vector as a string.
std::string TPEngine::LikelihoodPVToString(const PVId pv_id) const {
  const auto &pvs = GetLikelihoodEvalEngine().GetPVs();
  return pvs.ToString(pv_id);
}
// Dumps every entry of the log-likelihood matrix as "[i,j]: value", one
// matrix row per output line.
std::string TPEngine::LogLikelihoodMatrixToString() const {
  std::stringstream out;
  const auto &log_likelihoods = GetLikelihoodEvalEngine().GetMatrix();
  const auto row_count = log_likelihoods.rows();
  const auto col_count = log_likelihoods.cols();
  for (Eigen::Index i = 0; i < row_count; i++) {
    for (Eigen::Index j = 0; j < col_count; j++) {
      out << "[" << i << "," << j << "]: " << log_likelihoods(i, j) << "\t";
    }
    out << std::endl;
  }
  return out.str();
}
// Renders the given parsimony partial vector as a string.
std::string TPEngine::ParsimonyPVToString(const PVId pv_id) const {
  const auto &pvs = GetParsimonyEvalEngine().GetPVs();
  return pvs.ToString(pv_id);
}
std::string TPEngine::TreeSourceToString() const {
std::stringstream out;
out << "TreeSource: { ";
for (EdgeId i(0); i < GetEdgeCount(); i++) {
out << "[Edge" << i << "]: Tree" << GetTreeSource(i) << ", ";
}
out << " }";
return out.str();
}
| 73,436
|
C++
|
.cpp
| 1,549
| 41.009038
| 88
| 0.682521
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,051
|
rooted_gradient_transforms.cpp
|
phylovi_bito/src/rooted_gradient_transforms.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Calculation of the ratio and root height gradient, adapted from BEAST.
// https://github.com/beast-dev/beast-mcmc
// Credit to Xiang Ji and Marc Suchard.
//
// Because this is code adapted from elsewhere, at least for the time being the naming
// conventions are a little different: pascalCase is allowed for variables.
#include "rooted_gradient_transforms.hpp"
#include <numeric>
#include "rooted_tree.hpp"
// \partial{L}/\partial{t_k} = \sum_j \partial{L}/\partial{b_j}
// \partial{b_j}/\partial{t_k}
// Convert a branch-length gradient (indexed by node id, i.e. the branch above
// each node) into a gradient over internal-node heights (indexed by
// node_id - leaf_count).
std::vector<double> RootedGradientTransforms::HeightGradient(
    const RootedTree &tree, const std::vector<double> &branch_gradient) {
  auto root_id = tree.Topology()->Id();
  // One slot per internal node, zero-initialized.
  std::vector<double> height_gradient(tree.LeafCount() - 1, 0);
  tree.Topology()->BinaryIdPreorder(
      [&root_id, &branch_gradient, &height_gradient, leaf_count = tree.LeafCount(),
       &rates = tree.GetRates()](size_t node_id, size_t child0_id, size_t child1_id) {
        // Non-root nodes: the node's own branch contributes with a negative
        // sign, scaled by the clock rate on that branch.
        if (node_id != root_id) {
          height_gradient[node_id - leaf_count] =
              -branch_gradient[node_id] * rates[node_id];
        }
        // Internal nodes: both child branches contribute positively.
        if (node_id >= leaf_count) {
          height_gradient[node_id - leaf_count] +=
              branch_gradient[child0_id] * rates[child0_id];
          height_gradient[node_id - leaf_count] +=
              branch_gradient[child1_id] * rates[child1_id];
        }
      });
  return height_gradient;
}
// Partial derivative helper: the node's height interval above its bound,
// divided by the node's height-ratio parameter.
double RootedGradientTransforms::GetNodePartial(size_t node_id, size_t leaf_count,
                                                const std::vector<double> &heights,
                                                const std::vector<double> &ratios,
                                                const std::vector<double> &bounds) {
  const double interval_above_bound = heights[node_id] - bounds[node_id];
  return interval_above_bound / ratios[node_id - leaf_count];
}
// Calculate \partial{t_j}/\partial{r_k}
// Contribution of child_id's accumulated ratio gradient to node_id's entry,
// i.e. the \partial{t_j}/\partial{r_k} chain-rule term for one child edge.
double RootedGradientTransforms::GetEpochGradientAddition(
    size_t node_id, size_t child_id, size_t leaf_count,
    const std::vector<double> &heights, const std::vector<double> &ratios,
    const std::vector<double> &bounds,
    const std::vector<double> &ratiosGradientUnweightedLogDensity) {
  if (child_id < leaf_count) {
    // Leaf children contribute nothing here.
    return 0.0;
  } else if (bounds[node_id] == bounds[child_id]) {
    // child_id and node_id are in the same epoch: the term reduces to a
    // ratio-of-ratios factor.
    return ratiosGradientUnweightedLogDensity[child_id - leaf_count] *
           ratios[child_id - leaf_count] / ratios[node_id - leaf_count];
  } else {
    // NOT the same epoch: scale by the child's height interval above its
    // bound and by the node's partial (see GetNodePartial).
    return ratiosGradientUnweightedLogDensity[child_id - leaf_count] *
           ratios[child_id - leaf_count] / (heights[node_id] - bounds[child_id]) *
           GetNodePartial(node_id, leaf_count, heights, ratios, bounds);
  }
}
// Build 1/(height - bound) for each internal node.
// NOTE: the loop deliberately stops at leaf_count - 2, so the final slot
// (presumably the root's) is left at zero — callers overwrite/skip that entry.
std::vector<double> RootedGradientTransforms::GetLogTimeArray(const RootedTree &tree) {
  size_t leaf_count = tree.LeafCount();
  std::vector<double> log_time(leaf_count - 1, 0);
  const auto &node_bounds = tree.GetNodeBounds();
  const auto &node_heights = tree.GetNodeHeights();
  for (size_t i = 0; i < leaf_count - 2; i++) {
    log_time[i] = 1.0 / (node_heights[leaf_count + i] - node_bounds[leaf_count + i]);
  }
  return log_time;
}
// Update ratio gradient with \partial{t_j}/\partial{r_k}
// Accumulate, in postorder, the ratio-parameter gradient from a node-height
// gradient: each internal non-root node's entry combines its own partial with
// the epoch-aware chain-rule additions from its two children. Postorder
// guarantees children's entries are complete before the parent reads them.
std::vector<double> RootedGradientTransforms::UpdateGradientUnWeightedLogDensity(
    const RootedTree &tree, const std::vector<double> &gradient_height) {
  size_t leaf_count = tree.LeafCount();
  size_t root_id = tree.Topology()->Id();
  std::vector<double> ratiosGradientUnweightedLogDensity(leaf_count - 1);
  tree.Topology()->BinaryIdPostorder(
      [&gradient_height, &heights = tree.node_heights_, &ratios = tree.height_ratios_,
       &bounds = tree.GetNodeBounds(), &ratiosGradientUnweightedLogDensity, &leaf_count,
       &root_id](size_t node_id, size_t child0_id, size_t child1_id) {
        // Only internal, non-root nodes carry a ratio parameter.
        if (node_id >= leaf_count && node_id != root_id) {
          ratiosGradientUnweightedLogDensity[node_id - leaf_count] +=
              GetNodePartial(node_id, leaf_count, heights, ratios, bounds) *
              gradient_height[node_id - leaf_count];
          ratiosGradientUnweightedLogDensity[node_id - leaf_count] +=
              GetEpochGradientAddition(node_id, child0_id, leaf_count, heights, ratios,
                                       bounds, ratiosGradientUnweightedLogDensity);
          ratiosGradientUnweightedLogDensity[node_id - leaf_count] +=
              GetEpochGradientAddition(node_id, child1_id, leaf_count, heights, ratios,
                                       bounds, ratiosGradientUnweightedLogDensity);
        }
      });
  return ratiosGradientUnweightedLogDensity;
}
double RootedGradientTransforms::UpdateHeightParameterGradientUnweightedLogDensity(
const RootedTree &tree, const std::vector<double> &gradient) {
size_t leaf_count = tree.LeafCount();
size_t root_id = tree.Topology()->Id();
std::vector<double> multiplierArray(leaf_count - 1);
multiplierArray[root_id - leaf_count] = 1.0;
tree.Topology()->BinaryIdPreorder(
[&leaf_count, &ratios = tree.height_ratios_, &multiplierArray](
size_t node_id, size_t child0_id, size_t child1_id) {
if (child0_id >= leaf_count) {
double ratio = ratios[child0_id - leaf_count];
multiplierArray[child0_id - leaf_count] =
ratio * multiplierArray[node_id - leaf_count];
}
if (child1_id >= leaf_count) {
double ratio = ratios[child1_id - leaf_count];
multiplierArray[child1_id - leaf_count] =
ratio * multiplierArray[node_id - leaf_count];
}
});
double sum = 0.0;
for (size_t i = 0; i < gradient.size(); i++) {
sum += gradient[i] * multiplierArray[i];
}
return sum;
}
// Gradient of the log-determinant of the height-transform Jacobian with
// respect to the (ratios, root-height) parameters.
std::vector<double> RootedGradientTransforms::GradientLogDeterminantJacobian(
    const RootedTree &tree) {
  size_t leaf_count = tree.LeafCount();
  size_t root_id = tree.Topology()->Id();
  // log_time holds 1/(height - bound) per internal node; its final slot is
  // zero (see GetLogTimeArray).
  std::vector<double> log_time = GetLogTimeArray(tree);
  std::vector<double> gradient_log_jacobian_determinant =
      UpdateGradientUnWeightedLogDensity(tree, log_time);
  // The root's entry is the root-height component, computed separately.
  gradient_log_jacobian_determinant[root_id - leaf_count] =
      UpdateHeightParameterGradientUnweightedLogDensity(tree, log_time);
  // Each ratio also contributes a -1/ratio term; the loop excludes the final
  // slot (presumably the root's, which has no ratio parameter).
  for (size_t i = 0; i < gradient_log_jacobian_determinant.size() - 1; i++) {
    gradient_log_jacobian_determinant[i] -= 1.0 / tree.height_ratios_[i];
  }
  return gradient_log_jacobian_determinant;
}
// Transform a node-height gradient into the (ratios, root-height)
// parameterization; the root's slot in the result holds the root-height
// component.
std::vector<double> RootedGradientTransforms::RatioGradientOfHeightGradient(
    const RootedTree &tree, const std::vector<double> &height_gradient) {
  const size_t leaf_count = tree.LeafCount();
  const size_t root_id = tree.Topology()->Id();
  // Ratio components for all internal non-root nodes.
  std::vector<double> gradient_log_density =
      UpdateGradientUnWeightedLogDensity(tree, height_gradient);
  // Root-height component overwrites the root's slot.
  const double root_height_component =
      UpdateHeightParameterGradientUnweightedLogDensity(tree, height_gradient);
  gradient_log_density[root_id - leaf_count] = root_height_component;
  return gradient_log_density;
}
// Transform a branch-length gradient into the (ratios, root-height)
// parameterization, always adding the gradient of the log-determinant of the
// Jacobian. (The flagged overload below makes that term optional.)
std::vector<double> RootedGradientTransforms::RatioGradientOfBranchGradient(
    const RootedTree &tree, const std::vector<double> &branch_gradient) {
  size_t leaf_count = tree.LeafCount();
  size_t root_id = tree.Topology()->Id();
  // Calculate node height gradient
  std::vector<double> height_gradient = HeightGradient(tree, branch_gradient);
  // Calculate ratios and root height gradient
  std::vector<double> gradient_log_density =
      RatioGradientOfHeightGradient(tree, height_gradient);
  // Calculate gradient of log Jacobian determinant
  std::vector<double> gradient_log_jacobian_determinant =
      GradientLogDeterminantJacobian(tree);
  // Add the Jacobian term entry-wise for the ratio slots; the root's slot is
  // handled separately below, addressed by root index rather than position.
  for (size_t i = 0; i < gradient_log_jacobian_determinant.size() - 1; i++) {
    gradient_log_density[i] += gradient_log_jacobian_determinant[i];
  }
  gradient_log_density[root_id - leaf_count] +=
      gradient_log_jacobian_determinant[root_id - leaf_count];
  return gradient_log_density;
}
// Flagged variant of RatioGradientOfBranchGradient: the log-det-Jacobian term
// is added only when include_log_det_jacobian_gradient_ is set.
// NOTE(review): the flagged branch duplicates the body of the two-argument
// overload above — consider delegating to avoid drift.
std::vector<double> RootedGradientTransforms::RatioGradientOfBranchGradient(
    const RootedTree &tree, const std::vector<double> &branch_gradient,
    const std::optional<PhyloFlags> flags) {
  size_t leaf_count = tree.LeafCount();
  size_t root_id = tree.Topology()->Id();
  // Calculate node height gradient
  std::vector<double> height_gradient = HeightGradient(tree, branch_gradient);
  // Calculate ratios and root height gradient
  std::vector<double> gradient_log_density =
      RatioGradientOfHeightGradient(tree, height_gradient);
  if (PhyloFlags::IsFlagSet(
          flags, PhyloGradientFlagOptions::include_log_det_jacobian_gradient_)) {
    std::vector<double> gradient_log_jacobian_determinant =
        GradientLogDeterminantJacobian(tree);
    // Ratio slots entry-wise; root slot addressed by root index.
    for (size_t i = 0; i < gradient_log_jacobian_determinant.size() - 1; i++) {
      gradient_log_density[i] += gradient_log_jacobian_determinant[i];
    }
    gradient_log_density[root_id - leaf_count] +=
        gradient_log_jacobian_determinant[root_id - leaf_count];
  }
  return gradient_log_density;
}
EigenVectorXd RootedGradientTransforms::RatioGradientOfHeightGradientEigen(
const RootedTree &tree, EigenConstVectorXdRef height_gradient) {
std::vector<double> height_gradient_vector(height_gradient.size());
for (Eigen::Index i = 0; i < height_gradient.size(); ++i) {
height_gradient_vector[i] = height_gradient(i);
}
std::vector<double> vector_output =
UpdateGradientUnWeightedLogDensity(tree, height_gradient_vector);
vector_output[vector_output.size() - 1] =
UpdateHeightParameterGradientUnweightedLogDensity(tree, height_gradient_vector);
EigenVectorXd eigen_output(vector_output.size());
for (size_t i = 0; i < vector_output.size(); ++i) {
eigen_output(i) = vector_output[i];
}
return eigen_output;
}
// Log-determinant of the Jacobian of the node-height transform: the sum over
// internal nodes of log(parent height - node bound).
double RootedGradientTransforms::LogDetJacobianHeightTransform(const RootedTree &tree) {
  double log_det_jacobian = 0.0;
  size_t leaf_count = tree.LeafCount();
  tree.Topology()->TripleIdPreorderBifurcating(
      [&log_det_jacobian, &tree, leaf_count](int node_id, int sister_id,
                                             int parent_id) {
        if (size_t(node_id) >=
            leaf_count) {  // Only add to computation if node is not a leaf.
          // Account for the jacobian of this branch's height transform.
          log_det_jacobian +=
              std::log(tree.node_heights_[parent_id] - tree.node_bounds_[node_id]);
        }
      });
  return log_det_jacobian;
}
| 10,616
|
C++
|
.cpp
| 217
| 42.631336
| 88
| 0.687838
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,052
|
gp_instance.cpp
|
phylovi_bito/src/gp_instance.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "gp_instance.hpp"
#include <chrono>
#include <iomanip>
#include <string>
#include "csv.hpp"
#include "driver.hpp"
#include "gp_operation.hpp"
#include "numerical_utils.hpp"
#include "rooted_tree_collection.hpp"
#include "sbn_probability.hpp"
#include "stopwatch.hpp"
using namespace GPOperations; // NOLINT
// Report to stdout what is currently loaded: tree and sequence counts, DAG
// size, and whether the GP engine has been constructed.
void GPInstance::PrintStatus() {
  const auto tree_count = tree_collection_.TreeCount();
  const auto taxon_count = tree_collection_.TaxonCount();
  if (tree_count == 0) {
    std::cout << "No trees loaded.\n";
  } else {
    std::cout << tree_count << " trees loaded on " << taxon_count << " leaves.\n";
  }
  std::cout << alignment_.Data().size() << " sequences loaded.\n";
  const auto edge_count = GetDAG().EdgeCountWithLeafSubsplits();
  std::cout << GetDAG().NodeCount() << " DAG nodes with " << edge_count
            << " edges representing " << GetDAG().TopologyCount() << " trees.\n";
  std::cout << edge_count << " continuous parameters.\n";
  if (HasGPEngine()) {
    std::cout << "Engine available using "
              << GetGPEngine().GetPLVHandler().GetByteCount() / 1e9
              << "G virtual memory.\n";
  } else {
    std::cout << "Engine has not been made.\n";
  }
}
// ** I/O
// Collect summary statistics describing the current subsplit DAG.
StringSizeMap GPInstance::DAGSummaryStatistics() {
  auto &dag = GetDAG();
  return dag.SummaryStatistics();
}
void GPInstance::ReadFastaFile(const std::string &fname) {
alignment_ = Alignment::ReadFasta(fname);
fasta_path_ = fname;
}
void GPInstance::ReadNewickFile(const std::string &fname, const bool sort_taxa) {
Driver driver;
driver.SetSortTaxa(sort_taxa);
tree_collection_ =
RootedTreeCollection::OfTreeCollection(driver.ParseNewickFile(fname));
newick_path_ = fname;
}
void GPInstance::ReadNewickFileGZ(const std::string &fname, const bool sort_taxa) {
Driver driver;
driver.SetSortTaxa(sort_taxa);
tree_collection_ =
RootedTreeCollection::OfTreeCollection(driver.ParseNewickFileGZ(fname));
newick_path_ = fname;
}
void GPInstance::ReadNexusFile(const std::string &fname, const bool sort_taxa) {
Driver driver;
driver.SetSortTaxa(sort_taxa);
tree_collection_ =
RootedTreeCollection::OfTreeCollection(driver.ParseNexusFile(fname));
nexus_path_ = fname;
}
// Load rooted trees from a gzipped nexus file; taxa are optionally sorted.
// Bug fix: the sort_taxa argument was accepted but never forwarded to the
// driver (unlike ReadNewickFile/ReadNewickFileGZ/ReadNexusFile), so taxon
// sorting was silently ignored for gzipped nexus input.
void GPInstance::ReadNexusFileGZ(const std::string &fname, const bool sort_taxa) {
  Driver driver;
  driver.SetSortTaxa(sort_taxa);
  tree_collection_ =
      RootedTreeCollection::OfTreeCollection(driver.ParseNexusFileGZ(fname));
  nexus_path_ = fname;
}
// Path of the most recently read fasta file; fails if none has been read.
std::string GPInstance::GetFastaSourcePath() const {
  Assert(fasta_path_.has_value(), "No fasta source file has been read.");
  return fasta_path_.value();
}

// Path of the most recently read newick file; fails if none has been read.
std::string GPInstance::GetNewickSourcePath() const {
  Assert(newick_path_.has_value(), "No newick source file has been read.");
  return newick_path_.value();
}

// Path of the most recently read nexus file; fails if none has been read.
std::string GPInstance::GetNexusSourcePath() const {
  Assert(nexus_path_.has_value(), "No nexus source file has been read.");
  return nexus_path_.value();
}
std::string GPInstance::GetMMapFilePath() const { return mmap_file_path_.value(); }
// Fail with a helpful message unless an alignment has been loaded.
void GPInstance::CheckSequencesLoaded() const {
  if (alignment_.SequenceCount() == 0) {
    Failwith(
        "Load an alignment into your GPInstance with which you wish to "
        "calculate phylogenetic likelihoods.");
  }
}

// Fail with a helpful message unless trees have been loaded.
void GPInstance::CheckTreesLoaded() const {
  if (tree_collection_.TreeCount() == 0) {
    Failwith(
        "Load some trees into your GPInstance on which you wish to "
        "build your subsplit DAG.");
  }
}
// ** DAG
// Construct the subsplit DAG from the currently loaded tree collection.
void GPInstance::MakeDAG() {
  CheckTreesLoaded();
  dag_ = std::make_unique<GPDAG>(tree_collection_);
}

// Mutable access to the DAG; fails if MakeDAG has not been called.
GPDAG &GPInstance::GetDAG() {
  Assert(HasDAG(), "DAG not available. Call MakeDAG.");
  return *dag_;
}

// Read-only access to the DAG; fails if MakeDAG has not been called.
const GPDAG &GPInstance::GetDAG() const {
  Assert(HasDAG(), "DAG not available. Call MakeDAG.");
  return *dag_;
}

// Has the DAG been constructed yet?
bool GPInstance::HasDAG() const { return dag_ != nullptr; }

// Print a text representation of the DAG to stdout.
void GPInstance::PrintDAG() { GetDAG().Print(); }

// Build a site pattern from the loaded alignment and the taxon map of the
// loaded trees.
SitePattern GPInstance::MakeSitePattern() const {
  CheckSequencesLoaded();
  return SitePattern(alignment_, tree_collection_.TagTaxonMap());
}
// ** GP Engine
// Construct the GP engine for the current alignment and DAG (building the
// DAG first if needed), seeding it with a uniform-on-support SBN prior.
// Note: mmap_file_path_ must already be set; .value() throws otherwise.
void GPInstance::MakeGPEngine(double rescaling_threshold, bool use_gradients) {
  std::string mmap_gp_path = mmap_file_path_.value() + ".gp";
  auto site_pattern = MakeSitePattern();
  if (!HasDAG()) {
    MakeDAG();
  }
  // Prior over topologies, the resulting per-node unconditional
  // probabilities, and the inverted GPCSP probabilities.
  auto sbn_prior = GetDAG().BuildUniformOnTopologicalSupportPrior();
  auto unconditional_node_probabilities =
      GetDAG().UnconditionalNodeProbabilities(sbn_prior);
  auto inverted_sbn_prior =
      GetDAG().InvertedGPCSPProbabilities(sbn_prior, unconditional_node_probabilities);
  gp_engine_ = std::make_unique<GPEngine>(
      std::move(site_pattern), GetDAG().NodeCountWithoutDAGRoot(),
      GetDAG().EdgeCountWithLeafSubsplits(), mmap_gp_path, rescaling_threshold,
      std::move(sbn_prior),
      // The DAG root's entry is excluded via segment().
      unconditional_node_probabilities.segment(0, GetDAG().NodeCountWithoutDAGRoot()),
      std::move(inverted_sbn_prior), use_gradients);
}
// Recompute the SBN prior, unconditional node probabilities, and inverted
// GPCSP probabilities from the current DAG, and push them into the engine
// (e.g. after the DAG has changed shape).
void GPInstance::ReinitializePriors() {
  auto sbn_prior = GetDAG().BuildUniformOnTopologicalSupportPrior();
  auto unconditional_node_probabilities =
      GetDAG().UnconditionalNodeProbabilities(sbn_prior);
  auto inverted_sbn_prior =
      GetDAG().InvertedGPCSPProbabilities(sbn_prior, unconditional_node_probabilities);
  // The DAG root's entry is excluded via segment().
  GetGPEngine().InitializePriors(std::move(sbn_prior),
                                 std::move(unconditional_node_probabilities.segment(
                                     0, GetDAG().NodeCountWithoutDAGRoot())),
                                 std::move(inverted_sbn_prior));
}
// Mutable access to the GP engine; fails if MakeGPEngine has not been called.
GPEngine &GPInstance::GetGPEngine() {
  Assert(HasGPEngine(),
         "Engine not available. Call MakeGPEngine to make an engine for phylogenetic "
         "likelihood computation.");
  return *gp_engine_;
}

// Read-only access to the GP engine; fails if MakeGPEngine has not been
// called.
const GPEngine &GPInstance::GetGPEngine() const {
  Assert(HasGPEngine(),
         "Engine not available. Call MakeGPEngine to make an engine for phylogenetic "
         "likelihood computation.");
  return *gp_engine_;
}

// Grow the engine's PLVs and GPCSP storage to match the current DAG.
void GPInstance::ResizeEngineForDAG() {
  Assert(HasGPEngine(), "Engine not available. Call MakeGPEngine before resizing.");
  auto &engine = GetGPEngine();
  engine.GrowPLVs(GetDAG().NodeCountWithoutDAGRoot());
  engine.GrowGPCSPs(GetDAG().EdgeCountWithLeafSubsplits());
}

// Has the GP engine been constructed yet?
bool GPInstance::HasGPEngine() const { return gp_engine_ != nullptr; }
// Print the taxon name ordering followed by the DAG's edge indexer.
void GPInstance::PrintEdgeIndexer() {
  std::cout << "Vector of taxon names: " << tree_collection_.TaxonNames() << std::endl;
  GetDAG().PrintEdgeIndexer();
}

// Run a batch of GP operations on the engine.
void GPInstance::ProcessOperations(const GPOperationVector &operations) {
  GetGPEngine().ProcessOperations(operations);
}

// Reset the DAG to a freshly-constructed (empty) state.
void GPInstance::ClearTreeCollectionAssociatedState() { GetDAG() = GPDAG(); }

// Initialize the engine's branch lengths from the loaded trees.
void GPInstance::HotStartBranchLengths() {
  Assert(HasGPEngine(),
         "Please load and process some trees before calling HotStartBranchLengths.");
  GetGPEngine().HotStartBranchLengths(tree_collection_, GetDAG().BuildEdgeIndexer());
}

// Collect, per PCSP, the branch lengths observed in the loaded trees.
SizeDoubleVectorMap GPInstance::GatherBranchLengths() {
  Assert(HasGPEngine(),
         "Please load and process some trees before calling GatherBranchLengths.");
  return GetGPEngine().GatherBranchLengths(tree_collection_,
                                           GetDAG().BuildEdgeIndexer());
}

// Set each engine branch length to the first observation from the loaded
// trees.
void GPInstance::TakeFirstBranchLength() {
  Assert(HasGPEngine(),
         "Please load and process some trees before calling TakeFirstBranchLength.");
  GetGPEngine().TakeFirstBranchLength(tree_collection_, GetDAG().BuildEdgeIndexer());
}
// Fill all partial likelihood vectors for the current DAG.
void GPInstance::PopulatePLVs() { ProcessOperations(GetDAG().PopulatePLVs()); }

// Compute per-PCSP likelihoods for the current DAG.
void GPInstance::ComputeLikelihoods() {
  ProcessOperations(GetDAG().ComputeLikelihoods());
}

// Compute the marginal likelihood across the DAG.
void GPInstance::ComputeMarginalLikelihood() {
  ProcessOperations(GetDAG().MarginalLikelihood());
}
// Iteratively optimize DAG branch lengths, stopping when the mean absolute
// per-PCSP branch-length change drops below tol or max_iter is reached.
// If quiet, progress is routed to a throwaway stream. If
// track_intermediate_iterations, each iteration's branch lengths and
// per-GPCSP log likelihoods are recorded (see IntermediateOptimizationValues).
void GPInstance::EstimateBranchLengths(double tol, size_t max_iter, bool quiet,
                                       bool track_intermediate_iterations,
                                       std::optional<OptimizationMethod> method) {
  std::stringstream dev_null;
  auto &our_ostream = quiet ? dev_null : std::cout;
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  if (method.has_value()) {
    GetGPEngine().SetOptimizationMethod(method.value());
  }
  GetGPEngine().ResetOptimizationCount();
  our_ostream << "Begin branch optimization\n";
  // GetDAG().ReinitializeTidyVectors();
  // Operation batches are built once and replayed every iteration.
  GPOperationVector branch_optimization_operations =
      GetDAG().BranchLengthOptimization();
  GPOperationVector marginal_lik_operations = GetDAG().MarginalLikelihood();
  GPOperationVector populate_plv_operations = GetDAG().PopulatePLVs();
  our_ostream << "Populating PLVs\n";
  PopulatePLVs();
  auto warmup_duration = timer.Lap();
  our_ostream << "Computing initial likelihood\n";
  ProcessOperations(marginal_lik_operations);
  double current_marginal_log_lik = GetGPEngine().GetLogMarginalLikelihood();
  auto initial_likelihood_duration = timer.Lap();
  for (size_t i = 0; i < max_iter; i++) {
    our_ostream << "Iteration: " << (i + 1) << std::endl;
    ProcessOperations(branch_optimization_operations);
    // #321 Replace with a cleaned up traversal.
    ProcessOperations(populate_plv_operations);
    ProcessOperations(marginal_lik_operations);
    double marginal_log_lik = GetGPEngine().GetLogMarginalLikelihood();
    if (track_intermediate_iterations) {
      our_ostream << "Tracking intermediate optimization values" << std::endl;
      IntermediateOptimizationValues();
    }
    our_ostream << "Current marginal log likelihood: ";
    our_ostream << std::setprecision(9) << current_marginal_log_lik << std::endl;
    our_ostream << "New marginal log likelihood: ";
    our_ostream << std::setprecision(9) << marginal_log_lik << std::endl;
    // Convergence is judged on branch-length movement, not on the
    // likelihood itself.
    double avg_abs_change_perpcsp_branch_length =
        GetGPEngine().GetBranchLengthDifferences().array().mean();
    our_ostream << "Average absolute change in branch lengths:";
    our_ostream << std::setprecision(9) << avg_abs_change_perpcsp_branch_length
                << std::endl;
    // A decrease is reported but does not stop the iteration.
    if (marginal_log_lik < current_marginal_log_lik) {
      our_ostream << "Marginal log likelihood decreased.\n";
    }
    if (avg_abs_change_perpcsp_branch_length < tol) {
      our_ostream << "Average absolute change in branch lengths converged. \n";
      break;
    }
    current_marginal_log_lik = marginal_log_lik;
    GetGPEngine().IncrementOptimizationCount();
  }
  timer.Stop();
  auto optimization_duration = timer.GetTotal();
  our_ostream << "\n# Timing Report\n";
  our_ostream << "warmup: " << warmup_duration << "s\n";
  our_ostream << "initial likelihood: " << initial_likelihood_duration << "s\n";
  our_ostream << "optimization: " << optimization_duration << "s or "
              << optimization_duration / 60 << "m\n";
}
// TP-engine analogue of EstimateBranchLengths: iterate branch-length
// optimization until the mean absolute branch-length change falls below tol
// or max_iter is reached.
// NOTE(review): track_intermediate_iterations is accepted but never used in
// this body — confirm whether tracking was meant to be wired in.
void GPInstance::TPEngineEstimateBranchLengths(
    double tol, size_t max_iter, bool quiet, bool track_intermediate_iterations,
    std::optional<OptimizationMethod> method) {
  std::stringstream dev_null;
  // Route progress output to a throwaway stream when quiet.
  auto &our_ostream = quiet ? dev_null : std::cout;
  auto &tp_engine = GetTPEngine();
  auto &dag = GetDAG();
  auto &branch_handler = GetTPEngine().GetLikelihoodEvalEngine().GetDAGBranchHandler();
  auto &diffs = branch_handler.GetBranchDifferences().GetData();
  auto &branches = branch_handler.GetBranchLengths().GetData();
  if (method.has_value()) {
    tp_engine.GetDAGBranchHandler().SetOptimizationMethod(method.value());
  }
  tp_engine.GetLikelihoodEvalEngine().ResetOptimizationCount();
  Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
  our_ostream << "Begin branch optimization\n";
  tp_engine.GetLikelihoodEvalEngine().InitializeBranchLengthHandler();
  our_ostream << "Computing Likelihoods\n";
  tp_engine.GetLikelihoodEvalEngine().Initialize();
  tp_engine.GetLikelihoodEvalEngine().ComputeScores();
  our_ostream << "Likelihoods: " << std::endl
              << tp_engine.GetTopTreeLikelihoods() << std::endl;
  // NOTE(review): despite the name, this initial value is the mean of the
  // *branch lengths*, not of the likelihoods — confirm intent.
  auto cur_likelihoods = branches.segment(1, dag.EdgeCountWithLeafSubsplits()).mean();
  auto prv_likelihoods = cur_likelihoods;
  auto warmup_duration = timer.Lap();
  auto initial_likelihood_duration = timer.Lap();
  for (size_t i = 0; i < max_iter; i++) {
    tp_engine.GetLikelihoodEvalEngine().BranchLengthOptimization();
    tp_engine.GetLikelihoodEvalEngine().ComputeScores();
    // NOTE(review): this declaration shadows the outer cur_likelihoods.
    auto cur_likelihoods = tp_engine.GetTopTreeLikelihoods()
                               .segment(0, GetDAG().EdgeCountWithLeafSubsplits())
                               .mean();
    our_ostream << "Iteration: " << (i + 1) << std::endl;
    our_ostream << "Previous likelihoods: ";
    our_ostream << std::setprecision(9) << prv_likelihoods << std::endl;
    our_ostream << "Current likelihoods: ";
    our_ostream << std::setprecision(9) << cur_likelihoods << std::endl;
    // Convergence is judged on branch-length movement.
    double avg_abs_change_perpcsp_branch_length =
        diffs.segment(1, dag.EdgeCountWithLeafSubsplits()).mean();
    our_ostream << "Average absolute change in branch lengths:";
    our_ostream << std::setprecision(9) << avg_abs_change_perpcsp_branch_length
                << std::endl;
    if (prv_likelihoods < cur_likelihoods) {
      our_ostream << "Marginal log likelihood decreased.\n";
    }
    if (avg_abs_change_perpcsp_branch_length < tol) {
      our_ostream << "Average absolute change in branch lengths converged. \n";
      break;
    }
    prv_likelihoods = cur_likelihoods;
    tp_engine.GetLikelihoodEvalEngine().IncrementOptimizationCount();
  }
  auto optimization_duration = timer.GetTotal();
  our_ostream << "\n# Timing Report\n";
  our_ostream << "warmup: " << warmup_duration << "s\n";
  our_ostream << "initial likelihood: " << initial_likelihood_duration << "s\n";
  our_ostream << "optimization: " << optimization_duration << "s or "
              << optimization_duration / 60 << "m\n";
}
// Select the branch-length optimization method used by the GP engine.
void GPInstance::SetOptimizationMethod(const OptimizationMethod method) {
  GetGPEngine().SetOptimizationMethod(method);
}

// Toggle gradient-based branch-length optimization in the GP engine.
void GPInstance::UseGradientOptimization(const bool use_gradients) {
  GetGPEngine().UseGradientOptimization(use_gradients);
}
// Record the current branch lengths and per-GPCSP log likelihoods into the
// rightmost column of the tracking matrices, then append a fresh column for
// the next iteration.
void GPInstance::IntermediateOptimizationValues() {
  GPOperationVector compute_lik_operations = GetDAG().ComputeLikelihoods();
  ProcessOperations(compute_lik_operations);
  size_t col_idx = per_pcsp_branch_lengths_.cols() - 1;
  per_pcsp_branch_lengths_.col(col_idx) = GetGPEngine().GetBranchLengths();
  per_pcsp_log_lik_.col(col_idx) = GetGPEngine().GetPerGPCSPLogLikelihoods();
  // conservativeResize preserves existing columns while adding one more.
  per_pcsp_branch_lengths_.conservativeResize(Eigen::NoChange, col_idx + 2);
  per_pcsp_log_lik_.conservativeResize(Eigen::NoChange, col_idx + 2);
}
void GPInstance::EstimateSBNParameters() {
std::cout << "Begin SBN parameter optimization\n";
PopulatePLVs();
ComputeLikelihoods();
ProcessOperations(GetDAG().OptimizeSBNParameters());
}
// Compute quartet hybrid marginals by issuing a request to the engine for
// every edge of the DAG in topological order.
void GPInstance::CalculateHybridMarginals() {
  std::cout << "Calculating hybrid marginals\n";
  PopulatePLVs();
  GetDAG().TopologicalEdgeTraversal(
      // edge_idx is part of the traversal callback signature but unused here.
      [this](const NodeId parent_id, const bool is_edge_on_left, const NodeId child_id,
             const EdgeId edge_idx) {
        this->GetGPEngine().ProcessQuartetHybridRequest(
            GetDAG().QuartetHybridRequestOf(parent_id, is_edge_on_left, child_id));
      });
}
// Look up the DAG edge from parent_subsplit down to the given leaf node.
EdgeId GPInstance::GetEdgeIndexForLeafNode(const Bitset &parent_subsplit,
                                           const Node *leaf_node) const {
  Assert(leaf_node->IsLeaf(), "Only leaf node is permitted.");
  const auto leaf_subsplit = Bitset::LeafSubsplitOfNonemptyClade(leaf_node->Leaves());
  return GetDAG().GetEdgeIdx(parent_subsplit, leaf_subsplit);
}
// Attach GP branch lengths (looked up by GPCSP) to each supplied topology,
// returning a rooted tree collection over the instance's taxa.
RootedTreeCollection GPInstance::TreesWithGPBranchLengthsOfTopologies(
    Node::NodePtrVec &&topologies) const {
  const EigenVectorXd gpcsp_indexed_branch_lengths = GetGPEngine().GetBranchLengths();
  RootedTree::RootedTreeVector tree_vector;
  for (auto &root_node : topologies) {
    // A rooted bifurcating tree with L leaves has 2L - 1 nodes.
    size_t node_count = 2 * root_node->LeafCount() - 1;
    std::vector<double> branch_lengths(node_count);
    root_node->RootedPCSPPreorder(
        [this, &branch_lengths, &gpcsp_indexed_branch_lengths](
            const Node *sister, const Node *focal, const Node *child0,
            const Node *child1) {
          Bitset parent_subsplit = Bitset::Subsplit(sister->Leaves(), focal->Leaves());
          Bitset child_subsplit = Bitset::Subsplit(child0->Leaves(), child1->Leaves());
          EdgeId gpcsp_idx = GetDAG().GetEdgeIdx(parent_subsplit, child_subsplit);
          branch_lengths[focal->Id()] = gpcsp_indexed_branch_lengths[gpcsp_idx.value_];
          // Leaves are apparently never the focal node of this traversal, so
          // their branch lengths are filled in when they appear as sister or
          // children (inferred from the handling below — confirm).
          if (sister->IsLeaf()) {
            gpcsp_idx = GetEdgeIndexForLeafNode(parent_subsplit, sister);
            branch_lengths[sister->Id()] =
                gpcsp_indexed_branch_lengths[gpcsp_idx.value_];
          }
          if (child0->IsLeaf()) {
            gpcsp_idx = GetEdgeIndexForLeafNode(child_subsplit, child0);
            branch_lengths[child0->Id()] =
                gpcsp_indexed_branch_lengths[gpcsp_idx.value_];
          }
          if (child1->IsLeaf()) {
            gpcsp_idx = GetEdgeIndexForLeafNode(child_subsplit, child1);
            branch_lengths[child1->Id()] =
                gpcsp_indexed_branch_lengths[gpcsp_idx.value_];
          }
        },
        false);
    tree_vector.emplace_back(root_node, std::move(branch_lengths));
  }
  return RootedTreeCollection(tree_vector, tree_collection_.TagTaxonMap());
}
// Generate every topology in the DAG and equip each with GP branch lengths.
RootedTreeCollection GPInstance::GenerateCompleteRootedTreeCollection() {
  auto all_topologies = GetDAG().GenerateAllTopologies();
  return TreesWithGPBranchLengthsOfTopologies(std::move(all_topologies));
}
// For each GPCSP, scan its branch length over `steps` multiples
// (scale_min..scale_max) of the optimized value and record (branch length,
// log likelihood) pairs into per_pcsp_lik_surfaces_.
// Row layout: row index = gpcsp_count * step + gpcsp index.
void GPInstance::GetPerGPCSPLogLikelihoodSurfaces(size_t steps, double scale_min,
                                                  double scale_max) {
  const EigenVectorXd optimized_branch_lengths = GetGPEngine().GetBranchLengths();
  size_t gpcsp_count = optimized_branch_lengths.size();
  const EigenVectorXd scaling_vector =
      EigenVectorXd::LinSpaced(steps, scale_min, scale_max);
  per_pcsp_lik_surfaces_ = EigenMatrixXd(gpcsp_count * steps, 2);
  for (EdgeId gpcsp_idx = EdgeId(0); gpcsp_idx < gpcsp_count; gpcsp_idx++) {
    EigenVectorXd gpcsp_new_branch_lengths =
        scaling_vector * optimized_branch_lengths[gpcsp_idx.value_];
    // Only this GPCSP's entry is varied; all others keep optimized values.
    EigenVectorXd new_branch_length_vector = optimized_branch_lengths;
    for (size_t i = 0; i < steps; i++) {
      new_branch_length_vector[gpcsp_idx.value_] = gpcsp_new_branch_lengths[i];
      GetGPEngine().SetBranchLengths(new_branch_length_vector);
      PopulatePLVs();
      ComputeLikelihoods();
      size_t matrix_position = gpcsp_count * i;
      per_pcsp_lik_surfaces_(matrix_position + gpcsp_idx.value_, 0) =
          gpcsp_new_branch_lengths[i];
      per_pcsp_lik_surfaces_(matrix_position + gpcsp_idx.value_, 1) =
          GetGPEngine().GetPerGPCSPLogLikelihoods(gpcsp_idx.value_, 1)(0, 0);
    }
  }
  // Reset back to optimized branch lengths
  GetGPEngine().SetBranchLengths(optimized_branch_lengths);
}
// Starting from the optimized branch lengths, perturb each GPCSP's branch
// length to 0.1 and re-optimize, recording every (branch length, log
// likelihood) pair visited until the log likelihood returns to within 1e-3
// of its optimum (or 5 runs elapse). Results land in
// tracked_values_after_perturbing_, one row per recorded run, labeled with
// the GPCSP's pretty index.
void GPInstance::PerturbAndTrackValuesFromOptimization() {
  const EigenVectorXd optimized_branch_lengths = GetGPEngine().GetBranchLengths();
  const EigenVectorXd optimized_per_pcsp_llhs =
      GetGPEngine().GetPerGPCSPLogLikelihoods();
  size_t gpcsp_count = optimized_branch_lengths.size();
  EigenVectorXi run_counts = EigenVectorXi::Zero(gpcsp_count);
  EigenMatrixXd tracked_optimization_values(1, 2);
  const auto pretty_indexer = PrettyIndexer();
  StringVector pretty_index_vector;
  GPOperationVector branch_optimization_operations =
      GetDAG().BranchLengthOptimization();
  for (size_t gpcsp_idx = 0; gpcsp_idx < gpcsp_count; gpcsp_idx++) {
    double optimized_llh = optimized_per_pcsp_llhs[gpcsp_idx];
    double current_branch_length = 0.1;
    while (true) {
      run_counts[gpcsp_idx]++;
      // Perturb only this GPCSP's entry; all others stay at their optimum.
      EigenVectorXd new_branch_length_vector = optimized_branch_lengths;
      new_branch_length_vector[gpcsp_idx] = current_branch_length;
      GetGPEngine().SetBranchLengths(new_branch_length_vector);
      PopulatePLVs();
      ComputeLikelihoods();
      double current_llh = GetGPEngine().GetPerGPCSPLogLikelihoods(gpcsp_idx, 1)(0, 0);
      // Write this observation into the last row, then grow the matrix by
      // one row for the next one.
      tracked_optimization_values.row(tracked_optimization_values.rows() - 1)
          << current_branch_length,
          current_llh;
      tracked_optimization_values.conservativeResize(
          tracked_optimization_values.rows() + 1, Eigen::NoChange);
      if (fabs(current_llh - optimized_llh) < 1e-3 || run_counts[gpcsp_idx] > 5) {
        break;
      } else {
        ProcessOperations(branch_optimization_operations);
        current_branch_length = GetGPEngine().GetBranchLengths()[gpcsp_idx];
      }
    }
    // Repeat this GPCSP's label once per recorded run so rows line up.
    pretty_index_vector.insert(pretty_index_vector.end(), run_counts[gpcsp_idx],
                               pretty_indexer.at(gpcsp_idx));
  }
  // Reset back to optimized branch lengths
  GetGPEngine().SetBranchLengths(optimized_branch_lengths);
  // Drop the trailing empty row created by the last conservativeResize.
  tracked_optimization_values.conservativeResize(tracked_optimization_values.rows() - 1,
                                                 Eigen::NoChange);
  tracked_values_after_perturbing_.reserve(tracked_optimization_values.rows());
  for (int i = 0; i < tracked_optimization_values.rows(); i++) {
    tracked_values_after_perturbing_.push_back(
        {pretty_index_vector.at(i), tracked_optimization_values.row(i)});
  }
}
// Build a vector mapping edge index -> PCSP string representation.
// Performance fix: the original called GetDAG().BuildEdgeIndexer() twice —
// once for its size and once to iterate — constructing the indexer map
// twice; build it once and reuse it.
StringVector GPInstance::PrettyIndexer() const {
  const auto edge_indexer = GetDAG().BuildEdgeIndexer();
  StringVector pretty_representation(edge_indexer.size());
  // #350 consider use of edge vs pcsp here.
  for (const auto &[edge, idx] : edge_indexer) {
    pretty_representation[idx] = edge.PCSPToString();
  }
  return pretty_representation;
}
// Pair each entry of v with its PCSP string label; v may be shorter than the
// indexer but not longer.
StringDoubleVector GPInstance::PrettyIndexedVector(EigenConstVectorXdRef v) {
  const auto pretty_indexer = PrettyIndexer();
  Assert(v.size() <= static_cast<Eigen::Index>(pretty_indexer.size()),
         "v is too long in PrettyIndexedVector");
  StringDoubleVector result;
  result.reserve(v.size());
  for (Eigen::Index i = 0; i < v.size(); i++) {
    result.push_back({pretty_indexer.at(i), v(i)});
  }
  return result;
}
// Pair each row of m with a PCSP string label, wrapping around the indexer
// when m has more rows than labels (used for per-iteration matrices).
// Fix: iterate with Eigen::Index and compute the wrap-around index in
// size_t, avoiding the int/size_t signed-unsigned mix of the original.
VectorOfStringAndEigenVectorXdPairs GPInstance::PrettyIndexedMatrix(
    EigenConstMatrixXdRef m) {
  VectorOfStringAndEigenVectorXdPairs result;
  result.reserve(m.rows());
  const auto pretty_indexer = PrettyIndexer();
  for (Eigen::Index i = 0; i < m.rows(); i++) {
    const size_t idx = static_cast<size_t>(i) % pretty_indexer.size();
    result.push_back({pretty_indexer.at(idx), m.row(i)});
  }
  return result;
}
// Expose the engine's SBN parameters.
EigenConstVectorXdRef GPInstance::GetSBNParameters() {
  return GetGPEngine().GetSBNParameters();
}

// SBN parameters, labeled with their PCSP strings.
StringDoubleVector GPInstance::PrettyIndexedSBNParameters() {
  return PrettyIndexedVector(GetSBNParameters());
}

// Branch lengths, labeled with their PCSP strings.
StringDoubleVector GPInstance::PrettyIndexedBranchLengths() {
  return PrettyIndexedVector(GetGPEngine().GetBranchLengths());
}

// Per-GPCSP log likelihoods, labeled with their PCSP strings.
StringDoubleVector GPInstance::PrettyIndexedPerGPCSPLogLikelihoods() {
  return PrettyIndexedVector(GetGPEngine().GetPerGPCSPLogLikelihoods());
}

// Per-GPCSP components of the full log marginal, labeled with PCSP strings.
StringDoubleVector GPInstance::PrettyIndexedPerGPCSPComponentsOfFullLogMarginal() {
  return PrettyIndexedVector(GetGPEngine().GetPerGPCSPComponentsOfFullLogMarginal());
}
// Intermediate (per-iteration) branch lengths, labeled with PCSP strings.
VectorOfStringAndEigenVectorXdPairs
GPInstance::PrettyIndexedIntermediateBranchLengths() {
  return PrettyIndexedMatrix(per_pcsp_branch_lengths_);
}

// Intermediate per-GPCSP log likelihoods, labeled with PCSP strings.
VectorOfStringAndEigenVectorXdPairs
GPInstance::PrettyIndexedIntermediatePerGPCSPLogLikelihoods() {
  return PrettyIndexedMatrix(per_pcsp_log_lik_);
}

// Per-GPCSP log-likelihood surfaces, labeled with PCSP strings.
VectorOfStringAndEigenVectorXdPairs
GPInstance::PrettyIndexedPerGPCSPLogLikelihoodSurfaces() {
  return PrettyIndexedMatrix(per_pcsp_lik_surfaces_);
}
// Write pretty-indexed SBN parameters to CSV.
void GPInstance::SBNParametersToCSV(const std::string &file_path) {
  CSV::StringDoubleVectorToCSV(PrettyIndexedSBNParameters(), file_path);
}

// Write the uniform-on-support SBN prior to CSV.
void GPInstance::SBNPriorToCSV(const std::string &file_path) {
  CSV::StringDoubleVectorToCSV(
      PrettyIndexedVector(GetDAG().BuildUniformOnTopologicalSupportPrior()), file_path);
}

// Write pretty-indexed branch lengths to CSV.
void GPInstance::BranchLengthsToCSV(const std::string &file_path) {
  CSV::StringDoubleVectorToCSV(PrettyIndexedBranchLengths(), file_path);
}

// Write pretty-indexed per-GPCSP log likelihoods to CSV.
void GPInstance::PerGPCSPLogLikelihoodsToCSV(const std::string &file_path) {
  CSV::StringDoubleVectorToCSV(PrettyIndexedPerGPCSPLogLikelihoods(), file_path);
}
// Write a per-PCSP matrix to CSV, one row per entry: "pcsp,v1,v2,..." with
// values at 9 significant digits. Fails if the stream goes bad.
void GPInstance::PerPCSPIndexedMatrixToCSV(
    VectorOfStringAndEigenVectorXdPairs per_pcsp_indexed_matrix,
    const std::string &file_path) {
  std::ofstream out_stream(file_path);
  for (const auto &[pcsp_string, values] : per_pcsp_indexed_matrix) {
    out_stream << pcsp_string;
    for (const auto &value : values) {
      out_stream << "," << std::setprecision(9) << value;
    }
    out_stream << std::endl;
  }
  if (out_stream.bad()) {
    Failwith("Failure writing to " + file_path);
  }
  out_stream.close();
}
// CSV export of the per-PCSP intermediate branch lengths.
void GPInstance::IntermediateBranchLengthsToCSV(const std::string &file_path) {
  return PerPCSPIndexedMatrixToCSV(PrettyIndexedIntermediateBranchLengths(), file_path);
}
// CSV export of the per-PCSP intermediate log likelihoods.
void GPInstance::IntermediatePerGPCSPLogLikelihoodsToCSV(const std::string &file_path) {
  return PerPCSPIndexedMatrixToCSV(PrettyIndexedIntermediatePerGPCSPLogLikelihoods(),
                                   file_path);
}
void GPInstance::PerGPCSPLogLikelihoodSurfacesToCSV(const std::string &file_path) {
std::ofstream out_stream(file_path);
VectorOfStringAndEigenVectorXdPairs vect =
PrettyIndexedPerGPCSPLogLikelihoodSurfaces();
for (const auto &[s, eigen] : vect) {
out_stream << s;
for (const auto &value : eigen) {
out_stream << "," << std::setprecision(9) << value;
}
out_stream << std::endl;
}
if (out_stream.bad()) {
Failwith("Failure writing to " + file_path);
}
out_stream.close();
}
// CSV export of the values tracked after perturbing during optimization.
void GPInstance::TrackedOptimizationValuesToCSV(const std::string &file_path) {
  return PerPCSPIndexedMatrixToCSV(tracked_values_after_perturbing_, file_path);
}
// Deep-copy each currently loaded topology, then attach GP branch lengths.
RootedTreeCollection GPInstance::CurrentlyLoadedTreesWithGPBranchLengths() {
  Node::NodePtrVec copied_topologies;
  for (const auto &loaded_tree : tree_collection_.Trees()) {
    copied_topologies.push_back(loaded_tree.Topology()->DeepCopy());
  }
  return TreesWithGPBranchLengthsOfTopologies(std::move(copied_topologies));
}
// Like CurrentlyLoadedTreesWithGPBranchLengths, but keep only the loaded trees
// whose topology contains the given PCSP (supplied as a bitset string).
// Calls Failwith if the PCSP is not in the DAG's edge indexer.
RootedTreeCollection GPInstance::CurrentlyLoadedTreesWithAPCSPStringAndGPBranchLengths(
    const std::string &pcsp_string) {
  const BitsetSizeMap &indexer = GetDAG().BuildEdgeIndexer();
  Bitset pcsp(pcsp_string);
  auto search = indexer.find(pcsp);
  if (search == indexer.end()) {
    Failwith("Don't have " + pcsp_string + " as a PCSP in the instance!");
  }
  auto pcsp_index = search->second;
  Node::NodePtrVec topologies;
  for (const auto &tree : tree_collection_.Trees()) {
    // max-size_t is used as the default index — presumably for PCSPs outside the
    // DAG's support; confirm against IndexerRepresentationOf.
    auto indexer_representation = GetDAG().IndexerRepresentationOf(
        indexer, tree.Topology(), std::numeric_limits<size_t>::max());
    // Keep the topology only if its representation contains the target PCSP.
    if (std::find(indexer_representation.begin(), indexer_representation.end(),
                  pcsp_index) != indexer_representation.end()) {
      topologies.push_back(tree.Topology()->DeepCopy());
    }
  }
  return TreesWithGPBranchLengthsOfTopologies(std::move(topologies));
}
// Write the currently loaded trees (with GP branch lengths) as Newick.
void GPInstance::ExportTrees(const std::string &out_path) {
  auto trees = CurrentlyLoadedTreesWithGPBranchLengths();
  trees.ToNewickFile(out_path);
}
// Write only the loaded trees containing the given PCSP.
void GPInstance::ExportTreesWithAPCSP(const std::string &pcsp_string,
                                      const std::string &out_path) {
  auto trees = CurrentlyLoadedTreesWithAPCSPStringAndGPBranchLengths(pcsp_string);
  trees.ToNewickFile(out_path);
}
// Write every tree spanned by the subsplit DAG.
void GPInstance::ExportAllGeneratedTrees(const std::string &out_path) {
  auto trees = GenerateCompleteRootedTreeCollection();
  trees.ToNewickFile(out_path);
}
// Write every topology spanned by the DAG, with unit branch lengths.
void GPInstance::ExportAllGeneratedTopologies(const std::string &out_path) {
  TreeCollection::UnitBranchLengthTreesOf(GetDAG().GenerateAllTopologies(),
                                          tree_collection_.TagTaxonMap())
      .ToNewickTopologyFile(out_path);
}
// Replace the loaded tree collection with all trees spanned by the DAG.
void GPInstance::LoadAllGeneratedTrees() {
  tree_collection_ = GenerateCompleteRootedTreeCollection();
}
// Current branch lengths from the GP engine.
EigenVectorXd GPInstance::GetBranchLengths() const {
  return GetGPEngine().GetBranchLengths();
}
// Per-PCSP log likelihoods from the GP engine.
EigenVectorXd GPInstance::GetPerPCSPLogLikelihoods() const {
  return GetGPEngine().GetPerGPCSPLogLikelihoods();
}
// Serialize the subsplit DAG in Graphviz dot format and write it to out_path.
// Calls Failwith if the stream goes bad while writing.
void GPInstance::SubsplitDAGToDot(const std::string &out_path,
                                  bool show_index_labels) const {
  std::ofstream dot_stream(out_path);
  dot_stream << GetDAG().ToDot(show_index_labels) << std::endl;
  if (dot_stream.bad()) {
    Failwith("Failure writing to " + out_path);
  }
  dot_stream.close();
}
// Write the DAG's covering trees (with GP branch lengths attached) to out_path,
// one Newick string per line.
void GPInstance::ExportCoveringTreesWithGPBranchLengths(
    const std::string &out_path) const {
  const auto trees = GetDAG().GenerateCoveringTrees(GetGPEngine().GetBranchLengths());
  std::ofstream out_file(out_path);
  // const auto&: the previous loop copied each RootedTree per iteration.
  for (const auto &tree : trees) {
    // '\n' rather than std::endl: avoid a stream flush per tree.
    out_file << tree.Newick() << '\n';
  }
  out_file.close();
}
// Write the Newick serialization of the TP engine's top trees to out_path.
void GPInstance::ExportTopTreesWithTPBranchLengths(const std::string &out_path) const {
  std::ofstream out_file(out_path);
  out_file << GetTPEngine().ToNewickOfTopTrees() << std::endl;
  out_file.close();
}
// ** TP Engine
// Construct the top-pruning engine over the current DAG, site pattern, and loaded
// trees, backed by memory-mapped likelihood and parsimony scratch files.
void GPInstance::MakeTPEngine() {
  auto site_pattern = MakeSitePattern();
  std::string mmap_likelihood_path = mmap_file_path_.value() + ".tp_lik";
  std::string mmap_parsimony_path = mmap_file_path_.value() + ".tp_pars";
  const auto &loaded_trees = GetCurrentlyLoadedTrees();
  const auto edge_indexer = GetDAG().BuildEdgeIndexer();
  tp_engine_ =
      std::make_unique<TPEngine>(GetDAG(), site_pattern, mmap_likelihood_path,
                                 mmap_parsimony_path, loaded_trees, edge_indexer);
}
// Accessors; it is a programming error to call these before MakeTPEngine.
TPEngine &GPInstance::GetTPEngine() {
  Assert(tp_engine_,
         "TPEngine not available. Call MakeTPEngine before accessing TPEngine.");
  return *tp_engine_;
}
const TPEngine &GPInstance::GetTPEngine() const {
  Assert(tp_engine_,
         "TPEngine not available. Call MakeTPEngine before accessing TPEngine.");
  return *tp_engine_;
}
// Initialize the TP engine's choice map via its "take first" strategy over the
// currently loaded trees.
void GPInstance::TPEngineSetChoiceMapByTakingFirst(const bool use_subsplit_method) {
  GetTPEngine().SetChoiceMapByTakingFirst(
      GetCurrentlyLoadedTrees(), GetDAG().BuildEdgeIndexer(), use_subsplit_method);
}
// Initialize the TP engine's branch lengths via its "take first" strategy.
void GPInstance::TPEngineSetBranchLengthsByTakingFirst() {
  GetTPEngine().SetBranchLengthsByTakingFirst(GetCurrentlyLoadedTrees(),
                                              GetDAG().BuildEdgeIndexer());
}
// Covering trees of the DAG with TP branch lengths attached.
std::vector<RootedTree> GPInstance::TPEngineGenerateCoveringTrees() {
  return GetDAG().GenerateCoveringTrees(GetTPEngine().GetBranchLengths());
}
// Map from tree id to the TP engine's top trees.
TreeIdTreeMap GPInstance::TPEngineGenerateTopRootedTrees() {
  return GetTPEngine().BuildMapOfTreeIdToTopTrees();
}
// Write the DAG's covering trees (with TP branch lengths attached) to out_path,
// one Newick string per line.
void GPInstance::TPEngineExportCoveringTrees(const std::string &out_path) {
  std::ofstream file_out;
  file_out.open(out_path);
  auto trees = GetDAG().GenerateCoveringTrees(GetTPEngine().GetBranchLengths());
  for (const auto &tree : trees) {
    // '\n' rather than std::endl: avoid a stream flush per tree.
    file_out << tree.Newick() << '\n';
  }
  file_out.close();
}
// Write the Newick serialization of the TP engine's top trees to out_path.
void GPInstance::TPEngineExportTopTrees(const std::string &out_path) {
  std::ofstream top_trees_out(out_path);
  top_trees_out << GetTPEngine().ToNewickOfTopTrees();
  top_trees_out.close();
}
// ** NNI Engine
// Construct the NNI engine; attach evaluation engines for whichever of the GP and
// TP engines have already been built.
void GPInstance::MakeNNIEngine() {
  nni_engine_ = std::make_unique<NNIEngine>(GetDAG(), nullptr, nullptr);
  if (gp_engine_ != nullptr) {
    GetNNIEngine().MakeGPEvalEngine(gp_engine_.get());
  }
  if (tp_engine_ != nullptr) {
    GetNNIEngine().MakeTPEvalEngine(tp_engine_.get());
  }
}
// Accessors; it is a programming error to call these before MakeNNIEngine.
NNIEngine &GPInstance::GetNNIEngine() {
  Assert(nni_engine_,
         "NNIEngine not available. Call MakeNNIEngine before accessing NNIEngine.");
  return *nni_engine_;
}
const NNIEngine &GPInstance::GetNNIEngine() const {
  Assert(nni_engine_,
         "NNIEngine not available. Call MakeNNIEngine before accessing NNIEngine.");
  return *nni_engine_;
}
// ** Tree Engines
// Construct a BEAGLE-backed likelihood engine (JC69 model, constant sites, strict
// clock) over the current site pattern.
void GPInstance::MakeLikelihoodTreeEngine() {
  auto beagle_pref_flags = BEAGLE_FLAG_VECTOR_SSE;
  PhyloModelSpecification model_spec{"JC69", "constant", "strict"};
  SitePattern site_pattern = MakeSitePattern();
  bool use_tip_states = true;
  likelihood_tree_engine_ = std::make_unique<FatBeagle>(
      model_spec, site_pattern, beagle_pref_flags, use_tip_states);
}
// Accessor; it is a programming error to call before MakeLikelihoodTreeEngine.
FatBeagle &GPInstance::GetLikelihoodTreeEngine() {
  Assert(likelihood_tree_engine_, "LikelihoodTreeEngine not available.");
  return *likelihood_tree_engine_;
}
// Construct the Sankoff-parsimony tree engine over the current site pattern,
// backed by a memory-mapped scratch file.
void GPInstance::MakeParsimonyTreeEngine() {
  auto site_pattern = MakeSitePattern();
  auto mmap_file_path = GetMMapFilePath() + ".sankoff";
  parsimony_tree_engine_ =
      std::make_unique<SankoffHandler>(site_pattern, mmap_file_path);
}
// Accessor; it is a programming error to call before MakeParsimonyTreeEngine.
SankoffHandler &GPInstance::GetParsimonyTreeEngine() {
  // Fixed the doubled period in the assertion message.
  Assert(parsimony_tree_engine_, "ParsimonyTreeEngine not available.");
  return *parsimony_tree_engine_;
}
// ** Taxon Map
// Map from internal node tags to taxon names.
const TagStringMap &GPInstance::GetTaxonMap() const {
  return tree_collection_.TagTaxonMap();
}
// All taxon names in the loaded tree collection.
StringVector GPInstance::GetTaxonNames() const { return tree_collection_.TaxonNames(); }
| 33,196
|
C++
|
.cpp
| 757
| 39.067371
| 88
| 0.719617
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,054
|
gp_doctest.cpp
|
phylovi_bito/src/gp_doctest.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "include_doctest.hpp"
#include "sugar.hpp"
#include "combinatorics.hpp"
#include "gp_instance.hpp"
#include "phylo_model.hpp"
#include "reindexer.hpp"
#include "rooted_sbn_instance.hpp"
#include "stopwatch.hpp"
#include "tidy_subsplit_dag.hpp"
#include "nni_engine.hpp"
#include "pv_handler.hpp"
#include "topology_sampler.hpp"
#include "tp_engine.hpp"
#include "tp_choice_map.hpp"
#include "sankoff_matrix.hpp"
#include "sankoff_handler.hpp"
#include "dag_data.hpp"
#include "optimization.hpp"
using namespace GPOperations; // NOLINT
using PLVType = PLVNodeHandler::PLVType;
// #350 remove all uses of GPCSP.
std::ostream& operator<<(std::ostream& os, EigenConstMatrixXdRef mx) {
for (Eigen::Index i = 0; i < mx.rows(); i++) {
for (Eigen::Index j = 0; j < mx.cols(); j++) {
os << "[" << i << "," << j << "]: " << mx(i, j) << "\t";
}
os << std::endl;
}
return os;
}
// Let the "venus" node be the common ancestor of mars and saturn.
enum HelloGPCSP { jupiter, mars, saturn, venus, rootsplit, root };
// *** GPInstances used for testing ***
// Build a GPInstance from a fasta and a (rooted) Newick file, with the DAG and GP
// engine constructed and ready to use.
GPInstance GPInstanceOfFiles(
    const std::string& fasta_path, const std::string& newick_path,
    const std::string mmap_filepath = std::string("_ignore/mmapped_pv.data"),
    const bool use_gradients = false) {
  GPInstance inst(mmap_filepath);
  inst.ReadFastaFile(fasta_path);
  inst.ReadNewickFile(newick_path, false);
  inst.MakeDAG();
  inst.MakeGPEngine();
  inst.UseGradientOptimization(use_gradients);
  return inst;
}
// Our tree is (see check below)
// (jupiter:0.113,(mars:0.15,saturn:0.1)venus:0.22):0.;
// You can see a helpful diagram at
// https://github.com/phylovi/bito/issues/349#issuecomment-898672399
GPInstance MakeHelloGPInstance(const std::string& fasta_path) {
  auto inst = GPInstanceOfFiles(fasta_path, "data/hello_rooted.nwk");
  EigenVectorXd branch_lengths(5);
  // Order set by HelloGPCSP.
  branch_lengths << 0, 0.22, 0.113, 0.15, 0.1;
  inst.GetGPEngine().SetBranchLengths(branch_lengths);
  CHECK_EQ(inst.GenerateCompleteRootedTreeCollection().Newick(),
           "(jupiter:0.113,(mars:0.15,saturn:0.1):0.22):0;\n");
  return inst;
}
GPInstance MakeHelloGPInstance() { return MakeHelloGPInstance("data/hello.fasta"); }
// Same topology, but the alignment has a single site.
GPInstance MakeHelloGPInstanceSingleNucleotide() {
  return MakeHelloGPInstance("data/hello_single_nucleotide.fasta");
}
GPInstance MakeHelloGPInstanceTwoTrees() {
  return GPInstanceOfFiles("data/hello.fasta", "data/hello_rooted_two_trees.nwk");
}
GPInstance MakeFiveTaxonInstance() {
  return GPInstanceOfFiles("data/five_taxon.fasta", "data/five_taxon_rooted.nwk");
}
// The sequences for this were obtained by cutting DS1 down to 5 taxa by taking the
// first 4 taxa then moving taxon 15 (Latimera) to be number 5. The alignment was
// trimmed to 500 sites by using seqmagick convert with `--cut 500:1000`.
// The DAG obtained by `inst.SubsplitDAGToDot("_ignore/ds1-reduced-5.dot");` can be seen
// at
// https://github.com/phylovi/bito/issues/391#issuecomment-1169048568
GPInstance MakeDS1Reduced5Instance() {
  auto inst = GPInstanceOfFiles("data/ds1-reduced-5.fasta", "data/ds1-reduced-5.nwk");
  return inst;
}
// Influenza-A instance with an explicit rescaling threshold for the GP engine.
GPInstance MakeFluAGPInstance(double rescaling_threshold) {
  auto inst = GPInstanceOfFiles("data/fluA.fa", "data/fluA.tree");
  inst.MakeGPEngine(rescaling_threshold);
  inst.GetGPEngine().SetBranchLengthsToConstant(0.01);
  return inst;
}
// The two-tree hello DAG has 10 edges and 8 nodes.
TEST_CASE("DAGSummaryStatistics") {
  auto inst = MakeHelloGPInstanceTwoTrees();
  StringSizeMap summaries = {{"edge_count", 10}, {"node_count", 8}};
  CHECK(summaries == inst.DAGSummaryStatistics());
}
// Reference optimal branch lengths for the hello marginal-likelihood tests.
EigenVectorXd MakeHelloGPInstanceMarginalLikelihoodTestBranchLengths() {
  EigenVectorXd hello_gp_optimal_branch_lengths(10);
  hello_gp_optimal_branch_lengths << 1, 1, 0.066509261, 0.00119570257, 0.00326456973,
      0.0671995398, 0.203893516, 0.204056242, 0.0669969961, 0.068359082;
  return hello_gp_optimal_branch_lengths;
}
TEST_CASE("GPInstance: straightforward classical likelihood calculation") {
  auto inst = MakeHelloGPInstance();
  auto& engine = inst.GetGPEngine();
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  EigenVectorXd realized_log_likelihoods =
      inst.GetGPEngine().GetPerGPCSPLogLikelihoods();
  // With a single tree, every PCSP carries the full tree log likelihood.
  CheckVectorXdEquality(-84.77961943, realized_log_likelihoods, 1e-6);
  CHECK_LT(fabs(engine.GetLogMarginalLikelihood() - -84.77961943), 1e-6);
}
// Compute the exact marginal likelihood via brute force to compare with generalized
// pruning.
// IMPORTANT: We assume that the trees in `newick_path` are all of the trees over which
// we should marginalize. So if you have generated a subsplit DAG with a set of trees,
// use GenerateCompleteRootedTreeCollection to get all the trees over which you will be
// marginalizing.
// If we rename things in #288, let's do that in the body of this function too.
// Returns the total exact marginal log likelihood and a map from pretty PCSP
// strings to their per-PCSP log marginal components.
std::pair<double, StringDoubleMap> ComputeExactMarginal(const std::string& newick_path,
                                                        const std::string& fasta_path) {
  RootedSBNInstance sbn_instance("charlie");
  sbn_instance.ReadNewickFile(newick_path, false);
  sbn_instance.ProcessLoadedTrees();
  const Alignment alignment = Alignment::ReadFasta(fasta_path);
  PhyloModelSpecification simple_specification{"JC69", "constant", "strict"};
  sbn_instance.SetAlignment(alignment);
  sbn_instance.PrepareForPhyloLikelihood(simple_specification, 1);
  const size_t tree_count = sbn_instance.TreeCount();
  const size_t gpcsp_count = sbn_instance.SBNSupport().GPCSPCount();
  auto indexer_representations = sbn_instance.MakeIndexerRepresentations();
  double exact_marginal_log_lik = 0.0;
  EigenVectorXd exact_per_pcsp_log_marginals(gpcsp_count);
  exact_per_pcsp_log_marginals.setZero();
  // Uniform prior over the loaded trees.
  double log_prior_term = log(1. / tree_count);
  // Marginalize site by site: restrict the alignment to one column, compute each
  // tree's likelihood, then log-sum over trees (overall and per PCSP).
  for (size_t column_idx = 0; column_idx < alignment.Length(); column_idx++) {
    sbn_instance.SetAlignment(alignment.ExtractSingleColumnAlignment(column_idx));
    sbn_instance.PrepareForPhyloLikelihood(simple_specification, 1);
    auto per_site_phylo_likelihoods = sbn_instance.UnrootedLogLikelihoods();
    double per_site_log_marginal = DOUBLE_NEG_INF;
    EigenVectorXd per_site_per_pcsp_log_marginals(gpcsp_count);
    per_site_per_pcsp_log_marginals.setConstant(DOUBLE_NEG_INF);
    for (size_t tree_idx = 0; tree_idx < tree_count; tree_idx++) {
      const auto per_site_phylo_likelihood = per_site_phylo_likelihoods[tree_idx];
      per_site_log_marginal =
          NumericalUtils::LogAdd(per_site_log_marginal, per_site_phylo_likelihood);
      // Accumulate this tree's likelihood into every PCSP the tree contains.
      for (const auto& gpcsp_idx : indexer_representations.at(tree_idx)) {
        per_site_per_pcsp_log_marginals[gpcsp_idx] = NumericalUtils::LogAdd(
            per_site_per_pcsp_log_marginals[gpcsp_idx], per_site_phylo_likelihood);
      }
    }
    per_site_log_marginal += log_prior_term;
    per_site_per_pcsp_log_marginals.array() += log_prior_term;
    exact_marginal_log_lik += per_site_log_marginal;
    exact_per_pcsp_log_marginals.array() += per_site_per_pcsp_log_marginals.array();
  }
  return {
      exact_marginal_log_lik,
      UnorderedMapOf(sbn_instance.PrettyIndexedVector(exact_per_pcsp_log_marginals))};
}
// Compare GP per-PCSP values against the exact per-PCSP marginals; entries missing
// from the exact map must involve an all-zero (leaf-adjacent) subsplit half.
void CheckExactMapVsGPVector(const StringDoubleMap& exact_map,
                             const StringDoubleVector& gp_vector) {
  for (const auto& [gp_string, gp_value] : gp_vector) {
    if (exact_map.find(gp_string) == exact_map.end()) {
      // NOTE(review): substr(0, find('|') - 1) drops the character just before the
      // first '|' — presumably intentional for this string format; confirm.
      Assert(Bitset(gp_string.substr(0, gp_string.find('|') - 1)).None() ||
                 Bitset(gp_string.substr(gp_string.rfind('|') + 1)).None(),
             "Missing an internal node in CheckExactMapVsGPVector.");
    } else {
      const double tolerance = 1e-5;
      const double error = fabs(exact_map.at(gp_string) - gp_value);
      if (error > tolerance) {
        std::cout << "check failed for " << gp_string << ":" << std::endl;
      }
      CHECK_LT(error, tolerance);
    }
  }
}
// Test the composite marginal to that generated by ComputeExactMarginal.
//
// IMPORTANT: See the note about appropriate tree file input to that function, as the
// same applies here.
// The instance is deliberately taken by value: the test mutates its own copy.
void TestCompositeMarginal(GPInstance inst, const std::string& fasta_path) {
  inst.EstimateBranchLengths(0.00001, 100, true);
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  inst.ComputeMarginalLikelihood();
  // Round-trip the GP-optimized trees through a file so the exact computation
  // marginalizes over exactly the same trees and branch lengths.
  std::string tree_path = "_ignore/test_marginal_trees.nwk";
  const auto trees = inst.CurrentlyLoadedTreesWithGPBranchLengths();
  trees.ToNewickFile(tree_path);
  auto [exact_log_likelihood, exact_per_pcsp_log_marginal] =
      ComputeExactMarginal(tree_path, fasta_path);
  double gp_marginal_log_likelihood = inst.GetGPEngine().GetLogMarginalLikelihood();
  auto gp_per_pcsp_log_marginal =
      inst.PrettyIndexedPerGPCSPComponentsOfFullLogMarginal();
  double tolerance = 1e-6;
  if (fabs(gp_marginal_log_likelihood - exact_log_likelihood) > tolerance) {
    std::cout << "gp_marginal_log_likelihood: " << gp_marginal_log_likelihood
              << std::endl;
    std::cout << "exact_log_likelihood: " << exact_log_likelihood << std::endl;
  }
  CHECK_LT(fabs(gp_marginal_log_likelihood - exact_log_likelihood), tolerance);
  CheckExactMapVsGPVector(exact_per_pcsp_log_marginal, gp_per_pcsp_log_marginal);
}
TEST_CASE("GPInstance: two tree marginal likelihood calculation") {
  TestCompositeMarginal(MakeHelloGPInstanceTwoTrees(), "data/hello.fasta");
}
TEST_CASE("GPInstance: marginal likelihood on five taxa") {
  TestCompositeMarginal(MakeFiveTaxonInstance(), "data/five_taxon.fasta");
}
TEST_CASE("GPInstance: DS1-reduced-5 marginal likelihood calculation") {
  TestCompositeMarginal(MakeDS1Reduced5Instance(), "data/ds1-reduced-5.fasta");
}
TEST_CASE("GPInstance: marginal likelihood on seven taxa and four trees") {
  const std::string fasta_path = "data/7-taxon-slice-of-ds1.fasta";
  // See the DAG at
  // https://github.com/phylovi/bito/issues/391#issuecomment-1169053191
  TestCompositeMarginal(
      GPInstanceOfFiles(fasta_path, "data/simplest-hybrid-marginal-all-trees.nwk"),
      fasta_path);
}
TEST_CASE("GPInstance: gradient calculation") {
  auto inst = MakeHelloGPInstanceSingleNucleotide();
  auto& engine = inst.GetGPEngine();
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  // Differentiate with respect to the rootsplit-to-jupiter branch.
  NodeId rootsplit_id = rootsplit;
  NodeId child_id = jupiter;
  NodeId rootsplit_jupiter_idx = 2;
  size_t hello_node_count_without_dag_root_node = 5;
  PVId leafward_idx = PLVNodeHandler::GetPVIndex(
      PLVType::P, child_id, hello_node_count_without_dag_root_node);
  PVId rootward_idx = PLVNodeHandler::GetPVIndex(
      PLVType::RLeft, rootsplit_id, hello_node_count_without_dag_root_node);
  OptimizeBranchLength op{leafward_idx.value_, rootward_idx.value_,
                          rootsplit_jupiter_idx.value_};
  DoublePair log_lik_and_derivative = engine.LogLikelihoodAndDerivative(op);
  // Expect log lik: -4.806671945.
  // Expect log lik derivative: -0.6109379521.
  CHECK_LT(fabs(log_lik_and_derivative.first - -4.806671945), 1e-6);
  CHECK_LT(fabs(log_lik_and_derivative.second - -0.6109379521), 1e-6);
}
// Same setup as above, but over the full multi-site alignment and checking the
// first two derivatives.
TEST_CASE("GPInstance: multi-site gradient calculation") {
  auto inst = MakeHelloGPInstance();
  auto& engine = inst.GetGPEngine();
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  NodeId rootsplit_id = rootsplit;
  NodeId child_id = jupiter;
  NodeId rootsplit_jupiter_idx = 2;
  size_t hello_node_count_without_dag_root_node = 5;
  PVId leafward_idx = PLVNodeHandler::GetPVIndex(
      PLVType::P, child_id, hello_node_count_without_dag_root_node);
  PVId rootward_idx = PLVNodeHandler::GetPVIndex(
      PLVType::RLeft, rootsplit_id, hello_node_count_without_dag_root_node);
  OptimizeBranchLength op{leafward_idx.value_, rootward_idx.value_,
                          rootsplit_jupiter_idx.value_};
  std::tuple<double, double, double> log_lik_and_derivatives =
      engine.LogLikelihoodAndFirstTwoDerivatives(op);
  // Expect log likelihood: -84.77961943.
  // Expect log llh first derivative: -18.22479569.
  // Expect log llh second derivative: -5.4460787413.
  CHECK_LT(fabs(std::get<0>(log_lik_and_derivatives) - -84.77961943), 1e-6);
  CHECK_LT(fabs(std::get<1>(log_lik_and_derivatives) - -18.22479569), 1e-6);
  CHECK_LT(fabs(std::get<2>(log_lik_and_derivatives) - -5.4460787413), 1e-6);
}
// We are outputting the branch length for PCSP 100-011-001
// which has a true branch length of 0.0694244266
// Run branch-length estimation with the given optimizer and return the estimate
// for that PCSP.
double ObtainBranchLengthWithOptimization(const OptimizationMethod method,
                                          bool is_quiet = true) {
  GPInstance inst = MakeHelloGPInstance();
  GPEngine& engine = inst.GetGPEngine();
  engine.SetOptimizationMethod(method);
  inst.EstimateBranchLengths(0.0001, 100, is_quiet);
  inst.MakeDAG();
  GPDAG& dag = inst.GetDAG();
  // Fall back to an out-of-range index if the PCSP is somehow absent.
  EdgeId default_index = EdgeId(dag.EdgeCountWithLeafSubsplits());
  Bitset gpcsp_bitset = Bitset("100011001");
  EdgeId index =
      AtWithDefault(dag.BuildEdgeIndexer(), gpcsp_bitset, default_index.value_);
  return inst.GetGPEngine().GetBranchLengths()(index.value_);
}
// Newton's method should beat Brent on this branch and land within tolerance of
// the known-true length.
TEST_CASE("GPInstance: Gradient-based optimization with Newton's Method") {
  double nongradient_length =
      ObtainBranchLengthWithOptimization(OptimizationMethod::BrentOptimization);
  double gradient_length =
      ObtainBranchLengthWithOptimization(OptimizationMethod::NewtonOptimization);
  double true_length = 0.0694244266;
  // Use fabs (as everywhere else in this file): unqualified abs can resolve to the
  // integer overload and silently truncate these sub-1 differences to zero.
  double nongrad_diff = fabs(nongradient_length - true_length);
  double grad_diff = fabs(gradient_length - true_length);
  double tol = 1e-6;
  if (grad_diff > nongrad_diff || grad_diff > tol) {
    std::cout << "nongrad_diff: " << nongrad_diff << std::endl;
    std::cout << "grad_diff: " << grad_diff << std::endl;
    std::cout << "nongradient_length: " << nongradient_length << std::endl;
    std::cout << "gradient_length: " << gradient_length << std::endl;
    std::cout << "true_length: " << true_length << std::endl;
  }
  CHECK_LT(grad_diff, nongrad_diff);
  CHECK_LT(grad_diff, tol);
}
// Build the fluA instance with the given rescaling threshold and return its log
// marginal likelihood.
double MakeAndRunFluAGPInstance(double rescaling_threshold) {
  auto inst = MakeFluAGPInstance(rescaling_threshold);
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  return inst.GetGPEngine().GetLogMarginalLikelihood();
}
// The marginal likelihood should be invariant to the rescaling threshold.
TEST_CASE("GPInstance: rescaling") {
  double difference = MakeAndRunFluAGPInstance(GPEngine::default_rescaling_threshold_) -
                      MakeAndRunFluAGPInstance(1e-4);
  CHECK_LT(fabs(difference), 1e-10);
}
// Convert a (string, double) pair vector into a map; duplicate keys are handled
// by SafeInsert. Take the vector by const reference — the original signature
// copied the whole vector (including every string) on each call.
StringDoubleMap StringDoubleMapOfStringDoubleVector(const StringDoubleVector& vect) {
  StringDoubleMap m;
  for (const auto& [str, x] : vect) {
    SafeInsert(m, str, x);
  }
  return m;
}
TEST_CASE("GPInstance: gather and hotstart branch lengths") {
  // » nw_topology data/hotstart_bootstrap_sample.nwk | nw_order - | sort | uniq -c
  //    1 (outgroup,(((z0,z1),z2),z3));
  //   33 (outgroup,((z0,z1),(z2,z3)));
  const std::string tree_path = "data/hotstart_bootstrap_sample.nwk";
  GPInstance inst("_ignore/mmapped_pv.data");
  // This is just a dummy fasta file, which is required to make an Engine.
  inst.ReadFastaFile("data/hotstart.fasta");
  inst.ReadNewickFile(tree_path, false);
  inst.MakeGPEngine();
  // We are going to verify correct assignment of the PCSP with sister z2, z3 and
  // children z0, z1, which only appears in the tree (outgroup,((z0,z1),(z2,z3))).
  // Vector of taxon names: [outgroup, z2, z3, z1, z0]
  // So, this below is the desired GPCSP: 01100|00011|00001
  // These branch lengths are obtained by excluding (outgroup,(((z0,z1),z2),z3)) (which
  // doesn't have this PCSP) and grabbing the rest of the branch lengths.
  //
  // We will first test that gather branch lengths is collecting the correct set of
  // branches, and then we will test whether hot start is accurately calculating the
  // mean of these branches.
  EigenVectorXd expected_bls_internal(33);
  expected_bls_internal << 0.1175370000, 0.1175750000, 0.1195780000, 0.0918962000,
      0.0918931000, 0.1192590000, 0.0906988000, 0.0906972000, 0.0905154000,
      0.0903663000, 0.1245620000, 0.1244890000, 0.1245050000, 0.1245550000,
      0.1245680000, 0.1248920000, 0.1248490000, 0.1164070000, 0.1164110000,
      0.1164120000, 0.1245670000, 0.1245650000, 0.1245670000, 0.1245670000,
      0.1240790000, 0.1242540000, 0.1242160000, 0.1242560000, 0.1892030000,
      0.1894900000, 0.1895430000, 0.1896900000, 0.1905710000;
  // Branch length index 4 corresponds to the internal PCSP described above.
  SizeDoubleVectorMap branch_lengths_from_sample = inst.GatherBranchLengths();
  EigenVectorXd gathered_bls =
      EigenVectorXdOfStdVectorDouble(branch_lengths_from_sample[4]);
  CheckVectorXdEquality(expected_bls_internal, gathered_bls, 1e-6);
  // Hot start should assign the mean of the gathered branch lengths.
  double true_mean_internal = expected_bls_internal.array().mean();
  inst.HotStartBranchLengths();
  CHECK_LT(fabs(true_mean_internal - inst.GetGPEngine().GetBranchLengths()(4)), 1e-8);
  // We also want to verify correct assignment for a pendant branch length.
  // Specifically, we are looking at the pendant branch length for z2 with sister z3. So
  // the desired GPCSP is 0010001000|0000000000. This corresponds to branch length index
  // 8, and is also found by excluding (outgroup, (((z0,z1),z2),z3)), which does not
  // have this PCSP.
  EigenVectorXd expected_bls_pendant(33);
  expected_bls_pendant << 0.0903520000, 0.0903100000, 0.0911710000, 0.0906700000,
      0.0906680000, 0.0907450000, 0.0884430000, 0.0883790000, 0.0909010000,
      0.0865700000, 0.0999870000, 0.0999920000, 0.0999680000, 0.0999430000,
      0.0999610000, 0.0902300000, 0.0902700000, 0.0905340000, 0.0908440000,
      0.0901110000, 0.0898580000, 0.0898570000, 0.0909610000, 0.0898660000,
      0.0906510000, 0.0906750000, 0.0906480000, 0.0906100000, 0.0894660000,
      0.0904620000, 0.0893220000, 0.0902220000, 0.0902000000;
  double true_mean_pendant = expected_bls_pendant.array().mean();
  CHECK_LT(fabs(true_mean_pendant - inst.GetGPEngine().GetBranchLengths()(8)), 1e-8);
}
// TakeFirstBranchLength should assign, for each PCSP, the branch length from the
// first loaded tree containing that PCSP.
TEST_CASE("GPInstance: take first branch length") {
  const std::string tree_path = "data/hotstart_bootstrap_sample.nwk";
  GPInstance inst("_ignore/mmapped_pv.data");
  // This is just a dummy fasta file, which is required to make an Engine.
  inst.ReadFastaFile("data/hotstart.fasta");
  inst.ReadNewickFile(tree_path, false);
  inst.MakeGPEngine();
  inst.TakeFirstBranchLength();
  auto branch_length_map =
      StringDoubleMapOfStringDoubleVector(inst.PrettyIndexedBranchLengths());
  // Check at the internal branches
  CHECK_EQ(0.191715, branch_length_map.at("01000|00011|00001"));    // pcsp index 1
  CHECK_EQ(0.117537, branch_length_map.at("01100|00011|00001"));    // pcsp index 2
  CHECK_EQ(0.0874183, branch_length_map.at("00011|01100|00100"));   // pcsp index 3
  CHECK_EQ(0.129921, branch_length_map.at("10000|01111|00011"));    // pcsp index 4
  CHECK_EQ(0.15936, branch_length_map.at("10000|01111|00100"));     // pcsp index 5
  CHECK_EQ(0.000813992, branch_length_map.at("00100|01011|00011"));  // pcsp index 6
  // Check at the branches ending in leaves
  CHECK_EQ(0.129921, branch_length_map.at("01111|10000|00000"));  // pcsp index 7
  CHECK_EQ(0.090352, branch_length_map.at("00100|01000|00000"));  // pcsp index 8
  CHECK_EQ(0.099922, branch_length_map.at("00011|01000|00000"));  // pcsp index 9
  CHECK_EQ(0.112125, branch_length_map.at("01000|00100|00000"));  // pcsp index 10
  CHECK_EQ(0.104088, branch_length_map.at("01011|00100|00000"));  // pcsp index 11
  CHECK_EQ(0.113775, branch_length_map.at("00001|00010|00000"));  // pcsp index 12
  CHECK_EQ(0.081634, branch_length_map.at("00010|00001|00000"));  // pcsp index 13
}
// The five-taxon instance spans exactly four distinct topologies.
TEST_CASE("GPInstance: generate all trees") {
  auto inst = MakeFiveTaxonInstance();
  auto rooted_tree_collection = inst.GenerateCompleteRootedTreeCollection();
  CHECK_EQ(rooted_tree_collection.TreeCount(), 4);
  CHECK_EQ(rooted_tree_collection.TopologyCounter().size(), 4);
}
TEST_CASE("GPInstance: test populate PLV") {
  // This test makes sure that PopulatePLVs correctly
  // re-populates the PLVs using the current branch lengths.
  auto inst = MakeFiveTaxonInstance();
  inst.EstimateBranchLengths(1e-6, 10, true);
  inst.ComputeLikelihoods();
  size_t length = inst.GetGPEngine().GetLogLikelihoodMatrix().rows();
  const EigenVectorXd log_likelihoods1 =
      inst.GetGPEngine().GetPerGPCSPLogLikelihoods(0, length);
  // Re-populating and re-computing must reproduce the same likelihoods.
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  const EigenVectorXd log_likelihoods2 = inst.GetGPEngine().GetPerGPCSPLogLikelihoods();
  CheckVectorXdEquality(log_likelihoods1, log_likelihoods2, 1e-6);
}
TEST_CASE("GPInstance: SBN root split probabilities on five taxa") {
  auto inst = MakeFiveTaxonInstance();
  inst.GetGPEngine().SetBranchLengthsToConstant(0.1);
  inst.PopulatePLVs();
  // We need to call ComputeLikelihoods to populate the likelihood matrix.
  // Note: EstimateBranchLengths doesn't populate the likelihood matrix.
  inst.ComputeLikelihoods();
  EigenVectorXd log_likelihood_vector = inst.GetGPEngine().GetPerGPCSPLogLikelihoods();
  // Let s be a subsplit and k be the site. Then,
  // log_likelihood_matrix.row(s)[k] =
  //    \log \sum_{\tau : s \in \tau} q(\tau) P(y_k | \tau),
  // log_likelihood_vector[s] =
  //    \sum_{k=1}^{K} \log \sum_{\tau : s \in \tau} q(\tau) P(y_k | \tau).
  // To test this, we are going to compute P(y_k | \tau) for {\tau : s \in \tau} and
  // multiply this by q(\tau) = 1/4 since we are assuming a uniform prior.
  // The collection of trees that we are looking at has 3 rootplits where one root
  // split generates two trees and the other 2 root splits generating one tree each
  // for the total of 4 trees.
  // We will compare the values against the 3 rootsplits, since we cannot assume
  // the ordering due to different implementation of the map, we will sort the values
  // before comparison.
  auto [log_lik_tree_1, ignored_1] =
      ComputeExactMarginal("data/five_taxon_tree1.nwk", "data/five_taxon.fasta");
  std::ignore = ignored_1;
  auto [log_lik_tree_2, ignored_2] =
      ComputeExactMarginal("data/five_taxon_tree2.nwk", "data/five_taxon.fasta");
  std::ignore = ignored_2;
  auto [log_lik_trees_3_4, ignored_3_4] =
      ComputeExactMarginal("data/five_taxon_trees_3_4.nwk", "data/five_taxon.fasta");
  std::ignore = ignored_3_4;
  EigenVectorXd expected_log_lik_vector_at_rootsplits(3);
  expected_log_lik_vector_at_rootsplits << log_lik_tree_1, log_lik_tree_2,
      log_lik_trees_3_4;
  // The first three entries of the likelihood vector are the rootsplits.
  EigenVectorXd realized_log_lik_vector_at_rootsplits =
      log_likelihood_vector.segment(0, 3);
  CheckVectorXdEqualityAfterSorting(realized_log_lik_vector_at_rootsplits,
                                    expected_log_lik_vector_at_rootsplits, 1e-6);
  inst.EstimateSBNParameters();
  EigenVectorXd realized_q = inst.GetGPEngine().GetSBNParameters().segment(0, 3);
  // The expected values for the SBN parameters: q[s] \propto log_lik[s] +
  // log_prior[s]. The SBN params are initialized so that we get a uniform
  // distribution over the trees. For the rootsplits, the values are (1/4, 1/4, 2/4)
  // corresponding to the entries in expected_log_lik_vector_at_rootsplits.
  EigenVectorXd log_prior(3);
  log_prior << log(1. / 4), log(1. / 4), log(2. / 4);
  EigenVectorXd expected_q = expected_log_lik_vector_at_rootsplits + log_prior;
  NumericalUtils::ProbabilityNormalizeInLog(expected_q);
  expected_q = expected_q.array().exp();
  CheckVectorXdEqualityAfterSorting(realized_q, expected_q, 1e-6);
}
// Branch lengths set on the engine should round-trip into the Newick output.
TEST_CASE("GPInstance: CurrentlyLoadedTreesWithGPBranchLengths") {
  auto inst = MakeHelloGPInstanceSingleNucleotide();
  EigenVectorXd branch_lengths(5);
  branch_lengths << 0, 0.1, 0.2, 0.3, 0.4;
  inst.GetGPEngine().SetBranchLengths(branch_lengths);
  auto trees = inst.CurrentlyLoadedTreesWithGPBranchLengths();
  CHECK_EQ(trees.Newick(), "(jupiter:0.2,(mars:0.3,saturn:0.4):0.1):0;\n");
}
TEST_CASE("GPInstance: CurrentlyLoadedTreesWithAPCSPStringAndGPBranchLengths") {
  GPInstance inst("_ignore/mmapped_pv.data");
  inst.ReadFastaFile("data/five_taxon.fasta")
;
  inst.ReadNewickFile("data/five_taxon_rooted_more.nwk", false);
  inst.MakeGPEngine();
  inst.GetGPEngine().SetBranchLengthsToConstant(0.9);
  // Only take trees that have (x4,(x2,x3)).
  auto trees =
      inst.CurrentlyLoadedTreesWithAPCSPStringAndGPBranchLengths("000010011000010");
  CHECK_EQ(trees.Newick(),
           "((x0:0.9,x1:0.9):0.9,((x2:0.9,x3:0.9):0.9,x4:0.9):0.9):0;\n"
           "(x0:0.9,(x1:0.9,((x2:0.9,x3:0.9):0.9,x4:0.9):0.9):0.9):0;\n");
}
TEST_CASE("GPInstance: Priors") {
  auto inst = GPInstanceOfFiles("data/four-numbered-taxa.fasta",
                                "data/four-taxon-two-tree-rootsplit-uncertainty.nwk");
  // Here are the trees:
  // (((1,2),3),4);
  // ((1,(2,3)),4);
  // ((1,2),(3,4));
  //
  // Here's the interesting part of the indexer:
  // 0000|1111|0001,        0
  // 0000|1111|0011,        1
  // 0001|1110|0110,        2
  // 0001|1110|0010,        3
  auto support = inst.GetDAG().BuildUniformOnTopologicalSupportPrior();
  // Support prior: conditional probabilities given the trees actually loaded.
  CHECK_LT(fabs(support[0] - 2. / 3.), 1e-10);
  CHECK_LT(fabs(support[1] - 1. / 3.), 1e-10);
  CHECK_LT(fabs(support[2] - 1. / 2.), 1e-10);
  CHECK_LT(fabs(support[3] - 1. / 2.), 1e-10);
  auto all = inst.GetDAG().BuildUniformOnAllTopologiesPrior();
  // There are 15 topologies on 4 taxa.
  // There are 3 topologies on 3 taxa, so there are 3 topologies with rootsplit
  // 0001|1110.
  CHECK_LT(fabs(all[0] - 3. / 15.), 1e-10);
  // There is only 1 topology with rootsplit 0011|1100.
  CHECK_LT(fabs(all[1] - 1. / 15.), 1e-10);
  // There are 3 topologies on 3 taxa.
  CHECK_LT(fabs(all[2] - 1. / 3.), 1e-10);
  CHECK_LT(fabs(all[3] - 1. / 3.), 1e-10);
}
TEST_CASE("GPInstance: inverted GPCSP probabilities") {
  // Checks UnconditionalNodeProbabilities and InvertedGPCSPProbabilities against
  // hand-computed values under the uniform-on-topological-support prior.
  // Note that just for fun, I have duplicated the first tree, but that doesn't matter
  // because we are looking at uniform over topological support.
  auto inst =
      GPInstanceOfFiles("data/five_taxon.fasta", "data/five_taxon_rooted_more_2.nwk");
  // See the DAG and the uniform probabilities at
  // https://github.com/phylovi/bito/issues/391#issuecomment-1168046752
  const auto& dag = inst.GetDAG();
  EigenVectorXd normalized_sbn_parameters = dag.BuildUniformOnTopologicalSupportPrior();
  EigenVectorXd node_probabilities =
      dag.UnconditionalNodeProbabilities(normalized_sbn_parameters);
  // One entry per DAG node; leaves (0-4) and the DAG root have probability 1.
  EigenVectorXd correct_node_probabilities(16);
  correct_node_probabilities <<  //
      1.,                        // 0
      1.,                        // 1
      1.,                        // 2
      1.,                        // 3
      1.,                        // 4
      0.75,                      // 5
      0.5,                       // 6
      0.25,                      // 7
      0.25,                      // 8
      0.5,                       // 9
      0.25,                      // 10
      0.25,                      // 11
      0.5,                       // 12
      0.5,                       // 13
      0.25,                      // 14
      1.;                        // 15 (DAG root node)
  CheckVectorXdEquality(node_probabilities, correct_node_probabilities, 1e-12);
  EigenVectorXd inverted_probabilities =
      dag.InvertedGPCSPProbabilities(normalized_sbn_parameters, node_probabilities);
  // One entry per GPCSP (edge); see the linked diagram for the edge indexing.
  EigenVectorXd correct_inverted_probabilities(24);
  correct_inverted_probabilities <<  //
                                     //
      1.,                            // 0 (rootsplit)
      1.,                            // 1 (rootsplit)
      1.,                            // 2 (rootsplit)
      1.,                            // 3
      1.,                            // 4
      2. / 3.,                       // 5
      0.5,                           // 6
      0.5,                           // 7
      // We have 0.5 from node 9, but that's split proportionally to the probability
      // of each potential parent. Nodes 12 and 14 are equally likely parents of node 9,
      // so we have 0.5 for the inverted PCSP probability.
      0.5,      // 8
      1.,       // 9
      1.,       // 10
      0.5,      // 11 (analogous to 8)
      1. / 3.,  // 12
      0.5,      // 13
      0.5,      // 14
      0.5,      // 15
      0.5,      // 16
      0.25,     // 17
      0.5,      // 18
      0.25,     // 19
      0.25,     // 20
      0.75,     // 21
      0.75,     // 22
      0.25;     // 23
  CheckVectorXdEquality(inverted_probabilities, correct_inverted_probabilities, 1e-12);
}
TEST_CASE("GPInstance: GenerateCompleteRootedTreeCollection") {
  const std::string fasta_path = "data/5-taxon-slice-of-ds1.fasta";
  auto instance =
      GPInstanceOfFiles(fasta_path, "data/5-taxon-only-rootward-uncertainty.nwk");
  // Encode each GPCSP's index as its own branch length so that the exported
  // Newick strings reveal which GPCSP sits on every edge.
  EigenVectorXd indexed_branch_lengths(14);
  indexed_branch_lengths << 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13.;
  instance.GetGPEngine().SetBranchLengths(indexed_branch_lengths);
  // The indices below match the GPCSP DAG shown at
  // https://github.com/phylovi/bito/issues/391#issuecomment-1168048090
  CHECK_EQ(instance.GenerateCompleteRootedTreeCollection().Newick(),
           "((0:7,1:9):3,(2:11,(3:12,4:13):2):6):0;\n"
           "(1:10,(0:8,(2:11,(3:12,4:13):2):5):4):0;\n");
}
// Computes classical (per-tree) log likelihoods for the trees in tree_path under a
// JC69 / constant-rates / strict-clock model, each shifted by the log of a uniform
// prior over the loaded trees. Returns one entry per tree.
EigenVectorXd ClassicalLikelihoodOf(const std::string& tree_path,
                                    const std::string& fasta_path) {
  RootedSBNInstance sbn_instance("charlie");
  sbn_instance.ReadNewickFile(tree_path, false);
  sbn_instance.ProcessLoadedTrees();
  sbn_instance.SetAlignment(Alignment::ReadFasta(fasta_path));
  PhyloModelSpecification simple_specification{"JC69", "constant", "strict"};
  sbn_instance.PrepareForPhyloLikelihood(simple_specification, 1);
  std::vector<double> log_likelihoods = sbn_instance.UnrootedLogLikelihoods();
  // Uniform prior: every loaded tree gets probability 1 / (tree count).
  const double log_prior = log(1. / sbn_instance.tree_collection_.TreeCount());
  for (double& log_likelihood : log_likelihoods) {
    log_likelihood += log_prior;
  }
  return EigenVectorXdOfStdVectorDouble(log_likelihoods);
}
// This is the simplest hybrid marginal that has tree uncertainty above and below the
// focal PCSP. Note that this test and the next one are set up so that the quartets
// reach far enough out that there is no uncertainty in the part of the tree outside
// of the quartet. In this case the hybrid marginal will be the same as the sum of
// classical likelihoods.
TEST_CASE("GPInstance: simplest hybrid marginal") {
  // Compares quartet hybrid likelihoods against classical per-tree likelihoods,
  // and checks which quartet requests are fully formed.
  const std::string fasta_path = "data/7-taxon-slice-of-ds1.fasta";
  // See the DAG at
  // https://github.com/phylovi/bito/issues/391#issuecomment-1169053191
  auto inst = GPInstanceOfFiles(fasta_path, "data/simplest-hybrid-marginal.nwk");
  auto& dag = inst.GetDAG();
  // Branch lengths generated from Python via
  //   import random
  //   [round(random.uniform(1e-6, 0.1), 3) for i in range(23)]
  EigenVectorXd branch_lengths(23);
  branch_lengths << 0.058, 0.044, 0.006, 0.099, 0.078, 0.036, 0.06, 0.073, 0.004, 0.041,
      0.088, 0.033, 0.043, 0.096, 0.027, 0.039, 0.043, 0.023, 0.064, 0.032, 0.03, 0.085,
      0.034;
  inst.GetGPEngine().SetBranchLengths(branch_lengths);
  inst.PopulatePLVs();
  const std::string tree_path = "_ignore/simplest-hybrid-marginal-trees.nwk";
  inst.ExportAllGeneratedTrees(tree_path);
  // requests are printable to stdout if you're keen.
  auto request = dag.QuartetHybridRequestOf(NodeId(12), false, NodeId(11));
  EigenVectorXd quartet_log_likelihoods =
      inst.GetGPEngine().CalculateQuartetHybridLikelihoods(request);
  // Note that we aren't sorting likelihoods here, though we might have to do so for
  // more complex tests. I don't think that there's any guarantee that the hybrid log
  // likelihoods will be in the same order as the generated tree, but it worked here.
  EigenVectorXd manual_log_likelihoods = ClassicalLikelihoodOf(tree_path, fasta_path);
  CheckVectorXdEquality(quartet_log_likelihoods, manual_log_likelihoods, 1e-12);
  // The central request should be complete; the following three should not be.
  CHECK_EQ(request.IsFullyFormed(), true);
  CHECK_EQ(dag.QuartetHybridRequestOf(NodeId(14), true, NodeId(13)).IsFullyFormed(),
           false);
  CHECK_EQ(dag.QuartetHybridRequestOf(NodeId(14), false, NodeId(0)).IsFullyFormed(),
           false);
  CHECK_EQ(dag.QuartetHybridRequestOf(NodeId(8), true, NodeId(4)).IsFullyFormed(),
           false);
}
// This is a slightly more complex test, that has a rotation status of true, and has
// some paths through the DAG that aren't part of the hybrid marginal.
TEST_CASE("GPInstance: second simplest hybrid marginal") {
  // Like the previous test, but with rotation status true and with paths through
  // the DAG that are not part of the hybrid marginal.
  const std::string fasta_path = "data/7-taxon-slice-of-ds1.fasta";
  // See the DAG at
  // https://github.com/phylovi/bito/issues/391#issuecomment-1169056581
  auto inst = GPInstanceOfFiles(fasta_path, "data/second-simplest-hybrid-marginal.nwk");
  auto& dag = inst.GetDAG();
  // Branch lengths generated from Python via
  //   import random
  //   [round(random.uniform(1e-6, 0.1), 3) for i in range(32)]
  EigenVectorXd branch_lengths(32);
  branch_lengths << 0.09, 0.064, 0.073, 0.062, 0.051, 0.028, 0.077, 0.097, 0.089, 0.061,
      0.036, 0.049, 0.085, 0.01, 0.099, 0.027, 0.07, 0.023, 0.043, 0.056, 0.043, 0.026,
      0.058, 0.015, 0.093, 0.01, 0.011, 0.007, 0.022, 0.009, 0.037, 0.017;
  inst.GetGPEngine().SetBranchLengths(branch_lengths);
  inst.PopulatePLVs();
  const std::string tree_path = "_ignore/simplest-hybrid-marginal-trees.nwk";
  inst.ExportAllGeneratedTrees(tree_path);
  auto edge = dag.GetDAGEdge(EdgeId(2));
  auto request = dag.QuartetHybridRequestOf(NodeId(edge.GetParent()), true,
                                            NodeId(edge.GetChild()));
  EigenVectorXd quartet_log_likelihoods =
      inst.GetGPEngine().CalculateQuartetHybridLikelihoods(request);
  inst.LoadAllGeneratedTrees();
  // We restrict to only the trees that contain the DAG edge 6 (which goes between
  // node 12 and node 11). We get the bitset representation using
  // inst.PrintGPCSPIndexer();
  inst.ExportTreesWithAPCSP("000000100111100001110", tree_path);
  EigenVectorXd manual_log_likelihoods = ClassicalLikelihoodOf(tree_path, fasta_path);
  CheckVectorXdEquality(quartet_log_likelihoods, manual_log_likelihoods, 1e-12);
}
TEST_CASE("GPInstance: test GPCSP indexes") {
  // Walk every edge of the DAG and confirm that the edge index reported by the
  // traversal agrees with a direct (parent, child) -> index lookup.
  const std::string fasta_path = "data/7-taxon-slice-of-ds1.fasta";
  auto instance = GPInstanceOfFiles(fasta_path, "data/simplest-hybrid-marginal.nwk");
  auto& dag = instance.GetDAG();
  dag.TopologicalEdgeTraversal(
      [&dag](NodeId parent_id, bool is_edge_on_left, NodeId child_id, EdgeId gpcsp_idx) {
        CHECK_EQ(dag.GetEdgeIdx(parent_id, child_id), gpcsp_idx);
      });
}
// ** SubsplitDAG tests **
template <typename T>
std::vector<T> ConvertIdVector(const SizeVector& vec_in) {
std::vector<T> vec_out;
for (const auto i : vec_in) {
vec_out.push_back(T(i));
}
return vec_out;
}
TEST_CASE("SubsplitDAG: test rootsplits") {
  // Every node the DAG lists as a rootsplit should identify itself as one.
  const std::string fasta_path = "data/7-taxon-slice-of-ds1.fasta";
  auto instance = GPInstanceOfFiles(fasta_path, "data/simplest-hybrid-marginal.nwk");
  instance.SubsplitDAGToDot("_ignore/outtest.dot", true);
  auto& dag = instance.GetDAG();
  for (const auto& rootsplit_node_id : dag.GetRootsplitNodeIds()) {
    CHECK(dag.GetDAGNode(NodeId(rootsplit_node_id)).IsRootsplit());
  }
}
// See diagram at https://github.com/phylovi/bito/issues/391#issuecomment-1168046752.
TEST_CASE("SubsplitDAG: IsValidAddNodePair tests") {
  // Exercises each rejection reason of IsValidAddNodePair, then one valid pair.
  const std::string fasta_path = "data/five_taxon.fasta";
  auto inst = GPInstanceOfFiles(fasta_path, "data/five_taxon_rooted_more_2.nwk");
  auto& dag = inst.GetDAG();
  // Nodes are not adjacent (12|34 and 2|4).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("01100", "00011"),
                                     Bitset::Subsplit("00100", "00001")));
  // Nodes have 6 taxa while the DAG has 5 (12|34 and 1|2).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("011000", "000110"),
                                     Bitset::Subsplit("010000", "001000")));
  // Parent node does not have a parent (12|3 and 1|2).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("01100", "00010"),
                                     Bitset::Subsplit("01000", "00100")));
  // Left clade of the parent node does not have a child (02|134 and
  // 1|34).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("10100", "01011"),
                                     Bitset::Subsplit("01000", "00011")));
  // Left clade of the child node does not have a child (0123|4 and
  // 023|1).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("11110", "00001"),
                                     Bitset::Subsplit("10110", "01000")));
  // Right clade of the child node does not have a child (0123|4 and
  // 0|123).
  CHECK_FALSE(dag.IsValidAddNodePair(Bitset::Subsplit("11110", "00001"),
                                     Bitset::Subsplit("10000", "01110")));
  // Valid new node pair (0123|4 and 012|3).
  CHECK(dag.IsValidAddNodePair(Bitset::Subsplit("11110", "00001"),
                               Bitset::Subsplit("11100", "00010")));
}
// See diagram at https://github.com/phylovi/bito/issues/391#issuecomment-1168059272.
TEST_CASE("SubsplitDAG: AddNodePair tests") {
  // Exercises SubsplitDAG::AddNodePair end to end: invalid pairs, no-op
  // additions, node/edge counts, connectivity, reindexers, and the DAG's
  // internal bookkeeping maps.
  const std::string fasta_path = "data/five_taxon.fasta";
  auto inst = GPInstanceOfFiles(fasta_path, "data/five_taxon_rooted_more_2.nwk");
  auto& dag = inst.GetDAG();
  // Check that AddNodePair throws if node pair is invalid (12|34 and 2|4).
  CHECK_THROWS(dag.AddNodePair(Bitset::Subsplit("01100", "00011"),
                               Bitset::Subsplit("00100", "00001")));
  // Add 2|34 and 3|4, which are both already in the DAG.
  // Check that AddNodePair returns empty added_node_ids and added_edge_idxs
  // and that node_reindexer and edge_reindexer are the identity reindexers.
  auto node_addition_result = dag.AddNodePair(Bitset::Subsplit("00100", "00011"),
                                              Bitset::Subsplit("00010", "00001"));
  CHECK(node_addition_result.added_node_ids.empty());
  CHECK(node_addition_result.added_edge_idxs.empty());
  CHECK_EQ(node_addition_result.node_reindexer, Reindexer::IdentityReindexer(16));
  CHECK_EQ(node_addition_result.edge_reindexer, Reindexer::IdentityReindexer(24));
  // Before adding any nodes.
  size_t prev_node_count = dag.NodeCount();
  size_t prev_edge_count = dag.EdgeCountWithLeafSubsplits();
  size_t prev_topology_count = dag.TopologyCount();
  // Add nodes 24|3 and 2|4.
  Bitset parent_subsplit = Bitset::Subsplit("00101", "00010");
  Bitset child_subsplit = Bitset::Subsplit("00100", "00001");
  node_addition_result = dag.AddNodePair(parent_subsplit, child_subsplit);
  // Check that the node count and edge count was updated.
  CHECK_EQ(dag.NodeCount(), prev_node_count + 2);
  CHECK_EQ(dag.EdgeCountWithLeafSubsplits(), prev_edge_count + 6);
  // Check that both nodes now exist.
  CHECK(dag.ContainsNode(parent_subsplit));
  CHECK(dag.ContainsNode(child_subsplit));
  // Check that all necessary edges were created.
  const auto parent_node = dag.GetDAGNode(dag.GetDAGNodeId(parent_subsplit));
  const auto child_node = dag.GetDAGNode(dag.GetDAGNodeId(child_subsplit));
  // The maps below are keyed on clade side: true = left, false = right.
  std::map<bool, SizeVector> correct_parents_of_parent{{true, {}}, {false, {14, 16}}};
  std::map<bool, SizeVector> parents_of_parent{{true, parent_node.GetLeftRootward()},
                                               {false, parent_node.GetRightRootward()}};
  CHECK_EQ(parents_of_parent, correct_parents_of_parent);
  std::map<bool, SizeVector> children_of_parent{
      {true, parent_node.GetLeftLeafward()}, {false, parent_node.GetRightLeafward()}};
  std::map<bool, SizeVector> correct_children_of_parent{{true, {12}}, {false, {3}}};
  CHECK_EQ(children_of_parent, correct_children_of_parent);
  std::map<bool, SizeVector> parents_of_children{
      {true, child_node.GetLeftRootward()}, {false, child_node.GetRightRootward()}};
  std::map<bool, SizeVector> correct_parents_of_children{{true, {13}}, {false, {}}};
  CHECK_EQ(parents_of_children, correct_parents_of_children);
  std::map<bool, SizeVector> children_of_child{{true, child_node.GetLeftLeafward()},
                                               {false, child_node.GetRightLeafward()}};
  std::map<bool, SizeVector> correct_children_of_child{{true, {2}}, {false, {4}}};
  CHECK_EQ(children_of_child, correct_children_of_child);
  // Check that node_reindexer and edge_reindexer are correct.
  Reindexer correct_node_reindexer(
      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 15, 16, 17, 12, 13});
  CHECK_EQ(node_addition_result.node_reindexer, correct_node_reindexer);
  Reindexer correct_edge_reindexer({0,  1,  2,  3,  4,  5,  6,  7,  8,  9,
                                    11, 12, 14, 15, 16, 17, 18, 19, 20, 21,
                                    22, 23, 24, 25, 26, 27, 28, 29, 13, 10});
  CHECK_EQ(node_addition_result.edge_reindexer, correct_edge_reindexer);
  // Check that added_node_ids and added_edge_idxs are correct.
  NodeIdVector correct_added_node_ids = ConvertIdVector<NodeId>({12, 13});
  CHECK_EQ(node_addition_result.added_node_ids, correct_added_node_ids);
  EdgeIdVector correct_added_edge_idxs =
      ConvertIdVector<EdgeId>({26, 27, 28, 29, 13, 10});
  CHECK_EQ(node_addition_result.added_edge_idxs, correct_added_edge_idxs);
  // Check that `dag_nodes` was updated (node 12 -> 14).
  const auto& node_14 = dag.GetDAGNode(NodeId(14));
  CHECK_EQ(node_14.GetBitset().ToString(), "0100000111");
  // Check that node fields were updated correctly.
  const auto& right_parents_14 = node_14.GetRightRootward();
  const auto& right_children_14 = node_14.GetRightLeafward();
  CHECK(std::find(right_parents_14.begin(), right_parents_14.end(), 13) ==
        right_parents_14.end());
  CHECK(std::find(right_parents_14.begin(), right_parents_14.end(), 15) !=
        right_parents_14.end());
  CHECK(std::find(right_children_14.begin(), right_children_14.end(), 11) !=
        right_children_14.end());
  CHECK_EQ(node_14.Id(), 14);
  // Check that `subsplit_to_id_` node ids were updated.
  CHECK_EQ(dag.GetDAGNodeId(node_14.GetBitset()), NodeId(14));
  // Check that `dag_edges_` node ids were updated.
  CHECK_EQ(dag.GetEdgeIdx(NodeId(15), NodeId(14)), EdgeId(11));
  // Check that `dag_edges_` edge idxs were updated.
  CHECK_EQ(dag.GetEdgeIdx(NodeId(14), NodeId(13)), EdgeId(10));
  CHECK_EQ(dag.GetEdgeIdx(NodeId(16), NodeId(13)), EdgeId(13));
  CHECK_EQ(dag.GetEdgeIdx(NodeId(11), NodeId(4)), EdgeId(25));
  // Check that `parent_to_child_range_` was updated.
  CHECK_EQ(dag.GetChildEdgeRange(node_14.GetBitset(), false).second, EdgeId(11));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(16)).GetBitset(), false).first,
           EdgeId(12));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(16)).GetBitset(), false).second,
           EdgeId(14));
  // Check that `topology_count_` was updated.
  CHECK_EQ(dag.TopologyCount(), prev_topology_count + 2);
}
// See diagram at https://github.com/phylovi/bito/issues/391#issuecomment-1168061363.
TEST_CASE("SubsplitDAG: Only add parent node tests") {
  // When the child of an added node pair already exists in the DAG, AddNodePair
  // should add only the parent node plus its edges.
  const std::string fasta_path = "data/five_taxon.fasta";
  auto inst = GPInstanceOfFiles(fasta_path, "data/five_taxon_rooted_more_2.nwk");
  auto& dag = inst.GetDAG();
  // Before adding any nodes.
  size_t prev_node_count = dag.NodeCount();
  size_t prev_edge_count = dag.EdgeCountWithLeafSubsplits();
  // Add nodes 12|34 and 1|2.
  dag.AddNodePair(Bitset::Subsplit("01100", "00011"),
                  Bitset::Subsplit("01000", "00100"));
  CHECK_EQ(dag.NodeCount(), prev_node_count + 2);
  CHECK_EQ(dag.EdgeCountWithLeafSubsplits(), prev_edge_count + 5);
  // Add nodes 0|12 and 1|2 (this should just add 0|12 and associated edges).
  dag.AddNodePair(Bitset::Subsplit("10000", "01100"),
                  Bitset::Subsplit("01000", "00100"));
  // Check that the node count and edge count was updated.
  CHECK_EQ(dag.NodeCount(), prev_node_count + 3);
  CHECK_EQ(dag.EdgeCountWithLeafSubsplits(), prev_edge_count + 8);
  // Check that BuildEdgeReindexer() correctly handles left edges.
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(10)).GetBitset(), true).first,
           EdgeId(4));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(10)).GetBitset(), true).second,
           EdgeId(6));
}
// See diagram at https://github.com/phylovi/bito/issues/391#issuecomment-1168064347.
TEST_CASE("SubsplitDAG: Only add child node tests") {
  // When the parent of an added node pair already exists in the DAG, AddNodePair
  // should add only the child node plus its edges.
  const std::string fasta_path = "data/five_taxon.fasta";
  auto inst = GPInstanceOfFiles(fasta_path, "data/five_taxon_rooted_more_3.nwk");
  auto& dag = inst.GetDAG();
  // Before adding any nodes.
  size_t prev_node_count = dag.NodeCount();
  size_t prev_edge_count = dag.EdgeCountWithLeafSubsplits();
  // Add nodes 1|234 and 24|3 (this should just add 24|3 and associated edges).
  dag.AddNodePair(Bitset::Subsplit("01000", "00111"),
                  Bitset::Subsplit("00101", "00010"));
  // Check that the node count and edge count was updated.
  CHECK_EQ(dag.NodeCount(), prev_node_count + 1);
  CHECK_EQ(dag.EdgeCountWithLeafSubsplits(), prev_edge_count + 4);
  // Check that new child node is connected to all possible parents.
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(10)).GetBitset(), false).first,
           EdgeId(10));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(10)).GetBitset(), false).second,
           EdgeId(12));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(11)).GetBitset(), false).first,
           EdgeId(5));
  CHECK_EQ(dag.GetChildEdgeRange(dag.GetDAGNode(NodeId(11)).GetBitset(), false).second,
           EdgeId(7));
}
// Checks that parent nodes found via scan match found via map.
// Checks that the parent nodes of `subsplit` found via linear scan match those
// found via map lookup. Returns true when both the left and right parent sets
// agree; on mismatch, prints a diagnostic dump of both sets to stdout.
auto TestParentNodeIds = [](const GPDAG& dag, const Bitset& subsplit) {
  const auto [left_via_map, right_via_map] = dag.FindParentNodeIdsViaMap(subsplit);
  const auto [left_via_scan, right_via_scan] = dag.FindParentNodeIdsViaScan(subsplit);
  std::unordered_set<NodeId> left_via_map_set(left_via_map.begin(), left_via_map.end());
  std::unordered_set<NodeId> right_via_map_set(right_via_map.begin(),
                                               right_via_map.end());
  std::unordered_set<NodeId> left_via_scan_set(left_via_scan.begin(),
                                               left_via_scan.end());
  std::unordered_set<NodeId> right_via_scan_set(right_via_scan.begin(),
                                                right_via_scan.end());
  bool matches = (left_via_map_set == left_via_scan_set) and
                 (right_via_map_set == right_via_scan_set);
  if (!matches) {
    // Shared printer for the "[ left subsplits ] [ right subsplits ]" dump,
    // replacing the previously duplicated loops.
    auto print_subsplit_sets = [&dag](const std::unordered_set<NodeId>& left_set,
                                      const std::unordered_set<NodeId>& right_set) {
      std::cout << "[ ";
      for (const auto node_id : left_set) {
        std::cout << dag.GetDAGNode(node_id).GetBitset().SubsplitToString() << " ";
      }
      std::cout << "] [ ";
      for (const auto node_id : right_set) {
        std::cout << dag.GetDAGNode(node_id).GetBitset().SubsplitToString() << " ";
      }
      std::cout << "] " << std::endl;
    };
    std::cout << "FindParentNodeIds [FAIL_BEGIN]" << std::endl;
    std::cout << "Subsplit: " << subsplit.SubsplitToString() << std::endl;
    std::cout << "LinearSearch: " << left_via_scan_set << " " << right_via_scan_set
              << std::endl;
    std::cout << "MapSearch: " << left_via_map_set << " " << right_via_map_set
              << std::endl;
    std::cout << "via_map_set: ";
    print_subsplit_sets(left_via_map_set, right_via_map_set);
    std::cout << "via_scan_set: ";
    print_subsplit_sets(left_via_scan_set, right_via_scan_set);
    std::cout << "FindParentNodeIds [FAIL_END]" << std::endl;
  }
  return matches;
};
// Checks that child nodes found via scan match found via map.
// Checks that the child nodes of `subsplit` found via linear scan match those
// found via map lookup. Returns true when both the left and right child sets
// agree; on mismatch, prints a diagnostic dump of both sets to stdout.
auto TestChildNodeIds = [](const GPDAG& dag, const Bitset& subsplit) {
  const auto [left_via_map, right_via_map] = dag.FindChildNodeIdsViaMap(subsplit);
  const auto [left_via_scan, right_via_scan] = dag.FindChildNodeIdsViaScan(subsplit);
  std::unordered_set<NodeId> left_via_map_set(left_via_map.begin(), left_via_map.end());
  std::unordered_set<NodeId> right_via_map_set(right_via_map.begin(),
                                               right_via_map.end());
  std::unordered_set<NodeId> left_via_scan_set(left_via_scan.begin(),
                                               left_via_scan.end());
  std::unordered_set<NodeId> right_via_scan_set(right_via_scan.begin(),
                                                right_via_scan.end());
  bool matches = (left_via_map_set == left_via_scan_set) and
                 (right_via_map_set == right_via_scan_set);
  if (!matches) {
    // Shared printer for the "[ left subsplits ] [ right subsplits ]" dump,
    // replacing the previously duplicated loops.
    auto print_subsplit_sets = [&dag](const std::unordered_set<NodeId>& left_set,
                                      const std::unordered_set<NodeId>& right_set) {
      std::cout << "[ ";
      for (const auto node_id : left_set) {
        std::cout << dag.GetDAGNode(node_id).GetBitset().SubsplitToString() << " ";
      }
      std::cout << "] [ ";
      for (const auto node_id : right_set) {
        std::cout << dag.GetDAGNode(node_id).GetBitset().SubsplitToString() << " ";
      }
      std::cout << "] " << std::endl;
    };
    std::cout << "FindChildNodeIds [FAIL_BEGIN]" << std::endl;
    std::cout << "Subsplit: " << subsplit.SubsplitToString() << std::endl;
    std::cout << "LinearSearch: " << left_via_scan_set << " " << right_via_scan_set
              << std::endl;
    std::cout << "MapSearch: " << left_via_map_set << " " << right_via_map_set
              << std::endl;
    std::cout << "via_map_set: ";
    print_subsplit_sets(left_via_map_set, right_via_map_set);
    std::cout << "via_scan_set: ";
    print_subsplit_sets(left_via_scan_set, right_via_scan_set);
    std::cout << "FindChildNodeIds [FAIL_END]" << std::endl;
  }
  return matches;
};
// Compares adding nodes to DAG individually vs adding multiple nodes. Additionally,
// tests that adjacent nodes from acquired via map lookup match those acquired via
// linear scan.
TEST_CASE("SubsplitDAG: Add Multiple Nodes") {
  // Verifies that adding adjacent NNIs to the DAG one at a time produces the
  // same DAG as adding them all at once, and that map-based and scan-based
  // adjacent-node lookups agree before and after each addition.
  const std::string fasta_path = "data/six_taxon.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  // Instance that will be unaltered.
  auto inst = GPInstanceOfFiles(fasta_path, newick_path);
  inst.MakeNNIEngine();
  NNIEngine& nni_engine = inst.GetNNIEngine();
  GPDAG dag1 = inst.GetDAG();
  GPDAG dag2 = inst.GetDAG();
  nni_engine.SyncAdjacentNNIsWithDAG();
  // Check unaltered DAG nodes match via map and via linear scan.
  for (const auto node_id : dag1.LeafwardNodeTraversalTrace(true)) {
    const auto subsplit = dag1.GetDAGNodeBitset(node_id);
    CHECK_MESSAGE(TestChildNodeIds(dag1, subsplit),
                  "Child nodes found by map lookup do not match those found by linear "
                  "scan (before adding nodes).");
    CHECK_MESSAGE(TestParentNodeIds(dag1, subsplit),
                  "Parent nodes found by map lookup do not match those found by linear "
                  "scan (before adding nodes).");
  }
  // Check DAG nodes match after adding nodes individually.
  // Iterate by const reference: an NNIOperation holds two Bitsets, so a by-value
  // loop variable would copy both on every iteration.
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    auto mods1 = dag1.AddNodePair(nni);
    for (const auto node_id : dag1.LeafwardNodeTraversalTrace(true)) {
      const auto subsplit = dag1.GetDAGNodeBitset(node_id);
      CHECK_MESSAGE(TestChildNodeIds(dag1, subsplit),
                    "Child nodes found by map lookup do not match those found by "
                    "linear scan (after adding nodes).");
      CHECK_MESSAGE(TestParentNodeIds(dag1, subsplit),
                    "Parent nodes found by map lookup do not match those found by "
                    "linear scan (after adding nodes).");
    }
  }
  CHECK_MESSAGE(
      dag1 != dag2,
      "DAG with nodes added individually incorrectly matches the DAG with nodes "
      "added collectively (before adding nodes).");
  // Check DAGs match by adding nodes individually vs all-at-once.
  std::vector<std::pair<Bitset, Bitset>> node_subsplit_pairs;
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    node_subsplit_pairs.push_back({nni.GetParent(), nni.GetChild()});
  }
  dag2.AddNodes(node_subsplit_pairs);
  CHECK_MESSAGE(dag1 == dag2,
                "DAG with nodes added individually does not match the DAG with nodes "
                "added collectively.");
}
// Compares adding nodes to DAG individually vs adding multiple nodes. Additionally,
// tests that adjacent nodes from acquired via map lookup match those acquired via
// linear scan.
TEST_CASE("SubsplitDAG: Graft Multiple Nodes") {
  // Verifies that grafting each adjacent NNI onto the DAG and then removing the
  // graft leaves map-based and scan-based adjacent-node lookups in agreement.
  const std::string fasta_path = "data/six_taxon.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  // Instance that will be unaltered.
  auto inst = GPInstanceOfFiles(fasta_path, newick_path);
  inst.MakeNNIEngine();
  NNIEngine& nni_engine = inst.GetNNIEngine();
  GPDAG dag1 = inst.GetDAG();
  GraftDAG& graft_dag = nni_engine.GetGraftDAG();
  nni_engine.SyncAdjacentNNIsWithDAG();
  // Check DAG nodes match after grafting nodes individually.
  // Iterate by const reference: an NNIOperation holds two Bitsets, so a by-value
  // loop variable would copy both on every iteration.
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    auto mods1 = graft_dag.AddNodePair(nni);
    for (const auto node_id : dag1.LeafwardNodeTraversalTrace(true)) {
      const auto subsplit = dag1.GetDAGNodeBitset(node_id);
      CHECK_MESSAGE(TestChildNodeIds(dag1, subsplit),
                    "Child nodes found by map lookup do not match those found by "
                    "linear scan (after adding graft).");
      CHECK_MESSAGE(TestParentNodeIds(dag1, subsplit),
                    "Parent nodes found by map lookup do not match those found by "
                    "linear scan (after adding graft).");
    }
    graft_dag.RemoveAllGrafts();
    for (const auto node_id : dag1.LeafwardNodeTraversalTrace(true)) {
      const auto subsplit = dag1.GetDAGNodeBitset(node_id);
      CHECK_MESSAGE(TestChildNodeIds(dag1, subsplit),
                    "Child nodes found by map lookup do not match those found by "
                    "linear scan (after removing graft).");
      CHECK_MESSAGE(TestParentNodeIds(dag1, subsplit),
                    "Parent nodes found by map lookup do not match those found by "
                    "linear scan (after removing graft).");
    }
  }
}
// Tests DAG after modifying
TEST_CASE("SubsplitDAG: Add Multiple Edges") {
  // For each adjacent NNI, builds the five PCSPs that would connect it into the
  // DAG, adds them with AddEdges, and verifies the edges exist and are counted.
  const std::string fasta_path = "data/six_taxon.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  // Instance that will be unaltered.
  auto inst = GPInstanceOfFiles(fasta_path, newick_path);
  inst.MakeNNIEngine();
  NNIEngine& nni_engine = inst.GetNNIEngine();
  GPDAG dag2 = inst.GetDAG();
  nni_engine.SyncAdjacentNNIsWithDAG();
  // Iterate by const reference: an NNIOperation holds two Bitsets, so a by-value
  // loop variable would copy both on every iteration.
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    GPDAG dag1 = inst.GetDAG();
    // Get connecting nodes.
    const auto grandparent_nodeid = dag1.FindFirstParentNodeId(nni.GetParent());
    const auto sister_nodeid =
        dag1.FindFirstChildNodeId(nni.GetParent(), nni.WhichCladeIsSister());
    const auto leftchild_nodeid =
        dag1.FindFirstChildNodeId(nni.GetChild(), SubsplitClade::Left);
    const auto rightchild_nodeid =
        dag1.FindFirstChildNodeId(nni.GetChild(), SubsplitClade::Right);
    std::vector<NodeId> node_ids{
        {grandparent_nodeid, sister_nodeid, leftchild_nodeid, rightchild_nodeid}};
    // Get PCSP Bitsets.
    std::vector<Bitset> pcsps;
    pcsps.push_back(
        Bitset::PCSP(dag1.GetDAGNodeBitset(grandparent_nodeid), nni.GetParent()));
    pcsps.push_back(
        Bitset::PCSP(nni.GetParent(), dag1.GetDAGNodeBitset(sister_nodeid)));
    pcsps.push_back(nni.GetCentralEdgePCSP());
    pcsps.push_back(
        Bitset::PCSP(nni.GetChild(), dag1.GetDAGNodeBitset(leftchild_nodeid)));
    pcsps.push_back(
        Bitset::PCSP(nni.GetChild(), dag1.GetDAGNodeBitset(rightchild_nodeid)));
    dag1.AddEdges(pcsps);
    // Const reference avoids copying each Bitset just to check containment.
    for (const auto& pcsp : pcsps) {
      CHECK_MESSAGE(dag1.ContainsEdge(pcsp), "DAG does not contain added edge.");
    }
    CHECK_MESSAGE(
        dag1.EdgeCountWithLeafSubsplits() == dag2.EdgeCountWithLeafSubsplits() + 5,
        "DAG does not contain proper number of edges after adding edges.");
  }
}
// ** NNIEngine tests **
// Shorthand aliases shared by the NNIEngine test cases below.
// Maps between pre- and post-modification node/edge ids:
using NodeMap = std::unordered_map<NodeId, NodeId>;
using EdgeMap = std::unordered_map<EdgeId, EdgeId>;
// Tree bookkeeping: trees by id, and edge -> owning tree.
using TreeIdMap = std::unordered_map<TreeId, RootedTree>;
using TreeEdgeMap = std::unordered_map<EdgeId, TreeId>;
// Score lookups keyed by edge, tree, or NNI.
using EdgeScoreMap = std::unordered_map<EdgeId, double>;
using TreeScoreMap = std::unordered_map<TreeId, double>;
using NNIScoreMap = std::map<NNIOperation, double>;
// Branch-length storage, per-DAG and per-NNI.
using BranchLengths = EigenVectorXd;
using NNIBranchLengthsMap = std::map<NNIOperation, EigenVectorXd>;
using BranchMap = DAGBranchHandler::BranchLengthMap;
using NNIBranchMapMap = std::map<NNIOperation, DAGBranchHandler::BranchLengthMap>;
// Builds a mapping from node and edge elements from pre-DAG to post-DAG. If pre-DAG
// element does not exist in post-DAG, return NoId.
// Builds a mapping from node and edge elements from pre-DAG to post-DAG,
// matching elements by their subsplit / PCSP bitsets. A pre-DAG element with no
// counterpart in the post-DAG maps to NoId.
std::pair<NodeMap, EdgeMap> BuildNodeAndEdgeMapsFromPreDAGToPostDAG(GPDAG& pre_dag,
                                                                    GPDAG& post_dag) {
  NodeMap node_map;
  for (const auto& subsplit : pre_dag.BuildSetOfNodeBitsets()) {
    // The universal common ancestor is not mapped.
    if (subsplit.SubsplitIsUCA()) {
      continue;
    }
    node_map[pre_dag.GetDAGNodeId(subsplit)] =
        post_dag.ContainsNode(subsplit) ? post_dag.GetDAGNodeId(subsplit)
                                        : NodeId(NodeId::NoId);
  }
  EdgeMap edge_map;
  for (const auto& pcsp : pre_dag.BuildSetOfEdgeBitsets()) {
    edge_map[pre_dag.GetEdgeIdx(pcsp)] = post_dag.ContainsEdge(pcsp)
                                             ? post_dag.GetEdgeIdx(pcsp)
                                             : EdgeId(EdgeId::NoId);
  }
  return {node_map, edge_map};
}
// Tests that reindexers match the remapped node_ids and edge_idxs after AddNodePair.
TEST_CASE("NNIEngine: Reindexers for AddNodePair") {
  // Verifies that the node and edge reindexers returned by AddNodePair map each
  // old index to the position where the same subsplit / PCSP lives after the
  // addition. `pre_dag` lags one addition behind `dag` to serve as the
  // before-state reference.
  const std::string fasta_path = "data/five_taxon.fasta";
  const std::string newick_path = "data/five_taxon_rooted_more_2.nwk";
  auto pre_inst = GPInstanceOfFiles(fasta_path, newick_path);
  auto& pre_dag = pre_inst.GetDAG();
  auto inst = GPInstanceOfFiles(fasta_path, newick_path);
  auto& dag = inst.GetDAG();
  inst.MakeNNIEngine();
  auto& nni_engine = inst.GetNNIEngine();
  nni_engine.SyncAdjacentNNIsWithDAG();
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    auto mods = dag.AddNodePair(nni);
    // Each pre-addition node should keep its subsplit under the reindexer.
    for (NodeId old_idx = NodeId(0); old_idx < pre_dag.NodeCount(); old_idx++) {
      NodeId new_idx =
          NodeId(mods.node_reindexer.GetNewIndexByOldIndex(old_idx.value_));
      Bitset old_node = pre_dag.GetDAGNode(old_idx).GetBitset();
      Bitset new_node = dag.GetDAGNode(new_idx).GetBitset();
      CHECK_EQ(old_node, new_node);
    }
    // Each pre-addition edge should keep its (parent, child) subsplit pair.
    for (EdgeId old_idx = EdgeId(0); old_idx < pre_dag.EdgeCount(); old_idx++) {
      EdgeId new_idx =
          EdgeId(mods.edge_reindexer.GetNewIndexByOldIndex(old_idx.value_));
      Bitset old_parent =
          pre_dag.GetDAGNode(NodeId(pre_dag.GetDAGEdge(old_idx).GetParent()))
              .GetBitset();
      Bitset old_child =
          pre_dag.GetDAGNode(NodeId(pre_dag.GetDAGEdge(old_idx).GetChild()))
              .GetBitset();
      Bitset new_parent =
          dag.GetDAGNode(NodeId(dag.GetDAGEdge(new_idx).GetParent())).GetBitset();
      Bitset new_child =
          dag.GetDAGNode(NodeId(dag.GetDAGEdge(new_idx).GetChild())).GetBitset();
      CHECK_EQ(old_parent, new_parent);
      CHECK_EQ(old_child, new_child);
    }
    // Catch pre_dag up so the next iteration compares against the right state.
    pre_dag.AddNodePair(nni);
  }
}
// This test builds a DAG, tests if engine generates the same set of adjacent NNIs and
// manually created set. Then adds a node pair to DAG, and tests if engine updates
// correctly.
TEST_CASE("NNIEngine: Adjacent NNI Maintenance") {
  // Simple DAG that contains a shared edge, internal leafward fork, and an internal
  // rootward fork.
  const std::string fasta_path = "data/six_taxon.fasta";
  auto inst = GPInstanceOfFiles(fasta_path, "data/six_taxon_rooted_simple.nwk");
  GPDAG& dag = inst.GetDAG();
  GPEngine& gp_engine = inst.GetGPEngine();
  // Manually maintained set of expected adjacent NNIs, built below via
  // InsertNNI/RemoveNNI and compared against the engine's set.
  NNISet correct_adjacent_nnis;
  auto nni_engine = NNIEngine(dag, &gp_engine);
  // nni_engine_2 is only used at the end of the test: it rebuilds the adjacent
  // NNI set from scratch to cross-check nni_engine's incremental update.
  auto nni_engine_2 = NNIEngine(dag, &gp_engine);
  // Build adjacent NNIs from current DAG state.
  nni_engine.SyncAdjacentNNIsWithDAG();
  // Functions for quick manual insertion/removal for Correct NNI Set.
  auto InsertNNI = [&correct_adjacent_nnis](Bitset parent, Bitset child) {
    auto nni = NNIOperation(parent, child);
    correct_adjacent_nnis.insert(nni);
  };
  auto RemoveNNI = [&correct_adjacent_nnis](Bitset parent, Bitset child) {
    auto nni = NNIOperation(parent, child);
    correct_adjacent_nnis.erase(nni);
  };
  // For images and notes describing this part of the test case, see
  // https://github.com/phylovi/bito/pull/366#issuecomment-920454401
  // Add NNIs for edge 4 to NNI Set.
  InsertNNI(Bitset::Subsplit("010000", "101111"),  // (parent)-(child)
            Bitset::Subsplit("100000", "001111"));  // (1|02345)-(0|2345)
  InsertNNI(Bitset::Subsplit("100000", "011111"),
            Bitset::Subsplit("010000", "001111"));  // (0|12345)-(1|2345)
  // Add NNIs for edge 6 to NNI Set.
  InsertNNI(Bitset::Subsplit("001000", "110111"),
            Bitset::Subsplit("110000", "000111"));  // (2|01345)-(01|345)
  InsertNNI(Bitset::Subsplit("000111", "111000"),
            Bitset::Subsplit("110000", "001000"));  // (345|012)-(01|2)
  // Add NNIs for edge 7 to NNI Set.
  InsertNNI(Bitset::Subsplit("000001", "111110"),
            Bitset::Subsplit("110000", "001110"));  // (5|01234)-(01|234)
  InsertNNI(Bitset::Subsplit("001110", "110001"),
            Bitset::Subsplit("110000", "000001"));  // (234|015)-(01|5)
  // Add NNIs for edge 2 to NNI Set.
  InsertNNI(Bitset::Subsplit("000110", "001001"),
            Bitset::Subsplit("001000", "000001"));  // (34|25)-(2|5)
  // No NNIs to add for edge 5 to NNI Set (see notes).
  // Add NNIs for edge 3 to NNI Set.
  InsertNNI(Bitset::Subsplit("000100", "001010"),
            Bitset::Subsplit("001000", "000010"));  // (3|24)-(2|4)
  InsertNNI(Bitset::Subsplit("000010", "001100"),
            Bitset::Subsplit("001000", "000100"));  // (4|23)-(2|3)
  // Add NNIs for edge 1 to NNI Set.
  InsertNNI(Bitset::Subsplit("000010", "000101"),
            Bitset::Subsplit("000100", "000001"));  // (4|35)-(3|5)
  InsertNNI(Bitset::Subsplit("000100", "000011"),
            Bitset::Subsplit("000010", "000001"));  // (3|45)-(4|5)
  // Check that `BuildNNISet()` added correct set of nnis.
  auto adjacent_nnis = nni_engine.GetAdjacentNNIs();
  CHECK_EQ(adjacent_nnis, correct_adjacent_nnis);
  // Now we add a node pair to DAG so we can check UpdateAdjacentNNIsAfterAddNodePair.
  // see https://github.com/phylovi/bito/pull/366#issuecomment-922781415
  NNIOperation nni_to_add(Bitset::Subsplit("000110", "001001"),
                          Bitset::Subsplit("001000", "000001"));  // (34|25)-(2|5)
  dag.AddNodePair(nni_to_add);
  // Update NNI.
  nni_engine.UpdateAdjacentNNIsAfterDAGAddNodePair(nni_to_add);
  // Add parents of parent (edge 8) to NNI Set.
  InsertNNI(Bitset::Subsplit("001001", "110110"),
            Bitset::Subsplit("110000", "000110"));  // (25|0134)-(01|34)
  InsertNNI(Bitset::Subsplit("000110", "111001"),
            Bitset::Subsplit("110000", "001001"));  // (34|0125)-(01|25)
  // Add children of parent (edge 19) to NNI Set.
  InsertNNI(Bitset::Subsplit("000100", "001011"),
            Bitset::Subsplit("001001", "000010"));  // (3|245)-(25|4)
  InsertNNI(Bitset::Subsplit("000010", "001101"),
            Bitset::Subsplit("001001", "000100"));  // (4|235)-(25|3)
  // No parents of child (edge 20) to add to NNI Set (see notes).
  // These should not be equal, as it has not yet removed the pair just added to DAG.
  CHECK_NE(nni_engine.GetAdjacentNNIs(), correct_adjacent_nnis);
  // Remove NNI added to DAG from NNI Set.
  RemoveNNI(nni_to_add.parent_, nni_to_add.child_);
  // Check that `UpdateAdjacentNNIsAfterAddNodePair()` updated correctly.
  CHECK_EQ(nni_engine.GetAdjacentNNIs(), correct_adjacent_nnis);
  // Build NNI Set from current DAG state from scratch.
  nni_engine_2.SyncAdjacentNNIsWithDAG();
  CHECK_EQ(nni_engine_2.GetAdjacentNNIs(), correct_adjacent_nnis);
}
// Tests DAG equality after adding different NNIs and built from different taxon
// orderings. Test described at:
// https://github.com/phylovi/bito/pull/377#issuecomment-1035410447
TEST_CASE("NNIEngine: Add NNI Test") {
  // Fasta contains simple sequences for four taxa: x0,x1,x2,x3.
  const std::string fasta_path = "data/four_taxon.fasta";
  // dag_A_1 is a DAG that contains pair_1.
  auto inst_A_1 =
      GPInstanceOfFiles(fasta_path, "data/four_taxon_simple_before_nni_1.nwk",
                        "_ignore/mmapped_pv_A_1.data");
  GPDAG& dag_A_1 = inst_A_1.GetDAG();
  // dag_A_2 is a DAG that contains pair_2.
  auto inst_A_2 =
      GPInstanceOfFiles(fasta_path, "data/four_taxon_simple_before_nni_2.nwk",
                        "_ignore/mmapped_pv_A_2.data");
  GPDAG& dag_A_2 = inst_A_2.GetDAG();
  // dag_A_2b is a DAG that contains pair_2 with a different taxon mapping.
  auto inst_A_2b =
      GPInstanceOfFiles(fasta_path, "data/four_taxon_simple_before_nni_2b.nwk",
                        "_ignore/mmapped_pv_A_2.data");
  GPDAG& dag_A_2b = inst_A_2b.GetDAG();
  // dag_B is a DAG containing dag_A_1 after adding node pair_2.
  auto inst_B = GPInstanceOfFiles(fasta_path, "data/four_taxon_simple_after_nni.nwk",
                                  "_ignore/mmapped_pv_B.data");
  GPDAG& dag_B = inst_B.GetDAG();
  // pair_1: NNI pair missing from dag_A_1.
  NNIOperation pair_1(Bitset::Subsplit("0110", "0001"),  // 12|3
                      Bitset::Subsplit("0100", "0010"));  // 1|2
  // pair_2: NNI pair missing from dag_A_2.
  // Note: pair_2 is the same subsplit pair as pair_1; the distinct name mirrors
  // that it is added to a different starting DAG.
  NNIOperation pair_2(Bitset::Subsplit("0110", "0001"),  // 12|3
                      Bitset::Subsplit("0100", "0010"));  // 1|2
  // pair_2b: NNI pair missing from dag_A_2b (same NNI under the alternative
  // taxon-to-bit mapping).
  NNIOperation pair_2b(Bitset::Subsplit("0100", "0011"),  // 1|23
                       Bitset::Subsplit("0010", "0001"));  // 2|3
  // Before adding missing NNIs, dag_A_2 variants are equal, but dag_A_1 and dag_A_2 are
  // different.
  CHECK_EQ(dag_A_1, dag_A_1);
  CHECK_EQ(dag_A_2, dag_A_2b);
  CHECK_NE(dag_A_1, dag_A_2);
  // Add missing NNIs.
  dag_A_1.AddNodePair(pair_1);
  dag_A_2.AddNodePair(pair_2);
  dag_A_2b.AddNodePair(pair_2b);
  // After adding missing NNIs, all DAGs are equal to dag_B.
  CHECK_EQ(dag_A_1, dag_B);
  CHECK_EQ(dag_A_2, dag_B);
  CHECK_EQ(dag_A_2b, dag_B);
}
// Starts with a DAG built from a single tree. Iteratively finds all adjacent NNIs and
// adds them to the DAG, until there are no more adjacent NNIs to DAG.
// (1) Tests that resulting DAG is equal to the complete DAG, containing all possible
// subsplits. (2) Reruns with "include rootsplit" option off. Checks that
// resulting DAG contains only edges reachable from the initial rootsplit.
TEST_CASE("NNIEngine: Build Complete DAG by Adding NNIs (include/exclude rootsplit)") {
  // Enumerate the node set of the "complete" DAG on `taxon_count` taxa: every
  // valid subsplit (each taxon assigned to the left clade, the right clade, or
  // neither; both clades nonempty), plus all leaf subsplits and the UCA subsplit.
  auto BuildCompleteDAGSubsplits = [](const size_t taxon_count) {
    // Recursive lambda (passed to itself as the last argument) that walks all
    // 3^taxon_count assignments of taxa to clades, inserting each valid subsplit.
    auto BuildSubsplitsFromAllTaxonAssignment =
        [](std::set<Bitset>& all_subsplits, Bitset& subsplit, const size_t i,
           auto&& BuildSubsplitsFromAllTaxonAssignment) {
          if (i == subsplit.SubsplitGetCladeSize()) {
            // Base case: all taxa assigned. Rebuild via Bitset::Subsplit
            // (presumably normalizes clade order -- TODO confirm) and discard
            // subsplits with an empty clade.
            auto subsplit_out =
                Bitset::Subsplit(subsplit.SubsplitGetClade(SubsplitClade::Left),
                                 subsplit.SubsplitGetClade(SubsplitClade::Right));
            if (subsplit_out.SubsplitGetClade(SubsplitClade::Right).None() ||
                subsplit_out.SubsplitGetClade(SubsplitClade::Left).None()) {
              return;
            }
            all_subsplits.insert(subsplit_out);
            return;
          }
          // Try taxon i in: (j=0) left clade, (j=1) right clade, (j=2) neither.
          for (size_t j = 0; j < 3; j++) {
            if (j == 0) {
              subsplit.set(i, true);
              subsplit.set(subsplit.SubsplitGetCladeSize() + i, false);
            } else if (j == 1) {
              subsplit.set(i, false);
              subsplit.set(subsplit.SubsplitGetCladeSize() + i, true);
            } else {
              subsplit.set(i, false);
              subsplit.set(subsplit.SubsplitGetCladeSize() + i, false);
            }
            BuildSubsplitsFromAllTaxonAssignment(all_subsplits, subsplit, i + 1,
                                                 BuildSubsplitsFromAllTaxonAssignment);
          }
        };
    std::set<Bitset> subsplits;
    Bitset subsplit(taxon_count * 2, false);
    BuildSubsplitsFromAllTaxonAssignment(subsplits, subsplit, 0,
                                         BuildSubsplitsFromAllTaxonAssignment);
    // Include the leaf subsplit for each taxon and the universal common
    // ancestor (UCA) subsplit.
    for (size_t i = 0; i < taxon_count; i++) {
      subsplits.insert(
          Bitset::LeafSubsplitOfNonemptyClade(Bitset::Singleton(taxon_count, i)));
    }
    subsplits.insert(Bitset::UCASubsplitOfTaxonCount(taxon_count));
    return subsplits;
  };
  // Load a DAG, repeatedly add every adjacent NNI until a fixed point is
  // reached, and check the result matches the expected complete-DAG node set
  // (restricted to the initial rootsplits when include_rootsplits is false).
  auto TestCompleteDAG = [&BuildCompleteDAGSubsplits](
                             const std::string& fasta_path,
                             const std::string& newick_path,
                             const bool include_rootsplits = true) {
    auto inst = GPInstanceOfFiles(fasta_path, newick_path);
    auto& dag = inst.GetDAG();
    // Record the initial rootsplits so we can verify they are unchanged when
    // rootsplit NNIs are excluded.
    std::set<Bitset> rootsplit_subsplits;
    for (const auto& node_id : dag.GetRootsplitNodeIds()) {
      const auto& subsplit = dag.GetDAGNodeBitset(node_id);
      rootsplit_subsplits.insert(subsplit);
    }
    inst.MakeNNIEngine();
    auto& nniengine = inst.GetNNIEngine();
    nniengine.SetIncludeRootsplitNNIs(include_rootsplits);
    nniengine.SyncAdjacentNNIsWithDAG();
    // Keep adding all adjacent NNIs until no more exist.
    while (nniengine.GetAdjacentNNICount() > 0) {
      for (const auto& nni : nniengine.GetAdjacentNNIs()) {
        dag.AddNodePair(nni.GetParent(), nni.GetChild());
      }
      nniengine.SyncAdjacentNNIsWithDAG();
    }
    auto subsplits = BuildCompleteDAGSubsplits(dag.TaxonCount());
    if (!include_rootsplits) {
      // Rootsplits must not have changed.
      std::set<Bitset> tmp_rootsplit_subsplits;
      for (const auto& node_id : dag.GetRootsplitNodeIds()) {
        const auto& subsplit = dag.GetDAGNodeBitset(node_id);
        tmp_rootsplit_subsplits.insert(subsplit);
      }
      CHECK_MESSAGE(
          rootsplit_subsplits == tmp_rootsplit_subsplits,
          "Rootsplit NNIs were added when NNIEngine flagged to exclude rootsplits.");
      // Restrict the expected node set to subsplits reachable from (i.e.
      // descendants of) the initial rootsplits, plus the rootsplits and UCA.
      std::set<Bitset> tmp_subsplits;
      for (const auto& ancestor : rootsplit_subsplits) {
        tmp_subsplits.insert(ancestor);
      }
      for (const auto& descendant : subsplits) {
        for (const auto& ancestor : rootsplit_subsplits) {
          if (Bitset::SubsplitIsAncestorDescendantPair(ancestor, descendant,
                                                       SubsplitClade::Left) ||
              Bitset::SubsplitIsAncestorDescendantPair(ancestor, descendant,
                                                       SubsplitClade::Right)) {
            tmp_subsplits.insert(descendant);
            break;
          }
        }
      }
      tmp_subsplits.insert(Bitset::UCASubsplitOfTaxonCount(dag.TaxonCount()));
      subsplits = tmp_subsplits;
    }
    // Verify the DAG contains exactly the expected subsplits.
    bool contains_all_subsplits = true;
    for (const auto& subsplit : subsplits) {
      contains_all_subsplits &= dag.ContainsNode(subsplit);
      if (!contains_all_subsplits) {
        std::cout << "Missing subsplit from complete DAG: "
                  << subsplit.SubsplitToString() << std::endl;
        break;
      }
    }
    contains_all_subsplits &= (subsplits.size() == dag.NodeCount());
    CHECK_MESSAGE(subsplits.size() == dag.NodeCount(),
                  "DAG node count is not equal to number of nodes in Complete DAG.");
    return contains_all_subsplits;
  };
  const std::string fasta_path_0 = "data/hello.fasta";
  const std::string newick_path_0 = "data/hello_rooted_diff_branches.nwk";
  CHECK_MESSAGE(TestCompleteDAG(fasta_path_0, newick_path_0, true),
                "Complete DAG test for Hello.");
  CHECK_MESSAGE(TestCompleteDAG(fasta_path_0, newick_path_0, false),
                "Complete DAG test for Hello when excluding rootsplits.");
  const std::string fasta_path_1 = "data/five_taxon.fasta";
  const std::string newick_path_1 = "data/five_taxon_trees_3_4_diff_branches.nwk";
  CHECK_MESSAGE(TestCompleteDAG(fasta_path_1, newick_path_1, true),
                "Complete DAG test for Five Taxon.");
  CHECK_MESSAGE(TestCompleteDAG(fasta_path_1, newick_path_1, false),
                "Complete DAG test for Five Taxon when excluding rootsplits.");
}
// Access ith NNI from NNI set.
// Access the ith NNI from an NNI set (in the set's iteration order).
// @param nni_set  Set of NNI operations to index into.
// @param which_nni  Zero-based position; must be < nni_set.size().
// @return A copy of the NNI at that position.
// Note: linear-time advance, since set iterators are not random-access.
NNIOperation GetWhichNNIFromSet(const NNISet& nni_set, const size_t which_nni) {
  auto nni_set_ptr = nni_set.begin();
  for (size_t i = 0; i < which_nni; i++) {
    // Pre-increment avoids the iterator copy that post-increment makes.
    ++nni_set_ptr;
  }
  return *nni_set_ptr;
}
// This compares DAGs after adding NNIs to SubsplitDAG vs GraftDAG.
// Also tests that Adding all node pairs to DAG gives proper result.
TEST_CASE("NNIEngine: GraftDAG") {
  // Simple DAG that contains a shared edge, internal leafward fork, and an internal
  // rootward fork.
  const std::string fasta_path = "data/six_taxon.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  // Instance that will be unaltered.
  auto pre_inst = GPInstanceOfFiles(fasta_path, newick_path);
  GPDAG& pre_dag = pre_inst.GetDAG();
  // Instance that is used by grafted DAG.
  auto graft_inst = GPInstanceOfFiles(fasta_path, newick_path);
  graft_inst.MakeNNIEngine();
  NNIEngine& nni_engine = graft_inst.GetNNIEngine();
  // host_dag is the underlying DAG the GraftDAG wraps; modifying either should
  // be reflected consistently (exercised in TEST #4).
  GPDAG& host_dag = graft_inst.GetDAG();
  GraftDAG& graft_dag = nni_engine.GetGraftDAG();
  // Find NNIs of DAG.
  nni_engine.SyncAdjacentNNIsWithDAG();
  size_t nni_count = nni_engine.GetAdjacentNNICount();
  // TEST #0:
  // Check DAG and GraftDAG equal before adding any NNIs.
  CHECK_MESSAGE(GraftDAG::CompareToDAG(graft_dag, pre_dag) == 0,
                "GraftDAG not equal to DAG before AddNodePair.");
  // TEST #1:
  // Add each NNI pair to GraftDAG, then check that expected nodes and edges are present
  // in DAG.
  for (size_t i = 0; i < nni_count; i++) {
    auto nni = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), i);
    graft_dag.AddNodePair(nni.parent_, nni.child_);
    CHECK_MESSAGE(graft_dag.ContainsNode(nni.parent_),
                  "Cannot find parent node in GraftDAG.");
    CHECK_MESSAGE(graft_dag.ContainsNode(nni.child_),
                  "Cannot find child node in GraftDAG.");
    // If the add failed, skip the neighbor checks for this NNI (the CHECKs
    // above have already recorded the failure).
    if (!(graft_dag.ContainsNode(nni.parent_) || graft_dag.ContainsNode(nni.child_))) {
      graft_dag.RemoveAllGrafts();
      continue;
    }
    // Every edge incident to the newly added parent/child nodes must be
    // findable in the GraftDAG.
    size_t neighbor_count = 0;
    for (const auto subsplit : {nni.GetParent(), nni.GetChild()}) {
      const auto& node_id = graft_dag.GetDAGNodeId(subsplit);
      const auto& node = graft_dag.GetDAGNode(node_id);
      const auto& node_view = node.GetNeighbors();
      for (auto it = node_view.begin(); it != node_view.end(); ++it) {
        CHECK_MESSAGE(graft_dag.ContainsEdge(it.GetParentId(), it.GetChildId()),
                      "Cannot find edge in GraftDAG.");
        neighbor_count++;
      }
    }
    // An added NNI node pair is expected to have at least 6 incident edges in
    // total -- TODO confirm the exact bound against the GraftDAG docs.
    if (neighbor_count < 6) {
      std::cout << "Too few neighbors to NNI: " << i << " " << nni << std::endl;
    }
    CHECK_MESSAGE(neighbor_count >= 6, "There were too few neighbors to NNI.");
  }
  graft_dag.RemoveAllGrafts();
  // TEST #2:
  // Test DAGs equivalent after adding NNI individually.
  for (size_t i = 0; i < nni_count; i++) {
    auto nni = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), i);
    // New temp DAG.
    auto inst = GPInstanceOfFiles(fasta_path, newick_path);
    GPDAG& dag = inst.GetDAG();
    // Add NNI to DAG and GraftDAG and compare results.
    dag.AddNodePair(nni.parent_, nni.child_);
    graft_dag.AddNodePair(nni.parent_, nni.child_);
    CHECK_MESSAGE(GraftDAG::CompareToDAG(graft_dag, dag) == 0,
                  "GraftDAG not equal to DAG after adding NNIs.");
    // Clear NNIs from GraftDAG and compare to initial DAG.
    graft_dag.RemoveAllGrafts();
    CHECK_MESSAGE(GraftDAG::CompareToDAG(graft_dag, pre_dag) == 0,
                  "GraftDAG not equal to initial DAG after removing all NNIs.");
  }
  // TEST #3:
  // Test DAGs not equivalent when adding different NNIs.
  {
    auto nni_1 = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), 0);
    auto nni_2 = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), 1);
    // New temp DAG.
    auto inst = GPInstanceOfFiles(fasta_path, newick_path);
    GPDAG& dag = inst.GetDAG();
    // Add NNI to DAG and GraftDAG and compare results.
    dag.AddNodePair(nni_1.parent_, nni_1.child_);
    graft_dag.AddNodePair(nni_2.parent_, nni_2.child_);
    CHECK_MESSAGE(GraftDAG::CompareToDAG(graft_dag, dag) != 0,
                  "GraftDAG is equal to DAG after adding different NNIs.");
  }
  // TEST #4:
  // Modify GraftDAG, clear GraftDAG, modify DAG, then modify GraftDAG again.
  // Checks that grafts and host modifications stay consistent, including that
  // node ids agree between the GraftDAG and its host DAG.
  for (size_t i = 0; i < nni_count; i++) {
    auto nni = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), i);
    graft_dag.AddNodePair(nni.parent_, nni.child_);
    CHECK_MESSAGE(
        (graft_dag.ContainsNode(nni.parent_) && graft_dag.ContainsNode(nni.child_)),
        "Graft DAG does not contain added nodes by the Graft DAG.");
    graft_dag.RemoveAllGrafts();
    host_dag.AddNodePair(nni.parent_, nni.child_);
    CHECK_MESSAGE(
        (host_dag.ContainsNode(nni.parent_) && host_dag.ContainsNode(nni.child_)),
        "Host DAG does not contain added nodes added by the Host DAG.");
    CHECK_MESSAGE(
        (graft_dag.ContainsNode(nni.parent_) && graft_dag.ContainsNode(nni.child_)),
        "Graft DAG does not contain added nodes added by the Graft DAG.");
    CHECK_MESSAGE(
        graft_dag.GetDAGNodeId(nni.parent_) == host_dag.GetDAGNodeId(nni.parent_),
        "Graft DAG NodeId does not match Host DAG NodeId.");
  }
}
// Initialize GPInstance, make GPEngine, DAG, and NNIEngine.
// Perform initial run of GP optimization.
void GPInstanceRunGP(GPInstance& inst, const bool do_optimize_branch_lengths = true,
                     const bool do_fixed_branch_lengths = false,
                     const bool do_reinit_priors = true) {
  if (do_fixed_branch_lengths) {
    // Pin all branch lengths to the engine's default value.
    inst.GetGPEngine().SetBranchLengthsToDefault();
  }
  if (do_optimize_branch_lengths) {
    // Tolerance 1e-4, max 100 iterations; the final bool flag's meaning is not
    // visible here -- presumably a per-edge/quiet option, TODO confirm against
    // GPInstance::EstimateBranchLengths.
    inst.EstimateBranchLengths(0.0001, 100, true);
  }
  if (do_reinit_priors) {
    inst.ReinitializePriors();
  }
  // Populate partial likelihood vectors, then compute per-GPCSP and marginal
  // log likelihoods.
  inst.PopulatePLVs();
  inst.ComputeLikelihoods();
  inst.ComputeMarginalLikelihood();
}
// Adds NNIs to DAG, then resizes and reindexes GPEngine, then checks that the same
// node and edge bitsets correspond to the same PLVs and branch lengths before and after
// AddNodePair.
TEST_CASE("NNIEngine: Resize and Reindex GPEngine after AddNodePair") {
  // Check that two GPInstances produce the same results after GP run.
  auto CheckGPEngineRun = [](GPInstance& inst, GPInstance& pre_inst) {
    bool passes_gp_run = true;
    inst.EstimateBranchLengths(0.0001, 100, true);
    inst.PopulatePLVs();
    inst.ComputeLikelihoods();
    inst.ComputeMarginalLikelihood();
    auto likelihoods = inst.GetGPEngine().GetPerGPCSPLogLikelihoods();
    pre_inst.EstimateBranchLengths(0.0001, 100, true);
    pre_inst.PopulatePLVs();
    pre_inst.ComputeLikelihoods();
    pre_inst.ComputeMarginalLikelihood();
    // Bugfix: this previously read the likelihoods from `inst` again, which
    // made the comparison below vacuously true. Read from `pre_inst` so the
    // two instances are actually compared.
    auto pre_likelihoods = pre_inst.GetGPEngine().GetPerGPCSPLogLikelihoods();
    if (!VectorXdEquality(likelihoods, pre_likelihoods, 1e-3)) {
      return false;
    }
    return passes_gp_run;
  };
  // Check that GPEngine resized and reindexed after DAG modifications.
  auto CheckGPEngineResizeAndReindex = [](GPDAG& dag, GPEngine& gpengine,
                                          GPDAG& pre_dag, GPEngine& pre_gpengine) {
    bool passes_resized = true;
    bool passes_plv_reindexed = true;
    bool passes_branch_reindexed = true;
    // Check resizing GPEngine properly.
    passes_resized &= (gpengine.GetNodeCount() == dag.NodeCountWithoutDAGRoot());
    passes_resized &= (gpengine.GetGPCSPCount() == dag.EdgeCountWithLeafSubsplits());
    // Check that elements reindexed properly: map each pre-DAG node/edge id to
    // its post-modification id, then compare the associated engine data.
    const auto& [node_map, edge_map] =
        BuildNodeAndEdgeMapsFromPreDAGToPostDAG(pre_dag, dag);
    // Check PLVs reindexed properly.
    for (const auto& [pre_node_id, node_id] : node_map) {
      for (const auto plv_type : PLVTypeEnum::Iterator()) {
        const auto& plv_a = gpengine.GetPLVHandler().GetPV(plv_type, node_id);
        const auto& plv_b = pre_gpengine.GetPLVHandler().GetPV(plv_type, pre_node_id);
        auto max_diff = PLVNodeHandler::MaxDifference(plv_a, plv_b);
        if (max_diff > 1e-3) {
          passes_plv_reindexed = false;
        }
      }
    }
    // Check branch length reindexed properly.
    auto& pre_branch_lengths = pre_gpengine.GetBranchLengthHandler();
    auto& branch_lengths = gpengine.GetBranchLengthHandler();
    for (const auto& [pre_edge_id, edge_id] : edge_map) {
      const auto branch_a = branch_lengths.Get(edge_id);
      const auto branch_b = pre_branch_lengths.Get(pre_edge_id);
      // std::abs: unqualified `abs` can resolve to the C integer overload,
      // which would truncate the double difference to 0.
      if (std::abs(branch_a - branch_b) > 1e-3) {
        passes_branch_reindexed = false;
      }
    }
    return passes_resized && passes_plv_reindexed && passes_branch_reindexed;
  };
  // Test that adds nodes to DAG, resizes and reindexes GPEngine and checks that
  // GPEngine reindexed correctly.
  // @param nni_add_limit  Stop after this many NNIs have been added.
  // @param test_after_every  Run resize/reindex checks every N additions.
  // @param skip_reindexing  Grow the engine without reindexing (expected to fail
  //        the checks when the DAG was modified).
  // @param perform_resize_unmodded_test  Only grow the engine (no DAG changes)
  //        and verify GP results are unchanged; returns early.
  auto ResizeAndReindexGPEngineTest = [&CheckGPEngineResizeAndReindex,
                                       &CheckGPEngineRun](
                                          const size_t nni_add_limit,
                                          const size_t test_after_every,
                                          const bool skip_reindexing,
                                          const bool perform_resize_unmodded_test) {
    BoolVector test_array;
    bool test_passes = true;
    const std::string fasta_path = "data/hotstart.fasta";
    const std::string newick_path = "data/hotstart_bootstrap_sample.nwk";
    // Instance that will not be modified.
    auto pre_inst =
        GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmap_plv_A.data");
    GPInstanceRunGP(pre_inst);
    GPDAG& pre_dag = pre_inst.GetDAG();
    GPEngine& pre_gpengine = pre_inst.GetGPEngine();
    // Instance that will have DAG and GPEngine modified.
    auto inst = GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmap_plv_C.data");
    GPInstanceRunGP(inst);
    GPDAG& dag = inst.GetDAG();
    GPEngine& gpengine = inst.GetGPEngine();
    inst.MakeNNIEngine();
    NNIEngine& nni_engine = inst.GetNNIEngine();
    // Run unmodified DAG with resized GPEngine test.
    if (perform_resize_unmodded_test) {
      // Verify engine not resized yet by accessing too big index.
      PVId plv_idx_out_of_range =
          PVId((dag.NodeCountWithoutDAGRoot() * 10 * PLVNodeHandler::plv_count_) - 1);
      CHECK_THROWS(gpengine.GetPLV(plv_idx_out_of_range));
      // Force bigger reallocation, with no reindexing.
      gpengine.GrowPLVs(pre_dag.NodeCountWithoutDAGRoot(), std::nullopt,
                        pre_dag.NodeCountWithoutDAGRoot() * 10);
      gpengine.GrowGPCSPs(pre_dag.EdgeCountWithLeafSubsplits(), std::nullopt,
                          pre_dag.EdgeCountWithLeafSubsplits() * 10);
      // Verify engine was resized by accessing too big index.
      CHECK_NOTHROW(gpengine.GetPLV(plv_idx_out_of_range));
      bool gp_run_passes = CheckGPEngineRun(inst, pre_inst);
      return gp_run_passes;
    }
    // Initialize Maps and Reindexers.
    Reindexer node_reindexer, node_reindexer_without_root, edge_reindexer;
    node_reindexer = Reindexer::IdentityReindexer(inst.GetDAG().NodeCount());
    edge_reindexer =
        Reindexer::IdentityReindexer(inst.GetDAG().EdgeCountWithLeafSubsplits());
    // Add NNIs to DAG and check resized and reindexed properly.
    nni_engine.SyncAdjacentNNIsWithDAG();
    size_t nni_count = nni_engine.GetAdjacentNNICount();
    size_t nni_add = 0;
    while (nni_count > 0) {
      for (size_t i = 0; i < nni_count; i++) {
        auto nni = GetWhichNNIFromSet(nni_engine.GetAdjacentNNIs(), i);
        auto mods = inst.GetDAG().AddNodePair(nni.parent_, nni.child_);
        // Accumulate reindexers across additions so a batch of DAG mods can be
        // applied to the engine in one reindexing pass.
        node_reindexer = node_reindexer.ComposeWith(mods.node_reindexer);
        edge_reindexer = edge_reindexer.ComposeWith(mods.edge_reindexer);
        nni_add++;
        if (nni_add >= nni_add_limit) {
          break;
        }
        if (nni_add % test_after_every == 0) {
          node_reindexer_without_root =
              node_reindexer.RemoveNewIndex(dag.GetDAGRootNodeId().value_);
          size_t node_count = dag.NodeCountWithoutDAGRoot();
          size_t edge_count = dag.EdgeCountWithLeafSubsplits();
          if (!skip_reindexing) {
            gpengine.GrowPLVs(node_count, node_reindexer_without_root);
            gpengine.GrowGPCSPs(edge_count, edge_reindexer);
          } else {
            gpengine.GrowPLVs(node_count);
            gpengine.GrowGPCSPs(edge_count);
          }
          // Test resizing and reindexing.
          test_passes =
              CheckGPEngineResizeAndReindex(dag, gpengine, pre_dag, pre_gpengine);
          test_array.push_back(test_passes);
          // Reinitialize reindexers.
          node_reindexer = Reindexer::IdentityReindexer(dag.NodeCount());
          edge_reindexer =
              Reindexer::IdentityReindexer(dag.EdgeCountWithLeafSubsplits());
        }
      }
      nni_engine.ResetNNIData();
      nni_engine.SyncAdjacentNNIsWithDAG();
      nni_count = nni_engine.GetAdjacentNNICount();
      if (nni_add >= nni_add_limit) {
        break;
      }
    }
    // Test final resizing and reindexing.
    node_reindexer_without_root =
        node_reindexer.RemoveNewIndex(dag.GetDAGRootNodeId().value_);
    if (!skip_reindexing) {
      gpengine.GrowPLVs(dag.NodeCountWithoutDAGRoot(), node_reindexer_without_root);
      gpengine.GrowGPCSPs(dag.EdgeCountWithLeafSubsplits(), edge_reindexer);
    } else {
      gpengine.GrowPLVs(dag.NodeCountWithoutDAGRoot());
      gpengine.GrowGPCSPs(dag.EdgeCountWithLeafSubsplits());
    }
    test_passes = CheckGPEngineResizeAndReindex(dag, gpengine, pre_dag, pre_gpengine);
    test_array.push_back(test_passes);
    // Finally, test run full GP Optimization after all modifications completed.
    inst.ReinitializePriors();
    dag.ReinitializeTidyVectors();
    inst.EstimateBranchLengths(0.0001, 100, true);
    inst.PopulatePLVs();
    inst.ComputeLikelihoods();
    inst.ComputeMarginalLikelihood();
    // Overall pass only if every intermediate check passed.
    test_passes = std::accumulate(test_array.begin(), test_array.end(), true,
                                  std::logical_and<>());
    return test_passes;
  };
  // TEST_0: Test that resize and reindex GPEngine works with no modification the DAG.
  auto test_0 = ResizeAndReindexGPEngineTest(0, 1, false, false);
  CHECK_MESSAGE(test_0,
                "TEST_0: Resize and reindex GPEngine fails when no modifications are "
                "made to DAG.");
  // TEST_1: Test resize and reindex GPEngine works when adding a single node pair to
  // DAG.
  auto test_1 = ResizeAndReindexGPEngineTest(1, 1, false, false);
  CHECK_MESSAGE(
      test_1,
      "TEST_1: Resize and reindex GPEngine fails after single AddNodePair to DAG.");
  // TEST_2: Test that improper mapping occurs when not reindexing GPEngine when adding
  // a single node pair to DAG.
  auto test_2 = ResizeAndReindexGPEngineTest(10, 1, true, false);
  CHECK_FALSE_MESSAGE(test_2,
                      "TEST_2: Resize and reindex GPEngine is not incorrect when NOT "
                      "reindexing after single AddNodePair to DAG.");
  // TEST_3: Test resize and reindex GPEngine works when adding a many node pairs,
  // performing resizing and reindexing for each modification of DAG.
  auto test_3 = ResizeAndReindexGPEngineTest(100, 1, false, false);
  CHECK_MESSAGE(test_3,
                "TEST_3: Resize and reindex GPEngine fails after multiple AddNodePair, "
                "reindexed individually.");
  // TEST_4: Test resize and reindex GPEngine works when adding a many node pairs,
  // composing multiple modifications of DAG into single reindexing operation.
  auto test_4 = ResizeAndReindexGPEngineTest(100, 10, false, false);
  CHECK_MESSAGE(
      test_4,
      "TEST_4: Resize and reindex GPEngine fails after multiple AddNodePair to DAG, "
      "reindexed in batches.");
  // TEST_5: Resizes GPEngine without modifying the DAG. Then tests that resized
  // GPEngine and unmodified GPEngine produce same GP run results.
  auto test_5 = ResizeAndReindexGPEngineTest(1, 1, true, true);
  CHECK_MESSAGE(
      test_5,
      "TEST_5: Resized GPEngine with unmodified DAG changed results of GP Run.");
}
// Compares NNI likelihoods computed by two different GPInstances.
// - The true GPInstance adds each NNI individually to the DAG, resizes the GPEngine,
// repopulates all PLVs in DAG, then recomputes the likelihoods for each NNI.
// - The NNIEngine version of the GPInstance adds all NNIs to the GraftDAG, then resizes
// GPEngine for temporary space (two PLVs total, plus one BranchLength and
// PerGPCSPLogLikelihood per NNI), then generates and processes a GPOperationVector for
// computing all NNI Likelihoods in series.
// - Results of each version are then compared by the likelihood of each NNI's central
// edge, spanning the added parent and child. All priors are set to 1.0 and all branch
// lengths set to 0.1 to remove their impact on the computation.
// - Note: Input DAG is fully connected -- all legal edges between any two subsplits in
// DAG are added. This ensures that NNIs via truthDAG using AddNodePair and graftDAG
// using Pre-NNI references have the same topology.
TEST_CASE("NNIEngine via GPEngine: Proposed NNI vs DAG NNI GPLikelihoods") {
  // Fetch the per-GPCSP log likelihood of an NNI's central edge from an instance.
  auto GPInstGetNNILikelihood = [](GPInstance& inst, const NNIOperation& nni) {
    const GPDAG& dag = inst.GetDAG();
    const auto edge_idx = dag.GetEdgeIdx(nni.parent_, nni.child_);
    Assert(edge_idx < size_t(inst.GetGPEngine().GetPerGPCSPLogLikelihoods().size()),
           "Edge idx out of range for GPInstGetNNILikelihood.");
    const double likelihood =
        inst.GetGPEngine().GetPerGPCSPLogLikelihoods()[edge_idx.value_];
    return likelihood;
  };
  // Compare NNI likelihoods computed via GraftDAG proposal against the "truth"
  // of adding each NNI to a fresh DAG with AddNodePair. Both paths start from
  // the same fully-connected DAG with null priors and shared branch lengths.
  auto CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs =
      [&GPInstGetNNILikelihood](const std::string& fasta_path,
                                const std::string& newick_path,
                                const bool do_optimize_new_branch_lengths = false,
                                const bool do_fixed_branch_lengths = false) {
        bool passes_test = true;
        bool do_optimize_branch_lengths = !do_fixed_branch_lengths;
        // Likelihoods.
        NNIScoreMap prenni_predag_likelihoods;
        NNIScoreMap prenni_graftdag_likelihoods;
        NNIScoreMap nni_graftdag_likelihoods;
        NNIScoreMap prenni_truthdag_likelihoods;
        NNIScoreMap nni_truthdag_likelihoods;
        // Branch Lengths.
        BranchLengths predag_branchlengths;
        NNIBranchLengthsMap graftdag_branchlengths;
        NNIBranchLengthsMap truthdag_branchlengths;
        // Branch Map.
        BranchMap predag_branchmap;
        NNIBranchMapMap graftdag_branchmaps;
        NNIBranchMapMap truthdag_branchmaps;
        // PreDAG Instance: unaltered DAG.
        auto pre_inst =
            GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmapped_pv_pre.data");
        pre_inst.MakeNNIEngine();
        GPDAG& pre_dag = pre_inst.GetDAG();
        GPEngine& pre_gpengine = pre_inst.GetGPEngine();
        NNIEngine& nniengine = pre_inst.GetNNIEngine();
        // Fully connect so truthDAG (AddNodePair) and graftDAG (pre-NNI
        // references) share the same topology.
        pre_dag.FullyConnect();
        pre_gpengine.GrowPLVs(pre_dag.NodeCountWithoutDAGRoot());
        pre_gpengine.GrowGPCSPs(pre_dag.EdgeCountWithLeafSubsplits());
        pre_gpengine.SetNullPrior();
        const bool do_init_branch_lengths = false;
        GPInstanceRunGP(pre_inst,
                        (do_optimize_branch_lengths || do_init_branch_lengths),
                        do_fixed_branch_lengths, false);
        // Capture the preDAG branch lengths so both comparison paths start from
        // identical values.
        predag_branchmap =
            pre_gpengine.GetBranchLengthHandler().BuildBranchLengthMap(pre_dag);
        const auto pre_branches =
            pre_gpengine.GetBranchLengths(0, pre_gpengine.GetPaddedGPCSPCount());
        predag_branchlengths = pre_branches;
        nniengine.SyncAdjacentNNIsWithDAG();
        // Map from pre-NNI to NNI that created NNI.
        std::map<NNIOperation, NNIOperation> nni_to_prenni_map;
        for (const auto& nni : nniengine.GetAdjacentNNIs()) {
          auto pre_nni = nniengine.GetDAG().FindNNINeighborInDAG(nni);
          nni_to_prenni_map.insert({nni, pre_nni});
        }
        // Compute likelihoods for preDAG.
        for (const auto& [nni, pre_nni] : nni_to_prenni_map) {
          std::ignore = nni;
          const auto likelihood = GPInstGetNNILikelihood(pre_inst, pre_nni);
          prenni_predag_likelihoods.insert({pre_nni, likelihood});
        }
        // Instance that is used by graftDAG.
        auto graft_inst =
            GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmapped_pv_graft.data");
        graft_inst.MakeNNIEngine();
        GPDAG& graft_dag = graft_inst.GetDAG();
        GPEngine& graft_gpengine = graft_inst.GetGPEngine();
        NNIEngine& graft_nniengine = graft_inst.GetNNIEngine();
        graft_dag.FullyConnect();
        graft_gpengine.GrowPLVs(graft_dag.NodeCountWithoutDAGRoot());
        graft_gpengine.GrowGPCSPs(graft_dag.EdgeCountWithLeafSubsplits());
        graft_gpengine.SetNullPrior();
        graft_inst.GetNNIEngine().GetGPEvalEngine().SetOptimizeNewEdges(
            do_optimize_new_branch_lengths);
        graft_gpengine.GetBranchLengthHandler().ApplyBranchLengthMap(predag_branchmap,
                                                                     graft_dag);
        GPInstanceRunGP(graft_inst, false, do_fixed_branch_lengths, false);
        // Compute likelihoods for graftDAG.
        graft_nniengine.SyncAdjacentNNIsWithDAG();
        graft_nniengine.GraftAdjacentNNIsToDAG();
        graft_nniengine.GrowEvalEngineForAdjacentNNIs(true, true);
        graft_gpengine.SetNullPrior();
        graft_nniengine.GetGPEvalEngine().SetOptimizationMaxIteration(20);
        for (const auto& [nni, pre_nni] : nni_to_prenni_map) {
          std::ignore = nni;
          auto pre_nni_llh = GPInstGetNNILikelihood(graft_inst, pre_nni);
          prenni_graftdag_likelihoods.insert({pre_nni, pre_nni_llh});
        }
        for (const auto& [nni, pre_nni] : nni_to_prenni_map) {
          // GraftDAG Instance.
          const auto [graft_llh, _] =
              graft_nniengine.GetGPEvalEngine().ComputeAdjacentNNILikelihood(nni);
          std::ignore = _;
          nni_graftdag_likelihoods.insert({nni, graft_llh});
          const auto graft_branchmap =
              graft_gpengine.GetBranchLengthHandler().BuildBranchLengthMap(graft_dag);
          graftdag_branchmaps[nni] = graft_branchmap;
          const auto graft_branches =
              graft_gpengine.GetBranchLengths(0, graft_gpengine.GetPaddedGPCSPCount());
          graftdag_branchlengths[nni] = graft_branches;
          // TruthDAG Instance: fresh DAG with the NNI actually added via
          // AddNodePair, engine grown and reindexed to match.
          auto truth_inst = GPInstanceOfFiles(fasta_path, newick_path,
                                              "_ignore/mmapped_pv_truth.data");
          truth_inst.MakeNNIEngine();
          auto& truth_dag = truth_inst.GetDAG();
          auto& truth_nniengine = truth_inst.GetNNIEngine();
          truth_dag.FullyConnect();
          auto& truth_gpengine = truth_inst.GetGPEngine();
          auto mods = truth_dag.AddNodePair(nni);
          auto node_reindexer_without_root =
              mods.node_reindexer.RemoveNewIndex(truth_dag.GetDAGRootNodeId().value_);
          truth_gpengine.GrowPLVs(truth_dag.NodeCountWithoutDAGRoot(),
                                  node_reindexer_without_root);
          truth_gpengine.GrowGPCSPs(truth_dag.EdgeCountWithLeafSubsplits(),
                                    mods.edge_reindexer);
          truth_gpengine.SetNullPrior();
          GPInstanceRunGP(truth_inst, false, do_fixed_branch_lengths, false);
          truth_gpengine.GetBranchLengthHandler().ApplyBranchLengthMap(predag_branchmap,
                                                                       truth_dag);
          if (!do_fixed_branch_lengths) {
            truth_nniengine.GetGPEvalEngine().CopyGPEngineDataAfterAddingNNI(pre_nni,
                                                                             nni);
          }
          if (do_optimize_new_branch_lengths) {
            truth_nniengine.GetGPEvalEngine().NNIBranchLengthOptimization(nni);
          }
          truth_inst.PopulatePLVs();
          truth_inst.ComputeLikelihoods();
          truth_inst.ComputeMarginalLikelihood();
          const auto truth_branchmap =
              truth_gpengine.GetBranchLengthHandler().BuildBranchLengthMap(truth_dag);
          truthdag_branchmaps[nni] = truth_branchmap;
          const auto truth_branches =
              truth_gpengine.GetBranchLengths(0, truth_gpengine.GetGPCSPCount());
          truthdag_branchlengths[nni] = truth_branches;
          auto pre_nni_llh = GPInstGetNNILikelihood(truth_inst, pre_nni);
          prenni_truthdag_likelihoods.insert({pre_nni, pre_nni_llh});
          auto truth_llh = GPInstGetNNILikelihood(truth_inst, nni);
          nni_truthdag_likelihoods.insert({nni, truth_llh});
        }
        // Tests that pre-NNIs that created new NNIs were unaltered.
        const double tolerance = 1e-3;
        for (const auto& [nni, pre_nni] : nni_to_prenni_map) {
          std::ignore = nni;
          const auto prenni_truth = prenni_truthdag_likelihoods.at(pre_nni);
          const auto prenni_graft = prenni_graftdag_likelihoods.at(pre_nni);
          const auto diff = std::abs(prenni_truth - prenni_graft);
          const auto passes_current_test = (diff < tolerance);
          if (!passes_current_test) {
            std::cout << "Pre NNI: FAIL -- " << prenni_truth << " " << prenni_graft
                      << std::endl;
          }
          passes_test = (passes_test & passes_current_test);
          CHECK_MESSAGE(diff < tolerance,
                        "Pre-NNI Likelihood from NNI Engine does not match truth.");
        }
        // Tests that adding new NNIs via GraftDAG produces same likelihood as
        // TruthDAG.
        for (const auto& [nni, pre_nni] : nni_to_prenni_map) {
          std::ignore = pre_nni;
          const auto nni_truth = nni_truthdag_likelihoods.at(nni);
          const auto nni_graft = nni_graftdag_likelihoods.at(nni);
          const auto diff = std::abs(nni_truth - nni_graft);
          const auto passes_current_test = (diff < tolerance);
          if (!passes_current_test) {
            std::cout << "Added NNI: FAIL -- " << nni_truth << " " << nni_graft
                      << std::endl;
          }
          passes_test = (passes_test & passes_current_test);
          CHECK_MESSAGE(diff < tolerance,
                        "NNI Likelihood from NNI Engine does not match truth.");
        }
        return passes_test;
      };
  // Test_0
  const std::string fasta_path_0 = "data/hello.fasta";
  const std::string newick_path_0 = "data/hello_rooted_diff_branches.nwk";
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_0, newick_path_0,
                                                            false, true),
                "Test_0a: Hello (with default branch lengths) failed.");
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_0, newick_path_0,
                                                            false, false),
                "Test_0b: Hello (without optimized branch lengths) failed.");
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_0, newick_path_0,
                                                            true, false),
                "Test_0c: Hello (with optimized branch lengths) failed.");
  // Test_1
  const std::string fasta_path_1 = "data/six_taxon_longer.fasta";
  const std::string newick_path_1 = "data/six_taxon_rooted_simple.nwk";
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_1, newick_path_1,
                                                            false, true),
                "Test_1a: Six Taxon (with default branch lengths) failed.");
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_1, newick_path_1,
                                                            false, false),
                "Test_1b: Six Taxon (without optimized branch lengths) failed.");
  // Bugfix: Test_1c previously passed (false, false) -- identical to Test_1b --
  // despite claiming to test optimized branch lengths. Pass (true, false) to
  // match the Test_0c pattern and actually enable new-edge optimization.
  CHECK_MESSAGE(CompareGPLikelihoodsOfProposedNNIsVsDAGNNIs(fasta_path_1, newick_path_1,
                                                            true, false),
                "Test_1c: Six Taxon (with optimized branch lengths) failed.");
}
// This checks that potential parent or child nodes found via subsplit map lookup match
// those found via brute force (linear scan) after adding or grafting nodes to DAG.
// Repeats tests after adding or grafting NNIs to DAG.
TEST_CASE("NNIEngine: Finding Parent and Child Nodes After Adding/Grafting Nodes") {
  // Diagnostics: when quiet, failure details are swallowed by dev_null.
  bool is_quiet = false;
  std::stringstream dev_null;
  std::ostream& os = (is_quiet ? dev_null : std::cerr);
  const std::string fasta_path = "data/five_taxon.fasta";
  const std::string newick_path_1 = "data/five_taxon_rooted.nwk";
  auto inst_1 = GPInstanceOfFiles(fasta_path, newick_path_1, "_ignore/mmapped_pv.data");
  inst_1.MakeTPEngine();
  inst_1.MakeNNIEngine();
  auto& dag_1 = inst_1.GetDAG();
  auto& nniengine_1 = inst_1.GetNNIEngine();
  auto& graftdag_1 = nniengine_1.GetGraftDAG();
  // Configure NNI search filtering (zero likelihood cutoff, then keep top-2),
  // then initialize the engine's run state.
  nniengine_1.SetTPLikelihoodCutoffFilteringScheme(0.0);
  nniengine_1.SetTopKScoreFilteringScheme(2);
  nniengine_1.RunInit();
  // Converts a {left, right} pair of NodeId vectors into sets so the
  // comparisons below are independent of the order node ids were found in.
  auto VectorToSet = [](const std::pair<NodeIdVector, NodeIdVector>& node_id_vector)
      -> std::pair<std::set<NodeId>, std::set<NodeId>> {
    auto [left, right] = node_id_vector;
    std::set<NodeId> left_set(left.begin(), left.end());
    std::set<NodeId> right_set(right.begin(), right.end());
    return {left_set, right_set};
  };
  // For one subsplit, checks that the DAG's default lookup, map-based lookup,
  // and brute-force scan all find the same parent and child node sets.
  auto TestDAGFindNodeIdsViaMapVSViaScan = [&os, &dag_1,
                                            &VectorToSet](const Bitset& subsplit) {
    bool test_passed = true;
    {
      // Parent lookups must agree across all three methods.
      auto [left, right] = VectorToSet(dag_1.FindParentNodeIds(subsplit));
      auto [left_map, right_map] = VectorToSet(dag_1.FindParentNodeIdsViaMap(subsplit));
      auto [left_scan, right_scan] =
          VectorToSet(dag_1.FindParentNodeIdsViaScan(subsplit));
      bool results_match = (left == left_scan and left_map == left_scan) and
                           (right == right_scan and right_map == right_scan);
      if (!results_match) {
        os << "DAG_FIND_PARENT_NODES: " << (results_match ? "PASS" : "FAIL")
           << std::endl;
        os << "LEFT: " << left_map << " " << left_scan << std::endl;
        os << "RIGHT: " << right_map << " " << right_scan << std::endl;
      }
      test_passed &= results_match;
    }
    {
      // Child lookups must agree across all three methods.
      auto [left, right] = VectorToSet(dag_1.FindChildNodeIds(subsplit));
      auto [left_map, right_map] = VectorToSet(dag_1.FindChildNodeIdsViaMap(subsplit));
      auto [left_scan, right_scan] =
          VectorToSet(dag_1.FindChildNodeIdsViaScan(subsplit));
      bool results_match = (left == left_scan and left_map == left_scan) and
                           (right == right_scan and right_map == right_scan);
      if (!results_match) {
        os << "DAG_FIND_CHILD_NODES: " << (results_match ? "PASS" : "FAIL")
           << std::endl;
        os << "LEFT: " << left_map << " " << left_scan << std::endl;
        os << "RIGHT: " << right_map << " " << right_scan << std::endl;
      }
      test_passed &= results_match;
    }
    return test_passed;
  };
  // Same three-way consistency check, but against the GraftDAG's lookups.
  auto TestGraftDAGFindNodeIdsViaMapVSViaScan = [&os, &graftdag_1,
                                                 &VectorToSet](const Bitset& subsplit) {
    bool test_passed = true;
    {
      auto [left, right] = VectorToSet(graftdag_1.FindParentNodeIds(subsplit));
      auto [left_map, right_map] =
          VectorToSet(graftdag_1.FindParentNodeIdsViaMap(subsplit));
      auto [left_scan, right_scan] =
          VectorToSet(graftdag_1.FindParentNodeIdsViaScan(subsplit));
      bool results_match = (left == left_scan and left_map == left_scan) and
                           (right == right_scan and right_map == right_scan);
      if (!results_match) {
        os << "GRAFTDAG_FIND_PARENT_NODES: " << (results_match ? "PASS" : "FAIL")
           << std::endl;
        os << "LEFT: " << left_map << " " << left_scan << std::endl;
        os << "RIGHT: " << right_map << " " << right_scan << std::endl;
      }
      test_passed &= results_match;
    }
    {
      auto [left, right] = VectorToSet(graftdag_1.FindChildNodeIds(subsplit));
      auto [left_map, right_map] =
          VectorToSet(graftdag_1.FindChildNodeIdsViaMap(subsplit));
      auto [left_scan, right_scan] =
          VectorToSet(graftdag_1.FindChildNodeIdsViaScan(subsplit));
      bool results_match = (left == left_scan and left_map == left_scan) and
                           (right == right_scan and right_map == right_scan);
      if (!results_match) {
        os << "GRAFTDAG_FIND_CHILD_NODES: " << (results_match ? "PASS" : "FAIL")
           << std::endl;
        os << "LEFT: " << left_map << " " << left_scan << std::endl;
        os << "RIGHT: " << right_map << " " << right_scan << std::endl;
      }
      test_passed &= results_match;
    }
    return test_passed;
  };
  // Baseline: lookups agree on the unmodified DAG.
  for (NodeId node_id{0}; node_id < dag_1.NodeCount(); node_id++) {
    auto subsplit = dag_1.GetDAGNodeBitset(node_id);
    CHECK_MESSAGE(TestDAGFindNodeIdsViaMapVSViaScan(subsplit),
                  "Finding nodes via map does not match finding nodes via scan (before "
                  "adding NNIs).");
  }
  // Iterate the NNI engine loop: check lookups on the grafted DAG (host DAG
  // plus temporary graft nodes), then again after accepted NNIs are
  // permanently added to the host DAG.
  size_t max_iter = 10;
  for (size_t iter = 0; iter < max_iter; iter++) {
    nniengine_1.GraftAdjacentNNIsToDAG();
    for (NodeId node_id{0}; node_id < graftdag_1.NodeCount(); node_id++) {
      auto subsplit = graftdag_1.GetDAGNodeBitset(node_id);
      CHECK_MESSAGE(
          TestGraftDAGFindNodeIdsViaMapVSViaScan(subsplit),
          "Finding nodes via map does not match finding nodes via scan (before "
          "adding NNIs).");
    }
    // One full filter pass, then commit accepted NNIs to the host DAG.
    nniengine_1.FilterPreScore();
    nniengine_1.FilterScoreAdjacentNNIs();
    nniengine_1.FilterPostScore();
    nniengine_1.FilterEvaluateAdjacentNNIs();
    nniengine_1.RemoveAllGraftedNNIsFromDAG();
    nniengine_1.AddAcceptedNNIsToDAG();
    for (NodeId node_id{0}; node_id < dag_1.NodeCount(); node_id++) {
      auto subsplit = dag_1.GetDAGNodeBitset(node_id);
      CHECK_MESSAGE(
          TestDAGFindNodeIdsViaMapVSViaScan(subsplit),
          "Finding nodes via map does not match finding nodes via scan (before "
          "adding NNIs).");
    }
    nniengine_1.RunPostLoop();
  }
}
// ** TPEngine tests **
// Makes and returns an SBNInstance. Used for truth test vs TPEngine likelihoods.
RootedSBNInstance MakeRootedSBNInstance(const std::string& newick_path,
                                        const std::string& fasta_path,
                                        PhyloModelSpecification& specification,
                                        const bool init_time_trees = true,
                                        const size_t thread_count = 1) {
  // Load trees and alignment, then build the likelihood engine with the given
  // phylo model specification. (init_time_trees is currently unused here.)
  RootedSBNInstance sbn_inst("demo_instance");
  sbn_inst.ReadNewickFile(newick_path, false);
  sbn_inst.ReadFastaFile(fasta_path);
  sbn_inst.ProcessLoadedTrees();
  sbn_inst.PrepareForPhyloLikelihood(specification, thread_count);
  return sbn_inst;
};
// Build GPInstance with TPEngine and NNIEngine.
// Build GPInstance with TPEngine and NNIEngine.
// Loads the fasta/newick files, constructs the GP, TP, and NNI engines, fills
// TP branch lengths with defaults, seeds the choice map by taking the first
// viable edge choice, and computes initial TP likelihood scores.
GPInstance MakeGPInstanceWithTPEngine(const std::string& fasta_path,
                                      const std::string& newick_path,
                                      const std::string& mmap_path) {
  // Make GPInstance and TPEngine.
  auto inst = GPInstanceOfFiles(fasta_path, newick_path, mmap_path);
  inst.MakeGPEngine();
  inst.MakeTPEngine();
  inst.MakeNNIEngine();
  // (Removed: an edge indexer that was built here but never used.)
  TPEngine& tpengine = inst.GetTPEngine();
  tpengine.GetLikelihoodEvalEngine()
      .GetDAGBranchHandler()
      .GetBranchLengths()
      .FillWithDefault();
  // Set choice map according to subsplit method or pcsp method.
  const bool use_subsplit_method = true;
  inst.TPEngineSetChoiceMapByTakingFirst(use_subsplit_method);
  tpengine.GetLikelihoodEvalEngine().Initialize();
  tpengine.GetLikelihoodEvalEngine().ComputeScores();
  return inst;
}
// Builds maps from tree_id->tree and edge_id->tree_id. TreeEdgeMap is determined by
// extracting the TPEngine's choicemap's topology for the given edge.
std::pair<TreeIdMap, TreeEdgeMap>
BuildTreeIdMapAndTreeEdgeMapFromGPInstanceAndChoiceMap(
    GPInstance& inst, bool apply_branch_lengths = false) {
  // final_tree_id_map receives only trees actually matched to some edge below;
  // tree_id_map is the raw per-edge extraction used for topology matching.
  TreeIdMap final_tree_id_map;
  TreeIdMap tree_id_map;
  TreeEdgeMap tree_edge_map;
  const auto& tp_engine = inst.GetTPEngine();
  // Pass 1: extract the choicemap's "top tree" for every edge (leaf subsplits
  // included). TreeIds mirror the originating EdgeId values.
  for (EdgeId edge_id(0); edge_id < inst.GetDAG().EdgeCountWithLeafSubsplits();
       edge_id++) {
    auto tree = tp_engine.GetTopTreeWithEdge(edge_id);
    tree_id_map.insert({TreeId(edge_id.value_), tree});
  }
  // Build tree_edge_map from tree_id_map.
  // Pass 2: for each non-root edge (leafward traversal), linearly scan the
  // extracted trees for one whose topology matches the edge's top topology
  // (O(E^2) topology comparisons overall).
  for (const EdgeId edge_id : inst.GetDAG().LeafwardEdgeTraversalTrace(false)) {
    if (inst.GetDAG().IsEdgeRoot(edge_id)) {
      continue;
    }
    // Get tree topology from TPEngine's choice map.
    auto topology = tp_engine.GetTopTopologyWithEdge(edge_id);
    bool is_found = false;
    for (const auto& [tree_id, tree] : tree_id_map) {
      if (topology == tree.Topology()) {
        tree_edge_map[edge_id] = tree_id;
        if (apply_branch_lengths) {
          // Rebuild the matched tree carrying the TPEngine's current branch
          // lengths instead of the extracted tree's lengths.
          auto& branch_handler =
              inst.GetTPEngine().GetLikelihoodEvalEngine().GetDAGBranchHandler();
          auto final_tree = DAGBranchHandler::BuildTreeWithBranchLengthsFromTopology(
              inst.GetDAG(), branch_handler, tree.Topology());
          final_tree_id_map.insert({tree_id, final_tree});
        } else {
          final_tree_id_map.insert({tree_id, tree});
        }
        is_found = true;
        break;
      }
    }
    Assert(is_found, "Could not find TPEngine topology in TreeCollection.");
  }
  return std::make_pair(final_tree_id_map, tree_edge_map);
}
// Build map from an edge in DAG to its TPEngine score, where each edge corresponds to
// "top tree" topology according to TPEngine's choicemap.
EdgeScoreMap BuildEdgeTPScoreMapFromInstance(GPInstance& inst,
                                             const TPEvalEngineType score_method) {
  EdgeScoreMap edge_scores;
  auto& tp_engine = inst.GetTPEngine();
  // Map each DAG edge to the tree id of its choicemap "top tree"; only the
  // edge map is consumed here.
  const auto& [tree_id_map, tree_edge_map] =
      BuildTreeIdMapAndTreeEdgeMapFromGPInstanceAndChoiceMap(inst);
  std::ignore = tree_id_map;
  // Fetches the top-tree score for every mapped edge from the given engine.
  auto score_all_edges = [&edge_scores, &tree_edge_map](auto& eval_engine) {
    for (const auto& [edge_id, tree_id] : tree_edge_map) {
      std::ignore = tree_id;
      edge_scores[edge_id] = eval_engine.GetTopTreeScoreWithEdge(edge_id);
    }
  };
  if (score_method == TPEvalEngineType::LikelihoodEvalEngine) {
    // Likelihood scores. NOTE: the likelihood engine is expected to already be
    // initialized by the caller (only ComputeScores is invoked here).
    auto& eval_engine = tp_engine.GetLikelihoodEvalEngine();
    eval_engine.ComputeScores();
    score_all_edges(eval_engine);
  } else if (score_method == TPEvalEngineType::ParsimonyEvalEngine) {
    // Parsimony scores; this engine is initialized in place.
    auto& eval_engine = tp_engine.GetParsimonyEvalEngine();
    eval_engine.Initialize();
    eval_engine.ComputeScores();
    score_all_edges(eval_engine);
  } else {
    Failwith("Given TPEvalEngineType is not valid.");
  }
  return edge_scores;
};
// Build map from an edge in the DAG to its TPEngine proposed NNI score, where each edge
// is from the "Post-DAG", a DAG which already contains all proposed NNIs.
// Build map from an edge in the "Post-DAG" (a DAG which already contains all
// proposed NNIs) to the TPEngine score of that NNI as proposed on `inst`'s DAG.
// Args:
//   inst: instance whose DAG does not yet contain the proposed NNIs.
//   post_inst: instance whose DAG already contains every proposed NNI; its
//     edge ids key the returned map.
//   score_method: which TP eval engine (likelihood or parsimony) to use.
EdgeScoreMap BuildProposedEdgeTPScoreMapFromInstance(
    GPInstance& inst, GPInstance& post_inst, const TPEvalEngineType score_method) {
  EdgeScoreMap tp_score_map;
  auto& nni_engine = inst.GetNNIEngine();
  // Refresh the engine's set of NNIs adjacent to the current DAG.
  nni_engine.SyncAdjacentNNIsWithDAG();
  auto& tpengine = inst.GetTPEngine();
  auto& post_dag = post_inst.GetDAG();
  // (Removed: pre/post node- and edge-id maps between the DAGs that were built
  // here but never used.)
  if (score_method == TPEvalEngineType::LikelihoodEvalEngine) {
    auto& llh_engine = tpengine.GetLikelihoodEvalEngine();
    llh_engine.Initialize();
    llh_engine.ComputeScores();
    auto best_edge_map = tpengine.BuildMapOfProposedNNIPCSPsToBestPreNNIEdges(
        nni_engine.GetAdjacentNNIs());
    for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
      // Score each proposed NNI relative to its best-matching neighbor already
      // in the DAG, keyed by the NNI's edge id in the post-DAG.
      const auto pre_nni = tpengine.FindHighestPriorityNeighborNNIInDAG(nni);
      const auto post_edge_id = post_dag.GetEdgeIdx(nni);
      double score =
          llh_engine.GetTopTreeScoreWithProposedNNI(nni, pre_nni, 0, best_edge_map);
      tp_score_map[post_edge_id] = score;
    }
  } else if (score_method == TPEvalEngineType::ParsimonyEvalEngine) {
    auto& parsimony_engine = tpengine.GetParsimonyEvalEngine();
    parsimony_engine.Initialize();
    parsimony_engine.ComputeScores();
    for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
      const auto& pre_nni = nni_engine.GetDAG().FindNNINeighborInDAG(nni);
      double score = parsimony_engine.GetTopTreeScoreWithProposedNNI(nni, pre_nni);
      auto edge_id = post_dag.GetEdgeIdx(nni);
      tp_score_map[edge_id] = score;
    }
  } else {
    // Consistent with BuildEdgeTPScoreMapFromInstance: fail loudly on an
    // unknown engine type rather than silently returning an empty map.
    Failwith("Given TPEvalEngineType is not valid.");
  }
  return tp_score_map;
}
// Build map from an edge in DAG to its score, where each edge corresponds
// to "top tree" topology according to TPEngine's choicemap, and its score is
// computed using a BEAGLE likelihood engine.
EdgeScoreMap BuildEdgeScoreMapFromInstanceUsingBeagleEngine(
    GPInstance& inst, TreeIdMap& tree_id_map, TreeEdgeMap& tree_edge_map) {
  // Recreate the instance's input trees in a RootedSBNInstance so the BEAGLE
  // engine can score them directly.
  const std::string newick_path = inst.GetNewickSourcePath();
  const std::string fasta_path = inst.GetFastaSourcePath();
  PhyloModelSpecification simple_spec{"JC69", "constant", "strict"};
  auto rooted_sbn_inst = MakeRootedSBNInstance(newick_path, fasta_path, simple_spec);
  auto& beagle_engine = *rooted_sbn_inst.GetEngine()->GetFirstFatBeagle();
  // Score the top tree associated with each edge.
  EdgeScoreMap score_map;
  for (const auto& [edge_id, tree_id] : tree_edge_map) {
    score_map[edge_id] = beagle_engine.UnrootedLogLikelihood(tree_id_map.at(tree_id));
  }
  return score_map;
}
// Build map from an edge in DAG to its score, where each edge corresponds
// to "top tree" topology according to TPEngine's choicemap, and its score is
// computed using a Sankoff Handler parsimony engine.
EdgeScoreMap BuildEdgeScoreMapFromInstanceUsingSankoffHandler(
    GPInstance& inst, TreeIdMap& tree_id_map, TreeEdgeMap& tree_edge_map) {
  // Sankoff handler supplies per-tree parsimony scores.
  auto site_pattern = inst.MakeSitePattern();
  auto sankoff_engine = SankoffHandler(site_pattern, "_ignore/sankoff_handler.data");
  EdgeScoreMap score_map;
  for (const auto& [edge_id, tree_id] : tree_edge_map) {
    // Run Sankoff on this edge's top-tree topology, then read off its score.
    sankoff_engine.RunSankoff(tree_id_map.at(tree_id).Topology());
    score_map[edge_id] = sankoff_engine.ParsimonyScore();
  }
  return score_map;
}
// For each edge in the test map, finds the closest score in the correct map
// (seeded by the correct map's value at the same edge id) and requires the
// difference to be within tolerance. Returns true if every test edge matches.
// Args:
//   score_map_test / score_map_correct: edge -> score maps to compare.
//   dag_test / dag_correct: currently unused; kept for interface stability.
//   is_quiet: when true, diagnostic output is suppressed.
bool TestCompareEdgeScoreMapToCorrectEdgeScoreMap(EdgeScoreMap& score_map_test,
                                                  GPDAG& dag_test,
                                                  EdgeScoreMap& score_map_correct,
                                                  GPDAG& dag_correct,
                                                  const bool is_quiet = true) {
  std::ignore = dag_test;
  std::ignore = dag_correct;
  bool is_equal = true;
  std::stringstream dev_null;
  std::ostream& os = (is_quiet ? dev_null : std::cerr);
  const double tolerance = 1e-3;
  for (const auto& [edge_id_test, score_test] : score_map_test) {
    // Seed with the same-edge comparison. Note: operator[] default-inserts 0.0
    // into score_map_correct when edge_id_test is absent (the two maps may key
    // different DAGs); the closest-match scan below overrides this baseline.
    // Use std::abs (not unqualified abs) so the double difference is not
    // truncated through the C int overload.
    double min_diff = std::abs(score_test - score_map_correct[edge_id_test]);
    double min_score_correct = score_map_correct[edge_id_test];
    EdgeId min_edge_id_correct = edge_id_test;
    // Check all edge scores in correct map for corresponding match in test map.
    for (const auto& [edge_id_correct, score_correct] : score_map_correct) {
      const auto diff = std::abs(score_test - score_correct);
      if (min_diff > diff) {
        min_diff = diff;
        min_score_correct = score_correct;
        min_edge_id_correct = edge_id_correct;
      }
    }
    if (min_diff > tolerance) {
      is_equal = false;
      os << ":: NNI_SCORE_FAIL :: Error: " << min_diff << std::endl;
      os << "CORRECT: " << min_edge_id_correct << ": " << min_score_correct
         << std::endl;
      os << "TEST: " << edge_id_test << ": " << score_test << std::endl;
    }
    CHECK_MESSAGE(min_diff <= tolerance,
                  "Score of Proposed NNIs in smaller DAG without NNIs did not match "
                  "score from larger DAG.");
  }
  if (!is_equal) {
    os << "NOT_EQUAL: " << std::endl;
    os << "CORRECT: " << score_map_correct << std::endl;
    os << "TEST: " << score_map_test << std::endl;
  }
  return is_equal;
}
// Compares the TPEngine's top tree scores for each given edge in the DAG.
// Tests likelihoods by comparing to the likelihoods from BEAGLE engine.
// Tests parsimonies by comparing to the parsimonies from Sankoff handler.
// Args:
//   inst: GPInstance with DAG, GPEngine, and TPEngine already constructed.
//   test_likelihood: compare TP likelihoods against BEAGLE per-tree truth.
//   test_parsimony: compare TP parsimonies against Sankoff per-tree truth.
//   test_pvs: additionally compare partial vectors (only meaningful for
//     single-tree DAGs, where GP/Sankoff and TP PVs coincide).
//   is_quiet: suppress diagnostic output.
// Returns true only if all requested comparisons pass.
bool CheckAllTPEngineScores(GPInstance& inst, const bool test_likelihood = true,
                            const bool test_parsimony = true,
                            const bool test_pvs = false, const bool is_quiet = true) {
  bool test_passes = true;
  std::stringstream dev_null;
  std::ostream& os = (is_quiet ? dev_null : std::cerr);
  // NOTE(review): 1e5 looks like a typo for 1e-5 (sibling tests use 1e-3), but
  // tightening it would change which runtime checks pass, so it is kept as-is.
  // TODO confirm intended tolerance.
  const double tolerance = 1e5;
  auto& dag = inst.GetDAG();
  auto& gpengine = inst.GetGPEngine();
  auto& tpengine = inst.GetTPEngine();
  auto site_pattern = inst.MakeSitePattern();
  const auto& [tree_id_map, tree_edge_map] =
      BuildTreeIdMapAndTreeEdgeMapFromGPInstanceAndChoiceMap(inst);
  // Check that scores from TPEngine match the correct scores from the individual
  // trees. Note, if the test only contains a single tree, then this amounts to
  // checking if each edge's likelihood matches that one tree.
  auto TestMatchingScores = [&os, is_quiet, &tree_edge_map, &test_passes, &tolerance](
                                const std::string& test_name,
                                TreeScoreMap& correct_tree_score_map,
                                EdgeScoreMap& tp_score_map) {
    for (const auto& [edge_id, tree_id] : tree_edge_map) {
      const auto tp_score = tp_score_map[edge_id];
      const auto correct_score = correct_tree_score_map[tree_id];
      // Use std::abs (not unqualified abs) so the double difference is not
      // truncated through the C int overload.
      double min_error = std::abs(tp_score - correct_score);
      CHECK_LT(min_error, tolerance);
      if (min_error > tolerance) {
        test_passes = false;
        os << "::" << test_name << "_FAILURE:: EdgeId: " << edge_id
           << ", TP_Score: " << tp_score_map[edge_id]
           << ", Correct_Score: " << correct_tree_score_map[tree_id]
           << ", Error: " << min_error << std::endl;
      }
    }
    if (!test_passes) {
      os << "TestMatchingScore: " << tp_score_map.size() << std::endl;
      os << "TP_Scores: " << tp_score_map.size() << " " << tp_score_map << std::endl;
      os << "Correct_Score: " << correct_tree_score_map.size() << " "
         << correct_tree_score_map << std::endl;
    }
  };
  // Run tests comparing TPEngine for computing likelihood vs. true tree
  // likelihood computed via BEAGLE Engine.
  if (test_likelihood) {
    TreeScoreMap correct_tree_likelihood_map;
    EdgeScoreMap tp_likelihood_map;
    // BEAGLE Engine for correct tree likelihoods.
    const std::string newick_path = inst.GetNewickSourcePath();
    const std::string fasta_path = inst.GetFastaSourcePath();
    PhyloModelSpecification simple_spec{"JC69", "constant", "strict"};
    auto rooted_sbn_inst = MakeRootedSBNInstance(newick_path, fasta_path, simple_spec);
    auto& beagle_engine = *rooted_sbn_inst.GetEngine()->GetFirstFatBeagle();
    for (const auto& [tree_id, tree] : tree_id_map) {
      auto correct_likelihood = beagle_engine.UnrootedLogLikelihood(tree);
      correct_tree_likelihood_map[tree_id] = correct_likelihood;
    }
    // Check against likelihoods with TPEngine.
    for (const auto& [edge_id, tree_id] : tree_edge_map) {
      std::ignore = tree_id;
      auto likelihood =
          tpengine.GetLikelihoodEvalEngine().GetTopTreeScoreWithEdge(edge_id);
      tp_likelihood_map[edge_id] = likelihood;
    }
    // Check that scores from TPEngine match the correct scores from the individual
    // trees computed by BEAGLE engine.
    TestMatchingScores(std::string("LIKELIHOODS"), correct_tree_likelihood_map,
                       tp_likelihood_map);
    // Compare GP and TP partial vectors. Note, this test is only relevant with single
    // trees, as GP and TP PVs are only equal in the case of single tree DAGs.
    // (Fixed: collapsed a doubly-redundant `if (test_likelihood && test_pvs)`
    // plus nested `if (test_pvs)`; we are already inside `if (test_likelihood)`.)
    if (test_pvs) {
      auto& tp_pvs = tpengine.GetLikelihoodPVs();
      auto& gp_pvs = gpengine.GetPLVHandler();
      // Fixed: combine with prior results rather than overwriting test_passes,
      // which previously discarded any earlier failure.
      test_passes = test_passes && (tp_pvs.GetCount() == gp_pvs.GetCount());
      for (const auto& pv_type : PLVTypeEnum::Iterator()) {
        for (EdgeId edge_id = 0; edge_id < dag.EdgeCountWithLeafSubsplits();
             edge_id++) {
          NodeId child_id = dag.GetDAGEdge(edge_id).GetChild();
          bool is_equal =
              (tp_pvs.GetPV(pv_type, edge_id) == gp_pvs.GetPV(pv_type, child_id));
          if (!is_equal) {
            test_passes = false;
            os << "!!! *** NOT EQUAL ***" << std::endl;
            os << "TP_" << tp_pvs.ToString(pv_type, edge_id);
            os << "GP_" << gp_pvs.ToString(pv_type, child_id);
          }
        }
      }
    }
  }
  // Run tests comparing TPEngine infastructure for computing parsimony over DAG via
  // Sankoff vs. true trees via Sankoff Handler. (Sankoff Handler tests are already
  // been tested on full trees in these doctests using trees with known parsimonies,
  // so we can trust it to generate correct tree parsimonies for testing the
  // TPEngine.)
  if (test_parsimony) {
    TreeScoreMap correct_tree_parsimony_map;
    EdgeScoreMap tp_parsimony_map;
    // Sankoff Handler for correct tree parsimonies.
    SankoffHandler sankoff_engine(site_pattern, "_ignore/mmapped_pv.sankoff.data");
    for (const auto& [tree_id, tree] : tree_id_map) {
      sankoff_engine.RunSankoff(tree.Topology());
      auto correct_parsimony = sankoff_engine.ParsimonyScore(tree.Topology()->Id());
      correct_tree_parsimony_map[tree_id] = correct_parsimony;
    }
    // Compute parsimonies with TPEngine.
    for (const auto& [edge_id, tree_id] : tree_edge_map) {
      std::ignore = tree_id;
      auto parsimony =
          tpengine.GetParsimonyEvalEngine().GetTopTreeScoreWithEdge(edge_id);
      tp_parsimony_map[edge_id] = parsimony;
    }
    // Check that scores from TPEngine match the correct scores from the individual
    // trees computed by Sankoff Handler.
    TestMatchingScores(std::string("PARSIMONY"), correct_tree_parsimony_map,
                       tp_parsimony_map);
    // Compare Sankoff and TP partial vectors (single-tree DAGs only).
    if (test_pvs) {
      auto& tp_pvs = tpengine.GetParsimonyPVs();
      auto& sankoff_pvs = sankoff_engine.GetPSVHandler();
      for (const auto& pv_type : PSVTypeEnum::Iterator()) {
        for (EdgeId edge_id = 0; edge_id < tp_pvs.GetCount(); edge_id++) {
          NodeId child_id = dag.GetDAGEdge(edge_id).GetChild();
          bool is_equal =
              (tp_pvs.GetPV(pv_type, edge_id) == sankoff_pvs.GetPV(pv_type, child_id));
          if (!is_equal) {
            test_passes = false;
            os << "!!! *** NOT EQUAL ***" << std::endl;
            os << "TP_" << tp_pvs.ToString(pv_type, edge_id);
            os << "SANKOFF_" << sankoff_pvs.ToString(pv_type, child_id);
          }
        }
      }
    }
  }
  return test_passes;
}
// Compare TPEngine's top tree score (likelihood or parsimony) in DAG for each edge to
// the collection of verified true tree scores that come from the collection of trees
// that created the DAG. In the single tree cases, can compare the partial vectors of
// verified method vs TPEngine method. Note: The input newick file does not need to
// contain every possible tree expressible in the DAG. The input tree collection only
// needs to be ordered in terms of score. The DAG edges will then each be assigned
// according to the best/first tree containing the given edge.
bool TestTPEngineScoresAndPVs(const std::string& fasta_path,
                              const std::string& newick_path,
                              const bool test_likelihood = true,
                              const bool test_parsimony = true,
                              const bool test_pvs = false, const bool is_quiet = true) {
  // Build a GPInstance with GP and TP engines over the given alignment/trees.
  auto gp_inst = GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmapped_pv.data");
  gp_inst.MakeDAG();
  gp_inst.MakeGPEngine();
  gp_inst.MakeTPEngine();
  auto& the_dag = gp_inst.GetDAG();
  auto& gp_engine = gp_inst.GetGPEngine();
  auto& tp_engine = gp_inst.GetTPEngine();
  auto complete_trees = gp_inst.GenerateCompleteRootedTreeCollection();
  auto dag_edge_indexer = the_dag.BuildEdgeIndexer();
  // Optimize branch lengths and populate GP likelihoods.
  gp_inst.EstimateBranchLengths(0.00001, 100, true);
  gp_inst.PopulatePLVs();
  gp_inst.ComputeLikelihoods();
  // Hand GP branch lengths to the TPEngine and seed its choice map from the
  // complete tree collection, then compute the requested scores.
  tp_engine.SetBranchLengths(gp_engine.GetBranchLengths());
  tp_engine.SetChoiceMapByTakingFirst(complete_trees, dag_edge_indexer);
  if (test_likelihood) {
    tp_engine.GetLikelihoodEvalEngine().Initialize();
    tp_engine.GetLikelihoodEvalEngine().ComputeScores();
  }
  if (test_parsimony) {
    tp_engine.GetParsimonyEvalEngine().Initialize();
    tp_engine.GetParsimonyEvalEngine().ComputeScores();
  }
  // Compare TPEngine scores (and optionally PVs) against per-tree truth.
  return CheckAllTPEngineScores(gp_inst, test_likelihood, test_parsimony, test_pvs,
                                is_quiet);
};
// Initializes a ChoiceMap for a DAG. Then uses a naive method that picks the first
// listed neighbor for each parent, sister, left and right child. Tests that results
// is a valid selection (all edges have mapped valid edge choices, except for root
// and leaves).
// - Tests that invalid TreeMask are found invalid.
// - Tests that invalid TreeMasks causes Topology to throw exception.
// - Creates TreeMask and Node Topology for each edge in DAG, a list of edge ids
// which represent a embedded tree in the DAG.
// - Tests that TreeMask is valid state for the DAG.
// - Tests that resulting Topology exists in the DAG.
TEST_CASE("TPEngine: ChoiceMap") {
  const std::string fasta_path = "data/six_taxon_longer.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  auto inst = GPInstanceOfFiles(fasta_path, newick_path, "_ignore/mmapped_pv.data");
  auto dag = inst.GetDAG();
  auto choice_map = TPChoiceMap(dag);
  // An unpopulated choice map must report an invalid selection.
  CHECK_FALSE_MESSAGE(choice_map.SelectionIsValid(),
                      "ChoiceMap selection was incorrectly found valid.");
  choice_map.SelectFirstEdge();
  CHECK_MESSAGE(choice_map.SelectionIsValid(),
                "ChoiceMap selection was found invalid.");
  // Test for fail states for invalid TreeMasks.
  TPChoiceMap::TreeMask tree_mask;
  Node::NodePtr topology;
  NodeIdVector tree_nodes;
  bool quiet_errors = true;
  // NOTE(review): tree_mask is empty at this point, so this loop is a no-op
  // and tree_nodes remains empty; it appears intended to run after an
  // ExtractTreeMask call. Left unchanged pending confirmation of intent.
  for (const auto edge_id : tree_mask) {
    tree_nodes.push_back(dag.GetDAGEdge(edge_id).GetParent());
    tree_nodes.push_back(dag.GetDAGEdge(edge_id).GetChild());
  }
  // Empty tree.
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid when empty.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when empty.");
  // Complete DAG.
  for (EdgeId edge_id = EdgeId(0); edge_id < dag.EdgeCountWithLeafSubsplits();
       edge_id++) {
    tree_mask.insert(edge_id);
  }
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid with full DAG.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when missing root edge.");
  // Tree missing root node.
  tree_mask = choice_map.ExtractTreeMask(EdgeId(0));
  for (const auto edge_id : tree_mask) {
    if (dag.IsEdgeRoot(edge_id)) {
      tree_mask.erase(tree_mask.find(edge_id));
      break;
    }
  }
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid when missing root edge.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when missing root edge.");
  // Tree missing leaf node.
  tree_mask = choice_map.ExtractTreeMask(EdgeId(0));
  for (const auto edge_id : tree_mask) {
    if (dag.IsEdgeLeaf(edge_id)) {
      tree_mask.erase(tree_mask.find(edge_id));
      break;
    }
  }
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid when missing leaf edge.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when missing leaf edge.");
  // Tree missing internal edge.
  tree_mask = choice_map.ExtractTreeMask(EdgeId(0));
  for (const auto edge_id : tree_mask) {
    // Fixed: condition previously read `!IsEdgeLeaf && !IsEdgeLeaf` (same term
    // twice), so it could remove a root edge and merely duplicate the
    // missing-root case above. An internal edge is neither root nor leaf.
    if (!dag.IsEdgeRoot(edge_id) && !dag.IsEdgeLeaf(edge_id)) {
      tree_mask.erase(tree_mask.find(edge_id));
      break;
    }
  }
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid when missing internal edge.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when missing internal edge.");
  // Tree contains extra edge.
  tree_mask = choice_map.ExtractTreeMask(EdgeId(0));
  for (EdgeId edge_id = EdgeId(0); edge_id < dag.EdgeCountWithLeafSubsplits();
       edge_id++) {
    const auto contains_edge = (tree_mask.find(edge_id) != tree_mask.end());
    if (!contains_edge) {
      const auto& parent_id = dag.GetDAGEdge(edge_id).GetParent();
      const auto contains_parent = (std::find(tree_nodes.begin(), tree_nodes.end(),
                                              parent_id) != tree_nodes.end());
      const auto& child_id = dag.GetDAGEdge(edge_id).GetChild();
      const auto contains_child = (std::find(tree_nodes.begin(), tree_nodes.end(),
                                             child_id) != tree_nodes.end());
      if (!contains_parent && !contains_child) {
        tree_mask.insert(edge_id);
        break;
      }
    }
  }
  CHECK_FALSE_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                      "TreeMask is incorrectly valid when containing an extra edge.");
  CHECK_THROWS_MESSAGE(choice_map.ExtractTopology(tree_mask),
                       "Tree is incorrectly valid when containing an extra edge.");
  // Valid topology that exists in the DAG.
  // ((x0,x1),(x2,((x3,x4),x5)));
  topology = Node::Join(
      Node::Join(Node::Leaf(0, 6), Node::Leaf(1, 6)),
      Node::Join(
          Node::Join(Node::Leaf(2, 6), Node::Join(Node::Leaf(3, 6), Node::Leaf(4, 6))),
          Node::Leaf(5, 6)));
  CHECK_MESSAGE(dag.ContainsTopology(topology, quiet_errors),
                "Incorrectly could not find topology that exists in DAG.");
  // Valid topology that does not exist in the DAG.
  // (((x0,x1),(x2,x3)),(x4, x5))
  topology = Node::Join(Node::Join(Node::Join(Node::Leaf(0, 6), Node::Leaf(1, 6)),
                                   Node::Join(Node::Leaf(2, 6), Node::Leaf(3, 6))),
                        Node::Join(Node::Leaf(4, 6), Node::Leaf(5, 6)));
  CHECK_FALSE_MESSAGE(dag.ContainsTopology(topology, quiet_errors),
                      "Incorrectly found topology that does not exist in DAG.");
  // Incomplete topology -- does not terminate at a leaf.
  // ((x0,x1),(x2,x3_4),x5)));
  topology = Node::Join(
      Node::Join(Node::Leaf(0, 6), Node::Leaf(1, 6)),
      Node::Join(Node::Join(Node::Leaf(2, 6), Node::Leaf(34, Bitset("000110"))),
                 Node::Leaf(5, 6)));
  CHECK_FALSE_MESSAGE(dag.ContainsTopology(topology, quiet_errors),
                      "Incorrectly found incomplete topology in DAG.");
  // Incomplete topology -- subtree with missing taxa.
  // (x2,((x3,x4),x5))
  topology = Node::Join(
      Node::Join(Node::Leaf(2, 6), Node::Join(Node::Leaf(3, 6), Node::Leaf(4, 6))),
      Node::Leaf(5, 6));
  CHECK_FALSE_MESSAGE(dag.ContainsTopology(topology, quiet_errors),
                      "Incorrectly found subtree topology in DAG.");
  // Test TreeMasks created from all DAG edges result in valid tree.
  for (EdgeId edge_id = EdgeId(0); edge_id < dag.EdgeCountWithLeafSubsplits();
       edge_id++) {
    const auto tree_mask = choice_map.ExtractTreeMask(edge_id);
    const auto topology = choice_map.ExtractTopology(edge_id);
    CHECK_MESSAGE(tree_mask.find(edge_id) != tree_mask.end(),
                  "TreeMask did not contain given central edge.");
    CHECK_MESSAGE(choice_map.TreeMaskIsValid(tree_mask, quiet_errors),
                  "Edge resulted in an invalid TreeMask.");
    CHECK_MESSAGE(dag.ContainsTopology(topology, quiet_errors),
                  "Edge resulted in an invalid Topology not contained in DAG.");
  }
}
// Initializes the TPEngine choice map and retrieves the top tree for each edge in
// DAG. Then finds all trees contained in the DAG and verifies that each top tree
// produced is a tree from the DAG.
TEST_CASE("TPEngine: Initialize TPEngine and ChoiceMap") {
  const std::string fasta_path = "data/six_taxon.fasta";
  const std::string newick_path = "data/six_taxon_rooted_simple.nwk";
  auto inst = GPInstanceOfFiles(fasta_path, newick_path);
  GPDAG& dag = inst.GetDAG();
  inst.EstimateBranchLengths(0.00001, 100, true);
  auto all_trees = inst.GenerateCompleteRootedTreeCollection();
  SitePattern site_pattern = inst.MakeSitePattern();
  TPEngine tpengine = TPEngine(dag, site_pattern, "_ignore/mmapped_pv.tpl.data",
                               "_ignore/mmapped_pv.tpp.data");
  tpengine.InitializeChoiceMap();
  // For each DAG edge, the choicemap's top topology must appear in the
  // complete collection of trees expressible in the DAG.
  for (EdgeId edge_id = 0; edge_id < dag.EdgeCountWithLeafSubsplits(); edge_id++) {
    const auto top_topology = tpengine.GetTopTopologyWithEdge(edge_id);
    bool found_in_collection = false;
    for (const auto& tree : all_trees.Trees()) {
      if (tree.Topology() == top_topology) {
        found_in_collection = true;
        break;
      }
    }
    CHECK_MESSAGE(found_in_collection, "Top Tree does not exist in DAG.");
  }
}
// Builds a TPEngine instance from a set of input trees. Then populates TPEngine's PVs
// and computes the top tree likelihood for each edge in the DAG. Compares these
// likelihoods against the tree's likelihood computed using BEAGLE engine.
TEST_CASE("TPEngine: TPEngine Likelihood scores vs BEAGLE Likelihood scores") {
  // Likelihood-only wrapper around the shared score/PV comparison helper.
  auto RunLikelihoodComparison = [](const std::string& fasta_file,
                                    const std::string& newick_file,
                                    const bool check_pvs) -> bool {
    return TestTPEngineScoresAndPVs(fasta_file, newick_file, true, false, check_pvs,
                                    false);
  };
  // Input files.
  const std::string fasta_hello = "data/hello_short.fasta";
  const std::string newick_hello = "data/hello_rooted.nwk";
  const std::string fasta_six = "data/six_taxon.fasta";
  const std::string newick_six_single = "data/six_taxon_rooted_single.nwk";
  const std::string newick_six_simple = "data/six_taxon_rooted_simple.nwk";
  // Test cases.
  CHECK_MESSAGE(RunLikelihoodComparison(fasta_hello, newick_hello, true),
                "Hello Example Single Tree failed.");
  CHECK_MESSAGE(RunLikelihoodComparison(fasta_six, newick_six_single, true),
                "Six Taxa Single Tree failed.");
  CHECK_MESSAGE(RunLikelihoodComparison(fasta_six, newick_six_simple, false),
                "Six Taxa Multi Tree failed.");
}
// Builds a TPEngine instance from a set of input trees. Then populates TPEngine's PVs
// and computes the top tree parsimony for each edge in the DAG. Compares these
// likelihoods against the tree's likelihood computed using the `sankoff handler`.
TEST_CASE("TPEngine: TPEngine Parsimony scores vs SankoffHandler Parsimony scores") {
  // Parsimony-only wrapper around the shared score/PV comparison helper.
  auto RunParsimonyComparison = [](const std::string& fasta_file,
                                   const std::string& newick_file,
                                   const bool check_pvs) -> bool {
    return TestTPEngineScoresAndPVs(fasta_file, newick_file, false, true, check_pvs,
                                    false);
  };
  // Input files.
  const std::string fasta_ex = "data/parsimony_leaf_seqs.fasta";
  const std::string newick_ex = "data/parsimony_tree_0_score_75.0.nwk";
  const std::string fasta_hello = "data/hello_short.fasta";
  const std::string newick_hello = "data/hello_rooted.nwk";
  const std::string fasta_six = "data/six_taxon.fasta";
  const std::string newick_six_single = "data/six_taxon_rooted_single.nwk";
  const std::string newick_six_simple = "data/six_taxon_rooted_simple.nwk";
  const std::string fasta_five = "data/five_taxon.fasta";
  const std::string newick_five_more = "data/five_taxon_rooted_more.nwk";
  // Test cases.
  CHECK_MESSAGE(RunParsimonyComparison(fasta_ex, newick_ex, false),
                "Parsimony Test Case Tree failed.");
  CHECK_MESSAGE(RunParsimonyComparison(fasta_hello, newick_hello, false),
                "Hello Example Single Tree failed.");
  CHECK_MESSAGE(RunParsimonyComparison(fasta_six, newick_six_single, false),
                "Six Taxa Tree failed.");
  CHECK_MESSAGE(RunParsimonyComparison(fasta_six, newick_six_simple, false),
                "Six Taxa Multi Tree failed.");
  CHECK_MESSAGE(RunParsimonyComparison(fasta_five, newick_five_more, false),
                "Five Taxa Many Trees failed.");
}
// Creates an instance of TPEngine for two DAGs: DAG_1, a simple DAG, and DAG_2, a DAG
// formed from DAG_1 plus all of its adjacent NNIs. Both DAGs PVs are populated and
// their edge TP likelihoods are computed. Then DAG_1's adjacent proposed NNI
// likelihoods are computed using only PVs from the pre-NNI already contained in
// DAG_1.
// Finally, we compare the results of the proposed NNIs from DAG_1 with the known
// likelihoods of the actual NNIs already contained in DAG_2. This verifies we
// generate the same result from adding NNIs to the DAG and updating as we do from
// using the pre-NNI computation.
TEST_CASE("TPEngine: Proposed NNI vs DAG NNI vs BEAGLE Likelihood") {
  bool do_print_all = false;
  const double tol = 1e-5;
  // Build NNIEngine from DAG that does not include NNIs. Compute likelihoods.
  auto CompareProposedNNIvsDAGNNIvsBEAGLE =
      [do_print_all, tol](const std::string& fasta_path, const std::string& newick_path,
                          const bool optimize_branch_lengths = true,
                          const bool take_first_branch_lengths = true) {
        bool test_passes = true;
        // Instance for computing proposed scores.
        auto inst_1 = MakeGPInstanceWithTPEngine(fasta_path, newick_path,
                                                 "_ignore/mmapped_pv.1.data");
        // Instance for computing internal DAG scores for comparison.
        auto inst_2 = MakeGPInstanceWithTPEngine(fasta_path, newick_path,
                                                 "_ignore/mmapped_pv.2.data");
        auto& nniengine_1 = inst_1.GetNNIEngine();
        auto& dag_2 = inst_2.GetDAG();
        auto& nniengine_2 = inst_2.GetNNIEngine();
        auto& tpengine_2 = inst_2.GetTPEngine();
        // Set nni engine to use TP likelihoods.
        nniengine_1.SetTPLikelihoodCutoffFilteringScheme(0.0);
        nniengine_1.SetNoFilter(true);
        nniengine_2.SetTPLikelihoodCutoffFilteringScheme(0.0);
        nniengine_2.SetNoFilter(true);
        // Copy branch lengths from input trees.
        if (take_first_branch_lengths) {
          inst_1.TPEngineSetBranchLengthsByTakingFirst();
          inst_2.TPEngineSetBranchLengthsByTakingFirst();
          inst_1.TPEngineSetChoiceMapByTakingFirst();
          inst_2.TPEngineSetChoiceMapByTakingFirst();
        }
        inst_1.GetTPEngine().GetLikelihoodEvalEngine().SetOptimizeNewEdges(
            optimize_branch_lengths);
        inst_2.GetTPEngine().GetLikelihoodEvalEngine().SetOptimizeNewEdges(
            optimize_branch_lengths);
        inst_1.GetTPEngine().GetLikelihoodEvalEngine().SetOptimizationMaxIteration(1);
        inst_2.GetTPEngine().GetLikelihoodEvalEngine().SetOptimizationMaxIteration(1);
        // Add all NNIs to DAG.
        nniengine_2.SetNoFilter(true);
        nniengine_2.RunInit(true);
        CHECK_MESSAGE(tpengine_2.GetChoiceMap().SelectionIsValid(false),
                      "ChoiceMap is not valid before adding NNIs.");
        nniengine_2.RunMainLoop(true);
        nniengine_2.RunPostLoop(true);
        CHECK_MESSAGE(tpengine_2.GetChoiceMap().SelectionIsValid(false),
                      "ChoiceMap is not valid after adding NNIs.");
        // Report all NNIs.
        nniengine_1.SyncAdjacentNNIsWithDAG();
        // Likelihoods of expanded DAG with proposed NNIs.
        auto likelihoods_post = BuildEdgeTPScoreMapFromInstance(
            inst_2, TPEvalEngineType::LikelihoodEvalEngine);
        // Likelihoods of base DAG.
        auto likelihoods_pre = BuildEdgeTPScoreMapFromInstance(
            inst_1, TPEvalEngineType::LikelihoodEvalEngine);
        // Likelihoods DAG's adjacent proposed NNIs.
        auto likelihoods_proposed = BuildProposedEdgeTPScoreMapFromInstance(
            inst_1, inst_2, TPEvalEngineType::LikelihoodEvalEngine);
        // Compute top tree scores using BEAGLE engine.
        auto [tree_id_map, edge_id_map] =
            BuildTreeIdMapAndTreeEdgeMapFromGPInstanceAndChoiceMap(inst_2, true);
        auto beagle = BuildEdgeScoreMapFromInstanceUsingBeagleEngine(
            inst_2, tree_id_map, edge_id_map);
        std::unordered_map<TreeId, std::string> newick_id_map;
        for (const auto& [tree_id, tree] : tree_id_map) {
          newick_id_map[tree_id] = dag_2.TreeToNewickTree(tree);
        }
        // Compare likelihoods by edge_id.
        for (const auto& nni : nniengine_1.GetAdjacentNNIs()) {
          auto edge_id = dag_2.GetEdgeIdx(nni);
          auto score_proposed = likelihoods_proposed[edge_id];
          auto score_dag = likelihoods_post[edge_id];
          auto score_tree = beagle[edge_id];
          // std::abs: the scores are doubles, so the unqualified abs used previously
          // could resolve to the integer overload and truncate the difference
          // (consistent with the std::abs calls in the failure report below).
          bool matches_score_dag = (std::abs(score_proposed - score_dag) < tol);
          bool matches_score_tree = (std::abs(score_proposed - score_tree) < tol);
          bool matches_dag_tree = (std::abs(score_dag - score_tree) < tol);
          test_passes &= matches_score_dag;
          test_passes &= matches_score_tree;
          test_passes &= matches_dag_tree;
          if (!matches_score_dag or !matches_score_tree) {
            std::cout << "SCORE_FAILED_NNI: " << nni << std::endl;
            std::cout << "SCORE_FAILED_PROPOSED: " << score_tree << " vs "
                      << score_proposed << ": " << std::abs(score_tree - score_proposed)
                      << std::endl;
            std::cout << "SCORE_FAILED_DAG: " << score_tree << " vs " << score_dag
                      << ": " << std::abs(score_tree - score_dag) << std::endl;
          } else if (do_print_all) {
            std::cout << "SCORE_PASS: " << score_tree << std::endl;
          }
        }
        return test_passes;
      };
  const std::string fasta_path_0 = "data/hello.fasta";
  const std::string newick_path_0 = "data/hello_rooted_diff_branches.nwk";
  CHECK_MESSAGE(
      CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_0, newick_path_0, false, false),
      "Test_0a: hello (with fixed branch lengths) failed.");
  CHECK_MESSAGE(
      CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_0, newick_path_0, false, true),
      "Test_0b: hello (without optimized branch lengths) failed.");
  CHECK_MESSAGE(
      CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_0, newick_path_0, true, true),
      "Test_0c: hello (with optimized branch lengths) failed.");
  const std::string fasta_path_1 = "data/five_taxon.fasta";
  const std::string newick_path_1 = "data/five_taxon_trees_3_4_diff_branches.nwk";
  CHECK_MESSAGE(
      CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_1, newick_path_1, false, false),
      "Test_1a: five_taxon_simple (with fixed branch lengths) failed.");
  CHECK_MESSAGE(
      CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_1, newick_path_1, false, true),
      "Test_1b: five_taxon_simple (without optimized branch lengths) failed.");
  CHECK_MESSAGE(CompareProposedNNIvsDAGNNIvsBEAGLE(fasta_path_1, newick_path_1, true),
                "Test_1c: five_taxon_simple (with optimized branch lengths) failed.");
}
// Builds TPEngine from single tree DAG, then run branch length optimization.
// Compares results to GPEngine's branch length optimized on the same tree (GP is
// equivalent to traditional likelihood in the single tree case).
TEST_CASE("TPEngine: Branch Length Optimization") {
  GPInstance inst = MakeHelloGPInstance();
  // Both engines are seeded with the same starting branch lengths.
  EigenVectorXd seed_branch_lengths(5);
  seed_branch_lengths << 0, 0.22, 0.113, 0.15, 0.1;
  OptimizationMethod method = OptimizationMethod::BrentOptimization;
  bool is_quiet = true;
  bool track_intermediate_values = false;
  // GPEngine: seed and optimize.
  GPEngine& gp_engine = inst.GetGPEngine();
  gp_engine.SetBranchLengths(seed_branch_lengths);
  inst.EstimateBranchLengths(0.0001, 100, is_quiet, track_intermediate_values, method);
  // TPEngine: seed and optimize.
  inst.MakeTPEngine();
  TPEngine& tp_engine = inst.GetTPEngine();
  tp_engine.GetLikelihoodEvalEngine().GetDAGBranchHandler().SetBranchLengths(
      seed_branch_lengths);
  inst.TPEngineEstimateBranchLengths(0.0001, 100, is_quiet, track_intermediate_values,
                                     method);
  // Capture Results.
  const auto& gpengine_data = gp_engine.GetBranchLengths();
  const auto& tpengine_data = tp_engine.GetBranchLengths();
  // Compare TPEngine results to GPEngine results via max element-wise difference.
  double tol = 1e-3;
  double max_diff = 0;
  for (size_t i = 0; i < size_t(gpengine_data.size()); i++) {
    // std::abs: the differences are doubles; the previous unqualified abs could
    // resolve to the integer overload and silently truncate the difference.
    if (double diff = std::abs(tpengine_data[i] - gpengine_data[i]); max_diff < diff) {
      max_diff = diff;
    }
  }
  // Report results.
  if (max_diff > tol) {
    std::cout << "=== TEST FAILED ===" << std::endl;
    std::cout << "max_diff: " << max_diff << std::endl;
    std::cout << "tpengine_lengths:" << tpengine_data << std::endl;
    std::cout << "gpengine_lengths:" << gpengine_data << std::endl;
    std::cout << "tp_likelihood " << tp_engine.GetTopTreeLikelihood(EdgeId(1))
              << std::endl;
    std::cout << "gp_likelihood: " << gp_engine.GetLogMarginalLikelihood() << std::endl;
    // Diagnostic: re-score the TPEngine using GP's optimized branch lengths.
    auto gp_lengths = gp_engine.GetBranchLengths();
    tp_engine.GetLikelihoodEvalEngine().GetDAGBranchHandler().SetBranchLengths(
        gp_lengths);
    tp_engine.InitializeScores();
    tp_engine.ComputeScores();
    std::cout << "tp_likelihood_using_gp: " << tp_engine.GetTopTreeLikelihoods()
              << std::endl;
  }
  CHECK_LT(max_diff, tol);
}
// Exports Newicks from DAG and TPEngine. DAG exports a covering set of trees, which
// should contain every node and edge from the DAG in a (unproven) minimum set of trees.
// TPEngine exports an ordered vector of trees which contains a covering set of trees
// that maintains the relative priority of edges according to the TPEngine's choice map.
// Tests this by using the exported trees to build a new DAG and TPEngine, and checks
// that new DAG and TPEngine match the old DAG and TPEngine. Repeats tests after adding
// NNIs to the DAG.
TEST_CASE("TPEngine: Exporting Newicks") {
  const std::string fasta_path = "data/five_taxon.fasta";
  const std::string newick_path_1 = "data/five_taxon_rooted.nwk";
  const std::string newick_path_2 = "data/five_taxon_rooted_shuffled.nwk";
  // Build map of trees to tree_id.
  // Reference implementation: for each tree_id, collect the distinct top-tree
  // topologies of all edges sourced from that tree, de-duplicating across tree_ids.
  auto BuildTopTreeMapViaBruteForce = [](GPInstance& inst) {
    TreeIdTopologyMap tree_map;
    std::vector<RootedTree> visited_trees;
    for (TreeId tree_id(0); tree_id < inst.GetTPEngine().GetMaxTreeId(); tree_id++) {
      std::vector<RootedTree> temp_trees;
      // Find all edges that contain the given tree_id.
      for (EdgeId edge_id{0}; edge_id < inst.GetDAG().EdgeCountWithLeafSubsplits();
           edge_id++) {
        if (inst.GetTPEngine().GetTreeSource(edge_id) != tree_id) {
          continue;
        }
        auto tree = inst.GetTPEngine().GetTopTreeWithEdge(edge_id);
        // Check if the edge's tree topology has already been found for this tree_id.
        bool tree_found = false;
        for (size_t i = 0; i < temp_trees.size(); i++) {
          if (tree == temp_trees[i]) {
            tree_found = true;
            break;
          }
        }
        if (!tree_found) {
          temp_trees.push_back(tree);
        }
      }
      // Add tree to map if does not already exist in map.
      for (const auto& tree : temp_trees) {
        bool tree_found = false;
        for (const auto& visited_tree : visited_trees) {
          if (tree == visited_tree) {
            tree_found = true;
            break;
          }
        }
        if (!tree_found) {
          if (tree_map.find(tree_id) == tree_map.end()) {
            tree_map[tree_id] = {};
          }
          tree_map[tree_id].push_back(tree.Topology());
          visited_trees.push_back(tree);
        }
      }
    }
    return tree_map;
  };
  // Export a covering newick file from TPEngine, then build new DAG from that file.
  // Compare to the input DAG.
  auto BuildCoveringNewickAndCompareNewDAG = [](GPInstance& inst_1) {
    const std::string temp_newick_path = "_ignore/temp.newick";
    std::ofstream file_out;
    file_out.open(temp_newick_path);
    file_out << inst_1.GetDAG().ToNewickOfCoveringTopologies() << std::endl;
    file_out.close();
    auto inst_2 = GPInstanceOfFiles(inst_1.GetFastaSourcePath(), temp_newick_path,
                                    "_ignore/mmapped_pv.data1");
    bool dags_equal =
        (SubsplitDAG::Compare(inst_1.GetDAG(), inst_2.GetDAG(), false) == 0);
    return dags_equal;
  };
  // Export a top tree newick file from TPEngine, then build new TPEngine from that
  // file. Compare to the input TPEngine.
  auto BuildTopTreeNewickAndCompareNewTPEngine =
      [&BuildTopTreeMapViaBruteForce](GPInstance& inst_1) {
        std::ofstream file_out;
        // Use internal method for building Newick string.
        const std::string temp_newick_path_1 = "_ignore/temp_1.newick";
        const auto tree_map_1 = inst_1.GetTPEngine().BuildMapOfTreeIdToTopTopologies();
        std::string newick_1 = inst_1.GetTPEngine().ToNewickOfTopTopologies();
        file_out.open(temp_newick_path_1);
        file_out << newick_1 << std::endl;
        file_out.close();
        // Use brute force method for building Newick string.
        const std::string temp_newick_path_2 = "_ignore/temp_2.newick";
        std::string newick_2;
        file_out.open(temp_newick_path_2);
        const auto tree_map_2 = BuildTopTreeMapViaBruteForce(inst_1);
        for (const auto& [tree_id, tree_vec] : tree_map_2) {
          std::ignore = tree_id;
          for (const auto& tree : tree_vec) {
            newick_2 += inst_1.GetDAG().TopologyToNewickTopology(tree) + '\n';
            file_out << inst_1.GetDAG().TopologyToNewickTopology(tree) << std::endl;
          }
        }
        file_out.close();
        // The internal and brute-force exports must produce identical Newick text.
        bool newicks_equal = (newick_1 == newick_2);
        if (!newicks_equal) {
          std::cerr << "ERROR: Newicks do not match." << std::endl;
          // std::cerr << "NEWICK_TEST: " << std::endl << newick_1 << std::endl;
          // std::cerr << "NEWICK_TRUTH: " << std::endl << newick_2 << std::endl;
          // Debug helper: dump each tree_id with its topologies, hashes, and the
          // tree_ids associated with the topology's edges.
          auto TreeIdTopologyMapToString = [&inst_1](const TreeIdTopologyMap& map) {
            std::stringstream ss;
            size_t tree_count = 0;
            for (const auto& [tree_id, tree_vec] : map) {
              ss << "(" << tree_id << ", [ ";
              for (const auto& tree : tree_vec) {
                std::string newick = tree->Newick();
                size_t hash = std::hash<std::string>{}(newick);
                const auto& tpengine = inst_1.GetTPEngine();
                auto tree_ids = tpengine.FindTreeIdsInTreeEdgeVector(
                    tpengine.BuildSetOfEdgesRepresentingTopology(tree));
                ss << tree_count++ << " " << HashToString(hash, 5) << " "
                   << tree->Newick() << " " << tree_ids << " ";
              }
              ss << "]), " << std::endl;
            }
            return ss.str();
          };
          std::cerr << "TREE_MAP_TEST: " << std::endl
                    << TreeIdTopologyMapToString(tree_map_1) << std::endl;
          std::cerr << "TREE_MAP_TRUTH: " << std::endl
                    << TreeIdTopologyMapToString(tree_map_2) << std::endl;
        }
        // Build new TPEngine and check that old and new engines are equal.
        auto inst_2 = GPInstanceOfFiles(inst_1.GetFastaSourcePath(), temp_newick_path_2,
                                        "_ignore/mmapped_pv.data3");
        inst_2.MakeTPEngine();
        inst_2.MakeNNIEngine();
        bool engines_equal =
            (TPEngine::Compare(inst_1.GetTPEngine(), inst_2.GetTPEngine(), false) == 0);
        if (!engines_equal) {
          std::cerr << "ERROR: Engines do not match." << std::endl;
        }
        return (newicks_equal and engines_equal);
      };
  auto inst_1 =
      GPInstanceOfFiles(fasta_path, newick_path_1, "_ignore/mmapped_pv.data1");
  auto inst_2 =
      GPInstanceOfFiles(fasta_path, newick_path_2, "_ignore/mmapped_pv.data2");
  inst_1.MakeTPEngine();
  inst_1.MakeNNIEngine();
  auto& tp_engine = inst_1.GetTPEngine();
  auto& nni_engine = inst_1.GetNNIEngine();
  inst_2.MakeTPEngine();
  inst_2.MakeNNIEngine();
  // Sanity checks before adding NNIs.
  CHECK_MESSAGE(
      TPEngine::Compare(inst_1.GetTPEngine(), inst_1.GetTPEngine(), false) == 0,
      "TPEngines not equal to self.");
  CHECK_MESSAGE(SubsplitDAG::Compare(inst_1.GetDAG(), inst_2.GetDAG(), true) == 0,
                "DAGs formed from shuffled Newicks not equal.");
  CHECK_MESSAGE(
      TPEngine::Compare(inst_1.GetTPEngine(), inst_2.GetTPEngine(), true) != 0,
      "TPEngines formed from shuffled Newicks are incorrectly equal.");
  CHECK_MESSAGE(BuildCoveringNewickAndCompareNewDAG(inst_1),
                "DAG built from Covering Newick not equal to the DAG that build it "
                "(before adding NNIs).");
  CHECK_MESSAGE(BuildTopTreeNewickAndCompareNewTPEngine(inst_1),
                "Newick and TPEngine built from Top Tree Newick not equal to the "
                "TPEngine that build it (before adding NNIs).");
  // Grow the DAG via NNIs, re-running the export round-trip check each iteration.
  tp_engine.GetLikelihoodEvalEngine().SetOptimizationMaxIteration(5);
  tp_engine.GetLikelihoodEvalEngine().BranchLengthOptimization(false);
  nni_engine.SetTPLikelihoodCutoffFilteringScheme(0.0);
  nni_engine.SetTopKScoreFilteringScheme(1);
  nni_engine.SetReevaluateRejectedNNIs(true);
  nni_engine.RunInit(true);
  for (size_t iter = 0; iter < 10; iter++) {
    nni_engine.RunMainLoop(true);
    if (iter == 7) break;
    // Issue #479: this creates an unknown problem on iteration 7.
    // Error occurs during GetLine() in subsplit_dag_storage.hpp:564.
    // Parent-child vertice pair references a line outside range of DAG.
    // (May be an issue with graft addition/removal process?)
    nni_engine.RunPostLoop(true);
    CHECK_MESSAGE(BuildCoveringNewickAndCompareNewDAG(inst_1),
                  "DAG built from Covering Newick not equal to the DAG that build it"
                  " (after adding NNIs).");
    CHECK_MESSAGE(BuildTopTreeNewickAndCompareNewTPEngine(inst_1),
                  "Newicks and TPEngine built from Top Tree Newick not equal to the "
                  "TPEngine that build it (after adding NNIs).");
  }
}
// PVHandler can be reindexed using two methods. Either via move-copy, where PVs are
// moved so that the PV vector is ordered according to the order of the node_ids they
// represent in the DAG, or via remapping, where a PV map is updated to point at the
// correct PV when given a node_id, which avoids copying. Test checks that both
// methods have PV with same values. Repeats tests after adding NNIs to DAG.
TEST_CASE("TPEngine: Resize and Reindex PV Handler") {
  const std::string fasta_path = "data/five_taxon.fasta";
  const std::string newick_path_1 = "data/five_taxon_rooted.nwk";
  auto inst_1 =
      GPInstanceOfFiles(fasta_path, newick_path_1, "_ignore/mmapped_pv.data1");
  auto inst_2 =
      GPInstanceOfFiles(fasta_path, newick_path_1, "_ignore/mmapped_pv.data2");
  // NNI Engine that uses remapping for reindexing PVs.
  // NOTE(review): both engines below call SetUseRemapping(false), so the two
  // instances currently exercise the same reindexing method, despite the comments
  // describing them as remapping-vs-non-remapping. Confirm whether pvs_1 should
  // instead call SetUseRemapping(true).
  inst_1.MakeTPEngine();
  inst_1.MakeNNIEngine();
  auto& tpengine_1 = inst_1.GetTPEngine();
  auto& nniengine_1 = inst_1.GetNNIEngine();
  auto& pvs_1 = tpengine_1.GetLikelihoodEvalEngine().GetPVs();
  pvs_1.SetUseRemapping(false);
  nniengine_1.SetTPLikelihoodCutoffFilteringScheme(0.0);
  nniengine_1.SetTopKScoreFilteringScheme(1);
  nniengine_1.RunInit();
  // NNI Engine that does NOT use remapping for reindexing PVs.
  inst_2.MakeTPEngine();
  inst_2.MakeNNIEngine();
  auto& tpengine_2 = inst_2.GetTPEngine();
  auto& nniengine_2 = inst_2.GetNNIEngine();
  auto& pvs_2 = tpengine_2.GetLikelihoodEvalEngine().GetPVs();
  pvs_2.SetUseRemapping(false);
  nniengine_2.SetTPLikelihoodCutoffFilteringScheme(0.0);
  nniengine_2.SetTopKScoreFilteringScheme(1);
  nniengine_2.RunInit();
  CHECK_MESSAGE(nniengine_1.GetDAG() == nniengine_2.GetDAG(),
                "DAGs do not match (before adding NNIs).");
  auto pvs_match = pvs_1.Compare(pvs_1, pvs_2, false);
  CHECK_MESSAGE(pvs_match, "PVs do not match (before adding NNIs).");
  size_t max_iter = 10;
  for (size_t iter = 0; iter < max_iter; iter++) {
    // Reuse the outer pvs_match rather than shadowing it with a fresh local
    // (the previous inner `bool pvs_match;` shadowed the variable above).
    nniengine_1.RunMainLoop();
    nniengine_1.RunPostLoop();
    // After growing only engine 1, the PVs should diverge.
    pvs_match = pvs_1.Compare(pvs_1, pvs_2, true);
    CHECK_FALSE_MESSAGE(
        pvs_match, "PVs incorrectly match (after adding NNIs to only one NNIEngine).");
    nniengine_2.RunMainLoop();
    nniengine_2.RunPostLoop();
    // Once engine 2 catches up, the PVs should agree again.
    pvs_match = pvs_1.Compare(pvs_1, pvs_2, false);
    CHECK_MESSAGE(pvs_match, "PVs do not match (after adding NNIs).");
  }
}
// ** DAGData tests **
// Builds DAGData vectors from the nodes and edges of a DAG. Checks that data is
// resized and reindexed properly after modifying reference DAG.
TEST_CASE("DAGData: Resize and Reindex") {
  std::string fasta_path = "data/five_taxon.fasta";
  std::string newick_path = "data/five_taxon_rooted.nwk";
  std::string mmap_path_1 = "_ignore/mmap.1.data";
  std::string mmap_path_2 = "_ignore/mmap.2.data";
  // pre_inst keeps an unmodified copy of the DAG so node/edge maps can be built
  // between the pre-NNI and post-NNI DAGs at the end of the test.
  GPInstance pre_inst =
      MakeGPInstanceWithTPEngine(fasta_path, newick_path, mmap_path_1);
  auto& pre_dag = pre_inst.GetDAG();
  auto& pre_llh_engine =
      pre_inst.GetNNIEngine().GetTPEvalEngine().GetTPEngine().GetLikelihoodEvalEngine();
  pre_llh_engine.Initialize();
  GPInstance inst = MakeGPInstanceWithTPEngine(fasta_path, newick_path, mmap_path_2);
  auto& dag = inst.GetDAG();
  auto& llh_engine =
      inst.GetNNIEngine().GetTPEvalEngine().GetTPEngine().GetLikelihoodEvalEngine();
  llh_engine.Initialize();
  // Per-node and per-edge integer data vectors, filled with default_val.
  int default_val = -1;
  DAGNodeIntData node_data(dag, default_val);
  DAGEdgeIntData edge_data(dag, default_val);
  // Resize to fit dag.
  size_t spare_count = 10;
  node_data.Resize(dag.NodeCount(), spare_count, std::nullopt, std::nullopt);
  edge_data.Resize(dag.EdgeCountWithLeafSubsplits(), spare_count, std::nullopt,
                   std::nullopt);
  // Check that counts match the size of the DAG.
  CHECK_EQ(node_data.GetCount(), dag.NodeCount());
  CHECK_EQ(edge_data.GetCount(), dag.EdgeCountWithLeafSubsplits());
  // Check that padded size matches spare_count.
  CHECK_EQ(node_data.GetSpareCount(), spare_count);
  CHECK_EQ(edge_data.GetSpareCount(), spare_count);
  // Check that new data is filled with default.
  for (NodeId i = node_data.GetCount(); i < node_data.GetPaddedCount(); i++) {
    CHECK_EQ(node_data(i), default_val);
  }
  // Check that no exceptions thrown while accessing elements in range.
  // Assign each to their index value.
  for (NodeId i = 0; i < node_data.GetPaddedCount(); i++) {
    CHECK_NOTHROW(node_data(NodeId(i)) = i.value_);
  }
  for (EdgeId i = 0; i < edge_data.GetPaddedCount(); i++) {
    CHECK_NOTHROW(edge_data(EdgeId(i)) = i.value_);
  }
  // Grow DAG by adding all adjacent NNIs.
  // Snapshot the data before growth so reindexed values can be checked against it.
  DAGNodeIntData pre_node_data(node_data);
  DAGEdgeIntData pre_edge_data(edge_data);
  inst.MakeNNIEngine();
  auto& nni_engine = inst.GetNNIEngine();
  nni_engine.SyncAdjacentNNIsWithDAG();
  // Accumulate the composed reindexers across all node-pair additions.
  auto node_reindexer = Reindexer::IdentityReindexer(dag.NodeCount());
  auto edge_reindexer = Reindexer::IdentityReindexer(dag.EdgeCountWithLeafSubsplits());
  for (const auto& nni : nni_engine.GetAdjacentNNIs()) {
    auto mods = dag.AddNodePair(nni);
    node_reindexer = node_reindexer.ComposeWith(mods.node_reindexer);
    edge_reindexer = edge_reindexer.ComposeWith(mods.edge_reindexer);
  }
  // Resize and Reindex the data vectors.
  node_data.Resize(dag, std::nullopt, node_reindexer);
  edge_data.Resize(dag, std::nullopt, edge_reindexer);
  // Grow engine for data.
  llh_engine.GrowEdgeData(edge_reindexer.size(), edge_reindexer);
  // Check that counts match the size of the DAG.
  CHECK_EQ(node_data.GetCount(), dag.NodeCount());
  CHECK_EQ(edge_data.GetCount(), dag.EdgeCountWithLeafSubsplits());
  // Check that padded size matches spare_count.
  CHECK_EQ(node_data.GetSpareCount(), spare_count);
  CHECK_EQ(edge_data.GetSpareCount(), spare_count);
  // Check that node and edge data was reindexed properly.
  // Each pre-DAG id's stored value must follow it to its post-DAG id.
  const auto& [node_map, edge_map] =
      BuildNodeAndEdgeMapsFromPreDAGToPostDAG(pre_dag, dag);
  for (const auto& [pre_id, post_id] : node_map) {
    CHECK_EQ(node_data(post_id), pre_node_data(pre_id));
  }
  for (const auto& [pre_id, post_id] : edge_map) {
    CHECK_EQ(edge_data(post_id), pre_edge_data(pre_id));
  }
}
// Checks that two identical Newick trees with different orderings yield the same
// tree.
TEST_CASE("GPInstance: Taxon Sorted Tree Collection") {
  // With taxon sorting the two orderings normalize to the same tree; without it
  // they stay distinct.
  for (const bool sort_taxa : {false, true}) {
    GPInstance lhs_inst("_ignore/mmap.1.data");
    lhs_inst.ReadFastaFile("data/three_taxon.fasta");
    lhs_inst.ReadNewickFile("data/three_taxon_1.nwk", sort_taxa);
    GPInstance rhs_inst("_ignore/mmap.2.data");
    rhs_inst.ReadFastaFile("data/three_taxon.fasta");
    rhs_inst.ReadNewickFile("data/three_taxon_2.nwk", sort_taxa);
    auto& lhs_tree = lhs_inst.GetCurrentlyLoadedTrees().GetTree(0);
    auto& rhs_tree = rhs_inst.GetCurrentlyLoadedTrees().GetTree(0);
    if (sort_taxa) {
      CHECK_MESSAGE(lhs_tree == rhs_tree, "Trees incorrectly found not equal.");
    } else {
      CHECK_MESSAGE(lhs_tree != rhs_tree, "Trees incorrectly found equal.");
    }
  }
}
| 164,545
|
C++
|
.cpp
| 3,259
| 43.655109
| 88
| 0.668281
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,055
|
site_model.cpp
|
phylovi_bito/src/site_model.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "site_model.hpp"
#include <cmath>
#include "sugar.hpp"
// Factory: builds a site model from a specification string. "constant" yields a
// ConstantSiteModel; "weibull" (optionally "weibull+<k>" for k categories,
// default 4) yields a WeibullSiteModel with shape 1.0. Any other specification
// fails via Failwith.
std::unique_ptr<SiteModel> SiteModel::OfSpecification(
    const std::string& specification) {
  if (specification == "constant") {
    return std::make_unique<ConstantSiteModel>();
  }
  if (specification.rfind("weibull", 0) == 0) {
    size_t category_count = 4;
    const auto plus_pos = specification.find("+");
    if (plus_pos != std::string::npos) {
      // Everything after the '+' is the category count.
      category_count = stoi(specification.substr(plus_pos + 1));
    }
    return std::make_unique<WeibullSiteModel>(category_count, 1.0);
  }
  Failwith("Site model not known: " + specification);
}
void WeibullSiteModel::SetParameters(const EigenVectorXdRef param_vector) {
GetBlockSpecification().CheckParameterVectorSize(param_vector);
EigenVectorXd shape = ExtractSegment(param_vector, shape_key_);
shape_ = shape[0];
UpdateRates();
}
// Discretized Weibull distribution using the median approximation
// Equivalent to the discretized gamma method in Yang 1994.
// The scale (lambda) is fixed to 1
void WeibullSiteModel::UpdateRates() {
double mean_rate = 0;
double mean_rate_derivative = 0;
std::vector<double> deriv_unscaled_rates(category_count_);
for (size_t i = 0; i < category_count_; i++) {
double quantile = (2.0 * i + 1.0) / (2.0 * category_count_);
// Set rate to inverse CDF at quantile.
category_rates_[i] = pow(-std::log(1.0 - quantile), 1.0 / shape_);
mean_rate += category_rates_[i];
// Derivative of unormalized rate i wrt shape
deriv_unscaled_rates[i] =
-category_rates_[i] * std::log(-std::log(1.0 - quantile)) / (shape_ * shape_);
mean_rate_derivative += deriv_unscaled_rates[i];
}
mean_rate /= category_count_;
mean_rate_derivative /= category_count_;
for (size_t i = 0; i < category_count_; i++) {
// Derivative of rate i wrt shape
// dr_i/dshape = d(ur_i/mean)/dshape = (dur_i* mean - ur_i*dmean)/mean^2
rate_derivatives_[i] = (deriv_unscaled_rates[i] * mean_rate -
category_rates_[i] * mean_rate_derivative) /
(mean_rate * mean_rate);
category_rates_[i] /= mean_rate;
}
}
// Number of discrete rate categories in the Weibull discretization.
size_t WeibullSiteModel::GetCategoryCount() const { return category_count_; }
// Per-category rates (normalized by UpdateRates to have mean 1).
const EigenVectorXd& WeibullSiteModel::GetCategoryRates() const {
  return category_rates_;
}
// Probability weight of each category.
const EigenVectorXd& WeibullSiteModel::GetCategoryProportions() const {
  return category_proportions_;
}
// Derivatives of the normalized category rates with respect to the shape.
const EigenVectorXd& WeibullSiteModel::GetRateGradient() const {
  return rate_derivatives_;
};
| 2,712
|
C++
|
.cpp
| 65
| 37.646154
| 86
| 0.689302
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,056
|
nni_operation.cpp
|
phylovi_bito/src/nni_operation.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "nni_operation.hpp"
#include "bitset.hpp"
#include "subsplit_dag.hpp"
// ** NNIOperation
// Three-way comparison of two NNIs: lexicographic on (parent subsplit, child
// subsplit) via Bitset::SubsplitCompare.
int NNIOperation::Compare(const NNIOperation &nni_a, const NNIOperation &nni_b) {
  const auto parent_comparison = Bitset::SubsplitCompare(nni_a.parent_, nni_b.parent_);
  if (parent_comparison != 0) {
    return parent_comparison;
  }
  return Bitset::SubsplitCompare(nni_a.child_, nni_b.child_);
}
// Member form: three-way comparison of this NNI against nni_b.
int NNIOperation::Compare(const NNIOperation &nni_b) const {
  const NNIOperation &nni_a = *this;
  return Compare(nni_a, nni_b);
}
// All relational operators are defined in terms of the three-way Compare.
bool operator<(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) < 0;
}
bool operator<=(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) <= 0;
}
bool operator>(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) > 0;
}
bool operator>=(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) >= 0;
}
bool operator==(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) == 0;
}
bool operator!=(const NNIOperation &lhs, const NNIOperation &rhs) {
  return NNIOperation::Compare(lhs, rhs) != 0;
}
// ** Special Constructors
// Builds the neighboring NNI produced by swapping the parent's sister clade "X"
// with the child clade selected by `child_clade_swapped_with_sister`.
NNIOperation NNIOperation::GetNeighboringNNI(
    const Bitset parent_in, const Bitset child_in,
    const SubsplitClade child_clade_swapped_with_sister,
    const SubsplitClade focal_clade) {
  // Input: Parent(X,YZ) -> Child(Y,Z).
  Bitset X = parent_in.SubsplitGetClade(SubsplitCladeEnum::Opposite(focal_clade));
  // "Y" clade can be chosen arbitrarily from (Y,Z), so "Y" is chosen based on which
  // we want to swap with "X".
  Bitset Y = child_in.SubsplitGetClade(child_clade_swapped_with_sister);
  Bitset Z = child_in.SubsplitGetClade(
      SubsplitCladeEnum::Opposite(child_clade_swapped_with_sister));
  // Output: Parent(Y,XZ) -> Child(X,Z).
  Bitset parent_out = Bitset::Subsplit(Y, X | Z);
  Bitset child_out = Bitset::Subsplit(X, Z);
  return NNIOperation(parent_out, child_out);
}
// Overload that derives the focal clade from the given parent/child pair.
NNIOperation NNIOperation::GetNeighboringNNI(
    const Bitset parent_in, const Bitset child_in,
    const SubsplitClade child_clade_swapped_with_sister) {
  SubsplitClade focal_clade =
      Bitset::SubsplitIsChildOfWhichParentClade(parent_in, child_in);
  return GetNeighboringNNI(parent_in, child_in, child_clade_swapped_with_sister,
                           focal_clade);
}
// Overload operating on this NNI's own parent/child pair.
NNIOperation NNIOperation::GetNeighboringNNI(
    const SubsplitClade child_clade_swapped_with_sister) const {
  return GetNeighboringNNI(parent_, child_, child_clade_swapped_with_sister);
}
// ** Query
bool NNIOperation::AreNNIOperationsNeighbors(const NNIOperation &nni_a,
const NNIOperation &nni_b) {
if (nni_a.GetSisterClade() == nni_b.GetSisterClade()) {
return false;
}
std::array<Bitset, 3> vec_a = {nni_a.GetSisterClade(), nni_a.GetLeftChildClade(),
nni_a.GetRightChildClade()};
std::array<Bitset, 3> vec_b = {nni_b.GetSisterClade(), nni_b.GetLeftChildClade(),
nni_b.GetRightChildClade()};
std::sort(vec_a.begin(), vec_a.end());
std::sort(vec_b.begin(), vec_b.end());
return (vec_a == vec_b);
};
// Finds which clade of post_nni's child equals pre_nni's sister clade, i.e. the
// child clade that was swapped with the sister to produce post_nni. Fails if no
// such clade exists (the NNIs are not neighbors).
SubsplitClade NNIOperation::WhichCladeSwapWithSisterToCreatePostNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) {
  const Bitset &pre_sister = pre_nni.GetSisterClade();
  for (SubsplitClade child_clade : SubsplitCladeEnum::Iterator()) {
    if (pre_sister == post_nni.GetChild().SubsplitGetClade(child_clade)) {
      return child_clade;
    }
  }
  Failwith("Given NNIs must be neighbors to find clade swap.");
};
// ** Miscellaneous
// Builds a mapping from each of pre_nni's clades to the matching clade of
// post_nni. The three mappable clades (sister, left child, right child) are
// matched by bitset equality; the parent focal clade maps to itself. Asserts
// that the NNIs are neighbors and that every clade finds a match.
NNIOperation::NNICladeArray NNIOperation::BuildNNICladeMapFromPreNNIToNNI(
    const NNIOperation &pre_nni, const NNIOperation &post_nni) {
  Assert(AreNNIOperationsNeighbors(pre_nni, post_nni),
         "Given NNIs must be neighbors to find clade map.");
  NNICladeArray nni_clade_map;
  // Tracks which post_nni clades have been claimed, so equal pre_nni clades
  // cannot both map onto the same post_nni clade.
  EnumArray<NNIClade, NNICladeCount, bool> mapped_post_clades;
  mapped_post_clades.fill(false);
  const std::array<NNIClade, 3> mappable_clades = {
      NNIClade::ParentSister, NNIClade::ChildLeft, NNIClade::ChildRight};
  for (const NNIClade pre_nni_clade_type : mappable_clades) {
    const Bitset &pre_nni_clade = pre_nni.GetClade(pre_nni_clade_type);
    bool is_found = false;
    for (const NNIClade post_nni_clade_type : mappable_clades) {
      // Skip post clades that already have a mapping.
      if (mapped_post_clades[post_nni_clade_type]) {
        continue;
      }
      const Bitset &post_nni_clade = post_nni.GetClade(post_nni_clade_type);
      if (pre_nni_clade == post_nni_clade) {
        is_found = true;
        nni_clade_map[pre_nni_clade_type] = post_nni_clade_type;
        mapped_post_clades[post_nni_clade_type] = true;
        break;
      }
    }
    Assert(is_found,
           "Unexpected Error: Was not able to find a clade mapping from pre_nni to "
           "post_nni.");
  }
  // The parent focal clade always maps to itself.
  nni_clade_map[NNIClade::ParentFocal] = NNIClade::ParentFocal;
  return nni_clade_map;
};
// True iff parent_ and child_ form a valid parent-child subsplit pair.
// Fix: drop the stray top-level semicolon after the function body.
bool NNIOperation::IsValid() {
  return Bitset::SubsplitIsParentChildPair(parent_, child_);
}
// Render the NNI as "{ P:<parent subsplit>, C:<child subsplit> }".
std::string NNIOperation::ToString() const {
  std::stringstream out;
  out << "{ P:" << parent_.SubsplitToString() << ", C:" << child_.SubsplitToString()
      << " }";
  return out.str();
}
// Stringify the NNI's combined hash via HashToString with the given length.
std::string NNIOperation::ToHashString(const size_t length) const {
  return HashToString(Hash(), length);
}
// Render parent and child hashes separately as "[<parent>||<child>]".
std::string NNIOperation::ToSplitHashString(const size_t length) const {
  const std::string parent_hash = HashToString(GetParent().Hash(), length);
  const std::string child_hash = HashToString(GetChild().Hash(), length);
  return "[" + parent_hash + "||" + child_hash + "]";
}
// Stream insertion in terms of NNIOperation::ToString.
// Fix: drop the stray top-level semicolon after the function body.
std::ostream &operator<<(std::ostream &os, const NNIOperation &nni) {
  return os << nni.ToString();
}
| 5,959
|
C++
|
.cpp
| 142
| 37.647887
| 84
| 0.703812
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,057
|
zlib_stream.cpp
|
phylovi_bito/src/zlib_stream.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "zlib_stream.hpp"
namespace zlib {
namespace detail {
// Translate a zlib return code. Negative codes signal errors and become
// thrown Exceptions, carrying zlib's message when one is set on the stream;
// non-negative codes are passed through as Result::Code values.
constexpr Result::Code call_zlib(int zlib_code, const z_stream& impl) {
  if (zlib_code >= 0) {
    return static_cast<Result::Code>(zlib_code);
  }
  const auto code = static_cast<typename Exception::Code>(zlib_code);
  if (impl.msg == nullptr) {
    throw Exception(code);
  }
  throw Exception(code, impl.msg);
}
} // namespace detail
// Set up a zlib inflate stream. The window-bits argument 16 + MAX_WBITS
// selects gzip decoding with the maximum window size (per the zlib manual).
ZStream::ZStream() {
  impl_.next_in = nullptr;
  impl_.avail_in = 0;
  // Null zalloc/zfree means zlib uses its default allocator.
  impl_.zalloc = nullptr;
  impl_.zfree = nullptr;
  impl_.opaque = this;
  auto ret = detail::call_zlib(::inflateInit2(&impl_, 16 + MAX_WBITS), impl_);
  // call_zlib throws on negative codes, so anything but ok here is unexpected.
  if (ret != Result::Code::ok) {
    throw std::logic_error("Unexpected result from zlib");
  }
}
// Release zlib state, swallowing any error: a destructor must not throw.
// Fix: the previous function-try-block form is a bug — control reaching the
// end of a catch handler of a destructor's function-try-block implicitly
// rethrows the exception ([except.handle]), which under `noexcept` calls
// std::terminate. An ordinary try/catch inside the body swallows correctly.
ZStream::~ZStream() noexcept {
  try {
    Close();
  } catch (...) {
    // Best effort: suppress errors during destruction.
  }
}
// Tear down the inflate stream exactly once; later calls are no-ops.
// The test_and_set on closed_ makes the once-only check race-free.
void ZStream::Close() {
  if (!closed_.test_and_set()) {
    auto ret = detail::call_zlib(::inflateEnd(&impl_), impl_);
    // call_zlib throws on negative codes, so anything but ok is unexpected.
    if (ret != Result::Code::ok) {
      throw std::logic_error("Unexpected result from zlib");
    }
  }
}
// Run one zlib inflate step over [in, in + in_size) into [out, out + out_size).
// Returns the result code plus the number of input bytes consumed and output
// bytes produced (computed from how much zlib left unconsumed/unfilled).
Result ZStream::Inflate(Flush mode, const unsigned char* in, size_t in_size,
                        unsigned char* out, size_t out_size) {
  // const_cast is needed because z_stream::next_in is declared non-const,
  // even though inflate does not modify the input buffer.
  impl_.next_in = const_cast<unsigned char*>(in);
  impl_.avail_in = in_size;
  impl_.next_out = out;
  impl_.avail_out = out_size;
  const auto ret = detail::call_zlib(
      ::inflate(&impl_, static_cast<std::underlying_type_t<Flush>>(mode)), impl_);
  return {ret, in_size - impl_.avail_in, out_size - impl_.avail_out};
}
// Wrap a compressed input stream's buffer. Compressed bytes are staged in
// in_buf_ and decompressed through inflate_ into out_buf_ before being pushed
// into the base string buffer on demand (see ensure_avail).
ZStringBuf::ZStringBuf(const std::istream& in, size_t in_buf_size, size_t out_buf_size)
    : base{},
      in_{*in.rdbuf()},
      in_buf_{std::make_unique<char[]>(in_buf_size)},
      in_buf_size_{static_cast<std::streamsize>(in_buf_size)},
      out_buf_{std::make_unique<char[]>(out_buf_size)},
      out_buf_size_{static_cast<std::streamsize>(out_buf_size)},
      inflate_{} {}
ZStringBuf::~ZStringBuf() {}
// Make at least one decompressed character available, then delegate to the
// base-class buffer (which now has data to serve without consuming it).
ZStringBuf::int_type ZStringBuf::underflow() {
  ensure_avail(1);
  return base::underflow();
}
// Make at least one decompressed character available, then delegate to the
// base-class buffer (which consumes and returns the next character).
ZStringBuf::int_type ZStringBuf::uflow() {
  ensure_avail(1);
  return base::uflow();
}
// Bulk read: decompress until `count` characters are buffered (or the stream
// is exhausted), then let the base class copy them out.
std::streamsize ZStringBuf::xsgetn(char_type* s, std::streamsize count) {
  ensure_avail(count);
  return base::xsgetn(s, count);
}
// Pump the decompression pipeline until at least `count` characters are
// available in the base buffer, or no further progress can be made.
// Outer loop: refill from the compressed source. Middle loop: inflate the
// refilled chunk. Inner loop: push decompressed bytes into the base buffer.
void ZStringBuf::ensure_avail(std::streamsize count) {
  while (count > in_avail()) {
    // Reads from the compressed stream
    const size_t in_count =
        in_.sgetn(in_buf_.get(), std::min(in_buf_size_, count - in_avail()));
    size_t consumed = 0;
    while (consumed < in_count) {
      // Performs zlib decompression
      auto ret = inflate_.Inflate(
          Flush::partial, reinterpret_cast<unsigned char*>(in_buf_.get() + consumed),
          static_cast<size_t>(in_count - consumed),
          reinterpret_cast<unsigned char*>(out_buf_.get()),
          static_cast<size_t>(out_buf_size_));
      if (ret.code != Result::Code::ok) {
        if (ret.code == Result::Code::need_dict) {
          throw std::runtime_error("Preset dictionary is needed");
        }
        if (ret.code == Result::Code::stream_end) {
          // NOTE(review): any bytes produced by this final Inflate call are
          // discarded before being stored below — confirm this is intended
          // (a gzip trailer normally yields no output, but verify).
          break;
        }
        throw std::logic_error("Unexpected result from zlib");
      }
      consumed += ret.in_count;
      // Stores the decompressed data
      std::streamsize produced = 0;
      while (produced < static_cast<std::streamsize>(ret.out_count)) {
        const auto prod = sputn(out_buf_.get() + produced, ret.out_count - produced);
        if (prod < 1) break;
        produced += prod;
      }
    }
    // No input consumed means the source is exhausted: stop to avoid spinning.
    if (consumed < 1) break;
  }
}
} // namespace zlib
| 3,674
|
C++
|
.cpp
| 104
| 30.461538
| 87
| 0.635417
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,058
|
rooted_tree.cpp
|
phylovi_bito/src/rooted_tree.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "rooted_tree.hpp"
// Branch lengths should correspond to a time tree up to this tolerance.
constexpr double BRANCH_LENGTH_TOLERANCE = 1e-4;
// Construct from a topology and branch lengths; the topology must bifurcate
// at the root (asserted below).
RootedTree::RootedTree(const Node::NodePtr& topology, BranchLengthVector branch_lengths)
    : Tree(topology, std::move(branch_lengths)) {
  AssertTopologyBifurcatingInConstructor(topology);
}
// Construct from an (assumed rooted) Tree, copying topology and branch lengths.
RootedTree::RootedTree(const Tree& tree)
    : RootedTree(tree.Topology(), tree.BranchLengths()) {}
// Full constructor (used by DeepCopy): takes ownership of all time-tree state
// (per-node bounds, internal-node height ratios, node heights, clock rates).
RootedTree::RootedTree(const Node::NodePtr& topology, BranchLengthVector branch_lengths,
                       std::vector<double> node_bounds,
                       std::vector<double> height_ratios,
                       std::vector<double> node_heights, std::vector<double> rates,
                       size_t rate_count)
    : Tree(topology, std::move(branch_lengths)),
      node_bounds_(std::move(node_bounds)),
      height_ratios_(std::move(height_ratios)),
      node_heights_(std::move(node_heights)),
      rates_(std::move(rates)),
      rate_count_(rate_count) {
  AssertTopologyBifurcatingInConstructor(topology);
}
// Make an independent copy: the topology is deep-copied, while the numeric
// state vectors are copied by value into the full constructor.
// Fix: drop the stray top-level semicolon after the function body.
RootedTree RootedTree::DeepCopy() const {
  return RootedTree(Topology()->DeepCopy(), branch_lengths_, node_bounds_,
                    height_ratios_, node_heights_, rates_, rate_count_);
}
// Record tip (leaf) dates and reset time-tree state: node heights get one
// entry per node (Id() + 1, including the root); rates_ is sized Id().
// NOTE(review): rates_ having one fewer entry than node_heights_ looks like
// one-rate-per-edge (the root has no edge above it) — confirm.
void RootedTree::SetTipDates(const TagDoubleMap& tag_date_map) {
  node_heights_ = std::vector<double>(Topology()->Id() + 1);
  rates_ = std::vector<double>(Topology()->Id(), 1.0);
  rate_count_ = 1;  // Default is a strict clock with rate 1
  SetNodeBoundsUsingDates(tag_date_map);
  // Leaf heights are simply the supplied dates.
  for (const auto& [tag, date] : tag_date_map) {
    node_heights_[MaxLeafIDOfTag(tag)] = date;
  }
}
// Compute the lower bound on each node's height: a leaf's bound is its date;
// an internal node's bound is the max of its children's bounds (postorder).
void RootedTree::SetNodeBoundsUsingDates(const TagDoubleMap& tag_date_map) {
  const size_t leaf_count = LeafCount();
  node_bounds_ = std::vector<double>(Topology()->Id() + 1);
  for (const auto& [tag, date] : tag_date_map) {
    node_bounds_[MaxLeafIDOfTag(tag)] = date;
  }
  Topology()->BinaryIdPostorder(
      [&leaf_count, this](size_t node_id, size_t child0_id, size_t child1_id) {
        // Only internal nodes (id >= leaf_count) get the max-of-children bound.
        if (node_id >= leaf_count) {
          node_bounds_[node_id] =
              std::max(node_bounds_[child0_id], node_bounds_[child1_id]);
        }
      });
}
// Derive node heights and height ratios from the stored branch lengths,
// verifying the tree is time-calibrated: both children of a node must give
// the same parent height, to within BRANCH_LENGTH_TOLERANCE.
void RootedTree::InitializeTimeTreeUsingBranchLengths() {
  EnsureTipDatesHaveBeenSet();
  const size_t leaf_count = LeafCount();
  const int root_id = static_cast<int>(Topology()->Id());
  // One height ratio per internal node.
  height_ratios_.resize(leaf_count - 1);
  // Initialize the internal heights.
  Topology()->BinaryIdPostorder([&leaf_count, this](size_t node_id, size_t child0_id,
                                                    size_t child1_id) {
    if (node_id >= leaf_count) {
      // Height via child 0; child 1 must agree within tolerance.
      node_heights_[node_id] = node_heights_[child0_id] + branch_lengths_[child0_id];
      const auto height_difference =
          fabs(node_heights_[child1_id] + branch_lengths_[child1_id] -
               node_heights_[node_id]);
      if (height_difference > BRANCH_LENGTH_TOLERANCE) {
        Failwith(
            "Tree isn't time-calibrated in "
            "RootedTree::InitializeTimeTreeUsingBranchLengths. "
            "Height difference: " +
            std::to_string(height_difference));
      }
    }
  });
  // Initialize ratios.
  // The "height ratio" for the root is the root height.
  height_ratios_[root_id - leaf_count] = node_heights_[root_id];
  Topology()->TripleIdPreorderBifurcating(
      [&leaf_count, this](size_t node_id, size_t, size_t parent_id) {
        if (node_id >= leaf_count) {
          // See the beginning of the header file for an explanation.
          height_ratios_[node_id - leaf_count] =
              (node_heights_[node_id] - node_bounds_[node_id]) /
              (node_heights_[parent_id] - node_bounds_[node_id]);
        }
      });
}
// Inverse of the ratio computation above: given height ratios (with the root
// entry holding the root height itself), reconstruct node heights top-down
// and refresh branch lengths as parent height minus child height.
void RootedTree::InitializeTimeTreeUsingHeightRatios(
    EigenConstVectorXdRef height_ratios) {
  EnsureTipDatesHaveBeenSet();
  size_t leaf_count = LeafCount();
  size_t root_id = Topology()->Id();
  height_ratios_.resize(leaf_count - 1);
  // The root's "ratio" entry is its absolute height.
  node_heights_[root_id] = height_ratios(root_id - leaf_count);
  for (size_t i = 0; i < height_ratios_.size(); i++) {
    height_ratios_[i] = height_ratios(i);
  }
  Topology()->TripleIdPreorderBifurcating(
      [&leaf_count, &height_ratios, this](size_t node_id, size_t, size_t parent_id) {
        if (node_id >= leaf_count) {
          // Interpolate between the node's bound and its parent's height.
          node_heights_[node_id] =
              node_bounds_[node_id] +
              height_ratios(node_id - leaf_count) *
                  (node_heights_[parent_id] - node_bounds_[node_id]);
        }
        branch_lengths_[node_id] = node_heights_[parent_id] - node_heights_[node_id];
      });
}
// Convert a per-leaf date vector (indexed by leaf id) into a tag -> date map.
TagDoubleMap RootedTree::TagDateMapOfDateVector(std::vector<double> leaf_date_vector) {
  Assert(leaf_date_vector.size() == LeafCount(),
         "Wrong size vector in TagDateMapOfDateVector");
  TagDoubleMap tag_date_map;
  uint32_t leaf_id = 0;
  // Each leaf tag packs (leaf_id, leaf count 1).
  for (const double date : leaf_date_vector) {
    SafeInsert(tag_date_map, PackInts(leaf_id, 1), date);
    ++leaf_id;
  }
  return tag_date_map;
}
// A hard-coded four-taxon example time tree (topology #3 from
// Node::ExampleTopologies) with tip dates set and heights initialized.
RootedTree RootedTree::Example() {
  auto topology = Node::ExampleTopologies()[3];
  RootedTree tree(Tree(topology, {2., 1.5, 2., 1., 2.5, 2.5, 0.}));
  std::vector<double> date_vector({5., 3., 0., 1.});
  auto tag_date_map = tree.TagDateMapOfDateVector(date_vector);
  tree.SetTipDates(tag_date_map);
  tree.InitializeTimeTreeUsingBranchLengths();
  return tree;
}
// Build a rooted tree over `topology` with every branch length set to 1.
// Polish() renumbers node ids (root gets the largest id), so Id() + 1 is the
// total node count, sizing the branch length vector.
RootedTree RootedTree::UnitBranchLengthTreeOf(Node::NodePtr topology) {
  topology->Polish();
  return RootedTree(topology, BranchLengthVector(1 + topology->Id(), 1.));
}
// Trees compare equal when both topology and branch lengths match.
bool RootedTree::operator==(const RootedTree& other) const {
  const bool topologies_match = (this->Topology() == other.Topology());
  const bool branch_lengths_match = (this->BranchLengths() == other.BranchLengths());
  return topologies_match && branch_lengths_match;
}
// Verify that `topology` bifurcates at the root (exactly two children).
// Fix: the body previously ignored the `topology` parameter and inspected the
// tree's own Children(); checking the argument itself makes the assertion
// hold regardless of member-initialization order.
void RootedTree::AssertTopologyBifurcatingInConstructor(const Node::NodePtr& topology) {
  Assert(
      topology->Children().size() == 2,
      "Failed to create a RootedTree out of a topology that isn't bifurcating at the "
      "root. Perhaps you are trying to parse unrooted trees into a RootedSBNInstance?");
}
| 6,187
|
C++
|
.cpp
| 139
| 38.402878
| 88
| 0.661748
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,059
|
unrooted_tree_collection.cpp
|
phylovi_bito/src/unrooted_tree_collection.cpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#include "unrooted_tree_collection.hpp"
// Explicit class template instantiation:
// https://en.cppreference.com/w/cpp/language/class_template#Explicit_instantiation
template class GenericTreeCollection<UnrootedTree>;
// Upgrade a PreUnrootedTreeCollection by copy-constructing the base subobject.
// Fix: drop the stray top-level semicolon after the constructor body.
UnrootedTreeCollection::UnrootedTreeCollection(
    const PreUnrootedTreeCollection& pre_collection)
    : PreUnrootedTreeCollection(pre_collection) {}
// Convert every Tree in `trees` into an UnrootedTree, carrying over the
// tag -> taxon map unchanged.
UnrootedTreeCollection UnrootedTreeCollection::OfTreeCollection(
    const TreeCollection& trees) {
  TTreeVector converted_trees;
  // Reserve up front so the conversion loop performs no reallocations.
  converted_trees.reserve(trees.TreeCount());
  for (const auto& source_tree : trees.Trees()) {
    converted_trees.emplace_back(source_tree);
  }
  return UnrootedTreeCollection(std::move(converted_trees), trees.TagTaxonMap());
}
| 840
|
C++
|
.cpp
| 18
| 44
| 83
| 0.806846
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,060
|
reps_and_likelihoods.hpp
|
phylovi_bito/extras/reps_and_likelihoods.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// The purpose of this executable is to determine likelihoods and PCSP indexer
// representations of various trees as part of a proof of concept for nni sdag support.
// Specifically, this takes in a fasta file and three newick files of rooted trees with
// branch lengths. A subsplit DAG is built on the first set of trees and this is used
// for the indexer representations of all three sets of trees. When a subsplit appears
// in the second or third set of trees, but not the subsplit DAG, the subsplit has
// SIZE_MAX as the index.
#pragma once
#include "unrooted_sbn_instance.hpp"
#include "rooted_sbn_instance.hpp"
#include "gp_instance.hpp"
#include <thread>
std::vector<RootedIndexerRepresentation> GetIndexerRepresentations(
PreRootedTreeCollection &trees, BitsetSizeMap &indexer);
void WriteTreesToFile(const std::string &out_path,
const std::vector<RootedIndexerRepresentation> &representations,
const std::vector<double> &log_likelihoods = {});
void WriteNewickToFile(const std::string &out_path, const RootedTreeCollection &trees);
| 1,224
|
C++
|
.h
| 21
| 54.761905
| 87
| 0.768781
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,061
|
tidy_subsplit_dag.hpp
|
phylovi_bito/src/tidy_subsplit_dag.hpp
|
// Copyright 2019-2020 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A "tidy" subsplit DAG has a notion of clean and dirty vectors.
//
// A node-clade is dirty iff there has been a calculation below that node-clade that
// invalidates the p-hat PLV coming up into it.
//
// #321 It would be nice to make the traversals const, which would require us to supply
// dirty and clean vectors, and updating_below_ as variables. Perhaps these could be
// part of the Action?
#pragma once
#include "subsplit_dag.hpp"
// A SubsplitDAG that tracks which node-clades are "dirty" (have stale p-hat
// PLVs due to calculations below them) so traversals can interleave updates.
class TidySubsplitDAG : public SubsplitDAG {
 public:
  TidySubsplitDAG();
  explicit TidySubsplitDAG(const RootedTreeCollection &tree_collection);
  // Initialize tidy vectors for after initialization or modification of DAG.
  void ReinitializeTidyVectors();
  // Add an adjacent node pair to the DAG.
  virtual ModificationResult AddNodePair(const NNIOperation &nni) {
    return AddNodePair(nni.GetParent(), nni.GetChild());
  }
  // As above, from explicit parent/child subsplits; the tidy bookkeeping
  // (above/below matrices, dirty vectors) is rebuilt after the modification.
  virtual ModificationResult AddNodePair(const Bitset &parent_subsplit,
                                         const Bitset &child_subsplit) {
    auto mods = SubsplitDAG::AddNodePair(parent_subsplit, child_subsplit);
    ReinitializeTidyVectors();
    return mods;
  }
  // What nodes are above or below the specified node? We consider a node to be both
  // above and below itself (this just happens to be handy for the implementation).
  EigenArrayXb BelowNode(NodeId node_id);
  // These use a different convention of rotated, then node id, reflecting that we are
  // asking the question "which `rotated` nodes are above node_id"?
  EigenArrayXbRef BelowNode(bool is_edge_on_left, NodeId node_id);
  EigenArrayXb AboveNode(NodeId node_id) const;
  EigenArrayXb AboveNode(bool is_edge_on_left, NodeId node_id) const;
  // The dirty vector for the given clade side (left/rotated vs right/sorted).
  EigenArrayXbRef DirtyVector(bool is_edge_on_left);
  bool IsDirtyBelow(NodeId node_id, bool is_edge_on_left);
  // Mark every node-clade strictly above node_id as dirty.
  void SetDirtyStrictlyAbove(NodeId node_id);
  void SetClean();
  std::string AboveMatricesAsString() const;
  // From ((0,1),2)
  // https://github.com/phylovi/bito/issues/349#issuecomment-897963382
  static TidySubsplitDAG TrivialExample();
  // The same DAG, built by hand for the test.
  static TidySubsplitDAG ManualTrivialExample();
  // From (0,(1,(2,3))) and ((0,(2,3)),1)
  // See https://github.com/phylovi/bito/issues/349#issuecomment-897980459
  // Update during #288 #321
  static TidySubsplitDAG MotivatingExample();
  std::string RecordTraversal();
  // Apply a TidySubsplitDAGTraversalAction via a depth first traversal. Do not visit
  // leaf nodes.
  // We assume that ModifyEdge leaves (node_id, rotated) in a clean state, however, each
  // ModifyEdge dirties all of the nodes above it. These nodes must be cleaned by
  // UpdateEdge before they are ready to be used. See TidySubslitDAGTraversalAction for
  // more details.
  //
  // Applied to a given node, we:
  // - Apply BeforeNode
  // - For each of the clades of the node, we:
  //   - Descend into each clade, cleaning up the sister clade with UpdateEdge as
  //   needed.
  //   - Apply BeforeNodeClade
  //   - For each edge descending from that clade, we:
  //     - Recur into the child node of the clade if it is not a leaf
  //     - Apply VisitEdge to the edge
  // - Apply AfterNode
  template <typename TidyTraversalActionT>
  void DepthFirstWithTidyAction(const NodeIdVector &starting_nodes,
                                const TidyTraversalActionT &action) {
    std::unordered_set<NodeId> visited_nodes;
    for (const auto &node_id : starting_nodes) {
      DepthFirstWithTidyActionForNode(action, NodeId(node_id), visited_nodes);
    }
  };
  // The portion of the traversal that is below a given node.
  template <typename TidyTraversalActionT>
  void DepthFirstWithTidyActionForNode(const TidyTraversalActionT &action,
                                       NodeId node_id,
                                       std::unordered_set<NodeId> &visited_nodes) {
    action.BeforeNode(node_id);
    // #288 #321 Here we are doing true and then false (left and then right).
    // This means that we get an update with the MotivatingExample as coded.
    DepthFirstWithTidyActionForNodeClade(action, node_id, true, visited_nodes);
    DepthFirstWithTidyActionForNodeClade(action, node_id, false, visited_nodes);
    action.AfterNode(node_id);
  };
  // The portion of the traversal that is below a given clade of a given node.
  // Do not recur into leaf nodes.
  // Dispatches on updating_below_: when set we are in "updating mode" (cleaning
  // dirty node-clades); otherwise we are in "modifying mode".
  template <typename TidyTraversalActionT>
  void DepthFirstWithTidyActionForNodeClade(const TidyTraversalActionT &action,
                                            NodeId node_id, bool is_edge_on_left,
                                            std::unordered_set<NodeId> &visited_nodes) {
    if (updating_below_) {
      UpdateWithTidyActionForNodeClade(action, node_id, is_edge_on_left, visited_nodes);
    } else {
      ModifyWithTidyActionForNodeClade(action, node_id, is_edge_on_left, visited_nodes);
    }
  };
  // Recursively perform updates under this node-clade.
  template <typename TidyTraversalActionT>
  void UpdateWithTidyActionForNodeClade(const TidyTraversalActionT &action,
                                        NodeId node_id, bool is_edge_on_left,
                                        std::unordered_set<NodeId> &visited_nodes) {
    if (IsDirtyBelow(node_id, is_edge_on_left)) {
      const auto node = GetDAGNode(node_id);
      for (const auto child_id : node.GetLeafward(is_edge_on_left)) {
        if (!GetDAGNode(NodeId(child_id)).IsLeaf()) {
          // #288 Here we are doing true and then false (left and then right).
          DepthFirstWithTidyActionForNodeClade(action, NodeId(child_id), true,
                                               visited_nodes);
          DepthFirstWithTidyActionForNodeClade(action, NodeId(child_id), false,
                                               visited_nodes);
          action.AfterNode(NodeId(child_id));
        }
        action.UpdateEdge(node_id, NodeId(child_id), is_edge_on_left);
        // UpdateEdge cleans the node-clade above this edge.
        DirtyVector(is_edge_on_left)[node_id.value_] = false;
      }
    }
    // When we get to this point, everything is clean below node_id,rotated.
    if (*updating_below_ == std::make_pair(node_id, is_edge_on_left)) {
      // We have completed updating our original goal of updating, and can turn off
      // updating mode.
      updating_below_ = std::nullopt;
    }
  };
  // Perform edge modification below this node clade, dirtying and cleaning up as
  // appropriate.
  template <typename TidyTraversalActionT>
  void ModifyWithTidyActionForNodeClade(const TidyTraversalActionT &action,
                                        NodeId node_id, bool is_edge_on_left,
                                        std::unordered_set<NodeId> &visited_nodes) {
    // We are in modifying mode.
    // If the _other_ clade is dirty, then go into updating mode and recur into it.
    if (IsDirtyBelow(node_id, !is_edge_on_left)) {
      updating_below_ = {node_id, !is_edge_on_left};
      UpdateWithTidyActionForNodeClade(action, node_id, !is_edge_on_left,
                                       visited_nodes);
    }
    // When we get to this point, the other clade is clean and we can proceed.
    action.BeforeNodeClade(node_id, is_edge_on_left);
    const auto node = GetDAGNode(node_id);
    for (const auto child_id : node.GetLeafward(is_edge_on_left)) {
      // visited_nodes prevents re-recurring into nodes reachable by many paths.
      if (visited_nodes.count(NodeId(child_id)) == 0) {
        visited_nodes.insert(NodeId(child_id));
        if (!GetDAGNode(NodeId(child_id)).IsLeaf()) {
          DepthFirstWithTidyActionForNode(action, NodeId(child_id), visited_nodes);
        }
      }
      action.ModifyEdge(node_id, NodeId(child_id), is_edge_on_left);
      SetDirtyStrictlyAbove(node_id);
      // We assume that ModifyEdge leaves (node_id, rotated) in a clean state.
      DirtyVector(is_edge_on_left)[node_id.value_] = false;
    }
  };
 private:
  // This constructor is really just meant for testing.
  explicit TidySubsplitDAG(size_t node_count);
  TidySubsplitDAG(size_t taxon_count, const Node::TopologyCounter &topology_counter,
                  const TagStringMap &tag_taxon_map);
  // Set the below matrix up to have all of the nodes below src_id below the
  // subsplit-clade described by (dst_rotated, dst_id). Meant to be used as part of a
  // depth-first traversal.
  void SetBelow(NodeId dst_id, bool dst_on_left, NodeId src_id);
  // If this is set then we are in an "updating mode", where we are updating below the
  // specified node-clade.
  std::optional<std::pair<NodeId, bool>> updating_below_;
  // above_rotated_(i,j) is true iff i,true is above j.
  EigenMatrixXb above_rotated_;
  // above_sorted(i,j) is true iff i,false is above j.
  EigenMatrixXb above_sorted_;
  // dirty_rotated_(i) is true iff there has been a calculation below i,true that
  // invalidates the p-hat PLV coming up into it.
  EigenArrayXb dirty_rotated_;
  // dirty_sorted_(i) is true iff there has been a calculation below i,false that
  // invalidates the p-hat PLV coming up into it.
  EigenArrayXb dirty_sorted_;
};
#ifdef DOCTEST_LIBRARY_INCLUDED
// Doctest: checks the above/below slicing matrices and the dirty-vector
// bookkeeping against hand-computed expectations on the example DAGs.
TEST_CASE("TidySubsplitDAG: slicing") {
  auto manual_dag = TidySubsplitDAG::ManualTrivialExample();
  // std::cout << manual_dag.AboveMatricesAsString() << std::endl;
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(0))), "[1, 0, 0, 1, 1, 1]\n");
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(1))), "[0, 1, 0, 1, 1, 1]\n");
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(2))), "[0, 0, 1, 0, 1, 1]\n");
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(3))), "[0, 0, 0, 1, 1, 1]\n");
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(4))), "[0, 0, 0, 0, 1, 1]\n");
  CHECK_EQ(GenericToString(manual_dag.AboveNode(NodeId(5))), "[0, 0, 0, 0, 0, 1]\n");
  // The programmatically-built trivial DAG must agree with the hand-built one.
  auto trivial_dag = TidySubsplitDAG::TrivialExample();
  CHECK_EQ(trivial_dag.AboveMatricesAsString(), manual_dag.AboveMatricesAsString());
  auto motivating_dag = TidySubsplitDAG::MotivatingExample();
  CHECK_EQ(GenericToString(motivating_dag.AboveNode(false, NodeId(4))),
           "[0, 0, 0, 0, 1, 1, 1, 1, 0, 0]\n");
  CHECK_EQ(GenericToString(motivating_dag.AboveNode(true, NodeId(4))),
           "[0, 0, 0, 0, 1, 0, 0, 0, 1, 1]\n");
  CHECK_EQ(GenericToString(motivating_dag.AboveNode(false, NodeId(7))),
           "[0, 0, 0, 0, 0, 0, 0, 1, 0, 0]\n");
  CHECK_EQ(GenericToString(motivating_dag.AboveNode(true, NodeId(7))),
           "[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]\n");
  CHECK_EQ(GenericToString(motivating_dag.BelowNode(false, NodeId(7))),
           "[0, 0, 1, 1, 1, 0, 0, 1, 0, 0]\n");
  CHECK_EQ(GenericToString(motivating_dag.BelowNode(true, NodeId(7))),
           "[1, 0, 0, 0, 0, 0, 0, 1, 0, 0]\n");
  // Dirtying strictly above node 4 should mark its ancestors on both sides.
  motivating_dag.SetDirtyStrictlyAbove(NodeId(4));
  CHECK_EQ(GenericToString(motivating_dag.DirtyVector(true)),
           "[0, 0, 0, 0, 0, 0, 0, 0, 1, 1]\n");
  CHECK_EQ(GenericToString(motivating_dag.DirtyVector(false)),
           "[0, 0, 0, 0, 0, 1, 1, 1, 0, 0]\n");
  motivating_dag.SetClean();
  // #321 Add test for Tidy traversal.
}
| 11,180
|
C++
|
.h
| 214
| 45.546729
| 88
| 0.678947
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,062
|
node.hpp
|
phylovi_bito/src/node.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// The Node class is how we express tree topologies.
//
// Nodes are immutable after construction except for the id_ and the leaves_.
// The id_ is provided for applications where it is useful to have the edges
// numbered with a contiguous set of integers. The leaves get
// their indices (which are contiguously numbered from 0 through the leaf
// count minus 1) and the rest get ordered according to a postorder traversal.
// Thus the root always has id equal to the number of nodes in the tree.
// See ExampleTopologies below for some examples.
//
// Because this integer assignment cannot be known as we
// are building up the tree, we must make a second pass through the tree, which
// must mutate state. However, this re-id-ing pass is itself deterministic, so
// doing it a second time will always give the same result.
//
// leaves_ is a bitset indicating the set of leaves below. Similarly it needs to
// be calculated on a second pass, because we don't even know the size of the
// bitset as the tree is being built.
//
// Both of these features are prepared using the Polish method.
//
// In summary, call Polish after building your tree if you need to use internal
// node ids or leaf sets. Note that Tree construction calls Polish; if you are
// manually manipulating the topology, make sure you perform those
// manipulations with that in mind.
//
// Equality is in terms of tree topologies. These mutable members don't matter.
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "bitset.hpp"
#include "intpack.hpp"
#include "sugar.hpp"
class Node {
public:
using NodePtr = std::shared_ptr<Node>;
using Topology = NodePtr;
using NodePtrVec = std::vector<NodePtr>;
using NodePtrVecPtr = std::shared_ptr<NodePtrVec>;
using TopologyCounter = std::unordered_map<NodePtr, uint32_t>;
// This is the type of functions that are used in the PCSP recursion
// functions. See `doc/svg/pcsp.svg` for a diagram of the PCSP traversal. In that
// file, the first tree shows the terminology, and the subsequent trees show
// the calls to f_root and f_internal.
//
// The signature is in 5 parts. The first 4 describe the position in the tree
// and then the direction: the sister clade, the focal clade, child 0, and
// child 1. False means down the tree structure and true means up. The 5th
// part is the top of the virtual root clade, namely the clade containing the
// virtual root (shown in gray in the diagram). Caution: in the case where the
// virtual root clade is above the subsplit, the "virtual root clade" will be
// the entire tree. There's nothing else we can do without rerooting the tree.
// It's not too hard to exclude the undesired bits with a conditional tree
// traversal. See IndexerRepresentationOfTopology for an example.
using UnrootedPCSPFun =
std::function<void(const Node*, bool, const Node*, bool, const Node*, bool,
const Node*, bool, const Node*)>;
// The rooted version just uses: sister clade, the focal clade, child 0, and child 1.
using RootedPCSPFun =
std::function<void(const Node*, const Node*, const Node*, const Node*)>;
using TwoNodeFun = std::function<void(const Node*, const Node*)>;
// A function that takes the following node arguments: grandparent, parent, sister,
// child0, child1.
using NeighborFun = std::function<void(const Node*, const Node*, const Node*,
const Node*, const Node*)>;
public:
explicit Node(uint32_t leaf_id, Bitset leaves);
explicit Node(NodePtrVec children, size_t id, Bitset leaves);
size_t Id() const { return id_; }
uint64_t Tag() const { return tag_; }
const Bitset& Leaves() const { return leaves_; }
std::string TagString() const { return StringOfPackedInt(this->tag_); }
uint32_t MaxLeafID() const { return MaxLeafIDOfTag(tag_); }
uint32_t LeafCount() const { return LeafCountOfTag(tag_); }
size_t Hash() const { return hash_; }
bool IsLeaf() const { return children_.empty(); }
const NodePtrVec& Children() const { return children_; }
void AddChild(NodePtr child) { children_.push_back(child); }
void AddChildren(NodePtr left_child, NodePtr right_child) {
children_.resize(2);
children_[0] = left_child;
children_[1] = right_child;
}
// Creates a subsplit bitset from given node. Requires tree must be bifurcating.
Bitset BuildSubsplit() const;
// Creates an edge PCSP from edge below given clade's side. Requires tree must be
// bifurcating.
Bitset BuildPCSP(const SubsplitClade clade) const;
// Creates a vector of all subsplit bitsets for all nodes in topology.
std::unordered_set<Bitset> BuildSetOfSubsplits() const;
// Creates a vector of all PCSP bitsets for all edges in topology.
std::unordered_set<Bitset> BuildSetOfPCSPs() const;
bool operator==(const Node& other) const;
NodePtr DeepCopy() const;
void Preorder(std::function<void(const Node*)> f) const;
// ConditionalPreorder continues to recur as long as f returns true.
void ConditionalPreorder(std::function<bool(const Node*)> f) const;
void Postorder(std::function<void(const Node*)> f) const;
void LevelOrder(std::function<void(const Node*)> f) const;
// Apply the pre function before recurring down the tree, and then apply the
// post function as we are recurring back up the tree.
void DepthFirst(std::function<void(const Node*)> pre,
std::function<void(const Node*)> post) const;
// We take in two functions, f_root, and f_internal, each of which take three
// edges.
// We assume that f_root is symmetric in its last two arguments so that
// f_root's signature actually looks like f_root(node0, {node1, node2}).
// We apply f_root to the descendant edges like so: 012, 120, and 201. Because
// f_root is symmetric in the last two arguments, we are going to get all of
// the distinct calls of f.
// At the internal nodes we cycle through triples of (node, sister, parent)
// for f_internal.
void TriplePreorder(
std::function<void(const Node*, const Node*, const Node*)> f_root,
std::function<void(const Node*, const Node*, const Node*)> f_internal) const;
// Iterate f through (node, sister, parent) for bifurcating trees using a
// preorder traversal.
void TriplePreorderBifurcating(
std::function<void(const Node*, const Node*, const Node*)> f) const;
// As above, but getting indices rather than nodes themselves.
void TripleIdPreorderBifurcating(std::function<void(size_t, size_t, size_t)> f) const;
// These two functions take functions accepting triples of (node_id,
// child0_id, child1_id) and apply them according to various traversals.
void BinaryIdPreorder(std::function<void(size_t, size_t, size_t)> f) const;
void BinaryIdPostorder(std::function<void(size_t, size_t, size_t)> f) const;
// See the typedef of UnrootedPCSPFun and RootedPCSPFun to understand the argument
// type to these functions.
void UnrootedPCSPPreorder(UnrootedPCSPFun f) const;
// Apply a RootedPCSPFun to the nodes through a preorder traversal. When allow_leaves
// is on, the function will be applied on both the internal and leaf nodes.
// Otherwise, it is only applied on the internal nodes.
void RootedPCSPPreorder(RootedPCSPFun f, bool allow_leaves) const;
// Iterate over (leaf sister, leaf) pairs in order. Rooted because that's the only
// case in which we are guaranteed to have a well defined set of such pairs.
void RootedSisterAndLeafTraversal(TwoNodeFun f) const;
// This function prepares the id_ and leaves_ member variables as described at
// the start of this document. It returns a map that maps the tags to their
// indices. It's the verb, not the nationality.
TagSizeMap Polish(bool update_leaves = true,
std::optional<size_t> leaf_count_opt = std::nullopt);
NodePtr Deroot();
// ** I/O
// Return a vector such that the ith component describes the indices for nodes
// above the current node.
SizeVectorVector IdsAbove() const;
// Build a map from each node's id to its parent node. For rootward traversal of tree.
std::unordered_map<size_t, const Node*> BuildParentNodeMap() const;
// Output as Newick string, with option for branch lengths.
std::string Newick(std::function<std::string(const Node*)> node_labeler,
const DoubleVectorOption& branch_lengths = std::nullopt) const;
// Output as Newick string, with options for branch lengths and labels.
std::string Newick(const DoubleVectorOption& branch_lengths = std::nullopt,
const TagStringMapOption& node_labels = std::nullopt,
bool show_tags = false) const;
// Construct a vector such that the ith entry is the id of the parent of the
// node having id i. We assume that the indices are contiguous, and that the
// root has the largest id.
std::vector<size_t> ParentIdVector() const;
// Outputs this node's id, adjacent leaf ids, and leaf clade bitset to string.
std::string NodeIdAndLeavesToString() const;
// Outputs `NodeIdAndLeavesToString` for all entire topology below this node.
std::string NodeIdAndLeavesToStringForTopology() const;
// ** Static methods
// Constructs a leaf node with given id, and an empty taxon clade by default for
// its leaves.
static NodePtr Leaf(uint32_t id, Bitset leaves = Bitset(0));
// Constructs a leaf node with given id, and a single taxon clade with a length of
// taxon_count for its leaves.
static NodePtr Leaf(uint32_t id, size_t taxon_count);
// Join builds a Node with the given descendants, or-ing the leaves_ of the
// descendants.
static NodePtr Join(NodePtrVec children, size_t id = SIZE_MAX);
static NodePtr Join(NodePtr left, NodePtr right, size_t id = SIZE_MAX);
// Build a tree given a vector of indices, such that each entry gives the
// id of its parent. We assume that the indices are contiguous, and that
// the root has the largest id.
static NodePtr OfParentIdVector(const std::vector<size_t>& indices);
// topology with internal node indices
// -------- --------------------------
// 0: (0,1,(2,3)) (0,1,(2,3)4)5;
  // 1: (0,1,(2,3)) again (0,1,(2,3)4)5;
// 2: (0,2,(1,3)) (0,2,(1,3)4)5;
// 3: (0,(1,(2,3))) (0,(1,(2,3)4)5)6;
static NodePtrVec ExampleTopologies();
// Make a maximally-unbalanced "ladder" tree.
static NodePtr Ladder(uint32_t leaf_count);
// A "cryptographic" hash function from Stack Overflow (the std::hash function
// appears to leave uint32_ts as they are, which doesn't work for our
// application).
// https://stackoverflow.com/a/12996028/467327
static uint32_t SOHash(uint32_t x);
// Bit rotation from Stack Overflow.
// c is the amount by which we rotate.
// https://stackoverflow.com/a/776523/467327
static size_t SORotate(size_t n, uint32_t c);
private:
// Vector of direct child descendants of node in tree topology.
NodePtrVec children_;
// NOTE: See beginning of file for notes about the id and the leaves.
// Unique identifier in tree containing node.
size_t id_;
// Bitset of all leaves below node (alternatively can view a leaf as a member of the
// taxon set in the tree).
Bitset leaves_;
// The tag_ is a pair of packed integers representing (1) the maximum leaf ID
// of the leaves below this node, and (2) the number of leaves below the node.
uint64_t tag_;
// Hashkey for node maps.
size_t hash_;
// Make copy constructors private to eliminate copying.
Node(const Node&);
Node& operator=(const Node&);
// This is a private Postorder that can change the Node.
void MutablePostorder(std::function<void(Node*)> f);
std::string NewickAux(std::function<std::string(const Node*)> node_labeler,
const DoubleVectorOption& branch_lengths) const;
// Make a leaf bitset by or-ing the leaf bitsets of the provided children.
// Private just to avoid polluting the public interface.
static Bitset LeavesOf(const NodePtrVec& children);
};
// Compare NodePtrs by their Nodes.
// Equality is structural (delegates to Node's own operator==), not pointer
// identity: two distinct heap objects with the same topology compare equal.
inline bool operator==(const Node::NodePtr& lhs, const Node::NodePtr& rhs) {
  return *lhs == *rhs;
}
// Inequality is the negation of the structural equality above.
inline bool operator!=(const Node::NodePtr& lhs, const Node::NodePtr& rhs) {
  return !(lhs == rhs);
}
// Specialize std::hash and std::equal_to so that NodePtrs key unordered
// containers by the structure of the pointed-to Node rather than by pointer
// value.
namespace std {
template <>
struct hash<Node::NodePtr> {
  // Uses the Node's precomputed hash (see hash_ member).
  size_t operator()(const Node::NodePtr& n) const { return n->Hash(); }
};
template <>
struct equal_to<Node::NodePtr> {
  bool operator()(const Node::NodePtr& lhs, const Node::NodePtr& rhs) const {
    // Delegates to the NodePtr operator== above, i.e. structural Node equality.
    return lhs == rhs;
  }
};
}  // namespace std
#ifdef DOCTEST_LIBRARY_INCLUDED
typedef std::unordered_map<uint64_t, Bitset> TagBitsetMap;

// Build a map taking each node's Tag to the bitset of leaves sitting below that
// node. Currently only used for testing.
TagBitsetMap TagLeafSetMapOf(Node::NodePtr topology) {
  TagBitsetMap tag_to_leaves;
  const auto leaf_count = topology->LeafCount();
  topology->Postorder([&tag_to_leaves, leaf_count](const Node* node) {
    Bitset below(static_cast<size_t>(leaf_count));
    if (node->IsLeaf()) {
      below.set(node->MaxLeafID());
    } else {
      // An internal node's leaf set is the union of its children's leaf sets,
      // which are already present in the map thanks to the postorder traversal.
      for (const auto& child : node->Children()) {
        below |= tag_to_leaves.at(child->Tag());
      }
    }
    SafeInsert(tag_to_leaves, node->Tag(), std::move(below));
  });
  return tag_to_leaves;
}
// Exercises Node: traversals, hashing, structural equality, parent-id-vector
// round trips, derooting, and the sister/leaf traversal.
TEST_CASE("Node") {
  Node::NodePtrVec examples = Node::ExampleTopologies();
  Node::NodePtr t1 = examples[0];       // 0: (0,1,(2,3))
  Node::NodePtr t1_twin = examples[1];  // 1: (0,1,(2,3)) again
  Node::NodePtr t2 = examples[2];       // 2: (0,2,(1,3))
  Node::NodePtr t3 = examples[3];       // 3: (0,(1,(2,3)))
  // ((((0,1)7,2)8,(3,4)9)10,5,6)11;
  Node::NodePtr tbig = Node::OfParentIdVector({7, 7, 8, 9, 9, 11, 11, 8, 10, 10, 11});
  // Record every (node, sister, parent) id triple that TriplePreorder visits.
  std::vector<std::string> triples;
  auto collect_triple = [&triples](const Node* node, const Node* sister,
                                   const Node* parent) {
    triples.push_back(std::to_string(node->Id()) + ", " + std::to_string(sister->Id()) +
                      ", " + std::to_string(parent->Id()));
  };
  tbig->TriplePreorder(collect_triple, collect_triple);
  std::vector<std::string> correct_triples(
      {"10, 5, 6", "8, 9, 10", "7, 2, 8", "0, 1, 7", "1, 0, 7", "2, 7, 8", "9, 8, 10",
       "3, 4, 9", "4, 3, 9", "5, 6, 10", "6, 10, 5"});
  CHECK_EQ(triples, correct_triples);
  // This is actually a non-trivial test (see note in Node constructor above),
  // which shows why we need bit rotation.
  CHECK_NE(t1->Hash(), t2->Hash());
  // Equality is structural: identical topologies compare equal even when they
  // are distinct objects.
  CHECK_EQ(t1, t1_twin);
  CHECK_NE(t1, t2);
  // Tree with trifurcation at the root.
  Node::NodePtr t1_alt = Node::OfParentIdVector({5, 5, 4, 4, 5});
  CHECK_EQ(t1, t1_alt);
  // Bifurcating tree.
  Node::NodePtr t3_alt = Node::OfParentIdVector({6, 5, 4, 4, 5, 6});
  CHECK_EQ(t3, t3_alt);
  for (const auto& topology : examples) {
    // ParentIdVector and OfParentIdVector should be inverses, and DeepCopy
    // should preserve structural equality.
    CHECK_EQ(topology, Node::OfParentIdVector(topology->ParentIdVector()));
    CHECK_EQ(topology, topology->DeepCopy());
    // Each node's Leaves() bitset should agree with the union computed
    // independently by TagLeafSetMapOf.
    auto tag_leaf_set_map = TagLeafSetMapOf(topology);
    topology->Preorder([&tag_leaf_set_map](const Node* node) {
      CHECK_EQ(node->Leaves(), tag_leaf_set_map.at(node->Tag()));
    });
  }
  // Check Deroot when we deroot on the right.
  CHECK_EQ(t1, t3->Deroot());
  // Check Deroot when we deroot on the left.
  CHECK_EQ(Node::OfParentIdVector({3, 3, 3}),
           // tree ((0,1)3,2)4
           Node::OfParentIdVector({3, 3, 4, 4})->Deroot());
  CHECK_EQ(Node::OfParentIdVector({4, 4, 5, 6, 5, 6}), Node::Ladder(4));
  // The sister-and-leaf traversal of t3 should report sisters 5, 4, 3, 2 in
  // that order.
  SizeVector correct_sisters({5, 4, 3, 2});
  SizeVector sisters;
  t3->RootedSisterAndLeafTraversal([&sisters](const Node* sister, const Node* leaf) {
    sisters.push_back(sister->Id());
  });
  CHECK_EQ(correct_sisters, sisters);
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 16,028
|
C++
|
.h
| 320
| 46.309375
| 88
| 0.700785
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,063
|
tree_collection.hpp
|
phylovi_bito/src/tree_collection.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include "generic_tree_collection.hpp"
#include "tree.hpp"
using TreeCollection = GenericTreeCollection<Tree>;
#ifdef DOCTEST_LIBRARY_INCLUDED
// Checks topology counting and the DropFirst burn-in helper on a collection of
// the first four example trees.
TEST_CASE("TreeCollection") {
  auto example_trees = Tree::ExampleTrees();
  example_trees.resize(4);
  TreeCollection collection(example_trees);
  // Tally the topologies by their tag-annotated Newick representations.
  std::unordered_map<std::string, uint32_t> newick_counts;
  for (const auto &[topology, count] : collection.TopologyCounter()) {
    SafeInsert(newick_counts, topology->Newick(std::nullopt, std::nullopt, true),
               count);
  }
  std::unordered_map<std::string, uint32_t> expected_counts(
      {{"(0_1,1_1,(2_1,3_1)3_2)3_4;", 2},
       {"(0_1,2_1,(1_1,3_1)3_2)3_4;", 1},
       {"(0_1,(1_1,(2_1,3_1)3_2)3_3)3_4;", 1}});
  CHECK_EQ(newick_counts, expected_counts);
  // Dropping the first quarter removes one of the four trees; dropping the
  // whole fraction empties the collection.
  collection.DropFirst(0.25);
  CHECK_EQ(collection.TreeCount(), 3);
  collection.DropFirst(1.);
  CHECK_EQ(collection.TreeCount(), 0);
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 1,115
|
C++
|
.h
| 28
| 36.357143
| 77
| 0.698984
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,064
|
subsplit_dag.hpp
|
phylovi_bito/src/subsplit_dag.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// The purpose of this class is to hold a DAG built from the parent-child relationships
// of the subsplits. We wish to have information associated with both the nodes and
// edges of the DAG. Our strategy for doing that is via non-negative integer indices:
// nodes have a unique size_t `Id`, and each edge has a unique `edge_idx`. We can then
// store arbitrary associated information in other data structures associated with these
// indices.
//
// The DAG has a well-defined notion of rootward and leafward.
//
// The data structure for this DAG is as follows:
// - The nodes of the DAG are vectors of indices representing their edges to other
// nodes. These include edges to the children (leafward edges) and also the edges that
// are connecting to that node (rootward edges). Furthermore, these sets of edges are
// separated into two subsets: those that descend from the left component of the
// subsplit
// ("rotated" edges) and those that descend from the right component of the subsplit
// ("sorted" edges). The nodes of the DAG also include bitsets describing the taxa they
// contain.
// - The edges of the DAG are indexed separately, and there is a map (`dag_edges_`)
// which maps from pairs of node Ids to its edge index. These DAG edges are indexed
// such that all of the sorted edges descending from a given node have a contiguous set
// of indices, as do all of the rotated indices. The range of indices for such a set of
// edges is given by the `parent_to_child_range_` map.
// There is also a `subsplit_to_id_` map that maps from the subsplit bitset of the DAG
// node (and its rotated version) to the node Id.
// - To clarify terminology, the "DAG root node" refers to the universal ancestor and is
// representated by a bitset where the first half is all ones and the second half is all
// zeros (e.g. for taxon_count_ = 4, 1111|0000). Note that this implies that the DAG
// root node only has rotated children. Children of the DAG root node are called
// "rootsplits" and partition the whole taxon set.
#pragma once
#include "resizer.hpp"
#include "reindexer.hpp"
#include "rooted_tree_collection.hpp"
#include "sbn_maps.hpp"
#include "subsplit_dag_action.hpp"
#include "nni_operation.hpp"
#include "subsplit_dag_node.hpp"
#include "node.hpp"
#include "stopwatch.hpp"
// Types of node neighbors/adjacencies.
enum class NodeAdjacent { Parent, LeftChild, RightChild };
static const inline size_t NodeAdjacentCount = 3;
// Enum helper for NodeAdjacent: supplies human-readable labels plus string and
// stream output.
class NodeAdjacentEnum
    : public EnumWrapper<NodeAdjacent, size_t, NodeAdjacentCount, NodeAdjacent::Parent,
                         NodeAdjacent::RightChild> {
 public:
  static inline const std::string Prefix = "NodeAdjacent";
  static inline const Array<std::string> Labels = {
      {"Parent", "LeftChild", "RightChild"}};

  // Render as "NodeAdjacent::<Label>".
  static std::string ToString(const NodeAdjacent e) { return Prefix + "::" + Labels[e]; }
  friend std::ostream &operator<<(std::ostream &os, const NodeAdjacent e) {
    return os << ToString(e);
  }
};
// Types of edge neighbors/adjacencies.
enum class EdgeAdjacent { Parent, Sister, LeftChild, RightChild };
static const inline size_t EdgeAdjacentCount = 4;
// Enum helper for EdgeAdjacent: supplies human-readable labels plus string and
// stream output.
class EdgeAdjacentEnum
    : public EnumWrapper<EdgeAdjacent, size_t, EdgeAdjacentCount, EdgeAdjacent::Parent,
                         EdgeAdjacent::RightChild> {
 public:
  static inline const std::string Prefix = "EdgeAdjacent";
  static inline const Array<std::string> Labels = {
      {"Parent", "Sister", "LeftChild", "RightChild"}};

  // Render as "EdgeAdjacent::<Label>".
  static std::string ToString(const EdgeAdjacent e) { return Prefix + "::" + Labels[e]; }
  friend std::ostream &operator<<(std::ostream &os, const EdgeAdjacent e) {
    return os << ToString(e);
  }
};
class SubsplitDAG {
public:
// ** Constructor
SubsplitDAG(const SubsplitDAG &) = default;
// Build empty Subsplit DAG with no topologies and no taxa.
SubsplitDAG();
// Build a Subsplit DAG expressing all tree topologies from tree_collection.
explicit SubsplitDAG(const RootedTreeCollection &tree_collection);
// ** Comparator
  // This compare ensures that both DAGs have the same topology according to their
  // set of node and edge bitsets. However, it does not ensure that DAGs have the
  // same ids and idxs for their respective nodes and edges, only that they contain
  // the same set of nodes and edges (as long as taxon positions in the
  // clades have the same mapping).
int Compare(const SubsplitDAG &other, const bool quiet = true) const;
static int Compare(const SubsplitDAG &lhs, const SubsplitDAG &rhs,
const bool quiet = true);
friend bool operator==(const SubsplitDAG &lhs, const SubsplitDAG &rhs);
friend bool operator!=(const SubsplitDAG &lhs, const SubsplitDAG &rhs);
// Compares the subsplits between two DAGs. Returns the subsplits from lhs_dag not in
// rhs_dag, and subsplits from the rhs_dag not in lhs_dag.
static std::tuple<std::set<Bitset>, std::set<Bitset>, std::set<Bitset>>
CompareSubsplits(const SubsplitDAG &lhs, const SubsplitDAG &rhs);
// Compares the subsplits between two DAGs. Returns the subsplits from lhs_dag not in
// rhs_dag, and subsplits from the rhs_dag not in lhs_dag.
static std::tuple<std::set<Bitset>, std::set<Bitset>, std::set<Bitset>> ComparePCSPs(
const SubsplitDAG &lhs, const SubsplitDAG &rhs);
// ** Count
// The total number of individual taxa in the DAG.
size_t TaxonCount() const;
// The total number of nodes in the DAG (including the root and leaves).
size_t NodeCount() const;
// The total number of nodes in the DAG (excluding the root, but including the
// leaves).
size_t NodeCountWithoutDAGRoot() const;
// The current minimum and maximum node Id values.
NodeIdPair NodeIdRange() const;
// The total number of rootsplits in DAG. These count all direct descendants of the
// root (also, the union of each rootsplits clades cover the set of all taxa in the
// DAG).
size_t RootsplitCount() const;
// The total number of edges in the DAG (excluding edges which terminate at a root or
// leaf node).
size_t EdgeCount() const;
  // The total number of edges in the DAG (including edges which terminate at a root
  // or leaf node).
size_t EdgeCountWithLeafSubsplits() const;
// The current minimum and maximum edge Idx values.
EdgeIdPair EdgeIdxRange() const;
// The total number of tree topologies expressable by the DAG.
double TopologyCount() const;
// Checks how many valid neighbors nodes of given type exist in the DAG for the
// specified subsplit.
SizePair GetSubsplitNodeNeighborCounts(const Bitset &subsplit,
const Direction direction) const;
// ** I/O
// Print all nodes:
// - One line is given for (node_id | subsplit_bitset).
// - One line is given for each combination of leafward/rootward, sorted/rotated, for
// all adjacent node_ids.
void Print() const;
// Print all nodes as (node_id | node_bitsets) pairs, one-per-line.
void PrintNodes() const;
// Print all edges/PCSP, as (pcsp_bitset | edge_idx) pairs, one-per-line.
void PrintEdgeIndexer() const;
// Print all edges/PCSP, as (parent_node_id | child_node_id) pairs, one-per-line.
void PrintDAGEdges() const;
// Print all nodes, as (bitset | range_begin | range_end)
void PrintParentToRange() const;
// Print DAG Storage contents.
void PrintStorage() const { storage_.Print(); }
// Output DOT format graph of DAG to file.
void ToDot(const std::string file_path, bool show_index_labels = true) const;
// Output DOT format graph of DAG to a string.
std::string ToDot(bool show_index_labels = true) const;
// ** Build Indexers/Vectors
// Create a EdgeIndexer representing the DAG.
// The EdgeIndexer is a map (edge/PCSP bitset -> edge index).
// The edge/PCSP indexer contains leafs and rootsplits.
BitsetSizeMap BuildEdgeIndexer() const;
// Builds inverse of EdgeIndexer map: (edge index -> edge/PCSP bitset).
SizeBitsetMap BuildInverseEdgeIndexer() const;
// Get the rotated and sorted parents of the node with the given subsplit.
NodeIdVectorPair FindParentNodeIds(const Bitset &subsplit) const;
NodeIdVectorPair FindParentNodeIdsViaMap(const Bitset &subsplit) const;
NodeIdVectorPair FindParentNodeIdsViaScan(const Bitset &subsplit,
bool graft_nodes_only = false) const;
NodeId FindFirstParentNodeId(const Bitset &subsplit) const;
// Get the rotated and sorted children of the node with the given subsplit.
NodeIdVectorPair FindChildNodeIds(const Bitset &subsplit) const;
NodeIdVectorPair FindChildNodeIdsViaMap(const Bitset &subsplit) const;
NodeIdVectorPair FindChildNodeIdsViaScan(const Bitset &subsplit,
bool graft_nodes_only = false) const;
NodeId FindFirstChildNodeId(const Bitset &subsplit, const SubsplitClade clade) const;
// Output RootedIndexerRepresentation of DAG (from RootedSBNMaps).
// RootedIndexerRepresentation is a vector of edge idxs in topological preorder.
RootedIndexerRepresentation IndexerRepresentationOf(const BitsetSizeMap &indexer,
const Node::NodePtr &topology,
size_t default_index) const;
// ** Access
// Get Taxon's bitset clade positional id.
TaxonId GetTaxonId(const std::string &name) const;
// Get vector of all taxon ids.
TaxonIdVector GetTaxonIds() const;
// Get node based on node id.
SubsplitDAGNode GetDAGNode(const NodeId node_id) const;
MutableSubsplitDAGNode GetDAGNode(const NodeId node_id);
// Get the subsplit bitset for the given node.
Bitset GetDAGNodeBitset(const NodeId node_id) const;
// Get the node id based on the subsplit bitset.
NodeId GetDAGNodeId(const Bitset &subsplit) const;
// Gets the node id of the DAG root.
NodeId GetDAGRootNodeId() const;
// Get the node ids corresponding to the rootsplits.
ConstNeighborsView GetRootsplitNodeIds() const;
// Get the edge ids corresponding to the rootsplits.
EdgeIdVector GetRootsplitEdgeIds() const;
// Get first edge id corresponding to the rootsplits.
EdgeId GetFirstRootsplitEdgeId() const;
// Get the node ids corresponding to the leaves for all taxa.
NodeIdVector GetLeafNodeIds() const;
// Get the node id corresponding to the given taxon.
NodeId GetLeafNodeId(const TaxonId taxon_id) const;
// Get leaf edge ids for given taxon.
EdgeIdVector GetLeafEdgeIds() const;
EdgeIdVector GetLeafEdgeIds(const TaxonId taxon_id) const;
// Get edge based on edge id.
ConstLineView GetDAGEdge(const EdgeId edge_id) const;
// Get the PCSP bitset for the given edge.
Bitset GetDAGEdgeBitset(const EdgeId edge_id) const;
// Get the edge id by for given PCSP.
EdgeId GetEdgeIdx(const Bitset &parent_subsplit, const Bitset &child_subsplit) const;
EdgeId GetEdgeIdx(const NodeId parent_id, const NodeId child_id) const;
EdgeId GetEdgeIdx(const Bitset &edge_pcsp) const;
EdgeId GetEdgeIdx(const NNIOperation &nni) const;
// Get focal clade of edge.
SubsplitClade GetFocalClade(const EdgeId edge_id) const;
// Get sister clade of edge.
SubsplitClade GetSisterClade(const EdgeId edge_id) const;
// Get NNI from edge index.
NNIOperation GetNNI(const EdgeId edge_id) const;
// Find a neighboring NNI. Returns the first NNI discovered.
NNIOperation FindNNINeighborInDAG(const NNIOperation &nni) const;
// Finds all the neighboring NNIs in the DAG. Array accessed by the child clade
// swapped with the sister to produce the neighbor NNI.
SubsplitCladeEnum::Array<std::optional<NNIOperation>> FindAllNNINeighborsInDAG(
const NNIOperation &nni) const;
// Get the range of outgoing idxs from the given clade of a subsplit.
EdgeIdPair GetChildEdgeRange(const Bitset &subsplit,
const bool is_edge_on_left) const;
// Get set of all taxon names.
StringVector BuildSetOfTaxonNames() const;
// Get set of all node Subsplit bitsets.
std::set<Bitset> BuildSetOfNodeBitsets() const;
// Get set of all edge PCSP bitsets.
std::set<Bitset> BuildSetOfEdgeBitsets() const;
// Get reference to taxon map.
const StringTaxonIdMap &GetTaxonMap() const;
// Get reference to the tag taxon map.
const TagStringMapOption GetTagTaxonMap() const;
// Get reference to subsplit -> node_id map.
const BitsetNodeIdMap &GetSubsplitToIdMap() const;
// Get reference to parent_node -> child_edge_range map.
const NodeIdEdgeIdPairMap &GetParentNodeToChildEdgeRangeMap() const;
// Maps for finding potential node neighbors.
const BitsetNodeIdSetMap &GetSubsplitUnionMap() const;
const BitsetNodeIdSetMap &GetSubsplitUnionGraftMap() const;
const BitsetNodeIdSetMap &GetSubsplitCladeMap() const;
const BitsetNodeIdSetMap &GetSubsplitCladeGraftMap() const;
// ** DAG Lambda Iterators
// These methods iterate over the nodes and take lambda functions with arguments
// relative to current node.
// NodeLambda is for iterating over nodes.
using NodeLambda = std::function<void(SubsplitDAGNode)>;
// EdgeDestinationLambda takes in a rotation status (true is rotated, false is not)
// and a "destination" node. For iterating over DAG edges with a rotation status.
using EdgeDestinationLambda = std::function<void(bool, SubsplitDAGNode)>;
// EdgeAndNodeLambda takes a PCSP index of an edge, its rotation status, and an index
// of the node on the other side of the edge.
using EdgeAndNodeLambda = std::function<void(const EdgeId, const bool, const NodeId)>;
// ParentEdgeChildLambda takes: the parent id in the DAG, the rotation status of the
// edge, the child id, and the GCPSP index of the edge.
using ParentRotationChildEdgeLambda =
std::function<void(const NodeId, const bool, const NodeId, const EdgeId)>;
// Iterate over the "real" nodes, i.e. those that do not correspond to
// leaf subsplits or the DAG root node.
void IterateOverRealNodes(const NodeLambda &f) const;
// Iterate over the all leafward edges, rotated and sorted, of node using an
// EdgeDestinationLambda.
void IterateOverLeafwardEdges(SubsplitDAGNode node,
const EdgeDestinationLambda &f) const;
// Iterate over only the rotated/sorted leafward edges of node using a NodeLambda.
void IterateOverLeafwardEdges(SubsplitDAGNode node, bool rotated,
const NodeLambda &f) const;
// Iterate over the leafward edges, supplying both the index of the edge and
// the SubsplitDAGNode of the corresponding child.
void IterateOverLeafwardEdgesAndChildren(SubsplitDAGNode node,
const EdgeAndNodeLambda &f) const;
// Iterate over the rootward edges of node using an EdgeDestinationLambda.
// Excludes edges to the DAG root node.
void IterateOverRootwardEdges(SubsplitDAGNode node,
const EdgeDestinationLambda &f) const;
// Iterate over the rootward edges, supplying both the a PCSP index of the edge and
// the SubsplitDAGNode of the corresponding child.
void IterateOverRootwardEdgesAndParents(SubsplitDAGNode node,
const EdgeAndNodeLambda &f) const;
// Iterate over the leafward edges, supplying the parent node id, child node id,
// rotation of child, and the PCSP index of the rootward edge connecting the two.
void IterateOverParentAndChildAndLeafwardEdges(
SubsplitDAGNode node, const ParentRotationChildEdgeLambda &f) const;
// ** DAG Traversals
// These perform a traversal of the DAG and takes an argument TraversalActionT.
// TraversalActionT passes four functions of the following form and order:
// (1) void BeforeNode(size_t node_id)
// (2) void AfterNode(size_t node_id)
// (3) void BeforeNodeClade(size_t node_id, bool rotated)
// (4) void VisitEdge(size_t node_id, size_t child_id, bool rotated)
// See below for order of usage.
// Apply a TraversalAction via a depth first traversal. Do not visit leaf nodes.
// Applied to a given node, we:
// - Apply BeforeNode()
// - For each of the clades of the node, we:
// - Apply BeforeNodeClade()
// - For each edge descending from that clade, we:
// - Recur into the child node of the clade if it is not a leaf
// - Apply VisitEdge() to the edge
// - Apply AfterNode()
template <typename TraversalActionT>
void DepthFirstWithAction(const NodeIdVector &starting_nodes,
const TraversalActionT &action) const {
std::unordered_set<NodeId> visited_nodes;
for (const auto &node_id : starting_nodes) {
DepthFirstWithActionForNode(action, NodeId(node_id), visited_nodes);
}
};
// The portion of the traversal that is below a given node.
template <typename TraversalActionT>
void DepthFirstWithActionForNode(const TraversalActionT &action, NodeId node_id,
std::unordered_set<NodeId> &visited_nodes) const {
action.BeforeNode(node_id);
DepthFirstWithActionForNodeClade(action, node_id, false, visited_nodes);
DepthFirstWithActionForNodeClade(action, node_id, true, visited_nodes);
action.AfterNode(node_id);
};
// The portion of the traversal that is below a given clade of a given node.
// Does not recurse into leaf nodes.
template <typename TraversalActionT>
void DepthFirstWithActionForNodeClade(
const TraversalActionT &action, NodeId node_id, bool is_edge_on_left,
std::unordered_set<NodeId> &visited_nodes) const {
action.BeforeNodeClade(node_id, is_edge_on_left);
const auto node = GetDAGNode(node_id);
for (const auto child_id : node.GetLeafward(is_edge_on_left)) {
if (visited_nodes.count(NodeId(child_id)) == 0) {
visited_nodes.insert(NodeId(child_id));
if (!GetDAGNode(NodeId(child_id)).IsLeaf()) {
DepthFirstWithActionForNode(action, NodeId(child_id), visited_nodes);
}
}
action.VisitEdge(node_id, NodeId(child_id), is_edge_on_left);
}
};
// ** DAG Node Traversals
// These functions produce a vector of node IDs representing an ordered traversal
// of the DAG.
// Creates a vector of node IDs representing a leafward DFS postorder traversal of
// the DAG.
[[nodiscard]] NodeIdVector LeafwardNodeTraversalTrace(
bool include_dag_root_node) const;
// Creates a vector of node IDs representing a rootward DFS postorder traversal of
// the DAG.
[[nodiscard]] NodeIdVector RootwardNodeTraversalTrace(
bool include_dag_root_node) const;
// Creates a vector of node IDs representing a reverse DFS postorder, leafward
// traversal of the DAG. NOTE: A reverse postorder traversal represents a leafward
// topological sort.
[[nodiscard]] NodeIdVector TopologicalNodeTraversalTrace() const;
// ** DAG Edge Traversals
// Creates a vector of edge IDs representing a leafward DFS postorder traversal of
// the DAG.
[[nodiscard]] EdgeIdVector LeafwardEdgeTraversalTrace(
bool include_dag_root_node) const;
// Creates a vector of edge IDs representing a rootward DFS postorder traversal of
// the DAG.
[[nodiscard]] EdgeIdVector RootwardEdgeTraversalTrace(
bool include_dag_root_node) const;
// Creates a vector of edge IDs representing a reverse DFS postorder, leafward
// traversal of the DAG. NOTE: A reverse postorder traversal represents a leafward
// topological sort.
[[nodiscard]] EdgeIdVector TopologicalEdgeTraversalTrace(
bool include_dag_root_node) const;
// Do a topological traversal on the edges of the DAG, including edges from the
// DAG root node to the rootsplits, supplying the relevant indices to a lambda.
void TopologicalEdgeTraversal(ParentRotationChildEdgeLambda f) const;
// ** Priors
// Discrete uniform distribution over each subsplit.
[[nodiscard]] EigenVectorXd BuildUniformQ() const;
// Uniform prior over all topologies in the subsplit support.
[[nodiscard]] EigenVectorXd BuildUniformOnTopologicalSupportPrior() const;
// Uniform prior over all topologies, whether or not they are in the support.
// Thus, this will only be a normalized probability distribution for each subsplit if
// all topologies are in the support.
[[nodiscard]] EigenVectorXd BuildUniformOnAllTopologiesPrior() const;
// Get a vector from each DAG node index to the probability of sampling that DAG node
// with the supplied SBN parameters. These SBN parameters must be indexed in a
// compatible way as the dag_edges_ of the subsplit DAG.
EigenVectorXd UnconditionalNodeProbabilities(
EigenConstVectorXdRef normalized_sbn_parameters) const;
// Get a map from each non-leaf subsplit to the probability of observing that
// subsplit with the supplied SBN parameters. See
// UnconditionalSubsplitProbabilityVector for notes.
BitsetDoubleMap UnconditionalSubsplitProbabilities(
EigenConstVectorXdRef normalized_sbn_parameters) const;
// Get a vector from each PCSP index to the Bayes-inverted probability of sampling
// the parent given the child.
EigenVectorXd InvertedGPCSPProbabilities(
EigenConstVectorXdRef normalized_sbn_parameters,
EigenConstVectorXdRef node_probabilities) const;
// ** Query DAG
// Does DAG have attached graftDAG?
bool ContainsGraft() const;
// Does a taxon with the given name exist in DAG?
bool ContainsTaxon(const std::string &name) const;
// Does a node with the given subsplit exist in DAG?
bool ContainsNode(const Bitset &subsplit) const;
bool ContainsNode(const NodeId node_id) const;
// Does an edge that connects the two nodes exist in DAG?
bool ContainsEdge(const Bitset &parent_subsplit, const Bitset &child_subsplit) const;
bool ContainsEdge(const NodeId parent_id, const NodeId child_id) const;
bool ContainsEdge(const Bitset &edge_subsplit) const;
bool ContainsEdge(const EdgeId edge_id) const;
// Does the node pair of an NNI exist in DAG?
bool ContainsNNI(const NNIOperation &nni) const;
// Does the given tree/topology exist in DAG?
bool ContainsTree(const RootedTree &tree, const bool is_quiet = true) const;
bool ContainsTopology(const Node::Topology &topology,
const bool is_quiet = true) const;
// Is node the root?
bool IsNodeRoot(const NodeId node_id) const;
// Is node a leaf?
bool IsNodeLeaf(const NodeId node_id) const;
// Does edge connect to the root node?
bool IsEdgeRoot(const EdgeId edge_id) const;
// Does edge connect to a leaf node?
bool IsEdgeLeaf(const EdgeId edge_id) const;
// ** Tree/Topologies
std::unordered_map<NodeId, const Node *> BuildDAGNodeIdToTreeNodeMapFromTopology(
const Node::Topology &topology) const;
// Build map from NodeId in DAG to NodeId in topology.
std::unordered_map<NodeId, size_t> BuildNodeIdMapFromTopology(
const Node::Topology &topology) const;
// Build map from EdgeId in DAG to edge's child NodeId in topology.
std::unordered_map<EdgeId, SizePair> BuildEdgeIdMapFromTopology(
const Node::Topology &topology) const;
using ParentToChildNodeIdMap =
std::unordered_map<NodeId, SubsplitCladeEnum::Array<NodeId>>;
using ChildToParentNodeIdMap = std::unordered_map<NodeId, NodeId>;
// Generate topology for a Node Id vector.
Node::Topology BuildTopologyFromNodeIdMap(ParentToChildNodeIdMap &tree_map,
NodeId rootsplit_id) const;
// Build Tree from a given topology using given DAG branch lengths.
RootedTree BuildTreeFromTopology(const Node::Topology &topology,
const EigenVectorXd &dag_branch_lengths) const;
// Output given tree to newick topology string using DAG's leaf labels (without branch
// lengths).
std::string TreeToNewickTree(const RootedTree &tree) const;
// Output given tree to newick tree string using DAG's leaf labels (with branch
// lengths).
std::string TreeToNewickTopology(const RootedTree &tree) const;
// Output given topology to newick topology string using DAG's leaf labels (with
// branch lengths).
std::string TopologyToNewickTopology(const Node::Topology &topology) const;
// Generates all possible topologies contained in DAG.
// Each node in a topology is constructed with SubsplitDAGNode ID as Node ID.
Node::NodePtrVec GenerateAllTopologies() const;
std::vector<RootedTree> GenerateAllTrees(
const EigenVectorXd &dag_branch_lengths) const;
std::string ToNewickOfAllTopologies() const;
// Generate a set of tree topologies that span all nodes and edges in the DAG.
Node::NodePtrVec GenerateCoveringTopologies() const;
std::vector<RootedTree> GenerateCoveringTrees(
const EigenVectorXd &dag_branch_length) const;
std::string ToNewickOfCoveringTopologies() const;
// ** Modify DAG
// These methods are for directly modifying the DAG by adding or removing nodes and
// edges. All these methods promise that all data owned by the SubsplitDAG will be
// valid and up-to-date at their conclusion. This includes maintaining the following
// DAG invariants:
// - Tips have ids 0 to taxon_count_.
// - Parents have higher ids than their children.
// - The DAG root node has highest id.
// - Edges descending from the same node clade have contiguous idxs.
// - There are no gaps in node ids or edge idxs.
// Returned is the ModificationResult, which will specify changes and reordering to
// data that occurred during modification.
// ModificationResult is the return value of Modification methods.
// Contains the output needed to update all related data to reflect modifications to
// DAG.
// Summary of one DAG modification: which nodes/edges were added and how
// existing ids/idxs were reordered, so dependent data can be remapped.
struct ModificationResult {
  // Nodes that were added or removed by modification.
  NodeIdVector added_node_ids;
  // Edges that were added or removed by modification.
  EdgeIdVector added_edge_idxs;
  // New ordering of node ids relative to their ordering before DAG modification.
  Reindexer node_reindexer;
  // New ordering of edge idxs relative to their ordering before DAG modification.
  Reindexer edge_reindexer;
  // NOTE(review): the four count members below have no default initializer, so a
  // default-constructed result holds indeterminate values until Reinit() or a
  // modifying operation fills them in — confirm all code paths set them.
  // Previous node count before modification.
  size_t prv_node_count;
  // Previous edge count before modification.
  size_t prv_edge_count;
  // Current node count after modification.
  size_t cur_node_count;
  // Current edge count after modification.
  size_t cur_edge_count;
  // Initialize a modification result that represents no changes in the DAG.
  void Reinit(const SubsplitDAG &dag);
  // Compose a modification result with a previous which represents the net changes
  // across both changes in the DAG.
  // NOTE(review): parameter is taken by const value, which forces a full copy of
  // both reindexers — consider `const ModificationResult &`.
  ModificationResult ComposeWith(const ModificationResult other);
};
// Add an adjacent node pair to the DAG.
virtual ModificationResult AddNodePair(const NNIOperation &nni);
virtual ModificationResult AddNodePair(const Bitset &parent_subsplit,
const Bitset &child_subsplit);
// Add multiple nodes to the DAG.
ModificationResult AddNodes(const BitsetPairVector &node_subsplit_pairs);
// Add multiple edges to the DAG.
ModificationResult AddEdges(const std::vector<Bitset> &edge_subsplits);
// Add all potential edges to DAG. Building DAGs from a collection of trees can
// result in a DAG that is not fully connected, in which one or more potentially
// adjacent nodes in the DAG do not have an edge between them.
ModificationResult FullyConnect();
// Add all tree topologies in topology_counter to DAG.
std::tuple<BitsetSizeMap, SizeBitsetMap, BitsetVector> ProcessTopologyCounter(
const Node::TopologyCounter &topology_counter);
// ** Reindexers
// These methods are for building and applying reindexers.
// A reindexer describes a remapping/transform of identifiers (i.e. node ids, edge
// idxs) before and after a modification to the DAG. The index is before the
// modification, the value is after the modification. Reindexers are used to perform a
// transform on data vectors,
// Uses a depth first DAG traversal to build a reindexer for node_ids.
Reindexer BuildNodeReindexer(const size_t prev_node_count);
// Get the reindexer for edges idxs.
Reindexer BuildEdgeReindexer(const size_t prev_edge_count);
// Remap all node ids according to the node_reindexer.
void RemapNodeIds(const Reindexer &node_reindexer);
// Remap all edge idxs according to the edge_reindexer.
void RemapEdgeIdxs(const Reindexer &edge_reindexer);
// ** Validation Tests
// These methods are used to assert that a DAG is in a valid state or that given
// operation will result in a valid DAG.
// Checks if SubsplitDAG's corresponding data is consistent and up-to-date.
// Specifically, checks that:
// - subsplit_to_id_ map consistent with nodes in dag_nodes_.
// - parent_to_child_range_ map consistent with each parent and child node's
// neighbors.
// - parent_to_child_range_ map consistent with each parent's edges.
bool IsConsistent() const;
// Checks if SubsplitDAG is in a valid state (assumes that DAG is consistent).
// Specifically, checks that:
// - Each node is valid. That is, either:
// - Node has zero parents and zero children.
// - Node has 1+ parents, 1+ sorted children, and 1+ rotated children.
bool IsValid() const;
// Check if it is valid to add given node pair.
// Specifically, check that:
// - The nodes are adjacent.
// - The nodes do not add/remove any taxa.
// - The parent node has at least one parent.
// - Including the child node, each clade of the parent node has at least one child.
// - Each clade of the child node has at least 1 child.
bool IsValidAddNodePair(const Bitset &parent_subsplit,
const Bitset &child_subsplit) const;
// Check if the taxon map is valid. Specifically, check that:
// - No duplicate ids.
// - Ids cover all clade bits from 0 to map_size.
bool IsValidTaxonMap() const;
// ** Miscellaneous
// Creates a dictionary of summary statistics for DAG.
// Includes node_count and edge_count.
StringSizeMap SummaryStatistics() const;
// Rotates Node Subsplit only if it is out of sorted order (rotated).
Bitset SubsplitToSortedOrder(const Bitset &subsplit, bool rotated) const;
// Builds a map from the nodes of one DAG onto another DAG. Nodes are considered
// matching when they represent the same Subsplit bitset. Map only contains the edges
// common to both DAGs.
static std::unordered_map<NodeId, NodeId> BuildNodeIdMapBetweenDAGs(
const SubsplitDAG &dag_a, const SubsplitDAG &dag_b);
// Builds a map from the edges of one DAG onto another DAG. Edges are considered
// matching when they represent the same PCSP bitset. Map only contains the edges
// common to both DAGs.
static std::unordered_map<EdgeId, EdgeId> BuildEdgeIdMapBetweenDAGs(
const SubsplitDAG &dag_a, const SubsplitDAG &dag_b);
// Build vector between from SubsplitDAGs dag_a to dag_b corresponding to their taxon
// ids. Can be treated as a "map" with indices representing keys. Requires that both
// SubsplitDAGs use the same taxon set.
// - Map: [ index: dag_a's taxon_id => value: dag_b's taxon_id ]
static SizeVector BuildTaxonTranslationMap(const SubsplitDAG &dag_a,
const SubsplitDAG &dag_b);
// Compare two bitsets (subsplits or PCSPs) from two different DAGs using a taxon
// map.
static int BitsetCompareViaTaxonTranslationMap(const Bitset &bitset_a,
const Bitset &bitset_b,
const SizeVector &taxon_map);
// Translate bitset using the taxon translation map output positions.
// NOTE: forward_translate uses index in map as input bit locations, values in map as
// output bit locations.
// If false, map is used vice versa.
static Bitset BitsetTranslateViaTaxonTranslationMap(
const Bitset &bitset, const SizeVector &taxon_map,
const bool forward_translate = true);
// Build a default taxon map for constructor with dummy taxon names:
// E.g. {{0, "x0"}, {1, "x1"}, ...}
static TagStringMap BuildDummyTagTaxonMap(const size_t taxon_count);
// ** GraftDAG Helper
// Remove Graft nodes and edges from Host DAG.
void ResetHostDAG(SubsplitDAG &host_dag);
protected:
// ** GraftDAG Helper
explicit SubsplitDAG(SubsplitDAG &host_dag, HostDispatchTag);
// ** Count
// Traverses the DAG and refreshes count of the number of topologies contained in the
// DAG. Updates topology_count_ and topology_count_below_, which contains the count of
// topologies below each node.
void CountTopologies();
// Update edge count without leaf subsplits.
void CountEdgesWithoutLeafSubsplits();
// ** DAG Traversal
// NOTES: - visit_order is the output vector of node ids in specified traversal order.
// - visited_nodes is a set of all node ids reached by traversal.
// Creates vector of node ids in leafward depth first post-order traversal of DAG.
void LeafwardDepthFirst(NodeId node_id, NodeIdVector &visit_order,
std::unordered_set<NodeId> &visited_nodes) const;
// Create vector of node ids in rootward depth first post-order traversal of DAG.
void RootwardDepthFirst(NodeId node_id, NodeIdVector &visit_order,
std::unordered_set<NodeId> &visited_nodes) const;
// ** Modify DAG Helpers
// These methods help calling functions to modify the DAG, but do NOT ensure a valid
// state at the end of the function. Do not necessarily handle remapping ids and idxs,
// removing old references to deleted objects, etc.
// Add taxon map to DAG.
void BuildTaxonMap(const TagStringMap &tag_taxon_map);
// Create Node and insert it into the DAG. Returns ID of created node.
NodeId CreateAndInsertNode(const Bitset &subsplit);
// Create Edge between given nodes and insert it into the DAG. Returns ID of created
// edge.
EdgeId CreateAndInsertEdge(const NodeId parent_id, const NodeId child_id,
const bool is_edge_on_left);
// Add edge between given parent and child nodes to the DAG.
void ConnectGivenNodes(const NodeId parent_id, const NodeId child_id,
const bool is_edge_on_left, const EdgeId edge_id);
// Add edges between node_id and all children in map.
void ConnectNodes(const SizeBitsetMap &index_to_child, const NodeId node_id,
const bool is_edge_on_left);
// Add nodes for all children in map.
void BuildNodes(const SizeBitsetMap &index_to_child, const BitsetVector &rootsplits);
// Add nodes in depth first ordering for children in map.
void BuildNodesDepthFirst(const SizeBitsetMap &index_to_child, const Bitset &subsplit,
std::unordered_set<Bitset> &visited_subsplits);
// Add edges from all parent nodes according to child nodes in map.
void BuildEdges(const SizeBitsetMap &index_to_child);
// Add edges to DAG according to node_id pairs in edge indexer.
void BuildDAGEdgesFromEdgeIndexer(BitsetSizeMap &edge_indexer);
// Connect the child to all of its children. Push all new edges to
// added_edge_idxs.
void ConnectChildToAllChildren(const Bitset &child_subsplit,
EdgeIdVector &added_edge_idxs);
// Connect the parent to all of its children except for the given child node. Insert
// all new edges to added_edge_idxs vector.
void ConnectParentToAllChildrenExcept(const Bitset &parent_subsplit,
const Bitset &child_subsplit,
EdgeIdVector &added_edge_idxs);
// Connect the child to all of its parents except for the given parent node. Insert
// all new edge to added_edge_idxs vector.
void ConnectChildToAllParentsExcept(const Bitset &parent_subsplit,
const Bitset &child_subsplit,
EdgeIdVector &added_edge_idxs);
// Connect the parent to all of its parents. Insert all new edges to
// added_edge_idxs vector.
void ConnectParentToAllParents(const Bitset &parent_subsplit,
EdgeIdVector &added_edge_idxs);
// Expand dag_edges_ and parent_to_child_range_ with leaf subsplits at the end.
void AddLeafSubsplitsToDAGEdgesAndParentToRange();
// Builds a vector of subsplits of all children , optionally including leaf nodes.
BitsetVector GetChildSubsplits(const SizeBitsetMap &index_to_child,
const Bitset &subsplit,
bool include_leaf_subsplits = false);
// Internal logic helper for adding node pair. Assumes that validation check
// has already been performed.
ModificationResult AddNodePairInternals(const Bitset &parent_subsplit,
const Bitset &child_subsplit);
ModificationResult AddNodePairInternals(
const std::vector<std::pair<Bitset, Bitset>> &node_subsplit_pairs);
// Internal logic helper that inserts node pair without reindexing. Just appends and
// adds nodes to modification result.
void AddNodePairInternalsWithoutReindexing(
const std::vector<std::pair<Bitset, Bitset>> &node_subsplit_pairs,
ModificationResult &mods);
// Check if a node would have at least one valid neighboring parent exist in
// the DAG for given subsplit.
bool SubsplitNodeHasParent(const Bitset &node_subsplit) const;
// Check if a node would have at least one valid neighboring right and one left child
// exist in the DAG for given subsplit.
bool SubsplitNodeHasLeftAndRightChild(const Bitset &node_subsplit) const;
// ** Constructor Helpers
// Build a Subsplit DAG on given number of taxa, expressing all tree topologies from
// tree_collection, with trees on the given taxa names/labels.
SubsplitDAG(size_t taxon_count, const Node::TopologyCounter &topology_counter,
const TagStringMap &tag_taxon_map);
private:
void StoreEdgeIds();
protected:
// Underlying data containing nodes and edges.
SubsplitDAGStorage storage_;
// NOTE: When using unique identifiers, for DAG nodes (aka Subsplits) we use the term
// "ids", and for edges (aka PCSPs) we use the term index or "idx", to more easily
// distinguish the two. This corresponds to the analogous concept for topologies.
// - Map of Taxon Names
// - [ Taxon Name => Taxon Id (position of the "on" bit in the clades) ]
StringTaxonIdMap dag_taxa_;
const TagStringMap *tag_taxon_map_ = nullptr;
// - Map of all DAG Nodes:
// - [ Node Subsplit (Bitset) => Node Id ]
// A node's id is equivalent to its index in dag_nodes_. The first entries are
// reserved for leaf subsplits. The last entries are reserved for rootsplits. The DAG
// root node has the highest node id.
BitsetNodeIdMap subsplit_to_id_;
// - These two maps facilitate a quick lookup of adjacent nodes in the DAG.
// The subsplit_union_ map contains a map from every clade union in the DAG to the set
// of all node_ids which contain that clade union.
BitsetNodeIdSetMap subsplit_union_;
BitsetNodeIdSetMap subsplit_union_graft_;
// The subsplit_clade_ map contains a map from every left and right clades of every
// subsplit in the DAG to the set of all node_ids which contain that clade as one of
// its sides.
BitsetNodeIdSetMap subsplit_clade_;
BitsetNodeIdSetMap subsplit_clade_graft_;
// - Map of all DAG Nodes:
// - [ Node Subsplit (Bitset) => (begin, end) Range of Child Ids ]
// This indexer is an expanded version of parent_to_child_range_ in sbn_instance:
// It includes single element range for leaf subsplits.
BitsetEdgeIdPairMap parent_to_child_range_;
// The number of taxa in the DAG. This is equivalent to the size of the clades in each
// subsplit. Also equivalent to the number of leaf nodes in the DAG.
size_t taxon_count_;
// Number of internal edges in the DAG (excludes all edges that go to a root or
// leaf).
size_t edge_count_without_leaf_subsplits_;
// Total number of tree topologies spanned by the DAG.
double topology_count_;
// Storage for the number of topologies below for each node. Each index maps to the
// count for the corresponding node_id.
EigenVectorXd topology_count_below_;
};
| 40,608
|
C++
|
.h
| 738
| 49.95122
| 88
| 0.732703
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,065
|
rooted_gradient_transforms.hpp
|
phylovi_bito/src/rooted_gradient_transforms.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Calculation of the ratio and root height gradient, adapted from BEAST.
// https://github.com/beast-dev/beast-mcmc
// Credit to Xiang Ji and Marc Suchard.
#pragma once
#include <numeric>
#include "phylo_flags.hpp"
#include "rooted_tree.hpp"
namespace RootedGradientTransforms {

// Gradient of the log-likelihood with respect to internal node heights:
// \partial{L}/\partial{t_k} = \sum_j \partial{L}/\partial{b_j}
// \partial{b_j}/\partial{t_k}
std::vector<double> HeightGradient(const RootedTree &tree,
                                   const std::vector<double> &branch_gradient);

// Per-node term used when assembling the ratio gradient.
double GetNodePartial(size_t node_id, size_t leaf_count,
                      const std::vector<double> &heights,
                      const std::vector<double> &ratios,
                      const std::vector<double> &bounds);

// Calculate \partial{t_j}/\partial{r_k}
double GetEpochGradientAddition(
    size_t node_id, size_t child_id, size_t leaf_count,
    const std::vector<double> &heights, const std::vector<double> &ratios,
    const std::vector<double> &bounds,
    const std::vector<double> &ratiosGradientUnweightedLogDensity);

std::vector<double> GetLogTimeArray(const RootedTree &tree);

// Update ratio gradient with \partial{t_j}/\partial{r_k}
std::vector<double> UpdateGradientUnWeightedLogDensity(
    const RootedTree &tree, const std::vector<double> &gradient_height);

std::vector<double> GradientLogDeterminantJacobian(const RootedTree &tree);

double UpdateHeightParameterGradientUnweightedLogDensity(
    const RootedTree &tree, const std::vector<double> &gradient);

// Transform a height gradient into a node-height-ratio gradient.
std::vector<double> RatioGradientOfHeightGradient(
    const RootedTree &tree, const std::vector<double> &height_gradient);

// Transform a branch-length gradient into a node-height-ratio gradient.
// NOTE: a former two-argument overload of this function was removed — together
// with the defaulted `flags` parameter below it made every two-argument call
// ambiguous, so it could never be invoked. Two-argument calls now resolve here
// with flags == std::nullopt.
std::vector<double> RatioGradientOfBranchGradient(
    const RootedTree &tree, const std::vector<double> &branch_gradient,
    const std::optional<PhyloFlags> flags = std::nullopt);

// This should go away with #205.
EigenVectorXd RatioGradientOfHeightGradientEigen(const RootedTree &tree,
                                                 EigenConstVectorXdRef height_gradient);

// Computes the Log Determinant Jacobian of the Height Transform.
double LogDetJacobianHeightTransform(const RootedTree &tree);

}  // namespace RootedGradientTransforms
| 2,439
|
C++
|
.h
| 45
| 48.644444
| 88
| 0.736864
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,066
|
gp_operation.hpp
|
phylovi_bito/src/gp_operation.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// These operations are just declarations. We process them with the GPEngine.
#pragma once
#include <iostream>
#include <variant>
#include <vector>
#include "sugar.hpp"
using StringSizePairVector = std::vector<std::pair<std::string, size_t>>;
// Assume we have:
// - all PLVs in a data structure such that `plv[idx]` is the `idx`th PLV
// - branch lengths in a vector `branch_lengths` indexed by PCSPs
// - log likelihoods in a vector `log_likelihoods` indexed by PCSPs
// - a vector of SBN probabilities `q` indexed by PCSPs, such that the children of a
// given parent are contiguous
// - a collection of weights from site pattern compression
namespace GPOperations {
// We use the convention that `src_` and `dest_` indices always index PLVs.
// ZeroPLV out the PLV at `dest_`.
// Zero out the PLV stored at index `dest_`.
struct ZeroPLV {
  constexpr explicit ZeroPLV(size_t dest) : dest_(dest) {}
  size_t dest_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{{"dest_", dest_}};
  }
};
// Set the PLV at `dest_` to be the stationary distribution multiplied by
// rootsplit probability indexed by `root_gpcsp_idx` at every site.
// Fill the PLV at `dest_` with the stationary distribution multiplied by the
// rootsplit probability indexed by `root_gpcsp_idx_` at every site.
struct SetToStationaryDistribution {
  constexpr explicit SetToStationaryDistribution(size_t dest, size_t root_gpcsp_idx)
      : dest_(dest), root_gpcsp_idx_(root_gpcsp_idx) {}
  size_t dest_;
  size_t root_gpcsp_idx_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{{"dest_", dest_},
                                {"root_gpcsp_idx_", root_gpcsp_idx_}};
  }
};
// Set transition_matrix_ using branch_length(gpcsp_) then,
// perform `plv[dest_] += q[gpcsp_] * transition_matrix_ * plv[src_]`
// Set the transition matrix from branch_length(gpcsp_), then perform
// `plv[dest_] += q[gpcsp_] * transition_matrix_ * plv[src_]`.
struct IncrementWithWeightedEvolvedPLV {
  constexpr IncrementWithWeightedEvolvedPLV(size_t dest, size_t gpcsp, size_t src)
      : dest_(dest), gpcsp_(gpcsp), src_(src) {}
  size_t dest_;
  size_t gpcsp_;
  size_t src_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{{"dest_", dest_}, {"gpcsp_", gpcsp_}, {"src_", src_}};
  }
};
// Reset the marginal likelihood before incrementing.
// Clear the accumulated marginal likelihood before a fresh round of increments.
struct ResetMarginalLikelihood {
  constexpr ResetMarginalLikelihood() = default;
  // No fields: this operation carries no parameters.
  StringSizePairVector guts() const { return StringSizePairVector{}; }
};
// Increment log marginal likelihood with the log likelihood at rootsplit rootsplit_
// with a leafward PLV p_ and a stationary distribution multiplied by the rootsplit
// prior at stationary_times_prior_.
// #288 note that this also sets the per-rootsplit conditional likelihood. It deserves a
// better name that has something to do with rootsplits. We should also consider just
// rejiggering things to use the stationary distribution directly.
// Increment the log marginal likelihood with the log likelihood at rootsplit
// `rootsplit_`, given a leafward PLV `p_` and the stationary distribution times
// the rootsplit prior at `stationary_times_prior_`.
struct IncrementMarginalLikelihood {
  constexpr IncrementMarginalLikelihood(size_t stationary_times_prior, size_t rootsplit,
                                        size_t p)
      : stationary_times_prior_(stationary_times_prior),
        rootsplit_(rootsplit),
        p_(p) {}
  size_t stationary_times_prior_;
  size_t rootsplit_;
  size_t p_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{
        {"stationary_times_prior_", stationary_times_prior_},
        {"rootsplit_", rootsplit_},
        {"p_", p_}};
  }
};
// Componentwise multiplication: `plv[dest_] = plv[src1_] o plv[src2_]`
// Componentwise multiplication: `plv[dest_] = plv[src1_] o plv[src2_]`.
struct Multiply {
  constexpr Multiply(size_t dest, size_t src1, size_t src2)
      : dest_(dest), src1_(src1), src2_(src2) {}
  size_t dest_;
  size_t src1_;
  size_t src2_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{{"dest_", dest_}, {"src1_", src1_}, {"src2_", src2_}};
  }
};
// #288 this deserves a better description, and perhaps a better name.
// Stores the likelihood of `plv[child_]` and `plv[parent_]` with branch length
// branch_lengths[dest_], incorporating site pattern weights, in
// `log_likelihoods[dest_]`
// Store the likelihood of `plv[child_]` and `plv[parent_]` with branch length
// branch_lengths[dest_], incorporating site pattern weights, in
// `log_likelihoods[dest_]`.
struct Likelihood {
  constexpr Likelihood(size_t dest, size_t child, size_t parent)
      : dest_(dest), child_(child), parent_(parent) {}
  size_t dest_;
  size_t child_;
  size_t parent_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{
        {"dest_", dest_}, {"child_", child_}, {"parent_", parent_}};
  }
};
// Finds the optimal `branch_length` for the likelihood of
// `plv[rootward_]` and `P(branch_length) plv[leafward_]`,
// starting optimization at `branch_lengths[branch_length_]`, and
// storing optimal branch length at `branch_lengths[branch_length_]`.
// #288 are we happy with definition of rootward and leafward?
// Optimize the branch length for the likelihood of `plv[rootward_]` and
// `P(branch_length) plv[leafward_]`, starting from and writing back to
// `branch_lengths[gpcsp_]`.
struct OptimizeBranchLength {
  constexpr OptimizeBranchLength(size_t leafward, size_t rootward, size_t gpcsp)
      : leafward_(leafward), rootward_(rootward), gpcsp_(gpcsp) {}
  size_t leafward_;
  size_t rootward_;
  size_t gpcsp_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{
        {"leafward_", leafward_}, {"rootward_", rootward_}, {"gpcsp_", gpcsp_}};
  }
};
// Assumption: log_likelihoods_ have been updated on [op.start_, op.stop_).
// Performs `eq:SBNUpdates`. That is, let `total` be the log sum of
// `log_likelihoods[idx]` for all `idx` in `start_ <= idx < stop_`. Now let
// `q[idx] = exp(log_likelihoods[idx] - total)` for all `idx` in `start_ <= idx <
// stop_`.
// Note that this operation modifies our log_likelihoods in place by normalizing them
// across children of a parent. Thus they are no longer valid.
// Normalize log_likelihoods over [start_, stop_) into SBN probabilities `q`
// (eq:SBNUpdates); the log_likelihoods in that range are modified in place and
// no longer valid afterwards.
struct UpdateSBNProbabilities {
  constexpr UpdateSBNProbabilities(size_t start, size_t stop)
      : start_(start), stop_(stop) {}
  size_t start_;
  size_t stop_;
  // Name/value pairs describing this operation (used for printing).
  StringSizePairVector guts() const {
    return StringSizePairVector{{"start_", start_}, {"stop_", stop_}};
  }
};
// This operation sets the rescaling amount for the PLV in op.dest_ to be the minimum of
// that for all of the PLVs in op.src_vector_. We do this so that we can sum over
// partial likelihood vectors after rescaling each one so that it is on the same scale
// as dest. Note that we want the minimum here because we want to preserve accuracy for
// the PLV with the highest likelihood (which corresponds to the least amount of
// rescaling).
// Set the rescaling amount for the PLV at `dest_` to the minimum over the PLVs
// in `src_vector_`, so summing rescaled PLVs preserves accuracy for the
// highest-likelihood (least rescaled) one.
struct PrepForMarginalization {
  PrepForMarginalization(size_t dest, SizeVector src_vector)
      : dest_(dest), src_vector_(std::move(src_vector)) {}
  size_t dest_;
  SizeVector src_vector_;
  // Name/value pairs describing this operation (used for printing).
  std::pair<std::pair<std::string, size_t>, std::pair<std::string, SizeVector>> guts()
      const {
    return {{"dest_", dest_}, {"src_vector_", src_vector_}};
  }
};
} // namespace GPOperations
// The closed set of GP operation types; operations are dispatched with
// std::visit (see GPOperationOstream and PrepForMarginalizationVisitor).
using GPOperation = std::variant<
    GPOperations::ZeroPLV, GPOperations::SetToStationaryDistribution,
    GPOperations::IncrementWithWeightedEvolvedPLV, GPOperations::Multiply,
    GPOperations::Likelihood, GPOperations::OptimizeBranchLength,
    GPOperations::UpdateSBNProbabilities, GPOperations::ResetMarginalLikelihood,
    GPOperations::IncrementMarginalLikelihood, GPOperations::PrepForMarginalization>;
// A sequence of GP operations to be processed in order by the GPEngine.
using GPOperationVector = std::vector<GPOperation>;
// The purpose of this visitor class is to accumulate the
// things-that-need-preparation-for-marginalization and build them into a
// PrepForMarginalization (see PrepForMarginalizationOfOperations implementation).
// Visitor that accumulates the sources of all IncrementWithWeightedEvolvedPLV
// operations in a vector (which must share one destination) and builds the
// corresponding PrepForMarginalization operation.
struct PrepForMarginalizationVisitor {
  std::optional<size_t> dest_ = std::nullopt;
  SizeVector src_vector;

  // Visit every operation in the given vector, accumulating as we go.
  explicit PrepForMarginalizationVisitor(const GPOperationVector& operations) {
    for (const auto& op : operations) {
      std::visit(*this, op);
    }
  }

  // The only operation type that contributes: record its source, and check that
  // every contribution targets the same destination PLV.
  void operator()(const GPOperations::IncrementWithWeightedEvolvedPLV& op) {
    if (!dest_.has_value()) {
      dest_ = op.dest_;
    } else {
      Assert(dest_.value() == op.dest_,
             "dest_ mismatch in PrepForMarginalizationVisitor");
    }
    src_vector.push_back(op.src_);
  }
  // Every other operation type is ignored.
  void operator()(const GPOperations::ZeroPLV&) {}                        // NOLINT
  void operator()(const GPOperations::SetToStationaryDistribution&) {}    // NOLINT
  void operator()(const GPOperations::ResetMarginalLikelihood&) {}        // NOLINT
  void operator()(const GPOperations::IncrementMarginalLikelihood&) {}    // NOLINT
  void operator()(const GPOperations::Multiply&) {}                       // NOLINT
  void operator()(const GPOperations::Likelihood&) {}                     // NOLINT
  void operator()(const GPOperations::OptimizeBranchLength&) {}           // NOLINT
  void operator()(const GPOperations::UpdateSBNProbabilities&) {}         // NOLINT
  void operator()(const GPOperations::PrepForMarginalization&) {}         // NOLINT

  // Emit the accumulated PrepForMarginalization; requires at least one
  // contributing operation was seen.
  GPOperations::PrepForMarginalization ToPrepForMarginalization() {
    Assert(dest_.has_value(), "Nothing to prep in ToPrepForMarginalization");
    return GPOperations::PrepForMarginalization{dest_.value(), src_vector};
  }
};
namespace GPOperations {
// Move-append new_operations onto the end of operations.
void AppendGPOperations(GPOperationVector& operations,
                        GPOperationVector&& new_operations);
// Scan operations and build the single PrepForMarginalization that summarizes
// their IncrementWithWeightedEvolvedPLV entries (see
// PrepForMarginalizationVisitor).
PrepForMarginalization PrepForMarginalizationOfOperations(
    const GPOperationVector& operations);
};  // namespace GPOperations
struct GPOperationOstream {
std::ostream& os_;
explicit GPOperationOstream(std::ostream& os) : os_{os} {}
void operator()(const GPOperations::ZeroPLV& operation) {
os_ << "ZeroPLV" << operation.guts();
}
void operator()(const GPOperations::SetToStationaryDistribution& operation) {
os_ << "SetToStationaryDistribution" << operation.guts();
}
void operator()(const GPOperations::IncrementWithWeightedEvolvedPLV& operation) {
os_ << "IncrementWithWeightedEvolvedPLV" << operation.guts();
}
// #288 should this be TotalMarginalLikelihood or something?
void operator()(const GPOperations::ResetMarginalLikelihood& operation) {
os_ << "ResetMarginalLikelihood" << operation.guts();
}
void operator()(const GPOperations::IncrementMarginalLikelihood& operation) {
os_ << "IncrementMarginalLikelihood" << operation.guts();
}
void operator()(const GPOperations::Multiply& operation) {
os_ << "Multiply" << operation.guts();
}
void operator()(const GPOperations::Likelihood& operation) {
os_ << "Likelihood" << operation.guts();
}
void operator()(const GPOperations::OptimizeBranchLength& operation) {
os_ << "OptimizeBranchLength" << operation.guts();
}
void operator()(const GPOperations::UpdateSBNProbabilities& operation) {
os_ << "UpdateSBNProbabilities" << operation.guts();
}
void operator()(const GPOperations::PrepForMarginalization& operation) {
os_ << "PrepForMarginalization" << operation.guts();
}
};
// Print a single operation / a whole operation vector (formatted per
// GPOperationOstream; definitions live in the corresponding .cpp).
std::ostream& operator<<(std::ostream& os, GPOperation const& operation);
std::ostream& operator<<(std::ostream& os, GPOperationVector const& operation_vector);
| 10,470
|
C++
|
.h
| 227
| 42.947137
| 88
| 0.720454
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,067
|
rooted_sbn_support.hpp
|
phylovi_bito/src/rooted_sbn_support.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include "sbn_support.hpp"
// Rooted-tree specialization of SBNSupport: builds the rootsplit/PCSP indexer
// bundle from a counter of observed rooted topologies.
class RootedSBNSupport : public SBNSupport {
 public:
  RootedSBNSupport() : SBNSupport({}) {}
  // Construct support from observed rooted topologies; taxon_names is taken by
  // value and moved into the base class.
  explicit RootedSBNSupport(const Node::TopologyCounter &topologies,
                            StringVector taxon_names)
      : SBNSupport(std::move(taxon_names)) {
    std::tie(rootsplits_, indexer_, index_to_child_, parent_to_child_range_,
             gpcsp_count_) =
        SBNMaps::BuildIndexerBundle(RootedSBNMaps::RootsplitCounterOf(topologies),
                                    RootedSBNMaps::PCSPCounterOf(topologies));
  }

  // Count indexer representations for each topology in topology_counter;
  // out_of_sample_index is used for splits not found in the indexer.
  // Const-qualified for consistency with IndexerRepresentationOf (reads only).
  RootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
      const Node::TopologyCounter &topology_counter,
      const size_t out_of_sample_index) const {
    return RootedSBNMaps::IndexerRepresentationCounterOf(indexer_, topology_counter,
                                                         out_of_sample_index);
  }
  // As above, defaulting the out-of-sample index to GPCSPCount().
  RootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
      const Node::TopologyCounter &topology_counter) const {
    return IndexerRepresentationCounterOf(topology_counter, GPCSPCount());
  }
  // Map a single topology to its indexer representation.
  RootedIndexerRepresentation IndexerRepresentationOf(
      const Node::NodePtr &topology, const size_t out_of_sample_index) const {
    return RootedSBNMaps::IndexerRepresentationOf(indexer_, topology,
                                                  out_of_sample_index);
  }
  // As above, defaulting the out-of-sample index to GPCSPCount().
  RootedIndexerRepresentation IndexerRepresentationOf(
      const Node::NodePtr &topology) const {
    return IndexerRepresentationOf(topology, GPCSPCount());
  }
  // Thin static forwards to RootedSBNMaps.
  static BitsetSizeDict RootsplitCounterOf(const Node::TopologyCounter &topologies) {
    return RootedSBNMaps::RootsplitCounterOf(topologies);
  }
  static PCSPCounter PCSPCounterOf(const Node::TopologyCounter &topologies) {
    return RootedSBNMaps::PCSPCounterOf(topologies);
  }
};
| 1,965
|
C++
|
.h
| 40
| 40.875
| 88
| 0.717641
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,068
|
subsplit_dag_node.hpp
|
phylovi_bito/src/subsplit_dag_node.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A node in a directed acyclic graph representing a collection of subsplits with their
// corresponding parent-child relationships.
//
// Each node represents a sorted subsplit, which is stored as a bitset in DAGVertex.
// The leafward edges are divided into two groups based on if they split apart the
// right clade (in which case they are called sorted children) or if they split apart
// the left clade (in which case they are called rotated children).
// Similarly, the rootward edges are divided into two groups based on if the child of
// the edge splits apart the right clade of the parent (in which case they are called
// sorted parents) or if they split apart the left clade of the parent (in which case
// they are called rotated parents). The return type [Const]NeighborsView of querying
// edges is a view object, that can be used similarly to a vector reference: can be
// iterated, and checked for size() and empty().
#pragma once
#include "bitset.hpp"
#include "reindexer.hpp"
#include "sugar.hpp"
#include "subsplit_dag_storage.hpp"
// NOTE(review): an anonymous namespace in a header gives every including
// translation unit its own copy of these helpers; consider a named detail
// namespace with `inline` functions instead.
namespace {
// Access the underlying storage object of a node wrapper (returns node.node_).
template <typename T>
auto& GetStorage(const T& node) {
  return node.node_;
}
// Remap a node's neighbor set through node_reindexer: each neighbor NodeId is
// replaced by its reindexed value while keeping its EdgeId.
// NOTE(review): `static` is redundant here — the anonymous namespace already
// gives internal linkage.
static inline void RemapNeighbors(NeighborsView neighbors,
                                  const SizeVector& node_reindexer) {
  std::map<NodeId, EdgeId> remapped;
  for (auto id = neighbors.begin(); id != neighbors.end(); ++id) {
    remapped[NodeId(node_reindexer[(*id).value_])] = EdgeId(id.GetEdgeId());
  }
  neighbors.SetNeighbors(remapped);
}
}  // namespace
// View over one DAG node's storage; T is the (possibly const) storage type.
// The view wraps a reference (see the T& constructor) and does not own it.
template <typename T>
class GenericSubsplitDAGNode {
 public:
  // Implicit conversion from storage is intentional — presumably so storage can
  // be passed wherever a node view is expected; TODO confirm.
  GenericSubsplitDAGNode(T& node) : node_{node} {}
  // Build a const view from the corresponding mutable view.
  GenericSubsplitDAGNode(const GenericSubsplitDAGNode<std::remove_const_t<T>>& other)
      : node_{other.node_} {}

  // Compare SubsplitDAGNodes by their ids.
  static int Compare(const SubsplitDAGNode& node_a, const SubsplitDAGNode& node_b);
  // Compare SubsplitDAGNodes by their subsplit representations.
  static int CompareBySubsplit(const SubsplitDAGNode& node_a,
                               const SubsplitDAGNode& node_b);

  // Id of this node in the DAG.
  NodeId Id() const { return NodeId(node_.GetId()); }
  // The node's subsplit bitset.
  const Bitset& GetBitset() const { return node_.GetSubsplit(); }
  // The subsplit, clade-rotated when `rotated` is true (returned by value;
  // the `const` on the return type has no effect for value returns).
  const Bitset GetBitset(bool rotated) const {
    return rotated ? node_.GetSubsplit().SubsplitRotate() : node_.GetSubsplit();
  }

  // ** Node Types

  // Is the node the DAG root (universal ancestor of the DAG)?
  bool IsDAGRootNode() const {
    return (GetRightRootward().empty() && GetLeftRootward().empty());
  }
  // Is the node a rootsplit (direct descendent of root, dividing entire taxon set)?
  bool IsRootsplit() const { return node_.GetSubsplit().SubsplitIsRootsplit(); }
  // Is the node a leaf (has no descendants)?
  bool IsLeaf() const {
    return GetLeftLeafward().empty() && GetRightLeafward().empty();
  }

  // ** Edges

  // Register a neighbor on the given direction/clade, remembering its edge id.
  void AddEdge(NodeId adjacent_node_id, EdgeId edge_id, Direction which_direction,
               SubsplitClade which_clade) {
    node_.AddNeighbor(which_direction, which_clade, adjacent_node_id, edge_id);
  }
  // Convenience wrappers for each direction/clade combination.
  void AddLeftLeafward(NodeId node_id, EdgeId edge_id) {
    node_.AddNeighbor(Direction::Leafward, SubsplitClade::Left, node_id, edge_id);
  }
  void AddRightLeafward(NodeId node_id, EdgeId edge_id) {
    node_.AddNeighbor(Direction::Leafward, SubsplitClade::Right, node_id, edge_id);
  }
  void AddLeftRootward(NodeId node_id, EdgeId edge_id) {
    node_.AddNeighbor(Direction::Rootward, SubsplitClade::Left, node_id, edge_id);
  }
  void AddRightRootward(NodeId node_id, EdgeId edge_id) {
    node_.AddNeighbor(Direction::Rootward, SubsplitClade::Right, node_id, edge_id);
  }
// ** Neighbors
// This is a super-iterator that iterates over all ConstNeighborView iterators of all
// given directions/clade combinations.
class MultiConstNeighborsView {
public:
MultiConstNeighborsView(
const SubsplitDAGNode& node,
std::vector<std::pair<Direction, SubsplitClade>> dir_clade_pairs)
: node_id_(node.Id()), dir_clade_pairs_(dir_clade_pairs) {
for (const auto [dir, clade] : dir_clade_pairs_) {
views_.push_back(node.GetNeighbors(dir, clade));
}
}
class Iterator {
public:
Iterator(size_t view_idx, const NodeId node_id,
const std::vector<std::pair<Direction, SubsplitClade>>& dir_clade_pairs,
const std::vector<ConstNeighborsView>& views)
: node_id_(node_id),
dir_clade_pairs_(dir_clade_pairs),
views_(views),
view_idx_(view_idx) {
AssignIterator();
if (it_ != nullptr) {
MaybeGetNextIterator();
}
}
bool operator!=(const Iterator& other) { return view_idx_ != other.GetViewIdx(); }
Iterator& operator++() {
++(*it_);
MaybeGetNextIterator();
return *this;
}
NodeId operator*() { return *(*it_); }
NodeId GetNodeId() const { return (*it_).GetNodeId(); }
EdgeId GetEdgeId() const { return (*it_).GetEdgeId(); }
Direction GetDirection() const { return dir_clade_pairs_[view_idx_].first; }
SubsplitClade GetSubsplitClade() const {
return dir_clade_pairs_[view_idx_].second;
}
const ConstNeighborsView GetCurrentView() const { return views_[view_idx_]; }
size_t GetViewIdx() const { return view_idx_; }
NodeId GetParentId() const {
return (GetDirection() == Direction::Leafward) ? node_id_ : GetNodeId();
};
NodeId GetChildId() const {
return (GetDirection() == Direction::Leafward) ? GetNodeId() : node_id_;
};
private:
ConstNeighborsView::Iterator GetIterator() { return *it_; }
void MaybeGetNextIterator() {
while ((*it_) == (*end_) and view_idx_ < views_.size()) {
view_idx_++;
AssignIterator();
}
}
void AssignIterator() {
if (view_idx_ >= views_.size()) return;
it_ = std::make_unique<ConstNeighborsView::Iterator>(GetCurrentView().begin());
end_ = std::make_unique<ConstNeighborsView::Iterator>(GetCurrentView().end());
}
const NodeId node_id_;
const std::vector<std::pair<Direction, SubsplitClade>>& dir_clade_pairs_;
const std::vector<ConstNeighborsView>& views_;
std::unique_ptr<ConstNeighborsView::Iterator> it_ = nullptr;
std::unique_ptr<ConstNeighborsView::Iterator> end_ = nullptr;
size_t view_idx_;
};
auto begin() const { return Iterator(0, node_id_, dir_clade_pairs_, views_); }
auto end() const {
return Iterator(dir_clade_pairs_.size(), node_id_, dir_clade_pairs_, views_);
}
size_t size() const {
size_t size = 0;
std::for_each(views_.begin(), views_.end(),
[&size](const auto view) { size += view.size(); });
return size;
}
bool empty() const { return size() == 0; }
private:
const NodeId node_id_;
const std::vector<std::pair<Direction, SubsplitClade>> dir_clade_pairs_;
std::vector<ConstNeighborsView> views_;
};
// Get all neighbors.
MultiConstNeighborsView GetNeighbors() const {
std::vector<std::pair<Direction, SubsplitClade>> dir_clade_pairs;
for (auto dir : DirectionEnum::Iterator()) {
for (auto clade : SubsplitCladeEnum::Iterator()) {
dir_clade_pairs.push_back({dir, clade});
}
}
return {node_, dir_clade_pairs};
}
// Get all leafward/rootward neighbors.
MultiConstNeighborsView GetNeighbors(Direction dir) const {
std::vector<std::pair<Direction, SubsplitClade>> dir_clade_pairs;
for (auto clade : SubsplitCladeEnum::Iterator()) {
dir_clade_pairs.push_back({dir, clade});
}
}
// Get neighbors in specified direction.
ConstNeighborsView GetNeighbors(Direction direction, SubsplitClade clade) const {
return node_.GetNeighbors(direction, clade);
}
ConstNeighborsView GetLeafwardOrRootward(bool leafward, bool rotated) const {
return leafward ? GetLeafward(rotated) : GetRootward(rotated);
};
ConstNeighborsView GetLeftLeafward() const {
return node_.GetNeighbors(Direction::Leafward, SubsplitClade::Left);
}
ConstNeighborsView GetRightLeafward() const {
return node_.GetNeighbors(Direction::Leafward, SubsplitClade::Right);
}
ConstNeighborsView GetLeafward(bool rotated) const {
return rotated ? GetLeftLeafward() : GetRightLeafward();
}
ConstNeighborsView GetLeafward(SubsplitClade clade) const {
return (clade == SubsplitClade::Left) ? GetLeftLeafward() : GetRightLeafward();
}
ConstNeighborsView GetLeftRootward() const {
return node_.GetNeighbors(Direction::Rootward, SubsplitClade::Left);
}
ConstNeighborsView GetRightRootward() const {
return node_.GetNeighbors(Direction::Rootward, SubsplitClade::Right);
}
ConstNeighborsView GetRootward(bool rotated) const {
return rotated ? GetLeftRootward() : GetRightRootward();
}
ConstNeighborsView GetRootward(SubsplitClade clade) const {
return (clade == SubsplitClade::Left) ? GetLeftRootward() : GetRightRootward();
}
// Remap node ids after modifying DAG.
void RemapNodeIds(const Reindexer& node_reindexer) {
Assert(node_reindexer.IsValid(),
"Reindexer must be valid in GenericSubsplitDAGNode::RemapNodeIds.");
node_.SetId(NodeId(node_reindexer.GetNewIndexByOldIndex(node_.GetId().value_)));
node_.GetNeighbors(Direction::Leafward, SubsplitClade::Left)
.RemapNodeIds(node_reindexer);
node_.GetNeighbors(Direction::Leafward, SubsplitClade::Right)
.RemapNodeIds(node_reindexer);
node_.GetNeighbors(Direction::Rootward, SubsplitClade::Left)
.RemapNodeIds(node_reindexer);
node_.GetNeighbors(Direction::Rootward, SubsplitClade::Right)
.RemapNodeIds(node_reindexer);
}
// Remap edge ids after modifying DAG.
void RemapEdgeIdxs(const Reindexer& edge_reindexer) {
Assert(edge_reindexer.IsValid(),
"Reindexer must be valid in GenericSubsplitDAGNode::RemapNodeIds.");
node_.GetNeighbors(Direction::Leafward, SubsplitClade::Left)
.RemapEdgeIdxs(edge_reindexer);
node_.GetNeighbors(Direction::Leafward, SubsplitClade::Right)
.RemapEdgeIdxs(edge_reindexer);
node_.GetNeighbors(Direction::Rootward, SubsplitClade::Left)
.RemapEdgeIdxs(edge_reindexer);
node_.GetNeighbors(Direction::Rootward, SubsplitClade::Right)
.RemapEdgeIdxs(edge_reindexer);
}
std::optional<std::tuple<EdgeId, Direction, SubsplitClade>> FindNeighbor(NodeId id) {
return node_.FindNeighbor(id);
}
void SetEdgeId(NodeId neighbor, EdgeId line) { node_.SetEdgeId(neighbor, line); }
bool IsValid() const;
std::string ToString() const;
private:
friend class DAGVertex;
template <typename>
friend class GenericSubsplitDAGNode;
friend DAGVertex& GetStorage(const GenericSubsplitDAGNode<DAGVertex>&);
friend const DAGVertex& GetStorage(const GenericSubsplitDAGNode<const DAGVertex>&);
T& node_;
};
namespace {
// Render a neighbor list as space-delimited node ids, e.g. "3 4 "
// (including the trailing space, matching historical output).
static inline std::string GetNeighborString(ConstNeighborsView neighbors) {
  std::string result;
  for (auto neighbor_it = neighbors.begin(); neighbor_it != neighbors.end();
       ++neighbor_it) {
    result += std::to_string((*neighbor_it).value_);
    result += ' ';
  }
  return result;
}
}  // namespace
// Sanity-check this node's neighbor structure. Returns true when the node's
// parent/child counts are mutually consistent (see cases in the comments).
template <typename T>
bool GenericSubsplitDAGNode<T>::IsValid() const {
  // If node is a leaf, then a valid node should have no parents.
  // NOTE(review): IsLeaf() is defined as both leafward sets being empty, so
  // this leafward-size check is trivially true for any leaf — presumably a
  // different neighbor direction was intended; confirm against callers.
  if (IsLeaf()) {
    return (GetRightLeafward().size() + GetLeftLeafward().size() == 0);
  }
  // If node is a root, then a valid node should have no children.
  // NOTE(review): likewise trivially true, since IsDAGRootNode() already
  // requires both rootward sets to be empty.
  else if (IsDAGRootNode()) {
    return (GetRightRootward().size() + GetLeftRootward().size() == 0);
  }
  // If neither, then node should either have:
  // (1) Zero parents and zero children.
  // (2) 1+ parents, 1+ sorted children, and 1+ rotated children.
  size_t parent_node_count = GetRightRootward().size() + GetLeftRootward().size();
  if (parent_node_count > 0) {
    // Has parents: both child clades must be populated.
    if (GetRightLeafward().size() == 0 || GetLeftLeafward().size() == 0) {
      return false;
    }
  } else {
    // No parents: must have no children either (fully disconnected).
    if (GetRightLeafward().size() > 0 || GetLeftLeafward().size() > 0) {
      return false;
    }
  }
  return true;
}
// Human-readable dump: "<id>: <subsplit>" header followed by one line per
// neighbor collection.
template <typename T>
std::string GenericSubsplitDAGNode<T>::ToString() const {
  std::string result = std::to_string(Id().value_);
  result += ": ";
  result += GetBitset().SubsplitToString();
  result += "\n";
  result += "Right Rootward: ";
  result += GetNeighborString(GetRightRootward());
  result += "\n";
  result += "Left Rootward: ";
  result += GetNeighborString(GetLeftRootward());
  result += "\n";
  result += "Right Leafward: ";
  result += GetNeighborString(GetRightLeafward());
  result += "\n";
  result += "Left Leafward: ";
  result += GetNeighborString(GetLeftLeafward());
  result += "\n";
  return result;
}
// Construct a DAGVertex from either node-view flavor by rebinding to the
// underlying storage (friend access to node_).
DAGVertex::DAGVertex(SubsplitDAGNode node) : DAGVertex(node.node_) {}
DAGVertex::DAGVertex(MutableSubsplitDAGNode node) : DAGVertex(node.node_) {}
// Dereference the vertices-view iterator to the vertex at the current index.
template <typename T>
typename GenericVerticesView<T>::view_type GenericVerticesView<T>::Iterator::operator*()
    const {
  return storage_.vertices_[index_];
}
// Unchecked indexed access into the underlying vertex storage.
template <typename T>
typename GenericVerticesView<T>::view_type GenericVerticesView<T>::operator[](
    size_t i) const {
  return storage_.vertices_[i];
}
// Bounds-checked access (throws std::out_of_range via vector::at).
template <typename T>
typename GenericVerticesView<T>::view_type GenericVerticesView<T>::at(size_t i) const {
  return storage_.vertices_.at(i);
}
#ifdef DOCTEST_LIBRARY_INCLUDED
// Test-only accessors exposing a node view's underlying DAGVertex storage
// (declared friends of GenericSubsplitDAGNode above).
inline DAGVertex& GetStorage(const GenericSubsplitDAGNode<DAGVertex>& node) {
  return node.node_;
}
inline const DAGVertex& GetStorage(
    const GenericSubsplitDAGNode<const DAGVertex>& node) {
  return node.node_;
}
/* Create the following topology:
[0]
/ \
0 1
/ \
[1] [2]
/ \
2 3
/ \
[3] [4]
*/
// Build the small test fixture drawn above: edges as (edge id, parent, child,
// clade) lines, then each vertex with its rootward/leafward adjacency.
static SubsplitDAGStorage MakeStorage() {
  SubsplitDAGStorage storage;
  // Edge table for the 5-node topology.
  storage.AddLine({EdgeId(0), NodeId(0), NodeId(1), SubsplitClade::Left});
  storage.AddLine({EdgeId(1), NodeId(0), NodeId(2), SubsplitClade::Right});
  storage.AddLine({EdgeId(2), NodeId(1), NodeId(3), SubsplitClade::Left});
  storage.AddLine({EdgeId(3), NodeId(1), NodeId(4), SubsplitClade::Right});
  // Vertex 0: root with children 1 (left) and 2 (right).
  storage.AddVertex(DAGVertex{}.SetId(NodeId(0)))
      .AddNeighbor(Direction::Leafward, SubsplitClade::Left, NodeId(1), EdgeId(0))
      .AddNeighbor(Direction::Leafward, SubsplitClade::Right, NodeId(2), EdgeId(1));
  // Vertex 1: internal node with parent 0 and children 3/4.
  storage.AddVertex(DAGVertex{}.SetId(NodeId(1)))
      .AddNeighbor(Direction::Rootward, SubsplitClade::Left, NodeId(0), EdgeId(0))
      .AddNeighbor(Direction::Leafward, SubsplitClade::Left, NodeId(3), EdgeId(2))
      .AddNeighbor(Direction::Leafward, SubsplitClade::Right, NodeId(4), EdgeId(3));
  // Vertices 2-4: leaves, each knowing only its parent.
  storage.AddVertex(DAGVertex{}.SetId(NodeId(2)))
      .AddNeighbor(Direction::Rootward, SubsplitClade::Right, NodeId(0), EdgeId(1));
  storage.AddVertex(DAGVertex{}.SetId(NodeId(3)))
      .AddNeighbor(Direction::Rootward, SubsplitClade::Left, NodeId(1), EdgeId(2));
  storage.AddVertex(DAGVertex{}.SetId(NodeId(4)))
      .AddNeighbor(Direction::Rootward, SubsplitClade::Right, NodeId(1), EdgeId(3));
  return storage;
}
// Verify that iterating GetLines() yields the fixture's four (parent, child)
// pairs in insertion order, using structured bindings on each line.
TEST_CASE("SubsplitDAGStorage: LinesView structured binding") {
  auto storage = MakeStorage();
  size_t i = 0;
  for (auto [node_ids, line_id] : storage.GetLines()) {
    std::ignore = line_id;
    auto [parent_id, child_id] = node_ids;
    switch (i++) {
      case 0:
        CHECK_EQ(parent_id, 0);
        CHECK_EQ(child_id, 1);
        break;
      case 1:
        CHECK_EQ(parent_id, 0);
        CHECK_EQ(child_id, 2);
        break;
      case 2:
        CHECK_EQ(parent_id, 1);
        CHECK_EQ(child_id, 3);
        break;
      case 3:
        CHECK_EQ(parent_id, 1);
        CHECK_EQ(child_id, 4);
        break;
      default:
        Failwith("More lines than expected");
    }
  }
}
// Verify the single-direction neighbor iterator: vertex 1's left-leafward
// neighbor is node 3, reached via edge 2 (see MakeStorage fixture).
TEST_CASE("SubsplitDAGStorage: Neighbors iterator") {
  auto storage = MakeStorage();
  CHECK_EQ(*GetStorage(storage.GetVertices()[1])
                .GetNeighbors(Direction::Leafward, SubsplitClade::Left)
                .begin(),
           3);
  CHECK_EQ(GetStorage(storage.GetVertices()[1])
               .GetNeighbors(Direction::Leafward, SubsplitClade::Left)
               .begin()
               .GetEdgeId(),
           2);
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 16,279
|
C++
|
.h
| 393
| 36.262087
| 88
| 0.686257
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,070
|
optimization.hpp
|
phylovi_bito/src/optimization.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include <cmath>
#include <functional>
#include <numeric>
#include <utility>
#include "sugar.hpp"
// We tested both gradient and gradient-free based methods for branch length
// optimization using generalized pruning. We found that gradient-based, particularly
// Newton's method, works well on small test cases, but fails compared to gradient-free
// methods in real data. We tried a "hybrid" approach, where ran Brent for the first
// round or 2 of optimization (on the full DAG), and then handed off to Newton's method,
// but these didn't show improvements over Newton's method.
//
// We made improvements on Brent's method, adapted from the Boost C++ library, for
// our purposes. First, we introduced a "guess" as the initial value, when previously
// the initial value was always set to the input "max" bracket value. We also introduced
// the option to take small gradient steps in the case that the initial algorithm fails
// to improve on the current best argmin value (turned on with use_gradients).
// Static collection of scalar optimization routines used for branch length
// optimization (see file header for the rationale behind each method).
class Optimization {
 public:
  // Options for optimization method.
  enum class OptimizationMethod {
    BrentOptimization,
    BrentOptimizationWithGradients,
    GradientAscentOptimization,
    LogSpaceGradientAscentOptimization,
    NewtonOptimization
  };
  // Labeled-enum helper for printing OptimizationMethod values.
  class OptimizationMethodEnum
      : public EnumWrapper<OptimizationMethod, size_t, 5,
                           OptimizationMethod::BrentOptimization,
                           OptimizationMethod::NewtonOptimization> {
   public:
    static inline const std::string Prefix = "OptimizationMethod";
    static inline const Array<std::string> Labels = {
        {"BrentOptimization", "BrentOptimizationWithGradients",
         "GradientAscentOptimization", "LogSpaceGradientAscentOptimization",
         "NewtonOptimization"}};
    // Render e.g. "OptimizationMethod::BrentOptimization".
    static std::string ToString(const OptimizationMethod e) {
      std::stringstream ss;
      ss << Prefix << "::" << Labels[e];
      return ss.str();
    }
    friend std::ostream &operator<<(std::ostream &os, const OptimizationMethod e) {
      os << ToString(e);
      return os;
    }
  };
  // ** Evaluation Functions
  // Functions to optimize.
  // f(x) -> (f(x), f'(x))
  template <class T>
  using FuncAndDerivative = std::function<std::pair<T, T>(T)>;
  // f(x) -> (f(x), f'(x), f''(x))
  template <class T>
  using FuncAndFirstTwoDerivatives = std::function<Tuple3<T>(T)>;
  // f(x) -> f(x) (negated objective, for minimization).
  template <class T>
  using NegFunc = std::function<T(T)>;
  template <class T>
  using NegFuncAndDerivative = std::function<std::pair<T, T>(T)>;
  // Adapted from https://www.boost.org/doc/libs/1_73_0/boost/math/tools/minima.hpp
  // Minimize f on [min, max] starting from `guess`; returns (argmin, f(argmin)).
  // `significant_digits` controls the convergence tolerance; at most
  // `max_iter` iterations are performed.
  // NOTE(review): `step_size` is never referenced in this overload's body; it
  // appears to exist only for signature parity with
  // BrentMinimizeWithGradients — confirm before removing.
  template <class T>
  static std::tuple<T, T> BrentMinimize(NegFunc<T> f, T guess, T min, T max,
                                        int significant_digits, size_t max_iter,
                                        T step_size) {
    T tolerance = static_cast<T>(ldexp(1.0, 1 - significant_digits));
    T x;  // minima so far
    T w;  // second best point
    T v;  // previous value of w
    T u;  // most recent evaluation point
    T delta;  // The distance moved in the last step
    T delta2;  // The distance moved in the step before last
    T fu, fv, fw, fx;  // function evaluations at u, v, w, x
    T mid;  // midpoint of min and max
    T fract1, fract2;  // minimal relative movement in x
    // golden ratio, don't need too much precision here!
    static const T golden = 0.3819660f;
    w = v = x = guess;
    fw = fv = fx = f(x);
    delta2 = delta = 0;
    size_t count = max_iter;
    do {
      // get midpoint
      mid = (min + max) / 2;
      // ** Convergence Test:
      // work out if we're done already:
      fract1 = tolerance * fabs(x) + tolerance / 4;
      fract2 = 2 * fract1;
      if (fabs(x - mid) <= (fract2 - (max - min) / 2)) {
        break;
      }
      // ** Parabolic Fit (variant of Inverse Quadratic Interpolation?):
      bool use_bisection = true;  // only triggers if IQI fails.
      if (fabs(delta2) > fract1) {
        // Try and construct a parabolic fit:
        T r = (x - w) * (fx - fv);
        T q = (x - v) * (fx - fw);
        T p = (x - v) * q - (x - w) * r;
        q = 2 * (q - r);
        if (q > 0) p = -p;
        q = fabs(q);
        T td = delta2;
        delta2 = delta;
        // Determine whether a parabolic step is acceptable or not:
        // must fail all three threshold tests to be accepted.
        if (((fabs(p) >= fabs(q * td / 2)) == false) &&
            ((p <= q * (min - x)) == false) && ((p >= q * (max - x)) == false)) {
          // whew, parabolic fit:
          delta = p / q;
          u = x + delta;
          if (((u - min) < fract2) || ((max - u) < fract2)) {
            delta = (mid - x) < 0 ? static_cast<T>(-fabs(fract1))
                                  : static_cast<T>(fabs(fract1));
          }
          // parabolic fit was a success, so don't need bisection.
          use_bisection = false;
        }
      }
      // ** Golden Bisection Method (this is an optimization of traditional Bisection
      // Method)
      if (use_bisection) {
        // golden section:
        delta2 = (x >= mid) ? min - x : max - x;
        delta = golden * delta2;
      }
      // ** Update current position:
      u = (fabs(delta) >= fract1)
              ? T(x + delta)
              : (delta > 0 ? T(x + fabs(fract1)) : T(x - fabs(fract1)));
      fu = f(u);
      if (fu <= fx) {
        // good new point is an improvement!
        // update brackets (previous guess becomes the new outer bracket):
        if (u >= x)
          min = x;
        else
          max = x;
        // update control points:
        v = w;
        w = x;
        x = u;
        fv = fw;
        fw = fx;
        fx = fu;
      } else {
        // Oh dear, point u is worse than what we have already,
        // even so it *must* be better than one of our endpoints:
        if (u < x)
          min = u;
        else
          max = u;
        if ((fu <= fw) || (w == x)) {
          // however it is at least second best:
          v = w;
          w = u;
          fv = fw;
          fw = fu;
        } else if ((fu <= fv) || (v == x) || (v == w)) {
          // third best:
          v = u;
          fv = fu;
        }
      }
    } while (--count);  // countdown until max iterations.
    // NOTE(review): max_iter is a by-value parameter, so this adjustment is
    // invisible to the caller (dead store).
    max_iter -= count;
    return std::make_tuple(x, fx);
  }
  // Same as BrentMinimize, but `f` also returns the derivative; when the
  // Brent candidate point fails to improve, a gradient-descent step of
  // `step_size` is attempted before falling back to bracket shrinking.
  template <class T>
  static std::tuple<T, T> BrentMinimizeWithGradients(NegFuncAndDerivative<T> f, T guess,
                                                     T min, T max,
                                                     int significant_digits,
                                                     size_t max_iter, T step_size) {
    T tolerance = static_cast<T>(ldexp(1.0, 1 - significant_digits));
    T x;  // minima so far
    T w;  // second best point
    T v;  // previous value of w
    T u;  // most recent evaluation point
    T delta;  // The distance moved in the last step
    T delta2;  // The distance moved in the step before last
    T fu, fv, fw, fx;  // function evaluations at u, v, w, x
    T mid;  // midpoint of min and max
    T fract1, fract2;  // minimal relative movement in x
    // golden ratio, don't need too much precision here!
    static const T golden = 0.3819660f;
    w = v = x = guess;
    fw = fv = fx = f(x).first;
    delta2 = delta = 0;
    size_t count = max_iter;
    do {
      // get midpoint
      mid = (min + max) / 2;
      // ** Convergence Test:
      // work out if we're done already:
      fract1 = tolerance * fabs(x) + tolerance / 4;
      fract2 = 2 * fract1;
      if (fabs(x - mid) <= (fract2 - (max - min) / 2)) {
        break;
      }
      // ** Parabolic Fit (variant of Inverse Quadratic Interpolation?):
      bool use_bisection = true;  // only triggers if IQI fails.
      if (fabs(delta2) > fract1) {
        // Try and construct a parabolic fit:
        T r = (x - w) * (fx - fv);
        T q = (x - v) * (fx - fw);
        T p = (x - v) * q - (x - w) * r;
        q = 2 * (q - r);
        if (q > 0) p = -p;
        q = fabs(q);
        T td = delta2;
        delta2 = delta;
        // Determine whether a parabolic step is acceptable or not:
        // must fail all three threshold tests to be accepted.
        if (((fabs(p) >= fabs(q * td / 2)) == false) &&
            ((p <= q * (min - x)) == false) && ((p >= q * (max - x)) == false)) {
          // whew, parabolic fit:
          delta = p / q;
          u = x + delta;
          if (((u - min) < fract2) || ((max - u) < fract2)) {
            delta = (mid - x) < 0 ? static_cast<T>(-fabs(fract1))
                                  : static_cast<T>(fabs(fract1));
          }
          // parabolic fit was a success, so don't need bisection.
          use_bisection = false;
        }
      }
      // ** Golden Bisection Method (this is an optimization of traditional Bisection
      // Method)
      if (use_bisection) {
        // golden section:
        delta2 = (x >= mid) ? min - x : max - x;
        delta = golden * delta2;
      }
      // ** Update current position:
      u = (fabs(delta) >= fract1)
              ? T(x + delta)
              : (delta > 0 ? T(x + fabs(fract1)) : T(x - fabs(fract1)));
      fu = f(u).first;
      if (fu <= fx) {
        // good new point is an improvement!
        // update brackets (previous guess becomes the new outer bracket):
        if (u >= x)
          min = x;
        else
          max = x;
        // update control points:
        v = w;
        w = x;
        x = u;
        fv = fw;
        fw = fx;
        fx = fu;
      } else {
        // Considering update using gradient descent
        T f_prime_x = f(x).second;
        T u_ = x - step_size * f_prime_x;
        T fu_ = f(u_).first;
        if (fu_ <= fx) {
          // good new point using gradient is an improvement!
          // update brackets (previous guess becomes the new outer bracket):
          if (u_ >= x)
            min = x;
          else
            max = x;
          // update control points:
          v = w;
          w = x;
          x = u_;
          fv = fw;
          fw = fx;
          fx = fu_;
        } else {
          // Oh dear, point u is worse than what we have already,
          // even so it *must* be better than one of our endpoints:
          // NOTE(review): this fallback intentionally reuses the Brent
          // candidate `u`/`fu`, not the gradient candidate `u_`/`fu_`.
          if (u < x)
            min = u;
          else
            max = u;
          if ((fu <= fw) || (w == x)) {
            // however it is at least second best:
            v = w;
            w = u;
            fv = fw;
            fw = fu;
          } else if ((fu <= fv) || (v == x) || (v == w)) {
            // third best:
            v = u;
            fv = fu;
          }
        }
      }
    } while (--count);  // countdown until max iterations.
    // NOTE(review): by-value parameter; this adjustment is a dead store.
    max_iter -= count;
    return std::make_tuple(x, fx);
  }
  // Maximize f by fixed-step gradient ascent, clamping x at min_x. Stops when
  // |f'(x)| < |f(x)| * 10^-significant_digits or after max_iter iterations.
  static double GradientAscent(FuncAndDerivative<double> f_and_f_prime, double x,
                               const int significant_digits, const double step_size,
                               const double min_x, const size_t max_iter) {
    double tolerance = std::pow(10, -significant_digits);
    size_t iter_idx = 0;
    while (true) {
      auto [f_x, f_prime_x] = f_and_f_prime(x);
      const double new_x = x + f_prime_x * step_size;
      x = std::max(new_x, min_x);
      if (fabs(f_prime_x) < fabs(f_x) * tolerance || iter_idx >= max_iter) {
        return x;
      }
      ++iter_idx;
    }
  }
  // Gradient ascent performed in log(x) space (the chain rule gives the
  // log-space gradient as x * f'(x)); x is clamped at min_x after each step.
  static double LogSpaceGradientAscent(FuncAndDerivative<double> f_and_f_prime,
                                       double x, const int significant_digits,
                                       const double log_space_step_size,
                                       const double min_x, const size_t max_iter) {
    double tolerance = static_cast<double>(std::pow(10, -significant_digits));
    size_t iter_idx = 0;
    while (true) {
      double y = log(x);
      auto [f_x, f_prime_x] = f_and_f_prime(x);
      double log_space_grad = x * f_prime_x;
      const double new_y = y + log_space_grad * log_space_step_size;
      const double new_x = exp(new_y);
      x = std::max(new_x, min_x);
      if (fabs(f_prime_x) < fabs(f_x) * tolerance || iter_idx >= max_iter) {
        return x;
      }
      ++iter_idx;
    }
  }
  // Newton-Raphson on f' (i.e. finds a stationary point of f), with halved
  // steps back toward [min_x, max_x] when an iterate leaves the bracket.
  // Returns x unchanged when |f''(x)| < epsilon (step would be unstable).
  static double NewtonRaphsonOptimization(
      FuncAndFirstTwoDerivatives<double> f_and_derivatives, double x,
      const int significant_digits, const double epsilon, const double min_x,
      const double max_x, const size_t max_iter) {
    double tolerance = pow(10, -significant_digits);
    size_t iter_idx = 0;
    double new_x, delta;
    // NOTE(review): min/max are never reassigned below, so they always equal
    // min_x/max_x; the extra locals look like leftovers from bracket updates.
    double min = min_x;
    double max = max_x;
    while (true) {
      auto [f_x, f_prime_x, f_double_prime_x] = f_and_derivatives(x);
      if (fabs(f_double_prime_x) < epsilon) {
        return x;
      }
      new_x = x - f_prime_x / f_double_prime_x;
      if (new_x < min_x) {
        new_x = x - 0.5 * (x - min);
      }
      if (new_x > max_x) {
        new_x = x - 0.5 * (x - max);
      }
      delta = fabs(x - new_x);
      if (delta < tolerance || fabs(f_prime_x) < fabs(f_x) * tolerance ||
          iter_idx == max_iter) {
        return x;
      }
      x = new_x;
      ++iter_idx;
    }
  }
};
// Convenience alias so callers can name the method enum without the class scope.
using OptimizationMethod = Optimization::OptimizationMethod;
| 13,555
|
C++
|
.h
| 360
| 29.022222
| 88
| 0.531559
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,071
|
engine.hpp
|
phylovi_bito/src/engine.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// "Engine" is short for "phylogenetic likelihood computation engine".
// This engine has FatBeagles as cylinders.
#pragma once
#include <memory>
#include <utility>
#include <vector>
#include "fat_beagle.hpp"
#include "phylo_flags.hpp"
#include "phylo_model.hpp"
#include "rooted_tree_collection.hpp"
#include "site_pattern.hpp"
#include "unrooted_tree_collection.hpp"
// Aggregate of settings used to construct an Engine.
struct EngineSpecification {
  // Number of FatBeagle instances / worker threads to create.
  const size_t thread_count_;
  // BEAGLE flags, one entry per instance.
  // NOTE(review): reference member — the referenced vector must outlive the
  // specification (and any Engine built from it); confirm at call sites.
  const std::vector<BeagleFlags> &beagle_flag_vector_;
  // Whether to represent tips as states (vs. partials).
  const bool use_tip_states_;
};
// Phylogenetic likelihood computation engine: owns a SitePattern and a pool
// of FatBeagle instances, and dispatches likelihood/gradient computations
// over tree collections.
class Engine {
 public:
  Engine(const EngineSpecification &engine_specification,
         const PhyloModelSpecification &specification, SitePattern site_pattern);

  // Block layout describing how phylo-model parameters are packed.
  const BlockSpecification &GetPhyloModelBlockSpecification() const;

  // Per-tree log likelihoods for an unrooted collection.
  std::vector<double> LogLikelihoods(
      const UnrootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Per-tree log likelihoods for a rooted collection.
  std::vector<double> LogLikelihoods(
      const RootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Log likelihoods of rooted trees treated as unrooted.
  std::vector<double> UnrootedLogLikelihoods(
      const RootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Log-determinant of the Jacobian of the node-height transform, per tree.
  std::vector<double> LogDetJacobianHeightTransform(
      const RootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Per-tree phylogenetic gradients (unrooted collection).
  std::vector<PhyloGradient> Gradients(
      const UnrootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Per-tree phylogenetic gradients (rooted collection).
  std::vector<PhyloGradient> Gradients(
      const RootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;
  // Gradient of the Jacobian log-determinant above, per tree.
  std::vector<DoubleVector> GradientLogDeterminantJacobian(
      const RootedTreeCollection &tree_collection,
      const EigenMatrixXdRef phylo_model_params, const bool rescaling,
      const std::optional<PhyloFlags> flags = std::nullopt) const;

  // Non-owning pointer to the first FatBeagle in the pool.
  const FatBeagle *const GetFirstFatBeagle() const;

 private:
  SitePattern site_pattern_;
  std::vector<std::unique_ptr<FatBeagle>> fat_beagles_;
};
| 2,676
|
C++
|
.h
| 58
| 42.086207
| 81
| 0.773773
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,072
|
sugar_wrappers.hpp
|
phylovi_bito/src/sugar_wrappers.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Templates for a strong typing. Creates a lightweight wrapper for primitive types.
// Enforces explicit casting between differenct strong types and the underlying
// primitive. Also includes template for inheriting primitive's hashing function for use
// in stl types.
// https://www.fluentcpp.com/2016/12/08/strong-types-for-strong-interfaces/
//
// Templates for enumerated types. Creates a wrapper class for a base enum class.
// Includes using enums to access stl storage classes.
#pragma once
#include <cassert>
#include <iostream>
#include <map>
#include <optional>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <type_traits>
#include <iterator>
// ** Strongly Typed Wrappers
// Wrapper for ID types.
// Strongly-typed id wrapper: a size_t distinguished at compile time by its
// `TypeNameTag`, so distinct id kinds (e.g. NodeId vs EdgeId) cannot be mixed
// accidentally. Default-constructs to the NoId sentinel.
template <typename TypeNameTag>
struct GenericId {
  using SelfType = GenericId<TypeNameTag>;
  using UnderlyingType = size_t;
  // Sentinel meaning "no id assigned".
  static constexpr size_t NoId = std::numeric_limits<size_t>::max();

  GenericId() = default;
  explicit GenericId(UnderlyingType const &value) : value_(value) {}
  // Rvalue overload (implicit); SFINAE-disabled when UnderlyingType is a
  // reference type.
  template <typename T_ = UnderlyingType>
  GenericId(
      UnderlyingType &&value,
      typename std::enable_if<!std::is_reference<T_>{}, std::nullptr_t>::type = nullptr)
      : value_(std::move(value)) {}

  // Explicit cast to primitive.
  explicit operator UnderlyingType &() { return value_; }
  explicit operator UnderlyingType() const { return value_; }

  // Three-way comparison to its own type: negative/zero/positive when *this
  // is respectively less than/equal to/greater than `other`.
  int Compare(const SelfType &other) const { return Compare(other.value_); }
  bool operator==(const SelfType &other) const { return value_ == other.value_; }
  bool operator!=(const SelfType &other) const { return value_ != other.value_; }
  bool operator>(const SelfType &other) const { return value_ > other.value_; }
  bool operator<(const SelfType &other) const { return value_ < other.value_; }
  bool operator>=(const SelfType &other) const { return value_ >= other.value_; }
  bool operator<=(const SelfType &other) const { return value_ <= other.value_; }

  // Three-way comparison to its primitive.
  // BUG FIX: the previous implementation returned
  // `(value_ > other) ? value_ - other : other - value_`, i.e. the absolute
  // difference — always non-negative (and truncated from size_t to int), so
  // callers could never detect "less than". Now follows the conventional
  // negative/zero/positive contract.
  int Compare(const UnderlyingType &other) const {
    if (value_ < other) {
      return -1;
    }
    return (value_ > other) ? 1 : 0;
  }
  bool operator==(const UnderlyingType &other) const { return value_ == other; }
  bool operator!=(const UnderlyingType &other) const { return value_ != other; }
  bool operator>(const UnderlyingType &other) const { return value_ > other; }
  bool operator<(const UnderlyingType &other) const { return value_ < other; }
  bool operator>=(const UnderlyingType &other) const { return value_ >= other; }
  bool operator<=(const UnderlyingType &other) const { return value_ <= other; }

  // Increment
  SelfType &operator++() {
    value_++;
    return *this;
  }
  SelfType operator++(int) {
    SelfType temp = *this;
    ++*this;
    return temp;
  }

  // I/O: renders e.g. "Id::42", or "Id::NoId" for the sentinel.
  static std::string PrefixToString() { return "Id"; }
  std::string ToString(const bool include_prefix = true) const {
    std::stringstream os;
    os << (include_prefix ? PrefixToString() : "")
       << "::" << ((value_ == NoId) ? "NoId" : std::to_string(value_));
    return os.str();
  }
  friend std::ostream &operator<<(std::ostream &os, const SelfType &obj) {
    os << obj.ToString(true);
    return os;
  }

  UnderlyingType value_ = NoId;
};
// File-scope alias of the sentinel, shared across all id instantiations.
constexpr size_t NoId = std::numeric_limits<size_t>::max();
// Hash support for GenericId: forwards to the hash of the underlying size_t,
// so GenericIds work as keys in unordered containers.
namespace std {
template <typename TypeNameTag>
struct hash<GenericId<TypeNameTag>> {
  std::size_t operator()(const GenericId<TypeNameTag> &id) const noexcept {
    return std::hash<size_t>{}(id.value_);
  }
};
}  // namespace std
// ** Enumerated Type Wrappers
// Generic Iterator for enum class types.
// Requires that enum's underlying types have no gaps.
// Iterator over the contiguous enum-class range [FirstEnum, LastEnum].
// Requires the enum's underlying values to be gap-free.
template <typename EnumType, EnumType FirstEnum, EnumType LastEnum>
class EnumIterator {
  using underlying_t = typename std::underlying_type<EnumType>::type;
  int position;

 public:
  // Start at an arbitrary enum value.
  EnumIterator(const EnumType &start) : position(static_cast<underlying_t>(start)) {}
  // Default: start at the first enum value.
  EnumIterator() : position(static_cast<underlying_t>(FirstEnum)) {}
  // Advance to the next value (returns a copy, mirroring the historical API).
  EnumIterator operator++() {
    ++position;
    return *this;
  }
  // Current enum value.
  EnumType operator*() { return static_cast<EnumType>(position); }
  // Range support: a (default-constructed) iterator is its own beginning...
  EnumIterator begin() { return *this; }
  // ...and the end is one past LastEnum (computed once and cached).
  EnumIterator end() {
    static const EnumIterator past_the_end = ++EnumIterator(LastEnum);
    return past_the_end;
  }
  bool operator!=(const EnumIterator &other) { return position != other.position; }
};
// Generic Array for using class enum for index access.
// Fixed-size array addressed by enum-class values instead of raw indices.
template <class EnumType, size_t EnumCount, class DataType>
class EnumArray {
 private:
  std::array<DataType, EnumCount> array_;

 public:
  EnumArray() = default;
  EnumArray(std::array<DataType, EnumCount> array) : array_(std::move(array)) {}
  // Element access keyed by enum value.
  DataType &operator[](const EnumType which) {
    return array_[static_cast<int>(which)];
  }
  const DataType &operator[](const EnumType which) const {
    return array_[static_cast<int>(which)];
  }
  // Assign `fill_value` to every slot.
  void fill(DataType fill_value) { array_.fill(fill_value); }
  // Number of slots (one per enum value).
  int size() const { return array_.size(); }
  auto begin() const { return std::begin(array_); }
  auto end() const { return std::end(array_); }
  friend std::ostream &operator<<(std::ostream &os, const EnumArray &array) {
    os << array.array_;
    return os;
  }
};
// Generic Wrapper with collection of static functions for using class enum with
// common data structures.
// Static helper bundle for an enum class: exposes its range, count, an
// iterator type, an enum-indexed array type, and basic string rendering.
template <class EnumType, class UnderlyingType, size_t EnumCount, EnumType FirstEnum,
          EnumType LastEnum>
class EnumWrapper {
 public:
  using Type = EnumType;
  using Iterator = EnumIterator<EnumType, FirstEnum, LastEnum>;
  template <class DataType>
  using Array = EnumArray<EnumType, EnumCount, DataType>;

  static inline const EnumType First = FirstEnum;
  static inline const EnumType Last = LastEnum;
  static inline const size_t Count = EnumCount;

  // Underlying integral value of the enum member.
  static UnderlyingType GetIndex(const EnumType i) {
    return static_cast<UnderlyingType>(i);
  }
  // All enum members, in declaration order.
  static std::array<Type, Count> TypeArray() {
    std::array<Type, Count> arr;
    size_t i = 0;
    for (const auto e : Iterator()) {
      arr[i++] = e;
    }
    return arr;
  }
  // Default rendering, e.g. "Enum::2" (subclasses typically override with
  // named labels).
  static std::string ToString(const EnumType i) {
    std::stringstream os;
    os << "Enum::" << std::to_string(GetIndex(i));
    return os.str();
  }
};
| 6,424
|
C++
|
.h
| 165
| 35.866667
| 88
| 0.707524
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,073
|
mmapped_matrix.hpp
|
phylovi_bito/src/mmapped_matrix.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// RAII class for Eigen matrices that are mmapped to disk.
//
// https://en.wikipedia.org/wiki/Memory-mapped_file
// Motivating post: https://stackoverflow.com/a/43301909/467327
// Intro: https://www.youtube.com/watch?v=m7E9piHcfr4
// Simple example: https://jameshfisher.com/2017/01/28/mmap-file-write/
// Most complete example: https://gist.github.com/marcetcheverry/991042
#pragma once
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <iostream>
#include "eigen_sugar.hpp"
#include "sugar.hpp"
// RAII wrapper around an Eigen matrix whose backing storage is a file mapped
// into memory with mmap. The destructor syncs the mapping back to disk and
// releases the file descriptor.
template <typename EigenDenseMatrixBaseT>
class MmappedMatrix {
  using Scalar = typename Eigen::DenseBase<EigenDenseMatrixBaseT>::Scalar;

 public:
  // Opens (creating if necessary) the backing file and maps rows*cols Scalars.
  // Throws via Failwith if the file cannot be created/opened.
  // NOTE(review): byte_count_ = rows * cols * sizeof(Scalar) can overflow for
  // very large matrices — confirm expected dimension ranges.
  MmappedMatrix(const std::string &file_path, Eigen::Index rows, Eigen::Index cols)
      : rows_(rows),
        cols_(cols),
        byte_count_(rows * cols * sizeof(Scalar)),
        file_path_(file_path) {
    file_descriptor_ = open(
        file_path.c_str(),
        O_RDWR | O_CREAT,  // Open for reading and writing; create if it doesn't exist.
        S_IRUSR | S_IWUSR  // Make the file readable and writable by the user.
    );
    if (file_descriptor_ == -1) {
      Failwith("MmappedMatrix could not create a file at " + file_path_);
    }
    InitializeMMap(rows, cols);
  }

  // Flushes the mapping to disk, unmaps it, and closes the file.
  // Failures are reported as warnings (never thrown, since this is a dtor).
  // NOTE(review): strerror needs <cstring>; it is likely only pulled in
  // transitively here — confirm the include list.
  ~MmappedMatrix() {
    auto CheckStatus = [](int status, std::string name) {
      if (status != 0) {
        std::cout << "Warning: " << name
                  << " did not succeed in MmappedMatrix: " << strerror(errno)
                  << std::endl;
      }
    };
    // Synchronize memory with physical storage.
    auto msync_status = msync(mmapped_memory_, byte_count_, MS_SYNC);
    CheckStatus(msync_status, "msync");
    // Unmap memory mapped with mmap.
    auto munmap_status = munmap(mmapped_memory_, byte_count_);
    CheckStatus(munmap_status, "munmap");
    auto close_status = close(file_descriptor_);
    CheckStatus(close_status, "close");
  }

  // Initialize virtual memory map: size the file with ftruncate and mmap it.
  void InitializeMMap(Eigen::Index rows, Eigen::Index cols) {
    // Resizes file so it's just right for our vector.
    rows_ = rows;
    cols_ = cols;
    byte_count_ = rows * cols * sizeof(Scalar);
    auto ftruncate_status = ftruncate(file_descriptor_, byte_count_);
    if (ftruncate_status != 0) {
      Failwith("MmappedMatrix could not intialize the file at " + file_path_);
    }
    mmapped_memory_ = static_cast<Scalar *>(mmap(  //
        NULL,                    // This address is ignored as we are using MAP_SHARED.
        byte_count_,             // Size of map.
        PROT_READ | PROT_WRITE,  // We want to read and write.
        MAP_SHARED,              // We need MAP_SHARED to actually write to memory.
        file_descriptor_,        // File descriptor.
        0                        // Offset.
        ));
    if (mmapped_memory_ == MAP_FAILED) {
      throw std::system_error(errno, std::system_category(), "mmap");
    }
  }

  // Resize virtual memory map. Optional reporting (quiet=false prints whether
  // the remapping moved the virtual address).
  void ResizeMMap(Eigen::Index rows, Eigen::Index cols, const bool quiet = true) {
    std::stringstream dev_null;
    auto &our_ostream = quiet ? dev_null : std::cout;
    // Resizes file so it's just right for our vector.
    rows_ = rows;
    cols_ = cols;
    const size_t old_byte_count = byte_count_;
    byte_count_ = rows * cols * sizeof(Scalar);
    if (byte_count_ == old_byte_count) {
      return;
    }
    auto ftruncate_status = ftruncate(file_descriptor_, byte_count_);
    void *old_mmapped_memory = static_cast<void *>(mmapped_memory_);
    if (ftruncate_status != 0) {
      Failwith("MmappedMatrix could not resize the file at " + file_path_);
    }
    // OSX mman and mman-win32 do not implement mremap or MREMAP_MAYMOVE,
    // so fall back to munmap + a fresh mmap on those platforms.
#ifndef MREMAP_MAYMOVE
    if (munmap(mmapped_memory_, old_byte_count) == -1) {
      throw std::system_error(errno, std::system_category(), "munmap");
    }
    mmapped_memory_ = static_cast<Scalar *>(mmap(  //
        NULL,                    // This address is ignored as we are using MAP_SHARED.
        byte_count_,             // Size of map.
        PROT_READ | PROT_WRITE,  // We want to read and write.
        MAP_SHARED,              // We need MAP_SHARED to actually write to memory.
        file_descriptor_,        // File descriptor.
        0                        // Offset.
        ));
#else
    mmapped_memory_ = static_cast<Scalar *>(mremap(  //
        static_cast<void *>(mmapped_memory_),        // old address
        old_byte_count,                              // old size
        byte_count_,                                 // new size
        MREMAP_MAYMOVE  // will move virtual memory if necessary.
        ));
#endif
    if (mmapped_memory_ == MAP_FAILED) {
      throw std::system_error(errno, std::system_category(), "mremap");
    }
    // Report if remapping occurred.
    bool is_remapping_moved =
        (static_cast<void *>(mmapped_memory_) != old_mmapped_memory);
    our_ostream << "REMAPPING OCCURRED: " << old_byte_count << " bytes -> "
                << byte_count_ << " bytes " << std::endl;
    our_ostream << "REMAPPING DID " << (is_remapping_moved ? "" : "*NOT* ")
                << "MOVE MEMORY ADDRESS: " << old_mmapped_memory << " "
                << mmapped_memory_ << std::endl;
  }

  // Non-copyable and non-movable: the mapping and fd are uniquely owned.
  MmappedMatrix(const MmappedMatrix &) = delete;
  MmappedMatrix(const MmappedMatrix &&) = delete;
  MmappedMatrix &operator=(const MmappedMatrix &) = delete;
  MmappedMatrix &operator=(const MmappedMatrix &&) = delete;

  // Returns an Eigen view over the mapped memory (valid while *this lives).
  Eigen::Map<EigenDenseMatrixBaseT> Get() {
    return Eigen::Map<EigenDenseMatrixBaseT>(mmapped_memory_, rows_, cols_);
  }

  size_t ByteCount() const { return byte_count_; }

 private:
  Eigen::Index rows_;
  Eigen::Index cols_;
  size_t byte_count_;
  int file_descriptor_;
  std::string file_path_;
  Scalar *mmapped_memory_;
};
#ifdef DOCTEST_LIBRARY_INCLUDED
// Round-trip test: write a value through one mapping, let it go out of scope
// (flushing to disk in the destructor), then remap the same file and check
// that the value persisted.
TEST_CASE("MmappedMatrix") {
  Eigen::Index rows = 4;
  Eigen::Index cols = 5;
  using MmappedMatrixXd = MmappedMatrix<Eigen::MatrixXd>;
  {
    MmappedMatrixXd mmapped_matrix("_ignore/mmapped_matrix.data", rows, cols);
    mmapped_matrix.Get()(rows - 1, cols - 1) = 5.;
  }  // End of scope, so our mmap is destroyed and file written.
  MmappedMatrixXd mmapped_matrix("_ignore/mmapped_matrix.data", rows, cols);
  CHECK_EQ(mmapped_matrix.Get()(rows - 1, cols - 1), 5.);
}
#endif  // DOCTEST_LIBRARY_INCLUDED
| 6,500
|
C++
|
.h
| 155
| 36.76129
| 87
| 0.631521
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,074
|
sugar.hpp
|
phylovi_bito/src/sugar.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include <cassert>
#include <functional>
#include <iomanip>
#include <iostream>
#include <map>
#include <optional>
#include <queue>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "intpack.hpp"
#include "prettyprint.hpp"
#include "sugar_wrappers.hpp"
#include "sugar_iterators.hpp"
// Put typedefs that are built of STL types here.
// (Cleanup: DoubleVector was previously aliased three times and BoolVector
// twice; each alias is now declared exactly once.)
using Tag = uint64_t;
using SymbolVector = std::vector<int>;
using BoolVector = std::vector<bool>;
using IntVector = std::vector<int>;
using SizeVector = std::vector<size_t>;
using DoubleVector = std::vector<double>;
using SizeVectorVector = std::vector<SizeVector>;
using DoubleVectorVector = std::vector<std::vector<double>>;
using TagDoubleMap = std::unordered_map<Tag, double>;
using TagSizeMap = std::unordered_map<Tag, size_t>;
using TagStringMap = std::unordered_map<Tag, std::string>;
using StringStringMap = std::unordered_map<std::string, std::string>;
using CharIntMap = std::unordered_map<char, int>;
using StringSizeMap = std::unordered_map<std::string, size_t>;
using StringDoubleMap = std::unordered_map<std::string, double>;
using DoubleVectorOption = std::optional<std::vector<double>>;
using TagStringMapOption = std::optional<TagStringMap>;
using StringVector = std::vector<std::string>;
using CStringVector = std::vector<const char *>;
using StringVectorVector = std::vector<StringVector>;
using StringSet = std::unordered_set<std::string>;
using StringSetVector = std::vector<StringSet>;
using StringDoubleVector = std::vector<std::pair<std::string, double>>;
using SizeDoubleMap = std::unordered_map<size_t, double>;
using StringBoolVector = std::vector<std::pair<std::string, bool>>;
using StringBoolDoubleVector = std::vector<std::tuple<std::string, bool, double>>;
using StringPairVector = std::vector<std::pair<std::string, std::string>>;
using SizeDoubleVectorMap = std::unordered_map<size_t, std::vector<double>>;
using DoublePair = std::pair<double, double>;
using SizePair = std::pair<size_t, size_t>;
using SizePairVector = std::vector<std::pair<size_t, size_t>>;
using SizeOptionVector = std::vector<std::optional<size_t>>;
using DoubleVectorPair = std::pair<DoubleVector, DoubleVector>;
// Homogeneous fixed-arity tuples.
template <typename T>
using Tuple2 = std::tuple<T, T>;
template <typename T>
using Tuple3 = std::tuple<T, T, T>;
// Fixed-size array of C strings.
template <size_t L>
using CStringArray = std::array<const char *, L>;
// Extracts the first packed int of the tag: the maximum leaf ID.
// (UnpackFirstInt is declared in intpack.hpp.)
inline uint32_t MaxLeafIDOfTag(Tag tag) { return UnpackFirstInt(tag); }
// Extracts the second packed int of the tag: the leaf count.
// (UnpackSecondInt is declared in intpack.hpp.)
inline uint32_t LeafCountOfTag(Tag tag) { return UnpackSecondInt(tag); }
// Renders a tag->taxon map as "{ { (id, count), name } ... } ", where id and
// count are the two ints packed into each tag.
inline std::string TagTaxonMapToString(const TagStringMap &tag_taxon_map) {
  std::stringstream out;
  out << "{ ";
  for (const auto &entry : tag_taxon_map) {
    out << "{ (" << MaxLeafIDOfTag(entry.first) << ", "
        << LeafCountOfTag(entry.first) << "), " << entry.second << " } ";
  }
  out << "} ";
  return out.str();
}
// Format a hash in hexadecimal notation: "0x" followed by the zero-padded
// uppercase hex digits, truncated to `length` digits.
inline std::string HashToString(const size_t hash, const size_t length = 16) {
  std::stringstream out;
  out << "0x" << std::uppercase << std::hex << std::setw(16) << std::setfill('0')
      << hash;
  const std::string full = out.str();
  return full.substr(0, length + 2);
}
// Format a double in scientific notation with the given decimal precision.
inline std::string DblToScientific(const double value, const int precision = 3) {
  std::stringstream out;
  out << std::setprecision(precision) << std::scientific << value;
  return out.str();
}
// We implement problems in terms of exceptions. That means that they work great
// in Jupyter notebooks.
//
// This macro always evaluates the argument. We use a macro for the stupid
// reason that then the assert can go away upon using NDEBUG.
// In NDEBUG builds the expression is still evaluated (for side effects) but
// the result is discarded; otherwise a false result throws via Failwith.
// Note: the non-NDEBUG form uses the GCC/Clang statement-expression
// extension ({ ... }).
#ifdef NDEBUG
#define Assert(to_evaluate, message) ((void)(to_evaluate));
#else
#define Assert(to_evaluate, message) \
  ({                                 \
    if (!(to_evaluate)) {            \
      Failwith(message);             \
    }                                \
  })
#endif
// Use Failwith when it's a problem with input data versus a problem
// with program logic. That way we can turn off Assert if we want to.
// As you can see Assert is implemented in terms of Failwith.
//
// Here we use a macro to avoid "control may reach end of non-void function"
// errors. We shouldn't have to return when we throw an exception.
// The thrown runtime_error message is suffixed with file:line and the
// enclosing function name. Uses the GCC/Clang statement-expression extension.
#define Failwith(message)                         \
  ({                                              \
    std::string str_message(message);             \
    str_message.append(" (");                     \
    str_message.append(__FILE__);                 \
    str_message.append(":");                      \
    str_message.append(std::to_string(__LINE__)); \
    str_message.append(" in ");                   \
    str_message.append(__func__);                 \
    str_message.append(")");                      \
    throw std::runtime_error(str_message);        \
  })
// Insert (k, v) into the unordered map, asserting the key was not already
// present.
template <class Key, class T, class Hash>
constexpr void SafeInsert(std::unordered_map<Key, T, Hash> &map, const Key &k,
                          const T &v) {
  Assert(map.insert({k, v}).second, "Failed map insertion!");
}
// Insert (k, v) into the unordered map, asserting the key was not already
// present. Rvalue overload: moves the key and value into the map (the
// previous version took Key&&/T&& but still copied them).
template <class Key, class T, class Hash>
constexpr void SafeInsert(std::unordered_map<Key, T, Hash> &map, Key &&k, T &&v) {
  Assert(map.insert({std::move(k), std::move(v)}).second, "Failed map insertion!");
}
// Insert (k, v) into the ordered map, asserting the key was not already
// present. NOTE(review): for std::map the third template parameter is the
// comparator, despite being named Hash here.
template <class Key, class T, class Hash>
constexpr void SafeInsert(std::map<Key, T, Hash> &map, const Key &k, const T &v) {
  Assert(map.insert({k, v}).second, "Failed map insertion!");
}
// Insert (k, v) into the ordered map, asserting the key was not already
// present. Rvalue overload: moves the key and value into the map (the
// previous version took Key&&/T&& but still copied them).
template <class Key, class T, class Hash>
constexpr void SafeInsert(std::map<Key, T, Hash> &map, Key &&k, T &&v) {
  Assert(map.insert({std::move(k), std::move(v)}).second, "Failed map insertion!");
}
// Insert k into the set, asserting it was not already present.
template <class Key, class Hash>
constexpr void SafeInsert(std::unordered_set<Key, Hash> &set, const Key &k) {
  Assert(set.insert(k).second, "Failed set insertion!");
}
// Insert k into the set, asserting it was not already present.
// Rvalue overload: moves the key into the set (the previous version took
// Key&& but still copied it).
template <class Key, class Hash>
constexpr void SafeInsert(std::unordered_set<Key, Hash> &set, Key &&k) {
  Assert(set.insert(std::move(k)).second, "Failed set insertion!");
}
// Look up `key` in `map`; return the mapped value if present, otherwise
// return `default_value`.
template <class Key, class T, class Hash>
T AtWithDefault(const std::unordered_map<Key, T, Hash> &map, const Key &key,
                T default_value) {
  const auto it = map.find(key);
  return it == map.end() ? default_value : it->second;
}
// Render any streamable value as a string followed by a newline.
// Uses '\n' rather than std::endl: the resulting string is identical, but
// flushing a stringstream is a no-op, so std::endl only obscured intent.
template <typename T>
std::string GenericToString(T x) {
  std::stringstream ss;
  ss << x << '\n';
  return ss.str();
}
// Build an unordered_map from a vector of (key, value) pairs, asserting via
// SafeInsert that all keys are distinct.
template <class Key, class T>
std::unordered_map<Key, T> UnorderedMapOf(const std::vector<std::pair<Key, T>> &v) {
  std::unordered_map<Key, T> result;
  for (const auto &key_value : v) {
    SafeInsert(result, key_value.first, key_value.second);
  }
  return result;
}
| 6,979
|
C++
|
.h
| 171
| 38.625731
| 84
| 0.682916
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,075
|
stopwatch.hpp
|
phylovi_bito/src/stopwatch.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Simple stopwatch class is for performing runtime speed tests.
// Can specify upon construction the time units (in seconds, milliseconds, or
// nanoseconds). Stopwatch uses milliseconds by default. Stopwatch has two states:
// either running or not. User can only capture times through GetElapsed...(), Lap() or
// Stop() while Stopwatch is running. Laps capture the elapsed time between each Lap()
// call (as well as Start()/Lap() and Lap()/Stop() intervals). GetTotal() returns a
// culumative total of all Start()/Stop() time intervals. User can't call Stop() on a
// stopped watch or Start() on a running watch. Clear() removes all memory and stops
// watch by default, but can optionally restart watch immediatedly.
#pragma once
#include <stdio.h>
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>
#include <vector>
#include "sugar.hpp"
// Simple stopwatch for runtime speed tests; see the file header comment for
// the usage contract (Start/Stop intervals, Lap sub-intervals, Clear).
class Stopwatch {
 public:
  using clock = std::chrono::high_resolution_clock;
  using time_point = std::chrono::time_point<clock>;
  // NOTE(review): this alias is unused by the class itself; a uint16_t
  // nanosecond tick overflows almost immediately. Kept for API compatibility.
  using time_duration = std::chrono::duration<uint16_t, std::nano>;
  enum class TimeScale { SecondScale, MillisecondScale, NanosecondScale };
  using second_double = std::chrono::duration<double, std::chrono::seconds::period>;
  using nanosec_double =
      std::chrono::duration<double, std::chrono::nanoseconds::period>;
  using millisec_double =
      std::chrono::duration<double, std::chrono::milliseconds::period>;

  // Constructor:
  // Option to start stopwatch running upon initialization, defaults to not starting.
  // Option to specify time scale, defaults to milliseconds.
  Stopwatch(bool start_on_init = false, TimeScale scale = TimeScale::MillisecondScale)
      : is_running_(false), scale_(scale), lap_seconds_(), interval_starts_() {
    interval_starts_.push_back(0);
    if (start_on_init) {
      Start();
    }
  };

  // Return the state of the watch.
  bool IsRunning() { return is_running_; }

  // Returns time elapsed from ith start/stop interval.
  double GetInterval(size_t which_interval) {
    Assert(which_interval < GetIntervalCount(),
           "Stopwatch.GetInterval(): ith start/stop interval does not exist.");
    if (is_running_ && (which_interval == GetIntervalCount() - 1)) {
      return GetElapsedOfCurrentInterval();
    }
    size_t start_point = interval_starts_[which_interval];
    size_t end_point = interval_starts_[which_interval + 1];
    double interval_total = 0.0;
    for (size_t i = start_point; i < end_point; i++) {
      interval_total += lap_seconds_[i];
    }
    return interval_total;
  };
  // Returns time elapsed during the latest completed start/stop interval.
  double GetLatestInterval() {
    Assert(GetCompleteIntervalCount() > 0,
           "Stopwatch.GetLatestInterval() cannot be called when no complete intervals "
           "exist.");
    size_t last_pos = GetCompleteIntervalCount() - 1;
    return GetInterval(last_pos);
  };
  // Returns time elapsed since last start. Retains no memory of this in Stopwatch.
  double GetElapsedOfCurrentInterval() {
    Assert(is_running_,
           "Stopwatch.GetElapsedOfCurrentInterval() cannot be called while Stopwatch "
           "is not running.");
    double elapsed_seconds = 0.0;
    size_t start_lap = interval_starts_[interval_starts_.size() - 1];
    for (size_t i = start_lap; i < lap_seconds_.size(); i++) {
      elapsed_seconds += lap_seconds_[i];
    }
    elapsed_seconds += GetElapsedOfCurrentLap();
    return elapsed_seconds;
  };
  // Returns number of time intervals that have occurred, including currently running.
  size_t GetIntervalCount() {
    return GetCompleteIntervalCount() + static_cast<size_t>(is_running_);
  }
  // Returns number of time intervals that have completed.
  size_t GetCompleteIntervalCount() {
    Assert(interval_starts_.size() >= 1,
           "Stopwatch.interval_starts_ should not be empty.");
    return interval_starts_.size() - 1;
  }
  // Returns total time elapsed over all start/stop intervals.
  // If watch is currently running, returns total over all previous interval plus time
  // elapsed in current interval.
  double GetTotal() {
    // Fix: accumulate in double (was float) to avoid precision loss when
    // summing many double-valued lap times.
    double total = 0.0;
    for (size_t i = 0; i < lap_seconds_.size(); i++) {
      total += lap_seconds_[i];
    }
    return (is_running_ ? total + GetElapsedOfCurrentLap() : total);
  };

  // Appends the time elapsed since latest lap (or start if on first lap) to laps and
  // returns time.
  double Lap() {
    Assert(is_running_,
           "Stopwatch.Lap() cannot be called while Stopwatch is not running.");
    time_point now = GetCurrentTime();
    double lap_seconds = DurationToSeconds(latest_lap_, now);
    lap_seconds_.push_back(lap_seconds);
    latest_lap_ = now;
    return lap_seconds;
  };
  // Get time elapsed over the ith lap.
  double GetLap(size_t which_lap) {
    Assert(which_lap < GetLapCount(), "Stopwatch.GetLap(): ith lap does not exist.");
    return lap_seconds_.at(which_lap);
  }
  // Returns the time interval between the latest completed lap.
  double GetLatestLap() { return GetLap(GetLapCount() - 1); };
  // Returns time elapsed since latest lap. Retains no memory of this in Stopwatch.
  double GetElapsedOfCurrentLap() {
    Assert(is_running_,
           "Stopwatch.Elapsed() cannot be called while Stopwatch is not running.");
    time_point now = GetCurrentTime();
    double elapsed_seconds = DurationToSeconds(latest_lap_, now);
    return elapsed_seconds;
  };
  // Returns vector of all lap times.
  std::vector<double> GetLaps() { return lap_seconds_; }
  // Get number of completed laps.
  size_t GetLapCount() { return lap_seconds_.size(); }

  // Starts running stopwatch.
  void Start() {
    Assert(is_running_ == false,
           "Stopwatch.Start() cannot be called while Stopwatch is already running.");
    start_time_ = GetCurrentTime();
    latest_lap_ = start_time_;
    is_running_ = true;
  };
  // Returns time elapsed between current start/stop interval, and adds time elapsed
  // from latest lap to stop to laps.
  double Stop() {
    Assert(is_running_,
           "Stopwatch.Stop() cannot be called while Stopwatch is not running.");
    time_point now = GetCurrentTime();
    double lap_seconds = DurationToSeconds(latest_lap_, now);
    lap_seconds_.push_back(lap_seconds);
    interval_starts_.push_back(lap_seconds_.size());
    double latest_interval = GetLatestInterval();
    is_running_ = false;
    return latest_interval;
  };
  // Clears total time and all lap times from memory.
  // Option to restart watch upon clearing memory, but defaults to stopped.
  void Clear(bool restart_watch = false) {
    is_running_ = false;
    lap_seconds_.clear();
    interval_starts_.clear();
    interval_starts_.push_back(0);
    if (restart_watch) {
      Start();
    }
  };

  // Puts thread to sleep for given milliseconds.
  static void Sleep(uint64_t wait_time) {
    std::this_thread::sleep_for(std::chrono::milliseconds(wait_time));
  };

 private:
  time_point GetCurrentTime() { return clock::now(); }

  // Convert a time duration to a double in the specified time scale.
  double DurationToSeconds(time_point &start_time, time_point &end_time) {
    double seconds = 0;
    switch (scale_) {
      case TimeScale::SecondScale:
        seconds = second_double(end_time - start_time).count();
        break;
      case TimeScale::MillisecondScale:
        seconds = millisec_double(end_time - start_time).count();
        break;
      case TimeScale::NanosecondScale:
        seconds = nanosec_double(end_time - start_time).count();
        break;
    }
    return seconds;
  };

  bool is_running_;
  TimeScale scale_;
  time_point start_time_;
  time_point latest_lap_;
  // Time intervals between each lap.
  std::vector<double> lap_seconds_;
  // Starting lap index for each time interval.
  std::vector<size_t> interval_starts_;
};
template <typename F, typename... Args>
auto TIME_IT(F &&fn, Args &&...args) {
Stopwatch timer(true, Stopwatch::TimeScale::SecondScale);
auto ret = fn(std::forward<Args>(args)...);
std::cout << "# Timed function: " << timer.Lap() << " sec" << std::endl;
return ret;
};
#ifdef DOCTEST_LIBRARY_INCLUDED
// Exercises the Stopwatch state machine: illegal calls while stopped/running,
// interval vs. lap bookkeeping, Clear(), and the TIME_IT helper.
TEST_CASE("Stopwatch") {
  Stopwatch watch(false, Stopwatch::TimeScale::MillisecondScale);
  // Functions not allowed while clock isn't running.
  CHECK_THROWS(watch.GetElapsedOfCurrentInterval());
  CHECK_THROWS(watch.GetElapsedOfCurrentLap());
  CHECK_THROWS(watch.GetLatestInterval());
  CHECK_THROWS(watch.Lap());
  CHECK_THROWS(watch.Stop());
  watch.Start();
  // Getting latest lap before first lap exists.
  CHECK_THROWS(watch.GetLatestLap());
  // Getting latest interval before first interval exists.
  CHECK_THROWS(watch.GetLatestInterval());
  Stopwatch::Sleep(3);
  watch.Stop();
  auto interval_1 = watch.GetLatestInterval();
  Stopwatch::Sleep(5);
  watch.Start();
  Stopwatch::Sleep(7);
  auto interval_1_during_next_interval = watch.GetLatestInterval();
  Stopwatch::Sleep(11);
  auto lap_1 = watch.Lap();
  Stopwatch::Sleep(13);
  auto lap_1_during_next_lap = watch.GetLatestLap();
  auto interval_2_midinterval = watch.GetElapsedOfCurrentInterval();
  Stopwatch::Sleep(17);
  watch.GetElapsedOfCurrentLap();
  Stopwatch::Sleep(19);
  auto interval_2 = watch.Stop();
  // Latest should fetch the last completed lap or interval, even if clock is running.
  CHECK_EQ(interval_1, interval_1_during_next_interval);
  CHECK_EQ(lap_1, lap_1_during_next_lap);
  // The mid-interval time should be less than the total time.
  CHECK_GT(interval_2, interval_2_midinterval);
  auto laps = watch.GetLaps();
  auto total = watch.GetTotal();
  std::vector<double> intervals = {interval_1, interval_2};
  auto sum_laps = std::accumulate(laps.begin(), laps.end(), 0.0);
  auto sum_intervals = std::accumulate(intervals.begin(), intervals.end(), 0.0);
  watch.Clear();
  CHECK_EQ(watch.GetTotal(), 0.0);
  CHECK_EQ(watch.GetLaps().size(), 0);
  watch.Start();
  // Function not allowed while clock is running.
  CHECK_THROWS(watch.Start());
  CHECK_EQ(doctest::Approx(sum_laps), total);
  CHECK_EQ(doctest::Approx(sum_intervals), total);
  // NOTE(review): this sleeps ~7 s of wall-clock time in a unit test —
  // consider shrinking the sleeps if suite runtime matters.
  auto sleep_twice_func = [](size_t first_sleep, size_t second_sleep) {
    Stopwatch::Sleep(first_sleep);
    Stopwatch::Sleep(second_sleep);
    return 7;
  };
  auto ret1 = TIME_IT(sleep_twice_func, 2000, 5000);
  CHECK_EQ(ret1, 7);
};
#endif  // DOCTEST_LIBRARY_INCLUDED
| 10,512
|
C++
|
.h
| 256
| 36.980469
| 87
| 0.702063
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,076
|
phylo_flags.hpp
|
phylovi_bito/src/phylo_flags.hpp
|
// Copyright 2019-2021 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// PhyloFlags are used for adding optional arguments to functions that are specified by
// the user for functions such as SBNInstance::PhyloGradients.
// PhyloMapkeys contains the keys used for accessing members of the
// GradientMap, the output returned by SBNInstance::PhyloGradients.
//
// For convenience, this should match the output mapkey if that mapkey
// directly corresponds to a function option for the computation of underlying data.
// (e.g. in FatBeagle::Gradient, we have an option to compute
// `substitution_model_rates`. If flag is set, then the return map will contain a
// `substitution_model_rates` key).
//
#pragma once
#include "sugar.hpp"
// ** Phylo Mapkey
// This is the base mapkey, used for enumerating possible keys for a given map.
// Declarations only; implementations live in the corresponding .cpp.
class PhyloMapkey {
 public:
  PhyloMapkey(const std::string &name, const std::string &key);

  // Comparators
  // Three-way compare of two mapkeys (int result; semantics defined in the
  // .cpp — presumably ordered on key_, confirm there).
  static int Compare(const PhyloMapkey &mapkey_a, const PhyloMapkey &mapkey_b);
  // General compare.
  bool operator==(const PhyloMapkey &other) const;
  friend bool operator==(const PhyloMapkey &lhs, const PhyloMapkey &rhs);
  bool operator<(const PhyloMapkey &other) const;
  friend bool operator<(const PhyloMapkey &lhs, const PhyloMapkey &rhs);
  // Compare against String
  bool operator==(const std::string &other_name) const;
  bool operator<(const std::string &other_name) const;

  // Getters
  std::string GetName() const { return name_; };
  std::string GetKey() const { return key_; };

 private:
  // This is a descriptive name of the mapkey that will be visible to the user in
  // bito pybind interface.
  std::string name_;
  // This is the uniquely identifiable key that is used for accessing a map location.
  std::string key_;
};
// ** Phylo Mapkey Set
// Contains all possible options for function.
// Declarations only; implementations live in the corresponding .cpp.
class PhyloMapkeySet {
 public:
  // Map from a mapkey's name to the mapkey itself.
  using MapkeyMap = std::map<std::string, PhyloMapkey>;

  explicit PhyloMapkeySet(const std::string &name) : name_(name){};
  PhyloMapkeySet(const std::string &name, const std::vector<PhyloMapkey> &mapkeys);

  // Insert individual mapkey.
  void AddMapkey(const PhyloMapkey &mapkey, const bool overwrite = false);
  // Does mapkey already exist in set?
  bool ContainsMapkey(const PhyloMapkey &mapkey);

  const MapkeyMap &GetAllNames() const { return all_mapkeys_; };
  std::string ToString() const;

 private:
  // Name for mapkey set.
  std::string name_;
  // List of all possible keys.
  MapkeyMap all_mapkeys_;
};
// ** Phylo FlagOption
// This is the base option flag type. Also specifies default behaviour for flags.
// Declarations only; implementations live in the corresponding .cpp.
class PhyloFlagOption {
 public:
  enum class FlagType { None, Boolean, SetValue, RunAll };
  enum class DataType { None, Double };

  PhyloFlagOption();
  PhyloFlagOption(const std::string &name, const std::string &flag,
                  const FlagType flag_type, const DataType data_type,
                  const bool is_set_when_running_defaults,
                  const bool is_set_when_not_running_defaults);

  // PhyloFlagOption FlagType-specific constructors.
  static PhyloFlagOption BooleanOption(
      const std::string &name, const std::string &flag,
      const bool is_set_when_running_defaults = true,
      const bool is_set_when_not_running_defaults = false);
  static PhyloFlagOption SetValueOption(const std::string &name,
                                        const std::string &flag,
                                        const DataType data_type);

  // Add Child Flags (these flags are set when Parent is set).
  void AddChild(const PhyloFlagOption &child);
  void AddChild(const std::string child_flag);
  // Output to String.
  std::string ToString() const;

  // Comparators
  // Three-way compare of two flag options (int result; semantics in the .cpp).
  static int Compare(const PhyloFlagOption &flag_a, const PhyloFlagOption &flag_b);
  // General compare.
  bool operator==(const PhyloFlagOption &other);
  friend bool operator==(const PhyloFlagOption &lhs, const PhyloFlagOption &rhs);
  bool operator<(const PhyloFlagOption &other);
  friend bool operator<(const PhyloFlagOption &lhs, const PhyloFlagOption &rhs);
  // Compare against String
  bool operator==(const std::string &other_name);
  bool operator<(const std::string &other_name);

  // Getters
  std::string GetName() const { return name_; };
  std::string GetFlag() const { return flag_; };
  // Call operator is shorthand for GetFlag().
  std::string operator()() const { return GetFlag(); };
  bool IsSetWhenRunningDefaults() const { return is_set_when_running_defaults_; };
  bool IsSetWhenNotRunningDefaults() const {
    return is_set_when_not_running_defaults_;
  };
  FlagType GetFlagType() const { return flag_type_; };
  DataType GetDataType() const { return data_type_; };
  const StringVector &GetChildFlags() const { return child_flags_; };

 private:
  // This is a descriptive name of the flag option that will be visible to the user in
  // bito pybind interface.
  std::string name_;
  // This is the uniquely identifiable string that is used for setting/adding flag
  // options.
  std::string flag_;
  // Determines default behavior (whether to consider this option set or unset) when
  // `is_run_defaults_` flag is set. This behavior is overridden when by explicit flags.
  bool is_set_when_running_defaults_;
  bool is_set_when_not_running_defaults_;
  // This gives the type of flag. There are:
  // - Boolean: these options are either set or unset.
  // - SetValue: these options have an associated value.
  FlagType flag_type_;
  // This gives the underlying datatype of the flag.
  // Datatype is None if anything other than a SetValue.
  DataType data_type_;
  // These allow for subflags, corresponding to subroutines of given superflag routine.
  // (e.g. in FatBeagle::Gradient, `substitution_model` flag has two subflags,
  // `substitution_model_rates` and `substitution_model_frequencies`. If both subflags
  // are set, we should consider the superflag set as well.)
  StringVector child_flags_;
};
// ** Phylo FlagOption Set
// Contains all possible options for function.
// Declarations only; implementations live in the corresponding .cpp.
class PhyloFlagOptionSet {
 public:
  // Map from a flag's name to the flag option.
  using FlagOptionMap = std::map<std::string, PhyloFlagOption>;
  // Map from a subroutine's name to its (non-owned) option set.
  using SubFlagOptionSetMap = std::map<std::string, PhyloFlagOptionSet *>;

  explicit PhyloFlagOptionSet(const std::string &name);
  PhyloFlagOptionSet(const std::string &name,
                     const std::vector<PhyloFlagOption> &options);
  PhyloFlagOptionSet(const std::string &name,
                     const std::vector<PhyloFlagOption> &options,
                     PhyloFlagOptionSet &parent_optionset);

  // Add Flag Option.
  void AddFlagOption(const PhyloFlagOption &option, const bool overwrite = false);
  // Find Flag by name.
  bool ContainsFlagOption(const PhyloFlagOption &option);
  std::optional<const PhyloFlagOption> FindFlagOptionByName(
      const std::string &name) const;

  // Add Option Set for Subroutines.
  void AddSubPhyloFlagOptionSet(PhyloFlagOptionSet &sub_option_set,
                                const bool overwrite = false);
  std::optional<PhyloFlagOptionSet *> FindSubPhyloFlagOptionSet(
      const std::string name) const;

  // Getters
  std::string GetName() const { return name_; };
  const FlagOptionMap &GetOptions() const { return all_options_; };
  const SubFlagOptionSetMap &GetSubOptionsets() const { return sub_optionsets_; };
  // Get all FlagOption name, flag strings.
  StringPairVector GetAllNames(
      std::optional<StringPairVector> vec_to_append = std::nullopt) const;
  std::string ToString() const;

 private:
  // Name for option set.
  std::string name_;
  // List of all possible options user can set.
  // Map of each flag's name to the flag.
  FlagOptionMap all_options_;
  // Option Sets for Subroutines.
  // NOTE(review): stored as raw pointers — the referenced option sets must
  // outlive this set; confirm ownership at the call sites.
  SubFlagOptionSetMap sub_optionsets_;
};
namespace MasterFlagOptions {
// This determines whether function will run its default behavior.
inline static auto run_defaults_ =
    PhyloFlagOption("RUN_DEFAULTS", "run_defaults", PhyloFlagOption::FlagType::RunAll,
                    PhyloFlagOption::DataType::None, false, false);
// The global option set; contains only the run_defaults_ flag.
inline static auto set_ = PhyloFlagOptionSet("GLOBAL", {run_defaults_});
};  // namespace MasterFlagOptions
// ** Phylo Flags
// User-facing object. Sets and stores flags for user and resolves flag value when
// function is called.
class PhyloFlags {
 public:
  // Maps flag name -> (is_set, optional associated data value).
  using FlagMap = std::map<std::string, std::pair<bool, std::optional<double>>>;
  PhyloFlags(bool is_run_defaults = true,
             PhyloFlagOptionSet &optionset = MasterFlagOptions::set_)
      : explicit_flags_(), is_run_defaults_(is_run_defaults), optionset_(&optionset){};
  // Constructs flags object, then sets every key in key_vec. T may be any type
  // accepted by an overload of SetFlag (option, flag name, pair, or tuple).
  template <class T>
  PhyloFlags(const std::vector<T> &key_vec, bool is_run_defaults = true,
             PhyloFlagOptionSet &optionset = MasterFlagOptions::set_)
      : explicit_flags_(), is_run_defaults_(is_run_defaults), optionset_(&optionset) {
    for (auto &key : key_vec) {
      SetFlag(key);
    }
  };
  // ** Flag Setter
  // FlagSet functions add or return a boolean and associated value to/from the map.
  // Final SetFlag.
  void SetFlag(const PhyloFlagOption &flag, const bool set = true,
               const double value = 1.0);
  void SetFlag(const PhyloFlagOption &flag, const double value);
  // If passed SetFlag with flag_name string, look up associated PhyloFlagOption flag
  // and forward. Asserts that flag_name names a known option in the option set.
  template <typename... ArgTypes>
  void SetFlag(const std::string &flag_name, ArgTypes... args) {
    // Find Phyloflag.
    std::optional<PhyloFlagOption> flag = optionset_->FindFlagOptionByName(flag_name);
    Assert(flag.has_value(),
           "Attempted to set a option flag by name that does not exist: \"" +
               flag_name + "\"");
    SetFlag(flag.value(), args...);
  }
  // If passed SetFlag with tuples or pairs, unbind and forward.
  template <typename... PairTypes>
  void SetFlag(const std::pair<PairTypes...> pair) {
    std::apply([this](auto &&...args) { return this->SetFlag(args...); }, pair);
  };
  template <typename... TupleTypes>
  void SetFlag(const std::tuple<TupleTypes...> tuple) {
    std::apply([this](auto &&...args) { return this->SetFlag(args...); }, tuple);
  };
  // Add in all flags from a vector.
  void SetAllFlags(const StringVector &key_vec);
  // Add in all flags from another PhyloFlags.
  void AddPhyloFlags(const std::optional<PhyloFlags> phylo_flags,
                     const bool overwrite = true);
  // Clear all set flags and values.
  void ClearFlags();
  // ** Flag Checker
  // Determine whether the associated flag will be evaluated as true or false.
  // - (1) Returns the flag's value if it has been explicitly set.
  // - (2) If not, checks whether the `is_run_defaults` flag has been set, in which case
  // the flag's default behavior is returned.
  // - (3) If not, returns false.
  bool IsFlagSet(const PhyloFlagOption &flag) const;
  bool IsFlagNotSet(const PhyloFlagOption &flag) const;
  // Checks if a flag if user may or may not have passed any options.
  // If options have not been passed, uses flag's default behavior.
  static bool IsFlagSet(const std::optional<PhyloFlags> phylo_flags,
                        const PhyloFlagOption &flag);
  static bool IsFlagNotSet(const std::optional<PhyloFlags> phylo_flags,
                           const PhyloFlagOption &flag);
  // ** Flag Value Getter
  // Returns the value associated with the flag.
  std::optional<double> GetFlagValue(const std::string &flag_name) const;
  std::optional<double> GetFlagValue(const PhyloFlagOption &flag) const;
  // Returns the flag's value if set, otherwise returns default value.
  double GetFlagValueIfSet(const std::string &flag_name, double default_value) const;
  double GetFlagValueIfSet(const PhyloFlagOption &flag, double default_value) const;
  static double GetFlagValueIfSet(const std::optional<PhyloFlags> phylo_flags,
                                  const PhyloFlagOption &flag, double default_value);
  // ** "Run Defaults" Flag
  // Special flag that triggers all other flags' default behavior.
  void SetRunDefaultsFlag(bool is_set);
  bool IsRunDefaultsSet() const;
  // ** Optionset
  const PhyloFlagOptionSet &GetOptionSet() const { return *optionset_; }
  // ** Miscellaneous
  // Get Map of all Set Flags.
  const FlagMap &GetFlagMap() const;
  // Get PhyloFlagOptionSet in use.
  const PhyloFlagOptionSet &GetFlagOptionSet() const;
  // Interprets flags as a string.
  std::string ToString() const;
 private:
  // Check if flag option has been explicitly set.
  bool IsFlagInMap(const PhyloFlagOption &flag) const;
  bool IsFlagInMap(const std::string &flag) const;
  // Explictly set flag option by adding to map.
  void AddFlagToMap(const PhyloFlagOption &flag, const bool set = true,
                    const double value = 1.0f);
  // Stores all option flags that have been manually modified, with a bool whether the
  // flag has been set, and an associated data value.
  FlagMap explicit_flags_;
  // This is a special flag that determines behavior if option is not explicitly set.
  // If is_run_defaults_ is false, all flags are treated as if unset.
  // Otherwise, all flags are treated as their default.
  bool is_run_defaults_;
  // Current Options
  PhyloFlagOptionSet *optionset_ = &MasterFlagOptions::set_;
};
// ** FlagOption Sets
// Flag Options for requesting gradients via FatBeagle::Gradient
namespace PhyloGradientFlagOptions {
inline static const auto site_model_ =
    PhyloFlagOption::BooleanOption("SITE_MODEL", "site_model", true);
inline static const auto clock_model_ =
    PhyloFlagOption::BooleanOption("CLOCK_MODEL", "clock_model", true);
inline static const auto ratios_root_height_ =
    PhyloFlagOption::BooleanOption("RATIOS_ROOT_HEIGHT", "ratios_root_height", true);
inline static const auto substitution_model_ =
    PhyloFlagOption::BooleanOption("SUBSTITUTION_MODEL", "substitution_model", true);
inline static const auto include_log_det_jacobian_gradient_ =
    PhyloFlagOption::BooleanOption("INCLUDE_LOG_DET_JACOBIAN_GRADIENT",
                                   "include_log_det_jacobian_gradient", true, true);
inline static const auto use_stickbreaking_transform_ = PhyloFlagOption::BooleanOption(
    "USE_STICKBREAKING_TRANSFORM", "use_stickbreaking_transform", true, true);
inline static const auto set_gradient_delta_ = PhyloFlagOption::SetValueOption(
    "SET_GRADIENT_DELTA", "set_gradient_delta", PhyloFlagOption::DataType::Double);
// Option set for gradient requests, registered under MasterFlagOptions.
// Fix: the original list contained `site_model_` twice and omitted
// `use_stickbreaking_transform_`, which left that option unregistered and
// therefore unresolvable by name.
inline static auto set_ = PhyloFlagOptionSet(
    "SBNInstance::Gradient",
    {site_model_, clock_model_, ratios_root_height_, substitution_model_,
     use_stickbreaking_transform_, include_log_det_jacobian_gradient_,
     set_gradient_delta_},
    MasterFlagOptions::set_);
};  // namespace PhyloGradientFlagOptions
// Flag Options for FatBeagle::LogLikelihood
namespace LogLikelihoodFlagOptions {
// Boolean flag: include the log-determinant-of-Jacobian term in the likelihood.
// Defaults to set (final `true` pair) per the BooleanOption constructor usage.
inline static const auto include_log_det_jacobian_likelihood_ =
    PhyloFlagOption::BooleanOption("INCLUDE_LOG_DET_JACOBIAN_LIKELIHOOD",
                                   "include_log_det_jacobian_likelihood", true, true);
// Option set for likelihood requests, registered under MasterFlagOptions.
inline static const PhyloFlagOptionSet set_ =
    PhyloFlagOptionSet("SBNInstance::LogLikelihood",
                       {include_log_det_jacobian_likelihood_}, MasterFlagOptions::set_);
};  // namespace LogLikelihoodFlagOptions
| 15,263
|
C++
|
.h
| 312
| 44.355769
| 88
| 0.724492
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,077
|
unrooted_sbn_support.hpp
|
phylovi_bito/src/unrooted_sbn_support.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include "sbn_maps.hpp"
#include "sbn_support.hpp"
// SBNSupport specialization for unrooted trees. Builds the rootsplit/PCSP
// indexer bundle from a topology counter and converts topologies into their
// indexer representations.
class UnrootedSBNSupport : public SBNSupport {
 public:
  // Empty support: no taxa, no topologies.
  UnrootedSBNSupport() : SBNSupport({}){};
  // Builds the full indexer bundle (rootsplits, indexer, index-to-child,
  // parent-to-child-range, GPCSP count) from the given topology counter.
  explicit UnrootedSBNSupport(const Node::TopologyCounter &topologies,
                              StringVector taxon_names)
      : SBNSupport(std::move(taxon_names)) {
    std::tie(rootsplits_, indexer_, index_to_child_, parent_to_child_range_,
             gpcsp_count_) =
        SBNMaps::BuildIndexerBundle(UnrootedSBNMaps::RootsplitCounterOf(topologies),
                                    UnrootedSBNMaps::PCSPCounterOf(topologies));
  }
  // Counts indexer representations across a topology counter; PCSPs absent from
  // the indexer are mapped to out_of_sample_index.
  UnrootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
      const Node::TopologyCounter &topology_counter, const size_t out_of_sample_index) {
    return UnrootedSBNMaps::IndexerRepresentationCounterOf(indexer_, topology_counter,
                                                           out_of_sample_index);
  }
  // Convenience overload: uses GPCSPCount() as the out-of-sample index.
  UnrootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
      const Node::TopologyCounter &topology_counter) {
    return IndexerRepresentationCounterOf(topology_counter, GPCSPCount());
  }
  // Indexer representation of a single topology.
  UnrootedIndexerRepresentation IndexerRepresentationOf(
      const Node::NodePtr &topology, const size_t out_of_sample_index) const {
    return UnrootedSBNMaps::IndexerRepresentationOf(indexer_, topology,
                                                    out_of_sample_index);
  }
  // Convenience overload: uses GPCSPCount() as the out-of-sample index.
  UnrootedIndexerRepresentation IndexerRepresentationOf(
      const Node::NodePtr &topology) const {
    return IndexerRepresentationOf(topology, GPCSPCount());
  }
  // Thin static forwarders to the UnrootedSBNMaps counters.
  static BitsetSizeDict RootsplitCounterOf(const Node::TopologyCounter &topologies) {
    return UnrootedSBNMaps::RootsplitCounterOf(topologies);
  }
  static PCSPCounter PCSPCounterOf(const Node::TopologyCounter &topologies) {
    return UnrootedSBNMaps::PCSPCounterOf(topologies);
  }
};
| 2,021
|
C++
|
.h
| 41
| 41.073171
| 88
| 0.719432
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,078
|
tp_evaluation_engine.hpp
|
phylovi_bito/src/tp_evaluation_engine.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// TP Evaluation Engine is an interface for the TPEngine, that facilitates different
// methods for scoring Top Trees. Each edge in the DAG corresponds to its "top tree",
// which is the best scoring tree contained in the DAG has the given edge, which is
// stored in TPEngine's choice maps. There are various methods for evaluating tree
// scores. Currently tree likelihoods and tree parsimony are supported.
//
// Note: Alongside scoring, handles DAG data particular to given methods. For example,
// the likelhood engine maintains DAG branch lengths.
#pragma once
#include "sugar.hpp"
#include "gp_dag.hpp"
#include "graft_dag.hpp"
#include "pv_handler.hpp"
#include "tp_choice_map.hpp"
#include "nni_operation.hpp"
#include "sankoff_handler.hpp"
#include "dag_branch_handler.hpp"
#include "optimization.hpp"
#include "substitution_model.hpp"
#include "stopwatch.hpp"
// Forward declaration: eval engines hold a back-reference to their owning TPEngine.
class TPEngine;
// Maps a subsplit/PCSP bitset to an edge id; used as the optional
// `best_edge_map` argument when scoring proposed NNIs.
// NOTE(review): assumed to map each bitset to its best-scoring edge — confirm
// against the TPEngine callers.
using BitsetEdgeIdMap = std::unordered_map<Bitset, EdgeId>;
// Bundle of partial-vector (PV) ids in the local neighborhood of an NNI:
// grandparent, parent, child, sister, and the child's two children.
// NOTE(review): field naming follows the PLV handler's p/phat/r/rhat scheme;
// confirm exact clade semantics against PLVEdgeHandler.
struct LocalPVIds {
  // R-type PVs at the grandparent node.
  PVId grandparent_rhat_;
  PVId grandparent_rfocal_;
  PVId grandparent_rsister_;
  // P- and R-type PVs at the parent node.
  PVId parent_p_;
  PVId parent_phatfocal_;
  PVId parent_phatsister_;
  PVId parent_rhat_;
  PVId parent_rfocal_;
  PVId parent_rsister_;
  // P- and R-type PVs at the child node (left/right clades).
  PVId child_p_;
  PVId child_phatleft_;
  PVId child_phatright_;
  PVId child_rhat_;
  PVId child_rleft_;
  PVId child_rright_;
  // P-type PVs at the sister and the child's children.
  PVId sister_p_;
  PVId leftchild_p_;
  PVId rightchild_p_;
};
// Compiles all data about proposed NNI needed for computing score.
// "refs" contain the data derived from the pre-NNI remapped to be referenced (not
// modified) for computations. "temps" contain temporary data locations for performing
// computations. "adjs" contains data actually adjacent to proposed NNI.
struct ProposedNNIInfo {
  // The NNI being proposed, and the pre-NNI it was derived from.
  NNIOperation post_nni;
  NNIOperation pre_nni;
  // Temporary ("temp") locations used for intermediate computation.
  LocalPVIds temp_pv_ids;
  NNIAdjEdgeIds temp_edge_ids;
  // Read-only ("ref") data remapped from the pre-NNI.
  LocalPVIds ref_pv_ids;
  NNIAdjacentMap<PVId> ref_primary_pv_ids;
  NNIAdjEdgeIds ref_edge_ids;
  NNIAdjNodeIds ref_node_ids;
  // Data actually adjacent ("adj") to the proposed NNI in the DAG.
  NNIAdjEdgeIds adj_edge_ids;
  NNIAdjPCSPs adj_pcsps;
  // Per-adjacent-edge switches for branch length optimization.
  NNIAdjBools do_optimize_edge;
};
// TPEngine helper for evaluating Top Trees.
class TPEvalEngine {
public:
TPEvalEngine(TPEngine &tp_engine);
// ** Maintenance
// Initialize Computation Engine.
virtual void Initialize() = 0;
// Update the Computation Engine after adding Node Pairs to the DAG.
virtual void UpdateEngineAfterDAGAddNodePair(const NNIOperation &post_nni,
const NNIOperation &pre_nni,
std::optional<size_t> new_tree_id) = 0;
// Update the Computation Engine after modifying DAG. Nodes and Edges in reindexer
// that exceed the prev count gives the location of the new node and edge ids.
virtual void UpdateEngineAfterModifyingDAG(
const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
const size_t prev_node_count, const Reindexer &node_reindexer,
const size_t prev_edge_count, const Reindexer &edge_reindexer) = 0;
// Computes scores. Call after Initialize or any Update steps, and before
// GetTopTreeScores.
virtual void ComputeScores(
std::optional<EdgeIdVector> opt_edge_ids = std::nullopt) = 0;
// ** Scoring
// Get the Top Tree from the DAG with the given edge.
virtual double GetTopTreeScoreWithEdge(const EdgeId edge_id) const;
// Get the Top Tree from the DAG containing the proposed NNI.
virtual double GetTopTreeScoreWithProposedNNI(
const NNIOperation &post_nni, const NNIOperation &pre_nni,
const size_t spare_offset = 0,
std::optional<BitsetEdgeIdMap> best_edge_map = std::nullopt) = 0;
// ** Resize
// Resize Engine for modified DAG.
virtual void GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
std::optional<Reindexer> edge_reindexer);
// Grow engine to handle computing NNIs for all adjacent NNIs.
// Option to grow engine for computing via reference or via copy. If computing via
// reference, option whether to use unique temporaries (for testing and computing in
// parallel).
virtual void GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
const bool via_reference = true,
const bool use_unique_temps = true);
// Resize GPEngine to accomodate DAG with given number of nodes and edges. Option
// to remap data according to DAG reindexers. Option to give explicit number of
// nodes or edges to allocate memory for (this is the only way memory allocation
// will be decreased).
virtual void GrowNodeData(
const size_t node_count,
std::optional<const Reindexer> node_reindexer = std::nullopt,
std::optional<const size_t> explicit_alloc = std::nullopt,
const bool on_init = false);
virtual void GrowEdgeData(
const size_t edge_count,
std::optional<const Reindexer> edge_reindexer = std::nullopt,
std::optional<const size_t> explicit_alloc = std::nullopt,
const bool on_init = false);
// Grow space for storing temporary computation.
virtual void GrowSpareNodeData(const size_t new_node_spare_count);
virtual void GrowSpareEdgeData(const size_t new_edge_spare_count);
// Copy all edge data from its pre_edge_id to post_edge_id.
virtual void CopyEdgeData(const EdgeId src_edge_id, const EdgeId dest_edge_id);
// ** Access
// Get reference NNIEngine.
TPEngine &GetTPEngine() { return *tp_engine_; }
const TPEngine &GetTPEngine() const { return *tp_engine_; }
// Get reference DAG.
const GPDAG &GetDAG() const { return *dag_; }
// Get reference GraftDAG.
const GraftDAG &GetGraftDAG() const { return *graft_dag_; }
// Get reference SitePattern.
const SitePattern &GetSitePattern() const { return *site_pattern_; }
// Get top tree score vector by edge.
EigenVectorXd &GetTopTreeScores() { return top_tree_per_edge_; }
const EigenVectorXd &GetTopTreeScores() const { return top_tree_per_edge_; }
protected:
// Un-owned reference to NNIEngine.
TPEngine *tp_engine_ = nullptr;
// Un-owned reference to DAG.
const GPDAG *dag_ = nullptr;
// Un-owned reference to GraftDAG.
const GraftDAG *graft_dag_ = nullptr;
// Observed leave states.
SitePattern *site_pattern_ = nullptr;
// Top-scoring tree per edge.
EigenVectorXd top_tree_per_edge_;
};
// TPEngine helper for evaluating Top Trees using Likelihood.
class TPEvalEngineViaLikelihood : public TPEvalEngine {
public:
TPEvalEngineViaLikelihood(TPEngine &tp_engine, const std::string &mmap_path);
// ** Maintenance
// Initialize Computation Engine and Populate PVs.
void Initialize() override;
// Update the Computation Engine after adding Node Pairs to the DAG.
void UpdateEngineAfterDAGAddNodePair(const NNIOperation &post_nni,
const NNIOperation &pre_nni,
std::optional<size_t> new_tree_id) override;
// Update the Computation Engine after modifying DAG. Nodes and Edges in reindexer
// that exceed the prev count gives the location of the new node and edge ids.
void UpdateEngineAfterModifyingDAG(
const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
const size_t prev_node_count, const Reindexer &node_reindexer,
const size_t prev_edge_count, const Reindexer &edge_reindexer) override;
// Computes scores. Call after Initialize or any Update steps, and before
// GetTopTreeScores.
void ComputeScores(std::optional<EdgeIdVector> opt_edge_ids = std::nullopt) override;
// ** Scoring
// Get the Top Tree from the DAG with the given edge.
double GetTopTreeScoreWithEdge(const EdgeId edge_id) const override;
// Get the Top Tree from the DAG containing the proposed NNI.
double GetTopTreeScoreWithProposedNNI(
const NNIOperation &post_nni, const NNIOperation &pre_nni,
const size_t spare_offset = 0,
std::optional<BitsetEdgeIdMap> best_edge_map = std::nullopt) override;
// ** Scoring Helpers
// Get NNI info for proposed NNI not in DAG.
ProposedNNIInfo GetProposedNNIInfo(
const NNIOperation &post_nni, const NNIOperation &pre_nni,
const size_t spare_offset = 0,
std::optional<BitsetEdgeIdMap> best_edge_map = std::nullopt) const;
// Get NNI info for NNI in DAG.
ProposedNNIInfo GetRealNNIInfo(
const NNIOperation &post_nni, const NNIOperation &pre_nni,
const size_t spare_offset = 0,
std::optional<BitsetEdgeIdMap> best_edge_map = std::nullopt) const;
// ** Resize
// Resize Engine for modified DAG.
void GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
std::optional<Reindexer> edge_reindexer) override;
// Grow engine to handle computing NNIs for all adjacent NNIs.
// Option to grow engine for computing via reference or via copy. If computing via
// reference, option whether to use unique temporaries (for testing and computing in
// parallel).
void GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
const bool via_reference = true,
const bool use_unique_temps = true) override;
// Resize GPEngine to accomodate DAG with given number of nodes and edges. Option
// to remap data according to DAG reindexers. Option to give explicit number of
// nodes or edges to allocate memory for (this is the only way memory allocation
// will be decreased).
void GrowNodeData(const size_t node_count,
std::optional<const Reindexer> node_reindexer = std::nullopt,
std::optional<const size_t> explicit_alloc = std::nullopt,
const bool on_init = false) override;
void GrowEdgeData(const size_t edge_count,
std::optional<const Reindexer> edge_reindexer = std::nullopt,
std::optional<const size_t> explicit_alloc = std::nullopt,
const bool on_init = false) override;
// Grow space for storing temporary computation.
void GrowSpareNodeData(const size_t new_node_spare_count) override;
void GrowSpareEdgeData(const size_t new_edge_spare_count) override;
// Copy all edge data from its pre_edge_id to post_edge_id.
void CopyEdgeData(const EdgeId src_edge_id, const EdgeId dest_edge_0id) override;
// ** Populate PVs
// Initialize PVs with zeros.
void ZeroPVs();
// Populate rootward and leafward PVs.
void PopulatePVs();
// Populate P-PVs in a Rootward Pass of DAG.
void PopulateRootwardPVs();
// Populate R-PVs in a Leafward Pass of DAG.
void PopulateLeafwardPVs();
// ** Branch Length Optimization
// Initialize branch length handler to prep for optimization.
// Sets helper functions for optimization methods.
void InitializeBranchLengthHandler();
// Perform single pass over DAG of branch length optimization.
void BranchLengthOptimization(
std::optional<bool> check_branch_convergence = std::nullopt);
// Perform branch length optimization on given edge's branch.
void BranchLengthOptimization(const EdgeId edge_id,
const bool check_branch_convergence,
const bool update_only = false);
// ** Access
size_t GetOptimizationCount() { return branch_handler_.GetOptimizationCount(); }
bool IsFirstOptimization() { return branch_handler_.IsFirstOptimization(); }
void IncrementOptimizationCount() { branch_handler_.IncrementOptimizationCount(); }
void ResetOptimizationCount() { branch_handler_.ResetOptimizationCount(); }
PLVEdgeHandler &GetPVs() { return likelihood_pvs_; }
const PLVEdgeHandler &GetPVs() const { return likelihood_pvs_; }
EigenMatrixXd &GetMatrix() { return log_likelihoods_; }
const EigenMatrixXd &GetMatrix() const { return log_likelihoods_; }
DAGBranchHandler &GetDAGBranchHandler() { return branch_handler_; }
const DAGBranchHandler &GetDAGBranchHandler() const { return branch_handler_; }
// ** Settings
size_t IsOptimizeNewEdges() const { return do_optimize_new_edges_; }
void SetOptimizeNewEdges(const bool do_optimize_new_edges) {
do_optimize_new_edges_ = do_optimize_new_edges;
}
size_t GetOptimizationMaxIteration() const { return optimize_max_iter_; }
void SetOptimizationMaxIteration(const size_t optimize_max_iter) {
optimize_max_iter_ = optimize_max_iter;
}
bool IsInitProposedBranchLengthsWithDAG() const {
return do_init_proposed_branch_lengths_with_dag_;
}
void SetInitProposedBranchLengthsWithDAG(
const bool do_init_proposed_branch_lengths_with_dag) {
do_init_proposed_branch_lengths_with_dag_ =
do_init_proposed_branch_lengths_with_dag;
}
bool IsFixProposedBranchLengthsFromDAG() const {
return do_fix_proposed_branch_lengths_from_dag_;
}
void SetFixProposedBranchLengthsFromDAG(
const bool do_fix_proposed_branch_lengths_from_dag) {
do_fix_proposed_branch_lengths_from_dag_ = do_fix_proposed_branch_lengths_from_dag;
}
// ** PV Operations
// Get primary PV Ids for corresponding parent/child pair.
// Gets the P-PV of the child, and the RFocal-PV of the parent.
// The expected PVs for computing likelihoods.
std::pair<PVId, PVId> GetPrimaryPVIdsOfEdge(const EdgeId edge_id) const;
// Get PV Ids for corresponding parent/child pair.
// Gets the PHatLeft-PV and PHatRight-PV of child, and the the PSister-PV and R-PVs of
// parent.
// The expected PVs for computing temp intermediate PVs for proposed NNIs.
LocalPVIds GetLocalPVIdsOfEdge(const EdgeId edge_id) const;
// Remaps secondary PV Ids according to the clade map.
LocalPVIds RemapLocalPVIdsForPostNNI(
const LocalPVIds &pre_pv_ids, const NNIOperation::NNICladeArray &clade_map) const;
// Get temporary PV Ids for intermediate proposed NNI computations.
LocalPVIds GetTempLocalPVIdsForProposedNNIs(const size_t spare_offset) const;
// Get temporary PV Ids for intermediate proposed NNI computations.
NNIAdjEdgeIds GetTempEdgeIdsForProposedNNIs(const size_t spare_offset) const;
// ** Scoring Helpers
// Set the P-PVs to match the observed site patterns at the leaves.
void PopulateLeafPVsWithSitePatterns();
// Set the R-PVs to the stationary distribution at the root and rootsplits.
void PopulateRootPVsWithStationaryDistribution(
std::optional<EdgeIdVector> opt_edge_ids = std::nullopt);
// Updates the rootward P-PVs for given node or edge.
void PopulateRootwardPVForNode(const NodeId node_id);
void PopulateRootwardPVForEdge(const EdgeId edge_id);
// Updates the leafward R-PVs for given node or edge.
void PopulateLeafwardPVForNode(const NodeId node_id);
void PopulateLeafwardPVForEdge(const EdgeId edge_id);
protected:
// ** Scoring Helpers
// Evolve up the given edge to compute the P-PV of its parent node.
// Updates the parent's PFocalHat PLV using the child's P PLV.
void EvolvePPVUpEdge(const EdgeId rootward_edge_id, const EdgeId leafward_edge_id);
// Evolve down the given edge to compute the R-PV of its child node.
// Updates the child's RHat PLV using the parent's RFocal PLV.
void EvolveRPVDownEdge(const EdgeId rootward_edge_id, const EdgeId leafward_edge_id);
// ** PV Operations
// Copy data from PV at src_id to dest_id.
void TakePVValue(const PVId dest_id, const PVId src_id);
// PV component-wise multiplication of PVs src1 and src2, result stored in dest_id.
void MultiplyPVs(const PVId dest_id, const PVId src1_id, const PVId src2_id);
// Compute Likelihood by taking up-to-date parent R-PV and child P-PV.
void ComputeLikelihood(const EdgeId edge_id, const PVId child_id,
const PVId parent_id);
// Evolve src_id along the branch edge_id and store at dest_id.
void SetToEvolvedPV(const PVId dest_id, const EdgeId edge_id, const PVId src_id);
// Evolve src_id along the branch edge_id and multiply with contents of dest_id.
void MultiplyWithEvolvedPV(const PVId dest_id, const EdgeId edge_id,
const PVId src_id);
// Intermediate computation step for log likelihoods. Stored in temporary variable.
inline void PreparePerPatternLogLikelihoodsForEdge(const PVId src1_idx,
const PVId src2_idx) {
per_pattern_log_likelihoods_ = (GetPVs().GetPV(src1_idx).transpose() *
transition_matrix_ * GetPVs().GetPV(src2_idx))
.diagonal()
.array()
.log();
}
// Intermediate computation step for first derivative of log likelihoods. Stored in
// temporary variable.
inline void PrepareUnrescaledPerPatternLikelihoodDerivatives(const PVId src1_idx,
const PVId src2_idx) {
per_pattern_likelihood_derivatives_ =
(GetPVs().GetPV(src1_idx).transpose() * derivative_matrix_ *
GetPVs().GetPV(src2_idx))
.diagonal()
.array();
}
// Intermediate computation step for second derivative of log likelihoods. Stored in
// temporary variable.
inline void PrepareUnrescaledPerPatternLikelihoodSecondDerivatives(
const PVId src1_idx, const PVId src2_idx) {
per_pattern_likelihood_second_derivatives_ =
(GetPVs().GetPV(src1_idx).transpose() * hessian_matrix_ *
GetPVs().GetPV(src2_idx))
.diagonal()
.array();
}
// Intermediate computation step for log likelihoods, but without rescaling. Stored in
// temporary variable.
inline void PrepareUnrescaledPerPatternLikelihoods(const PVId src1_idx,
const PVId src2_idx) {
per_pattern_likelihoods_ = (GetPVs().GetPV(src1_idx).transpose() *
transition_matrix_ * GetPVs().GetPV(src2_idx))
.diagonal()
.array();
}
// ** Branch Length Optimization Helpers
// Compute log likelihood and derivative for given edge.
DoublePair LogLikelihoodAndDerivative(const EdgeId edge_id);
// Compute log likelihood and first and second derivative for given edge.
std::tuple<double, double, double> LogLikelihoodAndFirstTwoDerivatives(
const EdgeId edge_id);
// Prep temporary transition matrix variable to use given branch length.
void SetTransitionMatrixToHaveBranchLength(const double branch_length);
// Prep temporary transition and derivative matrix variables to use given branch
// length.
void SetTransitionAndDerivativeMatricesToHaveBranchLength(const double branch_length);
// Prep temporary transposed transition matrix variable to use given branch length.
void SetTransitionMatrixToHaveBranchLengthAndTranspose(const double branch_length);
protected:
// Partial Vector for storing Likelihood scores.
PLVEdgeHandler likelihood_pvs_;
// Tree likelihoods matrix across all sites.
EigenMatrixXd log_likelihoods_;
// Branch length parameters for DAG.
DAGBranchHandler branch_handler_;
// Whether new edges are optimized.
bool do_optimize_new_edges_ = true;
// Whether to referehce DAG to initialize branch lengths (otherwise use default).
bool do_init_proposed_branch_lengths_with_dag_ = true;
// Whether to fix branch lengths contained in DAG or optimize them.
bool do_fix_proposed_branch_lengths_from_dag_ = true;
// Number of optimization iterations.
size_t optimize_max_iter_ = 5;
// Temporary map of optimized edge lengths.
std::map<Bitset, double> tmp_branch_lengths_;
// Number of pvs to allocate per node in DAG.
static constexpr size_t pv_count_per_node_ = PLVTypeEnum::Count;
// Number of spare nodes needed to be allocated per proposed NNI.
static constexpr size_t spare_nodes_per_nni_ = 12;
// Number of spare edges needed to be allocated per proposed NNI.
static constexpr size_t spare_edges_per_nni_ = 5;
// ** Substitution Model
// When we change from JC69Model, check that we are actually doing transpose in
// leafward calculations.
JC69Model substitution_model_;
Eigen::Matrix4d eigenmatrix_ = substitution_model_.GetEigenvectors().reshaped(4, 4);
Eigen::Matrix4d inverse_eigenmatrix_ =
substitution_model_.GetInverseEigenvectors().reshaped(4, 4);
Eigen::Vector4d eigenvalues_ = substitution_model_.GetEigenvalues();
Eigen::Vector4d stationary_distribution_ = substitution_model_.GetFrequencies();
// ** Temporaries
// Stores intermediate computations useful for calculation.
EigenVectorXd per_pattern_log_likelihoods_;
EigenVectorXd per_pattern_likelihoods_;
EigenVectorXd per_pattern_likelihood_derivatives_;
EigenVectorXd per_pattern_likelihood_derivative_ratios_;
EigenVectorXd per_pattern_likelihood_second_derivatives_;
EigenVectorXd per_pattern_likelihood_second_derivative_ratios_;
Eigen::Vector4d diagonal_vector_;
Eigen::DiagonalMatrix<double, 4> diagonal_matrix_;
Eigen::Matrix4d transition_matrix_;
Eigen::Matrix4d derivative_matrix_;
Eigen::Matrix4d hessian_matrix_;
};
// TPEngine helper for evaluating Top Trees using Parsimony.
class TPEvalEngineViaParsimony : public TPEvalEngine {
 public:
  TPEvalEngineViaParsimony(TPEngine &tp_engine, const std::string &mmap_path);
  // ** Maintenance
  // Initialize Computation Engine.
  void Initialize() override;
  // Update the Computation Engine after adding Node Pairs to the DAG.
  void UpdateEngineAfterDAGAddNodePair(const NNIOperation &post_nni,
                                       const NNIOperation &pre_nni,
                                       std::optional<size_t> new_tree_id) override;
  // Update the Computation Engine after modifying DAG. Nodes and Edges in reindexer
  // that exceed the prev count gives the location of the new node and edge ids.
  void UpdateEngineAfterModifyingDAG(
      const std::map<NNIOperation, NNIOperation> &nni_to_pre_nni,
      const size_t prev_node_count, const Reindexer &node_reindexer,
      const size_t prev_edge_count, const Reindexer &edge_reindexer) override;
  // Computes scores. Call after Initialize or any Update steps, and before
  // GetTopTreeScores.
  void ComputeScores(std::optional<EdgeIdVector> opt_edge_ids = std::nullopt) override;
  // ** Scoring
  // Get the Top Tree from the DAG with the given edge.
  double GetTopTreeScoreWithEdge(const EdgeId edge_id) const override;
  // Get the Top Tree from the DAG containing the proposed NNI.
  // (The best_edge_map parameter is unused by the parsimony engine, hence unnamed.)
  double GetTopTreeScoreWithProposedNNI(
      const NNIOperation &post_nni, const NNIOperation &pre_nni,
      const size_t spare_offset = 0,
      std::optional<BitsetEdgeIdMap> = std::nullopt) override;
  // ** Resize
  // Resize Engine for modified DAG.
  void GrowEngineForDAG(std::optional<Reindexer> node_reindexer,
                        std::optional<Reindexer> edge_reindexer) override;
  // Grow engine to handle computing NNIs for all adjacent NNIs.
  // Option to grow engine for computing via reference or via copy. If computing via
  // reference, option whether to use unique temporaries (for testing and computing in
  // parallel).
  void GrowEngineForAdjacentNNIs(const NNISet &adjacent_nnis,
                                 const bool via_reference = true,
                                 const bool use_unique_temps = true) override;
  // Resize GPEngine to accommodate DAG with given number of nodes and edges. Option
  // to remap data according to DAG reindexers. Option to give explicit number of
  // nodes or edges to allocate memory for (this is the only way memory allocation
  // will be decreased).
  void GrowNodeData(const size_t node_count,
                    std::optional<const Reindexer> node_reindexer = std::nullopt,
                    std::optional<const size_t> explicit_alloc = std::nullopt,
                    const bool on_init = false) override;
  void GrowEdgeData(const size_t edge_count,
                    std::optional<const Reindexer> edge_reindexer = std::nullopt,
                    std::optional<const size_t> explicit_alloc = std::nullopt,
                    const bool on_init = false) override;
  // Grow space for storing temporary computation.
  void GrowSpareNodeData(const size_t new_node_spare_count) override;
  void GrowSpareEdgeData(const size_t new_edge_spare_count) override;
  // Copy all edge data from its pre_edge_id to post_edge_id.
  void CopyEdgeData(const EdgeId src_edge_id, const EdgeId dest_edge_id) override;
  // ** Populate PVs
  // Initialize PVs with zero.
  void ZeroPVs();
  // Populate rootward and leafward PVs.
  void PopulatePVs();
  // Populate P-PVs in a Rootward Pass of DAG.
  void PopulateRootwardPVs();
  // Populate R-PVs in a Leafward Pass of DAG.
  void PopulateLeafwardPVs();
  // ** Access
  PSVEdgeHandler &GetPVs() { return parsimony_pvs_; }
  const PSVEdgeHandler &GetPVs() const { return parsimony_pvs_; }
 protected:
  // ** Scoring Helpers
  // Compute the rootward P-PVs for given node or edge.
  void PopulateRootwardParsimonyPVForNode(const NodeId node_id);
  void PopulateRootwardParsimonyPVForEdge(const EdgeId edge_id);
  // Compute the leafward R-PVs for given node or edge.
  void PopulateLeafwardParsimonyPVForNode(const NodeId node_id);
  void PopulateLeafwardParsimonyPVForEdge(const EdgeId edge_id);
  // Set the P-PVs to match the observed site patterns at the leaves.
  void PopulateLeafParsimonyPVsWithSitePatterns();
  // Calculate the PV for a given parent-child pair.
  EigenVectorXd ParentPartial(EigenVectorXd child_partials);
  // Sum P-PVs for right and left children of node 'node_id'
  // In this case, we get the full P-PVs of the given node after all P-PVs
  // have been concatenated into one SankoffPartialVector.
  EigenVectorXd TotalPPartial(const EdgeId edge_id, const size_t site_idx);
  EigenVectorXd TotalPPartial(const PVId edge_pleft_pvid, const PVId edge_pright_pvid,
                              const size_t site_idx);
  // Populate rootward P-PVs for given edge.
  // Updates parent's Pleft PV with sum of left child P PVs and parent's PRight PV with
  // sum of right child P PVs.
  void PopulateRootwardParsimonyPVForEdge(const EdgeId parent_id,
                                          const EdgeId left_child_id,
                                          const EdgeId right_child_id);
  // Populate leafward Q-PVs for given edge.
  // Updates parent's Q PV by combining with sister's sum of P PVs.
  void PopulateLeafwardParsimonyPVForEdge(const EdgeId parent_id,
                                          const EdgeId left_child_id,
                                          const EdgeId right_child_id);
  // Calculates parsimony score on given edge.
  // Takes the minimum element after taking sum of edge's P and Q PVs.
  double ParsimonyScore(const EdgeId edge_id);
  double ParsimonyScore(const PVId edge_q_pvid, const PVId edge_pleft_pvid,
                        const PVId edge_pright_pvid);
 protected:
  // Partial Vector for computing Parsimony scores.
  PSVEdgeHandler parsimony_pvs_;
  // Number of pvs to allocate per node in DAG.
  static constexpr size_t pv_count_per_node_ = PSVTypeEnum::Count;
  // Stores intermediate computations.
  // Internal "temporaries" useful for calculation.
  SankoffMatrix parsimony_cost_matrix_;
  // Number of character states (nucleotides).
  static constexpr size_t state_count_ = 4;
  // Large sentinel used as an effective infinity in Sankoff cost computations.
  static constexpr double big_double_ = static_cast<double>(INT_MAX);
  // Number of spare nodes needed to be allocated per proposed NNI.
  static constexpr size_t spare_nodes_per_nni_ = 2;
  // Number of spare edges needed to be allocated per proposed NNI.
  static constexpr size_t spare_edges_per_nni_ = 5;
};
| 27,736
|
C++
|
.h
| 531
| 46.039548
| 88
| 0.719873
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,079
|
sbn_maps.hpp
|
phylovi_bito/src/sbn_maps.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A collection of functions to handle the subsplit support and to turn trees into
// indexer representations.
#pragma once
#include <unordered_map>
#include <utility>
#include <vector>
#include "bitset.hpp"
#include "default_dict.hpp"
#include "driver.hpp"
#include "node.hpp"
#include "subsplit_dag_storage.hpp"
#include "rooted_tree_collection.hpp"
// Basic containers keyed by Bitset / size_t.
using BitsetVector = std::vector<Bitset>;
using SizeBitsetMap = std::unordered_map<size_t, Bitset>;
using BitsetSizeMap = std::unordered_map<Bitset, size_t>;
using SizeBoolVectorMap = std::unordered_map<size_t, std::vector<bool>>;
using BitsetSizePairMap = std::unordered_map<Bitset, std::pair<size_t, size_t>>;
using BitsetSizeDict = DefaultDict<Bitset, size_t>;
using BitsetDoubleMap = std::unordered_map<Bitset, double>;
// Indexer representations: for a rooted tree, a vector of SBN parameter indices
// (rootsplit index first, then PCSP indices); for an unrooted tree, one such
// vector per possible rooting (see the namespace comments below).
using RootedIndexerRepresentation = SizeVector;
using RootedIndexerRepresentationCounter =
    std::vector<std::pair<RootedIndexerRepresentation, uint32_t>>;
using UnrootedIndexerRepresentation = SizeVectorVector;
using UnrootedIndexerRepresentationCounter =
    std::vector<std::pair<UnrootedIndexerRepresentation, uint32_t>>;
// PCSP counting and indexing.
using PCSPCounter = std::map<Bitset, DefaultDict<Bitset, size_t>>;
using PCSPIndexVector = std::vector<size_t>;
using RootedIndexerRepresentationSizeDict =
    DefaultDict<RootedIndexerRepresentation, size_t>;
using StringSizePairMap = std::unordered_map<std::string, std::pair<size_t, size_t>>;
using SizeStringMap = std::unordered_map<size_t, std::string>;
using StringPCSPMap =
    std::unordered_map<std::string, std::unordered_map<std::string, size_t>>;
// An ensemble of indexing data structures needed for an SBNSupport.
// NOTE(review): the tuple components appear to be (rootsplits, indexer,
// index-to-bitset map, parent-to-range map, GPCSP count) — confirm against
// SBNMaps::BuildIndexerBundle's implementation.
using IndexerBundle =
    std::tuple<BitsetVector, BitsetSizeMap, SizeBitsetMap, BitsetSizePairMap, size_t>;
// Functions shared by rooted and unrooted SBN support handling.
namespace SBNMaps {
// Make a map from each Tag to the bitset representing the ids below the Tag.
SizeBitsetMap IdIdSetMapOf(const Node::NodePtr& topology);
// This function returns a vector indexed by the edges of the tree and
// containing indices of the corresponding splits as indexed by the indexer.
SizeVector SplitIndicesOf(const BitsetSizeMap& indexer, const Node::NodePtr& topology);
// Make a string version of a PCSPCounter.
// NOTE(review): argument taken by value (a copy); confirm this is intentional.
StringPCSPMap StringPCSPMapOf(PCSPCounter d);
// Convert a BitsetDoubleMap to a sorted vector of (bitset string, double)s.
StringDoubleVector StringDoubleVectorOf(BitsetDoubleMap m);
// Make a PCSP bitset from a collection of Nodes and their directions. If direction is
// true, then the bits get flipped.
Bitset PCSPBitsetOf(size_t leaf_count,  //
                    const Node* sister_node, bool sister_direction,
                    const Node* focal_node, bool focal_direction,
                    const Node* child0_node, bool child0_direction,
                    const Node* child1_node, bool child1_direction);
// Build an IndexerBundle from counters of rootsplits and PCSPs. Note that the
// actual counts don't matter: we are just using the support here.
IndexerBundle BuildIndexerBundle(const BitsetSizeDict& rootsplit_counter,
                                 const PCSPCounter& pcsp_counter);
}  // namespace SBNMaps
// SBN support functions specific to unrooted trees (every rooting is considered).
namespace UnrootedSBNMaps {
// Make a DefaultDict mapping rootsplits to the number of times they were seen.
BitsetSizeDict RootsplitCounterOf(const Node::TopologyCounter& topologies);
// Make a PCSPCounter mapping PCSPs to the number of times they were seen.
PCSPCounter PCSPCounterOf(const Node::TopologyCounter& topologies);
// This function gives information about the rootsplits and PCSPs of a given
// topology with respect to the current indexing data structures.
// Specifically, it returns a vector of vectors, such that the ith vector is the indices
// of sbn_parameters_ describing the tree when it is rooted above the ith node. The
// first entry of this representation is always the index of the rootsplit. The rest are
// the indices of the PCSPs that are present in the given topology.
// NOTE: Any rootsplits or PCSPs that aren't known by the indexer are assigned
// `default_index`.
UnrootedIndexerRepresentation IndexerRepresentationOf(const BitsetSizeMap& indexer,
                                                      const Node::NodePtr& topology,
                                                      size_t default_index);
// Turn a TopologyCounter into an IndexerRepresentationCounter.
UnrootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
    const BitsetSizeMap& indexer, const Node::TopologyCounter& topology_counter,
    size_t default_index);
// Define a "reversed indexer" to be a vector with ith entry being the string version of
// the ith GPCSP. This function takes such a vector and an unrooted indexer
// representation and makes a vector of StringSets, such that the jth entry is the
// string version of the indexer representation of the jth rooting.
StringSetVector StringIndexerRepresentationOf(
    const StringVector& reversed_indexer,
    const UnrootedIndexerRepresentation& indexer_representation);
}  // namespace UnrootedSBNMaps
// SBN support functions specific to rooted trees (a single, fixed rooting).
namespace RootedSBNMaps {
// Make a DefaultDict mapping rootsplits to the number of times they were seen.
BitsetSizeDict RootsplitCounterOf(const Node::TopologyCounter& topologies);
// Make a PCSPCounter mapping PCSPs to the number of times they were seen.
PCSPCounter PCSPCounterOf(const Node::TopologyCounter& topologies);
// A rooted indexer representation is the indexer representation of a given rooted tree.
// That is, the first entry is the rootsplit for that rooting, and after that come the
// PCSP indices.
RootedIndexerRepresentation IndexerRepresentationOf(const BitsetSizeMap& indexer,
                                                    const Node::NodePtr& topology,
                                                    size_t default_index);
// Turn a TopologyCounter into an IndexerRepresentationCounter.
RootedIndexerRepresentationCounter IndexerRepresentationCounterOf(
    const BitsetSizeMap& indexer, const Node::TopologyCounter& topology_counter,
    size_t default_index);
// Define a "reversed indexer" to be a vector with ith entry being the string version of
// the ith GPCSP. This function takes such a vector and a rooted indexer representation
// and makes a StringSet of the indexer representation.
StringSet StringIndexerRepresentationOf(
    const StringVector& reversed_indexer,
    const RootedIndexerRepresentation& indexer_representation);
// For counting standardized (i.e. PCSP index sorted) rooted indexer representations.
void IncrementRootedIndexerRepresentationSizeDict(
    RootedIndexerRepresentationSizeDict& dict,
    RootedIndexerRepresentation rooted_indexer_representation);
// Apply the above to every rooting in the unrooted indexer representation.
void IncrementRootedIndexerRepresentationSizeDict(
    RootedIndexerRepresentationSizeDict& dict,
    const UnrootedIndexerRepresentation& indexer_representation);
// Apply function to edges descending from each node on each rooted tree for all trees
// in collection.
// NOTE(review): the callback arguments appear to be (edge id, edge bitset, tree,
// tree index, node) — confirm against the implementation.
using FunctionOnTreeNodeByGPCSP = std::function<void(
    EdgeId, const Bitset&, const RootedTree&, const size_t, const Node*)>;
void FunctionOverRootedTreeCollection(
    FunctionOnTreeNodeByGPCSP function_on_tree_node_by_gpcsp,
    const RootedTreeCollection& tree_collection, const BitsetSizeMap& edge_indexer,
    const size_t default_index);
}  // namespace RootedSBNMaps
// Turn a <Key, T> map into a <std::string, T> map for any Key type that has
// a ToString method.
template <class Key, class T>
std::unordered_map<std::string, T> StringifyMap(std::unordered_map<Key, T> m) {
  std::unordered_map<std::string, T> stringified;
  stringified.reserve(m.size());
  for (const auto& [key, value] : m) {
    stringified[key.ToString()] = value;
  }
  return stringified;
}
// Hash for vectors of size_t, combining entries in the style of
// boost::hash_combine.
// https://www.boost.org/doc/libs/1_35_0/doc/html/boost/hash_combine_id241013.html
namespace std {
template <>
struct hash<SizeVector> {
  size_t operator()(const SizeVector& values) const {
    // Guard the empty case: the previous implementation read values[0]
    // unconditionally, which is undefined behavior on an empty vector.
    if (values.empty()) {
      return 0;
    }
    // Accumulate in an unsigned size_t: the shifts below are well defined for
    // unsigned types, whereas the previous signed int accumulator made them
    // implementation-defined and could overflow (UB).
    size_t hash = values[0];
    for (size_t i = 1; i < values.size(); i++) {
      // https://stackoverflow.com/a/58845898/467327
      hash ^= values[i] + 0x9e3779b9 + (hash << 6) + (hash >> 2);
    }
    return hash;
  }
};
}  // namespace std
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("SBNMaps") {
  const auto topology0 = Node::ExampleTopologies()[0];
  // (0,1,(2,3)4)5;
  const std::unordered_map<size_t, Bitset> correct_id_id_set_map{
      {5, Bitset("111111")}, {1, Bitset("010000")}, {0, Bitset("100000")},
      {2, Bitset("001000")}, {3, Bitset("000100")}, {4, Bitset("001110")}};
  // Every entry produced by IdIdSetMapOf should agree with the table above.
  for (const auto& [node_id, below_bitset] : SBNMaps::IdIdSetMapOf(topology0)) {
    CHECK_EQ(correct_id_id_set_map.at(node_id), below_bitset);
  }
  // Tests comparing to vbpi appear in Python test code.
  // Tests of IndexerRepresentationOf in unrooted_sbn_instance.hpp.
}
#endif  // DOCTEST_LIBRARY_INCLUDED
| 9,197
|
C++
|
.h
| 171
| 48.608187
| 88
| 0.740811
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,080
|
rooted_tree_collection.hpp
|
phylovi_bito/src/rooted_tree_collection.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A rooted tree collection has a notion of sampling date for the tips of the tree, and
// all taxa are assumed to share those sampling dates.
#pragma once
#include "generic_tree_collection.hpp"
#include "rooted_tree.hpp"
#include "tree_collection.hpp"
// Explicitly instantiate the generic collection for RootedTree here.
template class GenericTreeCollection<RootedTree>;
using PreRootedTreeCollection = GenericTreeCollection<RootedTree>;
// A collection of rooted trees together with a map from taxon tags to their
// sampling dates (see the header comment above).
class RootedTreeCollection : public PreRootedTreeCollection {
  // Taxon tags mapped to sampling dates.
  using TagDateMap = TagDoubleMap;
 public:
  // Inherit all constructors.
  using PreRootedTreeCollection::PreRootedTreeCollection;
  // Wrap an existing pre-collection, attaching the given tag-to-date map.
  RootedTreeCollection(const PreRootedTreeCollection& pre_collection,
                       const TagDateMap& tag_date_map);
  static RootedTreeCollection OfTreeCollection(const TreeCollection& trees);
  // Build a tree collection by duplicating the first tree.
  RootedTreeCollection BuildCollectionByDuplicatingFirst(size_t number_of_times);
  const TagDateMap& GetTagDateMap() const { return tag_date_map_; };
  // Date-assignment entry points. The flag presumably controls whether time
  // trees are (re)initialized from branch lengths after dates are set —
  // confirm against the .cpp implementation.
  void SetDatesToBeConstant(bool initialize_time_trees_using_branch_lengths);
  void ParseDatesFromTaxonNames(bool initialize_time_trees_using_branch_lengths);
  void ParseDatesFromCSV(const std::string& csv_path,
                         bool initialize_time_trees_using_branch_lengths);
 private:
  TagDateMap tag_date_map_;
  // Internal helpers used by the date-assignment entry points above.
  void SetTipDates();
  void ProcessTreeDates(bool initialize_time_trees_using_branch_lengths);
  void ParseDatesFromCSVButDontInitializeTimeTrees(const std::string& csv_path);
};
#ifdef DOCTEST_LIBRARY_INCLUDED
// Test of ParseDatesFromTaxonNames appears in rooted_sbn_instance.hpp.
// Intentionally empty: kept so the doctest suite registers this section.
TEST_CASE("RootedTreeCollection") {}
#endif  // DOCTEST_LIBRARY_INCLUDED
| 1,799
|
C++
|
.h
| 36
| 46.444444
| 87
| 0.797945
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,081
|
subsplit_dag_action.hpp
|
phylovi_bito/src/subsplit_dag_action.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A class to hold actions that we can perform on the subsplit DAG.
#pragma once
#include <tuple>
// Convenience alias: the I-th type of the parameter pack Args.
template <size_t I, typename... Args>
using TypeOf = std::tuple_element_t<I, std::tuple<Args...>>;
// An action to be performed as part of a traversal of the subsplit DAG.
// Returns an aggregate holding the four callbacks, whose member types are
// deduced positionally from the pack and which is brace-initialized from the
// forwarded arguments in declaration order.
// NOTE(review): if lvalue callables are passed, Args deduces to reference
// types and the members become references — callers appear to pass
// temporaries (lambdas); confirm before passing stored callables.
template <typename... Args>
auto SubsplitDAGTraversalAction(Args&&... args) {
  struct Impl {
    // Applied just before visiting a node.
    TypeOf<0, Args...> BeforeNode;
    // Applied after visiting a node.
    TypeOf<1, Args...> AfterNode;
    // Applied before visiting the set of edges below a (node, clade) pair.
    TypeOf<2, Args...> BeforeNodeClade;
    // Applied for each edge.
    TypeOf<3, Args...> VisitEdge;
  };
  return Impl{std::forward<Args>(args)...};
}
// An action to be performed as part of a traversal of a Tidy subsplit DAG.
// Same construction pattern as SubsplitDAGTraversalAction above, with an
// extra UpdateEdge callback for the clean/dirty bookkeeping.
template <typename... Args>
auto TidySubsplitDAGTraversalAction(Args&&... args) {
  struct Impl {
    // Applied just before visiting a node.
    TypeOf<0, Args...> BeforeNode;
    // Applied after visiting a node.
    TypeOf<1, Args...> AfterNode;
    // Applied before visiting the set of edges below a (node, clade) pair.
    TypeOf<2, Args...> BeforeNodeClade;
    // Applied for each edge, and "dirties" all of the nodes above it.
    TypeOf<3, Args...> ModifyEdge;
    // Applying this function "cleans" the node just above an edge that has been
    // "dirtied" by ModifyEdge assuming the node just below the edge is clean.
    // (Traversals using this action ensure that children are visited before parents.)
    TypeOf<4, Args...> UpdateEdge;
  };
  return Impl{std::forward<Args>(args)...};
}
| 1,756
|
C++
|
.h
| 42
| 38.5
| 86
| 0.703335
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,082
|
pv_handler.hpp
|
phylovi_bito/src/pv_handler.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// PVHandler is used for storing and manipulating Partial Vectors. Partial Vectors are
// intermediate computations, such as in likelihoods or parsimonies, used for performing
// dynamic programming on a tree or DAG. Partial Vectors can be "stored on" and indexed
// according to different elements of the DAG: either by the edges or the nodes.
//
// PSVHandler is used to perform the Sankoff algorithm. There are 3 partial vectors:
// PLeft, PRight, and Q. PLeft and Pright corresponds to Sankoff vectors for the left
// and right child respectively, and Q corresponds to the value of the partial vector
// pointing leaf-ward.
#pragma once
#include <cmath>
#include "sugar.hpp"
#include "mmapped_plv.hpp"
#include "site_pattern.hpp"
#include "reindexer.hpp"
#include "subsplit_dag_storage.hpp"
// Helper Enumerated Types for Partial Vectors.
namespace PartialVectorType {
// PLV: Partial Likelihood Vectors
enum class PLVType : size_t {
  P,          // p(s)
  PHatRight,  // phat(s_right)
  PHatLeft,   // phat(s_left)
  RHat,       // rhat(s_right) = rhat(s_left)
  RRight,     // r(s_right)
  RLeft,      // r(s_left)
};
static inline const size_t PLVCount = 6;
// Enum helper for PLVType: iteration, indexing, and string conversion.
class PLVTypeEnum
    : public EnumWrapper<PLVType, size_t, PLVCount, PLVType::P, PLVType::RLeft> {
 public:
  // Select the P-hat PLV for the given clade side.
  static PLVType PPLVType(const bool is_on_left) {
    return is_on_left ? PLVType::PHatLeft : PLVType::PHatRight;
  }
  // Select the R PLV for the given clade side.
  static PLVType RPLVType(const bool is_on_left) {
    return is_on_left ? PLVType::RLeft : PLVType::RRight;
  }
  static PLVType PPLVType(const SubsplitClade clade) {
    return (clade == SubsplitClade::Left) ? PLVType::PHatLeft : PLVType::PHatRight;
  }
  static PLVType RPLVType(const SubsplitClade clade) {
    return (clade == SubsplitClade::Left) ? PLVType::RLeft : PLVType::RRight;
  }
  static inline const std::string Prefix = "PLV";
  static inline const Array<std::string> Labels = {
      {"P", "PHatRight", "PHatLeft", "RHat", "RRight", "RLeft"}};
  // E.g. PLVType::P -> "PLV::P".
  static std::string ToString(const PLVType e) {
    std::stringstream ss;
    ss << Prefix << "::" << Labels[e];
    return ss.str();
  }
  friend std::ostream &operator<<(std::ostream &os, const PLVType e) {
    os << ToString(e);
    return os;
  }
};
// PSV: Partial Sankoff Vectors
enum class PSVType : size_t {
  PRight,  // p(s_right)
  PLeft,   // p(s_left)
  Q        // q(s)
};
static inline const size_t PSVCount = 3;
// Enum helper for PSVType: iteration, indexing, and string conversion.
class PSVTypeEnum
    : public EnumWrapper<PSVType, size_t, PSVCount, PSVType::PRight, PSVType::Q> {
 public:
  // Select the P PSV for the given clade side.
  static PSVType PPSVType(const bool is_on_left) {
    return is_on_left ? PSVType::PLeft : PSVType::PRight;
  }
  static PSVType PPSVType(const SubsplitClade clade) {
    return (clade == SubsplitClade::Left) ? PSVType::PLeft : PSVType::PRight;
  }
  static inline const std::string Prefix = "PSV";
  static inline const Array<std::string> Labels = {{"PRight", "PLeft", "Q"}};
  // E.g. PSVType::Q -> "PSV::Q".
  static std::string ToString(const PSVType e) {
    std::stringstream ss;
    ss << Prefix << "::" << Labels[e];
    return ss.str();
  }
  friend std::ostream &operator<<(std::ostream &os, const PSVType e) {
    os << ToString(e);
    return os;
  }
};
};  // namespace PartialVectorType
// Strongly-typed id for indexing into PV storage (distinct from node/edge ids).
using PVId = GenericId<struct PVIdTag>;
using PVIdVector = std::vector<PVId>;
// Shorthands for the enum machinery of the PartialVectorType namespace.
using PLVType = PartialVectorType::PLVType;
using PLVTypeEnum = PartialVectorType::PLVTypeEnum;
using PSVType = PartialVectorType::PSVType;
using PSVTypeEnum = PartialVectorType::PSVTypeEnum;
// PVTypeEnum determines which PV types need to be stored on each element of the
// PVHandler (e.g. P-PVs, Q-PVs, R-PVs). DAGElementId decides whether indexing PVs
// according to DAG's nodes or edges.
template <class PVTypeEnum, class DAGElementId>
class PartialVectorHandler {
 public:
  using TypeEnum = PVTypeEnum;
  using PVType = typename TypeEnum::Type;
  using PVIdArray = typename TypeEnum::template Array<PVId>;

  // Allocates the memory-mapped master data block, sized for elem_count
  // elements (plus spare space), times PVs-per-element, times the resizing
  // headroom, times the number of site patterns.
  PartialVectorHandler(const std::string &mmap_file_path, const size_t elem_count,
                       const size_t pattern_count, const double resizing_factor = 2.0)
      : element_count_(elem_count),
        pattern_count_(pattern_count),
        resizing_factor_(resizing_factor),
        mmap_file_path_(mmap_file_path),
        mmapped_master_pvs_(mmap_file_path_, (elem_count + element_spare_count_) *
                                                 pv_count_per_element_ *
                                                 size_t(resizing_factor_) *
                                                 pattern_count) {
    pv_reindexer_ = Reindexer::IdentityReindexer(GetPaddedPVCount());
    reindexer_init_size_ = pv_reindexer_.size();
  }

  // ** Counts
  double GetByteCount() const { return mmapped_master_pvs_.ByteCount(); }
  size_t GetPVCountPer() const { return pv_count_per_element_; }
  size_t GetSitePatternCount() const { return pattern_count_; }
  // DAG element counts.
  size_t GetCount() const { return element_count_; }
  size_t GetSpareCount() const { return element_spare_count_; }
  size_t GetAllocatedCount() const { return element_alloc_; }
  size_t GetPaddedCount() const { return GetCount() + GetSpareCount(); }
  // PV counts.
  size_t GetPVCount() const { return GetCount() * GetPVCountPer(); }
  size_t GetSparePVCount() const { return GetSpareCount() * GetPVCountPer(); }
  size_t GetPaddedPVCount() const { return GetPaddedCount() * GetPVCountPer(); }
  size_t GetAllocatedPVCount() const { return GetAllocatedCount() * GetPVCountPer(); }
  void SetCount(const size_t elem_count) { element_count_ = elem_count; }
  void SetSpareCount(const size_t element_spare_count) {
    element_spare_count_ = element_spare_count;
  }
  void SetAllocatedCount(const size_t element_alloc) { element_alloc_ = element_alloc; }

  // ** Resize
  // Resize PVHandler to accomodate DAG with given number of nodes.
  void Resize(const size_t new_elem_count, const size_t new_element_alloc,
              std::optional<size_t> new_element_spare = std::nullopt);
  // Reindex PV according to pv_reindexer.
  void Reindex(const Reindexer pv_reindexer);
  // Reindex PVs by moving data to align with reindexer by copying.
  void ReindexViaMoveCopy(const Reindexer pv_reindexer);
  // Reindex PVs by updating the map from pv_id to data index.
  void ReindexViaRemap(const Reindexer pv_reindexer);
  // Expand element_reindexer into pv_reindexer.
  Reindexer BuildPVReindexer(const Reindexer &element_reindexer,
                             const size_t old_elem_count, const size_t new_elem_count);

  // ** Access
  // Get vector of all Partial Vectors.
  NucleotidePLVRefVector &GetPVs() { return pvs_; }
  const NucleotidePLVRefVector &GetPVs() const { return pvs_; }
  // Get PV by absolute index from the vector of Partial Vectors.
  // The range check runs *before* the reindexer/vector access (previously the
  // Assert came after the access, too late to catch an out-of-range id).
  NucleotidePLVRef &GetPV(const PVId pv_id) {
    Assert(pv_id < GetAllocatedPVCount(), "pv_id outside valid range.");
    return pvs_.at(pv_reindexer_.GetOldIndexByNewIndex(pv_id.value_));
  }
  const NucleotidePLVRef &GetPV(const PVId pv_id) const {
    Assert(pv_id < GetAllocatedPVCount(), "pv_id outside valid range.");
    return pvs_.at(pv_reindexer_.GetOldIndexByNewIndex(pv_id.value_));
  }
  NucleotidePLVRef &operator()(const PVId pv_id) { return GetPV(pv_id); }
  const NucleotidePLVRef &operator()(const PVId pv_id) const { return GetPV(pv_id); }
  // Get PV by PV type and node index from the vector of Partial Vectors.
  NucleotidePLVRef &GetPV(const PVType pv_type, const DAGElementId elem_id) {
    return GetPV(GetPVIndex(pv_type, elem_id));
  }
  const NucleotidePLVRef &GetPV(const PVType pv_type,
                                const DAGElementId elem_id) const {
    return GetPV(GetPVIndex(pv_type, elem_id));
  }
  NucleotidePLVRef &operator()(const PVType pv_type, const DAGElementId elem_id) {
    return GetPV(GetPVIndex(pv_type, elem_id));
  }
  const NucleotidePLVRef &operator()(const PVType pv_type,
                                     const DAGElementId elem_id) const {
    return GetPV(GetPVIndex(pv_type, elem_id));
  }
  // Get Spare PV by index from the vector of Partial Vectors.
  NucleotidePLVRef &GetSparePV(const PVId pv_id) {
    return GetPV(GetSparePVIndex(pv_id));
  }
  const NucleotidePLVRef &GetSparePV(const PVId pv_id) const {
    return GetPV(GetSparePVIndex(pv_id));
  }

  // Get total offset into PVs, indexed based on underlying DAG.
  static PVId GetPVIndex(const PVType pv_type, const DAGElementId elem_id,
                         const size_t elem_count) {
    return GetPVIndex(TypeEnum::GetIndex(pv_type), elem_id, elem_count);
  }
  PVId GetPVIndex(const PVType pv_type, const DAGElementId elem_id) const {
    Assert(elem_id.value_ < GetCount(), "Requested elem_id is out-of-range.");
    return GetPVIndex(pv_type, elem_id, GetCount());
  }
  // Get PVIndex as a pair of PVType and DAGElementId (inverse of GetPVIndex).
  std::pair<PVType, DAGElementId> GetReversePVIdIndex(const PVId pv_id) const {
    Assert(pv_id.value_ < GetPVCount(), "Requested pv_id is out-of-range.");
    DAGElementId elm_id = (pv_id.value_ % GetCount());
    PVType pv_type = PVType(pv_id.value_ / GetCount());
    return {pv_type, elm_id};
  }
  // Get total offset into temporary PVs, indexed based on underlying grafted DAG.
  PVId GetSparePVIndex(const PVId pv_id) const {
    const size_t pv_scratch_size = GetPaddedPVCount() - GetPVCount();
    Assert(pv_id < pv_scratch_size,
           "Requested temporary pv_id outside of allocated scratch space.");
    return PVId(pv_id.value_ + GetPVCount());
  }
  PVId GetSparePVIndex(const PVType pv_type, const DAGElementId elem_id) {
    Assert(elem_id.value_ < GetSpareCount(),
           "Requested spare elem_id is out-of-range.");
    PVId spare_pv_id = GetPVIndex(pv_type, elem_id, GetSpareCount());
    return GetSparePVIndex(spare_pv_id);
  }
  // PV Reindexer, which serves as the data map to sort PV data.
  const Reindexer &GetPVReindexer() const { return pv_reindexer_; }
  // Get array of all pv_ids for given node.
  PVIdArray GetPVIdArray(const DAGElementId elem_id) const {
    PVIdArray pv_array;
    for (auto pv_type : typename PVTypeEnum::Iterator()) {
      pv_array[pv_type] = GetPVIndex(pv_type, elem_id);
    }
    return pv_array;
  }
  void SetUseRemapping(bool use_remapping) { use_remapping_ = use_remapping; }
  bool GetUseRemapping() const { return use_remapping_; }

  // ** PV Operations
  // Returns the {min, max} over all entries of the given PV.
  std::pair<double, double> ValueRange(const PVId pvid) const {
    const auto &pv = GetPV(pvid);
    double max_value = -INFINITY;
    double min_value = INFINITY;
    for (int i = 0; i < pv.rows(); i++) {
      for (int j = 0; j < pv.cols(); j++) {
        double value = pv(i, j);
        if (max_value < value) {
          max_value = value;
        }
        if (min_value > value) {
          min_value = value;
        }
      }
    }
    return {min_value, max_value};
  }
  // Element-wise PV unary operations.
  using UnaryFunction = std::function<double(const double)>;
  void ApplyUnaryOperation(const PVId dest_pvid, const PVId src_pvid_a,
                           UnaryFunction una_fn) {
    auto &dest_pv = GetPV(dest_pvid);
    const auto &src_pv_a = GetPV(src_pvid_a);
    return ApplyUnaryOperation(dest_pv, src_pv_a, una_fn);
  }
  static void ApplyUnaryOperation(NucleotidePLVRef &dest_pv,
                                  const NucleotidePLVRef &src_pv_a,
                                  UnaryFunction una_fn) {
    for (int i = 0; i < src_pv_a.rows(); i++) {
      for (int j = 0; j < src_pv_a.cols(); j++) {
        dest_pv(i, j) = una_fn(src_pv_a(i, j));
      }
    }
  }
  // Element-wise unary operations.
  void AbsValue(const PVId dest_pvid, const PVId src_pvid_a) {
    // std::fabs rather than unqualified abs: the latter can resolve to the
    // integer ::abs from <cstdlib> and silently truncate the double argument.
    auto AbsValueFunc = [](const double src) { return std::fabs(src); };
    return ApplyUnaryOperation(dest_pvid, src_pvid_a, AbsValueFunc);
  }
  // Element-wise PV binary operations.
  using BinaryFunction = std::function<double(const double, const double)>;
  void ApplyBinaryOperation(const PVId dest_pvid, const PVId src_pvid_a,
                            const PVId src_pvid_b, BinaryFunction bin_fn) {
    auto &dest_pv = GetPV(dest_pvid);
    const auto &src_pv_a = GetPV(src_pvid_a);
    const auto &src_pv_b = GetPV(src_pvid_b);
    return ApplyBinaryOperation(dest_pv, src_pv_a, src_pv_b, bin_fn);
  }
  static void ApplyBinaryOperation(NucleotidePLVRef &dest_pv,
                                   const NucleotidePLVRef &src_pv_a,
                                   const NucleotidePLVRef &src_pv_b,
                                   BinaryFunction bin_fn) {
    for (int i = 0; i < src_pv_a.rows(); i++) {
      for (int j = 0; j < src_pv_b.cols(); j++) {
        dest_pv(i, j) = bin_fn(src_pv_a(i, j), src_pv_b(i, j));
      }
    }
  }
  // Element-wise binary operations.
  void Add(const PVId dest_pvid, const PVId src_pvid_a, const PVId src_pvid_b) {
    auto AddFunc = [](const double src_a, const double src_b) {
      return (src_a + src_b);
    };
    return ApplyBinaryOperation(dest_pvid, src_pvid_a, src_pvid_b, AddFunc);
  }
  void Subtract(const PVId dest_pvid, const PVId src_pvid_a, const PVId src_pvid_b) {
    auto SubtractFunc = [](const double src_a, const double src_b) {
      return (src_a - src_b);
    };
    return ApplyBinaryOperation(dest_pvid, src_pvid_a, src_pvid_b, SubtractFunc);
  }
  void AbsDiff(const PVId dest_pvid, const PVId src_pvid_a, const PVId src_pvid_b) {
    // std::fabs for the same reason as in AbsValue above.
    auto AbsDiffFunc = [](const double src_a, const double src_b) {
      return std::fabs(src_a - src_b);
    };
    return ApplyBinaryOperation(dest_pvid, src_pvid_a, src_pvid_b, AbsDiffFunc);
  }
  void Multiply(const PVId dest_pvid, const PVId src_pvid_a, const PVId src_pvid_b) {
    auto MultiplyFunc = [](const double src_a, const double src_b) {
      return (src_a * src_b);
    };
    return ApplyBinaryOperation(dest_pvid, src_pvid_a, src_pvid_b, MultiplyFunc);
  }
  void Divide(const PVId dest_pvid, const PVId src_pvid_a, const PVId src_pvid_b) {
    auto DivideFunc = [](const double src_a, const double src_b) {
      return (src_a / src_b);
    };
    return ApplyBinaryOperation(dest_pvid, src_pvid_a, src_pvid_b, DivideFunc);
  }
  // Find the maximum element-wise absolute difference between two PVs.
  static double MaxDifference(const NucleotidePLVRef &pv_a,
                              const NucleotidePLVRef &pv_b) {
    double max_diff = 0;
    for (int i = 0; i < pv_a.rows(); i++) {
      for (int j = 0; j < pv_a.cols(); j++) {
        // std::fabs for the same reason as in AbsValue above.
        double diff = std::fabs(pv_a(i, j) - pv_b(i, j));
        if (diff > max_diff) {
          max_diff = diff;
        }
      }
    }
    return max_diff;
  }
  double MaxDifference(const PVId pvid_a, const PVId pvid_b) const {
    const auto &pv_a = GetPV(pvid_a);
    const auto &pv_b = GetPV(pvid_b);
    return MaxDifference(pv_a, pv_b);
  }
  double Min(const PVId pvid) const { return GetPV(pvid).minCoeff(); }
  double Max(const PVId pvid) const { return GetPV(pvid).maxCoeff(); }

  // ** I/O
  // Output data to string.
  std::string ToString(const PVId pv_id, const bool show_labels = false) const {
    if (pv_id < GetPVCount()) {
      const auto &[pv_type, elem_id] = GetReversePVIdIndex(pv_id);
      return ToString(pv_type, elem_id, show_labels);
    }
    std::stringstream out;
    out << "PV[" << pv_id << "]: " << std::endl;
    for (auto &&row : GetPV(pv_id).rowwise()) {
      out << row << std::endl;
    }
    return out.str();
  }
  // NOTE(review): show_labels is currently unused here — confirm intent.
  std::string ToString(const PVType pv_type, const DAGElementId elem_id,
                       const bool show_labels = false) const {
    std::stringstream out;
    out << "PV[" << PVTypeEnum::ToString(pv_type) << ", Element" << elem_id << ", PV"
        << GetPVIndex(pv_type, elem_id) << "]: " << std::endl;
    out << ToString(GetPV(pv_type, elem_id));
    return out.str();
  }
  std::string AllPVsToString(const bool show_labels = false) const {
    std::stringstream out;
    for (const auto pv_type : typename PVTypeEnum::Iterator()) {
      for (DAGElementId elem_id = 0; elem_id < GetCount(); elem_id++) {
        out << ToString(pv_type, elem_id, show_labels);
      }
    }
    return out.str();
  }
  size_t ToHash(const PVId pv_id) const { return ToHash(GetPV(pv_id)); }
  std::string ToHashString(const PVId pv_id, const size_t length = 16) const {
    return ToHashString(GetPV(pv_id), length);
  }
  DoubleVector ToDoubleVector(const PVId pv_id) const {
    return ToDoubleVector(GetPV(pv_id));
  }
  static std::string ToString(const NucleotidePLVRef &pv) {
    std::stringstream out;
    for (int i = 0; i < pv.rows(); i++) {
      out << "[";
      for (int j = 0; j < pv.cols(); j++) {
        out << pv(i, j) << ((j < (pv.cols() - 1)) ? ", " : "");
      }
      out << "]" << std::endl;
    }
    return out.str();
  }
  // Order-sensitive hash over all PV entries (hash_combine style).
  static size_t ToHash(const NucleotidePLVRef &pv) {
    size_t seed = pv.rows() * pv.cols();
    for (int i = 0; i < pv.rows(); i++) {
      for (int j = 0; j < pv.cols(); j++) {
        seed ^= std::hash<double>()(pv(i, j)) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
      }
    }
    return seed;
  }
  static std::string ToHashString(const NucleotidePLVRef &pv,
                                  const size_t length = 16) {
    return HashToString(ToHash(pv), length);
  }
  static DoubleVector ToDoubleVector(const NucleotidePLVRef &pv) {
    DoubleVector values;
    for (int i = 0; i < pv.rows(); i++) {
      for (int j = 0; j < pv.cols(); j++) {
        values.push_back(pv(i, j));
      }
    }
    return values;
  }

  // ** Miscellaneous
  // Map from every PVId to its (PVType, element id) pair.
  std::unordered_map<PVId, std::pair<PVType, DAGElementId>> BuildPVIdMap() const {
    std::unordered_map<PVId, std::pair<PVType, DAGElementId>> pvid_map;
    for (DAGElementId elem_id = 0; elem_id < GetCount(); elem_id++) {
      for (PVType pv_type : typename PVTypeEnum::Iterator()) {
        const auto pv_id = GetPVIndex(pv_type, elem_id);
        pvid_map[pv_id] = {pv_type, elem_id};
      }
    }
    return pvid_map;
  }
  // Compare two handlers entry-by-entry. Returns 1 (true) if all PVs match,
  // 0 (false) otherwise; mismatch details go to stderr unless is_quiet.
  static int Compare(const PartialVectorHandler<PVTypeEnum, DAGElementId> &pv_lhs,
                     const PartialVectorHandler<PVTypeEnum, DAGElementId> &pv_rhs,
                     const bool is_quiet = true) {
    std::stringstream dev_null;
    std::ostream &os = (is_quiet ? dev_null : std::cerr);
    bool pv_count_match = (pv_lhs.GetPVCount() == pv_rhs.GetPVCount());
    if (!pv_count_match) {
      os << "PVHandler::Compare: PV Counts do not match." << std::endl;
      return false;
    }
    bool pvs_match = true;
    for (PVId pv_id(0); pv_id < pv_lhs.GetPVCount(); pv_id++) {
      bool pv_match = (pv_lhs.GetPV(pv_id) == pv_rhs.GetPV(pv_id));
      pvs_match &= pv_match;
      if (!pvs_match) {
        os << "PVHandler::Compare: PVs do not match at PV" << pv_id << std::endl;
        os << "PV_LHS:" << std::endl << pv_lhs.ToString(pv_id, true) << std::endl;
        os << "PV_RHS:" << std::endl << pv_rhs.ToString(pv_id, true) << std::endl;
        return false;
      }
    }
    return true;
  }

 protected:
  // Get total offset into PVs.
  static PVId GetPVIndex(const size_t pv_type_id, const DAGElementId elem_id,
                         const size_t elem_count) {
    return (pv_type_id * elem_count) + elem_id.value_;
  }
  // Get index for given PV enum.
  static size_t GetPVTypeIndex(const PVType pv_type) {
    return TypeEnum::GetIndex(pv_type);
  }

  // ** Data Sizing
  // "Count" is the space currently occupied by data.
  // "Padding" is the amount of free working space added to end of occupied space.
  // "Alloc" is the total current memory allocation.
  // "Resizing factor" is the amount of extra storage allocated for when resizing.
  // Number of nodes in DAG.
  size_t element_count_ = 0;
  // Number of nodes of additional space for temporary graft nodes in DAG.
  size_t element_spare_count_ = 16;
  // Number of nodes allocated for in PVHandler.
  size_t element_alloc_ = 0;
  // Size of Site Pattern.
  size_t pattern_count_ = 0;
  // Number of PVs for each node in DAG.
  size_t pv_count_per_element_ = PVTypeEnum::Count;
  // When size exceeds current allocation, ratio to grow new allocation.
  double resizing_factor_ = 2.0;
  // File path to data map.
  std::string mmap_file_path_;
  // Master PV: Large data block of virtual memory for Partial Likelihood Vectors.
  // Subdivided into sections for pvs_.
  MmappedNucleotidePLV mmapped_master_pvs_;
  // Partial Vectors.
  // Divides mmapped_master_pvs_.
  // For example, GP PLVs are divided as follows:
  // - [0, num_nodes): p(s).
  // - [num_nodes, 2*num_nodes): phat(s_right).
  // - [2*num_nodes, 3*num_nodes): phat(s_left).
  // - [3*num_nodes, 4*num_nodes): rhat(s_right) = rhat(s_left).
  // - [4*num_nodes, 5*num_nodes): r(s_right).
  // - [5*num_nodes, 6*num_nodes): r(s_left).
  NucleotidePLVRefVector pvs_;
  // Reindex map for finding pv locations.
  Reindexer pv_reindexer_;
  size_t reindexer_init_size_ = 0;
  // Whether to use remapping (ReindexViaRemap) to reindex PLVs; otherwise PVs
  // are reindexed by physically moving/copying data (ReindexViaMoveCopy).
  // NOTE(review): the original comment here was truncated — confirm intent.
  bool use_remapping_ = true;
  // NOTE(review): not read anywhere in this header; presumably a threshold
  // used by the out-of-line reindexing implementation — confirm usage.
  double reindex_ratio = 10;
};
// PLVHandler: Partial Likelihood Vector Handler
template <class DAGElementId>
class PLVHandler
    : public PartialVectorHandler<PartialVectorType::PLVTypeEnum, DAGElementId> {
 public:
  using PLVType = PartialVectorType::PLVType;
  using PLVTypeEnum = PartialVectorType::PLVTypeEnum;
  using PLVTypeIterator = PLVTypeEnum::Iterator;
  static const inline size_t plv_count_ = PLVTypeEnum::Count;
  // Bundle of the six PLV ids associated with a single DAG element.
  struct PVIdSet {
    DAGElementId id;
    PVId p_pvid;
    PVId phatright_pvid;
    PVId phatleft_pvid;
    PVId rhat_pvid;
    PVId rright_pvid;
    PVId rleft_pvid;
  };
  PLVHandler(const std::string &mmap_file_path, const size_t elem_count,
             const size_t pattern_count, const double resizing_factor = 2.0)
      : PartialVectorHandler<PLVTypeEnum, DAGElementId>(
            mmap_file_path, elem_count, pattern_count, resizing_factor) {}
  // Gather all PLV ids for the given element.
  // `this->` is required: GetPVIndex is a member of a dependent base class, so
  // an unqualified call is not found by two-phase name lookup (gcc/clang
  // reject it at instantiation).
  PVIdSet BuildPVIdSet(const DAGElementId elem_id) {
    PVIdSet result;
    result.id = elem_id;
    result.p_pvid = this->GetPVIndex(PLVType::P, elem_id);
    result.phatright_pvid = this->GetPVIndex(PLVType::PHatRight, elem_id);
    result.phatleft_pvid = this->GetPVIndex(PLVType::PHatLeft, elem_id);
    result.rhat_pvid = this->GetPVIndex(PLVType::RHat, elem_id);
    result.rright_pvid = this->GetPVIndex(PLVType::RRight, elem_id);
    result.rleft_pvid = this->GetPVIndex(PLVType::RLeft, elem_id);
    return result;
  }
};
using PLVNodeHandler = PLVHandler<NodeId>;
using PLVEdgeHandler = PLVHandler<EdgeId>;
// PSVHandler: Partial Sankoff Vector Handler
// Owns the three PSV types (PRight, PLeft, Q) for each DAG element
// (node or edge, per DAGElementId).
template <class DAGElementId>
class PSVHandler
    : public PartialVectorHandler<PartialVectorType::PSVTypeEnum, DAGElementId> {
 public:
  using PSVType = PartialVectorType::PSVType;
  using PSVTypeEnum = PartialVectorType::PSVTypeEnum;
  using PSVTypeIterator = PSVTypeEnum::Iterator;
  static const inline size_t psv_count_ = PartialVectorType::PSVTypeEnum::Count;

  // The PV ids of all three PSV types for a single DAG element.
  struct PVIdSet {
    // Generalized from EdgeId to DAGElementId so the NodeId instantiation
    // (PSVNodeHandler) works; for PSVEdgeHandler this is EdgeId as before.
    DAGElementId edge_id;
    PVId pright_pvid;
    PVId pleft_pvid;
    PVId q_pvid;
  };

  PSVHandler(const std::string &mmap_file_path, const size_t elem_count,
             const size_t pattern_count, const double resizing_factor = 2.0)
      : PartialVectorHandler<PSVTypeEnum, DAGElementId>(
            mmap_file_path, elem_count, pattern_count, resizing_factor) {}

  // Gather the PV ids of every PSV type for the given element.
  PVIdSet BuildPVIdSet(const DAGElementId elem_id) {
    PVIdSet result;
    // Bug fix: previously assigned `result.id`, which is not a member of
    // PVIdSet (the field is named `edge_id`).
    result.edge_id = elem_id;
    // `this->` is required so two-phase lookup finds GetPVIndex in the
    // dependent base class.
    result.pright_pvid = this->GetPVIndex(PSVType::PRight, elem_id);
    result.pleft_pvid = this->GetPVIndex(PSVType::PLeft, elem_id);
    result.q_pvid = this->GetPVIndex(PSVType::Q, elem_id);
    return result;
  }
};

using PSVNodeHandler = PSVHandler<NodeId>;
using PSVEdgeHandler = PSVHandler<EdgeId>;
#ifdef DOCTEST_LIBRARY_INCLUDED
// Check that PLV iterator iterates over all PLVs exactly once.
TEST_CASE("PLVHandler: EnumIterator") {
  using namespace PartialVectorType;
  // Seed a visit counter for every PLV type in the canonical type array.
  std::map<PLVType, size_t> visit_counts;
  for (const PLVType type : PLVTypeEnum::TypeArray()) {
    visit_counts.insert({type, 0});
  }
  // Walk the EnumIterator, tallying each PLV type it yields.
  for (const PLVType type : PLVTypeEnum::Iterator()) {
    CHECK_MESSAGE(visit_counts.find(type) != visit_counts.end(),
                  "Iterator has PLV not in plv_vector.");
    ++visit_counts.at(type);
  }
  // Every type must have been produced exactly once.
  for (const auto& entry : visit_counts) {
    CHECK_FALSE_MESSAGE(entry.second < 1, "One or more PLVs skipped by EnumIterator.");
    CHECK_FALSE_MESSAGE(entry.second > 1,
                        "One or more PLVs in visited more than once by EnumIterator.");
  }
}
#endif  // DOCTEST_LIBRARY_INCLUDED
| 24,765
|
C++
|
.h
| 583
| 37.178388
| 88
| 0.665478
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,083
|
gp_engine.hpp
|
phylovi_bito/src/gp_engine.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A visitor for GPOperations. See
// https://arne-mertz.de/2018/05/modern-c-features-stdvariant-and-stdvisit/
#pragma once
#include "eigen_sugar.hpp"
#include "gp_operation.hpp"
#include "pv_handler.hpp"
#include "numerical_utils.hpp"
#include "quartet_hybrid_request.hpp"
#include "rooted_tree_collection.hpp"
#include "sbn_maps.hpp"
#include "site_pattern.hpp"
#include "substitution_model.hpp"
#include "reindexer.hpp"
#include "subsplit_dag_storage.hpp"
#include "optimization.hpp"
#include "dag_branch_handler.hpp"
#include "dag_data.hpp"
// GPEngine is the computational engine for generalized pruning: it acts as a
// visitor over GPOperations (see the operator() overloads below) and owns the
// partial likelihood vectors, branch lengths, and per-GPCSP likelihood data.
class GPEngine {
 public:
  // Constructs the engine, mmapping PLV storage at mmap_file_path and sizing
  // all data vectors for the given node and GPCSP counts.
  GPEngine(SitePattern site_pattern, size_t node_count, size_t gpcsp_count,
           const std::string& mmap_file_path, double rescaling_threshold,
           EigenVectorXd sbn_prior, EigenVectorXd unconditional_node_probabilities,
           EigenVectorXd inverted_sbn_prior, bool use_gradients);
  // Initialize prior with given starting values.
  void InitializePriors(EigenVectorXd sbn_prior,
                        EigenVectorXd unconditional_node_probabilities,
                        EigenVectorXd inverted_sbn_prior);
  // This sets all priors to 1.0. For testing purposes.
  void SetNullPrior();

  // ** Resizing and Reindexing

  // Resize GPEngine to accommodate DAG with given number of nodes and edges. Option to
  // remap data according to DAG reindexers. Option to give explicit number of nodes or
  // edges to allocate memory for (this is the only way memory allocation will be
  // decreased).
  void GrowPLVs(const size_t node_count,
                std::optional<const Reindexer> node_reindexer = std::nullopt,
                std::optional<const size_t> explicit_allocation = std::nullopt,
                const bool on_initialization = false);
  void GrowGPCSPs(const size_t gpcsp_count,
                  std::optional<const Reindexer> gpcsp_reindexer = std::nullopt,
                  std::optional<const size_t> explicit_allocation = std::nullopt,
                  const bool on_intialization = false);
  // Remap node and edge-based data according to reordering of DAG nodes and edges.
  void ReindexPLVs(const Reindexer& node_reindexer, const size_t old_node_count);
  void ReindexGPCSPs(const Reindexer& gpcsp_reindexer, const size_t old_gpcsp_count);
  // Grow space for storing temporary computation.
  void GrowSparePLVs(const size_t new_node_spare_count);
  void GrowSpareGPCSPs(const size_t new_gpcsp_spare_count);

  // ** GPOperations

  // These operators mean that we can invoke this class on each of the operations.
  void operator()(const GPOperations::ZeroPLV& op);
  void operator()(const GPOperations::SetToStationaryDistribution& op);
  void operator()(const GPOperations::IncrementWithWeightedEvolvedPLV& op);
  void operator()(const GPOperations::ResetMarginalLikelihood& op);
  void operator()(const GPOperations::IncrementMarginalLikelihood& op);
  void operator()(const GPOperations::Multiply& op);
  void operator()(const GPOperations::Likelihood& op);
  void operator()(const GPOperations::OptimizeBranchLength& op);
  void operator()(const GPOperations::UpdateSBNProbabilities& op);
  void operator()(const GPOperations::PrepForMarginalization& op);
  // Apply all operations in vector in order from beginning to end.
  void ProcessOperations(GPOperationVector operations);

  // ** Branch Length Optimization

  void InitializeBranchLengthHandler();
  void OptimizeBranchLength(const GPOperations::OptimizeBranchLength& op);
  void SetOptimizationMethod(const OptimizationMethod method);
  void UseGradientOptimization(const bool use_gradients);
  void SetSignificantDigitsForOptimization(int significant_digits);
  // Optimization-pass bookkeeping, delegated to the branch handler.
  size_t GetOptimizationCount() { return branch_handler_.GetOptimizationCount(); }
  void ResetOptimizationCount() { branch_handler_.ResetOptimizationCount(); }
  void IncrementOptimizationCount() { branch_handler_.IncrementOptimizationCount(); }
  bool IsFirstOptimization() { return branch_handler_.IsFirstOptimization(); }
  // Load the given branch length into the transition matrix (and, per variant,
  // its derivative/transposed companions).
  void SetTransitionMatrixToHaveBranchLength(double branch_length);
  void SetTransitionAndDerivativeMatricesToHaveBranchLength(double branch_length);
  void SetTransitionMatrixToHaveBranchLengthAndTranspose(double branch_length);
  void SetBranchLengths(EigenVectorXd branch_lengths);
  void SetBranchLengthsToConstant(double branch_length);
  void SetBranchLengthsToDefault();
  void ResetLogMarginalLikelihood();

  // The purpose of these functions are here to move data associated with the subsplit
  // DAG from their temporary locations before reindexing to their final locations after
  // reindexing (though they are more general).
  void CopyNodeData(const NodeId src_node_idx, const NodeId dest_node_idx);
  void CopyPLVData(const size_t src_plv_idx, const size_t dest_plv_idx);
  void CopyGPCSPData(const EdgeId src_gpcsp_idx, const EdgeId dest_gpcsp_idx);

  // ** Access

  // Get Branch Lengths.
  const DAGBranchHandler& GetBranchLengthHandler() const { return branch_handler_; }
  DAGBranchHandler& GetBranchLengthHandler() { return branch_handler_; }
  EigenVectorXd GetBranchLengths() const;
  EigenVectorXd GetBranchLengths(const size_t start, const size_t length) const;
  // Get Branch Lengths from temporary space.
  EigenVectorXd GetSpareBranchLengths(const size_t start, const size_t length) const;
  // Get differences for branch lengths during optimization to assess convergence.
  EigenVectorXd GetBranchLengthDifferences() const;
  // This function returns a vector indexed by GPCSP such that the i-th entry
  // stores the log of the across-sites product of
  // (the marginal likelihood conditioned on a given GPCSP) *
  // (the unconditional probability of i's parent subsplit).
  // That is, it's sum_m r^m(t) P(t -> s) p^m(s).
  // See lem:PerPCSPMarginalLikelihood.
  // #288 rename?
  EigenVectorXd GetPerGPCSPLogLikelihoods() const;
  // This override of GetPerGPCSPLogLikelihoods computes the marginal log
  // likelihood for GPCSPs in the range [start, start + length).
  EigenVectorXd GetPerGPCSPLogLikelihoods(const size_t start,
                                          const size_t length = 1) const;
  // Get PerGPCSPLogLikelihoods from temporary space.
  EigenVectorXd GetSparePerGPCSPLogLikelihoods(const size_t start,
                                               const size_t length = 1) const;
  // This is the full marginal likelihood sum restricted to trees containing a PCSP.
  // When we sum the log of eq:PerGPCSPComponentsOfFullMarginal over the sites, we get
  // out a term that is the number of sites times the log of the prior conditional PCSP
  // probability.
  EigenVectorXd GetPerGPCSPComponentsOfFullLogMarginal() const;
  // #288 reconsider this name
  EigenConstMatrixXdRef GetLogLikelihoodMatrix() const;
  EigenConstVectorXdRef GetHybridMarginals() const;
  EigenConstVectorXdRef GetSBNParameters() const;
  double GetLogMarginalLikelihood() const;
  const Eigen::Matrix4d& GetTransitionMatrix() const { return transition_matrix_; };
  // Partial Likelihood Vector Handler.
  const PLVNodeHandler& GetPLVHandler() const { return plv_handler_; }
  NucleotidePLVRefVector& GetPLVs() { return plv_handler_.GetPVs(); }
  const NucleotidePLVRefVector& GetPLVs() const { return plv_handler_.GetPVs(); }
  NucleotidePLVRef& GetPLV(const PVId plv_index) { return plv_handler_(plv_index); }
  const NucleotidePLVRef& GetPLV(const PVId plv_index) const {
    return plv_handler_(plv_index);
  }
  NucleotidePLVRef& GetSparePLV(const PVId plv_index) {
    return plv_handler_.GetSparePV(plv_index);
  }
  const NucleotidePLVRef& GetSparePLV(const PVId plv_index) const {
    return plv_handler_.GetSparePV(plv_index);
  }
  PVId GetSparePLVIndex(const PVId plv_index) const {
    return plv_handler_.GetSparePVIndex(plv_index);
  }

  // ** Other Operations

  // Calculate a vector of likelihoods, one for each summand of the hybrid marginal.
  EigenVectorXd CalculateQuartetHybridLikelihoods(const QuartetHybridRequest& request);
  // Calculate the actual hybrid marginal and store it in the corresponding entry of
  // hybrid_marginal_log_likelihoods_.
  void ProcessQuartetHybridRequest(const QuartetHybridRequest& request);
  // Gather branch lengths from loaded sample with their corresponding pcsp.
  SizeDoubleVectorMap GatherBranchLengths(const RootedTreeCollection& tree_collection,
                                          const BitsetSizeMap& indexer);
  // Use branch lengths from loaded sample as a starting point for optimization. Use the
  // mean branch length found for a given edge.
  void HotStartBranchLengths(const RootedTreeCollection& tree_collection,
                             const BitsetSizeMap& indexer);
  // Take the first branch length encountered (in the supplied tree collection) for a
  // given edge for the branch length of the sDAG. Set branch lengths that are not thus
  // specified to default_branch_length_.
  void TakeFirstBranchLength(const RootedTreeCollection& tree_collection,
                             const BitsetSizeMap& indexer);
  // Log likelihood and its branch-length derivative(s) for the given operation
  // or (gpcsp, rootward PLV, leafward PLV) triple.
  DoublePair LogLikelihoodAndDerivative(const GPOperations::OptimizeBranchLength& op);
  DoublePair LogLikelihoodAndDerivative(const size_t gpcsp, const size_t rootward,
                                        const size_t leafward);
  std::tuple<double, double, double> LogLikelihoodAndFirstTwoDerivatives(
      const GPOperations::OptimizeBranchLength& op);
  std::tuple<double, double, double> LogLikelihoodAndFirstTwoDerivatives(
      const size_t gpcsp, const size_t rootward, const size_t leafward);

  // ** I/O

  // Output PLV to string.
  std::string PLVToString(const PVId plv_idx) const;
  // Output LogLikelihood to string.
  std::string LogLikelihoodMatrixToString() const;

  // ** Counts

  size_t GetPLVCountPerNode() const { return plv_handler_.GetPVCountPer(); }
  size_t GetSitePatternCount() const { return site_pattern_.PatternCount(); };
  // Node Counts.
  size_t GetNodeCount() const { return plv_handler_.GetCount(); };
  size_t GetSpareNodeCount() const { return plv_handler_.GetSpareCount(); }
  size_t GetAllocatedNodeCount() const { return plv_handler_.GetAllocatedCount(); }
  size_t GetPaddedNodeCount() const { return plv_handler_.GetPaddedCount(); };
  void SetNodeCount(const size_t node_count) { plv_handler_.SetCount(node_count); }
  void SetSpareNodeCount(const size_t node_spare_count) {
    plv_handler_.SetSpareCount(node_spare_count);
  }
  void SetAllocatedNodeCount(const size_t node_alloc) {
    plv_handler_.SetAllocatedCount(node_alloc);
  }
  // PLV Counts.
  size_t GetPLVCount() const { return plv_handler_.GetPVCount(); };
  size_t GetSparePLVCount() const { return plv_handler_.GetSparePVCount(); };
  size_t GetPaddedPLVCount() const { return plv_handler_.GetPaddedPVCount(); };
  size_t GetAllocatedPLVCount() const { return plv_handler_.GetAllocatedPVCount(); }
  // GPCSP Counts.
  size_t GetGPCSPCount() const { return gpcsp_count_; };
  size_t GetSpareGPCSPCount() const { return gpcsp_spare_count_; };
  size_t GetAllocatedGPCSPCount() const { return gpcsp_alloc_; };
  size_t GetPaddedGPCSPCount() const { return GetGPCSPCount() + GetSpareGPCSPCount(); };
  // Translate an offset into the GPCSP scratch area to an absolute index.
  size_t GetSpareGPCSPIndex(const size_t gpcsp_offset) const {
    const size_t gpcsp_scratch_size = GetPaddedGPCSPCount() - GetGPCSPCount();
    Assert(gpcsp_offset < gpcsp_scratch_size,
           "Requested gpcsp_offset outside of allocated scratch space.");
    return gpcsp_offset + GetGPCSPCount();
  }
  void SetGPCSPCount(const size_t gpcsp_count) { gpcsp_count_ = gpcsp_count; }
  void SetSpareGPCSPCount(const size_t gpcsp_spare_count) {
    gpcsp_spare_count_ = gpcsp_spare_count;
  }
  void SetAllocatedGPCSPCount(const size_t gpcsp_alloc) { gpcsp_alloc_ = gpcsp_alloc; }

 private:
  // Initialize PLVs and populate leaf PLVs with taxon site data.
  void InitializePLVsWithSitePatterns();

  // Multiply the given PLV by rescaling_threshold_^amount, in place.
  void RescalePLV(size_t plv_idx, int amount);
  void AssertPLVIsFinite(size_t plv_idx, const std::string& message) const;
  // Smallest and largest entry of the given PLV.
  std::pair<double, double> PLVMinMax(size_t plv_idx) const;
  // If a PLV all entries smaller than rescaling_threshold_ then rescale it up and
  // increment the corresponding entry in rescaling_counts_.
  void RescalePLVIfNeeded(size_t plv_idx);
  double LogRescalingFor(size_t plv_idx);

  // Per-pattern second derivatives from two PLVs via hessian_matrix_.
  inline void PrepareUnrescaledPerPatternLikelihoodSecondDerivatives(size_t src1_idx,
                                                                     size_t src2_idx) {
    per_pattern_likelihood_second_derivatives_ =
        (GetPLV(PVId(src1_idx)).transpose() * hessian_matrix_ * GetPLV(PVId(src2_idx)))
            .diagonal()
            .array();
  }
  // Per-pattern first derivatives via derivative_matrix_.
  inline void PrepareUnrescaledPerPatternLikelihoodDerivatives(size_t src1_idx,
                                                               size_t src2_idx) {
    per_pattern_likelihood_derivatives_ = (GetPLV(PVId(src1_idx)).transpose() *
                                           derivative_matrix_ * GetPLV(PVId(src2_idx)))
                                              .diagonal()
                                              .array();
  }
  // Per-pattern likelihoods via transition_matrix_.
  inline void PrepareUnrescaledPerPatternLikelihoods(size_t src1_idx, size_t src2_idx) {
    per_pattern_likelihoods_ = (GetPLV(PVId(src1_idx)).transpose() *
                                transition_matrix_ * GetPLV(PVId(src2_idx)))
                                   .diagonal()
                                   .array();
  }
  // This function is used to compute the marginal log likelihood over all trees that
  // have a given PCSP. We assume that transition_matrix_ is as desired, and src1_idx
  // and src2_idx are the two PLV indices on either side of the PCSP.
  inline void PreparePerPatternLogLikelihoodsForGPCSP(size_t src1_idx,
                                                      size_t src2_idx) {
    per_pattern_log_likelihoods_ = (GetPLV(PVId(src1_idx)).transpose() *
                                    transition_matrix_ * GetPLV(PVId(src2_idx)))
                                       .diagonal()
                                       .array()
                                       .log() +
                                   LogRescalingFor(src1_idx) +
                                   LogRescalingFor(src2_idx);
  }

 public:
  static constexpr double default_rescaling_threshold_ = 1e-40;

 private:
  // Descriptor containing all taxa and sequence alignments.
  SitePattern site_pattern_;
  // Rescaling threshold factor to prevent under/overflow errors.
  const double rescaling_threshold_;
  // Rescaling threshold in log space.
  const double log_rescaling_threshold_;

  // ** Data Sizing
  // "Count" is the currently occupied by data.
  // "Padding" is the amount of free working space added to end of occupied space.
  // "Alloc" is the total current memory allocation.
  // "Resizing factor" is the amount of extra storage allocated for when resizing.
  // Note: All node and PLV counts are handled by the PLVNodeHandler.

  // Total number of edges in DAG. Determines sizes of data vectors indexed on edges
  // like branch lengths.
  size_t gpcsp_count_ = 0;
  size_t gpcsp_alloc_ = 0;
  size_t gpcsp_spare_count_ = 3;
  // Growth factor when reallocating data.
  constexpr static double resizing_factor_ = 2.0;

  // ** Per-Node Data

  // Partial Likelihood Vector Handler.
  PLVNodeHandler plv_handler_;
  // Unconditional probabilities for each node in DAG.
  EigenVectorXd unconditional_node_probabilities_;
  // Rescaling count for each plv.
  EigenVectorXi rescaling_counts_;
  // For hybrid marginal calculations. #328
  // The PLV coming down from the root to s.
  EigenMatrixXd quartet_root_plv_;
  // The R-PLV pointing leafward from s.
  EigenMatrixXd quartet_r_s_plv_;
  // The Q-PLV pointing leafward from s.
  EigenMatrixXd quartet_q_s_plv_;
  // The R-PLV pointing leafward from t.
  EigenMatrixXd quartet_r_sorted_plv_;

  // ** Per-Edge Data

  // branch_handler_, q_, etc. are indexed in the same way as sbn_parameters_ in
  // gp_instance.
  DAGBranchHandler branch_handler_;
  // During initialization, stores the SBN prior.
  // After UpdateSBNProbabilities(), stores the SBN probabilities.
  // Stored in log space.
  EigenVectorXd q_;
  EigenVectorXd inverted_sbn_prior_;
  // The number of rows is equal to the number of GPCSPs.
  // The number of columns is equal to the number of site patterns.
  // The rows are indexed in the same way as branch_handler_ and q_.
  // Entry (i,j) stores the marginal log likelihood over all trees that include
  // a GPCSP corresponding to index i at site j.
  EigenMatrixXd log_likelihoods_;
  // The length of this vector is equal to the number of site patterns.
  // Entry j stores the marginal log likelihood over all trees at site pattern
  // j.
  EigenVectorXd log_marginal_likelihood_;
  // This vector is indexed by the GPCSPs and stores the hybrid marginals if they are
  // available.
  EigenVectorXd hybrid_marginal_log_likelihoods_;
  // Internal "temporaries" useful for likelihood and derivative calculation.
  EigenVectorXd per_pattern_log_likelihoods_;
  EigenVectorXd per_pattern_likelihoods_;
  EigenVectorXd per_pattern_likelihood_derivatives_;
  EigenVectorXd per_pattern_likelihood_derivative_ratios_;
  EigenVectorXd per_pattern_likelihood_second_derivatives_;
  EigenVectorXd per_pattern_likelihood_second_derivative_ratios_;

  // ** Model
  // When we change from JC69Model, check that we are actually doing transpose in
  // leafward calculations.
  JC69Model substitution_model_;
  Eigen::Matrix4d eigenmatrix_ = substitution_model_.GetEigenvectors().reshaped(4, 4);
  Eigen::Matrix4d inverse_eigenmatrix_ =
      substitution_model_.GetInverseEigenvectors().reshaped(4, 4);
  Eigen::Vector4d eigenvalues_ = substitution_model_.GetEigenvalues();
  Eigen::Vector4d diagonal_vector_;
  Eigen::DiagonalMatrix<double, 4> diagonal_matrix_;
  Eigen::Matrix4d transition_matrix_;
  Eigen::Matrix4d derivative_matrix_;
  Eigen::Matrix4d hessian_matrix_;
  Eigen::Vector4d stationary_distribution_ = substitution_model_.GetFrequencies();
  EigenVectorXd site_pattern_weights_;
};
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("GPEngine") {
  // Build a minimal engine over the "hello" example alignment; the prior
  // vectors are left empty since only the transition matrix is exercised.
  EigenVectorXd no_priors;
  SitePattern site_pattern = SitePattern::HelloSitePattern();
  GPEngine engine(site_pattern, 5, 5, "_ignore/mmapped_plv.data",
                  GPEngine::default_rescaling_threshold_, no_priors, no_priors,
                  no_priors, false);
  engine.SetTransitionMatrixToHaveBranchLength(0.75);
  // Computed directly:
  // https://en.wikipedia.org/wiki/Models_of_DNA_evolution#JC69_model_%28Jukes_and_Cantor_1969%29
  const auto& transition_matrix = engine.GetTransitionMatrix();
  CHECK(fabs(transition_matrix(0, 0) - 0.52590958087) < 1e-10);
  CHECK(fabs(transition_matrix(0, 1) - 0.1580301397) < 1e-10);
}
#endif  // DOCTEST_LIBRARY_INCLUDED
| 18,928
|
C++
|
.h
| 347
| 48.397695
| 97
| 0.727783
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,084
|
subsplit_dag_storage.hpp
|
phylovi_bito/src/subsplit_dag_storage.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
// The following classes are used internally by SubsplitDAG for storing the nodes and
// edges, and providing convenient views and lookups into the data. Terminology has been
// changed in order to distinguish from the public API of SubsplitDAG - Edge becomes
// DAGLine and Node becomes DAGVertex.
//
// Functionality interface of the individual components has been separated from the
// storage containers to allow for flexible representation of multi-element collections:
// class DAGLineStorage - owns the data representing an Edge (IDs of the two nodes, edge
// ID, clade) class DAGLineView - cheaply copyable wrapper for a reference to
// DAGLineStorage class DAGLine - a pure interface providing accessor methods to
// DAGLine{View,Storage} aliases LineView and ConstLineView - used within SubsplitDAG to
// access const and mutable edges class GenericLinesView - cheaply copyable view into a
// collection of edges
//
// Similar design is applied to nodes - they have storage, view, interface and
// collection classes. The node view class is called GenericSubsplitDAGNode and is
// defined in subsplit_dag_node.hpp. Additionally nodes exposes class
// GenericNeighborsView, which is a view that wraps a reference to the node neighbors
// collection - internally a std::map<NodeId, EdgeId>.
//
// The SubsplitDAGStorage class glues together all components, and is owned exclusively
// by SubsplitDAG.
#pragma once
#include <cstddef>
#include <functional>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>

#include "bitset.hpp"
#include "reindexer.hpp"
#include "sugar.hpp"
// Strongly-typed ids for the DAG's nodes, edges, and taxa (distinct tag types
// prevent mixing them up).
using NodeId = GenericId<struct NodeIdTag>;
using EdgeId = GenericId<struct EdgeIdTag>;
using TaxonId = GenericId<struct TaxonIdTag>;
// Lookup maps keyed by taxon name or subsplit bitset.
using StringTaxonIdMap = std::unordered_map<std::string, TaxonId>;
using BitsetNodeIdMap = std::unordered_map<Bitset, NodeId>;
using BitsetNodeIdVectorMap = std::unordered_map<Bitset, std::vector<NodeId>>;
using BitsetNodeIdSetMap = std::unordered_map<Bitset, std::unordered_set<NodeId>>;
using NodeIdBitsetMap = std::unordered_map<NodeId, Bitset>;
// Pairs and collections of ids.
using EdgeIdPair = std::pair<EdgeId, EdgeId>;
using NodeIdPair = std::pair<NodeId, NodeId>;
using NodeIdEdgeIdPairMap = std::unordered_map<NodeId, EdgeIdPair>;
using NodeIdVector = std::vector<NodeId>;
using NodeIdSet = std::unordered_set<NodeId>;
using EdgeIdVector = std::vector<EdgeId>;
using TaxonIdVector = std::vector<TaxonId>;
using BitsetEdgeIdMap = std::unordered_map<Bitset, EdgeId>;
using EdgeIdBitsetMap = std::unordered_map<EdgeId, Bitset>;
using BitsetEdgeIdPairMap = std::unordered_map<Bitset, EdgeIdPair>;
using NodeIdVectorPair = std::pair<NodeIdVector, NodeIdVector>;
using BitsetPairVector = std::vector<std::pair<Bitset, Bitset>>;

// Direction of traversal relative to the DAG root.
enum class Direction { Rootward, Leafward };
static const inline size_t DirectionCount = 2;
// Wraps the Direction enum with iteration, labels, and printing support.
class DirectionEnum : public EnumWrapper<Direction, size_t, DirectionCount,
                                         Direction::Rootward, Direction::Leafward> {
 public:
  static inline const std::string Prefix = "Direction";
  static inline const Array<std::string> Labels = {{"Rootward", "Leafward"}};

  // Render a Direction as "Direction::<Label>".
  static std::string ToString(const Direction direction) {
    return Prefix + "::" + Labels[direction];
  }
  friend std::ostream& operator<<(std::ostream& os, const Direction direction) {
    return os << ToString(direction);
  }
};
// CRTP interface providing the accessors/mutators for a DAG edge ("line").
// Derived must expose a storage() method returning the backing DAGLineStorage.
template <typename Derived>
class DAGLine {
 public:
  EdgeId GetId() const { return storage().id_; }
  NodeId GetParent() const { return storage().parent_; }
  NodeId GetChild() const { return storage().child_; }
  SubsplitClade GetSubsplitClade() const { return storage().clade_; }

  // Setters return the derived object to allow call chaining.
  Derived& SetId(EdgeId id) {
    storage().id_ = id;
    return derived();
  }
  Derived& SetParent(NodeId id) {
    storage().parent_ = id;
    return derived();
  }
  Derived& SetChild(NodeId id) {
    storage().child_ = id;
    return derived();
  }
  Derived& SetSubsplitClade(SubsplitClade clade) {
    storage().clade_ = clade;
    return derived();
  }

  // The (parent, child) node ids of this edge as a pair.
  std::pair<NodeId, NodeId> GetNodeIds() const { return {GetParent(), GetChild()}; }

 private:
  // CRTP downcasts to the concrete derived type.
  Derived& derived() { return static_cast<Derived&>(*this); }
  const Derived& derived() const { return static_cast<const Derived&>(*this); }
  auto& storage() { return derived().storage(); }
  const auto& storage() const { return derived().storage(); }
};
template <typename T>
class DAGLineView;

// Owning storage for a DAG edge: its id, parent/child node ids, and clade.
class DAGLineStorage : public DAGLine<DAGLineStorage> {
 public:
  DAGLineStorage() = default;
  DAGLineStorage(const DAGLineStorage&) = default;
  DAGLineStorage(EdgeId id, NodeId parent, NodeId child, SubsplitClade clade)
      : id_{id}, parent_{parent}, child_{child}, clade_{clade} {}
  // Assigning from a view copies the storage the view refers to.
  template <typename T>
  DAGLineStorage& operator=(DAGLineView<T> other) {
    *this = other.line_;
    return *this;
  }

 private:
  // :: is workaround for GCC bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52625
  template <typename>
  friend class ::DAGLine;
  DAGLineStorage& storage() { return *this; }
  const DAGLineStorage& storage() const { return *this; }
  // NoId sentinels mark an uninitialized edge.
  EdgeId id_ = EdgeId(NoId);
  NodeId parent_ = NodeId(NoId);
  NodeId child_ = NodeId(NoId);
  SubsplitClade clade_ = SubsplitClade::Unspecified;
};
// Cheaply copyable view of a DAGLineStorage; T is the (possibly const)
// storage type. Refuses to wrap an uninitialized edge.
template <typename T>
class DAGLineView : public DAGLine<DAGLineView<T>> {
 public:
  DAGLineView(T& line) : line_{line} {
    Assert(line.GetSubsplitClade() != SubsplitClade::Unspecified, "Uninitialized edge");
  }
  // Structured-binding support: get<0> is the (parent, child) node-id pair,
  // get<1> is the edge id (see the std::tuple_* specializations below).
  template <size_t I>
  auto get() const {
    static_assert(I < 2, "Index out of bounds");
    if constexpr (I == 0) return line_.GetNodeIds();
    if constexpr (I == 1) return line_.GetId();
  }

 private:
  template <typename>
  friend class DAGLine;
  friend class DAGLineStorage;
  DAGLineStorage& storage() { return line_; }
  const DAGLineStorage& storage() const { return line_; }
  T& line_;
};
using LineView = DAGLineView<DAGLineStorage>;
using ConstLineView = DAGLineView<const DAGLineStorage>;

// Tuple-protocol opt-in so LineView/ConstLineView can be used with structured
// bindings, e.g. `auto [node_ids, edge_id] = line_view;`.
namespace std {
template <>
struct tuple_size<::LineView> : public integral_constant<size_t, 2> {};
template <>
struct tuple_element<0, ::LineView> {
  using type = pair<::NodeId, ::NodeId>;
};
template <>
struct tuple_element<1, ::LineView> {
  using type = ::EdgeId;
};
template <>
struct tuple_size<::ConstLineView> : public integral_constant<size_t, 2> {};
template <>
struct tuple_element<0, ::ConstLineView> {
  using type = const pair<::NodeId, ::NodeId>;
};
template <>
struct tuple_element<1, ::ConstLineView> {
  using type = const ::EdgeId;
};
}  // namespace std
// View into a node's neighbor collection (a std::map<NodeId, EdgeId>) for one
// (direction, clade) combination. T is the map type, possibly const-qualified.
template <typename T>
class GenericNeighborsView {
  using iterator_type =
      std::conditional_t<std::is_const_v<T>, std::map<NodeId, EdgeId>::const_iterator,
                         std::map<NodeId, EdgeId>::iterator>;
  using map_type =
      std::conditional_t<std::is_const_v<T>, const std::map<NodeId, EdgeId>,
                         std::map<NodeId, EdgeId>>;

 public:
  // Forward iterator yielding the neighbor NodeIds; the connecting EdgeId is
  // available via GetEdgeId(). The iterator traits are declared directly
  // instead of deriving from std::iterator, which is deprecated since C++17.
  class Iterator {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = NodeId;
    using difference_type = std::ptrdiff_t;
    using pointer = NodeId*;
    using reference = NodeId&;

    Iterator(const iterator_type& i, map_type& map) : iter_{i}, map_{map} {}
    // Pre-increment now returns a reference (previously returned a copy).
    Iterator& operator++() {
      ++iter_;
      return *this;
    }
    bool operator!=(const Iterator& other) const { return iter_ != other.iter_; }
    bool operator==(const Iterator& other) const { return iter_ == other.iter_; }
    NodeId operator*() { return iter_->first; }
    NodeId GetNodeId() const { return iter_->first; }
    EdgeId GetEdgeId() const { return iter_->second; }

   private:
    iterator_type iter_;
    map_type& map_;
  };

  GenericNeighborsView(map_type& neighbors) : neighbors_{neighbors} {}
  // Converting constructor between const and non-const views.
  template <typename U>
  GenericNeighborsView(const GenericNeighborsView<U>& other)
      : neighbors_{other.neighbors_} {}

  Iterator begin() const { return {neighbors_.begin(), neighbors_}; }
  Iterator end() const { return {neighbors_.end(), neighbors_}; }
  size_t size() const { return neighbors_.size(); }
  bool empty() const { return neighbors_.empty(); }

  // Rebuild the map with neighbor node ids remapped through the reindexer.
  void RemapNodeIds(const Reindexer& reindexer) {
    for (const auto& [vertex_id, line_id] : neighbors_) {
      std::ignore = line_id;
      Assert(vertex_id < reindexer.size(),
             "Neighbors cannot contain an id out of bounds of the reindexer in "
             "GenericNeighborsView::RemapIds.");
    }
    T remapped{};
    for (const auto& [vertex_id, line_id] : neighbors_) {
      remapped[NodeId(reindexer.GetNewIndexByOldIndex(size_t(vertex_id)))] = line_id;
    }
    neighbors_ = remapped;
  }
  // Remap the stored edge ids through the reindexer (keys are unchanged).
  void RemapEdgeIdxs(const Reindexer& reindexer) {
    for (const auto& [vertex_id, line_id] : neighbors_) {
      std::ignore = vertex_id;
      Assert(line_id < reindexer.size(),
             "Neighbors cannot contain an id out of bounds of the reindexer in "
             "GenericNeighborsView::RemapIds.");
    }
    T remapped{};
    for (const auto& [vertex_id, line_id] : neighbors_) {
      remapped[vertex_id] = reindexer.GetNewIndexByOldIndex(size_t(line_id));
    }
    neighbors_ = remapped;
  }

  // Conversions to plain vectors of the neighbor node ids (in key order).
  operator SizeVector() const {
    SizeVector result;
    for (const auto& i : neighbors_) {
      result.push_back(size_t(i.first));
    }
    return result;
  }
  operator NodeIdVector() const {
    NodeIdVector result;
    for (const auto& i : neighbors_) {
      result.push_back(i.first);
    }
    return result;
  }

  void SetNeighbors(const T& neighbors) { neighbors_ = neighbors; }

 private:
  friend class Iterator;
  template <typename>
  friend class ::GenericNeighborsView;
  T& neighbors_;
};

using NeighborsView = GenericNeighborsView<std::map<NodeId, EdgeId>>;
using ConstNeighborsView = GenericNeighborsView<const std::map<NodeId, EdgeId>>;
class DAGVertex;
template <typename>
class GenericSubsplitDAGNode;
using MutableSubsplitDAGNode = GenericSubsplitDAGNode<DAGVertex>;
using SubsplitDAGNode = GenericSubsplitDAGNode<const DAGVertex>;

// Owning storage for a DAG node: its id, subsplit bitset, and one neighbor map
// per (direction, clade) combination.
class DAGVertex {
 public:
  DAGVertex() = default;
  DAGVertex(const DAGVertex&) = default;
  // Converting constructors from node views; defined elsewhere, once the view
  // type is complete (presumably in subsplit_dag_node.hpp — TODO confirm).
  inline DAGVertex(SubsplitDAGNode node);
  inline DAGVertex(MutableSubsplitDAGNode node);
  DAGVertex(NodeId id, Bitset subsplit) : id_{id}, subsplit_{std::move(subsplit)} {}

  NodeId GetId() const { return id_; }
  const Bitset& GetSubsplit() const { return subsplit_; }
  NeighborsView GetNeighbors(Direction direction, SubsplitClade clade) {
    return neighbors_.at({direction, clade});
  }
  ConstNeighborsView GetNeighbors(Direction direction, SubsplitClade clade) const {
    return neighbors_.at({direction, clade});
  }
  // A root has no rootward neighbors on either clade.
  bool IsRoot() const {
    return GetNeighbors(Direction::Rootward, SubsplitClade::Left).empty() &&
           GetNeighbors(Direction::Rootward, SubsplitClade::Right).empty();
  }
  // A leaf has no leafward neighbors on either clade.
  bool IsLeaf() const {
    return GetNeighbors(Direction::Leafward, SubsplitClade::Left).empty() &&
           GetNeighbors(Direction::Leafward, SubsplitClade::Right).empty();
  }
  // Search all four neighbor maps for the given node; returns the connecting
  // edge id plus the (direction, clade) it was found under, or nullopt.
  std::optional<std::tuple<EdgeId, Direction, SubsplitClade>> FindNeighbor(
      NodeId neighbor) const {
    for (auto i : neighbors_) {
      auto j = i.second.find(neighbor);
      if (j != i.second.end()) return {{j->second, i.first.first, i.first.second}};
    }
    return {};
  }

  DAGVertex& SetId(NodeId id) {
    id_ = id;
    return *this;
  }
  DAGVertex& SetSubsplit(Bitset subsplit) {
    subsplit_ = std::move(subsplit);
    return *this;
  }
  DAGVertex& AddNeighbor(Direction direction, SubsplitClade clade, NodeId neighbor,
                         EdgeId line) {
    neighbors_.at({direction, clade}).insert({neighbor, line});
    return *this;
  }
  void RemoveNeighbor(Direction direction, SubsplitClade clade, NodeId neighbor) {
    neighbors_.at({direction, clade}).erase(neighbor);
  }
  // Update the edge id stored for an existing neighbor; fails if not found.
  void SetEdgeId(NodeId neighbor, EdgeId line) {
    for (auto& i : neighbors_) {
      auto j = i.second.find(neighbor);
      if (j != i.second.end()) {
        i.second.insert_or_assign(j, neighbor, line);
        return;
      }
    }
    Failwith("Neighbor not found");
  }
  // Reset all four neighbor maps to empty.
  void ClearNeighbors() {
    neighbors_ = {
        {{Direction::Rootward, SubsplitClade::Left}, {}},
        {{Direction::Rootward, SubsplitClade::Right}, {}},
        {{Direction::Leafward, SubsplitClade::Left}, {}},
        {{Direction::Leafward, SubsplitClade::Right}, {}},
    };
  }

 private:
  NodeId id_ = NodeId(NoId);
  Bitset subsplit_ = Bitset{{}};
  // Keys are neighbor node ids; values the connecting edge ids.
  std::map<std::pair<Direction, SubsplitClade>, std::map<NodeId, EdgeId>> neighbors_ = {
      {{Direction::Rootward, SubsplitClade::Left}, {}},
      {{Direction::Rootward, SubsplitClade::Right}, {}},
      {{Direction::Leafward, SubsplitClade::Left}, {}},
      {{Direction::Leafward, SubsplitClade::Right}, {}},
  };
};
// Lightweight view over the lines (edges) held by a SubsplitDAGStorage.
// T is the (possibly const) storage type; elements are exposed through the
// matching (im)mutable line view type.
template <typename T>
class GenericLinesView {
  using view_type = std::conditional_t<std::is_const_v<T>, ConstLineView, LineView>;

 public:
  // Forward iterator over the stored lines. `std::iterator` is deprecated in
  // C++17, so the iterator trait typedefs are spelled out explicitly.
  class Iterator {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = view_type;
    using difference_type = std::ptrdiff_t;
    using pointer = view_type*;
    using reference = view_type&;

    Iterator(const GenericLinesView& view, size_t index) : view_{view}, index_{index} {}
    // Pre-increment now returns a reference (was by-value), matching the
    // standard iterator convention and GenericVerticesView::Iterator.
    Iterator& operator++() {
      ++index_;
      return *this;
    }
    bool operator!=(const Iterator& other) const { return index_ != other.index_; }
    view_type operator*() const { return view_.storage_.lines_[index_]; }

   private:
    const GenericLinesView& view_;
    size_t index_;
  };

  GenericLinesView(T& storage) : storage_{storage} {}
  Iterator begin() const { return {*this, 0}; }
  Iterator end() const { return {*this, storage_.lines_.size()}; }
  size_t size() const { return storage_.lines_.size(); }
  view_type operator[](size_t i) const { return storage_.lines_[i]; }

 private:
  friend class Iterator;
  T& storage_;
};
class SubsplitDAGStorage;
// Convenience aliases for line views over mutable/const storage.
using LinesView = GenericLinesView<SubsplitDAGStorage>;
using ConstLinesView = GenericLinesView<const SubsplitDAGStorage>;
// Lightweight view over the vertices (nodes) held by a SubsplitDAGStorage.
// T is the (possibly const) storage type; elements are exposed through the
// matching (im)mutable node view type.
template <typename T>
class GenericVerticesView {
  using view_type =
      std::conditional_t<std::is_const_v<T>, SubsplitDAGNode, MutableSubsplitDAGNode>;

 public:
  // Forward iterator over the stored vertices. `std::iterator` is deprecated
  // in C++17, so the iterator trait typedefs are spelled out explicitly.
  class Iterator {
   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = view_type;
    using difference_type = std::ptrdiff_t;
    using pointer = view_type*;
    using reference = view_type&;

    Iterator(T& storage, size_t index) : storage_{storage}, index_{index} {}
    Iterator& operator++() {
      ++index_;
      return *this;
    }
    Iterator operator++(int) { return {storage_, index_++}; }
    Iterator operator+(size_t i) { return {storage_, index_ + i}; }
    Iterator operator-(size_t i) { return {storage_, index_ - i}; }
    // Comparison operators are const (operator< previously was not).
    bool operator<(const Iterator& other) const { return index_ < other.index_; }
    bool operator!=(const Iterator& other) const { return index_ != other.index_; }
    view_type operator*() const;

   private:
    T& storage_;
    size_t index_;
  };

  GenericVerticesView(T& storage) : storage_{storage} {}
  Iterator begin() const { return {storage_, 0}; }
  Iterator end() const { return {storage_, storage_.vertices_.size()}; }
  Iterator cbegin() const { return {storage_, 0}; }
  Iterator cend() const { return {storage_, storage_.vertices_.size()}; }
  size_t size() const { return storage_.vertices_.size(); }
  view_type operator[](size_t i) const;
  view_type at(size_t i) const;

 private:
  friend class Iterator;
  T& storage_;
};
// Convenience aliases for vertex views over mutable/const storage.
using VerticesView = GenericVerticesView<SubsplitDAGStorage>;
using ConstVerticesView = GenericVerticesView<const SubsplitDAGStorage>;
// A vector that can optionally be prepended with host data for Graft purposes.
// Indices [0, HostSize()) resolve into the host's data; indices at or beyond
// HostSize() resolve into this instance's own data. It has no ownership over
// the host data, so the lifetime of the host data object should be greater
// than the given HostableVector instance.
template <typename T>
class HostableVector {
 public:
  explicit HostableVector(HostableVector<T>* host = nullptr)
      : host_{host ? &host->data_ : nullptr} {}

  // Bounds-checked access over the combined (host + own) range; throws
  // std::out_of_range past the combined size. The four accessors below share
  // one offset rule: host-backed indices go to the host, the rest index own
  // data shifted by HostSize() (which is 0 when un-hosted).
  T& at(size_t i) {
    if (host_ && i < host_->size()) {
      return (*host_)[i];
    }
    return data_.at(i - HostSize());
  }
  const T& at(size_t i) const {
    if (host_ && i < host_->size()) {
      return (*host_)[i];
    }
    return data_.at(i - HostSize());
  }

  // Unchecked access (same resolution rule as at()).
  T& operator[](size_t i) {
    if (host_ && i < host_->size()) {
      return (*host_)[i];
    }
    return data_[i - HostSize()];
  }
  const T& operator[](size_t i) const {
    if (host_ && i < host_->size()) {
      return (*host_)[i];
    }
    return data_[i - HostSize()];
  }

  // Combined size: host elements (if any) plus own elements.
  size_t size() const { return HostSize() + data_.size(); }

  // Resizes own data so the combined size becomes `new_size`. The host
  // portion is never resized; when hosted, `new_size` must be >= HostSize()
  // (otherwise the size_t subtraction wraps, as in the original code).
  void resize(size_t new_size) { data_.resize(new_size - HostSize()); }

  // Replaces own data (host portion, if any, is unaffected).
  HostableVector& operator=(const std::vector<T>& data) {
    data_ = data;
    return *this;
  }

  bool HaveHost() const { return host_; }
  // Number of elements contributed by the host (0 when un-hosted).
  size_t HostSize() const { return host_ ? host_->size() : 0; }
  // Rebinds to a new host (or detaches when host is null) and drops own data.
  void ResetHost(HostableVector<T>* host) {
    host_ = host ? &host->data_ : nullptr;
    data_ = {};
  }

 private:
  std::vector<T> data_;   // This instance's own elements.
  std::vector<T>* host_;  // Borrowed host elements; nullptr when un-hosted.
};
// Tag dispatching type to avoid confusion with copy constructor.
// See https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Tag_Dispatching
// Passed to SubsplitDAGStorage's (host, tag) constructor to select grafted
// construction on top of an existing storage.
struct HostDispatchTag {};
// Backing store for a subsplit DAG: a HostableVector of lines (edges) and a
// HostableVector of vertices (nodes), indexed by EdgeId/NodeId respectively.
// A storage may be layered on top of a "host" storage (see HostDispatchTag),
// in which case the host's elements occupy the leading indices.
class SubsplitDAGStorage {
 public:
  SubsplitDAGStorage() = default;
  SubsplitDAGStorage(const SubsplitDAGStorage&) = default;
  // Builds a grafted storage whose lines/vertices extend those of `host`.
  SubsplitDAGStorage(SubsplitDAGStorage& host, HostDispatchTag)
      : lines_{&host.lines_}, vertices_{&host.vertices_} {}

  ConstVerticesView GetVertices() const { return *this; }
  VerticesView GetVertices() { return *this; }
  ConstLinesView GetLines() const { return *this; }
  LinesView GetLines() { return *this; }

  // Looks up the line connecting `parent` to `child` via the parent vertex's
  // neighbor maps; nullopt if either id is out of range or no edge exists.
  std::optional<ConstLineView> GetLine(NodeId parent, NodeId child) const {
    if (parent.value_ >= vertices_.size() || child.value_ >= vertices_.size()) {
      return {};
    }
    auto result = vertices_.at(parent.value_).FindNeighbor(child);
    if (!result.has_value()) {
      return {};
    }
    return lines_.at(std::get<0>(result.value()).value_);
  }
  // Looks up a line by id; nullopt if out of range or the slot is unused
  // (id still set to NoId).
  std::optional<ConstLineView> GetLine(EdgeId id) const {
    if (id.value_ >= lines_.size()) {
      return std::nullopt;
    }
    auto& line = lines_[id.value_];
    if (line.GetId() == EdgeId(NoId)) {
      return std::nullopt;
    }
    return line;
  }
  const DAGVertex& GetVertex(NodeId id) const { return vertices_.at(id.value_); }
  // True when `id` is in range and the slot holds a real (id-assigned) vertex.
  bool ContainsVertex(NodeId id) const {
    if (id.value_ >= vertices_.size()) return false;
    return vertices_[id.value_].GetId() != NodeId(NoId);
  }
  // Linear scan for the vertex carrying the given subsplit; nullopt if absent.
  // NOTE(review): this overload's loop condition compares a NodeId against
  // size_t (`id < vertices_.size()`) while the const overload below compares
  // `id.value_` directly -- both rely on NodeId's comparison support; confirm
  // they agree.
  std::optional<std::reference_wrapper<DAGVertex>> FindVertex(const Bitset& subsplit) {
    for (NodeId id = 0; id < vertices_.size(); ++id.value_) {
      if (vertices_[id.value_].GetSubsplit() == subsplit) {
        return vertices_[id.value_];
      }
    }
    return std::nullopt;
  }
  std::optional<std::reference_wrapper<const DAGVertex>> FindVertex(
      const Bitset& subsplit) const {
    for (NodeId id = 0; id.value_ < vertices_.size(); ++id.value_) {
      if (vertices_[id.value_].GetSubsplit() == subsplit) {
        return vertices_[id.value_];
      }
    }
    return std::nullopt;
  }

  // Inserts or overwrites the line at newLine's id, growing storage if
  // needed. The id and subsplit clade must be set by the caller beforehand.
  DAGLineStorage& AddLine(const DAGLineStorage& newLine) {
    if (newLine.GetId() == EdgeId(NoId))
      Failwith("Set line id before inserting to storage");
    if (newLine.GetSubsplitClade() == SubsplitClade::Unspecified)
      Failwith("Set clade before inserting to storage");
    auto& line = GetOrInsert(lines_, newLine.GetId());
    line = newLine;
    return line;
  }
  // Inserts or overwrites the vertex at newVertex's id (id must be set).
  DAGVertex& AddVertex(const DAGVertex& newVertex) {
    if (newVertex.GetId() == NodeId(NoId))
      Failwith("Set vertex id before inserting to storage");
    auto& vertex = GetOrInsert(vertices_, newVertex.GetId());
    vertex = newVertex;
    return vertex;
  }

  // Repoints an existing line at a new parent/child pair. Neighbor maps are
  // not updated here -- see ConnectVertices/ConnectAllVertices.
  void ReindexLine(EdgeId line, NodeId parent, NodeId child) {
    lines_.at(line.value_).SetParent(parent).SetChild(child);
  }

  // Bulk-replace own (non-host) lines/vertices.
  void SetLines(const std::vector<DAGLineStorage>& lines) { lines_ = lines; }
  void SetVertices(const std::vector<DAGVertex>& vertices) { vertices_ = vertices; }

  bool HaveHost() const { return lines_.HaveHost(); }
  size_t HostLinesCount() const { return lines_.HostSize(); }
  size_t HostVerticesCount() const { return vertices_.HostSize(); }

  // Rebinds this storage onto a new host. Before rebinding, every graft-only
  // line that crosses the host boundary has its host-side endpoint's
  // neighbor entry removed, so the host is left without references to the
  // discarded graft; then own data is dropped.
  // NOTE(review): the loop uses NodeId to index lines_ -- an EdgeId would
  // better match; relies on NodeId's construction from size_t and operator++.
  void ResetHost(SubsplitDAGStorage& host) {
    for (NodeId id = lines_.HostSize(); id < lines_.size(); ++id) {
      auto& line = lines_[id.value_];
      if (line.GetParent().value_ >= vertices_.HostSize()) {
        if (line.GetChild().value_ < vertices_.HostSize()) {
          vertices_[line.GetChild().value_].RemoveNeighbor(
              Direction::Rootward, line.GetSubsplitClade(), line.GetParent());
        }
      }
      if (line.GetChild().value_ >= vertices_.HostSize()) {
        if (line.GetParent().value_ < vertices_.HostSize()) {
          vertices_[line.GetParent().value_].RemoveNeighbor(
              Direction::Leafward, line.GetSubsplitClade(), line.GetChild());
        }
      }
    }
    lines_.ResetHost(&host.lines_);
    vertices_.ResetHost(&host.vertices_);
  }

  // Registers the line's two endpoints in each other's neighbor maps.
  void ConnectVertices(EdgeId line_id) {
    auto& line = lines_.at(line_id.value_);
    auto& parent = vertices_.at(line.GetParent().value_);
    auto& child = vertices_.at(line.GetChild().value_);
    parent.AddNeighbor(Direction::Leafward, line.GetSubsplitClade(), child.GetId(),
                       line_id);
    child.AddNeighbor(Direction::Rootward, line.GetSubsplitClade(), parent.GetId(),
                      line_id);
  }
  // Rebuilds every vertex's neighbor maps from scratch out of the line list;
  // unused line slots (id == NoId) are skipped.
  void ConnectAllVertices() {
    for (size_t i = 0; i < vertices_.size(); ++i) {
      vertices_[i].ClearNeighbors();
    }
    for (size_t i = 0; i < lines_.size(); ++i) {
      if (lines_[i].GetId() == EdgeId(NoId)) {
        continue;
      }
      ConnectVertices(lines_[i].GetId());
    }
  }

  // Linear scan for a used vertex with no rootward neighbors; nullopt if
  // none is found. Returns the first such vertex encountered.
  std::optional<std::reference_wrapper<const DAGVertex>> FindRoot() const {
    for (size_t i = 0; i < vertices_.size(); ++i) {
      auto& vertex = vertices_[i];
      if (vertex.GetId() == NoId) {
        continue;
      }
      if (vertex.GetNeighbors(Direction::Rootward, SubsplitClade::Left).empty() &&
          vertex.GetNeighbors(Direction::Rootward, SubsplitClade::Right).empty()) {
        return vertex;
      }
    }
    return std::nullopt;
  }

  // Debug dump of all lines and vertices (with adjacency) to stdout.
  void Print() const {
    std::cout << "== LINES ==" << std::endl;
    for (size_t id = 0; id < lines_.size(); id++) {
      auto& line = lines_[id];
      std::cout << "[" << id << "]: { id::" << line.GetId()
                << " parent::" << line.GetParent() << " child::" << line.GetChild()
                << " }" << std::endl;
    }
    std::cout << "== VERTICES ==" << std::endl;
    for (size_t id = 0; id < vertices_.size(); id++) {
      auto& node = vertices_[id];
      std::cout << "[" << id << " " << node.GetId() << "]: { ";
      for (const auto dir : DirectionEnum::Iterator()) {
        for (const auto clade : SubsplitCladeEnum::Iterator()) {
          std::cout << "[ ";
          for (const auto adj_node_id : node.GetNeighbors(dir, clade)) {
            std::cout << "adj::" << adj_node_id << " ";
            // Orient the pair so the edge lookup always goes via the parent.
            auto parent = (dir == Direction::Leafward) ? node.GetId() : adj_node_id;
            auto child = (dir == Direction::Rootward) ? adj_node_id : node.GetId();
            auto result = vertices_.at(parent.value_).FindNeighbor(child);
            if (result.has_value()) {
              auto line_id = std::get<0>(result.value()).value_;
              std::cout << "line::" << line_id << " ";
            }
          }
          std::cout << " ]";
        }
      }
      std::cout << "} " << std::endl;
    }
  }

 private:
  template <typename>
  friend class GenericLinesView;
  template <typename>
  friend class GenericVerticesView;

  // Grows `data` if needed so index `id` is valid, then returns that slot.
  template <typename T, typename Id>
  static T& GetOrInsert(HostableVector<T>& data, Id id) {
    if (id.value_ >= data.size()) {
      data.resize(id.value_ + 1);
    }
    return data[id.value_];
  }

  HostableVector<DAGLineStorage> lines_;
  HostableVector<DAGVertex> vertices_;
};
| 24,166
|
C++
|
.h
| 651
| 32.583717
| 88
| 0.66614
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,085
|
bitset.hpp
|
phylovi_bito/src/bitset.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// A class to store bitsets in their various forms.
//
// This file started life as the RbBitSet class from RevBayes by Sebastian
// Hoehna, but has evolved a lot since then!
//
// For basic operations, I'm trying to follow the interface of std::bitset, though
// this class goes way beyond what std::bitset offers.
// Note that we can't use std::bitset because we don't know the size of the
// bitsets at compile time.
#pragma once
#include <algorithm>
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "sugar.hpp"
// Arbitrary-length bitset (size fixed at construction, not compile time),
// with additional clade/subsplit/PCSP interpretations layered on top.
class Bitset {
 public:
  // Convenience alias for a pair of bitsets.
  using BitsetPair = std::pair<Bitset, Bitset>;

  // Builds Bitset from boolean vector.
  explicit Bitset(std::vector<bool> value);
  // Fills entire Bitset of size `n` with `initial_value`.
  explicit Bitset(size_t n, bool initial_value = false);
  // Builds Bitset from string of "1" and "0"s.
  explicit Bitset(std::string bits_as_str);
  // Builds Bitset of size `n` with only indices in `bits_on` vector set to true.
  explicit Bitset(SizeVector bits_on, size_t n);
  // Builds empty Bitset.
  static Bitset EmptyBitset();

  // ** std::bitset Interface Methods
  // These methods are modeled after the std::bitset interface.

  // Get ith bit in Bitset.
  bool operator[](size_t i) const;
  // Get the number of bits in Bitset.
  size_t size() const;

  // Set given index to true.
  void set(size_t i, bool value = true);
  // Sets given index to false.
  void reset(size_t i);
  // Changes each bit to its complement.
  void flip();

  // Comparator:
  // Bitsets are sorted with respect to their binary representation.
  // (e.g. "010" < "101")
  // Note: There are alternative comparators in the Bitset class.
  static int Compare(const Bitset &bitset_a, const Bitset &bitset_b);
  int Compare(const Bitset &other) const;
  // All comparator operator behavior is consistent with std::bitset.
  bool operator==(const Bitset &x) const;
  bool operator!=(const Bitset &x) const;
  bool operator<(const Bitset &x) const;
  bool operator<=(const Bitset &x) const;
  bool operator>(const Bitset &x) const;
  bool operator>=(const Bitset &x) const;

  // All bitwise operator behavior is consistent with std::bitset.
  Bitset operator&(const Bitset &x) const;
  Bitset operator|(const Bitset &x) const;
  Bitset operator^(const Bitset &x) const;
  Bitset operator~() const;
  // Concatenation: the bits of x are appended after this bitset's bits.
  Bitset operator+(const Bitset &x) const;
  void operator&=(const Bitset &other);
  void operator|=(const Bitset &other);

  // Outputs Bitset string representation to stream.
  friend std::ostream &operator<<(std::ostream &os, const Bitset &bitset);

  // ** Bitset Methods
  // These methods are not from the std::bitset interface.

  // Special Constructors:
  // Make a bitset with only the specified entry turned on.
  static Bitset Singleton(size_t n, size_t which_on);

  // Sets entire bitset to false.
  void Zero();
  // Get underlying data vector.
  std::vector<bool> GetData();
  // Generates hash value from bitset.
  size_t Hash() const;
  // Outputs bitset as a string of "1" and "0"s.
  std::string ToString() const;
  // Outputs hash as hex string.
  std::string ToHashString(const size_t length = 16) const;
  // Outputs vector of all bit indices set to true.
  SizeVector ToVectorOfSetBits() const;
  // Are all of the bits 1?
  bool All() const;
  // Are any of the bits 1?
  bool Any() const;
  // Are all of the bits 0?
  bool None() const;
  // Get the number of 1s.
  size_t Count() const;
  // Is exactly one of the bits 1?
  bool IsSingleton() const;
  // Is this disjoint with the given bitset?
  bool IsDisjoint(const Bitset &other) const;
  // Take the minimum of the bitset and its complement.
  void Minorize();
  // Copy all of the bits from another bitset into this bitset, starting at
  // begin, and optionally flipping the bits as they get copied.
  void CopyFrom(const Bitset &other, size_t begin, bool flip);
  // If the bitset only has one bit on, then we return the location of that bit.
  // Otherwise, return nullopt.
  std::optional<uint32_t> SingletonOption() const;
  // Output as string of comma-separated indices.
  std::string ToVectorOfSetBitsAsString() const;
  // Returns a new Bitset with size equal to `idx_table`'s size. Each entry
  // of the new bitset is determined as follows:
  // - If the `idx_table` entry is an integer i, then the value is the ith
  // entry of `bitset`.
  // - If the entry is nullopt, then the value is False (i.e. zero).
  static Bitset Remap(const Bitset &bitset, const SizeOptionVector &idx_table);

  // ** Clade / MultiClade Methods
  // These methods require bitsets to represent "clades". A clade is an expression of a
  // subset of a taxon set. The size of the total taxon set is equal to the size of the
  // clade's bitset, with each bit index representing a inclusion/exclusion of a
  // specific member of that taxon set.
  //
  // There are bitset "types" composed of multiple clades (here, called a
  // MultiClade). Subsplits are composed of two clades and PCSPs are composed of three
  // clades.

  // Comparator: Clades are ordered with respect to the lexigraphical representation of
  // their taxon subset. (e.g. If two clades are "010" and "101", then their taxon
  // subsets are {b} and {a,c}. "b" > "a", therefore "010" > "101".) Note: Sorting by
  // taxon representation gives the precise opposite ordering to sorting by binary
  // representation (except in the case of zero/empty set!).
  static int CladeCompare(const Bitset &bitset_a, const Bitset &bitset_b);
  int CladeCompare(const Bitset &other) const;
  // For a specified number of clades, return the clade/taxon size.
  size_t MultiCladeGetCladeSize(const size_t clade_count) const;
  // For a specified number of clades, return a bitset of the ith clade.
  Bitset MultiCladeGetClade(const size_t which_clade, const size_t clade_count) const;
  // For a specified number of clades, return a string of "1" and "0" for each clade,
  // with clades separated by a "|".
  std::string MultiCladeToString(const size_t clade_count) const;

  // ** Subsplit Methods
  // These methods require bitset to represent "subsplits". Subsplits represent nodes
  // within the SubsplitDAG. A subsplit are composed of two equal-sized, disjoint
  // "clades", representing a fork in the DAG and the taxon sets descending from the
  // left and right sides of the fork. Clades are normally stored in a sorted order wrt
  // to their lexicographic taxon ordering: the smaller "left" clade stored in the
  // 0-position, and the larger "right" clade in the 1-position.

  enum class SubsplitClade : size_t { Left, Right, Unspecified };
  static const inline size_t SubsplitCladeCount = 2;
  // Enum helper: labels, iteration, and Left<->Right flipping for
  // SubsplitClade.
  class SubsplitCladeEnum
      : public EnumWrapper<SubsplitClade, size_t, SubsplitCladeCount,
                           SubsplitClade::Left, SubsplitClade::Right> {
   public:
    static inline const std::string Prefix = "SubsplitClade";
    static inline const Array<std::string> Labels = {{"Left", "Right"}};

    static SubsplitClade Opposite(const SubsplitClade clade) {
      switch (clade) {
        case SubsplitClade::Left:
          return SubsplitClade::Right;
        case SubsplitClade::Right:
          return SubsplitClade::Left;
        default:
          Failwith("Cannot get Opposite of Unspecified Clade");
      }
    }

    static std::string ToString(const SubsplitClade e) {
      std::stringstream ss;
      ss << Prefix << "::" << Labels[e];
      return ss.str();
    }
    friend std::ostream &operator<<(std::ostream &os, const SubsplitClade e) {
      os << ToString(e);
      return os;
    }
  };
  // Does not iterate over "unspecified" clade.
  using SubsplitCladeIterator =
      EnumIterator<SubsplitClade, SubsplitClade::Left, SubsplitClade::Right>;
  static SubsplitClade Opposite(const SubsplitClade clade) {
    return SubsplitCladeEnum::Opposite(clade);
  }

  // Constructors:
  // Each argument represents one of the clade of the subsplit.
  // Each clade follows the corresponding Bitset constructor.
  //
  // Build a Subsplit bitset out of a compatible pair of clades.
  static Bitset Subsplit(const Bitset &clade_0, const Bitset &clade_1);
  // Builds Clades from strings of "1" and "0" characters.
  static Bitset Subsplit(const std::string clade_0, const std::string clade_1);
  // Builds Clades of size `n` with only indices in the clade vectors set to true.
  static Bitset Subsplit(const SizeVector clade_0, const SizeVector clade_1,
                         const size_t n);
  // Given two arbitrarily ordered clades of same length, return a subsplit with the
  // clades in sorted order by taxon representation.
  static Bitset SubsplitFromUnorderedClades(const Bitset &clade_0,
                                            const Bitset &clade_1);

  // Special Constructors:
  // A "leaf subsplit" is a subsplit where one of the clades is equal
  // to the empty set (zero). In practice, these are allowed in only two cases: When
  // subsplit is a leaf or root. A leaf should only have a single member in its
  // non-empty clade, and a root should have the full taxon set in its non-empty clade.
  //
  // Make a "leaf subsplit" (pairs given nonempty_clade with an empty_clade).
  static Bitset LeafSubsplitOfNonemptyClade(const Bitset &nonempty_clade);
  // Make a "leaf subsplit" of a given parent subsplit. The left clade of
  // the parent subsplit must be non-empty and the right clade must be a singleton.
  static Bitset LeafSubsplitOfParentSubsplit(const Bitset &parent_subsplit);
  // Make the UCA (universal common ancestor) subsplit of the DAG root node with the
  // given taxon count. Since subsplit bitsets are always big-small, the DAG root node
  // subsplit consists of all 1s then 0s (e.g. 5 would return '11111|00000').
  static Bitset UCASubsplitOfTaxonCount(const size_t taxon_count);
  // Get the full rootsplit bitset out of a rootsplit half.
  // Note: the first half of the rootsplit bitset is always larger than the second.
  static Bitset RootsplitSubsplitOfClade(const Bitset &clade);

  // Comparator:
  // Subsplits are sorting on the following:
  // (1) The number of taxa in each of their subsplits.
  // (2) The std::bitset ordering of each of their respective unions.
  // (3) The std::bitset ordering of each of their left clades.
  static int SubsplitCompare(const Bitset &subsplit_a, const Bitset &subsplit_b);
  int SubsplitCompare(const Bitset &other) const;

  // Flip the order of the two clades of a subsplit.
  Bitset SubsplitRotate() const;
  // Sorts clades of subsplit so that they are ordered by their taxon representation.
  Bitset SubsplitSortClades() const;
  // Gets the size of each of each clade. This is the same as the size of the whole
  // taxon set.
  size_t SubsplitGetCladeSize() const;
  // Get clade according to its taxon ordering.
  // #350 why do we want these two overloads?
  Bitset SubsplitGetClade(const size_t which_clade) const;
  Bitset SubsplitGetClade(const SubsplitClade which_clade) const;
  // Output subsplit as string of "1" and "0" characters, with each clade separated by a
  // "|".
  std::string SubsplitToString() const;
  // Output subsplit to string as a comma-separated list of true bits positions, with
  // each clade separated by a "|".
  std::string SubsplitToVectorOfSetBitsAsString() const;
  // Output subsplit as each clade's hash, with each clade separated by "|".
  std::string SubsplitToHashString(const size_t length = 16) const;
  // Is this the subsplit of a leaf node?
  bool SubsplitIsLeaf() const;
  // Is this the UCA subsplit?
  bool SubsplitIsUCA() const;
  // Is this the subsplit of a rootsplit?
  bool SubsplitIsRootsplit() const;
  // Is this the left clade of the given subsplit?
  bool SubsplitIsLeftChildOf(const Bitset &parent) const;
  // Is this the right clade of the given subsplit?
  bool SubsplitIsRightChildOf(const Bitset &parent) const;
  // Get the union of the two clades.
  Bitset SubsplitCladeUnion() const;
  // Get whether the given child is the left or right child to the given parent.
  static SubsplitClade SubsplitIsChildOfWhichParentClade(const Bitset &parent,
                                                         const Bitset &child);
  // Check whether subsplits form a child/parent pair.
  static bool SubsplitIsParentChildPair(const Bitset &parent, const Bitset &child);
  // Check whether subsplits are adjacent/related (whether either is the parent of the
  // other).
  static bool SubsplitIsAdjacent(const Bitset &subsplit_a, const Bitset &subsplit_b);
  // Check whether subsplits are potentially related (ancestor/descendant) pair (union
  // of descendant is a subset of one of clades of ancestor).
  static bool SubsplitIsAncestorDescendantPair(const Bitset &ancestor,
                                               const Bitset &descendant,
                                               const SubsplitClade clade_type);
  // Check whether bitset represents valid Subsplit (contains two equal-sized,
  // disjoint clades).
  bool SubsplitIsValid() const;

  // ** PCSP methods
  // These functions require the bitset to be a "PCSP bitset" (parent-child subsplit
  // pair). PCSP represent edges between nodes within the SubsplitDAG. They are composed
  // of three equal-sized "clades": (0) sister clade of parent, (1) focal clade of
  // parent, (2) the right clade of the child. We define the "right" clade of a child
  // subsplit that has a bitset with the larger lexicographic representation. The
  // remaining clade are well-defined relative to the focal parent subsplit.
  //
  // For example, `100|011|001` is composed of the clades `100`, `011` and `001`.
  // If the taxa are x0, x1, and x2 then this means the parent subsplit is (A,
  // BC) with bitset `100|011`, and the child subsplit is (B, C) with bitset
  // `010|001.` Child_0 is the clade `001` and child_1 is the clade `010.`
  //
  // For rootsplit PCSPs where the parent subsplit is the DAG root node, the PCSP
  // is the sister clade (all 0s), the focal clade (all 1s), and "clade 0". For
  // example, `000111010` is the PCSP from the DAG root node to the rootsplit (AC, B).
  // See the unit tests at the bottom for more examples.

  // NOTE(review): unlike SubsplitCladeCount above, this is not declared const,
  // so it is mutable at runtime -- consider `static const inline`.
  static inline size_t PCSPCladeCount = 3;
  enum class PCSPClade : size_t { Sister, Focal, RightChild };
  using PCSPCladeIterator =
      EnumIterator<PCSPClade, PCSPClade::Sister, PCSPClade::RightChild>;

  // Constructors:
  // Build a PCSP bitset from a compatible parent-child pair of
  // Subsplit bitsets.
  static Bitset PCSP(const Bitset &parent_subsplit, const Bitset &child_subsplit);
  // Build a PCSP bitset from explicit sister, focal, and right child clades.
  static Bitset PCSP(const Bitset &sister_clade, const Bitset &focal_clade,
                     const Bitset &right_child_clade);
  // Builds sister, focal, and right child clades from strings of "1" and "0"
  // characters.
  static Bitset PCSP(const std::string sister_clade, const std::string focal_clade,
                     const std::string right_child_clade);

  // Special Constructors:
  // Make a PCSP from parent subsplit to child leaf subsplit. Asserts that the left-hand
  // clade of the parent subsplit is non-empty and that the right-hand clade is a
  // singleton. This leaf subsplit has parent subsplit on the left and all zeroes on the
  // right.
  static Bitset PCSPFromRightParentCladeToLeaf(const Bitset &parent_subsplit);
  // Given a rootsplit, get the PCSP connecting the DAG root node to that rootsplit
  // (e.g. '1100|0011' would return '0000|1111|0011').
  static Bitset PCSPFromUCAToRootsplit(const Bitset &rootsplit);

  // Output PCSP as string of "1" and "0" characters, with each clade separated by a
  // "|".
  std::string PCSPToString() const;
  // Output PCSP as each clade's hash, with each clade separated by a "|".
  std::string PCSPToHashString(const size_t length = 16) const;
  // Checks whether bitset represents a valid set of taxon clades for PCSP.
  bool PCSPIsValid() const;
  // Checks whether the PCSP child is a leaf subsplit.
  bool PCSPChildIsLeaf() const;
  // Sorts PCSP so that parent and child are arranged properly so that second clade is
  // the focal clade of the parent and third clade is the right side of the child.
  Bitset PCSPSortClades() const;
  // Do the sister and focal clades union to the whole taxon set?
  // Method excludes rootsplit PCSPs where sister and focal clades
  // also union to the whole taxon set.
  bool PCSPIsParentRootsplit() const;
  // Gets the size of each of each clade. This is the same as the size of the whole
  // taxon set.
  size_t PCSPGetCladeSize() const;
  // Get the ith clade of the PCSP.
  Bitset PCSPGetClade(const size_t which_clade) const;
  Bitset PCSPGetClade(const PCSPClade which_clade) const;
  // Get the parent subsplit of the PCSP.
  Bitset PCSPGetParentSubsplit() const;
  // Get the child subsplit of the PCSP.
  Bitset PCSPGetChildSubsplit() const;
  // Get the number of taxa in each side of the child subsplit.
  SizePair PCSPGetChildSubsplitTaxonCounts() const;
  // Checks if PCSP are adjacent. In other words, PCSPs share a common node.
  static bool PCSPIsParentChildPair(const Bitset &parent_pcsp,
                                    const Bitset &child_pcsp);

 protected:
  // Vector of bits.
  std::vector<bool> value_;
};
// Role-documenting aliases: a Subsplit or PCSP is just a Bitset interpreted
// through the corresponding method family above.
using Subsplit = Bitset;
using PCSP = Bitset;
// Hoist the nested Bitset types to namespace scope for brevity at call sites.
using SubsplitClade = Bitset::SubsplitClade;
using SubsplitCladeEnum = Bitset::SubsplitCladeEnum;
using PCSPClade = Bitset::PCSPClade;
// Specializing std::hash and std::equal_to lets Bitset serve directly as a
// key in std::unordered_map / std::unordered_set.
// https://en.cppreference.com/w/cpp/container/unordered_map
namespace std {

template <>
struct hash<Bitset> {
  size_t operator()(const Bitset &bitset) const { return bitset.Hash(); }
};

template <>
struct equal_to<Bitset> {
  bool operator()(const Bitset &a, const Bitset &b) const { return a == b; }
};

}  // namespace std
#ifdef DOCTEST_LIBRARY_INCLUDED
// Core Bitset behavior. Fix: removed two exact duplicate assertions
// (CHECK_LT("0100","0110") and CHECK_GT("0110","0100") each appeared twice —
// copy-paste redundancy).
TEST_CASE("Bitset") {
  // Equivalent constructions: bit-string vs vector-of-set-indices.
  Bitset bit_from_str = Bitset("00110100");
  Bitset bit_from_sizevec = Bitset({2, 3, 5}, 8);
  CHECK_EQ(bit_from_str, bit_from_sizevec);
  // Element access.
  Bitset a("1100");
  CHECK_EQ(a[2], false);
  CHECK_EQ(a[1], true);
  // set / reset.
  Bitset build_up(4);
  build_up.set(1);
  build_up.set(3);
  CHECK_EQ(build_up, Bitset("0101"));
  Bitset strip_down(4, true);
  strip_down.reset(0);
  strip_down.reset(2);
  CHECK_EQ(strip_down, Bitset("0101"));
  CHECK_EQ(a.size(), 4);
  // Comparators follow the binary-representation ordering.
  CHECK_EQ(Bitset("1100"), Bitset("1100"));
  CHECK_NE(Bitset("1100"), Bitset("0100"));
  CHECK_LT(Bitset("0100"), Bitset("0110"));
  CHECK_LT(Bitset("0010"), Bitset("0100"));
  CHECK_LE(Bitset("0010"), Bitset("0100"));
  CHECK_LE(Bitset("1100"), Bitset("1100"));
  CHECK_GT(Bitset("0110"), Bitset("0100"));
  CHECK_GT(Bitset("0100"), Bitset("0010"));
  CHECK_GE(Bitset("0100"), Bitset("0010"));
  CHECK_GE(Bitset("1100"), Bitset("1100"));
  // Bitwise operators; operator+ is concatenation.
  CHECK_EQ((Bitset("1100") & Bitset("1010")), Bitset("1000"));
  CHECK_EQ((Bitset("1100") | Bitset("1010")), Bitset("1110"));
  CHECK_EQ((Bitset("1100") ^ Bitset("1010")), Bitset("0110"));
  CHECK_EQ(~Bitset("1010"), Bitset("0101"));
  CHECK_EQ(Bitset("101") + Bitset("011"), Bitset("101011"));
  CHECK_EQ(std::min(Bitset("1100"), Bitset("1010")), Bitset("1010"));
  a &= Bitset("0110");
  CHECK_EQ(a, Bitset("0100"));
  // All / Any / None.
  CHECK_EQ(a.All(), false);
  CHECK_EQ(Bitset(4, true).All(), true);
  CHECK_EQ(a.Any(), true);
  CHECK_EQ(Bitset(4, false).Any(), false);
  CHECK_EQ(a.None(), false);
  CHECK_EQ(Bitset(4, false).None(), true);
  // flip, then Minorize (which is idempotent).
  a.flip();
  CHECK_EQ(a, Bitset("1011"));
  a.Minorize();
  CHECK_EQ(a, Bitset("0100"));
  a.Minorize();
  CHECK_EQ(a, Bitset("0100"));
  // CopyFrom at both offsets, with and without flipping.
  a.CopyFrom(Bitset("10"), 0, false);
  CHECK_EQ(a, Bitset("1000"));
  a.CopyFrom(Bitset("10"), 0, true);
  CHECK_EQ(a, Bitset("0100"));
  a.CopyFrom(Bitset("10"), 2, false);
  CHECK_EQ(a, Bitset("0110"));
  a.CopyFrom(Bitset("10"), 2, true);
  CHECK_EQ(a, Bitset("0101"));
  // Singleton detection.
  auto singleton = Bitset("0010");
  CHECK(singleton.IsSingleton());
  CHECK_EQ(*singleton.SingletonOption(), 2);
  // Count and set-bit serialization.
  CHECK_EQ(Bitset("0000").Count(), 0);
  CHECK_EQ(Bitset("0100").Count(), 1);
  CHECK_EQ(Bitset("011101").Count(), 4);
  CHECK_EQ(Bitset("1001").ToVectorOfSetBitsAsString(), "0,3");
  CHECK_EQ(Bitset("0000").ToVectorOfSetBitsAsString(), "");
}
// Exercises the clade/subsplit/PCSP interpretations of Bitset.
TEST_CASE("Bitset: Clades, Subsplits, PCSPs") {
  auto p = Bitset("000111");
  // Subsplit: 000|111
  CHECK_EQ(p.SubsplitGetClade(SubsplitClade::Left), Bitset("000"));
  CHECK_EQ(p.SubsplitGetClade(SubsplitClade::Right), Bitset("111"));
  // Edge: 00|01|11
  CHECK_EQ(p.PCSPGetClade(PCSPClade::Sister), Bitset("00"));
  CHECK_EQ(p.PCSPGetClade(PCSPClade::Focal), Bitset("01"));
  CHECK_EQ(p.PCSPGetClade(PCSPClade::RightChild), Bitset("11"));

  // Subsplit structure: union, rotation, serialization.
  CHECK_EQ(Bitset("11001010").SubsplitCladeUnion(), Bitset("1110"));
  CHECK_EQ(Bitset("10011100").SubsplitRotate(), Bitset("11001001"));
  CHECK_EQ(Bitset("010101").SubsplitToVectorOfSetBitsAsString(), "1|0,2");

  // Parent/child clade membership.
  CHECK_EQ(Bitset("101010").SubsplitIsLeftChildOf(Bitset("111000")), true);
  // #350 commented out code
  // CHECK_EQ(Bitset::SubsplitIsChildOfWhichParentClade(Bitset("111000"),
  // Bitset("101010")), true);
  CHECK_EQ(Bitset("00100001").SubsplitIsRightChildOf(Bitset("11000011")), true);
  // CHECK_EQ(Bitset::SubsplitIsChildOfWhichParentClade(Bitset("000111"),
  // Bitset("101010")), false);
  CHECK_EQ(Bitset("010001").SubsplitIsLeftChildOf(Bitset("110001")), false);
  CHECK_EQ(Bitset("010001").SubsplitIsRightChildOf(Bitset("01000011")), false);
  // Should throw because Bitsets can't be divided into equal-sized clades.
  CHECK_THROWS(Bitset("11010").SubsplitIsLeftChildOf(Bitset("10101")));
  CHECK_THROWS(Bitset("11010").SubsplitIsRightChildOf(Bitset("10101")));

  // Rootsplit detection.
  CHECK_EQ(Bitset("101010").SubsplitIsRootsplit(), true);
  CHECK_EQ(Bitset("111000").SubsplitIsRootsplit(), false);
  CHECK_EQ(Bitset("11000001").SubsplitIsRootsplit(), false);

  // PCSP validity and leaf/rootsplit classification.
  CHECK_EQ(Bitset("011101").PCSPIsValid(), false);
  CHECK_EQ(Bitset("000111").PCSPIsValid(), false);
  CHECK_EQ(Bitset("100100").PCSPIsValid(), false);
  CHECK_EQ(Bitset("100011001").PCSPIsValid(), true);
  CHECK_EQ(Bitset("100011001").PCSPChildIsLeaf(), false);
  CHECK_EQ(Bitset("100011000").PCSPChildIsLeaf(), true);
  CHECK_EQ(Bitset("000111010").PCSPIsParentRootsplit(), false);
  CHECK_EQ(Bitset("000111000100").PCSPIsParentRootsplit(), false);
  CHECK_EQ(Bitset("101010000").PCSPIsParentRootsplit(), true);

  // Extracting parent/child subsplits from a PCSP.
  CHECK_EQ(Bitset("100011001").PCSPGetParentSubsplit(), Bitset("100011"));
  CHECK_EQ(Bitset("011100001").PCSPGetParentSubsplit(), Bitset("100011"));
  CHECK_EQ(Bitset("100011001").PCSPGetChildSubsplit(), Bitset("010001"));
  CHECK_EQ(Bitset("100001110001").PCSPGetChildSubsplit(), Bitset("01100001"));
  CHECK_EQ(Bitset("100001110001").PCSPGetChildSubsplitTaxonCounts(), SizePair({1, 2}));
  CHECK_EQ(Bitset("100000111100101").PCSPGetChildSubsplitTaxonCounts(),
           SizePair({2, 2}));

  // Construction helpers and their validity checks.
  CHECK_EQ(Bitset::Singleton(4, 2), Bitset("0010"));
  CHECK_EQ(Bitset("100010"), Bitset::Subsplit(Bitset("100"), Bitset("010")));
  CHECK_EQ(Bitset("110001"), Bitset::Subsplit(Bitset("001"), Bitset("110")));
  // Invalid clade pair.
  CHECK_THROWS(Bitset::Subsplit(Bitset("1100"), Bitset("001")));
  CHECK_THROWS(Bitset::Subsplit(Bitset("111"), Bitset("001")));
  CHECK_EQ(Bitset("000110010"), Bitset::PCSP(Bitset("110000"), Bitset("100010")));
  CHECK_EQ(Bitset("110001000"), Bitset::PCSP(Bitset("110001"), Bitset("001000")));
  // Invalid parent-child pair.
  CHECK_THROWS(Bitset::PCSP(Bitset("110001"), Bitset("010001")));
  CHECK_THROWS(Bitset::PCSP(Bitset("11000101"), Bitset("010001")));
  CHECK_THROWS(Bitset::PCSP(Bitset("110001"), Bitset("110100")));
  CHECK_EQ(Bitset::RootsplitSubsplitOfClade(Bitset("0011")), Bitset("11000011"));
  CHECK_EQ(Bitset::PCSPFromUCAToRootsplit(Bitset("11000011")), Bitset("000011110011"));

  // Leaf subsplits and leaf PCSPs.
  CHECK_EQ(Bitset("010000").SubsplitIsLeaf(), true);
  CHECK_EQ(Bitset("010010").SubsplitIsLeaf(), false);
  CHECK_EQ(Bitset("111000").SubsplitIsLeaf(), false);
  CHECK_EQ(Bitset::LeafSubsplitOfNonemptyClade(Bitset("010")), Bitset("010000"));
  CHECK_EQ(Bitset::LeafSubsplitOfParentSubsplit(Bitset("100001")), Bitset("001000"));
  CHECK_THROWS(Bitset::LeafSubsplitOfParentSubsplit(Bitset("100011")));
  CHECK_EQ(Bitset::PCSPFromRightParentCladeToLeaf(Bitset("100001")),
           Bitset("100001000"));
  CHECK_THROWS(Bitset::PCSPFromRightParentCladeToLeaf(Bitset("0000110")));
  CHECK_THROWS(Bitset::PCSPFromRightParentCladeToLeaf(Bitset("100101")));

  // Restrict a bitset.
  CHECK_EQ(Bitset::Remap(Bitset("10101010101"), {0, 2, 4, 6, 8, 10}), Bitset("111111"));
  // If we apply this remap 3 times we should get back to where we started.
  SizeOptionVector rotate120{6, 7, 8, 0, 1, 2, 3, 4, 5};
  auto to_rotate = Bitset("110010100");
  CHECK_EQ(Bitset::Remap(Bitset::Remap(Bitset::Remap(to_rotate, rotate120), rotate120),
                         rotate120),
           to_rotate);
  // "Lift" a bitset.
  CHECK_EQ(Bitset::Remap(Bitset("11"), {0, std::nullopt, 1}), Bitset("101"));
}
TEST_CASE("Bitset: Subsplit Sort") {
  // Exercises the three-tier subsplit ordering used by SubsplitCompare:
  // (1) total bit count, then (2) the union of the two clades, then
  // (3) the sorted clades themselves.
  Bitset bitset_a = Bitset::Subsplit("01001", "00100");
  // Tier 0: a subsplit compares equal to itself.
  CHECK_MESSAGE(Bitset::SubsplitCompare(bitset_a, bitset_a) == 0,
                "Equality: bitset_a should be equal to itself");
  // Count of bitset_a (3) comes before count of bitset_b (4).
  Bitset bitset_b = Bitset::Subsplit("00100", "01011");
  CHECK_MESSAGE(
      Bitset::SubsplitCompare(bitset_a, bitset_b) < 0,
      "Bit Count: bitset_a should be smaller/earlier sorted value than bitset_b.");
  // Union of bitset_a ("01101") comes before union of bitset_c ("11100"), counts are
  // equal.
  Bitset bitset_c = Bitset::Subsplit("01000", "10100");
  CHECK_MESSAGE(
      Bitset::SubsplitCompare(bitset_a, bitset_c) < 0,
      "Union: bitset_a should be smaller/earlier sorted value than bitset_c.");
  // Sorted clade of bitset_a ("01001") comes before sorted clade of bitset_d ("01100"),
  // counts and unions are equal.
  Bitset bitset_d = Bitset::Subsplit("00001", "01100");
  CHECK_MESSAGE(
      Bitset::SubsplitCompare(bitset_a, bitset_d) < 0,
      "Sorted Clade: bitset_a should be smaller/earlier sorted value than bitset_d.");
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 26,503
|
C++
|
.h
| 524
| 46.685115
| 88
| 0.713062
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,086
|
reindexer.hpp
|
phylovi_bito/src/reindexer.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Reindexers are argsorted index arrays that describe reordering of data due to
// modifications of data vectors, such as when adding NNI pairs to the SubsplitDAG.
// These reindexers can then be used to reorder associated data arrays whose indices
// correspond the ordering of SubsplitDAG data arrays, such as with the node or edge
// arrays.
//
// A reindexer is a one-to-one function that maps from an old indexing scheme to a new
// indexing scheme. In other words, if old index `i` maps to new index `j`, then
// reindexer.GetNewIndexByOldIndex(`i`) = `j`. This is implemented by an underlying
// SizeVector. For example, if old_vector = [A, B, C] and reindexer = [1, 2, 0], then
// new_vector = [C, A, B]. Note that old_vector and reindexer must have the same size.
#pragma once
#include <numeric>
#include "eigen_sugar.hpp"
#include "sugar.hpp"
// A Reindexer is a permutation mapping old indices to new indices: if old index
// `i` maps to new index `j`, then GetNewIndexByOldIndex(i) == j.
class Reindexer {
 public:
  Reindexer() : data_(){};
  // Constructs a zero-filled (NOT identity) reindexer of the given size.
  Reindexer(size_t size) : data_(size){};
  Reindexer(SizeVector data) : data_(std::move(data)){};

  // ** Special Constructors
  // For each position in an identity reindexer, reindexer[`i`] = `i`.
  // E.g. for size = 5, reindexer = [0, 1, 2, 3, 4].
  static Reindexer IdentityReindexer(const size_t size);

  friend bool operator==(const Reindexer &lhs, const Reindexer &rhs) {
    return lhs.GetData() == rhs.GetData();
  }
  friend bool operator!=(const Reindexer &lhs, const Reindexer &rhs) {
    return lhs.GetData() != rhs.GetData();
  }

  size_t size() const { return data_.size(); }
  void reserve(const size_t size) { data_.reserve(size); }
  // Set the mapping old_index -> new_index. Throws (std::out_of_range) if
  // old_index is out of bounds.
  void SetReindex(const size_t old_index, const size_t new_index) {
    data_.at(old_index) = new_index;
  }
  // Find mapped new/output index corresponding to given old/input index.
  size_t GetNewIndexByOldIndex(const size_t old_index) const {
    return data_.at(old_index);
  }
  // Find mapped old/input index corresponding to new/output index. Via linear search.
  // NOTE: returns size() when new_index does not occur in the reindexer.
  size_t GetOldIndexByNewIndex(const size_t new_index) const {
    return size_t(std::find(GetData().begin(), GetData().end(), new_index) -
                  GetData().begin());
  }
  // Add new index to end of reindexer (mapping the next old index to itself).
  void AppendNewIndex() { data_.push_back(size()); }
  void AppendNewIndex(const size_t new_index) { data_.push_back(new_index); }
  // Get underlying index vector from reindexer.
  const SizeVector &GetData() const { return data_; }
  SizeVector &GetData() { return data_; }
  // Check if reindexer is in a valid state (contains every index exactly once,
  // ranging from 0 to reindexer_size - 1).
  bool IsValid(std::optional<size_t> length = std::nullopt) const;

  // ** Modification Operations
  // Resize reindexer. If new indices are created, pad them with identity.
  void Resize(size_t new_size);
  // Builds new inverse reindexer of a given reindexer, such that input->output becomes
  // output->input.
  Reindexer InvertReindexer() const;
  // Builds new reindexer by removing an element identified by its index and shifting
  // other idx to maintain valid reindexer.
  Reindexer RemoveOldIndex(const size_t remove_old_idx) const;
  Reindexer RemoveNewIndex(const size_t remove_new_idx) const;
  // Builds a reindexer composing apply_reindexer onto a base_reindexer. Resulting
  // reindexer contains both reindexing operations combined.
  Reindexer ComposeWith(const Reindexer &apply_reindexer);
  // In a given reindexer, take the old_id in the reindexer and reassign it to the
  // new_id and shift over the ids strictly between old_id and new_id to ensure that the
  // reindexer remains valid. For example if old_id = 1 and new_id = 4, this method
  // would shift 1 -> 4, 4 -> 3, 3 -> 2, and 2 -> 1.
  void ReassignAndShift(const size_t old_id, const size_t new_id);

  // ** Apply Operations
  // Reindexes the first `length` entries of the given data vector according to the
  // reindexer; entries past `length` are copied through unchanged.
  template <typename VectorType>
  static VectorType Reindex(VectorType &old_vector, const Reindexer &reindexer,
                            std::optional<size_t> length = std::nullopt) {
    size_t reindex_size = (length.has_value() ? length.value() : old_vector.size());
    Assert(
        size_t(old_vector.size()) >= reindex_size,
        "The vector must be at least as long as reindex_size in Reindexer::Reindex.");
    Assert(size_t(reindexer.size()) >= reindex_size,
           "The reindexer must be at least as long as reindex_size in "
           "Reindexer::Reindex.");
    Assert(reindexer.IsValid(reindex_size),
           "Reindexer must be valid in Reindexer::Reindex.");
    VectorType new_vector(old_vector.size());
    // Data to reindex.
    for (size_t idx = 0; idx < reindex_size; idx++) {
      new_vector[reindexer.GetNewIndexByOldIndex(idx)] = std::move(old_vector[idx]);
    }
    // Data to copy over.
    for (size_t idx = reindex_size; idx < size_t(new_vector.size()); idx++) {
      new_vector[idx] = std::move(old_vector[idx]);
    }
    return new_vector;
  };
  // Reindexes the given data vector concatenated with additional data values.
  template <typename VectorType>
  static VectorType Reindex(VectorType &old_vector, const Reindexer &reindexer,
                            VectorType &additional_values) {
    Assert(reindexer.IsValid(), "Reindexer must be valid in Reindexer::Reindex.");
    Assert(old_vector.size() + additional_values.size() ==
               static_cast<Eigen::Index>(reindexer.size()),
           "Size of the vector and additional values must add up to the reindexer size "
           "in Reindexer::Reindex.");
    VectorType new_vector(reindexer.size());
    // Data to reindex.
    for (Eigen::Index idx = 0; idx < old_vector.size(); idx++) {
      new_vector[reindexer.GetNewIndexByOldIndex(idx)] = std::move(old_vector[idx]);
    }
    // Data to copy over: additional values occupy old indices past old_vector's end.
    for (Eigen::Index idx = 0; idx < additional_values.size(); idx++) {
      new_vector[reindexer.GetNewIndexByOldIndex(old_vector.size() + idx)] =
          std::move(additional_values[idx]);
    }
    return new_vector;
  };
  // Reindex data vector in-place. Expects VectorType to have `operator[]` accessor and
  // `size()`. `temp1` and `temp2` are caller-provided scratch values.
  template <typename VectorType, typename DataType>
  static void ReindexInPlace(VectorType &data_vector, const Reindexer &reindexer,
                             size_t length, DataType &temp1, DataType &temp2) {
    Assert(size_t(data_vector.size()) >= length,
           "data_vector wrong size for Reindexer::ReindexInPlace.");
    Assert(size_t(reindexer.size()) >= length,
           "reindexer wrong size for Reindexer::ReindexInPlace.");
    BoolVector updated_idx = BoolVector(length, false);
    for (size_t i = 0; i < length; i++) {
      size_t old_idx = i;
      size_t new_idx = reindexer.GetNewIndexByOldIndex(i);
      if (old_idx == new_idx) {
        updated_idx[old_idx] = true;
        continue;
      }
      // Because reindexing is one-to-one function, starting at any given index in the
      // the vector, if we follow the chain of remappings from each old index to its new
      // index, we will eventually form a cycle that returns to the initial old index.
      // This avoid allocating a second data array to perform the reindex, as only two
      // temporary values are needed. Only a boolean array is needed to check for
      // already updated indexes.
      bool is_current_node_updated = updated_idx[new_idx];
      temp1 = data_vector[old_idx];
      while (is_current_node_updated == false) {
        // copy data at old_idx to new_idx, and store data at new_idx in temporary.
        temp2 = data_vector[new_idx];
        data_vector[new_idx] = temp1;
        temp1 = temp2;
        // update to next idx in cycle.
        updated_idx[new_idx] = true;
        old_idx = new_idx;
        new_idx = reindexer.GetNewIndexByOldIndex(old_idx);
        is_current_node_updated = updated_idx[new_idx];
      }
    }
  };
  // Convenience overload of ReindexInPlace that supplies its own
  // default-constructed scratch values.
  template <typename VectorType, typename DataType>
  static void ReindexInPlace(VectorType &data_vector, const Reindexer &reindexer,
                             size_t length) {
    DataType temp1, temp2;
    Reindexer::ReindexInPlace(data_vector, reindexer, length, temp1, temp2);
  }
  // Remaps each of the ids in the vector according to the reindexer.
  template <typename DataType, typename VectorType = std::vector<DataType>>
  static void RemapIdVector(VectorType &&data_vec, const Reindexer &reindexer) {
    Assert(reindexer.IsValid(), "Reindexer must be valid in Reindexer::RemapIdVector.");
    for (const auto id : data_vec) {
      Assert(size_t(id) < reindexer.size(),
             "The vector cannot contain an id out of bounds of the reindexer in "
             "Reindexer::RemapIdVector.");
    }
    // Capture the reindexer by reference: capturing by value would deep-copy the
    // underlying SizeVector on every call.
    std::transform(data_vec.begin(), data_vec.end(), data_vec.begin(),
                   [&reindexer](DataType &old_idx) {
                     return DataType(reindexer.GetNewIndexByOldIndex(size_t(old_idx)));
                   });
  };

  // ** I/O
  friend std::ostream &operator<<(std::ostream &os, const Reindexer &reindexer) {
    os << reindexer.GetData();
    return os;
  };

 private:
  SizeVector data_;
};
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("Reindexer: IdentityReindexer") {
  // The identity reindexer of size 10 maps every index to itself.
  const Reindexer expected_identity({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
  CHECK_EQ(Reindexer::IdentityReindexer(10), expected_identity);
}
TEST_CASE("IsValidReindexer") {
  // A valid reindexer is a permutation: every index in [0, size) occurs exactly once.
  // Index appears more than once.
  CHECK_FALSE(Reindexer({1, 3, 0, 0}).IsValid());
  // Missing an index and/or index is out of range.
  CHECK_FALSE(Reindexer({1, 3, 4, 2}).IsValid());
  // Valid reindexer.
  CHECK(Reindexer({1, 3, 0, 2}).IsValid());
}
TEST_CASE("Reindexer: Reindex") {
  // Check that Reindex throws if the given vector and reindexer have different
  // sizes.
  SizeVector old_size_vector{7, 8, 9};
  Reindexer reindexer({2, 0, 3, 1});
  CHECK_THROWS(Reindexer::Reindex(old_size_vector, reindexer));
  // Check that Reindex returns correctly. The element at old index i lands at new
  // index reindexer[i]: 7 -> slot 2, 8 -> slot 0, 9 -> slot 1, giving {8, 9, 7}.
  reindexer = Reindexer({2, 0, 1});
  SizeVector new_size_vector = Reindexer::Reindex(old_size_vector, reindexer);
  SizeVector correct_new_size_vector{8, 9, 7};
  CHECK_EQ(new_size_vector, correct_new_size_vector);
  // Check that Reindex also works with EigenVectorXd and additional values.
  // The additional values (10, 11) occupy old indices 3 and 4 of the reindexer.
  EigenVectorXd old_eigen_vector(3);
  old_eigen_vector << 7, 8, 9;
  EigenVectorXd additional_values(2);
  additional_values << 10, 11;
  reindexer = Reindexer({2, 4, 0, 3, 1});
  EigenVectorXd new_eigen_vector =
      Reindexer::Reindex(old_eigen_vector, reindexer, additional_values);
  EigenVectorXd correct_new_eigen_vector(5);
  correct_new_eigen_vector << 9, 11, 7, 10, 8;
  CHECK_EQ(new_eigen_vector, correct_new_eigen_vector);
}
TEST_CASE("Reindexer: InvertReindexer") {
  // Inverting swaps the roles of old and new indices; inverting twice must
  // reproduce the original reindexer.
  Reindexer original({1, 3, 0, 2});
  const Reindexer inverted = original.InvertReindexer();
  CHECK_EQ(inverted, Reindexer({2, 0, 3, 1}));
  CHECK_EQ(inverted.InvertReindexer(), Reindexer({1, 3, 0, 2}));
}
TEST_CASE("Reindexer: RemapIdVector") {
  // Check that Reindex throws if the given vector has an index out of bounds of the
  // reindexer (id 5 does not fit a size-4 reindexer).
  SizeVector size_vector{3, 5};
  Reindexer reindexer({2, 0, 3, 1});
  CHECK_THROWS(Reindexer::RemapIdVector<size_t>(size_vector, reindexer));
  // Check that RemapIdVector returns correctly: each id is replaced by its mapped
  // value, so 3 -> reindexer[3] = 1 and 5 -> reindexer[5] = 4.
  size_vector = {3, 5};
  reindexer = Reindexer({2, 0, 3, 1, 6, 4, 5});
  Reindexer::RemapIdVector<size_t>(size_vector, reindexer);
  SizeVector correct_size_vector = {1, 4};
  CHECK_EQ(size_vector, correct_size_vector);
}
TEST_CASE("Reindexer: ReassignAndShift") {
  // ReassignAndShift(old_id, new_id) remaps old_id to new_id and shifts the ids
  // strictly between them by one so the reindexer stays a valid permutation.
  // Check that ReassignAndShift returns correctly when old_id > new_id.
  Reindexer reindexer({0, 1, 2, 3, 4, 5, 6});
  reindexer.ReassignAndShift(4, 1);
  Reindexer correct_reindexer({0, 2, 3, 4, 1, 5, 6});
  CHECK_EQ(reindexer, correct_reindexer);
  // Subsequent calls compose on the already-modified reindexer.
  reindexer.ReassignAndShift(5, 2);
  correct_reindexer = Reindexer({0, 3, 4, 5, 1, 2, 6});
  CHECK_EQ(reindexer, correct_reindexer);
  reindexer.ReassignAndShift(1, 3);
  correct_reindexer = Reindexer({0, 2, 4, 5, 3, 1, 6});
  CHECK_EQ(reindexer, correct_reindexer);
  // Check that ReassignAndShift returns correctly when old_id = new_id (a no-op).
  reindexer = Reindexer({1, 0, 4, 6, 5, 3, 2});
  reindexer.ReassignAndShift(4, 4);
  correct_reindexer = Reindexer({1, 0, 4, 6, 5, 3, 2});
  CHECK_EQ(reindexer, correct_reindexer);
  // Check that ReassignAndShift returns correctly when old_id < new_id.
  reindexer = Reindexer({6, 0, 4, 1, 5, 3, 2});
  reindexer.ReassignAndShift(1, 5);
  correct_reindexer = Reindexer({6, 0, 3, 5, 4, 2, 1});
  CHECK_EQ(reindexer, correct_reindexer);
}
TEST_CASE("Reindexer: ComposeWith") {
  // Starting from identity, composing a full reversal and then a pairwise swap
  // should yield a single reindexer combining both reorderings.
  Reindexer composed = Reindexer::IdentityReindexer(6);
  const Reindexer reversal({5, 4, 3, 2, 1, 0});
  const Reindexer pair_swap({1, 0, 3, 2, 5, 4});
  composed = composed.ComposeWith(reversal);
  composed = composed.ComposeWith(pair_swap);
  CHECK_EQ(composed, Reindexer({4, 5, 2, 3, 0, 1}));
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 14,087
|
C++
|
.h
| 286
| 43.461538
| 89
| 0.680207
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,087
|
fat_beagle.hpp
|
phylovi_bito/src/fat_beagle.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include <memory>
#include <queue>
#include <utility>
#include <vector>
#include "beagle_accessories.hpp"
#include "phylo_flags.hpp"
#include "phylo_model.hpp"
#include "phylo_gradient.hpp"
#include "rooted_tree_collection.hpp"
#include "site_pattern.hpp"
#include "stick_breaking_transform.hpp"
#include "task_processor.hpp"
#include "unrooted_tree_collection.hpp"
// FatBeagle bundles a BEAGLE instance with a PhyloModel, exposing likelihood and
// gradient computations for rooted and unrooted trees.
class FatBeagle {
 public:
  using PackedBeagleFlags = long;

  // This constructor makes the beagle_instance_
  FatBeagle(const PhyloModelSpecification &specification,
            const SitePattern &site_pattern,
            const PackedBeagleFlags beagle_preference_flags, bool use_tip_states);
  ~FatBeagle();
  // Delete (copy + move) x (constructor + assignment) because FatBeagle manages an
  // external resource (a BEAGLE instance).
  FatBeagle(const FatBeagle &) = delete;
  FatBeagle(const FatBeagle &&) = delete;
  FatBeagle &operator=(const FatBeagle &) = delete;
  FatBeagle &operator=(const FatBeagle &&) = delete;

  const BlockSpecification &GetPhyloModelBlockSpecification() const;
  const PackedBeagleFlags &GetBeagleFlags() const { return beagle_flags_; };

  void SetParameters(const EigenVectorXdRef param_vector);
  void SetRescaling(const bool rescaling) { rescaling_ = rescaling; }

  double LogLikelihood(const UnrootedTree &tree,
                       std::optional<PhyloFlags> flags = std::nullopt) const;
  // This override performs a "classical" log likelihood calculation of a rooted tree
  // considered as an unrooted tree with no time-tree extras.
  double UnrootedLogLikelihood(const RootedTree &tree,
                               std::optional<PhyloFlags> flags = std::nullopt) const;
  double LogLikelihood(const RootedTree &tree,
                       std::optional<PhyloFlags> flags = std::nullopt) const;
  // Compute first derivative of the log likelihood with respect to each branch
  // length, as a vector of first derivatives indexed by node id.
  PhyloGradient Gradient(const UnrootedTree &tree,
                         std::optional<PhyloFlags> flags = std::nullopt) const;
  PhyloGradient Gradient(const RootedTree &tree,
                         std::optional<PhyloFlags> flags = std::nullopt) const;

  // ** Static Methods:
  // We can pass these static methods to FatBeagleParallelize.
  static double StaticUnrootedLogLikelihood(
      const FatBeagle *fat_beagle, const UnrootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  // This override performs a "classical" log likelihood calculation of a rooted tree
  // considered as an unrooted tree with no time-tree extras.
  static double StaticUnrootedLogLikelihoodOfRooted(
      const FatBeagle *fat_beagle, const RootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  static double StaticRootedLogLikelihood(
      const FatBeagle *fat_beagle, const RootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  static double StaticLogDetJacobianHeightTransform(
      const FatBeagle *fat_beagle, const RootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  static PhyloGradient StaticUnrootedGradient(
      const FatBeagle *fat_beagle, const UnrootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  static PhyloGradient StaticRootedGradient(
      const FatBeagle *fat_beagle, const RootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);
  static DoubleVector StaticGradientLogDeterminantJacobian(
      const FatBeagle *fat_beagle, const RootedTree &in_tree,
      std::optional<PhyloFlags> flags = std::nullopt);

  // Signature shared by the static tree-evaluation functions above.
  template <class TOut, class TTree>
  using StaticTreeFunction =
      std::function<TOut(const FatBeagle *, const TTree &, std::optional<PhyloFlags>)>;

  template <typename TTree>
  std::vector<double> SubstitutionModelGradientFiniteDifference(
      StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
      const TTree &tree, SubstitutionModel *subst_model,
      const std::string &parameter_key, EigenVectorXd param_vector, double delta,
      std::optional<PhyloFlags> flags = std::nullopt) const;
  template <typename TTree>
  std::vector<double> SubstitutionModelGradientFiniteDifference(
      StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
      const TTree &tree, SubstitutionModel *subst_model,
      const std::string &parameter_key, EigenVectorXd param_vector, double delta,
      const Transform &transform, std::optional<PhyloFlags> flags = std::nullopt) const;
  template <typename TTree>
  DoubleVectorPair SubstitutionModelGradient(
      StaticTreeFunction<double, TTree> f, const FatBeagle *fat_beagle,
      const TTree &tree, std::optional<PhyloFlags> flags = std::nullopt) const;

 private:
  using BeagleInstance = int;
  using BeagleOperationVector = std::vector<BeagleOperation>;

  std::unique_ptr<PhyloModel> phylo_model_;
  bool rescaling_;
  BeagleInstance beagle_instance_;
  PackedBeagleFlags beagle_flags_;
  int pattern_count_;
  bool use_tip_states_;

  std::pair<BeagleInstance, PackedBeagleFlags> CreateInstance(
      const SitePattern &site_pattern, PackedBeagleFlags beagle_preference_flags);
  void SetTipStates(const SitePattern &site_pattern);
  void SetTipPartials(const SitePattern &site_pattern);
  void UpdateSiteModelInBeagle();
  void UpdateSubstitutionModelInBeagle() const;
  void UpdatePhyloModelInBeagle();

  double LogLikelihoodInternals(const Node::NodePtr topology,
                                const std::vector<double> &branch_lengths) const;
  std::pair<double, std::vector<double>> BranchGradientInternals(
      const Node::NodePtr topology, const std::vector<double> &branch_lengths,
      const EigenMatrixXd &dQ) const;
  // NOTE: the parameter was previously named `baBranchGradientInternals`, an
  // apparent paste error; renamed to `ba` to match the sibling declarations
  // (declaration-only parameter names have no effect on callers).
  void UpdateBeagleTransitionMatrices(const BeagleAccessories &ba,
                                      const std::vector<double> &branch_lengths,
                                      const int *const gradient_indices_ptr) const;
  void SetRootPreorderPartialsToStateFrequencies(const BeagleAccessories &ba) const;
  static inline void AddLowerPartialOperation(BeagleOperationVector &operations,
                                              const BeagleAccessories &ba, int node_id,
                                              int child0_id, int child1_id);
  static inline void AddUpperPartialOperation(BeagleOperationVector &operations,
                                              const BeagleAccessories &ba, int node_id,
                                              int sister_id, int parent_id);
  static inline std::pair<double, double> ComputeGradientEntry(
      BeagleAccessories &ba, const SizeVectorVector &indices_above, int node_id,
      int sister_id);
};
// Evaluate `f` on every tree in `tree_collection`, distributing the work over the
// supplied FatBeagle instances via a TaskProcessor. Each task sets the tree's
// parameter row and the rescaling flag on its FatBeagle before evaluating.
// Returns one TOut per tree, indexed by tree number.
template <typename TOut, typename TTree, typename TTreeCollection>
std::vector<TOut> FatBeagleParallelize(
    FatBeagle::StaticTreeFunction<TOut, TTree> f,
    const std::vector<std::unique_ptr<FatBeagle>> &fat_beagles,
    const TTreeCollection &tree_collection, EigenMatrixXdRef param_matrix,
    const bool rescaling, std::optional<PhyloFlags> flags = std::nullopt) {
  if (fat_beagles.empty()) {
    Failwith("Please add some FatBeagles that can be used for computation.");
  }
  std::vector<TOut> results(tree_collection.TreeCount());
  // One queue entry per worker FatBeagle, one per tree to evaluate.
  std::queue<FatBeagle *> fat_beagle_queue;
  for (const auto &fat_beagle : fat_beagles) {
    Assert(fat_beagle != nullptr, "Got a fat_beagle nullptr!");
    fat_beagle_queue.push(fat_beagle.get());
  }
  std::queue<size_t> tree_number_queue;
  for (size_t i = 0; i < tree_collection.TreeCount(); i++) {
    tree_number_queue.push(i);
  }
  // Fixed garbled assertion message ("We param_matrix needs...").
  Assert(static_cast<Eigen::Index>(tree_collection.TreeCount()) == param_matrix.rows(),
         "The param_matrix needs as many rows as there are trees.");
  TaskProcessor<FatBeagle *, size_t>(
      std::move(fat_beagle_queue), std::move(tree_number_queue),
      [&results, &tree_collection, &param_matrix, &rescaling, &f, &flags](
          FatBeagle *fat_beagle, size_t tree_number) {
        fat_beagle->SetParameters(param_matrix.row(tree_number));
        fat_beagle->SetRescaling(rescaling);
        results[tree_number] =
            f(fat_beagle, tree_collection.GetTree(tree_number), flags);
      });
  return results;
}
// Tests live in rooted_sbn_instance.hpp and unrooted_sbn_instance.hpp.
| 8,428
|
C++
|
.h
| 161
| 46.086957
| 88
| 0.727857
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,088
|
resizer.hpp
|
phylovi_bito/src/resizer.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
//
// Resizer is a tool for resizing padded data vectors. After the DAG is modified, a
// resizer instance can be used to sync the resized DAG across associated data.
#pragma once
#include "sugar.hpp"
// Resizer computes the new element count, spare padding, and allocation size for a
// resized DAG, then applies that geometry to associated data vectors.
class Resizer {
 public:
  // `new_count`/`new_spare` default to their old values when not given. The
  // allocation grows geometrically (by `resizing_factor`) once the padded size
  // outgrows it; `explicit_alloc` overrides the computed allocation.
  Resizer(const size_t old_count, const size_t old_spare, const size_t old_alloc,
          std::optional<const size_t> new_count, std::optional<const size_t> new_spare,
          std::optional<const size_t> explicit_alloc = std::nullopt,
          const double resizing_factor = 2.0)
      : old_count_(old_count), old_spare_(old_spare), old_alloc_(old_alloc) {
    new_count_ = (new_count.has_value() ? new_count.value() : GetOldCount());
    new_spare_ = (new_spare.has_value() ? new_spare.value() : GetOldSpare());
    new_alloc_ = GetOldAlloc();
    if (GetNewPadded() > GetOldAlloc()) {
      new_alloc_ = size_t(ceil(double(GetNewPadded()) * resizing_factor));
    }
    if (explicit_alloc.has_value()) {
      Assert(explicit_alloc.value() >= GetNewCount(),
             "Attempted to reallocate space smaller than count.");
      new_alloc_ = explicit_alloc.value() + GetOldSpare();
    }
  }

  size_t GetOldCount() const { return old_count_; }
  size_t GetNewCount() const { return new_count_; }
  size_t GetOldSpare() const { return old_spare_; }
  size_t GetNewSpare() const { return new_spare_; }
  size_t GetOldAlloc() const { return old_alloc_; }
  size_t GetNewAlloc() const { return new_alloc_; }
  // Padded size = count + spare.
  size_t GetOldPadded() const { return old_count_ + old_spare_; }
  size_t GetNewPadded() const { return new_count_ + new_spare_; }

  // Resize an Eigen-style vector to the new geometry, optionally filling every
  // newly exposed slot with `default_val`.
  template <typename VectorType, typename DataType>
  void ApplyResizeToEigenVector(
      VectorType &data_vec,
      std::optional<const DataType> default_val = std::nullopt) const {
    // Grow capacity first, then trim down to the padded size in use.
    data_vec.conservativeResize(GetNewAlloc());
    data_vec.conservativeResize(GetNewPadded());
    // Fill new data with default values. The previous version filled overlapping
    // ranges in two separate passes; this single pass writes the exact union of
    // those ranges: from min(old_count, new_count + old_spare) up to new_padded.
    if (default_val.has_value()) {
      const size_t spare_begin = GetNewCount() + GetOldSpare();
      const size_t fill_begin =
          (GetOldCount() < spare_begin) ? GetOldCount() : spare_begin;
      for (size_t i = fill_begin; i < GetNewPadded(); i++) {
        data_vec[i] = default_val.value();
      }
    }
  }

  // Resize a std::vector to the new geometry. Optionally relocates existing spare
  // data to its new position and fills newly exposed slots with `default_val`.
  template <typename DataType>
  void ApplyResizeToStdVector(std::vector<DataType> &data_vec,
                              std::optional<const DataType> default_val = std::nullopt,
                              const bool copy_spare_data = false) const {
    data_vec.reserve(GetNewCount());
    data_vec.resize(GetNewPadded());
    // Move spare data from old location to new location.
    if (copy_spare_data) {
      for (size_t i = 0; i < std::min(GetOldSpare(), GetNewSpare()); i++) {
        data_vec[GetNewCount() + i] = data_vec[GetOldCount() + i];
      }
    }
    // Fill new data with default values.
    if (default_val.has_value()) {
      for (size_t i = GetOldCount(); i < GetNewCount(); i++) {
        data_vec[i] = default_val.value();
      }
      // Fill spare data.
      for (size_t i = GetNewCount() + GetOldSpare(); i < GetNewCount() + GetNewSpare();
           i++) {
        data_vec[i] = default_val.value();
      }
    }
  }

 private:
  size_t old_count_;
  size_t new_count_;
  size_t old_spare_;
  size_t new_spare_;
  size_t old_alloc_;
  size_t new_alloc_;
};
| 3,619
|
C++
|
.h
| 89
| 34.786517
| 87
| 0.630712
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,532,089
|
csv.hpp
|
phylovi_bito/src/csv.hpp
|
// Copyright 2019-2022 bito project contributors.
// bito is free software under the GPLv3; see LICENSE file for details.
#pragma once
#include "csv.h"
#include "sugar.hpp"
namespace CSV {
// Read the CSV file at `csv_path` back into a map from string keys to double
// values; round-trips with StringDoubleVectorToCSV (see the test case in this file).
StringDoubleMap StringDoubleMapOfCSV(const std::string& csv_path);
// Write the (string, double) pairs of `v` to a CSV file at `csv_path`.
void StringDoubleVectorToCSV(const StringDoubleVector& v, const std::string& csv_path);
}  // namespace CSV
#ifdef DOCTEST_LIBRARY_INCLUDED
TEST_CASE("CSV I/O") {
  // Round-trip test: write (string, double) pairs to a CSV file, read them back
  // as a map, and confirm the contents survive unchanged.
  std::string csv_test_file_path = "_ignore/for_csv_test.csv";
  StringDoubleVector input = {{"hi", 1e9}, {"lo", -4.}};
  CSV::StringDoubleVectorToCSV(input, csv_test_file_path);
  auto result = CSV::StringDoubleMapOfCSV(csv_test_file_path);
  StringDoubleMap correct_result = {{"hi", 1e9}, {"lo", -4.}};
  CHECK_EQ(result, correct_result);
}
#endif // DOCTEST_LIBRARY_INCLUDED
| 804
|
C++
|
.h
| 19
| 40.368421
| 87
| 0.744544
|
phylovi/bito
| 38
| 9
| 59
|
GPL-3.0
|
9/20/2024, 10:43:37 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.