id
int64 0
877k
| file_name
stringlengths 3
109
| file_path
stringlengths 13
185
| content
stringlengths 31
9.38M
| size
int64 31
9.38M
| language
stringclasses 1
value | extension
stringclasses 11
values | total_lines
int64 1
340k
| avg_line_length
float64 2.18
149k
| max_line_length
int64 7
2.22M
| alphanum_fraction
float64 0
1
| repo_name
stringlengths 6
66
| repo_stars
int64 94
47.3k
| repo_forks
int64 0
12k
| repo_open_issues
int64 0
3.4k
| repo_license
stringclasses 11
values | repo_extraction_date
stringclasses 197
values | exact_duplicates_redpajama
bool 2
classes | near_duplicates_redpajama
bool 2
classes | exact_duplicates_githubcode
bool 2
classes | exact_duplicates_stackv2
bool 1
class | exact_duplicates_stackv1
bool 2
classes | near_duplicates_githubcode
bool 2
classes | near_duplicates_stackv1
bool 2
classes | near_duplicates_stackv2
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1,531,446
|
DSPCC_taxi_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/DSPCC_taxi_example.cpp
|
#include <iostream>
//#include <stack>
#include "metric/mapping/DSPCC.hpp"
#include "examples/mapping_examples/assets/helpers.cpp" // for .csv reader
#include "metric/utils/visualizer.hpp"
#include "metric/utils/metric_err.hpp"
#include "metric/distance/k-related/Standards.hpp" // we use Euclidean metric for mean squared error evaluation
// Transposes a table of timeseries so that output[j][i] == ts[i][j].
// The workaround thing. TODO remove and update csv reader this way.
// Assumes every row of ts has the same length as ts[0] -- TODO confirm in csv reader.
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<ValueType, A1>, A2> transpose_timeseries(
    const Container<Container<ValueType, A1>, A2>& ts)  // const ref: the original copied the whole table per call
{
    auto output = Container<Container<ValueType, A1>, A2>();
    if (ts.empty())  // guard: ts[0] below is undefined behavior on an empty table
        return output;
    size_t n_values = ts[0].size();
    for (size_t j = 0; j < n_values; ++j)  // one output series per value column
    {
        output.push_back(Container<ValueType, A1>());
    }
    for (size_t i = 0; i < ts.size(); ++i)  // loop over input records
    {
        for (size_t j = 0; j < n_values; ++j)
            output[j].push_back(ts[i][j]);
    }
    return output;
}
// Prints a 2D table to stdout, one record per line, values separated by
// single spaces. Assumes a rectangular table (every row as long as row 0).
template <typename Container>
void print_table(const Container& table) {  // const ref: the original copied the whole table per call
    for (size_t rec_idx = 0; rec_idx < table.size(); ++rec_idx) {
        for (size_t el_idx = 0; el_idx < table[0].size(); ++el_idx)
            std::cout << table[rec_idx][el_idx] << " ";
        std::cout << "\n";
    }
}
// Row-wise RMSE averaged over rows:
//   mean over rows of sqrt(mean over cols of (M1[r][c] - M2[r][c])^2).
// Assumes M1 and M2 have identical dimensions and rectangular rows.
// Returns 0.0 for an empty input (the original divided by zero).
template <
    template <typename, typename> class OuterContainer,
    typename OuterAllocator,
    template <typename, typename> class InnerContainer,
    typename InnerAllocator,
    typename ValueType >
double mean_square_error(
    const OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> & M1,
    const OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> & M2
)
{
    if (M1.empty() || M1[0].empty())  // guard against division by zero below
        return 0.0;
    double overall_sum = 0;
    const size_t n_cols = M1[0].size();  // we assume all inner vectors both in M1 and M2 are of the same length
    for (size_t row = 0; row < M1.size(); ++row)  // we assume M1 and M2 are of the same length too
    {
        double row_sum = 0;
        for (size_t col = 0; col < n_cols; ++col) {
            const double diff = M1[row][col] - M2[row][col];
            row_sum += diff * diff;  // diff*diff instead of pow(diff, 2): same value, much cheaper
        }
        overall_sum += sqrt(row_sum / n_cols);  // RMSE of this row
    }
    return overall_sum / M1.size();  // mean over rows
}
// Pretty-prints the six-element error-statistics tuple (as produced by
// normalized_err_stats): average/stddev of the original waveform norms,
// of the absolute errors, and of the normalized errors, one per line,
// followed by a blank line.
void print_stats(std::tuple<double, double, double, double, double, double> stats) {
    const auto [avg_norm, norm_sd, avg_abs, abs_sd, avg_nrm, nrm_sd] = stats;
    std::cout << " average norm of original waveforms : " << avg_norm << "\n"
              << " original waveform norm stddev : " << norm_sd << "\n"
              << " average absolute error : " << avg_abs << "\n"
              << " stddev of absolute error : " << abs_sd << "\n"
              << " average normalized error : " << avg_nrm << "\n"
              << " stddev of normalized error : " << nrm_sd << "\n"
              << "\n";
}
// Demo: DSPCC compression of the weekly taxi timeseries dataset.
// Sweeps the freq/time mixing coefficient from 0 to 1 in steps of 0.25,
// reports reconstruction error after each codec stage, and dumps
// intermediate data as .bmp/.csv files for the mix == 0.5 run only.
int main()
{
    float magnitude = 1;  // brightness scaling factor for the .bmp visualizations
    // each row of vdata is one weekly timeseries record read from csv
    auto vdata = read_csv_num<double>("assets/taxi_weekly.csv", ",");
    mat2bmp::blaze2bmp_norm(vdata, "input.bmp", magnitude);
    // per-mix normalized errors collected for the summary table at the end
    std::vector<double> errs_pre, errs_tf, errs_full;
    bool visualize = false;
    // NOTE: 0.25 and 0.5 are exactly representable in binary floating point,
    // so the float == comparison below is reliable for this particular step.
    for (float mix = 0; mix <= 1; mix += 0.25) {
        if (mix == 0.5)  // dump files only for the middle mix value
            visualize = true;
        else
            visualize = false;
        auto vDSPCC = metric::DSPCC<std::vector<double>, void>(vdata, 4, 2, mix, 4);
        // dataset,
        // number of features of freq and time PCFAs,
        // DWT subbands, share of freq features in the mixed code,
        // top PCFA features
        // stage 1: time/frequency PCFA round-trip only
        auto v_encoded = vDSPCC.time_freq_PCFA_encode(vdata);
        auto v_decoded = vDSPCC.time_freq_PCFA_decode(v_encoded);
        if (visualize) {
            mat2bmp::blaze2bmp_norm(v_decoded, "decoded.bmp", magnitude);
            write_csv(transpose_timeseries(v_decoded), "decoded.csv", ";");
        }
        std::cout << "\n  subband_length: " << vDSPCC.get_subband_size() << "\n";
        std::cout << "original record length: " << vdata[0].size() << "\n";
        std::cout << " decoded record length: " << v_decoded[0].size() << "\n";
        std::cout << "\ndecompression with only time-freq PSFAs done, decoded data saved\n";
        auto err_tf = normalized_err_stats<metric::Euclidean<double>>(vdata, v_decoded);
        print_stats(err_tf);
        errs_tf.push_back(std::get<4>(err_tf));  // element 4 = average normalized error
        //std::cout << "\ncomputing pre-encoded and pre_decoded vibration data...\n";
        // stage 2: pre-compression (DWT) round-trip via the test wrappers
        auto v_pre_encoded = vDSPCC.test_public_wrapper_encode(vdata);
        auto v_pre_decoded = vDSPCC.test_public_wrapper_decode(v_pre_encoded);
        if (visualize) {
            write_csv(transpose_timeseries(v_pre_decoded), "pre_decoded.csv", ";");
            mat2bmp::blaze2bmp_norm(v_pre_decoded, "pre_decoded.bmp", magnitude);
        }
        std::cout << "\ntest of pre-compression done, pre-decoded data saved\n";
        auto err_pre = normalized_err_stats<metric::Euclidean<double>>(vdata, v_pre_decoded);
        print_stats(err_pre);
        errs_pre.push_back(std::get<4>(err_pre));
        // stage 3: the complete encode/decode pipeline
        auto v_encoded2 = vDSPCC.encode(vdata);
        auto v_decoded2 = vDSPCC.decode(v_encoded2);
        if (visualize) {
            mat2bmp::blaze2bmp_norm(v_encoded2, "encoded2.bmp", magnitude);
            write_csv(transpose_timeseries(v_encoded2), "encoded2.csv", ";");
            mat2bmp::blaze2bmp_norm(v_decoded2, "decoded2.bmp", magnitude);
            write_csv(transpose_timeseries(v_decoded2), "decoded2.csv", ";");
        }
        std::cout << "\ncompletely encoded data saved\n";
        auto err_full = normalized_err_stats<metric::Euclidean<double>>(vdata, v_decoded2);
        print_stats(err_full);
        errs_full.push_back(std::get<4>(err_full));
        std::cout << "average RMSE = " << mean_square_error(v_decoded2, vdata) << "\n";
        std::cout << "\n";
    }
    // summary: one row per mix value, columns = pre / time-freq / full pipeline
    std::cout << "\nOverall results:\n pre\t tf\t full\n";
    for (size_t i = 0; i < errs_full.size(); ++i) {
        std::cout << errs_pre[i] << "\t" << errs_tf[i] << "\t" << errs_full[i] << "\n";
    }
    return 0;
}
| 6,127
|
C++
|
.cpp
| 126
| 41.571429
| 119
| 0.609941
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,447
|
SOM_and_MNIST_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/SOM_and_MNIST_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <vector>
#include <iostream>
#include <fstream>
#include <chrono>
#include <nlohmann/json.hpp>
#include "metric/mapping.hpp"
#include "../../assets/mnist/mnist_reader.hpp"
using json = nlohmann::json;
// Prints a 2D matrix in a bracketed, comma-separated layout, one row per
// line. Elements are cast to unsigned int so uint8_t pixels print as
// numbers rather than raw characters.
// Fix: the original evaluated mat[i].size() - 1 unconditionally, which
// wraps around for an empty row and reads out of bounds.
template <typename T>
void matrix_print(const std::vector<std::vector<T>> &mat)
{
    std::cout << "[ " << std::endl;
    for (size_t i = 0; i < mat.size(); i++)
    {
        std::cout << " [ ";
        if (!mat[i].empty())
        {
            for (size_t j = 0; j + 1 < mat[i].size(); j++)
            {
                std::cout << static_cast<unsigned int>(mat[i][j]) << ", ";
            }
            std::cout << static_cast<unsigned int>(mat[i].back());
        }
        std::cout << " ]" << std::endl;
    }
    std::cout << "]" << std::endl;
    std::cout << std::endl;
}
// Prints a vector as "[v0, v1, ..., vN ]".
// Fixes two bugs in the original: the loop ran over *every* element with a
// trailing ", " and then appended the last element again (so the last value
// printed twice), and an empty vector indexed vec[vec.size() - 1] out of
// bounds.
template <typename T>
void vector_print(const std::vector<T> &vec)
{
    std::cout << "[";
    for (size_t i = 0; i + 1 < vec.size(); i++)
    {
        std::cout << vec[i] << ", ";
    }
    if (!vec.empty())
        std::cout << vec.back();
    std::cout << " ]" << std::endl;
}
// Prints vec as a width x height grid, one grid row per output line.
// Refuses (with a message) when the vector size does not match the grid.
template <typename T>
void vector_print(const std::vector<T> &vec,const size_t width, const size_t height)
{
    if (vec.size() != width * height) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    size_t pos = 0;
    for (size_t row = 0; row < height; ++row) {
        for (size_t col = 0; col < width; ++col)
            std::cout << vec[pos++] << " ";
        std::cout << std::endl;
    }
}
// Prints vec as a width x height image of right-aligned pixel values in
// 2-character cells; values > 15 occupy both characters (the caller sets
// std::hex, so "> 15" means "needs two hex digits"). Ends with a blank line.
// Fix: removed the unused local variable `pos`; loop index is size_t to
// avoid the signed/unsigned comparison.
template <typename T>
void image_print(const std::vector<T> &vec, const size_t width, const size_t height)
{
    if ((width * height) != vec.size()) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    const int max_digits = 2;
    for (size_t index = 0; index < vec.size(); ++index) {
        int digits_num = 1;
        if (vec[index] > 15)  // value needs a second (hex) digit
        {
            digits_num++;
        }
        for (int i = 0; i < max_digits - digits_num; ++i) {
            std::cout << " ";  // right-align within the cell
        }
        std::cout << static_cast<unsigned int>(vec[index]) << " ";
        if ((index + 1) % width == 0) {
            std::cout << std::endl;  // end of image row
        }
    }
    std::cout << std::endl;
}
// Prints every top-level key of the json document together with the number
// of elements stored under that key, one per line.
void printDataInfo(const json& data)
{
    for (const auto& item : data.items())
        std::cout << item.key() << " " << item.value().size() << std::endl;
}
// Demo: train a SOM on the MNIST training set, cluster the SOM node
// weights with k-means into 10 clusters, assign a digit to each node by
// majority vote over the training set, then measure classification
// accuracy on the test set via best-matching-unit lookup.
int main()
{
    std::cout << "SOM example have started" << std::endl;
    std::cout << '\n';
    // load mnist dataset
    std::string MNIST_DATA_LOCATION = "assets/mnist";
    mnist::MNIST_dataset<std::vector, std::vector<double>, uint8_t> dataset =
        mnist::read_dataset<std::vector, std::vector, double, uint8_t>(MNIST_DATA_LOCATION);
    std::cout << "Number of training images = " << dataset.training_images.size() << std::endl;
    std::cout << "Number of training labels = " << dataset.training_labels.size() << std::endl;
    std::cout << "Number of test images = " << dataset.test_images.size() << std::endl;
    std::cout << "Number of test labels = " << dataset.test_labels.size() << std::endl;
    // NOTE(review): the next three lines all say "Test label 0" but print
    // training labels 0, 1 and 2 — logging-only copy-paste slip, left as-is.
    std::cout << "Test label 0 = " << static_cast<unsigned int>(dataset.training_labels[0]) << std::endl;
    std::cout << "Test label 0 = " << static_cast<unsigned int>(dataset.training_labels[1]) << std::endl;
    std::cout << "Test label 0 = " << static_cast<unsigned int>(dataset.training_labels[2]) << std::endl;
    std::cout << "Training image 0 = " << dataset.training_images[0].size() << std::endl;
    // dump the first five images as hex pixel grids (28x28)
    for (auto i = 0; i < 5; i++)
    {
        std::cout << std::hex;
        image_print(dataset.training_images[i], 28, 28);
        std::cout << std::dec << std::endl;
    }
    for (auto i = 0; i < 20; i++)
    {
        std::cout << "Test label " << i << " = " << static_cast<unsigned int>(dataset.training_labels[i]) << std::endl;
    }
    //
    int grid_w = 5;  // SOM grid dimensions: 5 x 4 = 20 nodes
    int grid_h = 4;
    using Vector = std::vector<double>;
    using Metric = metric::Euclidean<double>;
    //using Metric = metric::SSIM<double, std::vector<double>>;
    using Graph = metric::Grid6;  // hexagonal grid topology
    // initial node weights drawn uniformly from the pixel value range [0, 255]
    std::uniform_real_distribution<double> distr(0, 255);
    Metric distance;
    std::vector<std::vector<double>> train_images = dataset.training_images;
    //std::vector<std::vector<std::vector<double>>> train_images;
    //
    //for (auto i = 0; i < 5; i++)
    //{
    //	std::vector<std::vector<double>> image(28, std::vector<double>());
    //	for (auto p = 0; p < dataset.training_images[i].size(); p++)
    //	{
    //		image[(int) (p / 28)].push_back(dataset.training_images[i][p]);
    //	}
    //	train_images.push_back(image);
    //}
    std::vector<std::vector<double>> test_images = dataset.test_images;
    //std::vector<std::vector<std::vector<double>>> test_images;
    //for (auto i = 0; i < 5; i++)
    //{
    //	std::vector<std::vector<double>> image(28, std::vector<double>());
    //	for (auto p = 0; p < dataset.test_images[i].size(); p++)
    //	{
    //		image[(int) (p / 28)].push_back(dataset.test_images[i][p]);
    //	}
    //	test_images.push_back(image);
    //}
    //std::cout << std::hex;
    //matrix_print(train_images[0]);
    //matrix_print(train_images[1]);
    //std::cout << std::dec << std::endl;
    // sanity check: distance between the first two training images
    std::cout << "result: " << distance(train_images[0], train_images[1]) << std::endl;
    std::cout << "" << std::endl;
    // SOM hyperparameters: start/end learning rates 0.8/0.2, 20 iterations
    metric::SOM<Vector, Graph, Metric> som_model(Graph(grid_w, grid_h), Metric(), 0.8, 0.2, 20, distr);
    if (!som_model.isValid()) {
        std::cout << "SOM is not valid" << std::endl;
        return EXIT_FAILURE;
    }
    /* Estimate with img1 */
    //std::cout << "Estimate started..." << std::endl;
    //auto t1 = std::chrono::steady_clock::now();
    //som_model.estimate(dataset.training_images, 50);
    //auto t2 = std::chrono::steady_clock::now();
    //std::cout << "Estimate ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    /* Train with img1 */
    std::cout << "Full train started..." << std::endl;
    auto t1 = std::chrono::steady_clock::now();
    som_model.train(train_images);  // full pass over all 60k training images; slow
    auto t2 = std::chrono::steady_clock::now();
    std::cout << "Full train ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    auto nodes_data = som_model.get_weights();
    // clustering on the reduced data
    //metric::Matrix<std::vector<double>, metric::Cosine<double>> distance_matrix(nodes_data);
    //auto [assignments, exemplars, counts] = metric::affprop(distance_matrix, (float)0.25);
    auto [assignments, exemplars, counts] = metric::kmeans(nodes_data, 10, 1000);  // 10 clusters, 1000 max iterations
    std::cout << "assignments:" << std::endl;
    vector_print(assignments, grid_w, grid_h);
    std::cout << std::endl;
    std::cout << "counts:" << std::endl;
    vector_print(counts);
    std::cout << std::endl;
    // result[node][digit] = how many training images of that digit map to the node
    std::vector<std::vector<double>> result(grid_w * grid_h, std::vector<double>(10, 0));
    for (auto i = 0; i < train_images.size(); i++)
    {
        auto bmu = som_model.BMU(train_images[i]);
        result[bmu][dataset.training_labels[i]]++;
    }
    matrix_print(result);
    // digits[node] = majority-vote digit for that node
    std::vector<double> digits;
    for (auto i = 0; i < result.size(); i++)
    {
        auto r = std::max_element(result[i].begin(), result[i].end());
        digits.push_back(std::distance(result[i].begin(), r));
    }
    vector_print(digits);
    // evaluate on the test set: predicted digit = majority digit of the BMU
    int matches = 0;
    int errors = 0;
    for (auto i = 0; i < test_images.size(); i++)
    {
        auto bmu = som_model.BMU(test_images[i]);
        if (digits[bmu] == dataset.test_labels[i])
        {
            matches++;
        }
        else
        {
            errors++;
        }
    }
    std::cout << "matches: " << matches << " errors: " << errors << " accuracy: " << (double) matches / ((double) matches + (double) errors) << std::endl;
    //
    ///* Train with img1 */
    //	std::cout << "Full train started..." << std::endl;
    //	t1 = std::chrono::steady_clock::now();
    //	som_model.train(img1);
    //	t2 = std::chrono::steady_clock::now();
    //	std::cout << "Full train ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    //std_deviation = som_model.std_deviation(img1);
    //std::cout << "std deviation: " << std_deviation << std::endl;
    //std::cout << std::endl;
    //	dimR = som_model.encode(img1[0]);
    //vector_print(dimR, 6, 5);
    //std::cout << std::endl;
    //bmu = som_model.BMU(img1[0]);
    //std::cout << "Best matching unit: " << bmu << std::endl;
    //std::cout << std::endl;
    ////
    ///* Train with img2 */
    //	som_model.train(img2);
    //	dimR = som_model.encode(img1[0]);
    //vector_print(dimR, 6, 5);
    //std::cout << std::endl;
    //bmu = som_model.BMU(img1[0]);
    //std::cout << "Best matching unit: " << bmu << std::endl;
    //std_deviation = som_model.std_deviation(img2);
    //std::cout << "std deviation: " << std_deviation << std::endl;
    //	std::cout << std::endl;
    return 0;
}
| 8,760
|
C++
|
.cpp
| 235
| 33.982979
| 160
| 0.607508
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,448
|
SOM_2_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/SOM_2_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <vector>
#include <iostream>
#include <fstream>
#include <chrono>
#include <nlohmann/json.hpp>
#include "metric/mapping.hpp"
#include "../../assets/mnist/mnist_reader.hpp"
using json = nlohmann::json;
// Prints a 2D matrix in a bracketed, comma-separated layout, one row per
// line.
// Fix: the original evaluated mat[i].size() - 1 unconditionally, which
// wraps around for an empty row and reads out of bounds.
template <typename T>
void matrix_print(const std::vector<std::vector<T>> &mat)
{
    std::cout << "[ " << std::endl;
    for (size_t i = 0; i < mat.size(); i++)
    {
        std::cout << " [ ";
        if (!mat[i].empty())
        {
            for (size_t j = 0; j + 1 < mat[i].size(); j++)
            {
                std::cout << mat[i][j] << ", ";
            }
            std::cout << mat[i].back();
        }
        std::cout << " ]" << std::endl;
    }
    std::cout << "]" << std::endl;
    std::cout << std::endl;
}
// Prints a vector as "[ v0, v1, ..., vN ]".
// Fix: the original's `i < vec.size() - 1` wraps around for an empty
// vector (size_t underflow), making the loop read far out of bounds, and
// vec[vec.size() - 1] was also indexed unconditionally.
template <typename T>
void vector_print(const std::vector<T> &vec)
{
    std::cout << "[ ";
    if (!vec.empty())
    {
        for (size_t i = 0; i + 1 < vec.size(); i++)
        {
            std::cout << vec[i] << ", ";
        }
        std::cout << vec.back();
    }
    std::cout << " ]" << std::endl;
}
// Prints vec as a width x height grid, one grid row per output line.
// Refuses (with a message) when the vector size does not match the grid.
template <typename T>
void vector_print(const std::vector<T> &vec,const size_t width, const size_t height)
{
    if (vec.size() != width * height) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    size_t pos = 0;
    for (size_t row = 0; row < height; ++row) {
        for (size_t col = 0; col < width; ++col)
            std::cout << vec[pos++] << " ";
        std::cout << std::endl;
    }
}
// Prints vec as a width x height image of right-aligned pixel values in
// 2-character cells; values > 15 occupy both characters (the caller sets
// std::hex, so "> 15" means "needs two hex digits"). Ends with a blank line.
// Fix: removed the unused local variable `pos`; loop index is size_t to
// avoid the signed/unsigned comparison.
template <typename T>
void image_print(const std::vector<T> &vec, const size_t width, const size_t height)
{
    if ((width * height) != vec.size()) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    const int max_digits = 2;
    for (size_t index = 0; index < vec.size(); ++index) {
        int digits_num = 1;
        if (vec[index] > 15)  // value needs a second (hex) digit
        {
            digits_num++;
        }
        for (int i = 0; i < max_digits - digits_num; ++i) {
            std::cout << " ";  // right-align within the cell
        }
        std::cout << static_cast<unsigned int>(vec[index]) << " ";
        if ((index + 1) % width == 0) {
            std::cout << std::endl;  // end of image row
        }
    }
    std::cout << std::endl;
}
// Prints every top-level key of the json document together with the number
// of elements stored under that key, one per line.
void printDataInfo(const json& data)
{
    for (const auto& item : data.items())
        std::cout << item.key() << " " << item.value().size() << std::endl;
}
// Demo: train one SOM sequentially on six toy JSON datasets; after each
// dataset, cluster the SOM node weights with k-means (3 clusters) and
// print the node-to-cluster assignments as a grid.
// NOTE(review): the same som_model is trained cumulatively across all six
// datasets (it is constructed once, outside the loop) — presumably
// intentional to show incremental training; confirm.
int main()
{
    std::cout << "SOM example have started" << std::endl;
    std::cout << '\n';
    //
    int grid_w = 8;  // SOM grid: 8 x 6 = 48 nodes
    int grid_h = 6;
    using Vector = std::vector<double>;
    using Metric = metric::Euclidean<double>;
    using Graph = metric::Grid6;  // hexagonal grid topology
    // initial node weights drawn uniformly from [-1, 1]
    std::uniform_real_distribution<double> distr(-1, 1);
    Metric distance;
    // hyperparameters: start/end learning rates 0.8/0.2, 20 iterations
    metric::SOM<Vector, Graph, Metric> som_model(Graph(grid_w, grid_h), Metric(), 0.8, 0.2, 20, distr);
    if (!som_model.isValid()) {
        std::cout << "SOM is not valid" << std::endl;
        return EXIT_FAILURE;
    }
    for (auto i = 0; i < 6; i ++)
    {
        /* Load data */
        std::cout << "load data" << std::endl;
        std::ifstream dataFile("assets/toy_dataset_" + std::to_string(i) + ".json");
        json data;
        try
        {
            dataFile >> data;
        }
        catch (const std::exception& e) {
            // on parse failure, data stays empty and training below runs on it
            std::cout << "Error: " << e.what() << std::endl;
        }
        catch (...) {
            std::cout << "Error: unknown" << std::endl;
        }
        std::vector<std::vector<double>> train_dataset = data.get<std::vector<std::vector<double>>>();
        /* Estimate with img1 */
        //std::cout << "Estimate started..." << std::endl;
        //auto t1 = std::chrono::steady_clock::now();
        //som_model.estimate(dataset.training_images, 50);
        //auto t2 = std::chrono::steady_clock::now();
        //std::cout << "Estimate ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
        /* Train with img1 */
        std::cout << "Full train started..." << std::endl;
        auto t1 = std::chrono::steady_clock::now();
        som_model.train(train_dataset);
        auto t2 = std::chrono::steady_clock::now();
        std::cout << "Full train ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
        auto nodes_data = som_model.get_weights();
        // clustering on the reduced data
        //metric::Matrix<std::vector<double>, metric::Cosine<double>> distance_matrix(nodes_data);
        //auto [assignments, exemplars, counts] = metric::affprop(distance_matrix, (float)0.25);
        auto [assignments, exemplars, counts] = metric::kmeans(nodes_data, 3, 1000);  // 3 clusters, 1000 max iterations
        std::cout << "assignments:" << std::endl;
        vector_print(assignments, grid_w, grid_h);
        std::cout << std::endl;
        std::cout << "counts:" << std::endl;
        vector_print(counts);
        std::cout << std::endl;
    }
    return 0;
}
| 4,675
|
C++
|
.cpp
| 145
| 28.786207
| 156
| 0.600403
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,449
|
SOM_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/SOM_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <vector>
#include <iostream>
#include <fstream>
#include <chrono>
#include <nlohmann/json.hpp>
#include "metric/mapping.hpp"
using json = nlohmann::json;
// Prints a 2D matrix in a bracketed, comma-separated layout, one row per
// line.
// Fix: the original evaluated mat[i].size() - 1 unconditionally, which
// wraps around for an empty row and reads out of bounds.
template <typename T>
void matrix_print(const std::vector<std::vector<T>> &mat)
{
    std::cout << "[ " << std::endl;
    for (size_t i = 0; i < mat.size(); i++)
    {
        std::cout << " [ ";
        if (!mat[i].empty())
        {
            for (size_t j = 0; j + 1 < mat[i].size(); j++)
            {
                std::cout << mat[i][j] << ", ";
            }
            std::cout << mat[i].back();
        }
        std::cout << " ]" << std::endl;
    }
    std::cout << "]" << std::endl;
    std::cout << std::endl;
}
// Prints a vector as "[v0, v1, ..., vN ]".
// Fixes two bugs in the original: the loop ran over *every* element with a
// trailing ", " and then appended the last element again (so the last value
// printed twice), and an empty vector indexed vec[vec.size() - 1] out of
// bounds.
template <typename T>
void vector_print(const std::vector<T> &vec)
{
    std::cout << "[";
    for (size_t i = 0; i + 1 < vec.size(); i++)
    {
        std::cout << vec[i] << ", ";
    }
    if (!vec.empty())
        std::cout << vec.back();
    std::cout << " ]" << std::endl;
}
// Prints vec as a width x height grid, one grid row per output line.
// Refuses (with a message) when the vector size does not match the grid.
template <typename T>
void vector_print(const std::vector<T> &vec,const size_t width, const size_t height)
{
    if (vec.size() != width * height) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    size_t pos = 0;
    for (size_t row = 0; row < height; ++row) {
        for (size_t col = 0; col < width; ++col)
            std::cout << vec[pos++] << " ";
        std::cout << std::endl;
    }
}
// Prints every top-level key of the json document together with the number
// of elements stored under that key, one per line.
void printDataInfo(const json& data)
{
    for (const auto& item : data.items())
        std::cout << item.key() << " " << item.value().size() << std::endl;
}
// Demo: SOM on two images stored in assets/data.json. First `estimate`
// (fast approximate fit) on img1, then full `train` on img1, then further
// train on img2; after each stage, print the std deviation, the encoding
// of img1[0] and its best matching unit to show how the map evolves.
int main()
{
    std::cout << "SOM example have started" << std::endl;
    std::cout << '\n';
    using Vector = std::vector<double>;
    using Metric = metric::Euclidean<Vector::value_type>;
    using Graph = metric::Grid6;  // hexagonal grid topology
    // 6 x 5 = 30 nodes; start/end learning rates 0.8/0.2, 20 iterations
    metric::SOM<Vector, Graph, Metric> som_model(Graph(6, 5), Metric(), 0.8, 0.2, 20);
    if (!som_model.isValid()) {
        std::cout << "SOM is not valid" << std::endl;
        return EXIT_FAILURE;
    }
    /* Load data */
    std::cout << "load data" << std::endl;
    std::ifstream dataFile("assets/data.json");
    json data;
    try
    {
        dataFile >> data;
    }
    catch (const std::exception& e) {
        // on parse failure, data stays empty; the get<>() below would throw
        std::cout << "Error: " << e.what() << std::endl;
    }
    catch (...) {
        std::cout << "Error: unknown" << std::endl;
    }
    std::cout << "print json" << std::endl;
    printDataInfo(data);
    std::cout << std::endl;
    const auto img1 = data["img1"].get<std::vector<std::vector<double>>>();
    const auto img2 = data["img2"].get<std::vector<std::vector<double>>>();
    //
    /* Estimate with img1 */
    std::cout << "Estimate started..." << std::endl;
    auto t1 = std::chrono::steady_clock::now();
    som_model.estimate(img1, 50);  // approximate fit on a 50-sample subset
    auto t2 = std::chrono::steady_clock::now();
    std::cout << "Estimate ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    auto std_deviation = som_model.std_deviation(img1);
    std::cout << "std deviation: " << std_deviation << std::endl;
    std::cout << std::endl;
    auto dimR = som_model.encode(img1[0]);  // node activations for the first record
    vector_print(dimR, 6, 5);
    std::cout << std::endl;
    auto bmu = som_model.BMU(img1[0]);
    std::cout << "Best matching unit: " << bmu << std::endl;
    std::cout << std::endl;
    //
    /* Train with img1 */
    std::cout << "Full train started..." << std::endl;
    t1 = std::chrono::steady_clock::now();
    som_model.train(img1);
    t2 = std::chrono::steady_clock::now();
    std::cout << "Full train ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    std_deviation = som_model.std_deviation(img1);
    std::cout << "std deviation: " << std_deviation << std::endl;
    std::cout << std::endl;
    dimR = som_model.encode(img1[0]);
    vector_print(dimR, 6, 5);
    std::cout << std::endl;
    bmu = som_model.BMU(img1[0]);
    std::cout << "Best matching unit: " << bmu << std::endl;
    std::cout << std::endl;
    //
    /* Train with img2 */
    som_model.train(img2);  // continue training the same map on the second image
    dimR = som_model.encode(img1[0]);
    vector_print(dimR, 6, 5);
    std::cout << std::endl;
    bmu = som_model.BMU(img1[0]);
    std::cout << "Best matching unit: " << bmu << std::endl;
    std_deviation = som_model.std_deviation(img2);
    std::cout << "std deviation: " << std_deviation << std::endl;
    std::cout << std::endl;
    return 0;
}
| 4,434
|
C++
|
.cpp
| 135
| 29.355556
| 158
| 0.598866
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,450
|
SOM_3_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/SOM_3_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <vector>
#include <iostream>
#include <fstream>
#include <chrono>
#include <nlohmann/json.hpp>
#include "metric/mapping.hpp"
#include "../../assets/mnist/mnist_reader.hpp"
using json = nlohmann::json;
// Prints a 2D matrix in a bracketed, comma-separated layout, one row per
// line.
// Fix: the original evaluated mat[i].size() - 1 unconditionally, which
// wraps around for an empty row and reads out of bounds.
template <typename T>
void matrix_print(const std::vector<std::vector<T>> &mat)
{
    std::cout << "[ " << std::endl;
    for (size_t i = 0; i < mat.size(); i++)
    {
        std::cout << " [ ";
        if (!mat[i].empty())
        {
            for (size_t j = 0; j + 1 < mat[i].size(); j++)
            {
                std::cout << mat[i][j] << ", ";
            }
            std::cout << mat[i].back();
        }
        std::cout << " ]" << std::endl;
    }
    std::cout << "]" << std::endl;
    std::cout << std::endl;
}
// Prints a vector as "[ v0, v1, ..., vN ]".
// Fix: the original's `i < vec.size() - 1` wraps around for an empty
// vector (size_t underflow), making the loop read far out of bounds, and
// vec[vec.size() - 1] was also indexed unconditionally.
template <typename T>
void vector_print(const std::vector<T> &vec)
{
    std::cout << "[ ";
    if (!vec.empty())
    {
        for (size_t i = 0; i + 1 < vec.size(); i++)
        {
            std::cout << vec[i] << ", ";
        }
        std::cout << vec.back();
    }
    std::cout << " ]" << std::endl;
}
// Prints vec as a width x height grid, one grid row per output line.
// Refuses (with a message) when the vector size does not match the grid.
template <typename T>
void vector_print(const std::vector<T> &vec,const size_t width, const size_t height)
{
    if (vec.size() != width * height) {
        std::cout << "width * height != vector.size()" << std::endl;
        return;
    }
    size_t pos = 0;
    for (size_t row = 0; row < height; ++row) {
        for (size_t col = 0; col < width; ++col)
            std::cout << vec[pos++] << " ";
        std::cout << std::endl;
    }
}
// Reads a tab-separated dataset file: the first two columns of each line
// become a 2-D point in `rows`; any further columns are appended to
// `labels`. Returns { rows, labels }. A missing or unreadable file yields
// empty results (no error is reported, matching the original behavior).
// Fix: removed the unused local variable `w`.
std::tuple<std::vector<std::vector<double>>, std::vector<double>> readData(std::string filename)
{
    std::fstream fin;
    fin.open(filename, std::ios::in);
    std::vector<double> row;
    std::string line, word;
    std::vector<std::vector<double>> rows;
    std::vector<double> labels;
    // omit headers
    //getline(fin, line);
    while (getline(fin, line))
    {
        std::stringstream s(line);
        row.clear();
        int i = 0;
        while (getline(s, word, '\t'))
        {
            if (i >= 2)
            {
                // third and later columns are treated as labels
                // (atof silently yields 0 for non-numeric text)
                labels.push_back(std::atof(word.c_str()));
            }
            else
            {
                row.push_back(std::atof(word.c_str()));
            }
            i++;
        }
        rows.push_back(row);
    }
    return { rows, labels };
}
// Prints every top-level key of the json document together with the number
// of elements stored under that key, one per line.
void printDataInfo(const json& data)
{
    for (const auto& item : data.items())
        std::cout << item.key() << " " << item.value().size() << std::endl;
}
// Demo: SOM on the Compound.txt 2-D point dataset. Estimate, then fully
// train, cluster the SOM node weights with k-means (6 clusters), and
// compare cluster assignments against the file's ground-truth labels.
int main()
{
    std::cout << "SOM example have started" << std::endl;
    std::cout << '\n';
    //
    int grid_w = 6;  // SOM grid: 6 x 4 = 24 nodes
    int grid_h = 4;
    using Vector = std::vector<double>;
    using Metric = metric::Euclidean<double>;
    using Graph = metric::Grid6;  // hexagonal grid topology
    // initial node weights drawn uniformly from [-1, 1]
    std::uniform_real_distribution<double> distr(-1, 1);
    Metric distance;
    // hyperparameters: start/end learning rates 0.8/0.2, 20 iterations
    metric::SOM<Vector, Graph, Metric> som_model(Graph(grid_w, grid_h), Metric(), 0.8, 0.2, 20, distr);
    if (!som_model.isValid()) {
        std::cout << "SOM is not valid" << std::endl;
        return EXIT_FAILURE;
    }
    /* Load data */
    auto [train_dataset, labels] = readData("assets/Compound.txt");
    /* Estimate with img1 */
    std::cout << "Estimate started..." << std::endl;
    auto t1 = std::chrono::steady_clock::now();
    som_model.estimate(train_dataset, 50);  // approximate fit on a 50-sample subset
    auto t2 = std::chrono::steady_clock::now();
    std::cout << "Estimate ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    std::cout << std::endl;
    /* Train with img1 */
    std::cout << "Full train started..." << std::endl;
    t1 = std::chrono::steady_clock::now();
    som_model.train(train_dataset);
    t2 = std::chrono::steady_clock::now();
    std::cout << "Full train ended (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "s)" << std::endl;
    std::cout << std::endl;
    auto nodes_data = som_model.get_weights();
    // clustering on the reduced data
    //metric::Matrix<std::vector<double>, metric::Cosine<double>> distance_matrix(nodes_data);
    //auto [assignments, exemplars, counts] = metric::affprop(distance_matrix, (float)0.25);
    auto [assignments, exemplars, counts] = metric::kmeans(nodes_data, 6, 1000);  // 6 clusters, 1000 max iterations
    std::cout << "assignments:" << std::endl;
    vector_print(assignments, grid_w, grid_h);
    std::cout << std::endl;
    std::cout << "counts:" << std::endl;
    vector_print(counts);
    std::cout << std::endl;
    int matches = 0;
    int errors = 0;
    for (auto i = 0; i < train_dataset.size(); i++)
    {
        auto bmu = som_model.BMU(train_dataset[i]);
        // NOTE(review): this counts a *mismatch* as a match (`!=`), which
        // looks inverted — and since k-means cluster ids are arbitrary,
        // comparing assignments[bmu] + 1 to the file labels directly is
        // dubious anyway. TODO confirm intended semantics; left as-is.
        if (assignments[bmu] + 1 != labels[i])
        {
            matches++;
        }
        else
        {
            errors++;
        }
    }
    std::cout << "matches: " << matches << " errors: " << errors << " accuracy: " << (double) matches / ((double) matches + (double) errors) << std::endl;
    return 0;
}
| 4,815
|
C++
|
.cpp
| 155
| 27.787097
| 155
| 0.60719
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,451
|
PCFA_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/PCFA_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <iostream>
#ifndef M_PI
// MSVC does not define M_PI
#define M_PI 3.14159265358979323846
#endif
#include "metric/mapping.hpp"
#include "metric/utils/visualizer.hpp"
#include "metric/transform/discrete_cosine.hpp"
#include "../../assets/helpers.cpp"
#include <blaze/Blaze.h>
// Column-wise RMSE between two equally-sized matrices, averaged over
// columns: mean over c of sqrt(mean over r of (M1(r,c) - M2(r,c))^2).
// Works with any matrix type exposing rows()/columns()/operator()(r, c)
// (e.g. blaze::DynamicMatrix).
// Fix: matrices are taken by const reference — the original copied both
// arguments on every call.
template <typename MatrixType1, typename MatrixType2>
double mean_square_error(const MatrixType1& M1, const MatrixType2& M2)
{
    assert(M1.columns() == M2.columns() && M1.rows() == M2.rows());
    double overall_sum = 0;
    for (size_t c = 0; c < M1.columns(); c++) {
        double column_sum = 0;
        for (size_t r = 0; r < M1.rows(); r++) {
            const double diff = M1(r, c) - M2(r, c);
            column_sum += diff * diff;  // cheaper than pow(diff, 2), same value
        }
        overall_sum += sqrt(column_sum / M1.rows());  // RMSE of this column
    }
    return overall_sum / M1.columns();
}
int main()
{
std::cout << "PCFA example have started" << std::endl;
std::cout << '\n';
// simple data
std::cout << "\n";
std::cout << "\n";
std::cout << "simple data" << std::endl;
std::cout << '\n';
// // PCFA works with arbitrary Blaze vector or STL container type that has appropriate interface, for example:
// using RecType = blaze::DynamicVector<float, blaze::rowVector>;
// using RecType = blaze::DynamicVector<float, blaze::columnVector>;
// using RecType = std::vector<float>;
using RecType = std::deque<double>;
RecType d0_blaze{0, 1, 2};
RecType d1_blaze{0, 1, 3};
std::vector<RecType> d_train = {d0_blaze, d1_blaze};
auto pcfa0 = metric::PCFA<RecType, void>(d_train, 2);
// auto pcfa = metric::PCFA_factory(d_train, 2); // we also can use factory for autodeduction
auto weights = pcfa0.weights();
auto bias = pcfa0.average();
// model saved to vector and matrix
auto pcfa = metric::PCFA<RecType, void>(weights, bias);
// model leaded, same as pcfa0
RecType d2_blaze{0, 1, 4};
RecType d3_blaze{0, 2, 2};
std::vector<RecType> d_test = {d0_blaze, d2_blaze, d3_blaze};
auto d_compressed = pcfa.encode(d_test);
std::cout << "compressed:\n";
for (size_t i = 0; i < d_compressed.size(); i++) {
for (size_t j = 0; j < d_compressed[i].size(); j++) {
std::cout << d_compressed[i][j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
auto d_restored = pcfa.decode(d_compressed);
std::cout << "restored:\n";
for (size_t i = 0; i < d_restored.size(); i++) {
for (size_t j = 0; j < d_restored[i].size(); j++) {
std::cout << d_restored[i][j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
auto d_eigenmodes = pcfa.eigenmodes();
std::cout << "eigenmodes:\n";
for (size_t i = 0; i < d_eigenmodes.size(); i++) {
for (size_t j = 0; j < d_eigenmodes[i].size(); j++) {
std::cout << d_eigenmodes[i][j] << " ";
}
std::cout << "\n";
}
std::cout << "\n";
std::cout << "type code of compressed: " << typeid(d_compressed).name() << "\n";
std::cout << "type code of restored: " << typeid(d_restored).name() << "\n";
std::cout << "\n";
// end of simple data test
bool visualize = false;
// sine dataset
std::cout << "\n";
std::cout << "\n";
std::cout << "sine dataset" << std::endl;
std::cout << '\n';
visualize = false;
size_t n_freq_steps = 10;
size_t n_slices_per_step = 100;
size_t waveform_length = 64; // 512; //64; // 100;
blaze::DynamicMatrix<double, blaze::columnMajor> SlicesSine(waveform_length, n_freq_steps * n_slices_per_step, 0.0);
blaze::DynamicMatrix<double, blaze::columnMajor> TargetSine(1, n_freq_steps * n_slices_per_step, 0.0);
blaze::DynamicMatrix<double, blaze::columnMajor> TestSlicesSine(waveform_length, n_freq_steps, 0.0);
blaze::DynamicMatrix<double, blaze::columnMajor> TestTargetSine(1, n_freq_steps, 0.0);
double frequenz; // based on original ESN test case code
double phase = 0;
double delta_T = 0.05;
// sine generator
size_t idx = 0;
for (size_t ii = 1; ii <= n_freq_steps; ii++) // frequency change steps
{
frequenz = double(ii) / double(n_freq_steps);
for (size_t i = 0; i < n_slices_per_step;
++i) // slices with same freq and random phases (within each freq step)
{
phase = (double)rand() / RAND_MAX * 0.9 + 0.1;
TargetSine(0, idx) = frequenz; //-0.5; // works for positive values without offset
for (size_t t = 0; t < waveform_length; t++) // draw waveform: 100 points in each slice
{
SlicesSine(t, idx) = sin(2 * M_PI * (frequenz * double(t) * delta_T + phase));
}
idx++;
}
}
idx = 0;
for (size_t i = 1; i <= n_freq_steps; i++) // frequency steps
{
frequenz = double(i) / double(n_freq_steps);
phase = 0; //(double)rand()/RAND_MAX; // 0;
TestTargetSine(0, idx) = frequenz; //-0.5;
for (size_t t = 0; t < waveform_length; t++) // draw waveform: 100 points in each slice
{
TestSlicesSine(t, idx) = sin(2 * M_PI * (frequenz * double(t) * delta_T + phase));
}
idx++;
}
//
// minimal direct linear mapping
visualize = true;
if (visualize) {
mat2bmp::blaze2bmp(SlicesSine, "SlicesSine.bmp");
mat2bmp::blaze2bmp(TestSlicesSine, "TestSlicesSine.bmp");
blaze_dm_to_csv(blaze::DynamicMatrix<double, blaze::rowMajor>(SlicesSine), "training_dataset.csv");
blaze_dm_to_csv(blaze::DynamicMatrix<double, blaze::rowMajor>(TestSlicesSine), "test_data.csv");
}
auto direct_sine = metric::PCFA_col_factory(SlicesSine, 8); // factory deduces type
if (visualize) {
auto avg = direct_sine.average();
mat2bmp::blaze2bmp(avg, "TestSliceSine_averages.bmp");
blaze_dm_to_csv(avg, "TestSliceSine_averages.csv");
}
auto direct_compressed_sine = direct_sine.encode(TestSlicesSine);
if (visualize) {
mat2bmp::blaze2bmp_norm(direct_compressed_sine, "TestSliceSine_compressed.bmp");
blaze_dm_to_csv(direct_compressed_sine, "TestSliceSine_compressed.csv");
}
auto direct_restored_sine = direct_sine.decode(direct_compressed_sine);
if (visualize) {
mat2bmp::blaze2bmp(direct_restored_sine, "TestSliceSine_restored.bmp");
blaze_dm_to_csv(direct_restored_sine, "TestSliceSine_restored.csv");
}
std::cout << "avg error: " << mean_square_error(direct_restored_sine, TestSlicesSine) << "\n";
std::cout << "compare visually restored.bmp to TestSliceSine.bmp\n";
std::cout << "\n";
if (visualize) {
auto Eigenmodes = direct_sine.eigenmodes();
blaze_dm_to_csv(Eigenmodes, "TestSliceSine_eigenmodes.csv");
mat2bmp::blaze2bmp(Eigenmodes, "TestSliceSine_eigenmodes.bmp");
}
//
// with DCT
std::cout << "\n";
std::cout << "\n";
std::cout << "using DCT" << std::endl;
std::cout << '\n';
// turning data to frequence domain: enable to run DirectMapping in frequences
visualize = true;
if (visualize) {
mat2bmp::blaze2bmp(SlicesSine, "SlicesSine_original.bmp");
mat2bmp::blaze2bmp(TestSlicesSine, "TestSlicesSine_original.bmp");
}
blaze::DynamicMatrix<double> TestSlicesSineOriginal = TestSlicesSine; // saved for computation of error
// apply DCT to input
metric::apply_DCT(SlicesSine);
metric::apply_DCT(TestSlicesSine);
// blaze::DynamicMatrix<double> TestSliceSine_DCT_restored = TestSlicesSine;
// metric::apply_DCT(TestSliceSine_DCT_restored, true);
// if (visualize) {
// mat2bmp::blaze2bmp(TestSliceSine_DCT_restored, "TestSlicesSine_DCT_restored.bmp");
// }
// direct linear mapping on spectrum
visualize = false;
if (visualize) {
mat2bmp::blaze2bmp(SlicesSine, "SlicesSine_DCT.bmp");
mat2bmp::blaze2bmp(TestSlicesSine, "TestSlicesSine_DCT.bmp");
}
auto direct_sine_DCT = metric::PCFA_col_factory(SlicesSine, 8);
auto direct_compressed_sine_DCT = direct_sine_DCT.encode(TestSlicesSine);
if (visualize)
mat2bmp::blaze2bmp_norm(direct_compressed_sine_DCT, "TestSliceSine_compressed_DCT.bmp");
auto direct_restored_sine_DCT = direct_sine_DCT.decode(direct_compressed_sine_DCT);
if (visualize)
mat2bmp::blaze2bmp(direct_restored_sine_DCT, "TestSliceSine_restored_DCT.bmp");
visualize = true;
metric::apply_DCT(direct_restored_sine_DCT, true);
if (visualize) {
mat2bmp::blaze2bmp(direct_restored_sine_DCT, "TestSliceSine_restored_unDCT.bmp");
}
std::cout << "with DCT: avg error: " << mean_square_error(direct_restored_sine_DCT, TestSlicesSineOriginal) << "\n";
std::cout << "compare visually TestSliceSine_restored_unDCT.bmp to TestSliceSine_original.bmp\n";
//
// grooves energy data
std::cout << "\n";
std::cout << "\n";
std::cout << "grooves energy data" << std::endl;
std::cout << '\n';
using V = float; // double;
size_t n_features = 8;
auto all_data = read_csv_blaze<V>("assets/PtAll_AllGrooves_energy_5.csv", ","); // all parts all unmixed channels
blaze::DynamicMatrix<V> training_dataset = submatrix(all_data, 0, 1, all_data.rows(), all_data.columns() - 2);
blaze::DynamicMatrix<V> test_data = read_csv_blaze<V>("assets/test_data_input.csv", ",");
mat2bmp::blaze2bmp_norm(training_dataset, "groove_training_dataset.bmp");
mat2bmp::blaze2bmp_norm(test_data, "groove_test_data.bmp");
blaze_dm_to_csv(training_dataset, "groove_training_dataset.csv");
blaze_dm_to_csv(test_data, "groove_test_data.csv");
auto model = metric::PCFA_col_factory(training_dataset, n_features);
auto avg = model.average();
mat2bmp::blaze2bmp_norm(avg, "groove_averages.bmp");
blaze_dm_to_csv(avg, "groove_averages.csv");
auto compressed = model.encode(test_data);
mat2bmp::blaze2bmp_norm(compressed, "groove_compressed.bmp");
blaze_dm_to_csv(compressed, "groove_compressed.csv");
auto restored = model.decode(compressed);
mat2bmp::blaze2bmp_norm(restored, "groove_restored.bmp");
blaze_dm_to_csv(restored, "groove_restored.csv");
// also making feature output for the training dataset
auto all_features = model.encode(training_dataset);
mat2bmp::blaze2bmp_norm(all_features, "groove_all_features.bmp");
blaze_dm_to_csv(all_features, "groove_all_features.csv");
// view contribution of each feature
auto I = blaze::IdentityMatrix<V>(n_features);
for (size_t feature_idx = 0; feature_idx < n_features; ++feature_idx) {
blaze::DynamicMatrix<V> unit_feature = submatrix(I, 0, feature_idx, I.rows(), 1);
auto unit_waveform = model.decode(unit_feature, false);
mat2bmp::blaze2bmp_norm(unit_waveform, "groove_unit_waveform_" + std::to_string(feature_idx) + ".bmp");
blaze_dm_to_csv(unit_waveform, "groove_unit_waveform_" + std::to_string(feature_idx) + ".csv");
}
// same using eigenmodes getter
auto Eigenmodes = model.eigenmodes();
blaze_dm_to_csv(Eigenmodes, "groove_eigenmodes.csv");
//
// row_wise PCFA with trans() on each input and output
std::cout << "\n";
std::cout << "\n";
std::cout << "grooves energy data with trans()" << std::endl;
std::cout << '\n';
auto all_data_r = read_csv_blaze<V>("assets/PtAll_AllGrooves_energy_5.csv", ","); // all parts all unmixed channels
blaze::DynamicMatrix<V> training_dataset_r =
submatrix(all_data_r, 0, 1, all_data_r.rows(), all_data_r.columns() - 2);
blaze::DynamicMatrix<V> test_data_r = read_csv_blaze<V>("assets/test_data_input.csv", ",");
mat2bmp::blaze2bmp_norm(training_dataset_r, "groove_training_dataset_r.bmp");
mat2bmp::blaze2bmp_norm(test_data_r, "groove_test_data_r.bmp");
blaze_dm_to_csv(training_dataset_r, "groove_training_dataset_r.csv");
blaze_dm_to_csv(test_data_r, "groove_test_data_r.csv");
blaze::DynamicMatrix<V, blaze::rowMajor> training_dataset_r_t = trans(training_dataset_r);
auto model_r = metric::PCFA_factory(training_dataset_r_t, n_features);
blaze::DynamicMatrix<V> avg_r_out = blaze::trans(model_r.average_mat());
mat2bmp::blaze2bmp_norm(avg_r_out, "groove_averages_r.bmp");
blaze_dm_to_csv(avg_r_out, "groove_averages_r.csv");
auto compressed_r = model_r.encode(trans(test_data_r));
blaze::DynamicMatrix<V> compressed_r_out = trans(compressed_r);
mat2bmp::blaze2bmp_norm(compressed_r_out, "groove_compressed_r.bmp");
blaze_dm_to_csv(compressed_r_out, "groove_compressed_r.csv");
auto restored_r = model_r.decode(compressed_r);
blaze::DynamicMatrix<V> restored_r_out = trans(restored_r);
mat2bmp::blaze2bmp_norm(restored_r_out, "groove_restored_r.bmp");
blaze_dm_to_csv(restored_r_out, "groove_restored_r.csv");
// also making feature output for the training dataset
auto all_features_r = model_r.encode(trans(training_dataset_r));
blaze::DynamicMatrix<V> all_features_r_out = trans(all_features_r);
mat2bmp::blaze2bmp_norm(all_features_r_out, "groove_all_features_r.bmp");
blaze_dm_to_csv(all_features_r_out, "groove_all_features_r.csv");
std::cout << "\n\n\n";
return 0;
}
| 12,554
|
C++
|
.cpp
| 283
| 41.80212
| 117
| 0.695924
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,452
|
DSPCC_vibration_example.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/DSPCC_vibration_example.cpp
|
#include <iostream>
//#include <stack>
#include "metric/mapping/DSPCC.hpp"
#include "examples/mapping_examples/assets/helpers.cpp" // for .csv reader
#include "metric/utils/visualizer.hpp"
#include "metric/utils/metric_err.hpp"
#include "metric/distance/k-related/Standards.hpp" // we use Euclidean metric for mean squared error evaluation
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<ValueType, A1>, A2> transpose_timeseries(
        const Container<Container<ValueType, A1>, A2> & ts) // the workaround thing. TODO remove and update csv reader this way
{
    // Transposes a table of timeseries: ts[i][j] (value i of series j) becomes
    // output[j][i]. Assumes every record in ts has the same length as ts[0].
    // Improvements over the previous version: ts is taken by const reference
    // (it used to be deep-copied on every call) and an empty input no longer
    // hits the undefined ts[0] access.
    auto output = Container<Container<ValueType, A1>, A2>();
    if (ts.empty())
        return output; // nothing to transpose
    size_t n_values = ts[0].size();
    for (size_t j = 0; j < n_values; ++j) // one (initially empty) output series per value
        output.push_back(Container<ValueType, A1>());
    for (size_t i = 0; i < ts.size(); ++i) // distribute each input record across the series
    {
        for (size_t j = 0; j < n_values; ++j)
            output[j].push_back(ts[i][j]);
    }
    return output;
}
template <typename Container>
// Prints a 2D table to stdout: one line per record, values separated by
// single spaces. Assumes every record has the same length as table[0].
// Fixed: the table is now taken by const reference instead of being copied.
void print_table(const Container & table) {
    for (size_t rec_idx = 0; rec_idx < table.size(); ++rec_idx) {
        for (size_t el_idx = 0; el_idx < table[0].size(); ++el_idx)
            std::cout << table[rec_idx][el_idx] << " ";
        std::cout << "\n";
    }
}
template <
    template <typename, typename> class OuterContainer,
    typename OuterAllocator,
    template <typename, typename> class InnerContainer,
    typename InnerAllocator,
    typename ValueType >
// Despite the name, this computes the average over rows of the per-row RMSE:
//   mean_over_rows( sqrt( mean_over_cols( (M1[r][c] - M2[r][c])^2 ) ) )
// Assumes M1 and M2 have identical dimensions and that every row has the
// same length as M1[0]. Returns 0 for an empty input (previously 0/0 = NaN).
double mean_square_error(
    const OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> & M1,
    const OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> & M2
)
{
    if (M1.empty() || M1[0].empty())
        return 0;
    double overall_sum = 0;
    for (size_t row = 0; row < M1.size(); row++)
    {
        double row_sum = 0;
        for (size_t col = 0; col < M1[0].size(); col++) {
            // diff * diff instead of pow(diff, 2): same value, avoids a libm call per element
            const double diff = M1[row][col] - M2[row][col];
            row_sum += diff * diff;
        }
        overall_sum += std::sqrt(row_sum / M1[0].size());
    }
    return overall_sum / M1.size();
}
// Pretty-prints the six-element statistics tuple produced by
// normalized_err_stats: in order, average and stddev of the original
// waveform norms, of the absolute errors, and of the normalized errors.
void print_stats(std::tuple<double, double, double, double, double, double> stats) {
    const auto & [avg_norm, norm_dev, avg_abs, abs_dev, avg_rel, rel_dev] = stats;
    std::cout << " average norm of original waveforms : " << avg_norm << "\n";
    std::cout << " original waveform norm stddev : " << norm_dev << "\n";
    std::cout << " average absolute error : " << avg_abs << "\n";
    std::cout << " stddev of absolute error : " << abs_dev << "\n";
    std::cout << " average normalized error : " << avg_rel << "\n";
    std::cout << " stddev of normalized error : " << rel_dev << "\n";
    std::cout << "\n";
}
// Driver for the DSPCC vibration example: first a small sanity check of the
// error metrics, then a sweep over the freq/time mix parameter of DSPCC on a
// vibration dataset, reporting normalized errors for three compression paths.
int main()
{
    // RMSE and metric error check
    //*
    {
        // Two identical 2x4 tables, then one value perturbed, so the error
        // statistics and the average RMSE below must be small but nonzero.
        using R = std::vector<double>;
        R d0 {0, 1, 2, 3};
        R d1 {0, 1, 2, 3};
        std::vector<R> d = {d0, d1};
        auto d_upd = d;
        d_upd[0][3] = 5; // single deviation: 3 -> 5
        std::cout << std::endl <<"d:" << std::endl;
        print_table(d); // some normalization issue when using DCT persists..
        std::cout << std::endl << "d_upd:" << std::endl;
        print_table(d_upd); // some normalization issue when using DCT persists..
        print_stats(normalized_err_stats<metric::Euclidean<double>>(d, d_upd));
        std::cout << "average RMSE = " << mean_square_error(d, d_upd) << std::endl;
    }
    // */
    // vibration example
    //*
    float magnitude = 15; // brightness scaling factor for the .bmp dumps
    auto raw_vdata = read_csv_num<double>("assets/vibration_smaller_3.csv", ",");
    //auto raw_vdata = read_csv_num<double>("assets/vibration.csv", ",");
    // auto raw_vdata = read_csv_num<double>("assets/vibration_smaller_3_no_peaks.csv", ",");
    // auto raw_vdata = read_csv_num<double>("assets/vibration_smaller_3_added_peaks.csv", ",");
    auto vdata = transpose_timeseries(raw_vdata); // csv rows -> one container per timeseries
    mat2bmp::blaze2bmp_norm(vdata, "input.bmp", magnitude);
    // std::stack<size_t> length_stack;
    // auto decomposed = metric::sequential_DWT(vdata[0], length_stack, 5, 8);
    // auto restored = metric::sequential_iDWT(decomposed, length_stack, 5);
    // return 0;
    // per-mix normalized errors collected for the summary table at the end
    std::vector<double> errs_pre, errs_tf, errs_full;
    bool visualize = false;
    // sweep the share of frequency-domain features; 0.25 is exactly
    // representable in float, so mix hits 0.5 and 1 exactly
    for (float mix = 0; mix<=1; mix+=0.25) {
    //float mix = 0.5; {
        if (mix == 0.5)
            visualize = true; // dump images/csv only for the middle mix value
        else
            visualize = false;
        auto vDSPCC = metric::DSPCC<std::vector<double>, void>(vdata, 10, 16, mix, 10);
        // dataset,
        // number of features of freq and time PCFAs,
        // DWT subbands, share of freq features in the mixed code,
        // top PCFA features
        // path 1: only the time/frequency PCFA stage
        auto v_encoded = vDSPCC.time_freq_PCFA_encode(vdata);
        auto v_decoded = vDSPCC.time_freq_PCFA_decode(v_encoded);
        if (visualize) {
            mat2bmp::blaze2bmp_norm(v_decoded, "decoded.bmp", magnitude);
            write_csv(transpose_timeseries(v_decoded), "decoded.csv", ";");
        }
        std::cout << std::endl << " subband_length: " << vDSPCC.get_subband_size() << std::endl;
        std::cout << "original record length: " << vdata[0].size() << std::endl;
        std::cout << " decoded record length: " << v_decoded[0].size() << std::endl;
        std::cout << std::endl << "decompression with only time-freq PSFAs done, decoded data saved" << std::endl;
        auto err_tf = normalized_err_stats<metric::Euclidean<double>>(vdata, v_decoded);
        print_stats(err_tf);
        errs_tf.push_back(std::get<4>(err_tf)); // element 4: average normalized error
        //std::cout << std::endl << "computing pre-encoded and pre_decoded vibration data..." << std::endl;
        // path 2: pre-compression stage only (test wrappers)
        auto v_pre_encoded = vDSPCC.test_public_wrapper_encode(vdata);
        auto v_pre_decoded = vDSPCC.test_public_wrapper_decode(v_pre_encoded);
        if (visualize) {
            write_csv(transpose_timeseries(v_pre_decoded), "pre_decoded.csv", ";");
            mat2bmp::blaze2bmp_norm(v_pre_decoded, "pre_decoded.bmp", magnitude);
        }
        std::cout << std::endl << "test of pre-compression done, pre-decoded data saved" << std::endl;
        auto err_pre = normalized_err_stats<metric::Euclidean<double>>(vdata, v_pre_decoded);
        print_stats(err_pre);
        errs_pre.push_back(std::get<4>(err_pre));
        // path 3: full DSPCC encode/decode round trip
        auto v_encoded2 = vDSPCC.encode(vdata);
        auto v_decoded2 = vDSPCC.decode(v_encoded2);
        if (visualize) {
            mat2bmp::blaze2bmp_norm(v_encoded2, "encoded2.bmp", magnitude);
            write_csv(transpose_timeseries(v_encoded2), "encoded2.csv", ";");
            mat2bmp::blaze2bmp_norm(v_decoded2, "decoded2.bmp", magnitude);
            write_csv(transpose_timeseries(v_decoded2), "decoded2.csv", ";");
        }
        std::cout << std::endl << "completely encoded data saved" << std::endl;
        auto err_full = normalized_err_stats<metric::Euclidean<double>>(vdata, v_decoded2);
        print_stats(err_full);
        errs_full.push_back(std::get<4>(err_full));
        std::cout << "average RMSE = " << mean_square_error(v_decoded2, vdata) << std::endl;
        std::cout << std::endl;
    }
    // summary: one row per mix value, columns = the three compression paths
    std::cout << std::endl << "Overall results:" << std::endl << " pre\t tf\t full" << std::endl;
    for (size_t i = 0; i<errs_full.size(); ++i) {
        std::cout << errs_pre[i] << "\t" << errs_tf[i] << "\t" << errs_full[i] << std::endl;
    }
    //*/
    return 0;
}
| 7,600
|
C++
|
.cpp
| 155
| 42.070968
| 119
| 0.605599
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,453
|
DSPCC_example 2.cpp
|
metric-space-ai_metric/examples/mapping_examples/continious_mappers/clustering/DSPCC_example 2.cpp
|
#include <iostream>
//#include <stack>
#include "metric/mapping/DSPCC.hpp"
#include "examples/mapping_examples/assets/helpers.cpp" // for .csv reader
#include "metric/utils/visualizer.hpp"
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<ValueType, A1>, A2> transpose_timeseries(
        const Container<Container<ValueType, A1>, A2> & ts) // the workaround thing. TODO remove and update csv reader this way
{
    // Transposes a table of timeseries: ts[i][j] (value i of series j) becomes
    // output[j][i]. Assumes every record in ts has the same length as ts[0].
    // Improvements over the previous version: ts is taken by const reference
    // (it used to be deep-copied on every call) and an empty input no longer
    // hits the undefined ts[0] access.
    auto output = Container<Container<ValueType, A1>, A2>();
    if (ts.empty())
        return output; // nothing to transpose
    size_t n_values = ts[0].size();
    for (size_t j = 0; j < n_values; ++j) // one (initially empty) output series per value
        output.push_back(Container<ValueType, A1>());
    for (size_t i = 0; i < ts.size(); ++i) // distribute each input record across the series
    {
        for (size_t j = 0; j < n_values; ++j)
            output[j].push_back(ts[i][j]);
    }
    return output;
}
template <typename Container>
// Prints a 2D table to stdout: one line per record, values separated by
// single spaces. Assumes every record has the same length as table[0].
// Fixed: the table is now taken by const reference instead of being copied.
void print_table(const Container & table) {
    for (size_t rec_idx = 0; rec_idx < table.size(); ++rec_idx) {
        for (size_t el_idx = 0; el_idx < table[0].size(); ++el_idx)
            std::cout << table[rec_idx][el_idx] << " ";
        std::cout << "\n";
    }
}
// Driver for the DSPCC vibration example: encodes/decodes a vibration
// dataset with DSPCC and dumps intermediate results as .bmp and .csv files.
int main()
{
    // small dataset
    /*
    using RecType = std::vector<double>;
    RecType d0 {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18};
    RecType d1 {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 100};
    std::vector<RecType> d = {d0, d1};
    float freq_time_balance = 1; // try values from 0 to 1 (e g 0, 0.5, 1) to get the following portions of freq-domain: 0, 4/9, 8/9
    auto bundle = metric::DSPCC1<RecType, void>(d, 2, 4, freq_time_balance, 0.5);
    //auto pre_encoded = bundle.test_public_wrapper_encode(d);
    //auto pre_decoded = bundle.test_public_wrapper_decode(pre_encoded);
    auto encoded = bundle.encode(d);
    auto decoded = bundle.decode(encoded);
    std::cout << "\noriginal:\n";
    print_table(d);
    std::cout << "\ndecoded:\n";
    print_table(decoded); // some normalization issue when using DCT persists..
    std::cout << "\nmix_index: " << bundle.get_mix_idx() << "\n";
    std::cout << "\nsimple test done\n";
    //return 0;
    //*/
    // vibration example
    //*
    float magnitude = 80; // brightness scaling factor for the .bmp dumps
    auto raw_vdata = read_csv_num<double>("vibration_smaller.csv", ",");
    auto vdata = transpose_timeseries(raw_vdata); // csv rows -> one container per timeseries
    mat2bmp::blaze2bmp_norm(vdata, "input.bmp", magnitude);
    // std::stack<size_t> length_stack;
    // auto decomposed = metric::sequential_DWT(vdata[0], length_stack, 5, 8);
    // auto restored = metric::sequential_iDWT(decomposed, length_stack, 5);
    // return 0;
    // DSPCC args (per the sibling vibration example): dataset, number of
    // PCFA features, DWT subbands, share of freq features, top PCFA features
    auto vDSPCC = metric::DSPCC<std::vector<double>, void>(vdata, 8, 8, 0.1, 0);
    // full encode/decode round trip
    auto v_encoded = vDSPCC.encode(vdata);
    auto v_decoded = vDSPCC.decode(v_encoded);
    mat2bmp::blaze2bmp_norm(v_decoded, "decoded.bmp", magnitude);
    write_csv(transpose_timeseries(v_decoded), "decoded.csv", ";");
    std::cout << "\nmix_index: " << vDSPCC.get_crop_idx() << "\n";
    std::cout << "record length: " << vdata[0].size() << "\n";
    std::cout << "\nmain vibration test done, decoded data saved\n";
    std::cout << "\ncomputing pre-encoded and pre_decoded vibration data...\n";
    // pre-compression stage only (test wrappers)
    auto v_pre_encoded = vDSPCC.test_public_wrapper_encode(vdata);
    auto v_pre_decoded = vDSPCC.test_public_wrapper_decode(v_pre_encoded);
    write_csv(transpose_timeseries(v_pre_decoded), "pre_decoded.csv", ";");
    mat2bmp::blaze2bmp_norm(v_pre_decoded, "pre_decoded.bmp", magnitude);
    std::cout << "\ndone, pre_decoded data saved\n";
    //*/
    return 0;
}
| 3,669
|
C++
|
.cpp
| 79
| 41.329114
| 132
| 0.635696
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,454
|
wavelet_example.cpp
|
metric-space-ai_metric/examples/transform_examples/wavelet_example.cpp
|
#include <vector>
#include <iostream>
//#include "metric/transform.hpp"
//#include "transform/wavelet.hpp"
#include "metric/transform/wavelet.hpp"
#include "assets/helpers.cpp"
#include "metric/utils/visualizer.hpp"
//#include "transform/helper_functions.cpp" // TODO remove
// Demonstrates the wavelet helpers: 1D dwt/idwt round trip on an impulse,
// 2D dwt2/idwt2 round trip on a small impulse image, and a round trip on
// the cameraman test image with the four subbands written as bitmaps.
int main() {
    //std::vector<double> data = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0};
    std::vector<double> data = {0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    //std::vector<double> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    int wavelet = 4; // wavelet selector passed to dwt/idwt (see wavelet.hpp for valid values)
    auto encoded = wavelet::dwt(data, wavelet);
    // idwt needs the original length to reconstruct exactly
    auto decoded = wavelet::idwt(std::get<0>(encoded), std::get<1>(encoded), wavelet, data.size());
    std::cout << "\n1d:\n";
    std::cout << "splitted L:\n";
    auto encoded_l = std::get<0>(encoded); // first ("L") component of the dwt output
    for (size_t i = 0; i<encoded_l.size(); ++i)
        std::cout << encoded_l[i] << "\n";
    std::cout << "restored:\n";
    for (size_t i = 0; i<decoded.size(); ++i)
        std::cout << decoded[i] << "\n";
    // 2D case: a 14x10 image with a single impulse
    using Container = std::deque<double>;
    Container zeros = Container(10, 0);
    Container peak = zeros;
    peak[4] = 1; // single impulse
    std::vector<Container> data2d = {zeros, zeros, zeros, zeros, peak, zeros, zeros, zeros, zeros, zeros, zeros, zeros, zeros, zeros};
    //std::vector<Container> data2d = {zeros, peak}; // TODO add data size control
    std::cout << "\n2d:\n";
    std::cout << "input:\n";
    for (size_t i = 0; i<data2d.size(); ++i) {
        for (size_t j = 0; j<data2d[0].size(); ++j)
            std::cout << data2d[i][j] << " ";
        std::cout << "\n";
    }
    auto splitted = wavelet::dwt2(data2d, wavelet); // 4-tuple of subbands (LL first, per the output names below)
    std::cout << "slpitted LL:\n";
    for (size_t i = 0; i<std::get<0>(splitted).size(); ++i) {
        for (size_t j = 0; j<std::get<0>(splitted)[0].size(); ++j)
            std::cout << std::get<0>(splitted)[i][j] << " ";
        std::cout << "\n";
    }
    auto restored = wavelet::idwt2(std::get<0>(splitted), std::get<1>(splitted), std::get<2>(splitted), std::get<3>(splitted), wavelet, data2d.size(), data2d[0].size());
    std::cout << "restored:\n";
    for (size_t i = 0; i<restored.size(); ++i) {
        for (size_t j = 0; j<restored[0].size(); ++j)
            std::cout << restored[i][j] << " ";
        std::cout << "\n";
    }
    // cameraman image test
    auto cm = read_csv_num<double>("assets/cameraman.csv", ",");
    auto cm_splitted = wavelet::dwt2(cm, 4); // tuple overload of idwt2 below takes the whole 4-tuple
    auto cm_restored = wavelet::idwt2(cm_splitted, 4, cm.size(), cm[0].size());
    // 1/256 scaling: presumably maps 8-bit pixel values into [0, 1] for the bmp writer — TODO confirm
    mat2bmp::blaze2bmp(cm_restored, "cm_restored.bmp", 1.0/256.0);
    mat2bmp::blaze2bmp(std::get<0>(cm_splitted), "cm_ll.bmp", 1.0/256.0);
    mat2bmp::blaze2bmp(std::get<1>(cm_splitted), "cm_lh.bmp", 1.0/256.0);
    mat2bmp::blaze2bmp(std::get<2>(cm_splitted), "cm_hl.bmp", 1.0/256.0);
    mat2bmp::blaze2bmp(std::get<3>(cm_splitted), "cm_hh.bmp", 1.0/256.0);
    // showing reference file
    //auto cm_splitted_ref = read_csv_num<double>("assets/cm_ref_8.csv", ",");
    //mat2bmp::blaze2bmp(cm_splitted_ref, "cm_splitted_ref.bmp", 1.0/256.0);
    return 0;
}
| 3,088
|
C++
|
.cpp
| 64
| 42.609375
| 169
| 0.578193
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,455
|
donut_example.cpp
|
metric-space-ai_metric/examples/transform_examples/donut_example.cpp
|
#include "metric/transform/distance_potential_minimization.hpp"
//#include "donuts_test_io.hpp"
#include "assets/helpers.cpp"
#include "donuts.hpp"
#include <iostream>
#include <filesystem>
//#include "assets/helpers.cpp" // only for csv reader called in debug f() function
//#include "metric/utils/visualizer.hpp" // for only blaze2bmp_norm
//void filter_donut(std::string filename) {
// auto donut = read_png_donut<double>(filename);
// vector2bmp(matrix2vv(donut), filename + ".input.bmp");
// std::cout << "processing " << filename << "\n";
// donut = radial_diff(donut);
// auto mask = weightingMask<double>(donut.rows(), donut.columns(), donut.columns()/3, 6);
// donut = mask % donut;
// vector2bmp(matrix2vv(donut), filename + ".filtered.bmp");
//}
// Runs the donut (ellipse) fitting pipeline on one CSV image: radial-diff
// filtering, ring-shaped weighting mask, GVF field, hysteresis ellipse fit.
// Writes every intermediate stage and the final overlay as .bmp files named
// after the input, and prints the fitted ellipse parameters and timing.
void fit_donut(std::string filename)
{
    //auto donut = read_png_donut<double>("assets/donuts/crop/crop_2020-07-27_16_23_01_776_donut1_128.png");
    //auto donut = read_png_donut<double>(filename);
    auto donut = read_csv_blaze<double>(filename, ",");
    vector2bmp(matrix2vv(donut), filename + ".input.bmp");
    std::cout << "processing " << filename << ": " << donut.columns() << "*" << donut.rows() << "\n";
    //blaze::DynamicMatrix<double> debug_out = blaze::submatrix(donut, 0, 0, 10, 10);
    //std::cout << "input:\n" << debug_out << "\n";
    auto donut_input = donut; // unfiltered copy, kept for painting the result overlay at the end
    donut = radial_diff(donut);
    vector2bmp(matrix2vv(donut), filename + ".filtered.bmp");
    //debug_out = blaze::submatrix(donut, 0, 0, 10, 10);
    //std::cout << "filtered:\n" << debug_out << "\n";
    //auto mask = weightingMask<double>(donut.rows(), donut.columns(), donut.columns()/3, 6);
    auto mask = weightingMask<double>(donut.rows(), donut.columns(), donut.columns()*0.4, 15); // new donuts
    donut = mask % donut; // Blaze operator%: element-wise (Schur) product — applies the ring mask
    vector2bmp(matrix2vv(mask), filename + ".mask_outer.bmp");
    //mask = weightingMask<double>(128, 128, 10, 2);
    //vector2bmp(matrix2vv(mask), "mask_inner.bmp");
    vector2bmp(matrix2vv(donut), filename + ".mask_applied.bmp");
    // gvf: presumably gradient vector flow field (u, v) — see distance_potential_minimization.hpp
    auto [u, v] = metric::DPM_detail::gvf(donut, 0.1, 1, 10);
    vector2bmp(matrix2vv(u), filename + ".u.bmp");
    vector2bmp(matrix2vv(v), filename + ".v.bmp");
    //auto donut = read_png_donut<double>("assets/donuts/crop/crop_2020-07-27_16_23_01_776_donut1.png");
    //auto donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_128.png");
    //auto donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_256.png");
    //size_t steps = 200;
    //std::vector<double> sigma = {50,30,15,5};
    size_t steps = 50; //1000; // 20;
    std::vector<double> sigma = {2}; //{1.75}; //{2, 1.25}; //{5, 2}; // {15, 2}
    // double init_x = donut.columns() / 2;
    // double init_y = donut.rows() / 2;
    // double init_r = donut.columns() / 3;
    // initial ellipse guess: centered circle with radius 85% of the half-width
    double init_x = donut.columns() / 2;
    double init_y = donut.rows() / 2;
    double init_r = donut.columns() / 2 * 0.85;
    auto blurred = metric::DPM_detail::gaussianBlur(donut, sigma[0]);
    vector2bmp(matrix2vv(blurred), filename + ".blurred.bmp");
    auto t1 = std::chrono::steady_clock::now();
    auto result = metric::fit_hysteresis(donut, init_x, init_y, init_r, steps, sigma);//, 0.2, 1e-8);
    auto t2 = std::chrono::steady_clock::now();
    // result layout, as printed below: {xc, yc, a, b, phi}
    std::cout << "fitting result:\n xc = " << result[0] << " yc = " << result[1] << " a = " << result[2] << " b = " << result[3]
              << " phi = " << result[4] << std::endl;
    std::cout << "initial guess:\n xc = " << init_x << " yc = " << init_y << " a = " << init_r << " b = " << init_r
              << " phi = " << 0 << std::endl;
    std::cout << " (Overall time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000
              << " s)" << std::endl;
    //vector2bmp(matrix2vv(donut), filename + "input_filtered.bmp");
    blaze::DynamicMatrix<double> donut_painted = donut;
    // rasterize the fitted ellipse; points[0] = x coords, points[1] = y coords (per the indexing below)
    auto points = metric::DPM_detail::ellipse2grid(
        donut_painted.rows(), donut_painted.columns(),
        result[0], result[1],
        result[2], result[3],
        result[4]
    );
    // paint the fitted ellipse with value -1 on both the filtered and original images
    for (size_t i = 0; i < points[0].size(); ++i) {
        donut_painted(points[1][i], points[0][i]) = -1;
        donut_input(points[1][i], points[0][i]) = -1;
    }
    auto init_points = metric::DPM_detail::ellipse2grid(
        donut_painted.rows(), donut_painted.columns(),
        init_x, init_y,
        init_r, init_r,
        0
    );
    // paint the initial-guess circle with -0.5; out-of-bounds points are logged instead of painted
    for (size_t i = 0; i < init_points[0].size(); ++i) {
        if (init_points[0][i] < donut_input.columns() && init_points[1][i] < donut_input.rows()) {
            donut_painted(init_points[1][i], init_points[0][i]) = -0.5;
            donut_input(init_points[1][i], init_points[0][i]) = -0.5;
        } else {
            std::cout << init_points[0][i] << " " << init_points[1][i] << "\n";
        }
    }
    vector2bmp(matrix2vv(donut_painted), filename + ".fitting_result.bmp");
    vector2bmp(matrix2vv(donut_input), filename + ".fitting_result_on_filtered.bmp");
}
//* donut images
// Runs the ellipse-fitting pipeline on the two sample donut images
// (stored as CSV matrices).
int main() {
    const std::vector<std::string> donut_files = {
        "assets/donuts/crop/donut1.csv",
        "assets/donuts/crop/donut2.csv",
    };
    for (const auto & path : donut_files)
        fit_donut(path);
    return 0;
}
//*/
/* ellipse2grid test
int main() {
auto ellipse = metric::DPM_detail::ellipse2grid(50, 50, 24, 15.7, 5.3, 10.5, -1.1);
for (auto v : ellipse) {
std::cout << v << "\n length: " << v.size() << "\n";
}
return 0;
}
//*/
/* gvf & torsioin test, fits ok
int main() {
blaze::DynamicMatrix<double> I = {
{1, 0, 0, 0, 0 ,0, 0, 0, 0, 1},
{0, 8, 2, 0, 0 ,0, 0, 0, 7, 0},
{0, 0, 2, 0, 0 ,0, 0, 0, 0, 0},
{0, 0, 2, 5, 0 ,0, 0, 0, 0, 0},
{0, 0, 3, 6, 2 ,0, 0, 0, 0, 0},
{1, 0, 2, 4, 1 ,0, 0, 1, 0, 0},
{0, 0, 2, 0, 0 ,0, 1, 2, 1, 0},
{0, 0, 1, 0, 0 ,1, 2, 3, 2, 1},
{0, 5, 0, 0, 0 ,0, 1, 2, 6, 0},
{1, 0, 2, 0, 0 ,0, 0, 1, 0, 1}
};
auto [u, v] = metric::DPM_detail::gvf(I, 0.1, 1, 10);
std::cout << "u:\n" << u << "v:\n" << v << "\n";
std::vector<blaze::DynamicVector<double>> x_y_theta = metric::DPM_detail::ellipse2grid(10, 10, 5, 5, 2, 3, 0.1);
std::cout << "x:\n" << x_y_theta[0] << "y:\n" << x_y_theta[1] << "theta:\n" <<x_y_theta[2] << "\n";
double torsion = metric::DPM_detail::torsion_moment(u, v, x_y_theta[0], x_y_theta[1], x_y_theta[2], 5, 5, 0.1);
std::cout << "torsion moment: " << torsion << "\n";
return 0;
}
// */
/*
int main() { // convert single file to csv, png reader fails!
auto donut = read_png_donut<double>("test01.png");
blaze::DynamicMatrix<double> fragm = blaze::submatrix(donut, 0, 0, 20, 20);
std::cout << fragm;
blaze_dm_to_csv(fragm, "fragm01.csv");
blaze_dm_to_csv(donut, "test01.csv");
// auto donut = read_png_donut<double>("assets/donuts/crop/donut1.png");
// blaze_dm_to_csv(donut, "assets/donuts/crop/donut1.csv");
// donut = read_png_donut<double>("assets/donuts/crop/donut2.png");
// blaze_dm_to_csv(donut, "assets/donuts/crop/donut2.csv");
// donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_128.png");
// blaze_dm_to_csv(donut, "assets/donuts/crop/donut_6_radial_outer_128.csv");
// donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_256.png");
// blaze_dm_to_csv(donut, "assets/donuts/crop/donut_6_radial_outer_256.csv");
return 0;
}
// */
/* batch
int main() {
//std::string path = "assets/donuts/crop/crop256";
std::string path = "assets/donuts/crop/crop128";
for (const auto & entry : std::filesystem::directory_iterator(path)) {
//std::cout << entry.path() << std::endl;
if (entry.path().extension() == ".png")
fit_donut(entry.path());
//filter_donut(entry.path());
}
return 0;
}
// */
//// fit_ellipse by parts - for debugging
//int f() { // function for debug purpose
////int main() {
//// auto donut = read_png_donut<double>("assets/donuts/crop/crop_2020-07-27_16_23_01_776_donut1_128.png");
//// donut = radial_diff(donut);
//// auto mask = weightingMask<double>(donut.rows(), donut.columns(), donut.columns()/3, 6);
//// donut = mask % donut;
// /* // generate & save ring
// auto ring = weightingMask<double>(128 - 7, 128 - 7, 40, 6);
// blaze::DynamicMatrix<double> donut (128, 128, 0);
// blaze::submatrix(donut, 7, 7, 128 - 7, 128 - 7) = ring;
// blaze_dm_to_csv(donut, "ring.csv");
// vector2bmp(matrix2vv(donut), "ring.bmp");
// // */
// auto donut = read_csv_blaze<double>("test01.csv", ",");
// //vector2bmp(matrix2vv(mask), "mask_outer.bmp");
// //mask = weightingMask<double>(128, 128, 10, 2);
// //vector2bmp(matrix2vv(mask), "mask_inner.bmp");
// //vector2bmp(matrix2vv(donut), "mask_applied.bmp");
// //auto donut = read_png_donut<double>("assets/donuts/crop/crop_2020-07-27_16_23_01_776_donut1.png");
// //auto donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_128.png");
// //auto donut = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_256.png");
// //size_t steps = 200;
// //std::vector<double> sigma = {50,30,15,5};
// //size_t steps = 1000; // 20;
// std::vector<double> sigma = {2}; //{1.75}; //{2, 1.25}; //{5, 2}; // {15, 2}
// double init_x = donut.columns() / 2;
// double init_y = donut.rows() / 2;
// double init_r = donut.columns() / 3;
//// std::cout << "initial ellipse position:\n xc = " << init_x << " yc = " << init_y << " a = " << init_r << " b = " << init_r
//// << " phi = " << 0 << std::endl;
// //* // padding & gradient tests
//// blaze::DynamicMatrix<double> A {{1, 2, 3, 4, 5}, {6, 7, 8, 9, 10}, {11, 12, 13, 14, 15}, {16, 17, 18, 19, 20}};
//// //blaze::DynamicMatrix<double> A {{0, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 0, 0}};
//// auto B = metric::DPM_detail::addPad(A);
//// std::cout << A << "\n";
//// std::cout << B << "\n";
//// //auto G = metric::DPM_detail::gradient(A); //B);
//// //std::cout << std::get<0>(G) << "\n" << std::get<1>(G) << "\n";
//// metric::DPM_detail::updatePad(A);
//// std::cout << A << "\n";
//// metric::DPM_detail::updatePad(B);
//// std::cout << B << "\n";
//// blaze::submatrix(B, 1, 1, 4, 5) = {{51, 52, 53, 54, 55}, {56, 57, 58, 59, 60}, {61, 62, 63, 64, 65}, {66, 67, 68, 69, 70}};
//// std::cout << B << "\n";
//// metric::DPM_detail::updatePad(B);
//// std::cout << B << "\n";
//// auto C = metric::DPM_detail::removePad(B);
//// std::cout << C << "\n";
// // */
// /* // GVF test
// blaze::DynamicMatrix<double> I = {
// {1, 0, 0, 0, 0 ,0, 0, 0, 0, 1},
// {0, 8, 2, 0, 0 ,0, 0, 0, 7, 0},
// {0, 0, 2, 0, 0 ,0, 0, 0, 0, 0},
// {0, 0, 2, 5, 0 ,0, 0, 0, 0, 0},
// {0, 0, 3, 6, 2 ,0, 0, 0, 0, 0},
// {1, 0, 2, 4, 1 ,0, 0, 1, 0, 0},
// {0, 0, 2, 0, 0 ,0, 1, 2, 1, 0},
// {0, 0, 1, 0, 0 ,1, 2, 3, 2, 1},
// {0, 5, 0, 0, 0 ,0, 1, 2, 6, 0},
// {1, 0, 2, 0, 0 ,0, 0, 1, 0, 1}
// };
// auto [h_1, v_1] = metric::DPM_detail::gvf(I, 0.1, 1, 10);
// std::cout << h_1 << "\n" << v_1 << "\n";
// return 0;
//// // original code
//// blaze::DynamicMatrix<double> f(I);
//// size_t m = f.rows();
//// size_t n = f.columns();
//// // normalization
//// auto fmin = blaze::min(f);
//// auto fmax = blaze::max(f);
//// if (fmax <= fmin) {
//// std::cout << "Error: constant Input Matrix." << std::endl;
//// }
//// for (size_t i = 0; i < m; i++) {
//// for (size_t j = 0; j < n; j++) {
//// f(i, j) = (f(i, j) - fmin) / (fmax - fmin);
//// }
//// }
//// auto f2 = metric::DPM_detail::addPad(f);
//// //std::cout << f2 << "\n";
//// auto [fx, fy] = metric::DPM_detail::gradient(f2);
//// //std::cout << fx << "\n" << fy << "\n";
//// // originl code
//// // square of magnitude
//// blaze::DynamicMatrix<double> fxy_square((m + 2), (n + 2));
//// for (size_t i = 0; i < m + 2; i++) {
//// for (size_t j = 0; j < n + 2; ++j) {
//// fxy_square(i, j) = fx(i, j) * fx(i, j) + fy(i, j) * fy(i, j);
//// }
//// }
//// blaze::DynamicMatrix<double> u1(fx);
//// blaze::DynamicMatrix<double> v1(fy);
//// //blaze::DynamicMatrix<double> Lu1((m + 2), (n + 2));
//// //blaze::DynamicMatrix<double> Lv1((m + 2), (n + 2));
//// blaze::DynamicMatrix<double> Lu1;//((m + 2), (n + 2), 0);
//// blaze::DynamicMatrix<double> Lv1;//((m + 2), (n + 2), 0);
//// //for (size_t it = 0; it < iter; it++) {
//// for (size_t it = 0; it < 1; it++) {
//// metric::DPM_detail::updatePad(u1);
//// metric::DPM_detail::updatePad(v1);
//// Lu1 = metric::DPM_detail::laplacian(u1);
//// Lv1 = metric::DPM_detail::laplacian(v1);
//// for (size_t i = 0; i < (m + 2); i++) {
//// for (size_t j = 0; j < (n + 2); ++j) {
//// u1(i, j) = u1(i, j) + alpha * (mu * Lu1(i, j) - fxy_square(i, j) * (u1(i, j) - fx(i, j)));
//// v1(i, j) = v1(i, j) + alpha * (mu * Lv1(i, j) - fxy_square(i, j) * (v1(i, j) - fy(i, j)));
//// }
//// }
//// }
//// //std::cout << fxy_square << "\n";
//// std::cout << u1 << "\n";
//// std::cout << v1 << "\n";
// // */
// /* // mask test
// auto mask = weightingMask<double>(donut.rows(), donut.columns(), donut.columns()/3, 6);
// vector2bmp(matrix2vv(mask), "mask_outer.bmp");
// //mask = weightingMask<double>(128, 128, 10, 2);
// //vector2bmp(matrix2vv(mask), "mask_inner.bmp");
// blaze::DynamicMatrix<double> applied = mask % donut;
// vector2bmp(matrix2vv(applied), "mask_applied.bmp");
// //std::cout << mask << "\n";
// // */
// //* // fit_ellipse & forces test
// init_x = donut.columns() / 2;
// init_y = donut.rows() / 2;
// init_r = donut.columns() / 3;
// //size_t filtersize = round(sigma[0] * 6); // 3 sigma
// //metric::imfilter<double, 1, metric::FilterType::GAUSSIAN, metric::PadDirection::BOTH, metric::PadType::CONST> f(filtersize, filtersize, sigma[0]);
// //auto I1 = f(donut);
// //auto I1 = metric::DPM_detail::gaussianBlur(donut, sigma[0]);
//// auto gk = metric::DPM_detail::gaussianKernel(sigma[0]);
//// blaze::DynamicMatrix<double> I1 = metric::DPM_detail::blackPaddedConv(donut, gk);
//// vector2bmp(matrix2vv(I1), "blurred.bmp");
//// mat2bmp::blaze2bmp_norm(I1, "blurred_norm.bmp");
//// //I1 = blaze::submatrix(I1, (I1.rows() - donut.rows()) / 2, (I1.columns() - donut.columns()) / 2, donut.rows(), donut.columns());
//// //vector2bmp(matrix2vv(I1), "blurred_cropped.bmp");
//// std::cout << "blur input: min: " << blaze::min(donut) << ", max: " << blaze::max(donut) << "\n";
//// std::cout << "GVF input: min: " << blaze::min(I1) << ", max: " << blaze::max(I1) << "\n";
// auto I1 = donut;
// auto [u1, v1] = metric::DPM_detail::gvf(I1, 0.1, 1, 10);
// vector2bmp(matrix2vv(u1), "u1.bmp");
// vector2bmp(matrix2vv(v1), "v1.bmp");
// u1 = read_csv_blaze<double>("u.csv", ","); // TODO remove
// v1 = read_csv_blaze<double>("v.csv", ",");
// vector2bmp(matrix2vv(u1), "u2.bmp");
// vector2bmp(matrix2vv(v1), "v2.bmp");
// std::cout << "input: " << blaze::min(I1) << ".." << blaze::max(I1) << "\n";
// std::cout << "u1: " << blaze::min(u1) << ".." << blaze::max(u1) << "\n";
// std::cout << "v1: " << blaze::min(v1) << ".." << blaze::max(v1) << "\n";
// //blaze_dm_to_csv(I1, "test01_read.csv");
// //blaze_dm_to_csv(u1, "u1.csv");
// //blaze_dm_to_csv(v1, "v1.csv");
// //std::cout << "\ninput: \n" << donut << "\n";
// //std::cout << "\nblurred: \n" << I1 << "\n";
// //std::cout << "\nforse field: \n" << u1 << "\n" << v1 << "\n";
// std::vector<double> ep = { init_x, init_y, init_r, init_r, 0 }; // initial parameter guess
// double thresh = 1e-6;
// double incr = 0.2;
// blaze::DynamicVector<double> increment = { incr, incr, incr, incr, M_PI / 180 * incr }; // increment in each iteration
// blaze::DynamicVector<double> threshold = { thresh, thresh, thresh, thresh, thresh }; // threshold for forces/torsinal moments
// double half_min_size = (donut.rows() < donut.columns() ? donut.rows() : donut.columns()) / 2.0;
// std::vector<double> bound = { 5, half_min_size, 5, half_min_size }; // the lower/upper bounds of a and b
// std::cout << "\nfit_ellipse test:\n";
//// ep = metric::DPM_detail::fit_ellipse(ep, sigma[0] / 5 * increment, sigma[0] / 5 * threshold, bound, u1, v1, steps / sigma.size());
//// for (size_t i = 0; i < ep.size(); ++i) {
//// std::cout << ep[i] << "\t";
//// }
//// std::cout << "\n by parts:\n";
// size_t m = u1.rows();
// size_t n = u1.columns();
// double xc = init_x;
// double yc = init_y;
// double a = init_r;
// double b = init_r;
// double phi = 0;
// increment = sigma[0] / 5 * increment;
// threshold = sigma[0] / 5 * threshold;
// size_t local_n_iter = 2000;
// for (size_t it = 0; it < local_n_iter; ++it) {
// // compute grid points from ellipse parameter
// std::vector<blaze::DynamicVector<double>> x_y_theta = metric::DPM_detail::ellipse2grid(m, n, xc, yc, a, b, phi);
// // torsion along the ellpise about center
// double torsion = metric::DPM_detail::torsion_moment(u1, v1, x_y_theta[0], x_y_theta[1], x_y_theta[2], xc, yc, phi);
// //std::cout << "theta: \n" << x_y_theta[0] << "\n" << x_y_theta[1] << "\n" << x_y_theta[2] << "\n";
// //std::cout << "gvf_x: min: " << blaze::min(u1) << ", max: " << blaze::max(u1) << "\n";
// //std::cout << "gvf_y: min: " << blaze::min(v1) << ", max: " << blaze::max(v1) << "\n";
// // update phi
// if (torsion > threshold[4]) {
// phi = phi + increment[4];
// }
// if (torsion < -threshold[4]) {
// phi = phi - increment[4];
// }
// std::cout << "torsion_moment: \n" << torsion << "\n";
// // F_around
// blaze::DynamicMatrix<double> iresult = metric::DPM_detail::contourForces(u1, v1, x_y_theta[0], x_y_theta[1]);
// blaze::DynamicVector<double, blaze::rowVector> F_round = blaze::sum<blaze::columnwise>(iresult);
// for (size_t i = 0; i < F_round.size(); ++i) {
// F_round[i] = F_round[i] / double(x_y_theta[2].size());
// }
// std::vector<double> Fround(F_round.size());
// for (size_t i = 0; i < F_round.size(); ++i) {
// Fround[i] = F_round[i];
// }
// std::cout << "f_round: \n" << F_round << "\n";
// std::vector<double> x_index1;
// std::vector<double> y_index1;
// std::vector<double> x_index2;
// std::vector<double> y_index2;
// std::vector<double> x_index3;
// std::vector<double> y_index3;
// std::vector<double> x_index4;
// std::vector<double> y_index4;
// for (size_t i = 0; i < x_y_theta[0].size(); ++i) {
// if (x_y_theta[2][i] > M_PI * 3 / 4 && x_y_theta[2][i] < M_PI * 5 / 4) {
// x_index1.push_back(x_y_theta[0][i]);
// y_index1.push_back(x_y_theta[1][i]);
// }
// if (x_y_theta[2][i] < M_PI / 4 || x_y_theta[2][i] > M_PI * 7 / 4) {
// x_index2.push_back(x_y_theta[0][i]);
// y_index2.push_back(x_y_theta[1][i]);
// }
// if (x_y_theta[2][i] > M_PI / 4 && x_y_theta[2][i] < M_PI * 3 / 4) {
// x_index3.push_back(x_y_theta[0][i]);
// y_index3.push_back(x_y_theta[1][i]);
// }
// if (x_y_theta[2][i] > M_PI * 5 / 4 && x_y_theta[2][i] < M_PI * 7 / 4) {
// x_index4.push_back(x_y_theta[0][i]);
// y_index4.push_back(x_y_theta[1][i]);
// }
// }
// double F_left = metric::DPM_detail::force(u1, v1, x_index1, y_index1, std::cos(phi), std::sin(phi));
// double F_right = metric::DPM_detail::force(u1, v1, x_index2, y_index2, -std::cos(phi), -std::sin(phi));
// double F_up = metric::DPM_detail::force(u1, v1, x_index3, y_index3, std::sin(phi), -std::cos(phi));
// double F_down = metric::DPM_detail::force(u1, v1, x_index4, y_index4, -std::sin(phi), std::cos(phi));
// // std::cout << "it=" << it << " --> " << F_left << " " << F_right << " " << F_up << " " << F_down
// // << std::endl;
// // update xc and yc
// double F_left_right = F_round[0] * 1 + F_round[1] * 0;
// if (F_left_right > threshold[0]) {
// xc = xc + increment[0];
// ;
// } else if (F_left_right < -threshold[0]) {
// xc = xc - increment[0];
// }
// double F_down_up = F_round[0] * 0 + F_round[1] * 1;
// if (F_down_up > threshold[1]) {
// yc = yc + increment[1];
// } else if (F_down_up < -threshold[1]) {
// yc = yc - increment[1];
// }
// // update xc and yc again according to diagonal force
// double F_diag1 = F_round[0] * 0.7071 + F_round[1] * 0.7071;
// if (F_diag1 > threshold[0] + threshold[1]) {
// xc = xc + increment[0];
// yc = yc + increment[1];
// } else if (F_diag1 < -threshold[0] - threshold[1]) {
// xc = xc - increment[0];
// yc = yc - increment[1];
// }
// double F_diag2 = F_round[0] * (-0.7071) + F_round[1] * 0.7071;
// if (F_diag2 > threshold[0] + threshold[1]) {
// xc = xc - increment[0];
// yc = yc + increment[1];
// } else if (F_diag2 < -threshold[0] - threshold[1]) {
// xc = xc + increment[0];
// yc = yc - increment[1];
// }
// // update a and b
// if (F_left + F_right > threshold[2])
// a = a - increment[2];
// else if (F_left + F_right < -threshold[2])
// a = a + increment[2];
// if (F_up + F_down > threshold[3])
// b = b - increment[3];
// else if (F_up + F_down < -threshold[3])
// b = b + increment[3];
// if (b > a) {
// std::swap(a, b);
// phi = std::fmod(phi + M_PI / 2, M_PI);
// }
// // restrict a and b using lower and upper bounds
// if (a > bound[1])
// a = bound[1];
// if (a < bound[0])
// a = bound[0];
// if (b > bound[3])
// b = bound[3];
// if (b < bound[2])
// b = bound[2];
// std::cout << "iter result: " << xc << " " << yc << " " << a << " " << b << " " << phi << " | " << it << "\n\n"; // TODO remove
// }
// // */
// return 0;
//}
| 23,464
|
C++
|
.cpp
| 466
| 47.463519
| 154
| 0.509733
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,456
|
energy_encoder_example.cpp
|
metric-space-ai_metric/examples/transform_examples/energy_encoder_example.cpp
|
#include "metric/transform/energy_encoder.hpp"
#include <iostream>
int main() {
//std::vector<double> data = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
std::vector<double> data = {0, 1, 2, 3, 4, 5, 6, 7};
//std::vector<double> data = {0};
// std::vector<double> data;
// for (size_t i = 0; i<50000; ++i) {
// data.push_back((double)i);
// }
auto e = metric::EnergyEncoder(5, 1);
//auto e = metric::energy_encoder();
auto result = e(data);
std::cout << "\n\nenergies per subband:\n";
for (size_t i = 0; i<result.size(); ++i) {
std::cout << result[i] << " ";
}
std::cout << "\n\n";
std::cout << "testing subband size computation:\n";
std::cout << "\nl_split_estim l_split_actual | input_extim_estim input_estim_by_actual | l_split_estim_by_input_estim\n";
size_t lx = 50000, lf = 10;
size_t subbands_num = 2;
std::vector<double> x(lx, 0.0);
for (size_t i = 1; i < 18; ++i) {
//std::cout << metric::subband_size(lx, i, lf) << "\n";
std::stack<size_t> subband_length;
auto split = metric::sequential_DWT<std::vector, std::vector<double>, std::allocator<std::vector<double>>>(x, subband_length, 5, subbands_num);
auto lr = split[0].size();
for (size_t j = 1; j<split.size(); ++j) { // check of size equality of subbands
if (lr != split[j].size())
std::cout << "\ninequal split size: " << j << " " << split[j].size() << "\n";
}
auto lr_estim = metric::subband_size(lx, i, lf);
auto lx_estim = metric::original_size(lr, i, lf);
std::cout << lr_estim << " " << lr << " | "
<< metric::original_size(lr_estim, i, lf) << " " << lx_estim << " | "
<< metric::subband_size(lx_estim, i, lf)
<< "\n";
subbands_num *= 2;
}
std::cout << "\n\ntesting computation of frequency bounds for 4 splits, 40000 points:\n";
auto e2 = metric::EnergyEncoder(5, 4);
auto bounds = e2.freq_bounds(40000);
for (size_t i=0; i< bounds.size(); ++i)
std::cout << bounds[i] << "\n";
std::cout << "\n\ntesting computation of frequency bounds for 4 splits, 39999 points:\n";
bounds = e2.freq_bounds(39999);
for (size_t i=0; i< bounds.size(); ++i)
std::cout << bounds[i] << "\n";
std::cout << "\n\n";
return 0;
}
| 2,413
|
C++
|
.cpp
| 52
| 39.403846
| 151
| 0.541684
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,457
|
dwt2_types_example.cpp
|
metric-space-ai_metric/examples/transform_examples/dwt2_types_example.cpp
|
#include "metric/transform/wavelet.hpp"
#include <blaze/Blaze.h>
#include <iostream>
#include <type_traits> // for IsMatrixCheck
#include <vector>
// ---- IsMatrix test
template <typename Container>
typename std::enable_if<blaze::IsMatrix<Container>::value, void>::type MatrixCheck(Container)
{
std::cout << "\nMatrix\n";
}
template <typename Container>
typename std::enable_if<!blaze::IsMatrix<Container>::value, void>::type MatrixCheck(Container)
{
std::cout << "\nNot matrix\n";
}
// ----
template <typename T> std::ostream &operator<<(std::ostream &out, std::deque<T> const &a)
{
// out << "[ ";
for (auto x : a) {
out << x << "\n";
}
// out << "]";
return out;
}
int main()
{
{
using T = blaze::DynamicVector<double>;
// using T = std::deque<double>;
T a{0, 0, 0, 0, 1, 0, 0, 0};
T b{0, 1, 1, 1, 0};
auto result = wavelet::conv_valid(a, b);
std::cout << result << "\n";
std::cout << wavelet::conv(a, b) << "\n";
auto coeffs = wavelet::dbwavf<T>(4);
std::cout << "\n" << coeffs << "\n";
auto filters = wavelet::orthfilt(coeffs);
std::cout << "\nfilters:\n"
<< std::get<0>(filters) << "\n"
<< std::get<1>(filters) << "\n"
<< std::get<2>(filters) << "\n"
<< std::get<3>(filters) << "\n";
auto filtered = wavelet::dwt(a, 4);
std::cout << "\nfiltered:\n" << std::get<0>(filtered) << "\n" << std::get<1>(filtered) << "\n";
auto restored = wavelet::idwt(std::get<0>(filtered), std::get<1>(filtered), 4, a.size());
std::cout << "\nrestored:\n" << restored << "\n";
}
// return 0;
{
// IsMatrix test
blaze::DynamicMatrix<double> a{{0}};
std::vector<std::vector<double>> b{{0}};
blaze::DynamicVector<double> c{0};
blaze::CompressedMatrix<double> d{{0}};
blaze::SymmetricMatrix<blaze::CompressedMatrix<double>> e{{0}};
MatrixCheck(a);
MatrixCheck(b);
MatrixCheck(c);
MatrixCheck(d);
MatrixCheck(e);
// MatrixCheck(b);
}
{
// using T = blaze::DynamicMatrix<double>;
using T = blaze::CompressedMatrix<double>;
// using T = std::vector<blaze::DynamicVector<double>>; // runs old overload
T data2d = {
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
};
auto splitted = wavelet::dwt2(data2d, 4);
std::cout << "\n\nblaze 2d test: splitted 1:\n" << std::get<0>(splitted) << "\n\n";
auto restored =
wavelet::idwt2(std::get<0>(splitted), std::get<1>(splitted), std::get<2>(splitted), std::get<3>(splitted),
4, data2d.rows(), data2d.columns()); // for non-matrix type
// auto restored = wavelet::idwt2(std::get<0>(splitted), std::get<1>(splitted), std::get<2>(splitted),
// std::get<3>(splitted), 4, data2d.size(), data2d[0].size());
std::cout << "\n\nblaze 2d test: restored:\n" << restored << "\n\n";
}
// old test
using Container = std::deque<double>;
int wavelet = 4;
Container zeros = Container(10, 0);
Container peak = zeros;
peak[4] = 1;
std::vector<Container> data2d = {zeros, zeros, zeros, zeros, peak, zeros, zeros,
zeros, zeros, zeros, zeros, zeros, zeros, zeros};
std::cout << "\n2d:\n";
std::cout << "input:\n";
for (size_t i = 0; i < data2d.size(); ++i) {
for (size_t j = 0; j < data2d[0].size(); ++j)
std::cout << data2d[i][j] << " ";
std::cout << "\n";
}
auto splitted = wavelet::dwt2(data2d, wavelet);
std::cout << "slpitted LL:\n";
for (size_t i = 0; i < std::get<0>(splitted).size(); ++i) {
for (size_t j = 0; j < std::get<0>(splitted)[0].size(); ++j)
std::cout << std::get<0>(splitted)[i][j] << " ";
std::cout << "\n";
}
auto restored = wavelet::idwt2(std::get<0>(splitted), std::get<1>(splitted), std::get<2>(splitted),
std::get<3>(splitted), wavelet, data2d.size(), data2d[0].size());
std::cout << "restored:\n";
for (size_t i = 0; i < restored.size(); ++i) {
for (size_t j = 0; j < restored[0].size(); ++j)
std::cout << restored[i][j] << " ";
std::cout << "\n";
}
return 0;
}
| 4,310
|
C++
|
.cpp
| 116
| 34.198276
| 109
| 0.571668
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,458
|
hysteresis_fitting_exampe.cpp
|
metric-space-ai_metric/examples/transform_examples/hysteresis_fitting_exampe.cpp
|
#include "metric/transform/distance_potential_minimization.hpp"
#include <iostream>
#include <chrono>
//#define MEASURE
int main()
{
blaze::DynamicVector<double> x = { 131, 130, 145, 264, 309, 285, 303, 175, 152, 299, 252, 148, 155, 152, 274, 167,
266, 143, 300, 203, 259, 208, 224, 289, 172, 141, 147, 207, 165, 267, 295, 128, 275, 249, 125, 130, 260, 278,
225, 256, 147, 149, 145, 205, 149, 292, 157, 149, 307, 151, 264, 298, 137, 156, 307, 209, 226, 157, 144, 129,
146, 212, 213, 306, 300, 200, 217, 300, 134, 266, 273, 142, 137, 280, 204, 173, 257, 297, 139, 286, 212, 166,
301, 281, 134, 288, 278, 206, 290, 263, 167, 276, 211, 165, 135, 149, 303, 239, 133, 128, 321, 152, 18, 99, 297,
214, 297, 46, 331, 87, 314, 296, 343, 81, 441, 61, 157, 479, 133, 313, 191, 251, 71, 298, 107, 454, 160, 468,
106, 365, 184, 153, 161, 471, 491, 485, 387, 180, 128, 100, 385, 46, 245, 26, 170, 290, 84, 477, 23, 122, 368,
131, 438, 473, 74, 371, 246, 420, 491, 390, 84, 359, 108, 100, 208, 407, 461, 339, 5, 251, 380, 476, 39, 88,
326, 202, 397, 158, 408, 60, 189, 33, 59, 167, 466, 4, 340, 464, 173, 196, 342, 26, 145, 226, 152, 103, 421,
148, 364, 82 };
blaze::DynamicVector<double> y = { 203, 155, 244, 323, 235, 159, 280, 304, 288, 305, 322, 277, 284, 122, 139, 299,
319, 148, 224, 107, 327, 329, 325, 310, 101, 232, 258, 321, 309, 316, 169, 171, 306, 121, 194, 206, 327, 322,
332, 335, 260, 123, 270, 333, 271, 181, 113, 282, 265, 107, 333, 205, 207, 268, 213, 325, 332, 108, 243, 232,
140, 335, 328, 278, 196, 106, 329, 252, 192, 148, 146, 246, 234, 154, 112, 104, 136, 179, 164, 171, 328, 119,
203, 169, 234, 314, 309, 103, 200, 148, 123, 163, 328, 288, 152, 146, 248, 116, 174, 170, 202, 85, 136, 76, 294,
371, 265, 104, 378, 174, 299, 81, 48, 7, 20, 102, 317, 344, 303, 238, 20, 224, 130, 27, 130, 373, 87, 334, 87,
139, 89, 85, 136, 256, 162, 85, 53, 378, 22, 95, 38, 271, 14, 209, 362, 81, 371, 92, 230, 126, 141, 243, 48,
382, 305, 29, 172, 304, 231, 106, 56, 292, 366, 345, 379, 171, 247, 174, 10, 102, 215, 277, 255, 312, 113, 131,
355, 146, 160, 323, 105, 11, 243, 246, 53, 100, 138, 367, 133, 5, 2, 10, 280, 90, 147, 5, 54, 97, 266, 89 };
auto t1 = std::chrono::steady_clock::now();
auto result = metric::fit_hysteresis(x, y, 400, 500);
//auto result = metric::fit_hysteresis(x, y, 500, 600);
auto t2 = std::chrono::steady_clock::now();
std::cout << " xc " << result[0] << " yc= " << result[1] << " a " << result[2] << " b= " << result[3]
<< " phi= " << result[4] << std::endl;
std::cout << " (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000
<< " s)" << std::endl;
return 0;
}
| 2,843
|
C++
|
.cpp
| 35
| 74.314286
| 120
| 0.560514
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,459
|
daubechies_mat_example.cpp
|
metric-space-ai_metric/examples/transform_examples/daubechies_mat_example.cpp
|
//#include <vector>
#include <iostream>
#include <chrono>
//#include "metric/transform.hpp"
#include "metric/transform/wavelet.hpp"
#include "assets/helpers.cpp"
#include "metric/utils/visualizer.hpp"
int main() {
// ----- 1d DWT transform example
blaze::DynamicVector<double> v {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
//blaze::DynamicVector<double> v {0, 0.2, 0.4, 0.6, 0.8, 1};
auto db4 = wavelet::DaubechiesMat<double>(v.size());
std::cout << "Daubechies D4 transform matrix, upper half for low-pass, lower hals for high-pass:\n" << db4 << "\n";
blaze::DynamicVector<double> encoded = db4*v; // dont use 'auto' here!!, it will result in matrix type, whereas we need column vector
std::cout << "decomposed vector:\n" << encoded << "\n";
auto decoded = db4.transpose()*encoded;
std::cout << "restored vector:\n" << decoded << "\n";
// ----- 2d DWT examples
blaze::DynamicMatrix<double> img { // 8*10 image, even size required
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
};
auto db4_w = wavelet::DaubechiesMat<double>(img.columns(), 6); // transform matrix for ROWS of approptiate size (as width of the image)
auto db4_h = wavelet::DaubechiesMat<double>(img.rows(), 6); // for COLUMNS (image height)
// order (number of coefficients) should not be greater than length of input vector!
blaze::DynamicMatrix<double> db4_w_t = blaze::trans(db4_w); // transposed matrices for inverse trancform
blaze::DynamicMatrix<double> db4_h_t = blaze::trans(db4_h);
auto encoded_img = wavelet::dwt2s(img, db4_w, db4_h); // whole image transform, results in single image of all subbands concatenated
auto decoded_img = wavelet::dwt2s(encoded_img, db4_w_t, db4_h_t); // same function, transposed transform matrices for inverse transform
std::cout << "decomposed image:\n" << encoded_img << "\n";
std::cout << "restored image:\n" << decoded_img << "\n";
auto encoded_img_tuple = wavelet::dwt2(img, db4_w, db4_h); // transform with outputting subbands apart in a tuple
auto decoded_img_2 = wavelet::idwt2(encoded_img_tuple, db4_w_t, db4_h_t); // here we also need transposed matrices for inverse transform
std::cout << "low-low subband of decomposed image: \n" << std::get<0>(encoded_img_tuple) << "\n";
std::cout << "restored image: \n" << decoded_img_2 << "\n";
// DWT periodizided examples
{ // serialized matrix example
auto db4_w = wavelet::DaubechiesMat_e<double>(img.columns(), img.columns()*img.rows(), 6); // transform matrix for ROWS of approptiate size (as width of the image)
auto db4_h = wavelet::DaubechiesMat_e<double>(img.rows(), img.columns()*img.rows(), 6); // for COLUMNS (image height)
// order (number of coefficients) should not be greater than length of input vector!
blaze::DynamicMatrix<double> db4_w_t = blaze::trans(db4_w); // transposed matrices for inverse trancform
blaze::DynamicMatrix<double> db4_h_t = blaze::trans(db4_h);
auto encoded_img = wavelet::dwt2s_e(img, db4_w, db4_h); // whole image transform, results in single image of all subbands concatenated
auto decoded_img = wavelet::dwt2s_e(encoded_img, db4_w_t, db4_h_t); // same function, transposed transform matrices for inverse transform
std::cout << "decomposed image (sm ver) :\n" << encoded_img << "\n";
std::cout << "restored image (sm ver) :\n" << decoded_img << "\n";
auto encoded_img_tuple = wavelet::dwt2_e(img, db4_w, db4_h); // transform with outputting subbands apart in a tuple
auto decoded_img_2 = wavelet::idwt2_e(encoded_img_tuple, db4_w_t, db4_h_t); // here we also need transposed matrices for inverse transform
std::cout << "low-low subband of decomposed image (sm ver) : \n" << std::get<0>(encoded_img_tuple) << "\n";
std::cout << "restored image (sm ver) : \n" << decoded_img_2 << "\n";
}
// Cameraman
int DaubechiesOrder = 6; //10; // SETUP HERE wavelet type (2 till 20 even only)
int l_scale = 300; //3000; // SETUP HERE lum scaling in visualizer
auto cm_b = read_csv_blaze<double>("assets/cameraman.csv", ",");
//auto cm_b = read_csv_blaze<double>("assets/test.csv", ",");
std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds> t1, t2;
{
t1 = std::chrono::steady_clock::now();
auto db4_w = wavelet::DaubechiesMat<double>(cm_b.columns(), DaubechiesOrder); // transform matrix for ROWS of approptiate size (as width of the image)
auto db4_h = wavelet::DaubechiesMat<double>(cm_b.rows(), DaubechiesOrder); // for COLUMNS (image height)
blaze::CompressedMatrix<double> db4_w_t = blaze::trans(db4_w); // transposed matrices for inverse trancform
blaze::CompressedMatrix<double> db4_h_t = blaze::trans(db4_h);
t2 = std::chrono::steady_clock::now();
std::cout << "Daubechies transform matrices creation time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
t1 = std::chrono::steady_clock::now();
auto cm_splitted_periodized = wavelet::dwt2s(cm_b, db4_w, db4_h);
auto cm_splitted_periodized_2 = wavelet::dwt2s(cm_splitted_periodized, db4_w, db4_h);
auto cm_splitted_periodized_3 = wavelet::dwt2s(cm_splitted_periodized_2, db4_w, db4_h);
auto cm_splitted_periodized_4 = wavelet::dwt2s(cm_splitted_periodized_3, db4_w, db4_h);
t2 = std::chrono::steady_clock::now();
std::cout << "4x split time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
auto cm_decoded_periodized = wavelet::dwt2s(cm_splitted_periodized, db4_w_t, db4_h_t);
mat2bmp::blaze2bmp(cm_splitted_periodized/l_scale, "cm_splited_per.bmp");
mat2bmp::blaze2bmp(cm_decoded_periodized/l_scale, "cm_restored_per.bmp");
t1 = std::chrono::steady_clock::now();
auto cm_decoded_periodized_cascade = wavelet::dwt2s(cm_splitted_periodized_4, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
t2 = std::chrono::steady_clock::now();
std::cout << "4x compose time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
mat2bmp::blaze2bmp(cm_splitted_periodized_2/l_scale, "cm_splited_per_2.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_3/l_scale, "cm_splited_per_3.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_4/l_scale, "cm_splited_per_4.bmp");
mat2bmp::blaze2bmp(cm_decoded_periodized_cascade/l_scale, "cm_restored_per_cascade.bmp");
}
{ // serialized image version
//auto dmat_n = wavelet::DaubechiesMat_e<double>(6, 24, 4);
//std::cout << "dmat 10*6, d4: " << dmat_n << "\n";
t1 = std::chrono::steady_clock::now();
auto db4_w = wavelet::DaubechiesMat_e<double>(cm_b.columns(), cm_b.columns()*cm_b.rows(), DaubechiesOrder); // transform matrix for ROWS of approptiate size (as width of the image)
auto db4_h = wavelet::DaubechiesMat_e<double>(cm_b.rows(), cm_b.columns()*cm_b.rows(), DaubechiesOrder); // for COLUMNS (image height)
blaze::CompressedMatrix<double> db4_w_t = blaze::trans(db4_w); // transposed matrices for inverse trancform
blaze::CompressedMatrix<double> db4_h_t = blaze::trans(db4_h);
t2 = std::chrono::steady_clock::now();
std::cout << "Daubechies transform matrices creation time (serialized ver): " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
t1 = std::chrono::steady_clock::now();
auto cm_splitted_periodized = wavelet::dwt2s_e(cm_b, db4_w, db4_h);
auto cm_splitted_periodized_2 = wavelet::dwt2s_e(cm_splitted_periodized, db4_w, db4_h);
auto cm_splitted_periodized_3 = wavelet::dwt2s_e(cm_splitted_periodized_2, db4_w, db4_h);
auto cm_splitted_periodized_4 = wavelet::dwt2s_e(cm_splitted_periodized_3, db4_w, db4_h);
t2 = std::chrono::steady_clock::now();
std::cout << "4x split time (serialized ver): " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
auto cm_decoded_periodized = wavelet::dwt2s_e(cm_splitted_periodized, db4_w_t, db4_h_t);
mat2bmp::blaze2bmp(cm_splitted_periodized/l_scale, "cm_splited_per_e.bmp");
mat2bmp::blaze2bmp(cm_decoded_periodized/l_scale, "cm_restored_per_e.bmp");
t1 = std::chrono::steady_clock::now();
auto cm_decoded_periodized_cascade = wavelet::dwt2s_e(cm_splitted_periodized_4, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s_e(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s_e(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
cm_decoded_periodized_cascade = wavelet::dwt2s_e(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
t2 = std::chrono::steady_clock::now();
std::cout << "4x decompose time (serialized ver): " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
mat2bmp::blaze2bmp(cm_splitted_periodized_2/l_scale, "cm_splited_per_2_e.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_3/l_scale, "cm_splited_per_3_e.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_4/l_scale, "cm_splited_per_4_e.bmp");
mat2bmp::blaze2bmp(cm_decoded_periodized_cascade/l_scale, "cm_restored_per_cascade_e.bmp");
}
{ // loop-based periodized convolution version
t1 = std::chrono::steady_clock::now();
auto cm_splitted_periodized = wavelet::dwt2_l(cm_b, DaubechiesOrder);
auto cm_splitted_periodized_2 = wavelet::dwt2_l(cm_splitted_periodized, DaubechiesOrder);
auto cm_splitted_periodized_3 = wavelet::dwt2_l(cm_splitted_periodized_2, DaubechiesOrder);
auto cm_splitted_periodized_4 = wavelet::dwt2_l(cm_splitted_periodized_3, DaubechiesOrder);
t2 = std::chrono::steady_clock::now();
std::cout << "4x split time (loop ver): " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
//auto cm_decoded_periodized = wavelet::dwt2s_e(cm_splitted_periodized, db4_w_t, db4_h_t);
mat2bmp::blaze2bmp(cm_splitted_periodized/l_scale, "cm_splited_loop_e.bmp");
//mat2bmp::blaze2bmp(cm_decoded_periodized/l_scale, "cm_restored_loop_per_e.bmp");
// TODO add composition
mat2bmp::blaze2bmp(cm_splitted_periodized_2/l_scale, "cm_splited_loop_2_e.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_3/l_scale, "cm_splited_loop_3_e.bmp");
mat2bmp::blaze2bmp(cm_splitted_periodized_4/l_scale, "cm_splited_loop_4_e.bmp");
}
// {
// // test reshape trick
// blaze::DynamicVector<double> v {1, 2, 3, 4, 5, 6};
// blaze::DynamicVector<double> u (6); // {0, 0, 0, 0, 0, 0};
// blaze::DynamicMatrix<double> space_matrix{{1, 0}, {0, -1}};
// blaze::CustomMatrix<double, blaze::unaligned, blaze::unpadded, blaze::rowMajor> vit(&v[0], 2, 3);
// blaze::CustomMatrix<double, blaze::unaligned, blaze::unpadded, blaze::rowMajor> ujs(&u[0], 2, 3);
// ujs = space_matrix * vit;
// std::cout << ujs << '\n';
// std::cout << u << std::endl;
// std::cout << vit << '\n';
// std::cout << v << std::endl;
// std::cout << space_matrix << std::endl;
// std::cout << "\ndone\n" << std::endl;
// }
return 0;
}
| 12,170
|
C++
|
.cpp
| 158
| 69.803797
| 188
| 0.64564
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,460
|
dwt_benchmark.cpp
|
metric-space-ai_metric/examples/transform_examples/dwt_benchmark.cpp
|
#include <vector>
#include <iostream>
#include <chrono>
#include <random>
//#include "transform/wavelet.hpp"
#include "metric/transform/energy_encoder.hpp"
template <typename T>
std::vector<T>
extract_energies_static(std::vector<T> x)
{
    // Reference (hand-tuned) energy extractor: three low-pass Daubechies-5
    // DWT passes on the input, then a full 3-level binary subband tree on
    // the remaining approximation, yielding 8 subbands.
    std::vector<T> approx = x;
    for (int pass = 0; pass < 3; ++pass) {
        auto [lo, hi] = wavelet::dwt(approx, 5);
        approx = lo;
    }
    // Expand the binary tree; each level splits every band into (low, high),
    // producing the same low-to-high ordering as the unrolled original.
    std::vector<std::vector<T>> subbands { approx };
    for (int level = 0; level < 3; ++level) {
        std::vector<std::vector<T>> next;
        next.reserve(subbands.size() * 2);
        for (const auto& band : subbands) {
            auto [lo, hi] = wavelet::dwt(band, 5);
            next.push_back(lo);
            next.push_back(hi);
        }
        subbands = next;
    }
    // Mean squared sample value per subband; the divisor is the first
    // subband's length (all subbands share the same length here).
    const T denom = T(subbands[0].size());
    std::vector<T> energies;
    energies.reserve(subbands.size());
    for (const auto& band : subbands) {
        T acc = 0;
        for (const T& sample : band)
            acc += sample * sample;
        energies.push_back(acc / denom);
    }
    //std::cout << subbands[0].size() << std::endl;
    return energies;
}
template <typename T>
std::vector<T>
extract_energies_static_3(std::vector<T> x)
{
    // 3-level variant: expands a full binary DWT subband tree (Daubechies-5)
    // directly from the input — no initial low-pass passes — giving 8 subbands
    // in the same low-to-high order as the unrolled original.
    std::vector<std::vector<T>> subbands { x };
    for (int level = 0; level < 3; ++level) {
        std::vector<std::vector<T>> next;
        next.reserve(subbands.size() * 2);
        for (const auto& band : subbands) {
            auto [lo, hi] = wavelet::dwt(band, 5);
            next.push_back(lo);
            next.push_back(hi);
        }
        subbands = next;
    }
    // Mean squared sample value per subband; divisor is the first subband's
    // length (all subbands share the same length here).
    const T denom = T(subbands[0].size());
    std::vector<T> energies;
    energies.reserve(subbands.size());
    for (const auto& band : subbands) {
        T acc = 0;
        for (const T& sample : band)
            acc += sample * sample;
        energies.push_back(acc / denom);
    }
    //std::cout << subbands[0].size() << std::endl;
    return energies;
}
int main() {
    // Benchmark: runs metric::EnergyEncoder and the hand-unrolled static
    // extractor on the same random vector and prints per-call average times.
    size_t runs = 1000; // set number of runs
    size_t len = 48000; // set length of test random vector
    std::random_device dev;
    std::mt19937 rng(dev());
    std::uniform_real_distribution<> dist(-1, 1);
    std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds> t1, t2;
    double sum;
    std::vector<double> x, result, result2;
    // Fill the test vector with uniform noise in [-1, 1].
    for (size_t i = 0; i<len; ++i)
        x.push_back(dist(rng));
    std::cout << "running energy extractors " << runs << " times on vector of length " << len << "\n";
    // EnergyEncoder(5, 3): presumably wavelet order 5 and 3 split levels,
    // matching extract_energies_static_3 below — TODO confirm against docs.
    auto e = metric::EnergyEncoder(5, 3);
    sum = 0;
    // Time the functor; sum accumulates seconds across all runs.
    for (size_t i = 0; i<runs; ++i) {
        t1 = std::chrono::steady_clock::now();
        result = e(x);
        t2 = std::chrono::steady_clock::now();
        sum += double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
    }
    std::cout << "\n\n\ntesting functor:\nenergies per subband:\n";
    for (size_t i = 0; i<result.size(); ++i) {
        std::cout << result[i] << " ";
    }
    std::cout << "\n" << result.size() << " energy values\n\n";
    std::cout //<< "\n\nresult1: " //<< result1
        << "average time: " << sum/runs
        << " s" << std::endl;
    std::cout << "" << std::endl;
    sum = 0;
    // Same measurement for the static reference implementation.
    for (size_t i = 0; i<runs; ++i) {
        t1 = std::chrono::steady_clock::now();
        result2 = extract_energies_static_3(x);
        t2 = std::chrono::steady_clock::now();
        sum += double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
    }
    std::cout << "\n\n\ntesting static ref function:\n\nenergies per subband:\n";
    for (size_t i = 0; i<result2.size(); ++i) {
        std::cout << result2[i] << " ";
    }
    std::cout << "\n" << result2.size() << " energy values\n\n";
    std::cout //<< "\n\nresult1: " //<< result1
        << "average time: " << sum/runs
        << " s" << std::endl;
    std::cout << "" << std::endl;
    return 0;
}
| 4,135
|
C++
|
.cpp
| 106
| 33.009434
| 108
| 0.565773
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,461
|
image_filter_example.cpp
|
metric-space-ai_metric/examples/transform_examples/image_filter_example.cpp
|
//#include "metric/utils/image_processing/image_filter.hpp"
//#include "metric/utils/image_processing/convolution.hpp"
//#include "metric/transform/wavelet2d.hpp"
#include "metric/transform/distance_potential_minimization.hpp"
#include "donuts.hpp" // for only png reader
#include "assets/helpers.cpp"
#include "metric/utils/visualizer.hpp"
int main() {
    // Gaussian-blur experiment on the cameraman test image. Three older
    // approaches (image_filter.hpp, convolution.hpp, wavelet2d.hpp) are kept
    // as commented-out variants; the active path uses DPM_detail helpers.
    blaze::DynamicMatrix<double> cm = read_csv_blaze<double>("assets/cameraman.csv", ",");
    mat2bmp::blaze2bmp_norm(cm, "input_cameraman.bmp");
    double sigma = 5; //1.45;
    /* // using image_filter.hpp
    size_t filtersize = round(sigma * 6); // 3 sigma
    //metric::imfilter<double, 1, metric::FilterType::GAUSSIAN, metric::PadDirection::BOTH, metric::PadType::SYMMETRIC> f(filtersize, filtersize, sigma);
    metric::imfilter<double, 1, metric::FilterType::GAUSSIAN, metric::PadDirection::BOTH, metric::PadType::CONST> f(filtersize, filtersize, sigma);
    auto cm_blurred = f(cm);
    mat2bmp::blaze2bmp_norm(cm_blurred, "blurred_cameraman.bmp");
    cm_blurred = blaze::submatrix(cm_blurred, (cm_blurred.rows() - cm.rows()) / 2, (cm_blurred.columns() - cm.columns()) / 2, cm.rows(), cm.columns());
    //cm_blurred = blaze::submatrix(cm_blurred, 0, 0, cm.rows(), cm.columns());
    mat2bmp::blaze2bmp_norm(cm_blurred, "cropped_blurred_cameraman.bmp");
    // */
    /* using convolution.hpp
    auto kernel = gaussianKernel(sigma);
    mat2bmp::blaze2bmp_norm(kernel, "gkernel.bmp");
    //auto conv = metric::Convolution2d<double, 1>(cm.columns(), cm.rows(), kernel.columns(), kernel.rows());
    auto conv = metric::Convolution2d<double, 1>(cm.rows(), cm.columns(), kernel.rows(), kernel.columns());
    auto cm_blurred = conv({cm}, kernel)[0];
    mat2bmp::blaze2bmp_norm(cm_blurred, "cameraman_blurred.bmp");
    // */
    //* using wavelet2d.hpp
    // auto kernel = gaussianKernel(sigma);
    // mat2bmp::blaze2bmp_norm(kernel, "gkernel.bmp");
    // //auto conv = wavelet::Convolution2dCustom<double, 1>(cm.rows(), cm.columns(), kernel.rows(), kernel.columns());
    // auto conv = Convolution2dCustomStride1<double, 1>(cm.columns(), cm.rows(), kernel.columns(), kernel.rows());
    // //auto conv = wavelet::Convolution2dCustom<double, 1>(cm.columns(), cm.rows(), kernel.columns(), kernel.rows());
    // auto cm_blurred = conv({cm}, kernel)[0];
    // mat2bmp::blaze2bmp_norm(cm_blurred, "cameraman_blurred.bmp");
    // blaze::DynamicMatrix<double> cm_blurred_padded (cm.rows(), cm.columns(), 0);
    // blaze::submatrix(
    //     cm_blurred_padded,
    //     (cm.rows() - cm_blurred.rows())/2, (cm.columns() - cm_blurred.columns())/2,
    //     cm_blurred.rows(), cm_blurred.columns()
    //     ) = cm_blurred;
    // mat2bmp::blaze2bmp_norm(cm_blurred_padded, "cameraman_blurred_padded.bmp");
    //cm = read_png_donut<double>("assets/donuts/crop/donut_6_radial_outer_128.png");
    // Dump the input via both visualizer paths for visual comparison.
    std::cout << "input: " << blaze::max(cm) << ".." << blaze::min(cm) << "\n";
    mat2bmp::blaze2bmp_norm(cm, "input_1.bmp");
    vector2bmp(matrix2vv(blaze::DynamicMatrix<double>(cm/256.0)), "input_2.bmp");
    vector2bmp(matrix2vv(cm), "input_3.bmp");
    //auto cm_blurred_padded = metric::DPM_detail::gaussianBlur(cm, sigma);
    // Active path: explicit Gaussian kernel + black-padded convolution.
    auto kernel = metric::DPM_detail::gaussianKernel(sigma);
    auto cm_blurred_padded = metric::DPM_detail::blackPaddedConv(cm, kernel);
    mat2bmp::blaze2bmp_norm(cm_blurred_padded, "blurred_padded_1.bmp");
    vector2bmp(matrix2vv(blaze::DynamicMatrix<double>(cm_blurred_padded/256.0)), "blurred_padded_2.bmp");
    vector2bmp(matrix2vv(cm_blurred_padded), "blurred_padded_3.bmp");
    //std::cout << cm_blurred_padded << "\n";
    std::cout << "output: " << blaze::max(cm_blurred_padded) << ".." << blaze::min(cm_blurred_padded) << "\n";
    // */
    return 0;
}
| 3,808
|
C++
|
.cpp
| 60
| 59.6
| 153
| 0.668367
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,462
|
dwt_reducer.cpp
|
metric-space-ai_metric/examples/transform_examples/dwt_reducer.cpp
|
#ifndef M_PI
// MSVC does not define M_PI
#define M_PI 3.14159265358979323846
#endif
#include "metric/transform.hpp"
#include "metric/utils/visualizer.hpp"
#include "assets/helpers.cpp"
//#include "transform/helper_functions.cpp" // TODO remove
#include <vector>
#include <iostream>
#include <chrono>
template <typename T>
std::vector<T>
extract_energies(std::vector<T> x)
{
    // Three low-pass Daubechies-5 DWT passes, then a full 3-level binary
    // subband tree on the remaining approximation, yielding 8 subbands whose
    // mean squared values are returned.
    std::vector<T> approx = x;
    for (int pass = 0; pass < 3; ++pass) {
        auto [lo, hi] = wavelet::dwt(approx, 5);
        approx = lo;
    }
    // Each tree level splits every band into (low, high), preserving the
    // low-to-high ordering of the original unrolled code.
    std::vector<std::vector<T>> subbands { approx };
    for (int level = 0; level < 3; ++level) {
        std::vector<std::vector<T>> next;
        next.reserve(subbands.size() * 2);
        for (const auto& band : subbands) {
            auto [lo, hi] = wavelet::dwt(band, 5);
            next.push_back(lo);
            next.push_back(hi);
        }
        subbands = next;
    }
    // Energy = sum of squares divided by the first subband's length
    // (all subbands share the same length here).
    const T denom = T(subbands[0].size());
    std::vector<T> energies;
    energies.reserve(subbands.size());
    for (const auto& band : subbands) {
        T acc = 0;
        for (const T& sample : band)
            acc += sample * sample;
        energies.push_back(acc / denom);
    }
    std::cout << subbands[0].size() << std::endl;
    return energies;
}
template <typename T>
std::vector<std::vector<T>>
sequential_DWT(std::vector<T> x)
{
    // Strips three approximation levels (Daubechies-5, details discarded),
    // then expands a full 3-level binary subband tree: 8 subband waveforms,
    // ordered low-to-high exactly as the unrolled original produced them.
    std::vector<T> approx = x;
    for (int pass = 0; pass < 3; ++pass) {
        auto [lo, hi] = wavelet::dwt(approx, 5);
        approx = lo;
    }
    std::vector<std::vector<T>> subbands { approx };
    for (int level = 0; level < 3; ++level) {
        std::vector<std::vector<T>> next;
        next.reserve(subbands.size() * 2);
        for (const auto& band : subbands) {
            auto [lo, hi] = wavelet::dwt(band, 5);
            next.push_back(lo);
            next.push_back(hi);
        }
        subbands = next;
    }
    return subbands;
}
template <typename T>
std::vector<T>
get_energies(std::vector<std::vector<T>> subbands)
{
    // Returns the mean squared value of each subband; note the divisor is
    // the FIRST subband's length (the subbands are expected to be equally
    // long here). Also prints that length, as the original did.
    const T denom = T(subbands[0].size());
    std::vector<T> energies;
    energies.reserve(subbands.size());
    for (const auto& band : subbands) {
        T acc = 0;
        for (const T& sample : band)
            acc += sample * sample;
        energies.push_back(acc / denom);
    }
    std::cout << subbands[0].size() << std::endl;
    return energies;
}
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<Container<ValueType, A1>, A2>, std::allocator<Container<Container<ValueType, A1>, A2>>> divide_timeseries(
        Container<Container<ValueType, A1>, A2> ts,
        size_t block_length)
{
    // Splits row-major timeseries data (ts[i][j] = sample i of series j)
    // into per-series blocks: output[j][b] is the b-th block of series j,
    // each block holding up to block_length consecutive samples (the last
    // block may be shorter). Assumes ts is non-empty — TODO confirm callers.
    auto output = Container<Container<Container<ValueType, A1>, A2>, std::allocator<Container<Container<ValueType, A1>, A2>>>();
    size_t n_values = ts[0].size();
    for (size_t j=0; j<n_values; ++j) // one block list per timeseries, seeded with an empty first block
    {
        auto vector_of_blocks = Container<Container<ValueType, A1>, A2>();
        vector_of_blocks.push_back(Container<ValueType, A1>());
        output.push_back(vector_of_blocks);
    }
    size_t block = 0;
    size_t pos_in_block = 0;
    for (size_t i=0; i<ts.size(); ++i) // loop of values in original timeseries
    {
        if (pos_in_block >= block_length) // current block is full: open a new one
        {
            for (size_t j=0; j<n_values; ++j)
            {
                output[j].push_back(Container<ValueType, A1>()); // next block of each timeseries
            }
            pos_in_block = 0;
            ++block;
        }
        // Bug fix: the original consumed the boundary iteration in its else
        // branch, silently dropping every (block_length+1)-th sample. Now
        // sample i is always appended to the current block.
        for (size_t j=0; j<n_values; ++j)
        {
            output[j][block].push_back(ts[i][j]);
        }
        ++pos_in_block;
    }
    return output;
}
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<ValueType, A1>, A2> transpose_timeseries(
        Container<Container<ValueType, A1>, A2> ts) // TODO remove and update csv reader this way
{
    // Turns row-major samples (ts[i][j]) into per-series rows: out[j][i].
    const size_t n_series = ts[0].size();
    auto transposed = Container<Container<ValueType, A1>, A2>();
    for (size_t j = 0; j < n_series; ++j)
        transposed.push_back(Container<ValueType, A1>());
    for (const auto& row : ts)
        for (size_t j = 0; j < n_series; ++j)
            transposed[j].push_back(row[j]);
    return transposed;
}
template <template <class, class> class Container, class ValueType, class A1, class A2>
Container<Container<Container<ValueType, A1>, A2>, std::allocator<Container<Container<ValueType, A1>, A2>>> apply_seq_DWT(
        Container<Container<ValueType, A1>, A2> ts)
{
    // Runs the fixed sequential DWT decomposition on every timeseries;
    // the result at index j holds the subband waveforms of series j.
    auto decomposed = Container<Container<Container<ValueType, A1>, A2>, std::allocator<Container<Container<ValueType, A1>, A2>>>();
    for (const auto& series : ts)
        decomposed.push_back(sequential_DWT(series));
    return decomposed;
}
template <template <class, class> class Container, class ValueType, class A1, class A2, class A3>
Container<Container<ValueType, A1>, A2> apply_energy_extractor(
        Container<Container<Container<ValueType, A1>, A2>, A3> ts_blocks)
{
    // For each series and each of its blocks, sums all per-subband energies
    // into one scalar: result[j][i] = total energy of block i of series j.
    auto totals = Container<Container<ValueType, A1>, A2>();
    for (size_t j = 0; j < ts_blocks.size(); ++j)
    {
        totals.push_back(Container<ValueType, A1>());
        for (const auto& block : ts_blocks[j])
        {
            auto energies = extract_energies(block);
            ValueType total = 0;
            for (const auto& e : energies)
                total += e;
            totals[j].push_back(total);
        }
    }
    return totals;
}
template <template <class, class> class Container, class ValueType, class A1, class A2, class A3>
Container<Container<Container<ValueType, A1>, A2>, A3> apply_energy_extractor_separate_subbands(
        Container<Container<Container<ValueType, A1>, A2>, A3> ts_blocks)
{
    // Like apply_energy_extractor, but keeps the per-subband energy vector
    // of every block instead of collapsing it to a single value.
    auto result = Container<Container<Container<ValueType, A1>, A2>, A3>();
    for (size_t j = 0; j < ts_blocks.size(); ++j)
    {
        result.push_back(Container<Container<ValueType, A1>, A2>());
        for (const auto& block : ts_blocks[j])
            result[j].push_back(extract_energies(block));
    }
    return result;
}
int main() {
    // Demo: times the energy extractor on a synthetic ramp, then decomposes
    // a CSV of vibration timeseries into per-subband waveform CSV files.
    std::cout << "DWT reducer example have started" << std::endl;
    std::cout << '\n';
    auto x = wavelet::linspace<std::vector<double>>(0.0, 1.0, 3200);
    std::cout << wavelet::wmaxlev(x.size(), 5) << std::endl;
    auto start = std::chrono::steady_clock::now();
    auto energies = extract_energies(x);
    auto end = std::chrono::steady_clock::now();
    std::cout << "elapsed time : "
        << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count()
        << " µs" << std::endl;
    std::cout << "result: [" ;
    for (int i = 0; i < energies.size()-1; ++i)
    {
        std::cout << energies[i] << ", " ;
    }
    std::cout<< energies[energies.size()-1] << "]"<< std::endl;
    //*/
    // single
    // auto training_dataset = read_csv_blaze<float>("Pt01_Ch01_Grooves_1_to_7.csv");
    // // std::cout << training_dataset << "\n";
    // auto test_data = read_csv_blaze<float>("Pt01_Ch01_Groove09.csv");
    // mat2bmp::blaze2bmp(training_dataset, "training_dataset.bmp");
    // mat2bmp::blaze2bmp(test_data, "test_data.bmp");
    // splt first
    // auto all_vibration_ts = read_csv_num<float>("Pt01_Ch01_Grooves_1_to_7.csv");
    // auto blocks = divide_timeseries(all_vibration_ts, 60);
    // auto energies = apply_energy_extractor(blocks);
    // write_csv(energies, "energy_per_groove_overall.csv");
    // auto energies_by_subbands = apply_energy_extractor_separate_subbands(blocks);
    // for (size_t i=0; i<energies_by_subbands.size(); ++i) {
    //     write_csv(energies_by_subbands[i], "energy_per_groove_" + std::to_string(i) + ".csv");
    // }
    // DWT first
    // Active path: transpose row-major CSV samples into per-series rows,
    // decompose each series, and write one CSV per series.
    auto all_vibration_ts = read_csv_num<float>("assets/PtAll_AllGrooves_energy_5.csv"); // all parts
    auto transposed = transpose_timeseries(all_vibration_ts);
    auto subband_waveforms = apply_seq_DWT(transposed);
    for (size_t i=0; i<subband_waveforms.size(); ++i) {
        write_csv(subband_waveforms[i], "filtered_waveform_groove_pAll_" + std::to_string(i) + ".csv"); // all parts
    }
    return 0;
}
| 8,844
|
C++
|
.cpp
| 216
| 35.152778
| 128
| 0.615663
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,463
|
daubechies_conv2d_example.cpp
|
metric-space-ai_metric/examples/transform_examples/daubechies_conv2d_example.cpp
|
#include <iostream>
#include <chrono>
#include "metric/transform/wavelet.hpp"
#include "metric/transform/wavelet2d.hpp"
#include "assets/helpers.cpp"
#include "metric/utils/visualizer.hpp"
int main() {
    // Demo: builds 2-D Daubechies wavelet kernels, runs a one-level 2-D DWT
    // split of the cameraman image via convolution, and dumps the four
    // subbands as bitmaps. An older matrix-based cascade is kept commented.
    // Cameraman
    int DaubechiesOrder = 4; //10; // SETUP HERE wavelet type (2 till 20 even only)
    int l_scale = 300; //3000; // SETUP HERE lum scaling in visualizer
    auto cm_b = read_csv_blaze<double>("assets/cameraman.csv", ",");
    //auto cm_b = read_csv_blaze<double>("assets/test.csv", ",");
    //auto c2d = wavelet::Convolution2dCustom<double, 1>(cm_b.rows(), cm_b.columns(), DaubechiesOrder, DaubechiesOrder);
    // Four separable 2-D kernels (LL, LH, HL, HH) for the chosen order.
    auto kernels = wavelet::create2dKernels<blaze::DynamicMatrix<double>>(DaubechiesOrder);
    std::cout << std::get<0>(kernels) << "\n";
    std::cout << std::get<1>(kernels) << "\n";
    std::cout << std::get<2>(kernels) << "\n";
    std::cout << std::get<3>(kernels) << "\n";
    auto splitted = wavelet::dwt2_conv2(cm_b, kernels);
    mat2bmp::blaze2bmp(std::get<0>(splitted), "cm_splited_conv2d_ll.bmp");
    mat2bmp::blaze2bmp(std::get<1>(splitted), "cm_splited_conv2d_lh.bmp");
    mat2bmp::blaze2bmp(std::get<2>(splitted), "cm_splited_conv2d_hl.bmp");
    mat2bmp::blaze2bmp(std::get<3>(splitted), "cm_splited_conv2d_hh.bmp");
    /* // old example
    std::chrono::time_point<std::chrono::steady_clock, std::chrono::nanoseconds> t1, t2;
    {
        t1 = std::chrono::steady_clock::now();
        auto db4_w = wavelet::DaubechiesMat<double>(cm_b.columns(), DaubechiesOrder); // transform matrix for ROWS of approptiate size (as width of the image)
        auto db4_h = wavelet::DaubechiesMat<double>(cm_b.rows(), DaubechiesOrder); // for COLUMNS (image height)
        blaze::CompressedMatrix<double> db4_w_t = blaze::trans(db4_w); // transposed matrices for inverse trancform
        blaze::CompressedMatrix<double> db4_h_t = blaze::trans(db4_h);
        t2 = std::chrono::steady_clock::now();
        std::cout << "Daubechies transform matrices creation time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
        t1 = std::chrono::steady_clock::now();
        auto cm_splitted_periodized = wavelet::dwt2s(cm_b, db4_w, db4_h);
        auto cm_splitted_periodized_2 = wavelet::dwt2s(cm_splitted_periodized, db4_w, db4_h);
        auto cm_splitted_periodized_3 = wavelet::dwt2s(cm_splitted_periodized_2, db4_w, db4_h);
        auto cm_splitted_periodized_4 = wavelet::dwt2s(cm_splitted_periodized_3, db4_w, db4_h);
        t2 = std::chrono::steady_clock::now();
        std::cout << "4x split time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
        auto cm_decoded_periodized = wavelet::dwt2s(cm_splitted_periodized, db4_w_t, db4_h_t);
        mat2bmp::blaze2bmp(cm_splitted_periodized/l_scale, "cm_splited_per.bmp");
        mat2bmp::blaze2bmp(cm_decoded_periodized/l_scale, "cm_restored_per.bmp");
        t1 = std::chrono::steady_clock::now();
        auto cm_decoded_periodized_cascade = wavelet::dwt2s(cm_splitted_periodized_4, db4_w_t, db4_h_t);
        cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
        cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
        cm_decoded_periodized_cascade = wavelet::dwt2s(cm_decoded_periodized_cascade, db4_w_t, db4_h_t);
        t2 = std::chrono::steady_clock::now();
        std::cout << "4x compose time: " << double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000 << "\n";
        mat2bmp::blaze2bmp(cm_splitted_periodized_2/l_scale, "cm_splited_per_2.bmp");
        mat2bmp::blaze2bmp(cm_splitted_periodized_3/l_scale, "cm_splited_per_3.bmp");
        mat2bmp::blaze2bmp(cm_splitted_periodized_4/l_scale, "cm_splited_per_4.bmp");
        mat2bmp::blaze2bmp(cm_decoded_periodized_cascade/l_scale, "cm_restored_per_cascade.bmp");
    }
    // */
    return 0;
}
| 4,032
|
C++
|
.cpp
| 58
| 62.655172
| 169
| 0.667005
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,464
|
helpers.cpp
|
metric-space-ai_metric/examples/transform_examples/assets/helpers.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Panda Team
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <sstream>
#include <cstring>
//#include <boost/gil/extension/io/png.hpp> // for read_png_donut, may be enabled // needs -lpng
#include <blaze/Blaze.h>
template <typename T> T convert_to(const std::string & str)
{
    // Generic replacement for stof/stod/stoi &c.: parses `str` into a T via
    // stream extraction (thanks to https://gist.github.com/mark-d-holmberg/862733).
    T parsed;
    std::istringstream stream(str);
    stream >> parsed;
    return parsed;
}
template <class ContainerType>
ContainerType read_csv(std::string filename, std::string sep=",")
{ // works with string, does not convert to numbers
    // Reads `filename` into a container-of-containers of std::string,
    // splitting each line on `sep`. Returns an empty container (after
    // printing a message) when the file cannot be opened.
    typedef typename ContainerType::value_type LINE;
    std::string line;
    ContainerType array = {};
    std::ifstream in(filename);
    if (!in.is_open()) {
        std::cout << "Failed to open file: " << filename << std::endl;
        return array;
    }
    while (getline(in, line)) {
        LINE ln;
        // Fix: find() returns size_type; the original stored it in a signed
        // int and relied on npos wrapping to a negative value. Also advance
        // by the full separator length so multi-character separators work.
        std::string::size_type pos;
        while ((pos = line.find(sep)) != std::string::npos) {
            std::string field = line.substr(0, pos);
            line = line.substr(pos + sep.size());
            ln.push_back(field);
        }
        ln.push_back(line);
        array.push_back(ln);
    }
    return array;
}
template <class ContainerType>
void write_csv(ContainerType data, std::string filename, std::string sep=",") // container of containers expected, TODO add check
{
    // Writes each element followed by `sep` — note the trailing separator at
    // the end of every row, matching this helper's historical output format.
    std::ofstream out;
    out.open(filename);
    for (const auto& row : data) {
        for (const auto& cell : row) {
            out << std::to_string(cell) << sep;
        }
        out << std::endl;
    }
    out.close();
} // TODO add return flag
template <class ValueType>
void blaze_dm_to_csv(blaze::DynamicMatrix<ValueType> data, std::string filename, std::string sep=",")
{
    // Writes a Blaze dense matrix as CSV; unlike write_csv, no separator is
    // emitted after the last column of a row.
    std::ofstream out;
    out.open(filename);
    for (auto i = 0; i < data.rows(); ++i) {
        for (auto j = 0; j < data.columns(); j++) {
            out << std::to_string(data(i, j));
            if (j + 1 < data.columns())
                out << sep;
        }
        out << std::endl;
    }
    out.close();
} // TODO add return flag
template <class ValueType>
std::vector<std::vector<ValueType>> read_csv_num(std::string filename, std::string sep=";")
{ // code dubbing with read_csv, TODO unify and remove one of these functions
    // Reads `filename` into numeric rows, converting each `sep`-separated
    // field with convert_to<ValueType>. Returns an empty result (after
    // printing a message) if the file cannot be opened.
    typedef typename std::vector<ValueType> LINE;
    std::string line;
    std::vector<std::vector<ValueType>> array = {};
    std::ifstream in(filename);
    if(!in.is_open()) {
        std::cout << "Failed to open file" << std::endl;
        return array;
    }
    while( getline(in,line) ) {
        LINE ln;
        // Fix: keep find()'s size_type instead of the original signed-int
        // narrowing (which relied on npos wrapping to -1); also advance past
        // the full separator so multi-character separators work.
        std::string::size_type pos;
        while( (pos = line.find(sep)) != std::string::npos ) {
            std::string field = line.substr(0, pos);
            line = line.substr(pos + sep.size());
            ln.push_back(convert_to<ValueType>(field));
        }
        ln.push_back(convert_to<ValueType>(line));
        array.push_back(ln);
    }
    return array;
}
template <class ValueType>
blaze::DynamicMatrix<ValueType, blaze::rowMajor> read_csv_blaze(const std::string & filename, std::string sep = ";")
{
    // Loads the CSV as strings, then converts cell-by-cell into a freshly
    // sized row-major Blaze matrix (dimensions taken from the first row).
    const auto cells = read_csv<std::vector<std::vector<std::string>>>(filename, sep);
    auto out = blaze::DynamicMatrix<ValueType, blaze::rowMajor>(cells.size(), cells[0].size());
    for (size_t r = 0; r < cells.size(); ++r)
        for (size_t c = 0; c < cells[0].size(); ++c)
            out(r, c) = convert_to<ValueType>(cells[r][c]);
    return out;
}
//template <template <class, bool> class BlazeContainerType, class ValueType, bool SO>
//bool read_csv_blaze(const std::string & filename, BlazeContainerType<ValueType, SO> & matrix, std::string sep = ";") {
template <template <class, bool> class BlazeContainerType, class ValueType>
bool read_csv_blaze(const std::string & filename, BlazeContainerType<ValueType, blaze::rowMajor> & matrix, std::string sep = ";") {
    // Parses `filename` directly into a caller-provided Blaze matrix.
    // NOTE(review): `matrix` must already be sized to fit the file's rows and
    // columns — no bounds checking is performed. Returns false (after
    // printing a message) if the file cannot be opened.
    std::string line;
    std::ifstream in(filename);
    if(!in.is_open()) {
        std::cout << "Failed to open file" << std::endl;
        return false;
    }
    size_t row_idx = 0;
    while( getline(in, line) ) {
        size_t col_idx = 0;
        // Fixes: removed two leftover debug prints that dumped every field
        // and remaining line to stdout; use size_type/npos instead of the
        // original signed-int cast of find(); skip the whole separator so
        // multi-character separators work.
        std::string::size_type pos;
        while( (pos = line.find(sep)) != std::string::npos ) {
            std::string field = line.substr(0, pos);
            line = line.substr(pos + sep.size());
            matrix(row_idx, col_idx) = convert_to<ValueType>(field);
            ++col_idx;
        }
        matrix(row_idx, col_idx) = convert_to<ValueType>(line);
        ++row_idx;
    }
    return true;
}
// ------ added for donut example
/*
blaze::DynamicMatrix<double> cv2blaze(cv::Mat in) {
blaze::DynamicMatrix<double> out (in.rows, in.cols, 0);
for (int y = 0; y<in.rows; y++) {
for (int x = 0; x<in.cols; x++) {
out(y, x) = in.at<double>(y, x);
}
}
return out;
}
cv::Mat blaze2cv(blaze::DynamicMatrix<double> in) {
cv::Mat out (in.rows(), in.columns(), cv::DataType<double>::type);
for (std::size_t y = 0; y<in.rows(); y++) {
for (std::size_t x = 0; x<in.columns(); x++) {
out.at<double>(y, x) = in(y, x);
}
}
return out;
}
// */
template <class ContainerType>
void vv_to_csv(ContainerType data, std::string filename, std::string sep=",") // container
{
    // Like write_csv, but without the trailing separator at the end of a row.
    std::ofstream out;
    out.open(filename);
    for (const auto& row : data) {
        bool first = true;
        for (const auto& cell : row) {
            if (!first)
                out << sep;
            out << std::to_string(cell);
            first = false;
        }
        out << std::endl;
    }
    out.close();
} // TODO add return flag
template <typename T>
void vector2bmp(std::vector<std::vector<T>> m, std::string filename, double amplify = 1)
{ // TODO combine with blaze2bmp
    // Dumps a 2-D value matrix as a 24-bit BMP: positive values render as
    // cyan (green+blue channels), negative values as red, each scaled by
    // 255*amplify and clamped to [0, 255].
    // NOTE(review): no error checks on malloc/fopen — assumes both succeed.
    int h = m.size();
    assert(h>0);
    int w = m[0].size();
    int x, y, r, g, b;
    FILE *f;
    unsigned char *img = NULL;
    int filesize = 54 + 3*w*h; // 54-byte header + 3 bytes (BGR) per pixel
    img = (unsigned char *)malloc(3*w*h);
    std::memset(img,0,3*w*h);
    for(int i=0; i<w; i++)
    {
        for(int j=0; j<h; j++)
        {
            x=i; y=j;
            r = 0;
            g = 0;
            b = 0;
            int p = m[j][i]*255*amplify;
            if (p > 0) // cyan (green+blue) for positive, red for negative
            {
                g = p;
                b = p;
            }
            else
                r = -p;
            // clamp channels to the 8-bit range
            if (r > 255) r=255;
            if (g > 255) g=255;
            if (b > 255) b=255;
            // BMP pixel data is stored in BGR order
            img[(x+y*w)*3+2] = (unsigned char)(r);
            img[(x+y*w)*3+1] = (unsigned char)(g);
            img[(x+y*w)*3+0] = (unsigned char)(b);
        }
    }
    // 14-byte BITMAPFILEHEADER + 40-byte BITMAPINFOHEADER (1 plane, 24 bpp);
    // file size and dimensions are patched in little-endian below.
    unsigned char bmpfileheader[14] = {'B','M', 0,0,0,0, 0,0, 0,0, 54,0,0,0};
    unsigned char bmpinfoheader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};
    unsigned char bmppad[3] = {0,0,0};
    bmpfileheader[ 2] = (unsigned char)(filesize    );
    bmpfileheader[ 3] = (unsigned char)(filesize>> 8);
    bmpfileheader[ 4] = (unsigned char)(filesize>>16);
    bmpfileheader[ 5] = (unsigned char)(filesize>>24);
    bmpinfoheader[ 4] = (unsigned char)(       w    );
    bmpinfoheader[ 5] = (unsigned char)(       w>> 8);
    bmpinfoheader[ 6] = (unsigned char)(       w>>16);
    bmpinfoheader[ 7] = (unsigned char)(       w>>24);
    bmpinfoheader[ 8] = (unsigned char)(       h    );
    bmpinfoheader[ 9] = (unsigned char)(       h>> 8);
    bmpinfoheader[10] = (unsigned char)(       h>>16);
    bmpinfoheader[11] = (unsigned char)(       h>>24);
    f = fopen(filename.c_str(),"wb");
    fwrite(bmpfileheader,1,14,f);
    fwrite(bmpinfoheader,1,40,f);
    for(int i=0; i<h; i++)
    {
        // rows are written bottom-up (BMP convention), padded to 4-byte multiples
        fwrite(img+(w*(h-i-1)*3),3,w,f);
        fwrite(bmppad,1,(4-(w*3)%4)%4,f);
    }
    free(img);
    fclose(f);
}
/* // disabled since requires boost png, may be enabled if dependency is satisfied
template <typename T>
blaze::DynamicMatrix<T> read_png_donut(std::string filename) {
boost::gil::rgb8_image_t img;
boost::gil::read_image(filename, img, boost::gil::png_tag());
auto gray = boost::gil::color_converted_view<boost::gil::gray8_pixel_t>(const_view(img));
blaze::DynamicMatrix<T> p (gray.height(), gray.width());
for (int y=0; y<gray.height(); ++y) {
for (int x=0; x<gray.width(); ++x) {
p(y, x) = gray(x, y)/255.0;
}
}
return p;
}
// */
template <typename T>
std::vector<std::vector<T>> matrix2vv(const blaze::DynamicMatrix<T> & mat) {
    // Converts a Blaze dense matrix into nested std::vectors, row by row.
    // Fix: the temporaries were hard-coded to std::vector<double>, which
    // failed to compile for any T other than double despite the template
    // return type promising std::vector<std::vector<T>>.
    std::vector<std::vector<T>> v;
    v.reserve(mat.rows());
    for (size_t i = 0; i < mat.rows(); ++i) {
        std::vector<T> line;
        line.reserve(mat.columns());
        for (size_t j = 0; j < mat.columns(); ++j) {
            line.push_back(mat(i, j));
        }
        v.push_back(line);
    }
    return v;
}
| 9,521
|
C++
|
.cpp
| 266
| 28.988722
| 131
| 0.565146
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,466
|
PMQ_example.cpp
|
metric-space-ai_metric/examples/utils_examples/PMQ_example.cpp
|
// #define USE_VECTOR_SORT 1
#include "metric/utils/poor_mans_quantum.hpp"
#include <vector>
#include <iostream>
#include <chrono>
#include <map>
int main()
{
    // PMQ (Poor Man's Quantum) demo: builds random variables from empirical
    // samples and from a parametric normal, combines them arithmetically,
    // then checks equality against the reference and normality.
    std::vector<float> data1 = {0.1, 0.3, 0.2, 0.4, 0.5, 0.6, 0.7};
    // 100 empirical samples (presumably drawn from a normal distribution
    // centred near 4 — TODO confirm against the example's documentation).
    std::vector<double> data2 = {
        3.67318577121409,
        4.81232300464080,
        4.54554010352612,
        2.94836769047915,
        4.39746699579531,
        3.24810526131811,
        5.51626689664595,
        3.96743349080552,
        5.63599965727829,
        3.57494150938997,
        4.58943336671822,
        3.93720877428168,
        1.97804106994821,
        3.01786847422095,
        4.61251129816695,
        3.94511387001132,
        2.88126799754728,
        3.37362146113286,
        4.24951774056280,
        3.00698099345040,
        4.97495022481131,
        3.35929049327394,
        5.80886262051925,
        2.92013374926117,
        4.19918944407526,
        2.47897343823105,
        3.27636887471093,
        3.40674968498920,
        4.40133633981880,
        4.94213331923694,
        4.30048596759573,
        3.62692934136900,
        4.81548850809250,
        4.79888699215672,
        4.12020528195636,
        4.57124762971448,
        4.41279601031139,
        3.01303811664848,
        4.75956832591478,
        3.34279870090165,
        3.39608151862383,
        4.17694682232941,
        3.69249653013725,
        3.86817964708411,
        4.59535767388410,
        5.04683278430523,
        3.80204136738816,
        4.32767816390720,
        3.76169849541027,
        4.22959689322031,
        4.43999790482263,
        3.38313407111077,
        4.27483678691167,
        4.60110203246830,
        4.09230795123896,
        5.72984139157236,
        3.39144255526168,
        3.26294022830219,
        2.25012069361237,
        4.91048257964711,
        4.86708255294733,
        3.92010716094196,
        4.89847598937714,
        4.18370342309125,
        4.29079013488445,
        4.11294471702105,
        4.43995218887244,
        4.10166244370034,
        6.78733522781344,
        2.83333496980536,
        2.14570091731031,
        2.85931885533037,
        2.90665654376040,
        3.56639070352518,
        3.83153012172455,
        3.78146643997342,
        4.54133443571938,
        4.38926620384326,
        4.75122898468833,
        5.77825589929100,
        5.22306255173381,
        2.71674389539523,
        1.67104548371666,
        4.90193146695171,
        2.16436131626481,
        4.06675691143687,
        4.03547948583758,
        6.22716807816787,
        3.93078574597795,
        3.49267693538550,
        4.23580967257624,
        4.24580485189384,
        4.07004520941681,
        3.39141948992005,
        2.77740661981337,
        4.31650036079772,
        2.65713076337807,
        2.96781565592783,
        5.33121588506497,
        3.58109680497015};
    // Reference: parametric Normal(mean 3.5, stddev 1) sampled 50 times.
    metric::PMQ<metric::Normal> rv0(3.5, 1, 50);
    metric::PMQ rv1(data1);
    metric::PMQ rv2(data2);
    auto rv5 = rv1 * 0.5;  // scale rv1 by a constant
    auto rv6 = rv5 + rv2;  // sum of the two random variables
    // Equality test against the reference; the printed value is treated as
    // a percentage here — TODO confirm operator== semantics in PMQ docs.
    auto isSignificantEqual = rv6 == rv0;
    std::cout << "is_equal: " << isSignificantEqual << "\%" << std::endl;
    auto isNormalDistributed = rv6.is_normal_distributed();
    std::cout << "is normal distributed: " << isNormalDistributed * 100 << "\%" << std::endl;
    return 0;
}
| 3,316
|
C++
|
.cpp
| 121
| 19.305785
| 93
| 0.605842
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,467
|
MetricDecisionTree_example.cpp
|
metric-space-ai_metric/examples/ensemble_examples/MetricDecisionTree_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Max Filippov
*/
#include <iostream>
#include <vector>
#include <deque>
#include <chrono>
#include <functional>
#include <variant>
#include "assets/helpers.cpp" // csv reader
#include "metric/mapping/ensembles.hpp"
#include "metric/distance.hpp"
template <typename T>
void vector_print(const std::vector<T>& vec)
{
std::cout << "[";
for (size_t i = 0; i < vec.size(); i++) {
if (i < vec.size() - 1) {
std::cout << vec[i] << ", ";
} else {
std::cout << vec[i] << "]" << std::endl;
}
}
}
int main()
{
std::cout << "Metric Decision Tree example have started" << std::endl;
std::cout << '\n';
typedef std::variant<double, std::vector<double>, std::vector<std::vector<double>>, std::string> V; // field type
typedef std::vector<V> Record;
std::vector<std::vector<double>> img1 = { // needs to be larger than blur kernel size coded intarnally as 11
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
std::vector<std::vector<double>> img2
= { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } };
std::vector<Record> selection
= { { V((double)2), V(std::vector<double>({ 1, 2 })), V(std::vector<double>({ 0, 1, 1, 1, 1, 1, 2, 3 })),
V(img1), V(""), V((double)1) },
{ V((double)2), V(std::vector<double>({ 1, 5 })), V(std::vector<double>({ 1, 1, 1, 1, 1, 2, 3, 4 })),
V(img2), V("A"), V((double)1) },
{ V((double)1), V(std::vector<double>({ 4, 5 })), V(std::vector<double>({ 2, 2, 2, 1, 1, 2, 0, 0 })),
V(img2), V("AA"), V((double)2) },
{ V((double)2), V(std::vector<double>({ 1, 2 })), V(std::vector<double>({ 3, 3, 2, 2, 1, 1, 0, 0 })),
V(img1), V("AAA"), V((double)1) },
{ V((double)2), V(std::vector<double>({ 5 })), V(std::vector<double>({ 4, 3, 2, 1, 0, 0, 0, 0 })),
V(img1), V("AAAA"), V((double)1) },
{ V((double)2), V(std::vector<double>({ 1, 4, 5 })), V(std::vector<double>({ 4, 3, 2, 1, 0, 0, 0, 0 })),
V(img2), V("BAAA"), V((double)1) },
{ V((double)1), V(std::vector<double>({ 1, 2, 3, 4 })),
V(std::vector<double>({ 5, 3, 2, 1, 0, 0, 0, 0 })), V(img2), V("BBAA"), V((double)3) },
{ V((double)1), V(std::vector<double>({ 1 })), V(std::vector<double>({ 4, 6, 2, 2, 1, 1, 0, 0 })),
V(img1), V("BBA"), V((double)1) },
{ V((double)2), V(std::vector<double>({ 4, 5 })), V(std::vector<double>({ 3, 7, 2, 1, 0, 0, 0, 0 })),
V(img2), V("BB"), V((double)1) },
{ V((double)2), V(std::vector<double>({ 1, 2, 4, 5 })),
V(std::vector<double>({ 2, 5, 1, 1, 0, 0, 1, 2 })), V(img1), V("B"), V((double)1) } };
// vector of accessors for field 0
auto field0accessors = [](const Record& r) { return std::get<double>(r[0]); };
// vector of accessors for field 1
auto field1accessors = [](const Record& r) {
std::vector<double> v(std::get<std::vector<double>>(r[1]));
v.resize(4);
return v;
};
// vector of accessors for field 2
auto field2accessors = [](const Record& r) {
std::vector<double> v(std::get<std::vector<double>>(r[2]));
v.resize(8);
return v;
};
// vector of accessors for field 3
auto field3accessors = [](const Record& r) { return std::get<std::vector<std::vector<double>>>(r[3]); };
// vector of accessors for field 4
auto field4accessors = [](const Record& r) { return std::get<std::string>(r[4]); };
// label accessor (for single record)
std::function<int(const Record&)> response = [](const Record& r) { return (int)std::abs(std::get<double>(r[5])); };
// build dimension and Dimension objects
typedef double InternalType;
// features
using a0_type = decltype(field0accessors);
using a1_type = decltype(field1accessors);
using a2_type = decltype(field2accessors);
using a3_type = decltype(field3accessors);
using a4_type = decltype(field4accessors);
auto dim0 = metric::make_dimension(metric::Euclidean<InternalType>(), field0accessors);
auto dim1 = metric::make_dimension(metric::Manhatten<InternalType>(), field1accessors);
auto dim2 = metric::make_dimension(metric::P_norm<InternalType>(), field2accessors);
auto dim3 = metric::make_dimension(metric::Euclidean_thresholded<InternalType>(), field2accessors);
auto dim4 = metric::make_dimension(metric::Cosine<InternalType>(), field2accessors);
auto dim5 = metric::make_dimension(metric::SSIM<double, std::vector<InternalType>>(), field3accessors);
auto dim6 = metric::make_dimension(metric::TWED<InternalType>(0, 1), field2accessors);
auto dim7 = metric::make_dimension(metric::Edit<char>(), field4accessors);
auto dim10 = metric::make_dimension(metric::EMD<InternalType>(8, 8), field2accessors);
typedef std::variant<metric::Dimension<metric::Euclidean<InternalType>, a0_type>,
metric::Dimension<metric::Manhatten<InternalType>, a1_type>,
metric::Dimension<metric::P_norm<InternalType>, a2_type>,
metric::Dimension<metric::Euclidean_thresholded<InternalType>, a2_type>,
metric::Dimension<metric::Cosine<InternalType>, a2_type>,
metric::Dimension<metric::SSIM<double, std::vector<InternalType>>, a3_type>,
metric::Dimension<metric::TWED<InternalType>, a2_type>,
metric::Dimension<metric::EMD<InternalType>, a2_type>, // matrix C is temporary created inside functor
metric::Dimension<metric::Edit<std::string::value_type>, a4_type>>
VariantType;
std::vector<VariantType> dims = { dim0, dim1, dim2, dim3, dim4, dim5, dim6, dim7, dim10 };
std::vector<Record> test_sample = { selection[0], selection[2], selection[6] };
std::vector<int> prediction;
auto startTime = std::chrono::steady_clock::now();
auto endTime = std::chrono::steady_clock::now();
std::cout << "Metric Desicion Tree: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto model = metric::DT<Record>();
std::cout << "Metric Desicion Tree training... " << std::endl;
model.train(selection, dims, response);
endTime = std::chrono::steady_clock::now();
std::cout << "\n";
std::cout << "Metric Desicion Tree trained (Time = "
<< double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000
<< " s)" << std::endl;
model.predict(test_sample, dims, prediction);
std::cout << "\n";
std::cout << "Metric Desicion Tree prediction: " << std::endl;
vector_print(prediction);
std::cout << "\n";
prediction.clear();
model.predict(test_sample, dims, prediction);
std::cout << "\n";
std::cout << "Metric Desicion Tree prediction2: " << std::endl;
vector_print(prediction);
std::cout << "\n";
std::cout << "Distances separately: " << std::endl;
// test Edit separately
metric::Edit<char> edit_functor;
auto edit_dist = edit_functor("AAAB", "AAC");
std::cout << "\nEdit distance: " << edit_dist << "\n";
// test SSIM separately
metric::SSIM<double, std::vector<double>> SSIM_functor;
auto SSIM_dist = SSIM_functor(img1, img2);
std::cout << "\nSSIM distance: " << SSIM_dist << "\n";
return 0;
}
| 10,268
|
C++
|
.cpp
| 165
| 54.515152
| 119
| 0.512464
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,468
|
Boosting_example.cpp
|
metric-space-ai_metric/examples/ensemble_examples/Boosting_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "metric/mapping/ensembles.hpp"
#include "assets/helpers.cpp" // csv reader
#include <variant>
#include <deque> // for Record test
template <typename T>
void vector_print(const std::vector<T> &vec)
{
std::cout << "[";
for (size_t i = 0; i < vec.size(); i++)
{
if (i < vec.size() - 1)
{
std::cout << vec[i] << ", ";
}
else
{
std::cout << vec[i] << "]" << std::endl;
}
}
}
int main()
{
std::cout << "Boosting example have started" << std::endl;
std::cout << '\n';
//*
typedef std::vector<std::variant<int, double, std::string, std::vector<std::string>, std::vector<double> > > Record1;
using Record = std::vector<double>; // may be of arbitrary type, with appropriate accessors
std::vector<Record> payments = {
{0,3,5,0},
{1,4,5,0},
{2,5,2,1},
{3,6,2,1}
};
std::vector<std::function<double(Record)> > features;
for (int i = 0; i < (int)payments[0].size() - 1; ++i) {
features.push_back(
[=](auto r) { return r[i]; } // we need closure: [=] instead of [&] !! THIS DIFFERS FROM API !!
);
}
std::function<bool(Record)> response = [](Record r) {
if (r[r.size() - 1] >= 0.5)
return true;
else
return false;
};
std::vector<Record> test_sample = { {3,6,2, 1} };
// test on Iris
using IrisRec = std::vector<std::string>;
std::vector<IrisRec> iris_str = read_csv<std::vector<IrisRec>>("./assets/iris.csv");
std::deque<IrisRec> iris_strD = read_csv<std::deque<IrisRec>>("./assets/iris.csv");
iris_str.erase(iris_str.begin()); // remove headers
iris_strD.erase(iris_strD.begin()); // remove headers
std::vector<IrisRec> IrisTestRec = { iris_str[5] }; // 1
std::deque<IrisRec> IrisTestRecD = { iris_strD[5] }; // 1
std::vector<IrisRec> IrisTestMultipleRec = { iris_str[5], iris_str[8], iris_str[112] }; // 1, 1, 0
std::cout << iris_str.size() << std::endl;
std::cout << iris_str[0].size() << std::endl;
std::vector<std::function<double(IrisRec)> > features_iris;
for (int i = 1; i < (int)iris_str[0].size() - 1; ++i) { // skip 1st column
if (i < (int)iris_str[0].size() - 1) {
features_iris.push_back(
[=](auto r) { return std::stod(r[i]); } // we need closure: [=] instead of [&]
);
}
else { // TODO remove in order to test response accessor
features_iris.push_back(
[=](auto r) {
if (r[i] == "\"setosa\"") {
// cout << r[i] << ", " << i << endl;
return 1.0;
}
else
return 0.0;
}
);
}
}
std::function<bool(IrisRec)> response_iris = [](IrisRec r) {
if (r[r.size() - 1] == "\"setosa\"") // (std::stod(r[r.size() - 1]) >= 0.5)
return true;
else
return false;
};
for (size_t i = 0; i < iris_str[0].size() - 2; i++)
std::cout << features_iris[i](iris_str[10]) << ", ";
std::cout << std::endl;
std::vector<bool> prediction;
// SVM
////
// using SVM with default metaparams
std::cout << "Boost SVM on Iris: " << std::endl;
auto startTime = std::chrono::steady_clock::now();
auto svmModel_3 = metric::edmClassifier<IrisRec, CSVM>();
auto boostSvmModel_3 = metric::Boosting<IrisRec, metric::edmClassifier<IrisRec, CSVM>, metric::SubsampleRUS<IrisRec>>(10, 0.75, 0.5, svmModel_3);
std::cout << "training... " << std::endl;
boostSvmModel_3.train(iris_str, features_iris, response_iris, true);
auto endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
boostSvmModel_3.predict(IrisTestRec, features_iris, prediction);
std::cout << "Boost SVM predict on single Iris: " << std::endl;
vector_print(prediction);
boostSvmModel_3.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Boost SVM predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
////
// using SVM with metaparams
std::cout << "Boost specialized SVM on Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto svmModel_4 = metric::edmSVM<IrisRec>(C_SVC, RBF, 3, 0, 100, 0.001, 1, 0, NULL, NULL, 0.5, 0.1, 1, 0);
auto boostSvmModel_4 = metric::Boosting<IrisRec, metric::edmSVM<IrisRec>, metric::SubsampleRUS<IrisRec>>(10, 0.75, 0.5, svmModel_4);
std::cout << "training... " << std::endl;
boostSvmModel_4.train(iris_str, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
boostSvmModel_4.predict(IrisTestRec, features_iris, prediction);
std::cout << "Boost specialized SVM predict on single Iris: " << std::endl;
vector_print(prediction);
boostSvmModel_4.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Boost specialized SVM predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
// C4.5
//
// using C4.5 with default metaparams
std::cout << "Boosting C4.5 on Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto c45Model_2 = metric::edmClassifier<IrisRec, libedm::CC45>();
auto boostC45Model_2 = metric::Boosting<IrisRec, metric::edmClassifier<IrisRec, CC45>, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, c45Model_2);
std::cout << "training... " << std::endl;
boostC45Model_2.train(iris_str, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
boostC45Model_2.predict(IrisTestRec, features_iris, prediction);
std::cout << "Boosting C4.5 predict on single Iris: " << std::endl;
vector_print(prediction);
boostC45Model_2.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Boosting C4.5 predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
//
// using C4.5 with metaparams
std::cout << "Boosting with metaparams C4.5 on Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto c45Model_3 = metric::edmC45<IrisRec>(2, 1e-3, 0.25, true);
auto boostC45Model_3 = metric::Boosting<IrisRec, metric::edmC45<IrisRec>, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, c45Model_3);
std::cout << "training... " << std::endl;
boostC45Model_3.train(iris_str, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
boostC45Model_3.predict(IrisTestRec, features_iris, prediction);
std::cout << "Boosting with metaparams C4.5 predict on single Iris: " << std::endl;
vector_print(prediction);
boostC45Model_3.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Boosting with metaparams C4.5 predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
return 0;
}
| 7,311
|
C++
|
.cpp
| 165
| 41.49697
| 159
| 0.65955
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,469
|
CWA_example.cpp
|
metric-space-ai_metric/examples/ensemble_examples/CWA_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <vector>
#include <iostream>
#include "metric/mapping/ensembles/DT/correlation_weighted_accuracy.hpp"
int main()
{
std::cout << "Correlation Weighted Accuracy example have started" << std::endl;
std::cout << '\n';
std::vector<int> g1 = {3, 2, 2, 3, 1, 1}; // Known groups
std::vector<int> g2 = {4, 2, 2, 2, 1, 1}; // Predicted groups
double cwa = metric::correlation_weighted_accuracy(g1, g2);
std::cout << "CWA: " << cwa << std::endl;
return 0;
}
| 722
|
C++
|
.cpp
| 19
| 35.105263
| 83
| 0.676259
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,470
|
Bagging_example.cpp
|
metric-space-ai_metric/examples/ensemble_examples/Bagging_example.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "metric/mapping/ensembles.hpp"
#include "assets/helpers.cpp" // csv reader
#include <variant>
#include <deque> // for Record test
template <typename T>
void vector_print(const std::vector<T> &vec)
{
std::cout << "[";
for (size_t i = 0; i < vec.size(); i++)
{
if (i < vec.size() - 1)
{
std::cout << vec[i] << ", ";
}
else
{
std::cout << vec[i] << "]" << std::endl;
}
}
}
int main()
{
std::cout << "Bagging example have started" << std::endl;
std::cout << '\n';
typedef std::vector<std::variant<int, double, std::string, std::vector<std::string>, std::vector<double>>> Record1;
using Record = std::vector<double>; // may be of arbitrary type, with appropriate accessors
std::vector<Record> payments = { { 0, 3, 5, 0 }, { 1, 4, 5, 0 }, { 2, 5, 2, 1 }, { 3, 6, 2, 1 } };
std::vector<std::function<double(Record)>> features;
for (int i = 0; i < (int)payments[0].size() - 1; ++i) {
features.push_back(
[=](auto r) { return r[i]; } // we need closure: [=] instead of [&] !! THIS DIFFERS FROM API !!
);
}
std::function<bool(Record)> response = [](Record r) {
if (r[r.size() - 1] >= 0.5)
return true;
else
return false;
};
std::vector<Record> test_sample = { { 3, 6, 2, 1 } };
// test on Iris
using IrisRec = std::vector<std::string>;
std::vector<IrisRec> iris_str = read_csv<std::vector<IrisRec>>("./assets/iris.csv");
std::deque<IrisRec> iris_strD = read_csv<std::deque<IrisRec>>("./assets/iris.csv");
iris_str.erase(iris_str.begin()); // remove headers
iris_strD.erase(iris_strD.begin()); // remove headers
std::vector<IrisRec> IrisTestRec = { iris_str[5] }; // 1
std::deque<IrisRec> IrisTestRecD = { iris_strD[5] }; // 1
std::vector<IrisRec> IrisTestMultipleRec = { iris_str[5], iris_str[8], iris_str[112] }; // 1, 1, 0
std::deque<IrisRec> IrisTestMultipleRecD = { iris_strD[5], iris_strD[8], iris_strD[112] }; // 1, 1, 0
std::cout << iris_str.size() << std::endl;
std::cout << iris_str[0].size() << std::endl;
std::vector<std::function<double(IrisRec)>> features_iris;
for (int i = 1; i < (int)iris_str[0].size() - 1; ++i) { // skip 1st column
if (i < (int)iris_str[0].size() - 1) {
features_iris.push_back([=](auto r) { return std::stod(r[i]); } // we need closure: [=] instead of [&]
);
} else { // TODO remove in order to test response accessor
features_iris.push_back([=](auto r) {
if (r[i] == "\"setosa\"") {
return 1.0;
} else
return 0.0;
});
}
}
std::function<bool(IrisRec)> response_iris = [](IrisRec r) {
if (r[r.size() - 1] == "\"setosa\"")
return true;
else
return false;
};
for (size_t i = 0; i < iris_str[0].size() - 2; i++)
std::cout << features_iris[i](iris_str[10]) << ", ";
std::cout << std::endl;
std::vector<bool> prediction;
// SVM
////
// using Bagging on both specialized and default SVM
std::cout << "Bagging on both specialized and default SVM on Iris: " << std::endl;
auto startTime = std::chrono::steady_clock::now();
using SVMWeakLrnVariant = std::variant<metric::edmSVM<IrisRec>, metric::edmClassifier<IrisRec, CSVM> >;
std::vector<SVMWeakLrnVariant> svm_models_1 = {};
SVMWeakLrnVariant svmModel_5 = metric::edmSVM<IrisRec>(C_SVC, RBF, 3, 0, 100, 0.001, 1, 0, NULL, NULL, 0.5, 0.1, 1, 0);
SVMWeakLrnVariant svmModel_6 = metric::edmClassifier<IrisRec, CSVM>();
svm_models_1.push_back(svmModel_5);
svm_models_1.push_back(svmModel_6);
auto baggingSVMmodel_1 = metric::Bagging<IrisRec, SVMWeakLrnVariant, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, { 0.3, 0.7 }, svm_models_1); // 30% of first weak learner type, 70% of second
std::cout << "training... " << std::endl;
baggingSVMmodel_1.train(iris_str, features_iris, response_iris, true);
auto endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
baggingSVMmodel_1.predict(IrisTestRec, features_iris, prediction);
std::cout << "Bagging on both specialized and default SVM predict on single Iris: " << std::endl;
vector_print(prediction);
baggingSVMmodel_1.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Bagging on both specialized and default SVM predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
////
// using Bagging on both specialized and default SVM with deque
std::cout << "Bagging on both specialized and default SVM on deque Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto baggingSVMmodel_2 = metric::Bagging<IrisRec, SVMWeakLrnVariant, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, { 0.3, 0.7 }, svm_models_1); // 30% of first weak learner type, 70% of second
std::cout << "training... " << std::endl;
baggingSVMmodel_2.train(iris_strD, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
baggingSVMmodel_2.predict(IrisTestRecD, features_iris, prediction);
std::cout << "Bagging on both specialized and default SVM predict on single deque Iris: " << std::endl;
vector_print(prediction);
baggingSVMmodel_2.predict(IrisTestMultipleRecD, features_iris, prediction);
std::cout << "Bagging on both specialized and default SVM predict on multiple deque Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
// C4.5
//
// using Bagging on both specialized and default C4.5
std::cout << "Bagging on both specialized and default C4.5 on Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
using C45WeakLrnVariant = std::variant<metric::edmC45<IrisRec>, metric::edmClassifier<IrisRec, CC45> >;
std::vector<C45WeakLrnVariant> c45_models_1 = {};
C45WeakLrnVariant c45Model_4 = metric::edmC45<IrisRec>(2, 1e-3, 0.25, true);
C45WeakLrnVariant c45Model_5 = metric::edmClassifier<IrisRec, CC45>();
c45_models_1.push_back(c45Model_4);
c45_models_1.push_back(c45Model_5);
auto baggingC45model_1 = metric::Bagging<IrisRec, C45WeakLrnVariant, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, { 0.3, 0.7 }, c45_models_1); // 30% of first weak learner type, 70% of second
std::cout << "training... " << std::endl;
baggingC45model_1.train(iris_str, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
baggingC45model_1.predict(IrisTestRec, features_iris, prediction);
std::cout << "Bagging on both specialized and default C4.5 predict on single Iris: " << std::endl;
vector_print(prediction);
baggingC45model_1.predict(IrisTestMultipleRec, features_iris, prediction);
std::cout << "Bagging on both specialized and default C4.5 predict on multiple Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
//
// using Bagging on both specialized and default C4.5 with deque
std::cout << "Bagging on both specialized and default C4.5 on deque Iris: " << std::endl;
startTime = std::chrono::steady_clock::now();
auto baggingC45model_2 = metric::Bagging<IrisRec, C45WeakLrnVariant, metric::SubsampleRUS<IrisRec> >(10, 0.75, 0.5, { 0.3, 0.7 }, c45_models_1); // 30% of first weak learner type, 70% of second
std::cout << "training... " << std::endl;
baggingC45model_2.train(iris_strD, features_iris, response_iris, true);
endTime = std::chrono::steady_clock::now();
std::cout << "trained (Time = " << double(std::chrono::duration_cast<std::chrono::microseconds>(endTime - startTime).count()) / 1000000 << " s)" << std::endl;
baggingC45model_2.predict(IrisTestRecD, features_iris, prediction);
std::cout << "Bagging on both specialized and default C4.5 predict on single deque Iris: " << std::endl;
vector_print(prediction);
baggingC45model_2.predict(IrisTestMultipleRecD, features_iris, prediction);
std::cout << "Bagging on both specialized and default C4.5 predict on multiple deque Iris: " << std::endl;
vector_print(prediction);
std::cout << "\n";
return 0;
}
| 8,755
|
C++
|
.cpp
| 162
| 49.740741
| 194
| 0.658197
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,471
|
helpers.cpp
|
metric-space-ai_metric/examples/ensemble_examples/assets/helpers.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Panda Team
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <sstream>
#include <blaze/Blaze.h>
template <typename T> T convert_to(const std::string & str)
{
std::istringstream s(str);
T num;
s >> num;
return num;
} // templated version of stof, stod, etc., thanks to https://gist.github.com/mark-d-holmberg/862733
template <class ContainerType>
ContainerType read_csv(std::string filename, std::string sep=",") {
typedef typename ContainerType::value_type LINE;
std::string line;
int pos;
ContainerType array = {};
std::ifstream in(filename);
if (!in.is_open()) {
std::cout << "Failed to open file" << std::endl;
return array;
}
while (getline(in, line)) {
LINE ln;
while( (pos = line.find(sep)) >= 0) {
std::string field = line.substr(0, pos);
line = line.substr(pos+1);
ln.push_back(field);
}
ln.push_back(line);
array.push_back(ln);
}
return array;
}
//template <class ValueType>
//std::vector<std::vector<ValueType>> read_csv_num(std::string filename, std::string sep=";") {
// typedef typename std::vector<ValueType> LINE;
// std::string line;
// int pos;
// std::vector<std::vector<ValueType>> array = {};
// std::ifstream in(filename);
// if(!in.is_open()) {
// std::cout << "Failed to open file" << std::endl;
// return array;
// }
// while( getline(in,line) ) {
// LINE ln;
// while( (pos = line.find(sep)) >= 0) {
// std::string field = line.substr(0, pos);
// line = line.substr(pos+1);
// ln.push_back(convert_to<ValueType>(field));
// }
// ln.push_back(convert_to<ValueType>(line));
// array.push_back(ln);
// }
// return array;
//}
template <class ValueType>
blaze::DynamicMatrix<ValueType, blaze::rowMajor> read_csv_blaze(const std::string & filename)
{
auto array = read_csv<std::vector<std::vector<std::string>>>(filename, ";");
auto m = blaze::DynamicMatrix<ValueType, blaze::rowMajor>(array.size(), array[0].size());
for (size_t i=0; i<array.size(); ++i)
{
for (size_t j=0; j<array[0].size(); ++j)
{
m(i, j) = convert_to<ValueType>(array[i][j]);
}
}
return m;
}
//template <template <class, bool> class BlazeContainerType, class ValueType, bool SO>
//bool read_csv_blaze(const std::string & filename, BlazeContainerType<ValueType, SO> & matrix, std::string sep = ";") {
template <template <class, bool> class BlazeContainerType, class ValueType>
bool read_csv_blaze(const std::string & filename, BlazeContainerType<ValueType, blaze::rowMajor> & matrix, std::string sep = ";") {
//typedef typename std::vector<std::string> LINE;
std::string line;
int pos;
//std::vector<LINE> array = {};
std::ifstream in(filename);
if(!in.is_open()) {
std::cout << "Failed to open file" << std::endl;
return false;
}
size_t row_idx = 0;
while( getline(in, line) ) {
//LINE ln;
size_t col_idx = 0;
while( (pos = line.find(sep) ) >= 0) {
std::string field = line.substr(0, pos);
std::cout << field << "\n";
std::cout << line << "\n";
line = line.substr(pos+1);
//ln.push_back(field);
matrix(row_idx, col_idx) = convert_to<ValueType>(field);
++col_idx;
}
matrix(row_idx, col_idx) = convert_to<ValueType>(line);
++row_idx;
//ln.push_back(line);
//array.push_back(ln);
}
return true;
}
| 3,883
|
C++
|
.cpp
| 112
| 29.785714
| 131
| 0.597281
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,472
|
kmeans.cpp
|
metric-space-ai_metric/metric/mapping/kmeans.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 M.Welsch
*/
#ifndef _METRIC_MAPPING_KMEANS_CPP
#define _METRIC_MAPPING_KMEANS_CPP
/*
A k-means implementation with optimized seeding.
Input (vector of fixed-size vector, clustersize)
for example:
std::vector<std::vector<float, 5>> data{
{0, 0, 0, 0, 0},
{1.74120000000000, 4.07812000000000, -0.0927036000000, 41.7888000000000, 41.7888000000000},
{7.75309000000000, 16.2466000000000, 3.03956000000000, 186.074000000000, 186.074000000000},
{2.85493000000000, 3.25380000000000, 2.50559000000000, 68.5184000000000, 68.5184000000000},
{5.81414000000000, 8.14015000000000, 3.22950000000000, 139.539000000000, 139.539000000000},
{2.57927000000000, 2.63399000000000, 2.46802000000000, 61.9026000000000, 61.9026000000000}};
auto [means, idx] = kmeans(data, 4); // clusters the data in 4 groups.
means: A vector holding the means (same type as input data)
idx: A vector containing the cluster index
*/
#include "../distance/k-related/Standards.hpp"
#include <algorithm>
#include <cassert>
#include <random>
#include <string>
#include <vector>
namespace metric {
namespace kmeans_details {
inline std::string default_measure(void) { return "Euclidean"; }
template <typename T> T distance(const std::vector<T> &a, const std::vector<T> &b, std::string distance_measure)
{
assert(a.size() == b.size()); // data vectors have not the same length
if (distance_measure.compare("Euclidean") == 0)
return metric::Euclidean<T>()(a, b);
else if (distance_measure.compare("rms") == 0) {
T val = metric::Euclidean<T>()(a, b);
return val * val;
} else if (distance_measure.compare("manhatten") == 0)
return metric::Manhatten<T>()(a, b);
else if (distance_measure.compare("cosine_inverted") == 0)
return metric::CosineInverted<T>()(a, b);
else {
return metric::Euclidean<T>()(a, b);
}
}
/*
For every datapoint, the distance to its closest mean.
Returns one value per datapoint (NOT per cluster).
*/
template <typename T>
std::vector<T> closest_distance(const std::vector<std::vector<T>> &means, const std::vector<std::vector<T>> &datapoints,
                                int k, std::string distance_measure)
{
    (void)k; // kept for interface compatibility; the result size depends on datapoints only
    std::vector<T> distances;
    distances.reserve(datapoints.size()); // fix: previously reserved k, but one entry per datapoint is pushed
    for (const auto &d : datapoints) {
        T closest = kmeans_details::distance(d, means[0], distance_measure);
        for (const auto &m : means) {
            const T dist = kmeans_details::distance(d, m, distance_measure);
            if (dist < closest)
                closest = dist;
        }
        distances.push_back(closest);
    }
    return distances;
}
/*
means initialization based on the [kmeans++](https://en.wikipedia.org/wiki/K-means%2B%2B) algorithm:
the first center is drawn uniformly from the data; each further center is drawn
with probability proportional to its distance from the already chosen centers.
A random_seed of -1 seeds the generator from the system clock (non-deterministic runs).
*/
template <typename T>
std::vector<std::vector<T>> random_init(const std::vector<std::vector<T>> &data, int k, std::string distance_measure,
                                        long long random_seed)
{
    assert(k > 0);
    using input_size_t = typename std::vector<T>::size_type;
    std::vector<std::vector<T>> means;
    // Using a very simple PRBS generator, parameters selected according to
    // https://en.wikipedia.org/wiki/Linear_congruential_generator#Parameters_in_common_use
    // std::random_device rand_device;
    // std::linear_congruential_engine<uint64_t, 6364136223846793005, 1442695040888963407, UINT64_MAX> rand_engine(
    //    rand_device());
    if (random_seed == -1) {
        random_seed = std::chrono::system_clock::now().time_since_epoch().count();
    }
    std::default_random_engine random_generator(random_seed);
    // Select first mean at random from the set
    {
        std::uniform_int_distribution<input_size_t> uniform_generator(0, data.size() - 1);
        means.push_back(data[uniform_generator(random_generator)]);
    }
    for (int count = 1; count < k; ++count) {
        // Calculate the distance to the closest mean for each data point
        auto distances = closest_distance(means, data, k, distance_measure);
        // Pick a random point weighted by the distance from existing means
        // TODO: This might convert floating point weights to ints, distorting the distribution for small weights
        std::discrete_distribution<size_t> generator(distances.begin(), distances.end());
        means.push_back(data[generator(random_generator)]);
    }
    return means;
}
/*
find the closest mean for a data point.
Returns the index of the nearest mean under the given distance measure.
*/
template <typename T>
int findClosestMean(const std::vector<T> &datapoint, const std::vector<std::vector<T>> &means,
                    std::string distance_measure)
{
    assert(!means.empty());
    T smallest_distance = kmeans_details::distance(datapoint, means[0], distance_measure);
    int index = 0;
    // size_t counter fixes the previous signed/unsigned comparison against means.size()
    for (size_t i = 1; i < means.size(); ++i) {
        const T dist = kmeans_details::distance(datapoint, means[i], distance_measure);
        if (dist < smallest_distance) {
            smallest_distance = dist;
            index = static_cast<int>(i);
        }
    }
    return index;
}
/*
store, for every datapoint, the index of its closest mean into `assignments`.
`assignments` must already be sized to data.size().
*/
template <typename T>
void update_assignments(std::vector<int> &assignments, const std::vector<std::vector<T>> &data,
                        const std::vector<std::vector<T>> &means, std::string distance_measure)
{
    assert(assignments.size() == data.size()); // caller allocates; out-of-range writes were silent before
    for (size_t i = 0; i < data.size(); ++i) {
        assignments[i] = findClosestMean(data[i], means, distance_measure);
    }
}
/*
recompute the cluster means from the datapoints and their cluster assignments.
Returns {per-cluster datapoint counts, number of mean components that changed}.
A cluster that received no points keeps its previous mean.
*/
template <typename T>
std::tuple<std::vector<int>, int> update_means(std::vector<std::vector<T>> &means,
                                               const std::vector<std::vector<T>> &data,
                                               const std::vector<int> &assignments, const int &k)
{
    const std::vector<std::vector<T>> old_means = means;
    // fix: reset the accumulators first; the previous version summed datapoints on top of
    // the old mean values, so every recomputed mean was biased by (old_mean / count)
    for (auto &m : means)
        std::fill(m.begin(), m.end(), T(0));
    std::vector<int> count(k, int(0));
    for (size_t i = 0; i < std::min(assignments.size(), data.size()); ++i) {
        count[assignments[i]] += 1;
        for (size_t j = 0; j < std::min(data[i].size(), means[assignments[i]].size()); ++j) {
            means[assignments[i]][j] += data[i][j];
        }
    }
    int updated = 0; // counts changed components; 0 signals convergence to the caller
    for (int i = 0; i < k; ++i) {
        if (count[i] == 0) {
            means[i] = old_means[i]; // empty cluster: keep the previous center
        } else {
            for (size_t j = 0; j < means[i].size(); ++j) {
                means[i][j] /= double(count[i]);
                if (means[i][j] != old_means[i][j])
                    updated += 1;
            }
        }
    }
    return {count, updated};
}
/// Renumber cluster labels so they appear in order of first occurrence:
/// the label of assignments[0] becomes 0, the next distinct label 1, and so on.
inline void rearrange_assignments(std::vector<int> &assignments)
{
    std::vector<int> first_seen; // distinct labels in order of first appearance
    for (const int label : assignments) {
        if (std::find(first_seen.begin(), first_seen.end(), label) == first_seen.end())
            first_seen.push_back(label);
    }
    for (int &label : assignments) {
        const auto pos = std::find(first_seen.begin(), first_seen.end(), label);
        label = static_cast<int>(pos - first_seen.begin());
    }
}
} // end namespace kmeans_details
/// Lloyd's k-means with kmeans++ seeding.
/// @param data     datapoints (vector of equally-sized vectors)
/// @param k        number of clusters (0 < k <= data.size())
/// @param maxiter  iteration cap
/// @param distance_measure  name dispatched by kmeans_details::distance
/// @param random_seed       -1 seeds from the clock
/// @return {assignments (cluster index per datapoint), means, per-cluster counts}
template <typename T>
std::tuple<std::vector<int>, std::vector<std::vector<T>>, std::vector<int>>
kmeans(const std::vector<std::vector<T>> &data, int k, int maxiter, std::string distance_measure, long long random_seed)
{
    static_assert(
        std::is_arithmetic<T>::value && std::is_signed<T>::value,
        "kmeans_lloyd requires the template parameter T to be a signed arithmetic type (e.g. float, double, int)");
    assert(k > 0); // k must be greater than zero
    assert(data.size() >= static_cast<size_t>(k)); // there must be at least k data points
    std::vector<std::vector<T>> means = kmeans_details::random_init(data, k, distance_measure, random_seed);
    std::vector<int> assignments(data.size());
    // Iterate assignment/update steps until the means stop moving or maxiter is hit
    int t = 0;
    int updated_number_of_means = 0;
    std::vector<int> counts(k, int(0));
    do {
        kmeans_details::update_assignments(assignments, data, means, distance_measure);
        // fix: the previous structured binding declared a NEW updated_number_of_means that
        // shadowed the loop-condition variable, so the loop always terminated after one pass
        std::tie(counts, updated_number_of_means) = kmeans_details::update_means(means, data, assignments, k);
        ++t;
    } while (updated_number_of_means != int(0) && t < maxiter);
    // kmeans_details::rearrange_assignments(assignments); // hide by Stepan Mmaontov 28 10 2019 - rearranging
    // asssignments does not reflect with counts and means
    return {assignments, means, counts};
}
} // namespace metric
#endif
| 8,184
|
C++
|
.cpp
| 219
| 34.849315
| 120
| 0.699345
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,473
|
ESN.cpp
|
metric-space-ai_metric/metric/mapping/ESN.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_MAPPING_ESN_CPP
#define _METRIC_MAPPING_ESN_CPP
#include "ESN.hpp"
#include <cassert>
namespace metric {
namespace ESN_details {
// --------------------------------- math functions:
// Forward declarations; definitions follow below.
template <typename T>
blaze::DynamicMatrix<T> get_readout_no_echo( // for networks with disabled echo
    const blaze::DynamicMatrix<T> &Samples, const blaze::DynamicMatrix<T> &W_in);
template <typename T>
blaze::DynamicMatrix<T> get_readout( // echo mode
    const blaze::DynamicMatrix<T> &Samples, const blaze::DynamicMatrix<T> &W_in,
    const blaze::CompressedMatrix<T> &W, // TODO make sparse
    const T alpha = 0.5, const size_t washout = 0);
// Ridge (Tikhonov-regularized) least-squares solve for the readout weights.
template <typename T>
blaze::DynamicMatrix<T> ridge(const blaze::DynamicMatrix<T> &Target, const blaze::DynamicMatrix<T> &Readout,
                              const T beta = 0.5);
// --------------------------------- math functions:
// Readout for a reservoir with echo disabled: a single feed-forward pass,
// output = tanh(W_in_weights * Samples + offset), with no inter-sample state.
// The first column of W_in is the bias (offset); the remaining columns are input weights.
template <typename T>
blaze::DynamicMatrix<T> get_readout_no_echo( // for networks with disabled echo
    const blaze::DynamicMatrix<T> &Samples, const blaze::DynamicMatrix<T> &W_in)
{
    size_t slice_size = W_in.columns();
    assert(slice_size > 0);
    slice_size = slice_size - 1; // size of input vector. first column is for offset and will be used separately
    assert(Samples.rows() == slice_size);
    auto W_in_submatrix = submatrix(W_in, 0UL, 1UL, W_in.rows(), slice_size); // all except first column
    auto w_in_offset = submatrix(W_in, 0UL, 0UL, W_in.rows(), 1UL); // first column
    // Ones broadcasts the per-neuron offset column across all samples
    auto Ones = blaze::DynamicMatrix<T>(w_in_offset.columns(), Samples.columns(), 1);
    return evaluate(tanh(W_in_submatrix * Samples + w_in_offset * Ones));
}
// Readout for the echo-state reservoir: iterates the samples column by column,
// updating the internal state x with the leaky-integrator rule
//   x = alpha * tanh(W_in*u + W*x) + (1 - alpha) * x
// and emitting x for every sample after the first `washout` columns.
// With washout == 0 and alpha == 1 it delegates to the loop-free overload.
template <typename T>
blaze::DynamicMatrix<T> get_readout( // echo mode
    const blaze::DynamicMatrix<T> &Samples, const blaze::DynamicMatrix<T> &W_in,
    const blaze::CompressedMatrix<T> &W, // TODO make sparse
    const T alpha, const size_t washout)
{
    if (washout == 0 && alpha == 1) // if echo disabled, we run faster overload without sample loop
        return get_readout_no_echo(Samples, W_in);
    size_t x_size = W.rows(); // size of inter-step echo buffer
    assert(x_size == W.columns()); // W must be square
    size_t slice_size = W_in.columns();
    assert(slice_size > 0);
    slice_size = slice_size - 1; // size of input vector. first column is for offset and will be used separately
    assert(Samples.rows() == slice_size);
    auto x = blaze::DynamicMatrix<T>(x_size, 1UL,
                                     0.0); // matrix type is due to impossibility of addition of matrix and vector
    // TODO set initial random values, if needed
    // here we only define symbolyc expressions, all computations are deferred
    auto W_in_submatrix = submatrix(W_in, 0UL, 1UL, W_in.rows(), slice_size); // all except first column
    auto w_in_offset = submatrix(W_in, 0UL, 0UL, W_in.rows(), 1UL); // first column
    size_t n = 0;
    blaze::DynamicMatrix<T> current_slice =
        columns(Samples, {n}); // will be updated inside loop. Type is set in order to force evaluation
    // TODO refactor: try n of vector type
    // NOTE: these Blaze expressions are lazy; they re-read current_slice and x on every
    // evaluation inside the loop, which is what makes the single definition here work.
    auto input_summand =
        W_in_submatrix * current_slice +
        w_in_offset; // define all symbolic expressions out of loop (but dependent on n ones must be updated)
    auto x_prev_summand = W * x;
    assert(Samples.columns() > washout); // TODO consider >= x_size in order to avoid undetermined system
    auto Output = blaze::DynamicMatrix<T>(x_size, Samples.columns() - washout);
    for (n = 0; n < Samples.columns(); n++) {
        current_slice = columns(Samples, {n}); // update for each n // TODO consider making n also Blaze expression?
        x = evaluate(tanh(input_summand + x_prev_summand) * alpha + x * (1 - alpha));
        if (n >= washout)
            columns(Output, {n - washout}) = x; // we output all readout
    }
    return Output;
}
// Ridge regression (Tikhonov regularization) for the output weights:
//   W_out = Target * R^T * (R * R^T + beta * I)^-1
// where R is the readout matrix and beta the regularization strength.
template <typename T>
blaze::DynamicMatrix<T> ridge(const blaze::DynamicMatrix<T> &Target, const blaze::DynamicMatrix<T> &Readout,
                              const T beta // = 0.5
)
{
    auto I = blaze::IdentityMatrix<T>(Readout.rows());
    auto TR = trans(Readout);
    return Target * TR * inv(Readout * TR + I * beta);
}
} // namespace ESN_details
// Build the sparse reservoir matrix W: w_connections random nonzero entries per
// column (values uniform in [-1, 1]), then rescale so the spectral radius equals
// w_sr. A w_sr <= 0 leaves W as an all-zero matrix.
template <typename RecType, typename Metric>
void ESN<RecType, Metric>::create_W(const size_t w_size, const value_type w_connections, const value_type w_sr)
{
    W = blaze::CompressedMatrix<value_type>(w_size, w_size, 0.0); // TODO make sparse
    if (w_sr > 0) {
        // TODO disable the following W fullfilling code if considered to use graph
        auto uniform_int = std::uniform_int_distribution<int>(0, w_size - 1);
        auto uniform_value_type = std::uniform_real_distribution<value_type>(-1, 1);
        int count;
        size_t r_row, r_col;
        for (r_col = 0; r_col < W.columns(); r_col++)
            for (count = 0; count < w_connections; count++) {
                r_row = uniform_int(rgen);
                if (W.find(r_row, r_col) == W.end(r_row)) // find(..) works for compressed matrix only
                    W.insert(r_row, r_col, uniform_value_type(rgen));
                else
                    count--; // retry on collision so each column gets exactly w_connections entries
            }
        // spectral radius normalization: divide by the largest eigenvalue magnitude
        blaze::DynamicVector<blaze::complex<value_type>, blaze::columnVector> eig(0UL, 0.0);
        blaze::DynamicMatrix<value_type> Wd =
            W; // DynamicMatrix needed for eigen call // TODO disable this line if considered to use graph
        // TODO enable the following 3 lines if considered to use graph
        // auto graph = metric::graph::RandomUniform<double, false>(w_size, -1, 1, w_connections);
        // blaze::DynamicMatrix<double> Wd = graph.get_matrix(); // DynamicMAtrix needed for eigen call
        // W = Wd;
        eigen(Wd, eig);
        auto sr = max(sqrt(pow(real(eig), 2) + pow(imag(eig), 2)));
        W = W * w_sr / sr;
    }
}
// Construct an untrained ESN: seeds the RNG and builds the reservoir matrix W.
// W_in and W_out are created later, during train().
template <typename RecType, typename Metric>
ESN<RecType, Metric>::ESN(
    const size_t w_size, // = 500, // number of elements in reservoir
    const value_type w_connections, // = 10, // number of interconnections (for each reservoir element)
    const value_type w_sr, // = 0.6, // desired spectral radius of the reservoir
    const value_type alpha_, // = 0.5, // leak rate
    const size_t washout_, // = 1, // number of samples excluded from output for washout
    const value_type beta_ // = 0.5, // ridge solver metaparameter
    )
    : alpha(alpha_), beta(beta_), washout(washout_)
{
    // a reservoir denser than 50% would make the collision-retry loop in create_W pathological
    assert(w_connections / (float)w_size < 0.5);
    rgen.seed(std::random_device{}());
    create_W(w_size, w_connections, w_sr);
}
// Construct an already-trained ESN from explicit weight matrices
// (e.g. obtained earlier via get_components()).
template <typename RecType, typename Metric>
ESN<RecType, Metric>::ESN(const blaze::DynamicMatrix<value_type> &W_in_, const blaze::CompressedMatrix<value_type> &W_,
                          const blaze::DynamicMatrix<value_type> &W_out_,
                          const value_type alpha_, // = 0.5, // leak rate
                          const size_t washout_, // = 1, // number of samples excluded from output for washout
                          const value_type beta_ // = 0.5, // ridge solver metaparameter
                          )
    : W_in(W_in_), W(W_), W_out(W_out_), alpha(alpha_), beta(beta_), washout(washout_)
{
    trained = true;
    rgen.seed(std::random_device{}());
}
// Load a trained ESN from a Blaze archive file previously written by save().
// Archive layout must match save(): W_in, W, W_out, then {alpha, beta, washout}.
template <typename RecType, typename Metric> ESN<RecType, Metric>::ESN(const std::string &filename)
{
    blaze::DynamicVector<value_type> params;
    // saved as: archive << W_in << W << W_out << params;
    blaze::Archive<std::ifstream> archive(filename);
    archive >> W_in;
    archive >> W;
    archive >> W_out;
    archive >> params;
    alpha = params[0];
    beta = params[1];
    washout = params[2]; // NOTE: washout is size_t but stored as value_type in the archive
    trained = true;
    rgen.seed(std::random_device{}());
}
// Train the network: randomize the input weights W_in, run the reservoir over
// Samples to obtain the readout, then solve for W_out by ridge regression
// against Target (with the first `washout` columns dropped from Target to match).
template <typename RecType, typename Metric>
void ESN<RecType, Metric>::train(const blaze::DynamicMatrix<value_type> &Samples,
                                 const blaze::DynamicMatrix<value_type> &Target)
{
    size_t in_size = Samples.rows();
    auto uniform_double = std::uniform_real_distribution<value_type>(-1, 1);
    size_t r_row, r_col;
    // in_size + 1 columns: column 0 holds the per-neuron bias, the rest the input weights
    W_in = blaze::DynamicMatrix<value_type>(W.rows(), in_size + 1, 0.0);
    for (r_row = 0; r_row < W.rows(); r_row++)
        for (r_col = 0; r_col <= in_size; r_col++) {
            assert(r_row < W_in.rows() && r_col < W_in.columns());
            W_in(r_row, r_col) = uniform_double(rgen);
        }
    blaze::DynamicMatrix<value_type> Readout =
        ESN_details::get_readout(Samples, // input signal
                                 W_in, // input weights
                                 W, // reservoir internal weights (square matrix)
                                 alpha, washout // leak rate, number of Samples excluded from output for washout
        );
    blaze::DynamicMatrix<value_type> target_submat =
        submatrix(Target, 0UL, washout, Target.rows(), Target.columns() - washout);
    W_out = ESN_details::ridge(target_submat, Readout, beta);
    trained = true;
}
// Convenience overload: converts STL-style records to Blaze matrices and
// delegates to the matrix-based train().
template <typename RecType, typename Metric>
void ESN<RecType, Metric>::train(const std::vector<RecType> &Samples, const std::vector<RecType> &Target)
{
    train(vector_to_blaze(Samples), vector_to_blaze(Target));
}
// Run the trained network on new samples and return W_out * readout.
// Requires a prior successful train() (or construction from saved weights).
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename ESN<RecType, Metric>::value_type>
ESN<RecType, Metric>::predict(const blaze::DynamicMatrix<value_type> &Samples)
{
    assert(trained);
    blaze::DynamicMatrix<value_type> Readout =
        ESN_details::get_readout(Samples, // input signal
                                 W_in, // input weights
                                 W, // reservoir internal weights (square matrix)
                                 alpha, washout // leak rate, number of samples excluded from output for washout
        );
    return W_out * Readout;
}
// Convenience overload: converts records to a Blaze matrix, predicts, and
// converts the result back to the record type.
template <typename RecType, typename Metric>
std::vector<RecType> ESN<RecType, Metric>::predict(const std::vector<RecType> &Samples)
{
    const auto prediction = predict(vector_to_blaze(Samples));
    return blaze2RecType<RecType>(prediction);
}
// Convert a vector of records into a Blaze matrix, transposed:
// records become columns (element j of record i lands at (j, i)).
// Assumes all records have the same length as In[0].
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename ESN<RecType, Metric>::value_type>
ESN<RecType, Metric>::vector_to_blaze(const std::vector<RecType> &In)
{
    blaze::DynamicMatrix<value_type> Out(In[0].size(), In.size(), 0); // transpose
    for (size_t i = 0; i < In.size(); ++i) // TODO optimize by using iterators
        for (size_t j = 0; j < In[0].size(); ++j)
            Out(j, i) = In[i][j];
    return Out;
}
// Convert a Blaze matrix back to records — overload selected (code == 1) for
// STL-like record types that support push_back. Columns become records.
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 1, std::vector<R>>::type
ESN<RecType, Metric>::blaze2RecType(const blaze::DynamicMatrix<typename ESN<R, Metric>::value_type> &In)
{
    std::vector<RecType> Out;
    for (size_t i = 0; i < In.columns(); ++i) {
        RecType rec;
        for (size_t j = 0; j < In.rows(); ++j)
            rec.push_back(In(j, i)); // transpose
        Out.push_back(rec);
    }
    return Out;
}
// Convert a Blaze matrix back to records — overload selected (code == 2) for
// Blaze row-vector record types (sized at construction, indexed assignment).
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 2, std::vector<R>>::type
ESN<RecType, Metric>::blaze2RecType(const blaze::DynamicMatrix<typename ESN<R, Metric>::value_type> &In)
{ // only blaze row-vector
    std::vector<RecType> Out;
    for (size_t i = 0; i < In.columns(); ++i) {
        RecType rec(In.rows()); // blaze specific
        for (size_t j = 0; j < In.rows(); ++j)
            rec[j] = In(j, i); // blaze specific // transpose
        Out.push_back(rec);
    }
    return Out;
}
// Persist the trained model to a Blaze archive file; silently does nothing
// when the model is untrained. Layout must stay in sync with the file-loading ctor.
template <typename RecType, typename Metric> void ESN<RecType, Metric>::save(const std::string &filename)
{
    if (trained) {
        blaze::DynamicVector<value_type> params = {alpha, beta, washout};
        blaze::Archive<std::ofstream> archive(filename);
        archive << W_in << W << W_out << params;
    } // else {
    //    std::cout << "Attempt to save untrained model" << std::endl;
    //}
}
// Expose the trained weight matrices and hyperparameters
// as {W_in, W, W_out, alpha, washout, beta}; asserts the model is trained.
template <typename RecType, typename Metric>
std::tuple<blaze::DynamicMatrix<typename ESN<RecType, Metric>::value_type>,
           blaze::CompressedMatrix<typename ESN<RecType, Metric>::value_type>,
           blaze::DynamicMatrix<typename ESN<RecType, Metric>::value_type>, typename ESN<RecType, Metric>::value_type,
           size_t, typename ESN<RecType, Metric>::value_type>
ESN<RecType, Metric>::get_components()
{
    assert(trained);
    return std::make_tuple(W_in, W, W_out, alpha, washout, beta);
}
} // namespace metric
#endif // header guard
| 11,958
|
C++
|
.cpp
| 270
| 41.685185
| 119
| 0.695144
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,474
|
autoencoder.cpp
|
metric-space-ai_metric/metric/mapping/autoencoder.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 M.Welsch
*/
#include "autoencoder.hpp"
namespace metric {
template <typename InputDataType, typename Scalar> Autoencoder<InputDataType, Scalar>::Autoencoder() : normValue(255) {}
// Construct the network topology/weights from a JSON description string.
template <typename InputDataType, typename Scalar>
Autoencoder<InputDataType, Scalar>::Autoencoder(const std::string &jsonString) : Autoencoder()
{
    this->constructFromJsonString(jsonString);
}
// template<typename InputDataType, typename Scalar>
// Autoencoder<InputDataType, Scalar>::Autoencoder(const std::vector<InputDataType> inputData,
// size_t featuresLength, InputDataType normValue) :
// featuresLength(featuresLength), normValue(normValue)
// {
// /* Create layers */
// net.addLayer(dnn::FullyConnected<Scalar, dnn::ReLU<Scalar>>(featuresLength, 1024));
// net.addLayer(dnn::FullyConnected<Scalar, dnn::ReLU<Scalar>>(1024, 256));
// net.addLayer(dnn::FullyConnected<Scalar, dnn::ReLU<Scalar>>(256, 64));
// net.addLayer(dnn::FullyConnected<Scalar, dnn::ReLU<Scalar>>(64, 256));
// net.addLayer(dnn::FullyConnected<Scalar, dnn::ReLU<Scalar>>(256, 1024));
// net.addLayer(dnn::FullyConnected<Scalar, dnn::Sigmoid<Scalar>>(1024, featuresLength));
// //net.addLayer(Conv2d<Scalar, ReLU<Scalar>>(featuresLength, 28, 1, 1, 5, 5));
// //net.addLayer(Conv2dTranspose<Scalar, Sigmoid<Scalar>>(24, 24, 1, 1, 5, 5));
//
// net.setCallback(dnn::VerboseCallback<Scalar>());
// /* Set output layer */
//
// /* Create optimizer object */
// net.setOptimizer(dnn::RMSProp<Scalar>());
// //opt->learningRate = 0.01;
//
// /* Set callback function object */
//
// net.setOutput(dnn::RegressionMSE<Scalar>());
// /* Initialize parameters with N(0, 0.01^2) using random seed 123 */
// net.init(0, 0.01, 123);
//
// loadTrainData(inputData);
// }
// Convert raw input samples into the internal normalized matrix used for training.
template <typename InputDataType, typename Scalar>
void Autoencoder<InputDataType, Scalar>::loadTrainData(const std::vector<InputDataType> data)
{
    trainData = convertData(data);
}
// Train the autoencoder on `data` (input == target, fixed seed 123)
// and print the wall-clock training time to stdout.
template <typename InputDataType, typename Scalar>
void Autoencoder<InputDataType, Scalar>::train(const std::vector<InputDataType> &data, size_t epochs, size_t batchSize)
{
    loadTrainData(data);
    auto t1 = std::chrono::high_resolution_clock::now();
    this->fit(trainData, trainData, batchSize, epochs, 123); // autoencoder: target is the input itself
    auto t2 = std::chrono::high_resolution_clock::now();
    auto d = std::chrono::duration_cast<std::chrono::duration<double>>(t2 - t1);
    std::cout << "Training time: " << d.count() << " s" << std::endl;
}
// Run the full encode/decode pass on `data`, print the prediction time,
// and return the reconstruction converted back to the input data type.
template <typename InputDataType, typename Scalar>
std::vector<InputDataType> Autoencoder<InputDataType, Scalar>::predict(const std::vector<InputDataType> data)
{
    auto t1 = std::chrono::high_resolution_clock::now();
    auto prediction = dnn::Network<Scalar>::predict(convertData(data));
    auto t2 = std::chrono::high_resolution_clock::now();
    auto d = std::chrono::duration_cast<std::chrono::duration<double>>(t2 - t1);
    std::cout << "Prediction time: " << d.count() << " s" << std::endl;
    return convertToOutput(prediction);
}
// Reshape a flat input vector into a (samples x featuresLength) matrix of Scalar,
// normalized to [0..1] by dividing by normValue (skipped when normValue == 0).
// The feature length is taken from the first network layer's input size.
template <typename InputDataType, typename Scalar>
typename Autoencoder<InputDataType, Scalar>::Matrix
Autoencoder<InputDataType, Scalar>::convertData(const std::vector<InputDataType> &inputData)
{
    /* Convert features to scalar type */
    std::vector<Scalar> dataScalar(inputData.begin(), inputData.end());
    auto featuresLength = this->layers[0]->inputSize;
    // assumes inputData.size() is a multiple of featuresLength — TODO confirm at call sites
    Matrix data(dataScalar.size() / featuresLength, featuresLength, dataScalar.data());
    /* Norm features [0..1] */
    if (normValue != 0) {
        data /= Scalar(normValue);
    }
    return data;
}
// Set the normalization divisor used by convertData/convertToOutput (0 disables normalization).
template <typename InputDataType, typename Scalar>
void Autoencoder<InputDataType, Scalar>::setNormValue(InputDataType _normValue)
{
    normValue = _normValue;
}
// Flatten a matrix of network outputs back into a single vector of the input
// data type, optionally undoing the [0..1] normalization (multiply by normValue).
template <typename InputDataType, typename Scalar>
std::vector<InputDataType> Autoencoder<InputDataType, Scalar>::convertToOutput(const Matrix &data,
                                                                               bool doDenormalization)
{
    Matrix temp(data);
    if (doDenormalization and (normValue != 0)) {
        temp *= normValue;
    }
    std::vector<InputDataType> output;
    for (auto i = 0; i < temp.rows(); ++i) {
        // copy one row at a time; rows are appended in order into the flat output
        auto dataPointer = blaze::row(temp, i).data();
        std::vector<Scalar> vectorScalar(dataPointer, dataPointer + temp.columns());
        output.insert(output.end(), vectorScalar.begin(), vectorScalar.end());
    }
    return output;
}
// Push `data` through the first half of the layers only and return the latent
// representation. Requires an even number of layers (encoder == first half).
template <typename InputDataType, typename Scalar>
std::vector<Scalar> Autoencoder<InputDataType, Scalar>::encode(const std::vector<InputDataType> &data)
{
    assert(this->num_layers() % 2 == 0);
    auto input = convertData(data);
    const size_t encoderLastLayerNumber = this->num_layers() / 2;
    this->layers[0]->forward(input);
    for (size_t i = 1; i < encoderLastLayerNumber; i++) {
        this->layers[i]->forward(this->layers[i - 1]->output());
    }
    Matrix output = this->layers[encoderLastLayerNumber - 1]->output();
    // NOTE(review): copies only the first output.columns() scalars — looks like this
    // assumes a single input sample (one row); confirm for multi-sample input
    std::vector<Scalar> vectorScalar(output.data(), output.data() + output.columns());
    return vectorScalar;
}
// Push a latent vector through the second half of the layers and return the
// denormalized reconstruction. Requires an even number of layers.
template <typename InputDataType, typename Scalar>
std::vector<InputDataType> Autoencoder<InputDataType, Scalar>::decode(const std::vector<Scalar> &data)
{
    assert(this->num_layers() % 2 == 0);
    Matrix latentVector(1, data.size(), data.data());
    const size_t decoderFirstLayerNumber = this->num_layers() / 2;
    this->layers[decoderFirstLayerNumber]->forward(latentVector);
    for (size_t i = decoderFirstLayerNumber + 1; i < this->num_layers(); i++) {
        this->layers[i]->forward(this->layers[i - 1]->output());
    }
    Matrix output = this->layers[this->num_layers() - 1]->output();
    return convertToOutput(output);
}
} // namespace metric
| 5,852
|
C++
|
.cpp
| 132
| 42.454545
| 120
| 0.720063
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,475
|
DSPCC.cpp
|
metric-space-ai_metric/metric/mapping/DSPCC.cpp
|
#include "DSPCC.hpp"
#include "metric/transform/discrete_cosine.hpp"
#include "metric/transform/wavelet.hpp"
namespace metric {
// ------------------------------------------
// common functions
// recursive split for arbitrary depth
// TODO consider creating special class for DWT split in tree order (with stack encaplulated)
// Recursively split every record in x with a single DWT level (low- and high-pass
// halves) until the container holds `subbands_num` subbands. The length of the
// records at each level is pushed onto subband_length so iDWT can invert exactly.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator>
DWT_split(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> x,
          std::stack<size_t> &subband_length, int wavelet_type, size_t subbands_num)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> out;
    if (x.size() * 2 <= subbands_num) { // another full split still fits
        for (size_t el = 0; el < x.size(); ++el) {
            auto split = wavelet::dwt(x[el], wavelet_type);
            out.push_back(std::get<0>(split));
            out.push_back(std::get<1>(split));
        }
        subband_length.push(x[0].size());
        return DWT_split(out, subband_length, wavelet_type, subbands_num);
    } else {
        return x;
    }
}
// Inverse of DWT_split: recursively merge adjacent subband pairs with iDWT,
// popping the original per-level lengths from subband_length, until one record remains.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator>
DWT_unsplit(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> in,
            std::stack<size_t> &subband_length, int wavelet_type)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> x;
    if (in.size() > 1) {
        for (size_t el = 0; el < in.size(); el += 2) { // we assume size of deque is even, TODO check
            x.push_back(wavelet::idwt(in[el], in[el + 1], wavelet_type, subband_length.top()));
        }
        subband_length.pop();
        return DWT_unsplit(x, subband_length, wavelet_type);
    } else {
        return in;
    }
}
// template <typename T>
// std::deque<std::vector<T>>
// sequential_DWT( // old overload
// std::vector<T> x,
// std::stack<size_t> & subband_length,
// int wavelet_type,
// size_t subbands_num
// ) {
// std::deque<std::vector<T>> deque_x = {x};
// return DWT_split(deque_x, subband_length, wavelet_type, subbands_num);
// }
// Wrap a single record in an outer container and run the recursive DWT split on it.
template <template <typename, typename> class OuterContainer, class InnerContainer, typename OuterAllocator>
OuterContainer<InnerContainer, OuterAllocator> // TODO better use -> for deduction by return value
sequential_DWT(InnerContainer x, std::stack<size_t> &subband_length, int wavelet_type, size_t subbands_num)
{
    OuterContainer<InnerContainer, OuterAllocator> deque_x = {x};
    return DWT_split(deque_x, subband_length, wavelet_type, subbands_num);
}
// Merge a container of subbands back into the single reconstructed record.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
InnerContainer<ValueType, InnerAllocator>
sequential_iDWT(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> in,
                std::stack<size_t> &subband_length, int wavelet_type)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> deque_out =
        DWT_unsplit(in, subband_length, wavelet_type);
    return deque_out[0]; // fully merged: exactly one record remains
}
// ------------------------------------------
// class DSPCC
// Construct and train in one step; select_train dispatches on the record
// container type (STL-like vs Blaze row-vector).
template <typename RecType, typename Metric>
DSPCC<RecType, Metric>::DSPCC(const std::vector<RecType> &TrainingDataset, size_t n_features_, size_t n_subbands_,
                              float time_freq_balance_, size_t n_top_features_)
{
    select_train<RecType>(TrainingDataset, n_features_, n_subbands_, time_freq_balance_, n_top_features_);
}
// Overload selected (code == 1) for STL-like record types: records already
// match the inner representation, so train directly.
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 1, void>::type
DSPCC<RecType, Metric>::select_train(const std::vector<RecType> &TrainingDataset, size_t n_features_,
                                     size_t n_subbands_, float time_freq_balance_, size_t n_top_features_)
{
    train(TrainingDataset, n_features_, n_subbands_, time_freq_balance_, n_top_features_);
}
// Overload selected (code == 2) for Blaze row-vector records: copies each
// record element-wise into the STL-based inner record type, then trains.
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 2, void>::type
DSPCC<RecType, Metric>::select_train(const std::vector<RecType> &TrainingDataset, size_t n_features_,
                                     size_t n_subbands_, float time_freq_balance_, size_t n_top_features_)
{
    // fix: removed the unused `using ValueType = ...` alias
    // convert from Blaze to STL // TODO move to separate private method
    std::vector<RecTypeInner> ConvertedDataset;
    ConvertedDataset.reserve(TrainingDataset.size());
    for (size_t i = 0; i < TrainingDataset.size(); ++i) {
        RecTypeInner line;
        for (size_t j = 0; j < TrainingDataset[i].size(); ++j) {
            line.push_back(TrainingDataset[i][j]);
        }
        ConvertedDataset.push_back(line);
    }
    train(ConvertedDataset, n_features_, n_subbands_, time_freq_balance_, n_top_features_);
}
// Fit the model: split records into DWT subbands (time and DCT/frequency views),
// fit one PCFA per subband per domain (feature budget split by time_freq_balance),
// then fit a final top-level PCFA over the serialized mixed codes.
template <typename RecType, typename Metric>
void DSPCC<RecType, Metric>::train(const std::vector<DSPCC<RecType, Metric>::RecTypeInner> &TrainingDataset,
                                   size_t n_features_, size_t n_subbands_, float time_freq_balance_,
                                   size_t n_top_features_)
{
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    time_freq_balance = time_freq_balance_;
    resulting_subband_length = 0;
    n_features = n_features_; // number of features selected from both PCFAs
    n_features_freq = std::round(n_features_ * time_freq_balance_);
    n_features_time = n_features_ - n_features_freq;
    // round n_subbands down to the largest power of two <= n_subbands_
    for (size_t n = 2; n <= n_subbands_; n = n * 2)
        n_subbands = n;
    n_top_subbands = n_top_features_;
    auto PreEncoded = outer_encode(TrainingDataset);
    for (size_t subband_idx = 0; subband_idx < std::get<0>(PreEncoded).size(); ++subband_idx) {
        freq_PCA_models.push_back(
            metric::PCFA<RecTypeInner, void>(std::get<0>(PreEncoded)[subband_idx], n_features_freq));
        time_PCA_models.push_back(
            metric::PCFA<RecTypeInner, void>(std::get<1>(PreEncoded)[subband_idx], n_features_time));
    }
    std::vector<std::vector<RecTypeInner>> time_freq_PCFA_encoded = time_freq_PCFA_encode(PreEncoded);
    std::vector<RecTypeInner> series = mixed_code_serialize(time_freq_PCFA_encoded);
    top_PCA_model.push_back(metric::PCFA<RecTypeInner, void>(series, n_top_subbands));
}
// First encoding stage: crop each record to a DWT-compatible length, split it
// into n_subbands time-domain subbands, and produce a DCT-transformed frequency
// view of the same subbands. Returns {FreqData, TimeData}, each indexed
// [subband][record]. Also captures subband_length / resulting_subband_length
// from the first record for later inversion.
template <typename RecType, typename Metric>
std::tuple<std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>,
           std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>>
DSPCC<RecType, Metric>::outer_encode(const std::vector<DSPCC<RecType, Metric>::RecTypeInner> &Curves)
{
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    using ElementType = typename RecTypeInner::value_type;
    std::deque<std::vector<RecTypeInner>> FreqData;
    std::deque<std::vector<RecTypeInner>> TimeData;
    // pre-size the [subband][record] grids with placeholder records
    for (size_t subband_idx = 0; subband_idx < (n_subbands); ++subband_idx) {
        std::vector<RecTypeInner> SubbandData;
        for (size_t record_idx = 0; record_idx < Curves.size(); ++record_idx) {
            RecTypeInner rec = {0};
            SubbandData.push_back(rec); // TODO optimize
        }
        TimeData.push_back(SubbandData);
        FreqData.push_back(SubbandData);
    }
    // compute size and crop input
    size_t depth = (size_t)std::floor(std::log2(n_subbands));
    size_t max_subband_size = subband_size(Curves[0].size(), depth); // TODO check if not empty
    size_t appropriate_subband_size = crop_index(max_subband_size, 1);
    size_t crop_size = original_size(appropriate_subband_size, depth);
    for (size_t record_idx = 0; record_idx < Curves.size(); ++record_idx) {
        std::stack<size_t> subband_length_local;
        RecTypeInner cropped_record(Curves[record_idx].begin(), Curves[record_idx].begin() + crop_size);
        std::deque<RecTypeInner> current_rec_subbands_timedomain =
            sequential_DWT<std::deque, RecTypeInner, std::allocator<RecTypeInner>>(
                cropped_record, subband_length_local, 5,
                n_subbands); // TODO replace 5!! // TODO support different RecType types
        if (resulting_subband_length == 0) { // only during the first run
            resulting_subband_length = current_rec_subbands_timedomain[0].size();
            subband_length = subband_length_local;
        }
        std::deque<RecTypeInner> current_rec_subbands_freqdomain(current_rec_subbands_timedomain);
        metric::apply_DCT_STL(
            current_rec_subbands_freqdomain, false,
            resulting_subband_length); // transform all subbands at once (only first mix_idx values are replaced, the
                                       // rest is left unchanged!), TODO refactor cutting!!
        for (size_t subband_idx = 0; subband_idx < current_rec_subbands_timedomain.size(); ++subband_idx) {
            TimeData[subband_idx][record_idx] = current_rec_subbands_timedomain[subband_idx];
            FreqData[subband_idx][record_idx] = current_rec_subbands_freqdomain[subband_idx];
        }
    }
    return std::make_tuple(FreqData, TimeData);
}
template <typename RecType, typename Metric>
std::vector<typename DSPCC<RecType, Metric>::RecTypeInner> DSPCC<RecType, Metric>::outer_decode(
    const std::tuple<std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>,
        std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>> &TimeFreqData)
{
    // Inverse of outer_encode: rebuilds one waveform per record from its
    // per-subband frequency-domain (DCT) and time-domain representations, then
    // blends the two reconstructions element-wise with the time_freq_balance
    // weight (1 -> pure frequency branch, 0 -> pure time branch).
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::deque<std::vector<RecTypeInner>> FreqData = std::get<0>(TimeFreqData);
    std::deque<std::vector<RecTypeInner>> TimeData = std::get<1>(TimeFreqData);
    std::vector<RecTypeInner> Curves;
    for (size_t record_idx = 0; record_idx < TimeData[0].size(); ++record_idx) { // TODO check if [0] element exists
        // regroup: gather all subbands that belong to the current record
        std::vector<RecTypeInner> subbands_freqdomain;
        std::vector<RecTypeInner> subbands_timedomain;
        for (size_t subband_idx = 0; subband_idx < TimeData.size(); ++subband_idx) {
            subbands_timedomain.push_back(TimeData[subband_idx][record_idx]);
            subbands_freqdomain.push_back(FreqData[subband_idx][record_idx]);
        }
        metric::apply_DCT_STL(subbands_freqdomain, true); // true -> inverse DCT
        // sequential_iDWT consumes the stack of subband lengths, so each branch
        // works on its own fresh copy of the shared subband_length state
        std::stack<size_t> subband_length_copy(subband_length);
        RecTypeInner restored_waveform_freq = sequential_iDWT(subbands_freqdomain, subband_length_copy, 5);
        subband_length_copy = subband_length;
        RecTypeInner restored_waveform_time = sequential_iDWT(subbands_timedomain, subband_length_copy, 5);
        RecTypeInner restored_waveform_out;
        // element-wise weighted mix of the frequency- and time-branch reconstructions
        for (size_t el_idx = 0; el_idx < restored_waveform_freq.size(); ++el_idx) {
            restored_waveform_out.push_back((restored_waveform_freq[el_idx] * time_freq_balance +
                restored_waveform_time[el_idx] * (1 - time_freq_balance)));
        }
        Curves.push_back(restored_waveform_out);
    }
    return Curves;
}
template <typename RecType, typename Metric>
std::vector<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>
DSPCC<RecType, Metric>::time_freq_PCFA_encode(const std::vector<typename DSPCC<RecType, Metric>::RecTypeInner> &Data)
{
    // Convenience overload: runs the DWT/DCT pre-split (outer_encode) and
    // forwards the resulting (freq, time) tuple to the PCFA-encoding overload.
    return time_freq_PCFA_encode(outer_encode(Data));
}
template <typename RecType, typename Metric>
std::vector<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>> DSPCC<RecType, Metric>::time_freq_PCFA_encode(
    const std::tuple<std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>,
        std::deque<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>> &PreEncoded)
{
    // Applies the per-subband PCA (PCFA) models to the frequency- and
    // time-domain data produced by outer_encode, then for every record crops
    // the first n_features_freq frequency components and the first
    // n_features_time time components and concatenates them into a single
    // mixed code vector per subband.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<std::vector<RecTypeInner>> Encoded;
    for (size_t subband_idx = 0; subband_idx < std::get<0>(PreEncoded).size(); ++subband_idx) {
        auto freq_encoded_subband = freq_PCA_models[subband_idx].encode(std::get<0>(PreEncoded)[subband_idx]);
        auto time_encoded_subband = time_PCA_models[subband_idx].encode(std::get<1>(PreEncoded)[subband_idx]);
        // here we crop and concatenate codes
        std::vector<RecTypeInner> encoded_subband;
        for (size_t record_idx = 0; record_idx < freq_encoded_subband.size(); ++record_idx) {
            RecTypeInner mixed_codes;
            for (size_t el_idx = 0; el_idx < n_features_freq; ++el_idx) {
                mixed_codes.push_back(freq_encoded_subband[record_idx][el_idx]);
            }
            for (size_t el_idx = 0; el_idx < n_features_time; ++el_idx) {
                mixed_codes.push_back(
                    time_encoded_subband[record_idx][el_idx]); // we concatenate all features to single vector
            }
            encoded_subband.push_back(mixed_codes);
        }
        Encoded.push_back(encoded_subband);
    }
    return Encoded;
}
template <typename RecType, typename Metric>
std::vector<typename DSPCC<RecType, Metric>::RecTypeInner> DSPCC<RecType, Metric>::time_freq_PCFA_decode(
    const std::vector<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>> &Codes)
{
    // Inverse of time_freq_PCFA_encode: splits each mixed per-subband code back
    // into its frequency part (first n_features_freq elements) and time part
    // (the remainder), decodes both through the per-subband PCA models and
    // hands the regrouped subband data to outer_decode for waveform
    // reconstruction.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::deque<std::vector<RecTypeInner>> FreqData;
    std::deque<std::vector<RecTypeInner>> TimeData;
    for (size_t subband_idx = 0; subband_idx < Codes.size();
         ++subband_idx) { // divide each vector of codes into freq and time parts and rearrange data by subbands
        std::vector<RecTypeInner> freq_codes;
        std::vector<RecTypeInner> time_codes;
        for (size_t record_idx = 0; record_idx < Codes[subband_idx].size(); ++record_idx) {
            RecTypeInner freq_code_part(Codes[subband_idx][record_idx].begin(),
                Codes[subband_idx][record_idx].begin() + n_features_freq);
            RecTypeInner time_code_part(Codes[subband_idx][record_idx].begin() + n_features_freq,
                Codes[subband_idx][record_idx].end());
            freq_codes.push_back(freq_code_part);
            time_codes.push_back(time_code_part);
        }
        auto decoded_subband_freq = freq_PCA_models[subband_idx].decode(freq_codes);
        auto decoded_subband_time = time_PCA_models[subband_idx].decode(time_codes);
        FreqData.push_back(decoded_subband_freq);
        TimeData.push_back(decoded_subband_time);
    }
    return outer_decode(std::make_tuple(FreqData, TimeData));
}
template <typename RecType, typename Metric>
std::vector<typename DSPCC<RecType, Metric>::RecTypeInner> DSPCC<RecType, Metric>::mixed_code_serialize(
    const std::vector<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>> &PCFA_encoded)
{
    // Flattens the per-subband mixed codes into one code vector per record:
    // for every record the codes of all subbands are concatenated in subband
    // order (exactly the layout mixed_code_deserialize undoes).
    // Changes vs. the original: removed the unused ElementType alias, and
    // replaced std::make_move_iterator over the *const* input (a silent no-op
    // move, i.e. a copy) with an honest plain-iterator copy; reserve avoids
    // reallocations of the outer vector.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<RecTypeInner> serialized_dataset;
    serialized_dataset.reserve(PCFA_encoded[0].size());
    for (size_t record_idx = 0; record_idx < PCFA_encoded[0].size(); ++record_idx) {
        RecTypeInner serialized_record;
        for (size_t subband_idx = 0; subband_idx < PCFA_encoded.size(); ++subband_idx) {
            serialized_record.insert(serialized_record.end(), PCFA_encoded[subband_idx][record_idx].begin(),
                PCFA_encoded[subband_idx][record_idx].end());
        }
        serialized_dataset.push_back(serialized_record);
    }
    return serialized_dataset;
}
template <typename RecType, typename Metric>
std::vector<std::vector<typename DSPCC<RecType, Metric>::RecTypeInner>>
DSPCC<RecType, Metric>::mixed_code_deserialize(const std::vector<typename DSPCC<RecType, Metric>::RecTypeInner> &Codes)
{
    // Inverse of mixed_code_serialize: slices every flat per-record code back
    // into per-subband mixed codes (n_features_freq frequency components
    // followed by n_features_time time components for each subband, in
    // subband order).
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<std::vector<RecTypeInner>> deserialized;
    // pre-size the output: one placeholder record per subband
    for (size_t subband_idx = 0; subband_idx < (n_subbands); ++subband_idx) {
        std::vector<RecTypeInner> SubbandData;
        for (size_t record_idx = 0; record_idx < Codes.size(); ++record_idx) {
            RecTypeInner rec = {0};
            SubbandData.push_back(rec); // TODO optimize
        }
        deserialized.push_back(SubbandData);
    }
    for (size_t record_idx = 0; record_idx < Codes.size(); ++record_idx) {
        size_t current_idx = 0; // read cursor inside the flat code vector
        for (size_t subband_idx = 0; subband_idx < freq_PCA_models.size(); ++subband_idx) {
            // frequency part of the current subband's mixed code
            RecTypeInner mixed_code(Codes[record_idx].begin() + current_idx,
                Codes[record_idx].begin() + current_idx + n_features_freq);
            current_idx += n_features_freq;
            // ...followed by its time part
            mixed_code.insert(mixed_code.end(), Codes[record_idx].begin() + current_idx,
                Codes[record_idx].begin() + current_idx + n_features_time);
            current_idx += n_features_time;
            deserialized[subband_idx][record_idx] = mixed_code;
        }
    }
    return deserialized;
}
template <typename RecType, typename Metric>
std::vector<RecType> DSPCC<RecType, Metric>::encode(const std::vector<RecType> &Data)
{
    // Public entry point: dispatches to the STL- or Blaze-specific overload
    // depending on determine_container_type<RecType>::code.
    return select_encode<RecType>(Data);
}
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 1, // STL case
    std::vector<RecType>>::type
DSPCC<RecType, Metric>::select_encode(const std::vector<RecType> &Data)
{
    // STL records need no conversion: pre-split and PCFA-encode per subband,
    // serialize the per-subband codes per record, then compress the serialized
    // vectors with the top-level PCA model.
    // Change vs. original: removed the unused local `std::vector<RecTypeInner> Codes`.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<std::vector<RecTypeInner>> time_freq_PCFA_encoded = time_freq_PCFA_encode(Data);
    std::vector<RecTypeInner> series = mixed_code_serialize(time_freq_PCFA_encoded);
    return top_PCA_model[0].encode(series);
}
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 2, // Blaze vector case
    std::vector<RecType>>::type
DSPCC<RecType, Metric>::select_encode(const std::vector<RecType> &Data)
{
    // Blaze records are first copied element-wise into STL inner containers,
    // processed exactly like in the STL overload, and the resulting codes are
    // copied back into Blaze records.
    // Change vs. original: removed the unused local `std::vector<RecTypeInner> Codes`;
    // reserve avoids reallocations of the converted dataset.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<RecTypeInner> ConvertedData;
    ConvertedData.reserve(Data.size());
    for (size_t i = 0; i < Data.size(); ++i) {
        RecTypeInner line;
        for (size_t j = 0; j < Data[i].size(); ++j) {
            line.push_back(Data[i][j]);
        }
        ConvertedData.push_back(line);
    }
    std::vector<std::vector<RecTypeInner>> time_freq_PCFA_encoded = time_freq_PCFA_encode(ConvertedData);
    std::vector<RecTypeInner> series = mixed_code_serialize(time_freq_PCFA_encoded);
    auto pre_output = top_PCA_model[0].encode(series);
    std::vector<RecType> output;
    // convert back to the caller's Blaze record type
    for (size_t i = 0; i < pre_output.size(); ++i) {
        RecType line(pre_output[i].size());
        for (size_t j = 0; j < pre_output[i].size(); ++j) {
            line[j] = pre_output[i][j];
        }
        output.push_back(line);
    }
    return output;
}
template <typename RecType, typename Metric>
std::vector<RecType> DSPCC<RecType, Metric>::decode(const std::vector<RecType> &Codes)
{
    // Public entry point: dispatches to the STL- or Blaze-specific overload
    // depending on determine_container_type<RecType>::code.
    return select_decode<RecType>(Codes);
}
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 1, // STL case
    std::vector<RecType>>::type
DSPCC<RecType, Metric>::select_decode(const std::vector<RecType> &Codes)
{
    // Inverse of the STL select_encode: top-level PCA decode, split the
    // serialized vectors back into per-subband mixed codes, then PCFA-decode
    // and reconstruct the waveforms.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<std::vector<RecTypeInner>> deserialized = mixed_code_deserialize(top_PCA_model[0].decode(Codes));
    return time_freq_PCFA_decode(deserialized);
}
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 2, // Blaze case
    std::vector<RecType>>::type
DSPCC<RecType, Metric>::select_decode(const std::vector<RecType> &Codes)
{
    // Inverse of the Blaze select_encode: convert the Blaze code records into
    // STL inner containers, run the STL decoding pipeline (top-level PCA
    // decode -> deserialize -> PCFA decode), then copy the reconstructed
    // waveforms back into Blaze records.
    using RecTypeInner = DSPCC<RecType, Metric>::RecTypeInner;
    std::vector<RecTypeInner> stl_codes;
    for (size_t rec_idx = 0; rec_idx < Codes.size(); ++rec_idx) {
        RecTypeInner converted;
        for (size_t el_idx = 0; el_idx < Codes[rec_idx].size(); ++el_idx)
            converted.push_back(Codes[rec_idx][el_idx]);
        stl_codes.push_back(converted);
    }
    std::vector<std::vector<RecTypeInner>> deserialized =
        mixed_code_deserialize(top_PCA_model[0].decode(stl_codes));
    auto decoded = time_freq_PCFA_decode(deserialized);
    // copy the decoded waveforms back into the caller's Blaze record type
    std::vector<RecType> output;
    for (size_t rec_idx = 0; rec_idx < decoded.size(); ++rec_idx) {
        RecType row(decoded[rec_idx].size());
        for (size_t el_idx = 0; el_idx < decoded[rec_idx].size(); ++el_idx)
            row[el_idx] = decoded[rec_idx][el_idx];
        output.push_back(row);
    }
    return output;
}
// Returns a power of two (starting from 4) close to crop_share * length,
// never exceeding `length`. May return 0 when even 4 overshoots `length`.
template <typename RecType, typename Metric> size_t DSPCC<RecType, Metric>::crop_index(size_t length, float crop_share)
{
    // computing 2^n value nearest to given share value
    float crop_factor = crop_share * length; // TODO check in time_freq_balance_ is in [0, 1]
    size_t n = 4; // we skip 2^1
    size_t n_prev = 0;
    size_t mix_index = 0;
    while (true) {
        if (n > crop_factor) { // first power of two above the target share
            if (n > length) { // overrun: fall back to the previous power of two
                mix_index = n_prev;
                break;
            }
            if (crop_factor - n_prev > n - crop_factor) // we stick to n_prev or to n, not greater than max index
                mix_index = n;
            else
                mix_index = n_prev;
            break;
        }
        n_prev = n;
        n = n * 2; // n is ever degree of 2
    }
    return mix_index;
}
// template <typename RecType, typename Metric>
// size_t
// DSPCC<RecType, Metric>::subband_size(size_t original_size, size_t depth, size_t wavelet_length) { // rounding issue
// size_t n = 1;
// float sum = 0;
// for (size_t i=1; i<=depth; ++i){
// n = n*2;
// sum += (wavelet_length - 2)/(float)n; // -2 instead of -1 because of floor rounding within cast
// }
// return original_size/(float)n + sum;
// }
// Length of one subband after `depth` levels of DWT decomposition: each level
// roughly halves the data while adding (wavelet_length - 1) filter overhead,
// truncated toward zero exactly as in the original implementation.
template <typename RecType, typename Metric>
size_t DSPCC<RecType, Metric>::subband_size(size_t original_size, size_t depth, size_t wavelet_length)
{
    size_t length = original_size;
    size_t levels_left = depth;
    while (levels_left > 0) {
        length = (length + wavelet_length - 1) / 2.0; // truncating double division, kept bit-for-bit
        --levels_left;
    }
    return length;
}
// Approximate inverse of subband_size: given a subband length obtained after
// `depth` DWT levels, estimates the original waveform length by undoing the
// per-level halving and subtracting the accumulated filter overhead.
// Float accumulation order is kept identical to the original implementation.
template <typename RecType, typename Metric>
size_t DSPCC<RecType, Metric>::original_size(size_t subband_size, size_t depth, size_t wavelet_length)
{
    size_t scale = 1;
    float overhead = 0;
    for (size_t level = 0; level < depth; ++level) {
        scale = scale * 2;
        overhead += (wavelet_length - 2) / (float)scale;
    }
    return scale * (subband_size - overhead);
}
} // namespace metric
| 21,135
|
C++
|
.cpp
| 459
| 43.35512
| 119
| 0.723155
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,476
|
esn_switch_detector.cpp
|
metric-space-ai_metric/metric/mapping/esn_switch_detector.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2021 Panda Team
*/
#include "esn_switch_detector.hpp"
// class SwitchPredictor
// ---- public
// Builds a predictor from Blaze-matrix training data: stores the detector
// hyperparameters in the member initializer list and immediately trains the
// underlying ESN on (training_data, labels).
template <typename value_type>
SwitchPredictor<value_type>::SwitchPredictor(const blaze::DynamicMatrix<value_type> &training_data,
    const blaze::DynamicMatrix<value_type> &labels, const size_t wnd_size_,
    const size_t cmp_wnd_sz_, const size_t washout_,
    const value_type contrast_threshold_, const value_type alpha_,
    const value_type beta_)
    : wnd_size(wnd_size_), cmp_wnd_sz(cmp_wnd_sz_), washout(washout_), contrast_threshold(contrast_threshold_),
      alpha(alpha_), beta(beta_)
{
    train(training_data, labels);
}
// STL counterpart of the Blaze constructor: same hyperparameters, training
// data given as a vector of records (RecType is deduced).
template <typename value_type>
template <typename RecType> // to be deduced
SwitchPredictor<value_type>::SwitchPredictor(const std::vector<RecType> &training_data,
    const std::vector<RecType> &labels, const size_t wnd_size_,
    const size_t cmp_wnd_sz_, const size_t washout_,
    const value_type contrast_threshold_, const value_type alpha_,
    const value_type beta_)
    : wnd_size(wnd_size_), cmp_wnd_sz(cmp_wnd_sz_), washout(washout_), contrast_threshold(contrast_threshold_),
      alpha(alpha_), beta(beta_)
{
    train(training_data, labels);
}
// Restores a predictor previously written by save(): reads the ESN weight
// matrices and the hyperparameter vector from a Blaze binary archive and
// reconstructs the ESN from them. Supports an older archive layout that
// lacked the detector parameters (params of size 3).
template <typename value_type> SwitchPredictor<value_type>::SwitchPredictor(const std::string &filename)
{
    blaze::DynamicMatrix<value_type> W_in;
    blaze::CompressedMatrix<value_type> W;
    blaze::DynamicMatrix<value_type> W_out;
    blaze::DynamicVector<value_type> params;
    // saved as: archive << W_in << W << W_out << params;
    blaze::Archive<std::ifstream> archive(filename);
    archive >> W_in;
    archive >> W;
    archive >> W_out;
    archive >> params;
    alpha = params[0];
    beta = params[1];
    washout = params[2];
    if (params.size() > 3) { // new model format: detector parameters are stored too
        wnd_size = params[3];
        cmp_wnd_sz = params[4];
        contrast_threshold = params[5];
    } else { // compatibility with old model format: new parameters set to defaults
        wnd_size = 15;
        cmp_wnd_sz = 150;
        contrast_threshold = 0.3;
    }
    esn = metric::ESN<std::vector<value_type>, void>(W_in, W, W_out, alpha, washout, beta);
}
// Full detection pipeline on a Blaze dataset. Returns a one-column matrix of
// switch labels per sample: +1 (upward switch), -1 (downward switch), 0 (none).
// Steps: 1) preprocess (append sliding-stddev feature, transpose);
// 2) ESN prediction; 3) sliding-window class entropy of the prediction;
// 4) where entropy > 0.4, compare the means of the cmp_wnd_sz-sample past and
// future windows against contrast_threshold, suppressing a hit that directly
// follows another hit.
template <typename value_type>
blaze::DynamicMatrix<value_type> SwitchPredictor<value_type>::encode(const blaze::DynamicMatrix<value_type> &dataset)
{
    auto data = preprocess(dataset);
    auto prediction = esn.predict(data);
    blaze::DynamicMatrix<value_type, blaze::rowMajor> out = blaze::trans(prediction);
    // sliding class entropy of the (thresholded at 0.5) prediction: high
    // values mark windows where the predicted class is mixed, i.e. changing
    blaze::DynamicMatrix<value_type> sl_entropy(out.rows(), 1, 0);
    for (size_t i = wnd_size; i < out.rows(); ++i) {
        blaze::DynamicMatrix<value_type> wnd_row = blaze::submatrix(out, i - wnd_size, 0, wnd_size, 1);
        blaze::DynamicVector<value_type> wnd = blaze::column(wnd_row, 0);
        sl_entropy(i, 0) = class_entropy(wnd, 0.5);
    }
    blaze::DynamicMatrix<value_type> postproc(out.rows(), 1, 0);
    bool prev_l_flag = false;
    for (size_t i = cmp_wnd_sz; i < out.rows() - cmp_wnd_sz; ++i) {
        bool l_flag = false;
        if (sl_entropy(i, 0) > 0.4) { // candidate positions only where the prediction is mixed
            blaze::DynamicMatrix<value_type> wnd_past = blaze::submatrix(out, i - cmp_wnd_sz, 0, cmp_wnd_sz, 1);
            blaze::DynamicMatrix<value_type> wnd_fut = blaze::submatrix(out, i, 0, cmp_wnd_sz, 1);
            int label = 0;
            if (blaze::mean(wnd_past) - blaze::mean(wnd_fut) < -contrast_threshold) { // future mean higher: +1
                label = 1;
                l_flag = true;
            }
            if (blaze::mean(wnd_past) - blaze::mean(wnd_fut) > contrast_threshold) { // future mean lower: -1
                label = -1;
                l_flag = true;
            }
            if (!prev_l_flag) // suppress a hit immediately following another hit
                postproc(i, 0) = label;
        }
        prev_l_flag = l_flag;
    }
    return postproc;
}
// STL counterpart of the Blaze encode: identical pipeline (preprocess ->
// ESN predict -> sliding class entropy -> past/future mean contrast), with
// the window means computed by hand over the vector-of-records output.
template <typename value_type>
template <typename RecType>
std::vector<value_type> SwitchPredictor<value_type>::encode(const std::vector<RecType> &dataset)
{
    auto data = preprocess(dataset);
    auto out = esn.predict(data);
    std::vector<value_type> sl_entropy(out.size(), 0);
    for (size_t i = wnd_size; i < out.size(); ++i) {
        blaze::DynamicVector<value_type> wnd(wnd_size);
        for (size_t j = 0; j < wnd_size; ++j) {
            wnd[j] =
                out[i - wnd_size + j][0]; // TODO remove ugly inner containers or update for multidimensional labels
        }
        sl_entropy[i] = class_entropy(wnd, 0.5);
    }
    std::vector<value_type> postproc(out.size(), 0);
    bool prev_l_flag = false;
    for (size_t i = cmp_wnd_sz; i < out.size() - cmp_wnd_sz; ++i) {
        bool l_flag = false;
        if (sl_entropy[i] > 0.4) { // candidate positions only where the prediction is mixed
            value_type mean_past = 0;
            value_type mean_fut = 0;
            for (size_t j = 0; j < cmp_wnd_sz; ++j) {
                mean_past += out[i - cmp_wnd_sz + j][0];
                mean_fut += out[i + j][0];
            }
            mean_past /= (value_type)cmp_wnd_sz;
            mean_fut /= (value_type)cmp_wnd_sz;
            int label = 0;
            if (mean_past - mean_fut < -contrast_threshold) { // future mean higher: +1
                label = 1;
                l_flag = true;
            }
            if (mean_past - mean_fut > contrast_threshold) { // future mean lower: -1
                label = -1;
                l_flag = true;
            }
            if (!prev_l_flag) // suppress a hit immediately following another hit
                postproc[i] = label;
        }
        prev_l_flag = l_flag;
    }
    return postproc;
}
// Streaming interface: appends the incoming chunk to the internal buffer and,
// once the buffer exceeds the warmup region (washout + two comparison
// windows), emits switch values (with their original indices) for the samples
// that have a complete future window, trimming the buffer accordingly.
// Fix vs. original: `overbuf` was computed as all-unsigned arithmetic stored
// into an int — while the buffer is still short the subtraction wraps around
// and only implementation-defined narrowing made it negative again. It is now
// computed in signed 64-bit arithmetic.
template <typename value_type>
std::tuple<std::vector<unsigned long long int>, std::vector<value_type>>
SwitchPredictor<value_type>::encode_raw(const std::vector<unsigned long long int> &indices,
    const std::vector<std::vector<value_type>> &dataset)
{
    buffer.insert(buffer.end(), dataset.begin(), dataset.end());
    buffer_idx.insert(buffer_idx.end(), indices.begin(), indices.end());
    assert(buffer.size() == buffer_idx.size());
    std::vector<value_type> result = {};
    std::vector<unsigned long long int> result_indices = {};
    // samples beyond the warmup region, in well-defined signed arithmetic
    const long long overbuf = static_cast<long long>(buffer.size()) - static_cast<long long>(washout) -
                              2 * static_cast<long long>(cmp_wnd_sz);
    if (overbuf > 0) { // warmup finished
        std::vector<value_type> all_result = encode(buffer);
        // the last cmp_wnd_sz samples have no complete future window yet;
        // everything before them that is new since the last call is emitted
        result.insert(result.begin(), all_result.end() - cmp_wnd_sz - overbuf, all_result.end() - cmp_wnd_sz);
        result_indices.insert(result_indices.begin(), buffer_idx.end() - cmp_wnd_sz - overbuf,
            buffer_idx.end() - cmp_wnd_sz);
        // drop the emitted prefix, keep the warmup tail for the next call
        buffer.erase(buffer.begin(), buffer.begin() + overbuf);
        buffer_idx.erase(buffer_idx.begin(), buffer_idx.begin() + overbuf);
        assert(static_cast<long long>(result.size()) == overbuf);
        assert(static_cast<long long>(result_indices.size()) == overbuf);
    }
    return std::make_tuple(result_indices, result);
}
// Convenience wrapper over encode_raw: runs the streaming detector and
// returns only the nonzero switches as (index, value) pairs.
template <typename value_type>
std::vector<std::tuple<unsigned long long int, value_type>>
SwitchPredictor<value_type>::encode(const std::vector<unsigned long long int> &indices,
    const std::vector<std::vector<value_type>> &dataset)
{
    std::vector<unsigned long long int> switch_indices;
    std::vector<value_type> switch_values;
    std::tie(switch_indices, switch_values) = encode_raw(indices, dataset);
    return make_pairs(switch_indices, switch_values);
}
// Pairs every nonzero switch value with its sample index; zero entries
// (no switch detected) are dropped.
template <typename value_type>
std::vector<std::tuple<unsigned long long int, value_type>>
SwitchPredictor<value_type>::make_pairs(const std::vector<unsigned long long int> &indices,
    const std::vector<value_type> &raw_switches)
{
    std::vector<std::tuple<unsigned long long int, value_type>> pairs = {};
    for (size_t pos = 0; pos != raw_switches.size(); ++pos) {
        if (raw_switches[pos] == 0)
            continue;
        pairs.emplace_back(indices[pos], raw_switches[pos]);
    }
    return pairs;
}
// Serializes the trained ESN weights together with all detector
// hyperparameters into a Blaze binary archive; the layout matches what the
// filename constructor reads back.
template <typename value_type> void SwitchPredictor<value_type>::save(const std::string &filename)
{
    auto components = esn.get_components();
    auto W_in = std::get<0>(components);
    auto W = std::get<1>(components);
    auto W_out = std::get<2>(components);
    // NOTE: these locals shadow the members of the same name; the values
    // written come from the ESN itself, not from the members.
    auto alpha = std::get<3>(components);
    auto washout = std::get<4>(components);
    auto beta = std::get<5>(components);
    blaze::DynamicVector<value_type> params = {alpha, beta, washout, wnd_size, cmp_wnd_sz, contrast_threshold};
    blaze::Archive<std::ofstream> archive(filename);
    archive << W_in << W << W_out << params;
}
// Returns the detector hyperparameters in the order
// (wnd_size, cmp_wnd_sz, washout, contrast_threshold, alpha, beta).
template <typename value_type>
std::tuple<size_t, size_t, size_t, value_type, value_type, value_type> SwitchPredictor<value_type>::get_parameters()
{
    return std::make_tuple(wnd_size, cmp_wnd_sz, washout, contrast_threshold, alpha, beta);
}
// ---- private
// Standard deviation of v: population formula (divide by n) when `population`
// is true, sample formula (divide by n-1) otherwise.
// NOTE(review): v.size() == 0 (or == 1 in sample mode) divides by zero —
// callers are expected to pass non-trivial windows; confirm at call sites.
template <typename value_type>
value_type SwitchPredictor<value_type>::v_stddev(const std::vector<value_type> &v, const bool population)
{
    value_type mean = std::accumulate(v.begin(), v.end(), 0.0) / v.size();
    // inner_product with custom ops: "plus" accumulates, "multiply" yields
    // the squared deviation from the mean for each element pair
    value_type sq_sum = std::inner_product(
        v.begin(), v.end(), v.begin(), 0.0, [](value_type const &x, value_type const &y) { return x + y; },
        [mean](value_type const &x, value_type const &y) { return (x - mean) * (y - mean); });
    if (population)
        return std::sqrt(sq_sum / v.size());
    else
        return std::sqrt(sq_sum / (v.size() - 1));
}
// Derives the ESN input from the raw 3-channel dataset: appends a fourth
// feature column holding the sum of the sliding-window standard deviations of
// the three channels, then transposes (the ESN consumes features in rows).
// The first wnd_size entries of the stddev feature stay 0 (no full window yet).
// Fix vs. original: removed the unused local `int new_label = 0;`.
template <typename value_type>
blaze::DynamicMatrix<value_type, blaze::rowMajor>
SwitchPredictor<value_type>::preprocess(const blaze::DynamicMatrix<value_type, blaze::rowMajor> &input)
{
    blaze::DynamicVector<value_type> feature_stddev(input.rows(), 0);
    for (size_t i = wnd_size; i < feature_stddev.size(); ++i) {
        auto wnd1 = blaze::submatrix(input, i - wnd_size, 0, wnd_size, 1);
        auto wnd2 = blaze::submatrix(input, i - wnd_size, 1, wnd_size, 1);
        auto wnd3 = blaze::submatrix(input, i - wnd_size, 2, wnd_size, 1);
        feature_stddev[i] = stddev(wnd1) + stddev(wnd2) + stddev(wnd3);
    }
    blaze::DynamicMatrix<value_type> ds_all(input.rows(), 4, 0);
    // blaze::submatrix(ds_all, 0, 0, input.rows(), 3) = blaze::submatrix(input, 0, 0, input.rows(), 3);
    blaze::column(ds_all, 0) = blaze::column(input, 0);
    blaze::column(ds_all, 1) = blaze::column(input, 1);
    blaze::column(ds_all, 2) = blaze::column(input, 2);
    blaze::column(ds_all, 3) = feature_stddev;
    blaze::DynamicMatrix<value_type, blaze::rowMajor> output = blaze::trans(ds_all);
    return output;
}
// STL counterpart of the Blaze preprocess: appends to every record a fourth
// element holding the sum of the sliding-window standard deviations of the
// first three channels (0 while the window is not yet full).
// Fix vs. original: removed the unused local `int new_label = 0;`.
template <typename value_type>
template <typename RecType>
std::vector<RecType> SwitchPredictor<value_type>::preprocess(const std::vector<RecType> &input)
{
    std::vector<RecType> output(input);
    for (size_t i = 0; i < output.size(); ++i) {
        if (i < wnd_size) {
            output[i].push_back(0);
        } else {
            std::vector<value_type> wnd1(wnd_size, 0);
            std::vector<value_type> wnd2(wnd_size, 0);
            std::vector<value_type> wnd3(wnd_size, 0);
            // fill the windows with the wnd_size samples preceding position i
            for (size_t j = wnd_size; j > 0; --j) {
                wnd1[wnd_size - j] = output[i - j][0];
                wnd2[wnd_size - j] = output[i - j][1];
                wnd3[wnd_size - j] = output[i - j][2];
            }
            output[i].push_back(v_stddev(wnd1) + v_stddev(wnd2) + v_stddev(wnd3));
        }
    }
    return output;
}
// Binary Shannon entropy of the above/below-threshold split of `data`:
// 0 for a pure window (all samples on one side of the threshold), 1 for a
// perfect 50/50 split.
// Fix vs. original: the element count was held in a floating `value_type`,
// which made the loop bound and the purity check float comparisons (lossy for
// large windows with float value_type); counts are now kept integral and the
// degenerate case is checked before computing the probability.
template <typename value_type>
value_type SwitchPredictor<value_type>::class_entropy(const blaze::DynamicVector<value_type> &data,
    const value_type threshold)
{
    const size_t sz = data.size();
    size_t sum = 0; // number of samples strictly above the threshold
    for (size_t i = 0; i < sz; ++i) {
        if (data[i] > threshold)
            ++sum;
    }
    if (sum == 0 || sum == sz)
        return 0;
    const value_type p1 = static_cast<value_type>(sum) / static_cast<value_type>(sz);
    return -p1 * log2(p1) - (1 - p1) * log2(1 - p1);
}
// Trains the ESN on the preprocessed 3-channel data. The sparse (+1/-1)
// switch labels are converted into a piecewise-constant 0/1 target: the
// target holds 1 after the last upward switch and 0 after the last downward
// one.
template <typename value_type>
void SwitchPredictor<value_type>::train(const blaze::DynamicMatrix<value_type> &training_data,
    const blaze::DynamicMatrix<value_type> &labels)
{
    assert(training_data.rows() == labels.rows());
    assert(training_data.columns() == 3); // TODO relax if needed
    auto data = preprocess(training_data);
    blaze::DynamicMatrix<value_type> target(labels.rows(), 1, 0);
    int new_label = 0; // current level of the piecewise-constant target
    for (size_t i = wnd_size; i < labels.rows(); ++i) {
        if (labels(i, 0) >= 1)
            new_label = 1;
        if (labels(i, 0) <= -1)
            new_label = 0;
        target(i, 0) = new_label;
    }
    // NOTE(review): 500/5/0.99 are presumed ESN reservoir settings (size,
    // connectivity, spectral radius) — confirm against metric::ESN's ctor.
    esn = metric::ESN<std::vector<value_type>, void>(500, 5, 0.99, alpha, washout, beta);
    esn.train(data, blaze::trans(target));
}
// STL counterpart of the Blaze train: same piecewise-constant 0/1 target
// construction, with per-record vector labels.
template <typename value_type>
template <typename RecType> // to be deduced
void SwitchPredictor<value_type>::train(const std::vector<RecType> &training_data, const std::vector<RecType> &labels)
{
    assert(training_data.size() == labels.size());
    assert(training_data[0].size() == 3); // TODO relax if needed
    auto data = preprocess(training_data);
    std::vector<RecType> target(labels.size(), {0});
    int new_label = 0; // current level of the piecewise-constant target
    for (size_t i = wnd_size; i < labels.size(); ++i) {
        if (labels[i][0] >= 1)
            new_label = 1;
        if (labels[i][0] <= -1)
            new_label = 0;
        target[i] = {new_label};
    }
    // NOTE(review): 500/5/0.99 are presumed ESN reservoir settings (size,
    // connectivity, spectral radius) — confirm against metric::ESN's ctor.
    esn = metric::ESN<std::vector<value_type>, void>(500, 5, 0.99, alpha, washout, beta);
    esn.train(data, target);
}
| 12,303
|
C++
|
.cpp
| 313
| 36.370607
| 118
| 0.676638
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,477
|
kmedoids.cpp
|
metric-space-ai_metric/metric/mapping/kmedoids.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 M.Welsch
*/
#ifndef _METRIC_MAPPING_KMEDOIDS_CPP
#define _METRIC_MAPPING_KMEDOIDS_CPP
#include "../distance.hpp"
#include "../space/matrix.hpp"
#include <algorithm>
#include <limits>
#include <tuple>
#include <vector>
namespace metric {
namespace kmedoids_details {
// Assigns every object to its nearest medoid (tracking the second-nearest as
// well, which PAM's swap phase needs), increments per-medoid counts, and
// returns the total distance of all objects to their assigned medoids.
template <typename RecType, typename Metric, typename T = typename metric::Matrix<RecType, Metric>::distType>
T update_cluster(const metric::Matrix<RecType, Metric> &DM, std::vector<int> &seeds, std::vector<int> &assignments,
    std::vector<int> &sec_nearest, std::vector<int> &counts)
{
    if (sec_nearest.size() != assignments.size()) {
        sec_nearest.resize(assignments.size());
    }
    // go through and assign each object to nearest medoid, keeping track of total distance.
    T total_distance = 0;
    for (int i = 0; i < assignments.size(); i++) {
        T d1, d2; // smallest, second smallest distance to medoid, respectively
        int m1, m2; // index of medoids with distances d1, d2 from object i, respectively
        d1 = d2 = std::numeric_limits<T>::max();
        m1 = m2 = seeds.size();
        for (int m = 0; m < seeds.size(); m++) {
            T d = DM(i, seeds[m]);
            if (d < d1 || seeds[m] == i) { // prefer the medoid in case of ties.
                d2 = d1;
                m2 = m1;
                d1 = d;
                m1 = m;
            } else if (d < d2) {
                d2 = d;
                m2 = m;
            }
        }
        counts[m1] += 1;
        assignments[i] = m1;
        sec_nearest[i] = m2;
        total_distance += d1;
    }
    return total_distance;
}
// Selects the k initial medoids using the BUILD phase of Kaufman & Rousseeuw's
// PAM: the first medoid is the object with minimal total distance to all
// others; each subsequent medoid is the non-medoid maximizing the total
// distance reduction ("gain") it would bring. Cluster state (assignments,
// second-nearest, counts) is refreshed after every addition.
template <typename RecType, typename Metric>
void init_medoids(int k, const metric::Matrix<RecType, Metric> &DM, std::vector<int> &seeds,
    std::vector<int> &assignments, std::vector<int> &sec_nearest, std::vector<int> &counts)
{
    seeds.clear();
    // find first object: object minimum distance to others
    int first_medoid = 0;
    using T = typename metric::Matrix<RecType, Metric>::distType;
    T min_dissim = std::numeric_limits<T>::max();
    for (int i = 0; i < DM.size(); i++) {
        T total = 0;
        for (int j = 0; j < DM.size(); j++) {
            total += DM(i, j);
        }
        if (total < min_dissim) {
            min_dissim = total;
            first_medoid = i;
        }
    }
    // add first object to medoids and compute medoid ids.
    seeds.push_back(first_medoid);
    kmedoids_details::update_cluster(DM, seeds, assignments, sec_nearest, counts);
    // now select next k-1 objects according to KR's BUILD algorithm
    for (int cur_k = 1; cur_k < k; cur_k++) {
        int best_obj = 0;
        T max_gain = 0;
        for (int i = 0; i < DM.size(); i++) {
            if (seeds[assignments[i]] == i) // skip objects that already are medoids
                continue;
            T gain = 0;
            for (int j = 0; j < DM.size(); j++) {
                T DMj = DM(j, seeds[assignments[j]]); // D from j to its medoid
                gain += std::max(DMj - DM(i, j), T(0)); // gain from selecting i
            }
            if (gain >= max_gain) { // set the next medoid to the object that
                max_gain = gain; // maximizes the gain function.
                best_obj = i;
            }
        }
        seeds.push_back(best_obj);
        kmedoids_details::update_cluster(DM, seeds, assignments, sec_nearest, counts);
    }
}
// PAM swap cost: the change in total dissimilarity that would result from
// swapping medoid i with non-medoid h. Negative values mean the swap improves
// the clustering. Uses the cached second-nearest medoid to re-assign objects
// whose nearest medoid is the one being removed.
template <typename RecType, typename Metric, typename T = typename metric::Matrix<RecType, Metric>::distType>
T cost(int i, int h, const metric::Matrix<RecType, Metric> &DM, std::vector<int> &seeds, std::vector<int> &assignments,
    std::vector<int> &sec_nearest)
{
    T total = 0;
    for (int j = 0; j < assignments.size(); j++) {
        int mi = seeds[i]; // object id of medoid i
        T dhj = DM(h, j); // distance between object h and object j
        int mj1 = seeds[assignments[j]]; // object id of j's nearest medoid
        T dj1 = DM(mj1, j); // distance to j's nearest medoid
        // check if D bt/w medoid i and j is same as j's current nearest medoid.
        if (DM(mi, j) == dj1) {
            // j currently clings to the medoid being swapped out: it moves to
            // whichever is closer — h or its second-nearest medoid
            T dj2 = std::numeric_limits<T>::max();
            if (seeds.size() > 1) { // look at 2nd nearest if there's more than one medoid.
                int mj2 = seeds[sec_nearest[j]]; // object id of j's 2nd-nearest medoid
                dj2 = DM(mj2, j); // D to j's 2nd-nearest medoid
            }
            total += std::min(dj2, dhj) - dj1;
        } else if (dhj < dj1) { // j would switch to the new medoid h
            total += dhj - dj1;
        }
    }
    return total;
}
} // namespace kmedoids_details
// k-medoids clustering via PAM (BUILD + iterated SWAP) over a precomputed
// metric::Matrix of pairwise distances. Returns (assignments, seeds, counts):
// per-object medoid index, the medoid object ids, and per-cluster sizes.
// Iterates swaps until no swap improves total cost beyond a tolerance scaled
// by the mean pairwise distance.
template <typename RecType, typename Metric>
auto kmedoids(const metric::Matrix<RecType, Metric> &DM, int k)
    -> std::tuple<std::vector<int>, std::vector<int>, std::vector<int>>
{
    using T = typename std::invoke_result<Metric, const RecType &, const RecType &>::type;
    // check arguments
    size_t n = DM.size();
    assert(n >= 2); // error("There must be at least two points.")
    assert(k <= n); // Attempt to run PAM with more clusters than data.
    // sum up the distance matrix
    T Dsum = 0;
    for (int i = 0; i < DM.size(); ++i) {
        for (int j = i; j < DM.size(); ++j) {
            auto distance = DM(i, j);
            if (i != j) // off-diagonal entries count twice (symmetric matrix)
                Dsum += 2 * distance;
            else
                Dsum += distance;
        }
    }
    std::vector<int> seeds(k);
    std::vector<int> counts(k, 0);
    std::vector<int> assignments(n, 0);
    std::vector<int> sec_nearest(n, 0); // Index of second closest medoids. Used by PAM.
    T total_distance; // Total distance tp their medoid
    T epsilon = 1e-15; // Normalized sensitivity for convergence
    // set initianl medoids
    kmedoids_details::init_medoids(k, DM, seeds, assignments, sec_nearest, counts);
    T tolerance = epsilon * Dsum / (DM.size() * DM.size());
    while (true) {
        // initial cluster
        for (int i = 0; i < counts.size(); ++i) {
            counts[i] = 0;
        }
        total_distance = kmedoids_details::update_cluster(DM, seeds, assignments, sec_nearest, counts);
        // vars to keep track of minimum
        T minTotalCost = std::numeric_limits<T>::max();
        int minMedoid = 0;
        int minObject = 0;
        // iterate over each medoid
        for (int i = 0; i < k; i++) {
            // iterate over all non-medoids
            for (int h = 0; h < assignments.size(); h++) {
                if (seeds[assignments[h]] == h)
                    continue;
                // see if the total cost of swapping i & h was less than min
                T curCost = kmedoids_details::cost(i, h, DM, seeds, assignments, sec_nearest);
                if (curCost < minTotalCost) {
                    minTotalCost = curCost;
                    minMedoid = i;
                    minObject = h;
                }
            }
        }
        // convergence check
        if (minTotalCost >= -tolerance)
            break;
        // install the new medoid if we found a beneficial swap
        seeds[minMedoid] = minObject;
        assignments[minObject] = minMedoid;
    }
    return {assignments, seeds, counts};
}
// TO DO: dublicate version
// Variant of kmedoids taking a raw pairwise-distance table and a maximum
// iteration count: identical PAM loop, but bails out (without converging)
// once `iters` swap iterations have been performed. The raw table is wrapped
// in a metric::Matrix over Euclidean distance for the helper routines.
template <typename T>
std::tuple<std::vector<int>, std::vector<int>, std::vector<int>> kmedoids_(const std::vector<std::vector<T>> &D, int k,
    int iters // added by Max Filippov
)
{
    // can be optimized: TODO operate on indices and global dataset instead of copied subset table
    // check arguments
    int n = D.size();
    assert(n >= 2); // error("There must be at least two points.")
    assert(k <= n); // Attempt to run PAM with more clusters than data.
    // build the (pairwaise) distance matrix
    T Dsum = 0; // sum all distances // added by Max Filippov // TODO optimize
    for (auto el1 : D)
        for (auto el2 : el1)
            Dsum += el2;
    std::vector<int> seeds(k);
    std::vector<int> counts(k, 0);
    std::vector<int> assignments(n, 0);
    std::vector<int> sec_nearest(n, 0); /// Index of second closest medoids. Used by PAM.
    T total_distance; /// Total distance tp their medoid
    T epsilon = 1e-15; /// Normalized sensitivity for convergence
    // set initianl medoids
    metric::Matrix<std::vector<T>, metric::Euclidean<T>> dm(D);
    kmedoids_details::init_medoids(k, dm, seeds, assignments, sec_nearest, counts);
    T tolerance = epsilon * Dsum / (D[0].size() * D.size());
    while (true) {
        iters--; // added by Max Filippov
        // initial cluster
        for (std::size_t i = 0; i < counts.size(); ++i) {
            counts[i] = 0;
        }
        total_distance = kmedoids_details::update_cluster(dm, seeds, assignments, sec_nearest, counts);
        // vars to keep track of minimum
        T minTotalCost = std::numeric_limits<T>::max();
        int minMedoid = 0;
        int minObject = 0;
        // iterate over each medoid
        for (int i = 0; i < k; i++) {
            // iterate over all non-medoids
            for (std::size_t h = 0; h < assignments.size(); h++) {
                if (static_cast<std::size_t>(seeds[assignments[h]]) == h)
                    continue;
                // see if the total cost of swapping i & h was less than min
                T curCost = kmedoids_details::cost(i, h, dm, seeds, assignments, sec_nearest);
                if (curCost < minTotalCost) {
                    minTotalCost = curCost;
                    minMedoid = i;
                    minObject = h;
                }
            }
        }
        // convergence check
        if (minTotalCost >= -tolerance)
            break;
        if (iters < 0) { // iteration budget exhausted: stop early
            // std::cout << "\nWarning: exiting kmedoids_ due to exceeding max number of iterations\n";
            break;
        }
        // install the new medoid if we found a beneficial swap
        seeds[minMedoid] = minObject;
        assignments[minObject] = minMedoid;
    }
    return {assignments, seeds, counts};
}
} // namespace metric
#endif
| 8,896
|
C++
|
.cpp
| 247
| 32.890688
| 119
| 0.65706
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,478
|
dbscan.cpp
|
metric-space-ai_metric/metric/mapping/dbscan.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 M.Welsch
*/
#ifndef _METRIC_MAPPING_DBSCAN_CPP
#define _METRIC_MAPPING_DBSCAN_CPP
#include "../distance/k-related/Standards.hpp"
#include <cassert>
#include <deque>
#include <numeric>
#include <string>
#include <vector>
namespace metric {
// --------------------------------------------------------------
// DBSCAN
// --------------------------------------------------------------
namespace dbscan_details {
// key steps

/**
 * @brief Returns the indices of all points whose distance to point p is
 * strictly below eps (p itself is included when D(p, p) < eps).
 */
template <typename T, typename DistanceMatrix> std::deque<int> region_query(const DistanceMatrix &D, int p, T eps)
{
    std::deque<int> nbs;
    for (std::size_t i = 0; i < D.size(); ++i) {
        if (D(p, i) < eps) {
            nbs.push_back(i);
        }
    }
    return nbs;
}

/**
 * @brief Grows cluster k from seeding point p by expanding its eps-neighborhood
 * breadth-first; updates `assignments` and `visited` in place.
 *
 * @return the number of points assigned to cluster k
 */
template <typename T, typename DistanceMatrix>
int update_cluster(const DistanceMatrix &D, // distance matrix
                   const int &k, // the index of current cluster
                   const int &p, // the index of seeding point
                   const T &eps, // radius of neighborhood
                   const int &minpts, // minimum number of neighbors of a density point
                   std::deque<int> &nbs, // eps-neighborhood of p
                   std::vector<int> &assignments, // assignment vector
                   std::vector<bool> &visited)
{ // visited indicators
    assignments[p] = k;
    int cnt = 1;
    while (!std::empty(nbs)) {
        // pop the next frontier point
        int q = nbs[0];
        nbs.pop_front();
        if (!visited[q]) {
            visited[q] = true;
            auto qnbs = region_query(D, q, eps);
            // BUGFIX: compare size_t against size_t; the former plain `>= minpts`
            // implicitly converted the signed int, which misbehaves for minpts < 0.
            if (qnbs.size() >= static_cast<std::size_t>(minpts)) {
                // q is a core point: enqueue its not-yet-assigned neighbours
                for (auto x : qnbs) {
                    if (assignments[x] == 0)
                        nbs.push_back(x);
                }
            }
        }
        if (assignments[q] == 0) {
            assignments[q] = k;
            cnt += 1;
        }
    }
    return cnt;
}
} // namespace dbscan_details
// main algorithm
/**
 * @brief DBSCAN clustering over a precomputed metric distance matrix.
 *
 * @param DM distance matrix of the dataset
 * @param eps neighborhood radius (must be positive)
 * @param minpts minimum neighborhood size for a core point (must be >= 1)
 * @return tuple of (assignments (0 == noise), cluster seed points, cluster sizes)
 */
template <typename RecType, typename Metric, typename T>
std::tuple<std::vector<int>, std::vector<int>, std::vector<int>> dbscan(const Matrix<RecType, Metric> &DM, T eps,
                                                                        std::size_t minpts)
{
    // check arguments
    const auto n = DM.size();
    assert(n >= 2); // error("There must be at least two points.")
    assert(eps > 0); // error("eps must be a positive real value.")
    assert(minpts >= 1); // error("minpts must be a positive integer.")

    std::vector<int> seeds; // seeding point of each discovered cluster
    std::vector<int> counts; // size of each discovered cluster
    std::vector<int> assignments(n, int(0)); // 0 == unassigned / noise
    std::vector<bool> visited(n, false);

    // Visit every point in index order; grow a new cluster from each
    // unvisited, unassigned core point.
    int cluster_id = 0;
    const int total = static_cast<int>(n);
    for (int p = 0; p < total; ++p) {
        if (assignments[p] != 0 || visited[p])
            continue;
        visited[p] = true;
        auto neighborhood = dbscan_details::region_query(DM, p, eps);
        if (neighborhood.size() >= minpts) {
            ++cluster_id;
            auto cluster_size =
                dbscan_details::update_cluster(DM, cluster_id, p, eps, minpts, neighborhood, assignments, visited);
            seeds.push_back(p);
            counts.push_back(cluster_size);
        }
    }
    return {assignments, seeds, counts};
}
} // namespace metric
#endif
| 3,125
|
C++
|
.cpp
| 101
| 28
| 114
| 0.625498
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,479
|
SOM.cpp
|
metric-space-ai_metric/metric/mapping/SOM.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "SOM.hpp"
namespace metric {
/// Construct a SOM with a given node count; learning parameters keep their
/// in-class defaults, the neighbourhood schedule gets heuristic values.
template <class RecType, class Graph, class Metric, class Distribution>
SOM<RecType, Graph, Metric, Distribution>::SOM(size_t nodesNumber, Metric metric) : metric(metric), graph(nodesNumber)
{
    valid = graph.isValid();
    // Heuristic defaults: start radius ~ sqrt(#nodes), gaussian decay factor 2.
    neighborhood_start_size = std::sqrt(static_cast<double>(getNodesNumber()));
    neighborhood_range_decay = 2.0;
    // Seed the RNG from the wall clock so repeated runs differ.
    random_seed = std::chrono::system_clock::now().time_since_epoch().count();
}
/// Construct a SOM over a rectangular width x height node grid; learning
/// parameters keep their in-class defaults, the neighbourhood schedule gets
/// heuristic values.
template <class RecType, class Graph, class Metric, class Distribution>
SOM<RecType, Graph, Metric, Distribution>::SOM(size_t nodesWidth, size_t nodesHeight, Metric metric)
    : metric(metric), graph(nodesWidth, nodesHeight)
{
    valid = graph.isValid();
    // Heuristic defaults: start radius ~ sqrt(#nodes), gaussian decay factor 2.
    neighborhood_start_size = std::sqrt(static_cast<double>(getNodesNumber()));
    neighborhood_range_decay = 2.0;
    // Seed the RNG from the wall clock so repeated runs differ.
    random_seed = std::chrono::system_clock::now().time_since_epoch().count();
}
/// Construct a SOM with caller-supplied graph, metric, learn-rate schedule and
/// weight-initialization distribution; the neighbourhood schedule and RNG seed
/// still use the heuristic defaults.
template <class RecType, class Graph, class Metric, class Distribution>
SOM<RecType, Graph, Metric, Distribution>::SOM(const Graph &graph, const Metric &metric, double start_learn_rate,
                                               double finish_learn_rate, size_t iterations, Distribution distribution)
    : metric(metric), graph(graph), distribution(distribution), start_learn_rate(start_learn_rate),
      finish_learn_rate(finish_learn_rate), iterations(iterations)
{
    valid = graph.isValid();
    // Heuristic defaults: start radius ~ sqrt(#nodes), gaussian decay factor 2.
    neighborhood_start_size = std::sqrt(static_cast<double>(getNodesNumber()));
    neighborhood_range_decay = 2.0;
    // Seed the RNG from the wall clock so repeated runs differ.
    random_seed = std::chrono::system_clock::now().time_since_epoch().count();
}
/// Fully parameterized constructor: everything — including the neighbourhood
/// schedule and the RNG seed — is supplied by the caller (reproducible runs).
template <class RecType, class Graph, class Metric, class Distribution>
SOM<RecType, Graph, Metric, Distribution>::SOM(const Graph &graph, const Metric &metric, double start_learn_rate,
                                               double finish_learn_rate, size_t iterations, Distribution distribution,
                                               double neighborhood_start_size, double neighborhood_range_decay,
                                               long long random_seed)
    : metric(metric), graph(graph), distribution(distribution), start_learn_rate(start_learn_rate),
      finish_learn_rate(finish_learn_rate), iterations(iterations), neighborhood_start_size(neighborhood_start_size),
      neighborhood_range_decay(neighborhood_range_decay), random_seed(random_seed)
{
    valid = graph.isValid();
}
// template <class RecType, class Metric, class Graph>
// SOM<RecType, Metric, Graph>::~SOM() = default;
/// Train on the full sample set (no subsampling): every record is used in
/// every epoch.
template <class RecType, class Graph, class Metric, class Distribution>
void SOM<RecType, Graph, Metric, Distribution>::train(const std::vector<std::vector<T>> &samples)
{
    subsampled_train(samples, samples.size());
}
/// Train using only `sampleSize` (shuffled) records per epoch — a cheaper
/// approximation of train() for large datasets.
template <class RecType, class Graph, class Metric, class Distribution>
void SOM<RecType, Graph, Metric, Distribution>::estimate(const std::vector<std::vector<T>> &samples,
                                                         const size_t sampleSize)
{
    subsampled_train(samples, sampleSize);
}
/// Dimensionality "encoding" of a sample: the vector of metric distances from
/// the sample to every node's weight vector.
template <class RecType, class Graph, class Metric, class Distribution>
std::vector<double> SOM<RecType, Graph, Metric, Distribution>::encode(const RecType &sample) const
{
    const size_t node_count = getNodesNumber();
    std::vector<double> distances(node_count);
    for (size_t node = 0; node < node_count; ++node) {
        distances[node] = metric(sample, weights[node]);
    }
    return distances;
}
/**
 * @brief Best matching unit: index of the node whose weight vector is closest
 * to the sample under the configured metric (index 0 on ties / empty weights).
 */
template <class RecType, class Graph, class Metric, class Distribution>
size_t SOM<RecType, Graph, Metric, Distribution>::BMU(const RecType &sample) const
{
    assert(sample.size() == input_dimensions); // input sample must have the SOM's dimensionality
    // BUGFIX: the running minimum was a `double` seeded with T's maximum;
    // keep everything in T so the comparison below is not mixed-type.
    T minDist = std::numeric_limits<T>::max();
    size_t index = 0;
    for (size_t i = 0; i < weights.size(); ++i) {
        T dist = metric(sample, weights[i]);
        if (dist < minDist) {
            minDist = dist;
            index = i;
        }
    }
    return index;
}
/**
 * @brief Quantization error of the map: root mean square of the distance
 * between each sample and its best matching unit (the BMU distance plays the
 * role of value-minus-mean).
 */
template <class RecType, class Graph, class Metric, class Distribution>
double SOM<RecType, Graph, Metric, Distribution>::std_deviation(const std::vector<std::vector<T>> &samples) const
{
    // NOTE: removed an unused local `std_deviation` and a duplicated reset of
    // the accumulator present in the previous revision.
    double total_distances = 0;
    for (size_t i = 0; i < samples.size(); i++) {
        auto dimR = encode(samples[i]);
        auto bmu = BMU(samples[i]);
        // dimR[bmu] is the distance to the closest node
        total_distances += dimR[bmu] * dimR[bmu];
    }
    return sqrt(total_distances / samples.size());
}
/// Replace the whole weight matrix (e.g. to load externally computed weights)
/// and flag that the weights were modified outside of training.
template <class RecType, class Graph, class Metric, class Distribution>
void SOM<RecType, Graph, Metric, Distribution>::updateWeights(const std::vector<std::vector<T>> &new_weights)
{
    weights = new_weights;
    weights_changed_ = true;
}
// PRIVATE
/**
 * @brief Core training loop: runs `iterations` epochs over (a shuffled
 * subsample of) the given records, shrinking both the learn rate and the
 * neighbourhood radius linearly with epoch.
 *
 * @param samples training records; all must share one dimensionality
 * @param sampleSize number of records used per epoch (< samples.size() enables subsampling)
 */
template <class RecType, class Graph, class Metric, class Distribution>
void SOM<RecType, Graph, Metric, Distribution>::subsampled_train(const std::vector<std::vector<T>> &samples,
                                                                 int sampleSize)
{
    // initialize weight matrix at first training call
    if (input_dimensions == 0) {
        // set sample dimension
        input_dimensions = samples[0].size();
        // init weights
        weights = std::vector<std::vector<T>>(getNodesNumber(), std::vector<T>(input_dimensions));
        std::default_random_engine random_generator(random_seed);
        // Fill weights by uniform distributed values
        for (auto &weight : weights) {
            for (auto &w : weight) {
                w = distribution(random_generator);
            }
        }
    }
    assert(input_dimensions == samples[0].size());
    // a start rate below the finish rate makes no sense: decay towards zero instead
    if (start_learn_rate < finish_learn_rate) {
        finish_learn_rate = 0;
    }
    double learn_rate_base = start_learn_rate - finish_learn_rate;
    // Random updating
    std::vector<size_t> randomized_samples(samples.size());
    std::iota(randomized_samples.begin(), randomized_samples.end(), 0);
    size_t idx = 0; // epoch counter
    std::default_random_engine random_generator(random_seed);
    while (idx < iterations) {
        // shuffle samples after all was processed
        std::shuffle(randomized_samples.begin(), randomized_samples.end(), random_generator);
        for (auto idx_r = 0; idx_r < randomized_samples.size(); idx_r++) {
            // break if we use subsampling (i.e. train on a part of the whole samples)
            if (idx_r >= sampleSize)
                break;
            // learn_rate_base *= 1.0 / double(idx); // Linear
            // learn_rate_base *= 1.0 - idx / double(iterations); // Inverse of Time Learnrate
            // learn_rate_base *= std::exp(idx / double(iterations)); // Power Series
            // Linear decay: both learn rate and neighbourhood radius shrink with epoch.
            double progress_invert_stage = (1.0 - idx / double(iterations));
            double curr_learn_rate = progress_invert_stage * learn_rate_base + finish_learn_rate;
            double neighborhood_size = progress_invert_stage * neighborhood_start_size;
            size_t samples_idx = randomized_samples[idx_r];
            // Get the closest node index
            size_t bmu_index = BMU(samples[samples_idx]);
            const size_t neighbours_num = std::max(size_t(round(neighborhood_size)), size_t(0));
            auto neighbours = graph.getNeighbours(bmu_index, neighbours_num);
            // update weights of the BMU and its neighborhoods.
            for (size_t deep = 0; deep < neighbours.size(); ++deep) {
                for (size_t i = 0; i < neighbours[deep].size(); ++i) {
                    const size_t neighbour_index = neighbours[deep][i];
                    double remoteness_factor = 1;
                    // if no more neighbours are affected, the remoteness_factor returns to 1!
                    if (neighbours_num != 0) {
                        // Gaussian falloff with graph distance (`deep`) from the BMU.
                        const double sigma = neighborhood_size / neighborhood_range_decay;
                        remoteness_factor = std::exp((deep * deep) / (-2 * sigma * sigma));
                    }
                    // correct coordinates in the input_dimensions space (in other words: weights) depends from the
                    // error
                    for (size_t k = 0; k < input_dimensions; ++k) {
                        const double error = samples[samples_idx][k] - weights[neighbour_index][k];
                        weights[neighbour_index][k] += T(error * curr_learn_rate * remoteness_factor);
                    }
                }
            }
        }
        ++idx;
    }
}
} // namespace metric
| 7,949
|
C++
|
.cpp
| 177
| 41.661017
| 118
| 0.718916
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,480
|
Redif.cpp
|
metric-space-ai_metric/metric/mapping/Redif.cpp
|
#include "Redif.hpp"
#include <iostream>
namespace metric {
/// Construct the Redif model: copies the training data into a dense blaze
/// matrix (one row per record) and runs the backward-diffusion training.
template <typename Tv, class Metric>
Redif<Tv, Metric>::Redif(const std::vector<std::vector<Tv>> &trainData, size_t nNeighbors /* = 10 */,
                         size_t nIter /* = 15 */, Metric metric /*= Metric() */
)
{
    this->nNeighbors = nNeighbors;
    this->metric = metric;
    const size_t rows = trainData.size();
    const size_t cols = trainData[0].size();
    xTrain = blaze::DynamicMatrix<Tv>(rows, cols);
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            xTrain(r, c) = trainData[r][c];
    xTrainEncoded = trainModel(nIter);
}
/**
 * @brief Iterative backward diffusion over the training set: each pass builds
 * the weighted graph Laplacian of the current data, records it (for later
 * encode/decode) and applies one implicit smoothing step.
 *
 * @param nIter number of diffusion passes
 * @return the diffused (denoised) training matrix
 */
template <typename Tv, class Metric> blaze::DynamicMatrix<Tv> Redif<Tv, Metric>::trainModel(size_t nIter)
{
    blaze::DynamicMatrix<Tv> xTrainDe(this->xTrain);
    const size_t n = xTrainDe.rows();
    // BUGFIX: the inner degree-matrix loop reused `i`, shadowing the outer
    // iteration counter; the outer counter is now named `iter`.
    for (size_t iter = 0; iter < nIter; iter++) {
        blaze::DynamicMatrix<Tv> localDistMat = getLocalDistMatrix(xTrainDe);
        blaze::DynamicMatrix<Tv> graphLap = calcWeightedGraphLaplacian(localDistMat);
        this->LArray.push_back(graphLap);
        // Diagonal (degree) part of the Laplacian.
        blaze::DynamicMatrix<Tv> D(n, n, 0);
        for (size_t i = 0; i < D.rows(); i++)
            D(i, i) = graphLap(i, i);
        // Implicit diffusion step: x <- (D + L/4)^-1 * (D * x)
        blaze::DynamicMatrix<Tv> invLeft(n, n, 0);
        invLeft = blaze::inv(D + 0.25 * graphLap);
        xTrainDe = invLeft * (D * xTrainDe);
    }
    return xTrainDe;
}
/**
 * @brief Builds a symmetrized k-nearest-neighbour distance matrix: entry
 * (i, j) is the metric distance if one of i, j is among the other's
 * nNeighbors nearest neighbours, and 0 otherwise.
 */
template <typename Tv, class Metric>
blaze::DynamicMatrix<Tv> Redif<Tv, Metric>::getLocalDistMatrix(const blaze::DynamicMatrix<Tv> &dataSample)
{
    size_t n = dataSample.rows();
    // ixx(i, j) == j: row-index helper used by the scatter step below
    blaze::DynamicMatrix<size_t> ixx(nNeighbors, n, 0);
    for (size_t i = 0; i < nNeighbors; i++)
        for (size_t j = 0; j < n; j++)
            ixx(i, j) = j;
    // full pairwise distance matrix
    blaze::DynamicMatrix<Tv> dist(n, n, 0);
    for (size_t i = 0; i < n; i++)
        for (size_t j = 0; j < n; j++) {
            if (i == j) {
                dist(i, j) = 0;
                continue;
            }
            blaze::DynamicVector<Tv, blaze::rowVector> rowi = row(dataSample, i);
            blaze::DynamicVector<Tv, blaze::rowVector> rowj = row(dataSample, j);
            // copy the blaze rows into std::vectors because the metric expects containers
            std::vector<Tv> veci, vecj;
            for (size_t k = 0; k < rowi.size(); ++k) {
                veci.push_back(rowi[k]);
                vecj.push_back(rowj[k]);
            }
            dist(i, j) = metric(veci, vecj);
        }
    // per-record sort: collect each record's nNeighbors nearest neighbours
    // (tempDist[0] is the record itself — distance 0 — hence the j + 1 offset)
    blaze::DynamicMatrix<size_t> knnMat(nNeighbors, n, 0);
    blaze::DynamicMatrix<Tv> knnDistMat(nNeighbors, n, 0);
    for (size_t i = 0; i < n; i++) {
        blaze::DynamicVector<size_t> tempIdx(n);
        blaze::DynamicVector<Tv> tempDist(n);
        for (size_t j = 0; j < n; j++) {
            tempIdx[j] = j;
            tempDist[j] = dist(j, i);
        }
        Quicksort(tempIdx, tempDist, 0, n - 1);
        for (size_t j = 0; j < nNeighbors; j++) {
            knnMat(j, i) = tempIdx[j + 1];
            knnDistMat(j, i) = tempDist[j + 1];
        }
    }
    // scatter the kNN distances back into an n x n matrix
    blaze::DynamicMatrix<Tv> localDist(n, n, 0);
    for (size_t i = 0; i < nNeighbors; i++)
        for (size_t j = 0; j < n; j++)
            localDist(ixx(i, j), knnMat(i, j)) = knnDistMat(i, j);
    // symmetrize with the elementwise maximum of (i, j) and (j, i)
    blaze::DynamicMatrix<Tv> retLocalDist(n, n, 0);
    for (size_t i = 0; i < n; i++)
        for (size_t j = 0; j < n; j++)
            retLocalDist(i, j) = (localDist(i, j) > localDist(j, i)) ? localDist(i, j) : localDist(j, i);
    return retLocalDist;
}
/**
 * @brief Builds the weighted graph Laplacian L = D1 - A of the local distance
 * matrix, where A is the degree-normalized affinity matrix and D1 is A's
 * degree (row-sum) matrix.
 */
template <typename Tv, class Metric>
blaze::DynamicMatrix<Tv> Redif<Tv, Metric>::calcWeightedGraphLaplacian(const blaze::DynamicMatrix<Tv> &localDist)
{
    size_t n = localDist.rows();
    /// Inverse degree matrix: D(i,i) = 1 / (row sum of local distances)
    blaze::DynamicMatrix<Tv> D(n, n, 0);
    for (size_t i = 0; i < D.rows(); i++) {
        double sum = 0;
        for (size_t j = 0; j < n; j++)
            sum += localDist(i, j);
        if (sum == 0) // isolated node: avoid division by zero
            sum = 1;
        D(i, i) = 1 / sum;
    }
    // Degree-normalized affinity matrix.
    blaze::DynamicMatrix<Tv> A(localDist.rows(), localDist.columns(), 0);
    A = D * localDist * D / n;
    // Degree matrix of A.
    blaze::DynamicMatrix<Tv> D1(n, n, 0);
    for (size_t i = 0; i < D.rows(); i++) {
        double sum = 0;
        for (size_t j = 0; j < n; j++)
            sum += A(i, j);
        if (sum == 0)
            sum = 1.0 / n; // BUGFIX: was `1 / n` — integer division, i.e. 0 for n > 1
        D1(i, i) = sum;
    }
    // Graph Laplacian.
    blaze::DynamicMatrix<Tv> graphLap(n, n, 0);
    graphLap = D1 - A;
    return graphLap;
}
/// Vector-of-vectors convenience overload: converts to a blaze matrix,
/// delegates to the matrix overload of encode(), and converts back.
template <typename Tv, class Metric>
std::vector<std::vector<Tv>> Redif<Tv, Metric>::encode(const std::vector<std::vector<Tv>> &x)
{
    const size_t rows = x.size();
    const size_t cols = x[0].size();
    blaze::DynamicMatrix<Tv> input(rows, cols, 0);
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            input(r, c) = x[r][c];
    const auto encoded = encode(input);
    std::vector<std::vector<Tv>> result(encoded.rows(), std::vector<Tv>(encoded.columns()));
    for (size_t r = 0; r < encoded.rows(); ++r)
        for (size_t c = 0; c < encoded.columns(); ++c)
            result[r][c] = encoded(r, c);
    return result;
}
/**
 * @brief Encodes (diffuses) new records with the trained model: each record of
 * x replaces its nearest training record, then the recorded diffusion steps
 * are replayed and the matching rows are returned.
 *
 * @param x records to encode, one per row (same dimensionality as xTrain)
 * @return diffused counterparts of the rows of x
 */
template <typename Tv, class Metric>
blaze::DynamicMatrix<Tv> Redif<Tv, Metric>::encode(const blaze::DynamicMatrix<Tv> &x)
{
    size_t nTrain = xTrain.rows();
    size_t nX = x.rows();
    size_t nIter = LArray.size();
    /// Index (into the training set) of each test record's nearest neighbour.
    // BUGFIX: was constructed as a DynamicVector<Tv> and converted; build the
    // index vector with the correct element type directly. Also removed an
    // unused local `l`.
    blaze::DynamicVector<size_t> l_idx(nX);
    size_t k_check = sqrt(nTrain); /// Parameter for checking model on the record
    /// Compute distances
    for (size_t i = 0; i < nX; i++) {
        // Pairwise distances over the training set plus the current test
        // record (which occupies the last row/column).
        blaze::DynamicMatrix<Tv> distTotal(nTrain + 1, nTrain + 1, 0);
        for (size_t k = 0; k < nTrain + 1; k++)
            for (size_t j = 0; j < nTrain + 1; j++) {
                if (k == j)
                    continue;
                blaze::DynamicVector<Tv, blaze::rowVector> rowk;
                blaze::DynamicVector<Tv, blaze::rowVector> rowj;
                if (k < nTrain)
                    rowk = row(xTrain, k);
                else
                    rowk = row(x, i);
                if (j < nTrain)
                    rowj = row(xTrain, j);
                else
                    rowj = row(x, i);
                std::vector<Tv> veck, vecj;
                for (size_t s = 0; s < rowk.size(); ++s) {
                    veck.push_back(rowk[s]);
                    vecj.push_back(rowj[s]);
                }
                distTotal(k, j) = metric(veck, vecj);
            }
        // Distances from the test record to everything, sorted ascending.
        blaze::DynamicVector<Tv, blaze::columnVector> dist = column(distTotal, distTotal.columns() - 1);
        blaze::DynamicVector<size_t> idxVec(dist.size());
        for (size_t k = 0; k < idxVec.size(); k++)
            idxVec[k] = k;
        Quicksort(idxVec, dist, 0, idxVec.size() - 1);
        // dist[0] is the test record itself (distance 0); dist[1] is the
        // nearest training record. On a tie, prefer the training-set index.
        double oneDist = 0;
        if (dist[0] == dist[1]) {
            const size_t j = (idxVec[0] > idxVec[1]) ? 0 : 1;
            if (j == 0) {
                l_idx[i] = idxVec[1];
                oneDist = dist[1];
            } else {
                l_idx[i] = idxVec[0];
                oneDist = dist[0];
            }
        } else {
            l_idx[i] = idxVec[1];
            oneDist = dist[1];
        }
        /// Check if the model can denoise the test point: it must lie within
        /// the k_check-neighbourhood of its nearest training record.
        blaze::DynamicVector<Tv, blaze::columnVector> temp = column(distTotal, l_idx[i]);
        for (size_t k = 0; k < idxVec.size(); k++)
            idxVec[k] = k;
        Quicksort(idxVec, temp, 0, idxVec.size() - 1);
        if (oneDist > temp[k_check - 1])
            // BUGFIX: repaired the mangled "Testposize_t" message (an over-eager
            // int -> size_t replacement inside the string literal).
            std::cout << "Error. Testpoint of Index " << i << " cannot be denoised based on underlying model.\n";
    }
    // Substitute each matched training row by its test record, then replay the
    // recorded diffusion steps.
    blaze::DynamicMatrix<Tv> xEncoded = xTrain;
    for (size_t i = 0; i < nX; i++)
        for (size_t j = 0; j < xEncoded.columns(); j++)
            xEncoded(l_idx[i], j) = x(i, j);
    for (size_t i = 0; i < nIter; i++) {
        blaze::DynamicMatrix<Tv> D(nTrain, nTrain, 0);
        for (size_t k = 0; k < D.rows(); k++)
            D(k, k) = LArray[i](k, k);
        xEncoded = blaze::inv(D + 0.25 * LArray[i]) * (D * xEncoded);
    }
    // Collect the rows belonging to the test records.
    blaze::DynamicMatrix<Tv> xEncodedRes(nX, xEncoded.columns());
    for (size_t i = 0; i < nX; i++) {
        for (size_t j = 0; j < xEncoded.columns(); j++) {
            xEncodedRes(i, j) = xEncoded(l_idx[i], j);
        }
    }
    return xEncodedRes;
}
/// Vector-of-vectors convenience overload: converts to a blaze matrix,
/// delegates to the matrix overload of decode(), and converts back.
template <typename Tv, class Metric>
std::vector<std::vector<Tv>> Redif<Tv, Metric>::decode(const std::vector<std::vector<Tv>> &xEncoded)
{
    const size_t rows = xEncoded.size();
    const size_t cols = xEncoded[0].size();
    blaze::DynamicMatrix<Tv> input(rows, cols, 0);
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            input(r, c) = xEncoded[r][c];
    const auto decoded = decode(input);
    std::vector<std::vector<Tv>> result(decoded.rows(), std::vector<Tv>(decoded.columns()));
    for (size_t r = 0; r < decoded.rows(); ++r)
        for (size_t c = 0; c < decoded.columns(); ++c)
            result[r][c] = decoded(r, c);
    return result;
}
/**
 * @brief Decodes (reverses the diffusion of) encoded records: each record is
 * matched to its closest encoded training record, spliced into the encoded
 * training set, and the recorded diffusion steps are inverted in reverse order.
 *
 * @param xEncoded encoded records, one per row
 * @return decoded counterparts of the rows of xEncoded
 */
template <typename Tv, class Metric>
blaze::DynamicMatrix<Tv> Redif<Tv, Metric>::decode(const blaze::DynamicMatrix<Tv> &xEncoded)
{
    size_t nX = xEncoded.rows();
    size_t nTrain = xTrain.rows();
    blaze::DynamicVector<size_t> l_idx = blaze::DynamicVector<size_t>(nX);
    /// Find, for each record, the closest record of the encoded training set.
    for (size_t i = 0; i < nX; i++) {
        blaze::DynamicVector<Tv, blaze::rowVector> rowi;
        rowi = row(xEncoded, i);
        std::vector<Tv> veci;
        for (size_t s = 0; s < rowi.size(); ++s) {
            veci.push_back(rowi[s]);
        }
        Tv closest_dist = -1; // -1 == "no candidate seen yet"
        for (size_t k = 0; k < nTrain; k++) {
            // BUGFIX: removed a redundant inner declaration of `rowi` that
            // shadowed (and never affected) the outer row copy above.
            blaze::DynamicVector<Tv, blaze::rowVector> rowk;
            rowk = row(xTrainEncoded, k);
            std::vector<Tv> veck;
            for (size_t s = 0; s < rowk.size(); ++s) {
                veck.push_back(rowk[s]);
            }
            Tv dist = metric(veci, veck);
            if (closest_dist < 0 || closest_dist > dist) {
                l_idx[i] = k;
                closest_dist = dist;
            }
        }
    }
    size_t n = xTrainEncoded.rows();
    size_t nIter = LArray.size();
    // Splice the records to decode over their matched rows.
    // NOTE(review): this mutates the xTrainEncoded member in place — confirm
    // that repeated decode() calls are intended to build on previous ones.
    for (size_t i = 0; i < l_idx.size(); i++)
        for (size_t j = 0; j < xTrainEncoded.columns(); j++)
            xTrainEncoded(l_idx[i], j) = xEncoded(i, j);
    // Invert the recorded diffusion steps in reverse order: x <- D^-1 * (D + L/4) * x
    for (size_t i = 0; i < nIter; i++) {
        blaze::DynamicMatrix<Tv> temp = LArray[nIter - i - 1];
        blaze::DynamicMatrix<Tv> D(n, n, 0);
        for (size_t k = 0; k < n; k++)
            D(k, k) = temp(k, k);
        xTrainEncoded = blaze::inv(D) * (D + 0.25 * LArray[nIter - i - 1]) * xTrainEncoded;
    }
    // Collect the decoded rows.
    blaze::DynamicMatrix<Tv> xDecoded = blaze::DynamicMatrix<Tv>(l_idx.size(), xTrainEncoded.columns(), 0);
    for (size_t i = 0; i < xDecoded.rows(); i++)
        for (size_t j = 0; j < xDecoded.columns(); j++)
            xDecoded(i, j) = xTrainEncoded(l_idx[i], j);
    return xDecoded;
}
/**
 * @brief Lomuto partition of val[start..end] around the pivot val[end]; idx is
 * permuted in lockstep with val so index/value pairs stay aligned.
 *
 * @return the final position of the pivot
 */
template <typename Tv, class Metric>
template <bool flag>
size_t Redif<Tv, Metric>::partition(blaze::DynamicVector<size_t> &idx, blaze::DynamicVector<Tv, flag> &val,
                                    size_t start, size_t end)
{
    // BUGFIX: pivot and the swap temporary were `double` regardless of the
    // element type Tv; keep them in Tv to avoid silent conversions.
    Tv pivot = val[end];
    size_t P_index = start;
    size_t i;
    size_t idxTemp;
    Tv valTemp;
    for (i = start; i < end; i++) {
        if (val[i] <= pivot) {
            valTemp = val[i];
            val[i] = val[P_index];
            val[P_index] = valTemp;
            idxTemp = idx[i];
            idx[i] = idx[P_index];
            idx[P_index] = idxTemp;
            P_index++;
        }
    }
    // Move the pivot into its final slot.
    valTemp = val[end];
    val[end] = val[P_index];
    val[P_index] = valTemp;
    idxTemp = idx[end];
    idx[end] = idx[P_index];
    idx[P_index] = idxTemp;
    return P_index;
}
/**
 * @brief Quicksorts data[start..end] ascending, permuting idx in lockstep so
 * the caller can recover the sort order of the original positions.
 */
template <typename Tv, class Metric>
template <bool flag>
void Redif<Tv, Metric>::Quicksort(blaze::DynamicVector<size_t> &idx, blaze::DynamicVector<Tv, flag> &data, size_t start,
                                  size_t end)
{
    if (start < end) {
        // BUGFIX: partition() returns size_t; storing it in an int narrowed it.
        const size_t P_index = partition(idx, data, start, end);
        if (P_index != 0) // guard: P_index - 1 would wrap for an unsigned zero
            Quicksort(idx, data, start, P_index - 1);
        Quicksort(idx, data, P_index + 1, end);
    }
}
} // namespace metric
| 10,787
|
C++
|
.cpp
| 310
| 31.732258
| 120
| 0.625108
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,481
|
hierarchClustering.cpp
|
metric-space-ai_metric/metric/mapping/hierarchClustering.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include <algorithm>
#include <limits>
#include <tuple>
#include <vector>
namespace metric {
/// Returns the (row, column) of the smallest strictly-upper-triangular entry
/// of the distance matrix, i.e. the closest pair of clusters. Falls back to
/// (0, 1) when the matrix has fewer than two rows.
inline std::tuple<int, int> getMinPosition(std::vector<std::vector<double>> &distanceMatrix)
{
    int bestRow = 0;
    int bestCol = 1;
    double best = std::numeric_limits<double>::max();
    const size_t n = distanceMatrix.size();
    for (size_t row = 0; row < n; ++row) {
        for (size_t col = row + 1; col < n; ++col) {
            const double candidate = distanceMatrix[row][col];
            if (candidate < best) {
                best = candidate;
                bestRow = static_cast<int>(row);
                bestCol = static_cast<int>(col);
            }
        }
    }
    return std::make_tuple(bestRow, bestCol);
}
/// Linear search helper: returns {true, index} when `element` occurs in
/// `vecOfElements`, and {false, -1} otherwise.
template <typename T> std::pair<bool, int> findInVector(const std::vector<T> &vecOfElements, const T &element)
{
    const auto pos = std::find(vecOfElements.cbegin(), vecOfElements.cend(), element);
    if (pos == vecOfElements.cend()) {
        return {false, -1};
    }
    return {true, static_cast<int>(std::distance(vecOfElements.cbegin(), pos))};
}
/// Seeds the clustering: every source record becomes its own singleton cluster.
template <typename T, typename Distance> void HierarchicalClustering<T, Distance>::initialize()
{
    for (size_t record = 0; record < sourceData.size(); ++record) {
        clusters.push_back(Cluster<T>({sourceData[record]}));
    }
}
/// Builds the full (square) matrix of centroid-to-centroid distances between
/// all current clusters.
template <typename T, typename Distance>
std::vector<std::vector<double>> HierarchicalClustering<T, Distance>::calculateDistances()
{
    const size_t count = clusters.size();
    std::vector<std::vector<double>> distanceMatrix(count, std::vector<double>(count));
    Distance distancer;
    for (size_t row = 0; row < count; ++row) {
        for (size_t col = 0; col < count; ++col) {
            distanceMatrix[row][col] = distancer(clusters[row].centroid, clusters[col].centroid);
        }
    }
    return distanceMatrix;
}
/**
 * @brief Agglomerative clustering: repeatedly merges the closest pairs of
 * clusters (by centroid distance) until only `clustersNum` clusters remain.
 *
 * NOTE(review): each outer pass merges up to half of the current clusters
 * (all disjoint closest pairs), not just the single closest pair.
 */
template <typename T, typename Distance> void HierarchicalClustering<T, Distance>::hierarchical_clustering()
{
    initialize(); // one singleton cluster per source record
    std::vector<std::vector<double>> distanceMatrix;
    std::vector<Cluster<T>> newClusters;
    std::vector<T> newData;
    std::vector<int> mergedIndexes; // flat list of cluster indices consumed this pass
    int closest; // NOTE(review): unused
    std::pair<bool, int> loockupResultX;
    std::pair<bool, int> loockupResultY;
    int x;
    int y;
    while (clusters.size() > clustersNum) {
        distanceMatrix = calculateDistances();
        newClusters.clear();
        mergedIndexes.clear();
        // pair up clusters: keep taking the globally closest not-yet-merged pair
        while ((int)mergedIndexes.size() / 2 < (int)clusters.size() / 2) {
            std::tie(x, y) = getMinPosition(distanceMatrix);
            // invalidate this pair so getMinPosition won't return it again
            distanceMatrix[x][y] = std::numeric_limits<double>::max();
            loockupResultX = findInVector<int>(mergedIndexes, x);
            loockupResultY = findInVector<int>(mergedIndexes, y);
            if (!loockupResultX.first && !loockupResultY.first) {
                mergedIndexes.push_back(x);
                mergedIndexes.push_back(y);
                // merge: concatenate the two clusters' data
                newData.clear();
                for (size_t i = 0; i < clusters[x].data.size(); i++) {
                    newData.push_back(clusters[x].data[i]);
                }
                for (size_t i = 0; i < clusters[y].data.size(); i++) {
                    newData.push_back(clusters[y].data[i]);
                }
                auto cluster = Cluster<T>(newData);
                newClusters.push_back(cluster);
            }
        }
        // carry over any clusters left unpaired (odd count)
        if (mergedIndexes.size() < clusters.size()) {
            for (size_t i = 0; i < clusters.size(); i++) {
                loockupResultX = findInVector<int>(mergedIndexes, i);
                if (!loockupResultX.first) {
                    mergedIndexes.push_back(i);
                    newData.clear();
                    for (size_t j = 0; j < clusters[i].data.size(); j++) {
                        newData.push_back(clusters[i].data[j]);
                    }
                    auto cluster = Cluster<T>(newData);
                    newClusters.push_back(cluster);
                }
            }
        }
        clusters = newClusters;
    }
}
} // namespace metric
| 3,716
|
C++
|
.cpp
| 114
| 29.473684
| 110
| 0.683254
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,482
|
affprop.cpp
|
metric-space-ai_metric/metric/mapping/affprop.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 M.Welsch
*/
#include "affprop.hpp"

#include "../distance/k-related/Standards.hpp"

#include <blaze/Math.h>

#include <cassert>
#include <limits>
#include <string>
#include <tuple>
#include <vector>
namespace metric {
namespace affprop_details {
/**
 * @brief Builds the affinity-propagation similarity matrix from a distance
 * matrix: off-diagonal similarities are negated distances, and the diagonal
 * ("preference") interpolates between the extreme similarities, steering how
 * readily points become exemplars.
 */
template <typename T, typename DistanceMatrix>
blaze::SymmetricMatrix<blaze::DynamicMatrix<T, blaze::rowMajor>> similarity_matrix(const DistanceMatrix &DM,
                                                                                  const T preference)
{
    blaze::SymmetricMatrix<blaze::DynamicMatrix<T, blaze::rowMajor>> SM(DM.size());
    T pmin = 0;
    // BUGFIX: seed the running maximum with the lowest representable value
    // instead of the magic constant -2e21 (which overflows narrow T and is
    // wrong when every similarity is below it).
    T pmax = std::numeric_limits<T>::lowest();
    for (std::size_t i = 0; i < DM.size(); ++i) {
        for (std::size_t j = i; j < DM.size(); ++j) {
            T similarity = -DM(i, j);
            if (similarity < pmin)
                pmin = similarity;
            if (similarity > pmax)
                pmax = similarity;
            SM(i, j) = similarity;
        }
    }
    for (std::size_t i = 0; i < SM.columns(); i++) {
        SM(i, i) = preference * pmax + (1 - preference) * pmin;
    }
    return SM;
}
/**
 * @brief One damped update of the responsibility messages:
 * R(i, j) <- S(i, j) - max over j' != j of (A(i, j') + S(i, j')), blended
 * with the previous value via `damp`. Assumes n >= 2 — the max-tracking
 * below reads columns 0 and 1 unconditionally.
 *
 * @return the largest absolute message change (convergence measure)
 */
template <typename T, typename SymetricMatrix>
T update_responsibilities(blaze::DynamicMatrix<T, blaze::rowMajor> &R, const SymetricMatrix &S,
                          const blaze::DynamicMatrix<T, blaze::rowMajor> &A, const T &damp)
{
    auto n = S.rows();
    T maxabs = 0;
    std::vector<std::size_t> I1(n); // I1[i] is the column index of the maximum element in (A+S) vector
    std::vector<T> Y1(n); // Y1[i] is the maximum element in (A+S) vector
    std::vector<T> Y2(n); // Y2[i] is the second maximum element in (A+S) vector
    // Find the first and second maximum elements along each row
    for (std::size_t i = 0; i < n; ++i) {
        T v1 = A(i, 0) + S(i, 0);
        T v2 = A(i, 1) + S(i, 1);
        if (v1 > v2) {
            I1[i] = 0;
            Y1[i] = v1;
            Y2[i] = v2;
        } else {
            I1[i] = 1;
            Y1[i] = v2;
            Y2[i] = v1;
        }
    }
    for (std::size_t j = 2; j < n; ++j) {
        for (std::size_t i = 0; i < n; ++i) {
            T v = A(i, j) + S(i, j);
            if (v > Y2[i]) {
                if (v > Y1[i]) {
                    Y2[i] = Y1[i];
                    I1[i] = j;
                    Y1[i] = v;
                } else
                    Y2[i] = v;
            }
        }
    }
    // update R values
    for (std::size_t j = 0; j < n; ++j) {
        for (std::size_t i = 0; i < n; ++i) {
            T Rij_old = R(i, j);
            // when j itself holds the row maximum, compete against the runner-up
            T mv = (j == I1[i] ? Y2[i] : Y1[i]);
            T Rij_new = S(i, j) - mv;
            // damped update
            R(i, j) = damp * Rij_old + (1 - damp) * Rij_new;
            // compute convergence criteria
            T abs_ij = std::abs(Rij_old - Rij_new);
            if (abs_ij > maxabs)
                maxabs = abs_ij;
        }
    }
    return maxabs;
}
// compute availabilities
/**
 * @brief One damped update of the availability messages from the current
 * responsibilities: the diagonal A(j, j) accumulates positive evidence for j
 * being an exemplar, off-diagonal entries are clamped to be non-positive.
 *
 * @return the largest absolute message change (convergence measure)
 */
template <typename T, typename Matrix> T update_availabilities(Matrix &A, const Matrix &R, const T &damp)
{
    auto n = R.rows();
    T maxabs = 0;
    for (std::size_t j = 0; j < n; ++j) {
        T rjj = R(j, j);
        // sum of positive off-diagonal responsibilities towards candidate exemplar j
        T sum = 0;
        for (std::size_t i = 0; i < n; ++i) {
            if (i != j) {
                T r = R(i, j);
                if (r > 0)
                    sum += r;
            }
        }
        for (std::size_t i = 0; i < n; ++i) {
            T Aij_old = A(i, j);
            T Aij_new;
            if (i == j)
                Aij_new = sum;
            else {
                T r = R(i, j);
                // exclude i's own positive contribution from the evidence
                T u = rjj + sum;
                if (r > 0)
                    u -= r;
                Aij_new = (u < 0 ? u : 0);
            }
            // damped update
            A(i, j) = damp * Aij_old + (1 - damp) * Aij_new;
            // compute convergence criteria
            T abs_ij = std::abs(Aij_old - Aij_new);
            if (abs_ij > maxabs)
                maxabs = abs_ij;
        }
    }
    return maxabs;
}
/// Extracts the exemplar indices: point i is an exemplar when its combined
/// self-availability and self-responsibility is positive.
template <typename T, typename Matrix> std::vector<std::size_t> extract_exemplars(const Matrix &A, const Matrix &R)
{
    std::vector<std::size_t> exemplars;
    const auto n = A.rows();
    for (std::size_t idx = 0; idx < n; ++idx) {
        if (A(idx, idx) + R(idx, idx) > 0) {
            exemplars.push_back(idx);
        }
    }
    return exemplars;
}
/// Assigns every sample to the exemplar with the highest similarity (ties go
/// to the first exemplar); exemplars always belong to their own cluster.
/// Returns (assignments, cluster sizes).
template <typename T, typename SymmetricMatrix>
std::tuple<std::vector<std::size_t>, std::vector<std::size_t>>
get_assignments(const SymmetricMatrix &S, const std::vector<std::size_t> &exemplars)
{
    const auto n = S.rows();
    const auto k = exemplars.size();
    std::vector<std::size_t> assignments(n);
    for (std::size_t j = 0; j < n; ++j) {
        std::size_t best = 0;
        T best_sim = S(exemplars[0], j);
        for (std::size_t i = 1; i < k; ++i) {
            const T sim = S(exemplars[i], j);
            if (sim > best_sim) {
                best_sim = sim;
                best = i;
            }
        }
        assignments[j] = best;
    }
    // pin each exemplar to its own cluster
    for (std::size_t i = 0; i < k; ++i) {
        assignments[exemplars[i]] = i;
    }
    // cluster sizes
    std::vector<std::size_t> counts(k, 0);
    for (std::size_t i = 0; i < n; ++i) {
        counts[assignments[i]] += 1;
    }
    return {assignments, counts};
}
} // end namespace affprop_details
/**
 * @brief Affinity-propagation clustering over a precomputed distance matrix.
 *
 * Uses the instance members `preference`, `maxiter`, `tol` and `damp`.
 *
 * @param DM distance matrix
 * @return tuple of (assignments, exemplar indices, cluster counts)
 */
template <typename RecType, typename Metric>
auto AffProp<RecType, Metric>::operator()(const Matrix<RecType, Metric> &DM) const
    -> std::tuple<std::vector<std::size_t>, std::vector<std::size_t>, std::vector<std::size_t>>
{
    // check arguments
    auto n = DM.size();
    assert(n >= 2); // the number of samples must be at least 2.
    assert(tol > 0); // tol must be a positive value.
    assert(0 <= damp && damp < 1); // damp must be between 0 and 1.
    assert(0 <= preference && preference < 1); // preference must be between 0 and 1.
    // build similarity matrix with preference
    auto S = metric::affprop_details::similarity_matrix(DM, preference);
    // initialize messages
    // BUGFIX: explicitly zero-initialize the responsibility/availability
    // matrices — the two-argument DynamicMatrix constructor may leave the
    // elements uninitialized, and the update rules read them before writing.
    blaze::DynamicMatrix<Value, blaze::rowMajor> R(n, n, Value(0));
    blaze::DynamicMatrix<Value, blaze::rowMajor> A(n, n, Value(0));
    // main loop
    int t = 0;
    bool isConverged = false;
    while (!isConverged && t < maxiter) {
        t += 1;
        // compute new messages
        Value maxabsR = metric::affprop_details::update_responsibilities(R, S, A, damp);
        Value maxabsA = metric::affprop_details::update_availabilities(A, R, damp);
        // determine convergence: largest message change, rescaled by damping
        Value ch = std::max(maxabsA, maxabsR) / (1 - damp);
        isConverged = (ch < tol);
    }
    // extract exemplars and assignments
    auto exemplars = metric::affprop_details::extract_exemplars<Value>(A, R);
    auto [assignments, counts] = metric::affprop_details::get_assignments<Value>(S, exemplars);
    return {assignments, exemplars, counts};
}
} // namespace metric
| 6,206
|
C++
|
.cpp
| 214
| 26.093458
| 115
| 0.608324
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,483
|
KOC.cpp
|
metric-space-ai_metric/metric/mapping/KOC.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "KOC.hpp"
#include "kmedoids.hpp"
#include "metric/space/matrix.hpp"
namespace metric {
namespace KOC_details {
/// @brief Returns the indices that order v from largest to smallest value.
/// @param v values to rank (not modified)
/// @return permutation of [0, v.size()) sorted by descending v[index]
template <typename T> std::vector<size_t> sort_indexes(const std::vector<T> &v)
{
    // start with the identity permutation
    std::vector<size_t> order(v.size());
    std::iota(order.begin(), order.end(), size_t{0});
    // reorder indices so the referenced values run in descending order
    auto by_value_desc = [&v](size_t lhs, size_t rhs) { return v[lhs] > v[rhs]; };
    std::sort(order.begin(), order.end(), by_value_desc);
    return order;
}
/**
 * @brief Trains the underlying SOM on the samples, then derives per-node
 *        standard deviations and clusters the trained nodes.
 *
 * @param samples          training records
 * @param num_clusters     initial number of clusters for node clusterization
 * @param min_cluster_size smallest acceptable cluster size
 */
template <class RecType, class Graph, class Metric, class Distribution>
void KOC<RecType, Graph, Metric, Distribution>::train(const std::vector<RecType> &samples, int num_clusters,
                                                      int min_cluster_size)
{
    som.train(samples);
    // deviations are estimated over the whole training set
    calculate_std_deviations_for_nodes(samples, samples.size());
    std::tie(clusters, centroids, clusters_counts) = clusterize_nodes(num_clusters, min_cluster_size);
}
// template <class RecType, class Graph, class Metric, class Distribution>
// void KOC<RecType, Graph, Metric, Distribution>::estimate(const std::vector<RecType>& samples, const size_t
// sampleSize, int num_clusters, int min_cluster_size)
//{
// som_.estimate(samples, sampleSize);
// calculate_std_deviations_for_nodes(samples, sampleSize);
// std::tie(clusters, centroids, clusters_counts) = clusterize_nodes(num_clusters, min_cluster_size);
// }
/**
 * @brief Element-wise anomaly check: applies the single-sample overload to
 *        every record and collects the verdicts.
 *
 * @param samples records to test
 * @return one flag per sample; true marks an anomaly
 */
template <class RecType, class Graph, class Metric, class Distribution>
std::vector<bool> KOC<RecType, Graph, Metric, Distribution>::check_if_anomaly(const std::vector<RecType> &samples)
{
    std::vector<bool> verdicts;
    verdicts.reserve(samples.size());
    // delegate the per-record decision to the single-sample overload
    for (const auto &sample : samples)
        verdicts.push_back(check_if_anomaly(sample));
    return verdicts;
}
/**
 * @brief Decides whether a single record is an anomaly: the distance to its
 *        best matching unit must exceed anomaly_sigma standard deviations of
 *        that node.
 *
 * @param sample record to test
 * @return true if the sample is considered an anomaly
 */
template <class RecType, class Graph, class Metric, class Distribution>
bool KOC<RecType, Graph, Metric, Distribution>::check_if_anomaly(const RecType &sample)
{
    auto reduced = som.encode(sample);
    auto bmu = som.BMU(sample);
    // if closest distance more then max closest distance level then it is anomaly
    return reduced[bmu] > nodes_std_deviations[bmu] * anomaly_sigma;
}
/**
 * @brief Labels every sample either as anomaly (label 0) or with the cluster
 *        of its best matching unit.
 *
 * @param samples records to label
 * @return cluster label per sample; 0 denotes an anomaly
 */
template <class RecType, class Graph, class Metric, class Distribution>
std::vector<int> KOC<RecType, Graph, Metric, Distribution>::assign_to_clusters(const std::vector<RecType> &samples)
{
    const auto anomaly_flags = check_if_anomaly(samples);
    std::vector<int> labels;
    labels.reserve(samples.size());
    for (size_t idx = 0; idx < samples.size(); ++idx) {
        if (anomaly_flags[idx]) {
            // label 0 is reserved for anomalies
            labels.push_back(0);
        } else {
            labels.push_back(clusters[som.BMU(samples[idx])]);
        }
    }
    return labels;
}
/**
 * @brief Returns the (at most) `count` samples that deviate most from their
 *        best matching units.
 *
 * The per-sample score is the distance to the BMU minus the node's anomaly
 * threshold, so larger scores mean stronger outliers.
 *
 * @param samples records to rank
 * @param count   maximum number of outliers to report; non-positive yields an
 *                empty result
 * @return tuple of (indices into `samples`, their scores), sorted by
 *         descending score
 */
template <class RecType, class Graph, class Metric, class Distribution>
std::tuple<std::vector<size_t>, std::vector<typename RecType::value_type>>
KOC<RecType, Graph, Metric, Distribution>::top_outliers(const std::vector<RecType> &samples, int count)
{
    // NOTE(review): the labels are not used below; the call is kept for
    // backward compatibility in case assign_to_clusters has side effects.
    auto assignments = assign_to_clusters(samples);
    (void)assignments;

    std::vector<T> distances;
    distances.reserve(samples.size());
    for (size_t i = 0; i < samples.size(); i++) { // size_t index: no signed/unsigned mismatch
        auto reduced = som.encode(samples[i]);
        auto bmu = som.BMU(samples[i]);
        distances.push_back(reduced[bmu] - nodes_std_deviations[bmu] * anomaly_sigma);
    }

    auto idxs = sort_indexes(distances);
    // Clamp to the requested number of outliers. Guarding count <= 0 avoids the
    // old bug where a negative int converted to a huge size_t in the comparison.
    const size_t wanted = count > 0 ? static_cast<size_t>(count) : 0;
    if (idxs.size() > wanted) {
        idxs.resize(wanted);
    }

    std::vector<T> sorted_distances;
    sorted_distances.reserve(idxs.size());
    for (size_t i = 0; i < idxs.size(); i++) {
        sorted_distances.push_back(distances[idxs[i]]);
    }

    return {idxs, sorted_distances};
}
/// PRIVATE
/**
 * @brief Estimates, for every SOM node, the standard deviation of the
 *        sample-to-BMU distance over a random subset of the training samples.
 *
 * @param samples    training records
 * @param sampleSize number of samples to draw (clamped to samples.size())
 */
template <class RecType, class Graph, class Metric, class Distribution>
void KOC<RecType, Graph, Metric, Distribution>::calculate_std_deviations_for_nodes(const std::vector<RecType> &samples,
                                                                                   int sampleSize)
{
    if (sampleSize > samples.size()) {
        sampleSize = samples.size();
    }
    // Random samples
    std::vector<size_t> randomized_samples(samples.size());
    std::iota(randomized_samples.begin(), randomized_samples.end(), 0);
    // shuffle samples after all was processed (seeded from random_seed for reproducibility)
    std::default_random_engine random_generator(random_seed);
    std::shuffle(randomized_samples.begin(), randomized_samples.end(), random_generator);
    int num_nodes = som.getNodesNumber();
    // per-node accumulators: hit counter and sum of squared BMU distances
    std::vector<int> closest_distances(num_nodes, 0);
    std::vector<T> square_distances_sum(num_nodes, 0);
    for (size_t i = 0; i < sampleSize; i++) {
        size_t sample_idx = randomized_samples[i];
        auto sample = next(samples.begin(), sample_idx);
        auto reduced = som.encode(*sample);
        auto bmu = som.BMU(*sample);
        // accumulate the squared distance of the sample to its best matching unit
        square_distances_sum[bmu] += reduced[bmu] * reduced[bmu];
        closest_distances[bmu]++;
    }
    nodes_std_deviations = std::vector<T>(num_nodes);
    for (size_t i = 0; i < num_nodes; i++) {
        if (closest_distances[i] > 0) {
            // root mean squared distance for nodes that received hits
            nodes_std_deviations[i] = sqrt(square_distances_sum[i] / closest_distances[i]);
        } else {
            // nodes never chosen as BMU get a zero threshold
            nodes_std_deviations[i] = 0;
        }
    }
}
/**
 * @brief Clusters the trained SOM nodes with k-medoids, reducing the number of
 *        clusters until the smallest cluster reaches min_cluster_size.
 *
 * @param num_clusters     initial number of clusters to try
 * @param min_cluster_size smallest acceptable cluster size (clamped to the
 *                         node count)
 * @return tuple of (node-to-cluster labels shifted by +1 so that label 0 stays
 *         free for anomalies, medoid seeds, cluster sizes); three empty
 *         vectors if no acceptable clustering was found
 */
template <class RecType, class Graph, class Metric, class Distribution>
std::tuple<std::vector<int>, std::vector<int>, std::vector<int>>
KOC<RecType, Graph, Metric, Distribution>::clusterize_nodes(int num_clusters, int min_cluster_size)
{
    int current_min_cluster_size = -1;
    auto nodes_data = som.get_weights();
    if (min_cluster_size > nodes_data.size()) {
        min_cluster_size = nodes_data.size();
    }
    while (current_min_cluster_size < min_cluster_size) {
        // clustering on the reduced data
        metric::Matrix<RecType, Metric> matrix(nodes_data, metric);
        auto [assignments, seeds, counts] = metric::kmedoids(matrix, num_clusters);
        // size of the smallest cluster in this attempt
        std::vector<int>::iterator result = std::min_element(counts.begin(), counts.end());
        current_min_cluster_size = counts[std::distance(counts.begin(), result)];
        // count degenerate clusters (one node or fewer)
        int num_ones = 0;
        for (auto i : counts) {
            if (i <= 1) {
                num_ones++;
            }
        }
        // shrink the cluster count for the next attempt: at most halved,
        // proportional to the number of degenerate clusters, never below 1
        int new_num_clusters = num_clusters * std::min(0.5, 0.1 * num_ones);
        if (new_num_clusters == num_clusters) {
            new_num_clusters--;
        }
        if (new_num_clusters <= 0) {
            new_num_clusters = 1;
        }
        num_clusters = new_num_clusters;
        if (current_min_cluster_size >= min_cluster_size) {
            for (size_t i = 0; i < assignments.size(); i++) {
                // we want 0 label as anomaly, so increment original labels
                assignments[i]++;
            }
            return {assignments, seeds, counts};
        }
    }
    // no clustering satisfied the minimum cluster size
    return {std::vector<int>(), std::vector<int>(), std::vector<int>()};
}
} // namespace KOC_details
//
/**
 * @brief Factory setup over a graph of nodesNumber nodes. The random seed is
 *        taken from the system clock; the start neighborhood size defaults to
 *        sqrt(nodesNumber) and the range decay to 2.0.
 */
template <class RecType, class Graph, class Metric, class Distribution>
KOC_factory<RecType, Graph, Metric, Distribution>::KOC_factory(size_t nodesNumber, double anomaly_sigma,
                                                               double start_learn_rate, double finish_learn_rate,
                                                               size_t iterations, T distribution_min,
                                                               T distribution_max)
    : graph(nodesNumber), metric(), distribution(distribution_min, distribution_max), anomaly_sigma(anomaly_sigma),
      start_learn_rate(start_learn_rate), finish_learn_rate(finish_learn_rate), iterations(iterations),
      random_seed(std::chrono::system_clock::now().time_since_epoch().count()),
      neighborhood_start_size(std::sqrt(double(nodesNumber))), neighborhood_range_decay(2.0)
{
}
/**
 * @brief Factory setup over a nodesWidth x nodesHeight grid. The random seed
 *        is taken from the system clock; the start neighborhood size defaults
 *        to sqrt(width * height) and the range decay to 2.0.
 */
template <class RecType, class Graph, class Metric, class Distribution>
KOC_factory<RecType, Graph, Metric, Distribution>::KOC_factory(size_t nodesWidth, size_t nodesHeight,
                                                               double anomaly_sigma, double start_learn_rate,
                                                               double finish_learn_rate, size_t iterations,
                                                               T distribution_min, T distribution_max)
    : graph(nodesWidth, nodesHeight), metric(), distribution(distribution_min, distribution_max),
      anomaly_sigma(anomaly_sigma), start_learn_rate(start_learn_rate), finish_learn_rate(finish_learn_rate),
      iterations(iterations), random_seed(std::chrono::system_clock::now().time_since_epoch().count()),
      neighborhood_start_size(std::sqrt(double(nodesWidth * nodesHeight))), neighborhood_range_decay(2.0)
{
}
/**
 * @brief Fully parameterized factory setup over a graph of nodesNumber nodes:
 *        neighborhood shape and the random seed are supplied by the caller.
 */
template <class RecType, class Graph, class Metric, class Distribution>
KOC_factory<RecType, Graph, Metric, Distribution>::KOC_factory(size_t nodesNumber, double anomaly_sigma,
                                                               double start_learn_rate, double finish_learn_rate,
                                                               size_t iterations, T distribution_min,
                                                               T distribution_max, double neighborhood_start_size,
                                                               double neighborhood_range_decay, long long random_seed)
    : graph(nodesNumber), metric(), distribution(distribution_min, distribution_max), anomaly_sigma(anomaly_sigma),
      start_learn_rate(start_learn_rate), finish_learn_rate(finish_learn_rate), iterations(iterations),
      random_seed(random_seed), neighborhood_start_size(neighborhood_start_size),
      neighborhood_range_decay(neighborhood_range_decay)
{
}
/**
 * @brief Fully parameterized factory setup over a nodesWidth x nodesHeight
 *        grid: neighborhood shape and the random seed are supplied by the caller.
 */
template <class RecType, class Graph, class Metric, class Distribution>
KOC_factory<RecType, Graph, Metric, Distribution>::KOC_factory(size_t nodesWidth, size_t nodesHeight,
                                                               double anomaly_sigma, double start_learn_rate,
                                                               double finish_learn_rate, size_t iterations,
                                                               T distribution_min, T distribution_max,
                                                               double neighborhood_start_size,
                                                               double neighborhood_range_decay, long long random_seed)
    : graph(nodesWidth, nodesHeight), metric(), distribution(distribution_min, distribution_max),
      anomaly_sigma(anomaly_sigma), start_learn_rate(start_learn_rate), finish_learn_rate(finish_learn_rate),
      iterations(iterations), random_seed(random_seed), neighborhood_start_size(neighborhood_start_size),
      neighborhood_range_decay(neighborhood_range_decay)
{
}
/**
 * @brief Builds a KOC with the stored settings and trains it on the samples.
 *
 * @param samples          training records
 * @param num_clusters     initial number of clusters
 * @param min_cluster_size smallest acceptable cluster size
 * @return trained KOC instance
 */
template <class RecType, class Graph, class Metric, class Distribution>
KOC_details::KOC<RecType, Graph, Metric, Distribution>
KOC_factory<RecType, Graph, Metric, Distribution>::operator()(const std::vector<RecType> &samples, int num_clusters,
                                                              int min_cluster_size)
{
    KOC_details::KOC<RecType, Graph, Metric, Distribution> koc(
        graph, metric, anomaly_sigma, start_learn_rate, finish_learn_rate, iterations, distribution,
        neighborhood_start_size, neighborhood_range_decay, random_seed);
    koc.train(samples, num_clusters, min_cluster_size);
    return koc;
}
} // namespace metric
| 10,334
|
C++
|
.cpp
| 227
| 41.854626
| 119
| 0.718675
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,484
|
deterministic_switch_detector.cpp
|
metric-space-ai_metric/metric/mapping/deterministic_switch_detector.cpp
|
#include "esn_switch_detector.hpp"
/**
 * @brief Constructs the detector from pre-built weight matrices.
 *
 * @param W1_          first-layer weights (layout is assumed to match the one
 *                     produced by the scalar-parameter constructor — confirm)
 * @param Wo_          output mixing weights
 * @param wnd_size_    sliding window length used by encode()
 * @param update_rate_ smoothing factor of the switch-off latency filter
 */
template <typename value_type>
DetSwitchDetector<value_type>::DetSwitchDetector(const blaze::DynamicMatrix<value_type> &W1_,
                                                 const blaze::DynamicMatrix<value_type> &Wo_, const size_t wnd_size_,
                                                 const value_type update_rate_)
    : wnd_size(wnd_size_), update_rate(update_rate_), W1(W1_), Wo(Wo_)
{
}
/**
 * @brief Constructs the detector from scalar gains: builds a 4x5 first-layer
 *        matrix (per-channel gains scaled by magn on the diagonal, offsets in
 *        the last column) and a 1x4 output mixing row.
 */
template <typename value_type>
DetSwitchDetector<value_type>::DetSwitchDetector(const size_t wnd_size_, const value_type update_rate_,
                                                 const value_type w_r, const value_type w_g, const value_type w_b,
                                                 const value_type w_s, const value_type magn, const value_type rgb_offs,
                                                 const value_type s_offs, const value_type rgb_wo,
                                                 const value_type s_wo)
    : wnd_size(wnd_size_), update_rate(update_rate_)
{
    W1 = {
        {magn * w_r, 0, 0, 0, magn * rgb_offs},
        {0, magn * w_g, 0, 0, magn * rgb_offs},
        {0, 0, magn * w_b, 0, magn * rgb_offs},
        {0, 0, 0, magn * w_s, magn * s_offs},
    };
    Wo = {{rgb_wo, rgb_wo, rgb_wo, s_wo}};
}
/**
 * @brief Runs the detector over a dataset and emits switch events.
 *
 * Adds a sliding-window stddev feature over columns 1..3, feeds each row
 * through the small two-layer network (W1, tanh, Wo mixing), applies a
 * switch-off latency filter, binarizes and reports the edges.
 *
 * @param dataset input matrix; columns 1..3 are used as signal channels
 * @return single-column matrix: +1 at switch-on rows, -1 at switch-off rows,
 *         0 elsewhere
 */
template <typename value_type>
blaze::DynamicMatrix<value_type> DetSwitchDetector<value_type>::encode(const blaze::DynamicMatrix<value_type> &dataset)
{
    // additional feature: sliding window stddev
    blaze::DynamicVector<value_type> feature_stddev(dataset.rows(),
                                                    0); // TODO remove saving, stddev can be computed on the fly
    for (size_t i = wnd_size; i < feature_stddev.size(); ++i) {
        auto wnd1 = blaze::submatrix(dataset, i - wnd_size, 1, wnd_size, 1);
        auto wnd2 = blaze::submatrix(dataset, i - wnd_size, 2, wnd_size, 1);
        auto wnd3 = blaze::submatrix(dataset, i - wnd_size, 3, wnd_size, 1);
        feature_stddev[i] = stddev(wnd1) + stddev(wnd2) + stddev(wnd3);
    }
    // assemble network input: the three signal channels plus the stddev feature
    blaze::DynamicMatrix<value_type> ds_all(dataset.rows(), 4, 0);
    // blaze::submatrix(ds_all, 0, 0, dataset.rows(), 3) = blaze::submatrix(dataset, 0, 1, dataset.rows(), 3); // fails
    // on datasets larger than 100000
    blaze::column(ds_all, 0) = blaze::column(dataset, 1);
    blaze::column(ds_all, 1) = blaze::column(dataset, 2);
    blaze::column(ds_all, 2) = blaze::column(dataset, 3);
    blaze::column(ds_all, 3) = feature_stddev;
    // filtering
    // value_type sliding_prev = 0;
    value_type sliding_prev = lat; // we keep latency value between calls
    // unsigned int bin_out;
    unsigned int bin_prev = 0;
    blaze::DynamicMatrix<value_type> out(ds_all.rows(), 1, 0);
    for (size_t i = wnd_size; i < dataset.rows(); ++i) {
        // input row extended with a constant 1 so the last W1 column acts as bias
        blaze::DynamicVector<value_type> in1(ds_all.columns() + 1);
        blaze::subvector(in1, 0, ds_all.columns()) = blaze::trans(blaze::row(ds_all, i));
        in1[ds_all.columns()] = 1; // offset element
        auto layer1 = blaze::tanh(W1 * in1);
        // weighted mean of the first-layer outputs
        value_type mixed_out = blaze::sum(Wo * layer1) / blaze::sum(Wo);
        value_type latency_out;
        if (mixed_out > 0.5) { // switching on immediately
            latency_out = 1;
        } else { // switch off latency
            value_type upd = (mixed_out * update_rate + sliding_prev * (1 - update_rate));
            latency_out = mixed_out > upd ? mixed_out : upd; // reset latency
        }
        sliding_prev = latency_out;
        int bin_out = 0;
        if (latency_out > 0.5) { // binarize
            bin_out = 1;
        }
        // report only the edges of the binary signal
        if (bin_out == 1 && bin_prev == 0)
            out(i, 0) = 1;
        if (bin_out == 0 && bin_prev == 1)
            out(i, 0) = -1;
        bin_prev = bin_out;
    }
    lat = sliding_prev; // we keep latency value between calls
    return out;
}
| 3,343
|
C++
|
.cpp
| 76
| 40.355263
| 119
| 0.656615
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,485
|
PCFA.cpp
|
metric-space-ai_metric/metric/mapping/PCFA.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_MAPPING_PCFA_CPP
#define _METRIC_MAPPING_PCFA_CPP
#include "PCFA.hpp"
namespace metric {
/**
 * @brief Principal component analysis of a matrix holding observations in
 *        columns.
 *
 * Centers the input by the per-row means, builds the covariance matrix and
 * returns the n_components eigenvectors with the largest absolute eigenvalues,
 * one per row of the result.
 *
 * @param In           input matrix, one observation per column
 * @param n_components number of principal components to keep
 * @param averages     output: per-row means used for centering
 * @return matrix with the selected eigenvectors in rows
 */
template <class BlazeMatrix>
blaze::DynamicMatrix<typename BlazeMatrix::ElementType>
PCA_col(const BlazeMatrix &In, int n_components, blaze::DynamicVector<typename BlazeMatrix::ElementType> &averages)
{
    auto Result = blaze::DynamicMatrix<typename BlazeMatrix::ElementType>(n_components, In.rows(), 0);
    averages = blaze::sum<blaze::rowwise>(In) / In.columns();
    auto CenteredInput = blaze::DynamicMatrix<typename BlazeMatrix::ElementType>(In.rows(), In.columns(), 0);
    for (size_t col = 0; col < In.columns(); col++)
        column(CenteredInput, col) = column(In, col) - averages;
    blaze::SymmetricMatrix<blaze::DynamicMatrix<typename BlazeMatrix::ElementType>> CovMat =
        blaze::evaluate(CenteredInput * trans(CenteredInput));
    blaze::DynamicVector<typename BlazeMatrix::ElementType, blaze::columnVector> w(In.rows()); // for eigenvalues
    blaze::DynamicMatrix<typename BlazeMatrix::ElementType, blaze::rowMajor> V(In.rows(),
                                                                               In.rows()); // for eigenvectors
    eigen(CovMat, w, V);
    // Sort and select: eigenvalues are assumed ascending (LAPACK convention),
    // so we scan from both ends picking the larger absolute eigenvalue first.
    size_t lower_idx = 0;
    size_t upper_idx = w.size() - 1;
    int count = 0;
    // FIX: the old condition `upper_idx > lower_idx` stopped one step early and
    // dropped the final eigenpair when n_components equals the input rank.
    // `count < w.size()` admits exactly w.size() picks and cannot underflow.
    while (count < n_components && static_cast<size_t>(count) < w.size()) {
        if (-w[lower_idx] > w[upper_idx]) {
            blaze::row(Result, count) = blaze::row(V, lower_idx); // add eigenpair
            lower_idx++;
        } else {
            blaze::row(Result, count) = blaze::row(V, upper_idx); // add eigenpair
            upper_idx--;
        }
        count++;
    }
    return Result;
}
/**
 * @brief Principal component analysis of a matrix holding observations in
 *        rows.
 *
 * Centers the input by the per-column means, builds the covariance matrix and
 * returns the n_components eigenvectors with the largest absolute eigenvalues,
 * one per row of the result.
 *
 * @param In           input matrix, one observation per row
 * @param n_components number of principal components to keep; < 1 yields the
 *                     zero-filled result without running the decomposition
 * @param averages     output: per-column means used for centering
 * @return matrix with the selected eigenvectors in rows
 */
template <class BlazeMatrix>
blaze::DynamicMatrix<typename BlazeMatrix::ElementType>
PCA(const BlazeMatrix &In, int n_components,
    blaze::DynamicVector<typename BlazeMatrix::ElementType, blaze::rowVector> &averages)
{
    auto Result = blaze::DynamicMatrix<typename BlazeMatrix::ElementType>(n_components, In.columns(), 0);
    averages = blaze::sum<blaze::columnwise>(In) / In.rows();
    auto CenteredInput = blaze::DynamicMatrix<typename BlazeMatrix::ElementType>(In.rows(), In.columns(), 0);
    for (size_t row_idx = 0; row_idx < In.rows(); row_idx++)
        blaze::row(CenteredInput, row_idx) = blaze::row(In, row_idx) - averages;
    if (n_components < 1)
        return Result;
    blaze::SymmetricMatrix<blaze::DynamicMatrix<typename BlazeMatrix::ElementType>> CovMat =
        blaze::evaluate(trans(CenteredInput) * CenteredInput);
    blaze::DynamicVector<typename BlazeMatrix::ElementType, blaze::columnVector> w(CovMat.rows()); // for eigenvalues
    blaze::DynamicMatrix<typename BlazeMatrix::ElementType, blaze::rowMajor> V(CovMat.rows(),
                                                                               CovMat.rows()); // for eigenvectors
    eigen(CovMat, w, V);
    // Sort and select: eigenvalues are assumed ascending (LAPACK convention),
    // so we scan from both ends picking the larger absolute eigenvalue first.
    size_t lower_idx = 0;
    size_t upper_idx = w.size() - 1;
    int count = 0;
    // FIX: the old condition `upper_idx > lower_idx` stopped one step early and
    // dropped the final eigenpair when n_components equals the input rank.
    // `count < w.size()` admits exactly w.size() picks and cannot underflow.
    while (count < n_components && static_cast<size_t>(count) < w.size()) {
        if (-w[lower_idx] > w[upper_idx]) {
            blaze::row(Result, count) = blaze::row(V, lower_idx); // add eigenpair
            lower_idx++;
        } else {
            blaze::row(Result, count) = blaze::row(V, upper_idx); // add eigenpair
            upper_idx--;
        }
        count++;
    }
    return Result; // eigenvectors in rows
}
// simple linear encoder based on PCA_col, accepts curves in columns
/// @brief Trains the column-wise linear encoder: stores the PCA projection as
///        encoder, its transpose as decoder, and the per-row averages used
///        for centering.
template <typename V> PCFA_col<V>::PCFA_col(const blaze::DynamicMatrix<value_type> &TrainingData, size_t n_features)
{
    W_encode = metric::PCA_col(TrainingData, n_features, averages);
    W_decode = trans(W_encode); // computed once and saved
}
/**
 * @brief Projects column-wise data into feature space: subtracts the stored
 *        averages from every column and multiplies by the encoder matrix.
 *
 * @param Data input matrix, one curve per column
 * @return encoded columns
 */
template <typename V>
blaze::DynamicMatrix<typename PCFA_col<V>::value_type>
PCFA_col<V>::encode(const blaze::DynamicMatrix<PCFA_col<V>::value_type> &Data)
{
    auto Centered = blaze::DynamicMatrix<PCFA_col<V>::value_type>(Data.rows(), Data.columns(), 0);
    for (size_t c = 0; c < Data.columns(); ++c)
        column(Centered, c) = column(Data, c) - averages;
    return W_encode * Centered;
}
/**
 * @brief Reconstructs column-wise curves from codes.
 *
 * @param Codes   encoded columns
 * @param unshift when true, the stored averages are added back after decoding
 * @return reconstructed data, one curve per column
 */
template <typename V>
blaze::DynamicMatrix<typename PCFA_col<V>::value_type>
PCFA_col<V>::decode(const blaze::DynamicMatrix<PCFA_col<V>::value_type> &Codes, bool unshift)
{
    auto Reconstructed = W_decode * Codes;
    if (!unshift)
        return Reconstructed;
    // add the stored means back to every column
    auto Shifted =
        blaze::DynamicMatrix<typename PCFA_col<V>::value_type>(Reconstructed.rows(), Reconstructed.columns());
    for (size_t c = 0; c < Reconstructed.columns(); ++c)
        column(Shifted, c) = column(Reconstructed, c) + averages;
    return Shifted;
}
/// @brief Returns the stored per-row averages as a single-column matrix.
template <typename V> blaze::DynamicMatrix<typename PCFA_col<V>::value_type> PCFA_col<V>::average()
{
    auto avg = blaze::DynamicMatrix<typename PCFA_col<V>::value_type>(averages.size(), 1);
    column(avg, 0) = averages;
    return avg;
    // return expand(averages, 1); // expand absents in local version of Blaze-lib
}
/// @brief Returns the averages (column 0) followed by the decoder weight
///        columns in a single matrix.
template <typename V> blaze::DynamicMatrix<typename PCFA_col<V>::value_type> PCFA_col<V>::eigenmodes()
{
    auto Eigenmodes = blaze::DynamicMatrix<typename PCFA_col<V>::value_type>(W_decode.rows(), W_decode.columns() + 1);
    column(Eigenmodes, 0) = averages;
    submatrix(Eigenmodes, 0, 1, W_decode.rows(), W_decode.columns()) = W_decode;
    return Eigenmodes;
}
// simple linear encoder based on PCA, accepts curves in rows
/// @brief Trains the row-wise encoder: stores the PCA projection as decoder,
///        its transpose as encoder, and the column averages used for centering.
template <typename RecType, typename Metric>
PCFA<RecType, Metric>::PCFA(const blaze::DynamicMatrix<value_type> &TrainingData, size_t n_features)
{
    W_decode = metric::PCA(TrainingData, n_features, averages);
    W_encode = trans(W_decode); // computed once and saved
}
/// @brief Same as the matrix constructor, but accepts the training data as an
///        STL vector of records (one record per row).
template <typename RecType, typename Metric>
PCFA<RecType, Metric>::PCFA(const std::vector<RecType> &TrainingData, const size_t n_features)
{
    // repack the records into a dense matrix, one record per row
    blaze::DynamicMatrix<value_type> blaze_in(TrainingData.size(), TrainingData[0].size(), 0);
    for (size_t i = 0; i < TrainingData.size(); ++i)
        for (size_t j = 0; j < TrainingData[0].size(); ++j)
            blaze_in(i, j) = TrainingData[i][j];
    W_decode = metric::PCA(blaze_in, n_features, averages);
    W_encode = trans(W_decode); // computed once and saved
}
/// @brief Restores a trained model from precomputed decoder weights and averages.
template <typename RecType, typename Metric>
PCFA<RecType, Metric>::PCFA(const blaze::DynamicMatrix<value_type> &Weights,
                            const blaze::DynamicVector<value_type, blaze::rowVector> &avgs)
{
    W_decode = Weights;
    W_encode = trans(W_decode); // computed once and saved
    averages = avgs;
}
/// @brief Restores a trained model from STL containers: decoder weights given
///        as one record per row, plus the averages record.
template <typename RecType, typename Metric>
PCFA<RecType, Metric>::PCFA(const std::vector<RecType> &Weights, const RecType &avgs)
{
    W_decode = blaze::DynamicMatrix<value_type>(Weights.size(), Weights[0].size(), 0);
    for (size_t i = 0; i < Weights.size(); ++i)
        for (size_t j = 0; j < Weights[0].size(); ++j)
            W_decode(i, j) = Weights[i][j];
    W_encode = trans(W_decode); // computed once and saved
    averages = blaze::DynamicVector<value_type, blaze::rowVector>(avgs.size(), 0);
    for (size_t i = 0; i < avgs.size(); ++i) {
        averages[i] = avgs[i];
    }
}
/// @brief Projects row-wise data into feature space: subtracts the stored
///        averages from every row and multiplies by the encoder matrix.
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>
PCFA<RecType, Metric>::encode(const blaze::DynamicMatrix<PCFA<RecType, Metric>::value_type> &Data)
{
    auto CenteredInput = blaze::DynamicMatrix<PCFA<RecType, Metric>::value_type>(Data.rows(), Data.columns(), 0);
    for (size_t row_idx = 0; row_idx < Data.rows(); row_idx++)
        blaze::row(CenteredInput, row_idx) = blaze::row(Data, row_idx) - averages;
    return CenteredInput * W_encode;
}
/// @brief STL overload of encode(): converts the records to a blaze matrix,
///        centers by the stored averages, projects and converts back.
template <typename RecType, typename Metric>
std::vector<RecType> PCFA<RecType, Metric>::encode(const std::vector<RecType> &Data)
{
    auto DataBlaze = vector_to_blaze(Data);
    auto CenteredInput =
        blaze::DynamicMatrix<PCFA<RecType, Metric>::value_type>(DataBlaze.rows(), DataBlaze.columns(), 0);
    for (size_t row_idx = 0; row_idx < DataBlaze.rows(); row_idx++)
        blaze::row(CenteredInput, row_idx) = blaze::row(DataBlaze, row_idx) - averages;
    blaze::DynamicMatrix<PCFA<RecType, Metric>::value_type> Out = CenteredInput * W_encode;
    return blaze2RecType<RecType>(Out);
}
/**
 * @brief Reconstructs row-wise data from codes.
 *
 * @param Codes   encoded rows
 * @param unshift when true, the stored averages are added back after decoding
 * @return reconstructed data, one record per row
 */
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>
PCFA<RecType, Metric>::decode(const blaze::DynamicMatrix<PCFA<RecType, Metric>::value_type> &Codes, bool unshift)
{
    if (unshift) {
        auto Noncentered = Codes * W_decode;
        auto Centered =
            blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>(Noncentered.rows(), Noncentered.columns());
        for (size_t row_idx = 0; row_idx < Noncentered.rows(); row_idx++)
            blaze::row(Centered, row_idx) = blaze::row(Noncentered, row_idx) + averages;
        return Centered;
    } else {
        return Codes * W_decode;
    }
}
/// @brief STL overload of decode(): converts the codes to a blaze matrix,
///        decodes (optionally un-centering) and converts back to records.
template <typename RecType, typename Metric>
std::vector<RecType> PCFA<RecType, Metric>::decode(const std::vector<RecType> &Codes, bool unshift)
{
    auto CodesBlaze = vector_to_blaze(Codes);
    if (unshift) {
        auto Noncentered = CodesBlaze * W_decode;
        auto Centered =
            blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>(Noncentered.rows(), Noncentered.columns());
        for (size_t row_idx = 0; row_idx < Noncentered.rows(); row_idx++)
            blaze::row(Centered, row_idx) = blaze::row(Noncentered, row_idx) + averages;
        return blaze2RecType<RecType>(Centered);
    } else {
        return blaze2RecType<RecType>(CodesBlaze * W_decode);
    }
}
/// @brief Returns the stored averages as a single-row matrix.
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type> PCFA<RecType, Metric>::average_mat()
{
    auto avg = blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>(1, averages.size());
    blaze::row(avg, 0) = averages;
    return avg;
    // return expand(averages, 0); // expand absents in local version of Blaze-lib
}
/// @brief Returns the stored averages converted to a single record of RecType.
template <typename RecType, typename Metric> RecType PCFA<RecType, Metric>::average()
{
    blaze::DynamicMatrix<value_type> result(1, averages.size());
    blaze::row(result, 0) = averages;
    return blaze2RecType<RecType>(result)[0];
}
/// @brief Returns the decoder weight matrix as records, one W_decode row each.
template <typename RecType, typename Metric> std::vector<RecType> PCFA<RecType, Metric>::weights()
{
    return blaze2RecType<RecType>(W_decode);
}
/// @brief Stacks the averages (first row) on top of the decoder weight rows.
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type> PCFA<RecType, Metric>::eigenmodes_mat()
{
    auto Eigenmodes =
        blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>(W_decode.rows() + 1, W_decode.columns());
    blaze::row(Eigenmodes, 0) = averages;
    submatrix(Eigenmodes, 1, 0, W_decode.rows(), W_decode.columns()) = W_decode;
    return Eigenmodes;
}
/// @brief eigenmodes_mat() converted to a vector of records.
template <typename RecType, typename Metric> std::vector<RecType> PCFA<RecType, Metric>::eigenmodes()
{
    return blaze2RecType<RecType>(eigenmodes_mat());
}
/// @brief Copies an STL vector of records into a dense blaze matrix, one
///        record per row; the record length is taken from the first element.
template <typename RecType, typename Metric>
blaze::DynamicMatrix<typename PCFA<RecType, Metric>::value_type>
PCFA<RecType, Metric>::vector_to_blaze(const std::vector<RecType> &In)
{
    const size_t rows = In.size();
    const size_t cols = In[0].size();
    blaze::DynamicMatrix<value_type> Out(rows, cols, 0);
    for (size_t r = 0; r < rows; ++r)
        for (size_t c = 0; c < cols; ++c)
            Out(r, c) = In[r][c];
    return Out;
}
/// @brief Converts a blaze matrix to a vector of records; this overload is
///        enabled for STL-vector-like record types (container code 1) and
///        fills each record row via push_back.
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 1,
                        std::vector<R>>::type // here we support only STL vector
PCFA<RecType, Metric>::blaze2RecType(const blaze::DynamicMatrix<typename PCFA<R, Metric>::value_type> &In)
{
    std::vector<RecType> Out;
    for (size_t i = 0; i < In.rows(); ++i) {
        RecType rec;
        for (size_t j = 0; j < In.columns(); ++j)
            rec.push_back(In(i, j));
        Out.push_back(rec);
    }
    return Out;
}
/// @brief Converts a blaze matrix to a vector of records; this overload is
///        enabled for blaze row-vector record types (container code 2) and
///        uses sized construction plus indexed assignment.
template <typename RecType, typename Metric>
template <typename R>
typename std::enable_if<determine_container_type<R>::code == 2, std::vector<R>>::type
PCFA<RecType, Metric>::blaze2RecType(const blaze::DynamicMatrix<typename PCFA<R, Metric>::value_type> &In)
{ // only blaze row-vector
    std::vector<RecType> Out;
    for (size_t i = 0; i < In.rows(); ++i) {
        RecType rec(In.columns()); // blaze specific
        for (size_t j = 0; j < In.columns(); ++j)
            rec[j] = In(i, j); // blaze specific
        Out.push_back(rec);
    }
    return Out;
}
/// @brief Convenience factory: deduces the element type of a blaze matrix and
///        trains a column-wise PCFA_col on it.
template <typename BlazeMatrix>
PCFA_col<typename BlazeMatrix::ElementType> PCFA_col_factory(const BlazeMatrix &TrainingData, size_t n_features)
{
    return PCFA_col<typename BlazeMatrix::ElementType>(TrainingData, n_features);
}
/// @brief Convenience factory for a row-wise PCFA trained on a blaze matrix.
template <typename ElementType>
PCFA<blaze::DynamicMatrix<ElementType>, void> PCFA_factory(blaze::DynamicMatrix<ElementType> TrainingData,
                                                           size_t n_features) // special factory for Blaze matrix
{
    return PCFA<blaze::DynamicMatrix<ElementType>, void>(TrainingData, n_features);
}
/// @brief Convenience factory for a row-wise PCFA trained on a vector of
///        allocator-based STL containers (e.g. std::vector records).
template <template <typename, typename> class Container, typename ValueType, typename Allocator>
PCFA<Container<ValueType, Allocator>, void> PCFA_factory(std::vector<Container<ValueType, Allocator>> &TrainingData,
                                                         size_t n_features)
{
    return PCFA<Container<ValueType, Allocator>, void>(TrainingData, n_features);
}
/// @brief Convenience factory for a row-wise PCFA trained on a vector of
///        flag-parameterized containers (e.g. blaze vector records).
template <template <typename, bool> class Container, typename ValueType, bool F>
PCFA<Container<ValueType, F>, void> PCFA_factory(std::vector<Container<ValueType, F>> &TrainingData, size_t n_features)
{
    return PCFA<Container<ValueType, F>, void>(TrainingData, n_features);
}
} // namespace metric
#endif
| 13,156
|
C++
|
.cpp
| 302
| 41.288079
| 119
| 0.726343
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,486
|
ensembles.cpp
|
metric-space-ai_metric/metric/mapping/ensembles/ensembles.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_MAPPING_ENSEMBLES_ENSEMBLES_CPP
#define _METRIC_MAPPING_ENSEMBLES_ENSEMBLES_CPP
#include "../ensembles.hpp"
#include <algorithm>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <tuple>
#include <variant>
#include <vector>
#include <iostream>
#include "DT/edm_wrappers.hpp"
namespace metric {
// Subsample
/**
 * @brief Draws a weighted subsample of the dataset via roulette-wheel
 *        selection: builds a cumulative roulette from the weights and draws
 *        records until floor(data.size() * portion) records are picked (or
 *        the roulette runs out of entries).
 */
template <class Record>
template <typename ConType>
void Subsample<Record>::operator()(
    const ConType &data, // input dataset
    const std::vector<std::function<double(Record)>>
        &features, // accessors // no need in features, used as placeholder for possible new overrides
    const std::function<bool(Record)> &response, // accessors // here response is not used, but it is in overrides
    const std::vector<double> &weights, // probability that the record gets to the subsample
    double portion, // size of subsample / overall
    double portion_major, // placeholder for overrides, not in use in base class
    ConType &out_data, // output subsample
    bool replacement // mode of subsampling: with replacement (bootstrap) or without
) const
{
    // cumulative normalized roulette: sector i ends at (w_0 + ... + w_i) / sum(w)
    std::vector<El> roulette;
    double sum = 0;
    for (size_t i = 0; i < weights.size(); i++)
        sum += weights[i];
    double intv = 0;
    for (size_t i = 0; i < weights.size(); i++) {
        intv += weights[i];
        El el;
        el.original_idx = i;
        el.value = intv / sum;
        roulette.push_back(el);
    }
    // clamp portion into [0, 1]
    if (portion < 0) {
        portion = 0;
        std::cout << "\nWarning: attempt to specify portion less than 0, set to 0\n";
    }
    if (portion > 1) {
        portion = 1;
        std::cout << "\nWarning: attempt to specify portion greater than 1, set to 1\n";
    }
    int out_size = std::floor(data.size() * portion);
    for (int out_idx = 0; out_idx < out_size; out_idx++) {
        int i = montecarlo(roulette, replacement);
        if (i < 0) { // montecarlo signals an exhausted roulette with -1
            std::cout << "\nWarning: unable to get unreplaced sample: no one left in dataset\n";
            break;
        }
        out_data.push_back(data[i]);
    }
}
/**
 * @brief Draws one record index from the cumulative roulette.
 *
 * NOTE(review): when `replacement` is true the drawn sector is erased from the
 * roulette, which implements drawing *without* replacement (and the remaining
 * sector bounds are not renormalized after the erase). That looks inverted
 * relative to the caller's "with replacement (bootstrap)" flag comment —
 * confirm the intended flag semantics before relying on or changing it.
 *
 * @param roulette    cumulative roulette sectors, possibly modified (erase)
 * @param replacement see NOTE above
 * @return the original index of the drawn record, or -1 if no sector matched
 */
template <class Record> int Subsample<Record>::montecarlo(std::vector<El> &roulette, bool replacement) const
{
    double r = (double)std::rand() / (RAND_MAX); // uniform draw in [0, 1]
    size_t rSize = roulette.size();
    size_t i;
    for (i = 0; i < rSize; i++) {
        if (r <= roulette[i].value) { // first sector whose upper bound covers r
            int ret = roulette[i].original_idx;
            if (replacement) {
                roulette.erase(roulette.begin() + i);
            }
            return ret;
        }
    }
    return -1;
}
/**
 * @brief Debug helper: prints every roulette sector (cumulative value and the
 *        original record index) to stdout.
 *
 * @param roulette roulette wheel to dump (not modified)
 */
template <class Record>
void Subsample<Record>::print_roulette(std::vector<El> &roulette) const // prints roulette, for debug purpose only
{
    std::cout << std::endl << "roulette:" << std::endl;
    // size_t index fixes the signed/unsigned comparison of the old `int i` loop
    for (size_t i = 0; i < roulette.size(); i++) {
        std::cout << "value: " << roulette[i].value << ", orig idx: " << roulette[i].original_idx << std::endl;
    }
    std::cout << std::endl;
}
// SubsampleRUS
/**
 * @brief Class-balanced subsampling: builds one roulette per class and draws
 *        floor(size * portion * portion_major) records from the majority class
 *        (by record count) and the remainder from the minority class, then
 *        shuffles the combined result.
 */
template <class Record>
template <typename ConType>
void SubsampleRUS<Record>::operator()(
    const ConType &data,
    const std::vector<std::function<double(Record)>>
        &features, // accessors // not in use here, but is a part of interface
    const std::function<bool(Record)> &response, // accessors // here response is not used, but it is in overrides
    const std::vector<double> &weights, // probability that the record gets to the subsample
    double portion, // size of subsample / overall
    double portion_major, // share of major class
    ConType &out_data, // output subsample
    bool replacement // mode of subsampling: with replacement (bootstrap) or without
)
{
    if (data.size() > 0)
        if (data[0].size() < 1) // special check if there is the field in dataset behind the accessor
        {
            out_data = {};
            return;
        }
    double sum_class1 = 0;
    double sum_class0 = 0;
    int count_class1 = 0;
    int count_class0 = 0;
    for (size_t i = 0; i < weights.size(); i++) // determine major class, find sum of weights for each class
        if (response(data[i])) {
            sum_class1 += weights[i];
            count_class1++;
        } else {
            sum_class0 += weights[i];
            count_class0++;
        }
    bool major_class;
    double sum_major;
    double sum_minor;
    if (count_class0 > count_class1) {
        major_class = false;
        sum_major = sum_class0;
        sum_minor = sum_class1;
    } else {
        major_class = true;
        sum_major = sum_class1;
        sum_minor = sum_class0;
    }
    typedef typename Subsample<Record>::El El; // defined in base class template
    std::vector<El> roulette_major; // independent roulettes for each class
    std::vector<El> roulette_minor;
    double intv_major = 0;
    double intv_minor = 0;
    for (size_t i = 0; i < weights.size(); i++) // fullfill both roulettes in one loop of records
    {
        El el;
        el.original_idx = i;
        if (response(data[i]) == major_class) // add element to major class roulette
        {
            intv_major += weights[i]; // increment the sector in roulette
            el.value = intv_major / sum_major;
            roulette_major.push_back(el);
        } else // ... to minor class (assuming we have just 2 classes)
        {
            intv_minor += weights[i];
            el.value = intv_minor / sum_minor;
            roulette_minor.push_back(el);
        }
    } // roulettes ready
    // clamp both portions to sane ranges (silently, unlike the base class)
    if (portion < 0) {
        // Warning: attempt to specify portion less than 0, set to 0
        portion = 0;
    }
    if (portion > 1) {
        // Warning: attempt to specify portion greater than 1, set to 1
        portion = 1;
    }
    if (portion_major <= 0) {
        // Warning: attempt to specify portion for major class less than 0, set to 0.5
        portion_major = 0.5;
    }
    if (portion_major >= 1) {
        // Warning: attempt to specify portion for major class greater than 1, set to 0.5
        portion_major = 0.5;
    }
    int out_size = std::floor(data.size() * portion); // size of output subsample set
    int out_size_major = std::floor(data.size() * portion * portion_major); // number of major class elements
    int out_size_minor = out_size - out_size_major;
    if (out_size_minor < 0)
        out_size_minor = 0;
    for (int out_idx = 0; out_idx < out_size_major; out_idx++) // pick the required number of major class elements
    {
        int i = this->montecarlo(roulette_major, replacement);
        if (i < 0) { // roulette exhausted
            break;
        }
        out_data.push_back(data[i]);
    }
    for (int out_idx = 0; out_idx < out_size_minor; out_idx++) // pick the required number of minor class elements
    {
        int i = this->montecarlo(roulette_minor, replacement);
        if (i < 0) { // roulette exhausted
            break;
        }
        out_data.push_back(data[i]);
    }
    // randomize the order of the combined subsample
    std::random_device rng;
    std::mt19937 urng(rng());
    std::shuffle(out_data.begin(), out_data.end(), urng);
}
// TestCl - example weak learner, used to exercise the ensemble interfaces.
// var_idx_ selects the feature column used for thresholding; invert_ is stored
// for later use (predict() does not consume it yet, see TODO there).
template <class Record>
TestCl<Record>::TestCl(int var_idx_, bool invert_) : var_idx(var_idx_), invert(invert_)
{
}
template <class Record>
template <typename ConType>
void TestCl<Record>::train(ConType &data, std::vector<std::function<double(Record)>> &features,
                           std::function<bool(Record)> &response)
{ // dummy code, aimed to test access to data; response is unused here
    // Scan the selected feature over all records and remember the midpoint of
    // its [min, max] range; predict() thresholds against this midpoint.
    auto max_val = -std::numeric_limits<double>::max();
    auto min_val = std::numeric_limits<double>::max();
    for (auto i = data.cbegin(); i != data.cend(); i++) {
        double current_val = features[var_idx](*i);
        if (current_val < min_val)
            min_val = current_val;
        if (current_val > max_val)
            max_val = current_val;
    }
    middle = min_val + (max_val - min_val) / 2;
    // debug trace of the training set and the learned threshold
    std::cout << "TestCl trained on: " << std::endl;
    for (auto i = data.cbegin(); i != data.cend(); i++)
        std::cout << features[var_idx](*i) << std::endl;
    std::cout << " - found middle: " << middle << std::endl; // fixed typo: was "foung middle"
}
template <class Record>
template <typename ConType>
void TestCl<Record>::predict(ConType data, std::vector<std::function<double(Record)>> features,
                             std::vector<bool> &predictions)
{ // dummy code, uses only one feature for prediction
    // Threshold the single selected feature against the midpoint learned in train().
    predictions.clear();
    for (auto it = data.cbegin(); it != data.cend(); ++it) {
        const double feature_value = features[var_idx](*it);
        predictions.push_back(feature_value >= middle); // TODO add invert flag usage
    }
}
template <class Record> std::shared_ptr<metric::TestCl<Record>> TestCl<Record>::clone()
{
    // Non-learned classifier factory, meant for ensemble building within the
    // strong classifier's train method: a fresh copy with the same metaparameters.
    return std::make_shared<TestCl<Record>>(var_idx, invert);
}
// Boosting
template <class Record, class WeakLearner,
          typename Subsampler> // data record type, single weak learner type, subsampler functor type
Boosting<Record, WeakLearner, Subsampler>::Boosting(int ensemble_size_, double share_overall_, double share_minor_,
                                                    WeakLearner weak_classifier_)
    : ensemble_size(ensemble_size_), // number of learners to be created
      share_overall(share_overall_), // passed to subsampler
      share_minor(share_minor_), // passed to subsampler
      weak_classifier(weak_classifier_) // classifier object of type WeakLearner, created by user with desired
                                        // metaparameters. It will be cloned multiple (ensemble_size_) times into an
                                        // ensemble, and each instance will be learned using the unique subsample
{
    // Sanitize metaparameters: shares must lie strictly inside (0, 1).
    if (ensemble_size_ < 2) {
        // Warning: specified ensemble size is less than 2, set to minimal possible size 2
        ensemble_size = 2;
    }
    if (share_overall_ <= 0 || share_overall_ >= 1) {
        // Warning: specified wrong overall share value, set to 0.5
        share_overall = 0.5;
    }
    if (share_minor_ <= 0 || share_minor_ >= 1) { // fixed: upper bound previously tested share_overall_
        // Warning: specified wrong minor class share value, set to 0.5
        share_minor = 0.5;
    }
}
// AdaBoost-style training loop: each round draws a weighted subsample, trains a
// clone of the weak learner on it, scores the clone by its weighted error on the
// full dataset, and exponentially re-weights samples towards misclassified ones.
template <class Record, class WeakLearner,
          typename Subsampler> // data record type, single weak learner type, subsampler functor type
template <typename ConType> // container type
void Boosting<Record, WeakLearner, Subsampler>::train(
    ConType &data, // labeled data. .size() method is used to determine the size of dataset
    std::vector<std::function<double(Record)>>
        &features, // feature accessors, .size() method is used in order to determine number of features
    std::function<bool(Record)> &response, // label accessors
    bool replacement // passed to subsampler
)
{
    n_samples = data.size();
    n_features = features.size();
    std::vector<double> D_t; // sample weights
    double init_val = 1.0 / n_samples; // initial weights
    for (int i = 0; i < n_samples; i++) {
        D_t.push_back(init_val); // initial distribution: uniform over all samples
    }
    ens_alpha.clear();
    ens_classifiers.clear();
    auto new_ensemble_size =
        ensemble_size; // save desired size, since it can be decreased if subsampler fails to obtain enough subsamples
    for (size_t t = 1; t <= ensemble_size; t++) //
    {
        ConType subset = {};
        Subsampler subsampler;
        subsampler(data, features, response, D_t, share_overall, 1 - share_minor, subset,
                   replacement); // generate subset
        if (subset.size() < 1) { // subsampler produced nothing: shrink the ensemble and skip this round
            new_ensemble_size--;
            continue;
        }
        auto wcl = weak_classifier.clone(); // fresh untrained copy of the weak learner prototype
        wcl->train(subset, features, response);
        std::vector<bool> predictions;
        double eps = 0; // weighted training error of this round's learner on the FULL dataset
        wcl->predict(data, features, predictions);
        for (int i = 0; i < n_samples; i++) {
            if (predictions[i] != response(data[i])) // (pred != response(data[i]))
                eps += D_t[i];
        }
        eps = eps / n_samples; // normalize
        // in some pseudocodes here we check eps >= 0.5, but as we multiply by alpha, that can be negative for inverted
        // classifiers, we can use even inverted classifiers (with another inversion applied)
        double alpha = std::log((1 - eps) / eps) / 2; // learner vote weight; negative alpha flips a worse-than-chance learner
        ens_alpha.push_back(alpha);
        ens_classifiers.push_back(wcl);
        double D_t_sum = 0;
        // exponential re-weighting: weights of correctly classified records shrink,
        // weights of misclassified ones grow (sign of label * prediction product)
        for (int i = 0; i < n_samples; i++) {
            D_t[i] = D_t[i] * exp(-alpha * (response(data[i]) ? 1 : -1) * (predictions[i] ? 1 : -1));
            D_t_sum += D_t[i];
        }
        if (D_t_sum !=
            0) // TODO profile and (if needed) replace equality check for double with check if all alphas are inf
            for (int i = 0; i < n_samples; i++) // update weights: renormalize to a probability distribution
            {
                D_t[i] = D_t[i] / D_t_sum;
            }
        else // special case: weights are undefined, reset to the uniform distribution
        {
            for (int i = 0; i < n_samples; i++)
                D_t[i] = init_val;
        }
    }
    ensemble_size = new_ensemble_size; // may end up smaller than requested if rounds were skipped
}
template <class Record, class WeakLearner,
          typename Subsampler> // data record type, single weak learner class, subsampler functor
template <typename ConType>
void Boosting<Record, WeakLearner, Subsampler>::predict(
    ConType &data, // dataset of unlabeled of labeled data for prediction. Labels will not be accessed
    std::vector<std::function<double(Record)>> &features, // feature accessors
    std::vector<bool> &predictions) // out var
{
    // Degenerate input (no features or no records): prediction is undefined,
    // fill the output with random labels.
    if (features.size() <= 0 || data.size() <= 0) {
        for (size_t i = 0; i < predictions.size(); i++)
            predictions[i] = std::rand() % 2 == 1;
        return;
    }
    const size_t n = data.size();
    predictions = std::vector<bool>(n, false);
    auto vote_sum = std::vector<double>(n, 0); // alpha-weighted +1/-1 votes per record
    for (size_t t = 0; t < ensemble_size; t++) {
        std::vector<bool> member_out;
        ens_classifiers[t]->predict(data, features, member_out);
        for (size_t i = 0; i < n; i++)
            vote_sum[i] += (member_out[i] ? 1 : -1) * ens_alpha[t];
    }
    // final decision: sign of the weighted vote sum
    for (size_t i = 0; i < n; i++)
        predictions[i] = vote_sum[i] >= 0;
}
template <class Record, class WeakLearner,
          typename Subsampler> // data record type, single weak learner class, subsampler functor
std::shared_ptr<Boosting<Record, WeakLearner, Subsampler>> Boosting<Record, WeakLearner, Subsampler>::clone()
{
    // Factory: rebuild a non-trained Boosting with identical metaparameters.
    // The weak learner prototype is duplicated through its own clone() factory.
    auto weak_copy = weak_classifier.clone();
    return std::make_shared<Boosting>(ensemble_size, share_overall, share_minor, *weak_copy);
}
// Bagging_tuple - old interface based on passing weak learner types within a tuple
template <class Record, class WeakLearnerTuple, typename Subsampler>
Bagging_tuple<Record, WeakLearnerTuple, Subsampler>::Bagging_tuple(int ensemble_size_, double share_overall_,
                                                                   double share_minor_,
                                                                   std::vector<double> type_weinght_,
                                                                   WeakLearnerTuple weak_classifiers_)
    : ensemble_size(ensemble_size_),     // overall number of learners to create
      share_overall(share_overall_),     // passed to subsampler
      share_minor(share_minor_),         // passed to subsampler
      type_weight(type_weinght_),        // relative share of each learner type in the ensemble
      weak_classifiers(weak_classifiers_) // tuple of weak classifier prototypes
{
}
// Bagging over a compile-time tuple of weak learner types: for each type, a
// portion of the ensemble proportional to type_weight is trained, each member
// on an independent subsample; for_index() dispatches at runtime to the correct
// tuple element (the types differ, so ordinary indexing is impossible).
template <class Record, class WeakLearnerTuple, typename Subsampler>
template <typename ConType>
void Bagging_tuple<Record, WeakLearnerTuple, Subsampler>::train(ConType &data,
                                                                std::vector<std::function<double(Record)>> &features,
                                                                std::function<bool(Record)> &response, bool replacement)
{
    n_samples = data.size();
    n_features = features.size();
    std::vector<double> D_t; // sample weights, vector is not needed here, left for compatibility
    double init_val = 1.0 / n_samples; // initial weights
    for (int i = 0; i < n_samples; i++) {
        D_t.push_back(init_val);
    }
    ensemble = {}; // init tuple of vectors
    double denom = 0; // normalizer so type_weight entries act as relative shares
    for (double s : type_weight)
        denom += s;
    for (int type = 0; type < weak_type_num; type++) {
        // number of members of this type = ensemble_size * (normalized type weight)
        for (int t = 1; t <= std::round(ensemble_size * type_weight[type] / denom); t++) {
            ConType subset = {};
            Subsampler subsampler;
            subsampler(data, features, response, D_t, share_overall, 1 - share_minor, subset, replacement);
            // subset ready for training
            std::vector<double> dummy = {}; // prediction out-param, unused in train mode
            for_index(type, weak_classifiers, ensemble, 1, subset, features, response, dummy); // sw == 1: train mode
        }
    }
}
template <class Record, class WeakLearnerTuple, typename Subsampler>
template <typename ConType>
void Bagging_tuple<Record, WeakLearnerTuple, Subsampler>::predict(ConType &data,
                                                                  std::vector<std::function<double(Record)>> &features,
                                                                  std::vector<bool> &predictions)
{
    // Degenerate input: nothing to predict from, fill with random labels.
    if (features.size() <= 0 || data.size() <= 0) {
        for (size_t i = 0; i < predictions.size(); i++)
            predictions[i] = std::rand() % 2 == 1;
        return;
    }
    const size_t n = data.size();
    predictions = std::vector<bool>(n, false);
    auto vote_sum = std::vector<double>(n, 0);
    for (int type = 0; type < weak_type_num; type++) {
        // collect per-type votes via the runtime tuple traversal (sw == 2: predict mode)
        auto type_votes = std::vector<double>(n, 0);
        std::function<bool(Record)> unused_response;
        for_index(type, weak_classifiers, ensemble, 2, data, features, unused_response,
                  type_votes); // TODO consider replace vector<double> with vector<int>
        for (size_t i = 0; i < n; i++)
            vote_sum[i] = vote_sum[i] + type_votes[i];
    }
    // majority vote across all types
    for (size_t i = 0; i < n; i++)
        predictions[i] = vote_sum[i] > 0;
}
// TODO implement clone() if needed to use as weak
template <class Record, class WeakLearnerTuple, typename Subsampler>
template <typename T1, typename T2, typename ConType>
void Bagging_tuple<Record, WeakLearnerTuple,
                   Subsampler>::myfunc( // used both for train and predict in order to access weak learner of any type
    T1 &t1, T2 &t2, int sw, // sw is train or predict mode switcher
    ConType &data, std::vector<std::function<double(Record)>> &features, std::function<bool(Record)> &response,
    std::vector<double> &prediction) // out
{
    // t1 is the weak learner prototype (one tuple element of weak_classifiers),
    // t2 is the matching vector of trained learners inside the 'ensemble' tuple.
    if (sw == 1) { // train // TODO divide myfunc into separate functions, or at least enum and case switcher
        // train mode: clone the prototype, train the clone on 'data', store it
        auto wcl = t1.clone();
        wcl->train(data, features, response);
        t2.push_back(wcl);
    } else {
        if (sw == 2) { // predict for the given type of learners
            // predict mode: accumulate unweighted +1/-1 votes of every stored learner of this type
            prediction = std::vector<double>(data.size(), 0);
            for (size_t t = 0; t < t2.size(); t++) {
                std::vector<bool> pr_bool;
                t2[t]->predict(data, features, pr_bool);
                for (size_t i = 0; i < data.size(); i++) {
                    prediction[i] += (pr_bool[i] ? 1 : -1);
                }
            }
        }
    }
};
// // tuple runtime processing members
// Recursion terminator of the compile-time tuple walk: this overload is selected
// (via enable_if) when index I has reached the tuple size, and does nothing.
template <class Record, class WeakLearnerTuple, typename Subsampler>
template <std::size_t I, typename ConType, typename... Tp, typename... Tp2>
inline typename std::enable_if<I == sizeof...(Tp), void>::type
Bagging_tuple<Record, WeakLearnerTuple, Subsampler>::for_index(int, std::tuple<Tp...> &, std::tuple<Tp2...> &, int,
                                                               ConType &, std::vector<std::function<double(Record)>> &,
                                                               std::function<bool(Record)> &, std::vector<double> &)
{
}
// Recursive step of the compile-time tuple walk: when the runtime 'index' has
// counted down to zero, the I-th tuple element is handled via myfunc();
// otherwise recursion continues to I + 1 with a decremented index.
template <class Record, class WeakLearnerTuple, typename Subsampler>
template <std::size_t I, typename ConType, typename... Tp, typename... Tp2>
inline typename std::enable_if <
    I<sizeof...(Tp), void>::type Bagging_tuple<Record, WeakLearnerTuple, Subsampler>::for_index(
        int index, std::tuple<Tp...> &t1, std::tuple<Tp2...> &t2,
        int sw, // 1 - train, 2 - predict
        ConType &data, std::vector<std::function<double(Record)>> &features, std::function<bool(Record)> &response,
        std::vector<double> &prediction) // out, only used in prediction mode (sw == 2)
// define for_index function of type void only if I < els in pack
{
    if (index == 0)
        myfunc<el<I, std::tuple<Tp...>>, el<I, std::tuple<Tp2...>>>(std::get<I>(t1), std::get<I>(t2), sw, data,
                                                                    features, response, prediction);
    for_index<I + 1>(index - 1, t1, t2, sw, data, features, response, prediction); // Tp... not needed
}
//*/
// Bagging - uses variant type for passing weak learners of arbitrary type
template <class Record, class WeakLearnerVariant, typename Subsampler> // vector of variant of weak learners
Bagging<Record, WeakLearnerVariant, Subsampler>::Bagging(int ensemble_size_, double share_overall_, double share_minor_,
                                                         std::vector<double> type_weinght_,
                                                         std::vector<WeakLearnerVariant> weak_classifiers_)
    : ensemble_size(ensemble_size_),     // overall number of learners to be created
      share_overall(share_overall_),     // passed to subsampler
      share_minor(share_minor_),         // passed to subsampler
      type_weight(type_weinght_),        // share of learners of each type in the ensemble
      weak_classifiers(weak_classifiers_) // one weak classifier prototype (variant) per type
{
}
template <class Record, class WeakLearnerVariant, typename Subsampler>
template <typename ConType>
void Bagging<Record, WeakLearnerVariant, Subsampler>::train(
    ConType &data, // labeled data. .size() method is used to determine the size of dataset
    std::vector<std::function<double(Record)>>
        &features, // feature accessors, .size() method is used in order to determine number of features
    std::function<bool(Record)> &response, // label accessors
    bool replacement // passed to subsampler
)
{
    n_samples = data.size();
    n_features = features.size();
    std::vector<double> D_t; // sample weights, kept for subsampler compatibility (stays uniform)
    double init_val = 1.0 / n_samples;
    for (int i = 0; i < n_samples; i++) {
        D_t.push_back(init_val);
    }
    ensemble = {};
    double denom = 0; // normalizer so type_weight entries act as relative shares
    for (double s : type_weight)
        denom += s;
    for (size_t i = 0; i < weak_classifiers.size(); i++) // learner type loop
    {
        for (int t = 1; t <= std::round(ensemble_size * type_weight[i] / denom);
             t++) // create and learn the needed number of learners of the current type
        {
            ConType subset = {};
            Subsampler subsampler;
            subsampler(data, features, response, D_t, share_overall, 1 - share_minor, subset, replacement);
            // Fixed: train each member on its generated subsample rather than on the
            // whole dataset — previously 'subset' was produced but never used (the
            // visitor captured 'data'), which defeats bagging; cf. Bagging_tuple::train
            // and Boosting::train, which both train on the subsample.
            // Also start each member from a fresh copy of the prototype instead of
            // re-training one shared copy across iterations.
            auto learner = weak_classifiers[i];
            auto train_visitor = [&subset, &features, &response](auto &member) {
                member.train(subset, features, response);
            };
            std::visit(train_visitor, learner);
            ensemble.push_back(learner);
        }
    }
}
template <class Record, class WeakLearnerVariant, typename Subsampler>
template <typename ConType>
void Bagging<Record, WeakLearnerVariant, Subsampler>::predict(ConType &data,
                                                              std::vector<std::function<double(Record)>> &features,
                                                              std::vector<bool> &predictions)
{
    // Degenerate input: nothing to predict from, fill with random labels.
    if (features.size() <= 0 || data.size() <= 0) {
        for (size_t i = 0; i < predictions.size(); i++)
            predictions[i] = std::rand() % 2 == 1;
        return;
    }
    const size_t n = data.size();
    predictions = std::vector<bool>(n, false);
    auto vote_balance = std::vector<int>(n, 0); // unweighted +1/-1 vote tally per record
    auto member_out = std::vector<bool>(n, 0);
    auto predict_visitor = [&](auto &member) { member.predict(data, features, member_out); };
    for (auto learner : ensemble) { // majority vote over all ensemble members
        std::visit(predict_visitor, learner);
        for (size_t i = 0; i < n; i++)
            vote_balance[i] += member_out[i] ? 1 : -1;
    }
    for (size_t i = 0; i < n; i++)
        predictions[i] = vote_balance[i] > 0;
}
template <class Record, class WeakLearnerVariant,
          typename Subsampler> // data record type, single weak learner class, subsampler functor
std::shared_ptr<Bagging<Record, WeakLearnerVariant, Subsampler>>
Bagging<Record, WeakLearnerVariant, Subsampler>::clone()
{
    // Factory: plain-copy the non-trained prototypes (no clone() call on the
    // variants) and rebuild a Bagging with identical metaparameters.
    std::vector<WeakLearnerVariant> prototype_copies(weak_classifiers.begin(), weak_classifiers.end());
    return std::make_shared<Bagging>(ensemble_size, share_overall, share_minor, type_weight, prototype_copies);
}
} // namespace metric
#endif
| 22,789
|
C++
|
.cpp
| 562
| 37.628114
| 120
| 0.684011
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,487
|
DT.cpp
|
metric-space-ai_metric/metric/mapping/ensembles/DT/DT.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Max Filippov
*/
#include "../DT.hpp"
#include "../../kmedoids.hpp"
#include "dimension.hpp"
#include <cassert>
#include <exception>
namespace metric {
template <class Record>
DT<Record>::DT(double entropy_threshold, double gain_threshold)
    : entropy_threshold(entropy_threshold), gain_threshold(gain_threshold)
{
    // entropy_threshold: in train(), a subset whose entropy falls at or below
    // this value becomes a leaf; gain_threshold: a split whose information gain
    // is at or below this value is not performed.
}
// Grow the metric decision tree: precompute pairwise distances per dimension,
// then grow nodes breadth-first from the root using a queue instead of
// recursion. Each node is split by the dimension with the lowest entropy
// weighted sum (greatest information gain); children correspond to medoids.
// Throws std::runtime_error on negative labels.
template <class Record>
template <typename ConType, typename VariantType>
void DT<Record>::train(const ConType &payments, std::vector<VariantType> dimensions,
                       std::function<int(const Record &)> &response)
{
    typedef double NumType; // TODO replace hardcode
    if (dimensions.size() == 0)
        return;
    // first by feature, then record against record matrix
    std::vector<std::vector<std::vector<NumType>>> distances = {};
    for (size_t f = 0; f < dimensions.size(); f++) // field (dimension) loop
    {
        // symmetric pairwise distance matrix for this dimension's metric
        std::vector<std::vector<NumType>> matrix(payments.size(), std::vector<NumType>(payments.size()));
        for (size_t i = 0; i < payments.size(); i++) {
            for (size_t j = i; j < payments.size(); j++) {
                // visit the variant to call the concrete metric's get_distance
                auto d_dist_vis = [&payments, i, j](auto &d) {
                    return (NumType)d.get_distance(payments[i], payments[j]);
                };
                matrix[i][j] = matrix[j][i] = std::visit(d_dist_vis, dimensions[f]);
            }
            bool found = false; // also collect unique labels while we are iterating records
            int r = response(payments[i]);
            if (r < 0) {
                throw std::runtime_error("Error in input dataset: negative labels are not allowed. Learning cancelled");
                return;
            }
            for (int l : unique_labels) {
                if (l == r) {
                    found = true;
                    break;
                }
            }
            if (!found)
                unique_labels.push_back(r);
        }
        distances.push_back(matrix);
    }
    // (new) code for dimensions - end
    // here the distances table is filled for each feature
    // now we can start growing the tree from the root
    std::queue<NodeDataUnit> subset_queue; // this is instead of recursion
    std::vector<int> root_subset; // first node subset
    for (size_t i = 0; i < payments.size(); i++)
        root_subset.push_back((int)i); // indices of subset: initially all records in dataset are selected
    std::shared_ptr<Node> current_node = std::make_shared<Node>();
    root = current_node;
    NodeDataUnit root_unit;
    root_unit.node = current_node;
    root_unit.subset = std::make_shared<std::vector<int>>(root_subset); // TODO make ctor from data size, if needed
    root_unit.entropy = std::numeric_limits<double>::max(); // root element will be processed anyway
    root_unit.debug_id = 0; // for debug purpose, TODO remove
    subset_queue.push(root_unit);
    int unit_debug_id = 0;
    while (!subset_queue.empty()) {
        NodeDataUnit unit = subset_queue.front();
        subset_queue.pop();
        // best split of this node's subset across all dimensions
        auto [new_subsets, new_entropies, new_medoids, field, entropy_weighted_sum] =
            split_subset(*unit.subset, distances, payments, response);
        // gain-based condition check: reject splits that do not reduce entropy enough
        double gain = unit.entropy - entropy_weighted_sum;
        if (gain <= gain_threshold) {
            // add leaf without processing subsets
            add_distribution_to_node(payments, response, unit.subset, unit.node); // changes *unit.node
            continue;
        }
        for (size_t i = 0; i < new_subsets.size();
             i++) // loop through subsets obtained from clustering function via process_node
        { // we assume sizes of all vectors in tuple are equal (to the number of classes in train dataset)
            unit.node->medoid_records.push_back(payments[new_medoids[i]]); // add medoid raw record
            unit.node->field_index = field; // field the division is made by
            if (new_entropies[i] <= entropy_threshold || new_subsets[i]->size() < unique_labels.size()) {
                // add leaf node
                auto new_node = std::make_shared<Node>();
                if (!(new_entropies[i] > 0)) {
                    if ((*(new_subsets[i])).size() > 0) {
                        // add non-stochastic leaf node
                        new_node->predicted_class =
                            response(payments[(*(new_subsets[i]))[0]]); // call label accessor for the first element
                        // assume subset is not empty and all labels are equal (because of zero entropy)
                    } else {
                        // empty subset occurred, add stochastic equally-distributed node
                        for (size_t l = 0; l < unique_labels.size(); l++)
                            new_node->prediction_distribution.push_back(1);
                    }
                } else {
                    add_distribution_to_node(payments, response, new_subsets[i], new_node); // changes *new_node
                }
                unit.node->children.push_back(new_node); // set pointer to the newly created child in the current node
            } else {
                // enqueue regular node (add subset to queue and node to tree)
                NodeDataUnit new_unit;
                new_unit.node = std::make_shared<Node>(); // create new empty node within queue unit
                new_unit.subset = new_subsets[i]; // pointer to vector of subset indices
                new_unit.entropy = new_entropies[i]; // in order to compute gain on the next step
                unit.node->children.push_back(
                    new_unit.node); // set pointer to the newly created child in the current node
                new_unit.debug_id = ++unit_debug_id; // for debug purpose, TODO disable
                subset_queue.push(new_unit); // enqueue new data unit;
            }
        }
    }
}
// Predict a label for every input record: descend from the root choosing the
// child whose medoid is nearest in the node's split dimension; at a leaf either
// return the stored class, or sample one from the leaf's label distribution
// (stochastic prediction for impure leaves).
template <class Record>
template <typename ConType, typename VariantType>
void DT<Record>::predict(const ConType &input_data, std::vector<VariantType> dimensions, std::vector<int> &predictions)
{
    typedef double NumType; // TODO replace hardcode
    if (root == nullptr || input_data.size() == 0)
        return;
    std::random_device rand_dev;
    std::mt19937 generator(rand_dev()); // used only for stochastic (impure) leaves
    for (size_t i = 0; i < input_data.size(); i++) // input data item loop (new) code for dimensions
    {
        std::shared_ptr<Node> current_node = root;
        while (current_node->children.size() > 0) // predicted_class == -1) // go through tree until leaf is reached
        {
            // check if children found but predicted class is defined
            assert(current_node->predicted_class == -1);
            double min_distance = std::numeric_limits<double>::max();
            int nearese_medoid_index = -1;
            for (size_t m = 0; m < current_node->medoid_records.size(); m++) // medoid loop
            {
                Record r = current_node->medoid_records[m];
                // distance from the input record to this medoid in the node's split dimension
                auto d_dist_vis = [&input_data, i, r](auto &d) { return (NumType)d.get_distance(input_data[i], r); };
                NumType distance = std::visit(d_dist_vis, dimensions[current_node->field_index]);
                if (distance < min_distance) {
                    min_distance = distance;
                    nearese_medoid_index = m;
                }
            }
            // here we have nearest medoid index, so we can chose the child, assuming order of childs is same as order
            // of medoids
            current_node = current_node->children[nearese_medoid_index];
        }
        if (current_node->predicted_class != -1)
            predictions.push_back(current_node->predicted_class); // pure leaf: deterministic answer
        else {
            // montecarlo prediction generation based on leaf subset label distribution:
            // build a cumulative (roulette-wheel) vector over the label counts
            int r_max = 0;
            std::vector<int> roulette = {};
            for (int el : current_node->prediction_distribution) {
                r_max += el;
                roulette.push_back(r_max);
            }
            std::uniform_int_distribution<int> distr(0, r_max - 1);
            int random_val = (int)distr(generator);
            int r_idx = 0;
            while (random_val >= roulette[r_idx]) // find the sector the random value falls into
                r_idx++;
            // here r_idx is a randomly chosen index in current_node->prediction_distribution
            predictions.push_back(unique_labels[r_idx]);
        }
    }
}
template <class Record>
template <typename NumType>
std::tuple<std::vector<std::vector<NumType>>, std::vector<int>>
DT<Record>::distance_matrix_of_subset(const std::vector<int> &subset,
                                      const std::vector<std::vector<NumType>> &feature_dist)
{
    // Extract the symmetric distance submatrix restricted to the given subset
    // of records (ready to be fed into kmedoids_), together with the mapping
    // from local submatrix indices back to original record indices.
    const size_t n = subset.size();
    std::vector<std::vector<NumType>> sub_matrix(n, std::vector<NumType>(n));
    std::vector<int> orig_idx(subset); // local index -> original record index
    for (size_t row = 0; row < n; row++)
        for (size_t col = row; col < n; col++)
            sub_matrix[row][col] = sub_matrix[col][row] = feature_dist[subset[row]][subset[col]];
    return std::tuple(sub_matrix, orig_idx);
}
// For the given node subset, try a k-medoids split in every dimension and pick
// the dimension with the lowest entropy weighted sum (i.e. the greatest
// information gain). Returns the winning dimension's new subsets, their
// entropies, the medoids' global indices, the dimension index, and the sum.
template <class Record>
template <typename ConType, typename NumType>
std::tuple<std::vector<std::shared_ptr<std::vector<int>>>, std::vector<double>, std::vector<int>, int, double>
DT<Record>::split_subset(const std::vector<int> &subset,
                         const std::vector<std::vector<std::vector<NumType>>> &distances, const ConType &data,
                         const std::function<int(Record)> &response)
{
    // input - indices of subset, full distance matrices for all features, raw data, label acceessor
    // output - for each new subset: pointer to vector of element indices, entropy value, medoid index, and single
    // values: index of the field used for classification and entropy weighted sum
    std::vector<std::vector<std::vector<int>>> new_subsets(
        distances.size(), std::vector<std::vector<int>>(unique_labels.size(), std::vector<int>(0)));
    // by field, then by nearest medoid, then global indices of new subset elements
    std::vector<std::vector<double>> split_entropy(distances.size(), std::vector<double>(unique_labels.size(), 0));
    // by field, then by nearest medoid (one value per new subset for each field)
    // we keep entropy values of all fields in order to return it for the winning one
    // std::vector<double> entropy_weighted_sum(distances.size(), 0);
    // one value per field
    std::vector<std::vector<int>> medoid_global_idx(distances.size(), std::vector<int>(unique_labels.size(), -1));
    // by field, then by nearest medoid
    // global indices of medoids for each field
    double min_entropy_weighted_sum = std::numeric_limits<double>::max(); // criterion for the best field selection
    int greatest_information_gain_idx = -1;
    for (size_t f = 0; f < distances.size(); f++) // field loop
    {
        // clustering via kmedoids_: one cluster per unique label
        auto [feature_dist_table, feature_dist_orig_idx] = distance_matrix_of_subset(subset, distances[f]);
        auto results = metric::kmedoids_(feature_dist_table, unique_labels.size(), 1000);
        auto medoids = std::get<1>(results); // medoid indices in feature_dist_table; original indices can be obtained
                                             // via feature_dist_orig_idx
        std::vector<std::vector<int>> label_count(unique_labels.size(), std::vector<int>(medoids.size(), 0));
        // one value per each class for each new subset
        for (auto el : subset) // record loop
        {
            // find and compare distances from the current record to medoids
            int chosen_medoid = -1;
            NumType min_dist;
            for (size_t medoid_idx = 0; medoid_idx < medoids.size(); medoid_idx++) // medoid loop
            { // compare distance to medoids. We assume medoids.size() MUST NOT be greater than unique_labels.size()
                NumType d =
                    distances[f][el][feature_dist_orig_idx[medoids[medoid_idx]]]; // between each el and each medoid
                if (chosen_medoid == -1) // first time
                {
                    min_dist = d;
                    chosen_medoid = medoid_idx;
                } else {
                    if (d < min_dist) {
                        min_dist = d;
                        chosen_medoid = medoid_idx;
                    }
                }
            } // end of medoid loop
            // here we have index of the nearest mediod in chosen medoid; this index is related to 'medoids' vector,
            // where indices in feature_dist_table are saved
            // add el to new subset related to the nearest medoid and count labels for each new subset
            new_subsets[f][chosen_medoid].push_back(el); // add the current element to appropriate new subset
            for (size_t l = 0; l < unique_labels.size(); l++) {
                if (unique_labels[l] == response(data[el])) {
                    label_count[l][chosen_medoid]++; // query response and count elements of each class (by labels) in
                                                     // each new subset
                    break; // values in label_count are unique
                }
            }
        } // end of record loop
        // here we have subsets for the current field based on distance to medoids listed in 'medoids' vector
        // and counts of elements of each class
        double current_entropy_weighted_sum = 0;
        for (size_t medoid_idx = 0; medoid_idx < medoids.size();
             medoid_idx++) // medoid loop: collecting output data for each new subset
        { // now we do not know which field will win, so we collect data for each field
            // find entropy of each new subset and weighted sum of them (gain subtrahend)
            double new_entropy = 0;
            for (size_t l = 0; l < unique_labels.size(); l++) {
                // Shannon entropy accumulated over class shares within the new subset
                double probability = (double)label_count[l][medoid_idx] /
                                     new_subsets[f][medoid_idx].size(); // share of each class by labels
                if (probability > 0)
                    new_entropy = new_entropy - probability * std::log2(probability);
                // else do nothing, because lim(share*log2(share))==0
                // TODO add zero entropy "leaf reached" flag if needed
                // Something went wrong: negative entropy
                assert(new_entropy >= 0);
                assert(!isnan(new_entropy));
            }
            split_entropy[f][medoid_idx] = new_entropy;
            // weighted by the new subset's share of the parent subset
            current_entropy_weighted_sum =
                current_entropy_weighted_sum + new_entropy * new_subsets[f][medoid_idx].size() / subset.size();
            // also fill global medoid indices (in order to output them for winning field)
            medoid_global_idx[f][medoid_idx] = feature_dist_orig_idx[medoids[medoid_idx]];
        } // end of medoid loop
        if (current_entropy_weighted_sum < min_entropy_weighted_sum) // update winning field
        {
            min_entropy_weighted_sum = current_entropy_weighted_sum;
            greatest_information_gain_idx = f;
        }
        // here we also have entropy of each subset and gain subtrahend for all subsets (filled for the current field)
    } // end of field loop
    // here we have new subsets, entropy of each subset and gain subtrahends - for all fields
    // and index of the winning field
    // make output ptrs
    std::vector<std::shared_ptr<std::vector<int>>> output_subset_ptrs = {};
    for (size_t medoid_idx = 0; medoid_idx < unique_labels.size();
         medoid_idx++) // medoid loop: making ptrs: doing it only for winning field must be faster than creating ptrs
                       // for all fields inside record loop
        output_subset_ptrs.push_back(
            std::make_shared<std::vector<int>>(new_subsets[greatest_information_gain_idx][medoid_idx]));
    return std::tuple(output_subset_ptrs, // new_subsets[best_field_idx],
                      split_entropy[greatest_information_gain_idx], medoid_global_idx[greatest_information_gain_idx],
                      greatest_information_gain_idx, min_entropy_weighted_sum);
}
template <class Record>
template <typename ConType>
inline void DT<Record>::add_distribution_to_node( // mutates *new_node!
    const ConType &payments, const std::function<int(Record)> &response,
    const std::shared_ptr<std::vector<int>> &new_subset,
    const std::shared_ptr<Node> &new_node // subject to change
)
{
    // Store the label histogram of the subset in the node so that predict()
    // can sample a stochastic answer from this distribution.
    if (new_subset->size() < 1)
        std::cout << "\nSomething went wrong: empty dataset obtained!\n"; // should never happen
    for (size_t l = 0; l < unique_labels.size(); l++)
        new_node->prediction_distribution.push_back(0); // one zeroed counter per known label
    for (int record_idx : *new_subset) // count label occurrences over the subset
        for (size_t l = 0; l < unique_labels.size(); l++)
            if (response(payments[record_idx]) == unique_labels[l])
                new_node->prediction_distribution[l]++;
}
} // namespace metric
| 15,220
|
C++
|
.cpp
| 318
| 44.176101
| 119
| 0.691169
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,488
|
correlation_weighted_accuracy.cpp
|
metric-space-ai_metric/metric/mapping/ensembles/DT/correlation_weighted_accuracy.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch, Signal Empowering Technology
*/
#ifndef METRIC_MAPPING_ENSEMBLES_DT_CORRELATION_WEIGHTED_ACCURACY_CPP
#define METRIC_MAPPING_ENSEMBLES_DT_CORRELATION_WEIGHTED_ACCURACY_CPP
#include "correlation_weighted_accuracy.hpp"
#include <assert.h>
#include <cmath>
#include <vector>
#include <algorithm>
namespace metric {
namespace CWA_details {
// Builds the k×k confusion matrix of two equal-length label sequences.
// Row index = label from `a` (ground truth), column index = label from `b`
// (assignment); indices follow the sorted set of distinct labels occurring
// in either input. Requires random-access containers of equal size.
template <typename Container>
std::vector<std::vector<typename Container::value_type>> confusion_matrix(Container a, Container b)
{
    typedef typename Container::value_type T;
    assert(a.size() == b.size());
    std::vector<T> ab; // sorted, duplicate-free union of both label sequences
    ab.reserve(a.size() + b.size()); // was a.size() + a.size(): same value only because sizes match
    ab.insert(ab.end(), a.begin(), a.end());
    ab.insert(ab.end(), b.begin(), b.end());
    std::sort(ab.begin(), ab.end());
    ab.erase(std::unique(ab.begin(), ab.end()), ab.end());
    const size_t k = ab.size(); // number of distinct labels
    std::vector<std::vector<T>> CM(k, std::vector<T>(k, 0)); // matrix with zeros
    for (size_t i = 0; i < a.size(); ++i) {
        // ab is sorted and unique, so binary search locates each label in O(log k)
        // (the original linear std::find made this loop O(n*k))
        auto pos_g = std::distance(ab.begin(), std::lower_bound(ab.begin(), ab.end(), a[i]));
        auto pos_q = std::distance(ab.begin(), std::lower_bound(ab.begin(), ab.end(), b[i]));
        CM[pos_g][pos_q] += 1;
    }
    return CM;
}
// Fraction of correctly classified instances: trace(CM) / sum(CM).
// Assumes a square confusion matrix. Returns 0.0 for an empty/all-zero
// matrix instead of dividing by zero (original produced NaN/UB).
template <typename T> double accuracy(const std::vector<std::vector<T>> &CM)
{
    T hits = 0;
    T sum = 0;
    for (size_t i = 0; i < CM.size(); ++i) {
        for (size_t j = 0; j < CM.size(); ++j) {
            if (i == j) {
                hits += CM[i][j]; // diagonal = correct assignments
            }
            sum += CM[i][j];
        }
    }
    if (sum == T(0))
        return 0.0; // degenerate (empty) matrix: avoid division by zero
    return double(hits) / double(sum);
}
/*
Matthews correlation coefficient for multiclasses
Function to compute the K-category correlation coefficient based on the confusion matrix
(generalization of the Matthews correlation coefficient for multiclasses
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient)
Reference
Comparing two K-category assignments by a K-category correlation coefficient
J. Gorodkin, Computational Biology and Chemistry, 28:367-374, 2004.

Returns 0 when either denominator factor vanishes (e.g. all mass in one
row or column) — the usual convention — instead of the original NaN
produced by a 0/0 division.
*/
template <typename T> double MCC(const std::vector<std::vector<T>> &CM)
{
    const size_t cols = CM.size();
    // numerator: sum over k,l,m of C_kk*C_ml - C_lk*C_km (Gorodkin 2004)
    T MCC_numerator = 0;
    for (size_t k = 0; k < cols; ++k) {
        for (size_t l = 0; l < cols; ++l) {
            for (size_t m = 0; m < cols; ++m) {
                MCC_numerator = MCC_numerator + (CM[k][k] * CM[m][l]) - (CM[l][k] * CM[k][m]);
            }
        }
    }
    // first denominator factor: sum_k (column-k total) * (total of all other columns)
    T MCC_denominator_1 = 0;
    for (size_t k = 0; k < cols; ++k) {
        T MCC_den_1_part1 = 0;
        for (size_t l = 0; l < cols; ++l) {
            MCC_den_1_part1 += CM[l][k];
        }
        T MCC_den_1_part2 = 0;
        for (size_t f = 0; f < cols; ++f) {
            if (f != k) {
                for (size_t g = 0; g < cols; ++g) {
                    MCC_den_1_part2 += CM[g][f];
                }
            }
        }
        MCC_denominator_1 += MCC_den_1_part1 * MCC_den_1_part2;
    }
    // second denominator factor: same computation over rows
    T MCC_denominator_2 = 0;
    for (size_t k = 0; k < cols; ++k) {
        T MCC_den_2_part1 = 0;
        for (size_t l = 0; l < cols; ++l) {
            MCC_den_2_part1 += CM[k][l];
        }
        T MCC_den_2_part2 = 0;
        for (size_t f = 0; f < cols; ++f) {
            if (f != k) {
                for (size_t g = 0; g < cols; ++g) {
                    MCC_den_2_part2 += CM[f][g];
                }
            }
        }
        MCC_denominator_2 += MCC_den_2_part1 * MCC_den_2_part2;
    }
    // counts are non-negative, so a factor of 0 means correlation is undefined
    if (MCC_denominator_1 <= T(0) || MCC_denominator_2 <= T(0))
        return 0.0;
    return (double(MCC_numerator)) / (std::sqrt(double(MCC_denominator_1)) * std::sqrt(double(MCC_denominator_2)));
}
} // namespace CWA_details
// Geometric mean of plain accuracy and the (clamped) multi-class Matthews
// correlation, both computed from the confusion matrix of `a` vs `b`.
template <typename Container> double correlation_weighted_accuracy(Container a, Container b)
{
    const auto CM = CWA_details::confusion_matrix(a, b);
    const double accu = CWA_details::accuracy(CM);
    double corr = CWA_details::MCC(CM);
    corr = (corr < 0) ? 0.0 : corr; // negative correlation earns no credit
    return std::sqrt(accu * corr);
}
} // namespace metric
#endif
| 3,851
|
C++
|
.cpp
| 118
| 30.152542
| 106
| 0.644187
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,489
|
edm_wrappers.cpp
|
metric-space-ai_metric/metric/mapping/ensembles/DT/edm_wrappers.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Panda Team
*/
#ifndef _METRIC_MAPPING_ENSEMBLES_DT_EDM_WRAPPERS_CPP
#define _METRIC_MAPPING_ENSEMBLES_DT_EDM_WRAPPERS_CPP
#include <exception>
namespace metric {
// Trains the wrapped libEDM classifier: converts `payments` into a CDataset
// via read_data and constructs a fresh edmCl model. With no features the
// model is left unset and predict() falls back to random output.
template <typename Record, typename edmCl>
template <typename ConType>
void edmClassifier<Record, edmCl>::train(ConType &payments, std::vector<std::function<double(Record)>> &features,
                                         std::function<bool(Record)> &response)
{
    if (features.size() == 0)
        return;
    CDataset data = this->read_data(payments, features, response);
    if (features.size() > 0) // always true here (guarded above); kept as-is
        model = std::make_shared<edmCl>(data); // in get_prediction we check if model == nullptr and return default
    // undefined (random) prediction if so
}
// Classifies `data` with the trained model; get_prediction handles the
// missing-model / empty-input fallback (random labels).
template <typename Record, typename edmCl>
template <typename ConType>
void edmClassifier<Record, edmCl>::predict(ConType &data, std::vector<std::function<double(Record)>> &features,
                                           std::vector<bool> &predictions)
{
    get_prediction(data, features, model, predictions);
}
// Returns a fresh, untrained classifier of the same type; the trained model
// itself is deliberately NOT copied.
template <typename Record, typename edmCl>
std::shared_ptr<edmClassifier<Record, edmCl>> edmClassifier<Record, edmCl>::clone()
{
    // TODO check if not learned!
    std::shared_ptr<edmClassifier<Record, edmCl>> sptr = std::make_shared<edmClassifier<Record, edmCl>>();
    return sptr;
}
// Shared prediction helper for the derived classifier wrappers.
// Fills `predictions` (one bool per instance in `data`). Without features,
// data or a trained model the output is an explicitly undefined random
// guess per instance. Throws std::runtime_error if the model emits a label
// other than 0/1 (this wrapper is strictly binary).
template <typename Record, typename edmCl>
template <typename ConType>
void edmClassifier<Record, edmCl>::get_prediction(ConType &data, std::vector<std::function<double(Record)>> &features,
                                                  std::shared_ptr<edmCl> model, std::vector<bool> &predictions)
{ // aimed for usage in predict functions of derived classes
    predictions = std::vector<bool>(data.size(), false);
    if (features.size() <= 0 || data.size() <= 0 || model == nullptr) {
        for (size_t i = 0; i < predictions.size(); i++) // undefined prediction: random output
            predictions[i] = std::rand() % 2 == 1;
        return;
    }
    CDataset dsdata = this->read_data(data, features);
    // NOTE(review): Classify returns a raw pointer; ownership is not released
    // here — confirm against the libedm API whether this leaks.
    CPrediction *result = model->Classify(dsdata);
    std::vector<int> labels = result->GetPredictedLabelIndices();
    for (size_t i = 0; i < data.size(); i++) {
        if (labels[i] == 0)
            predictions[i] = false;
        else if (labels[i] == 1)
            predictions[i] = true;
        else
            // was followed by an unreachable `predictions[i] = false;` — removed
            throw std::runtime_error("ERROR IN EDM PREDICTION");
    }
}
// Converts a container of records into a libedm::CDataset.
// Each feature functor becomes one continuous attribute column; when a
// `response` functor is given, one extra label column (0/1) is appended.
// Running min/max per attribute are tracked while filling the matrix.
template <typename Record, typename edmCl>
template <typename ConType>
libedm::CDataset edmClassifier<Record, edmCl>::read_data(ConType &payments,
                                                         std::vector<std::function<double(Record)>> &features,
                                                         std::function<bool(Record)> response) const
{
    std::vector<AttrStr> attributes = {};
    std::vector<bool> min_set = {};
    size_t n_attr; // columns to read per record: features, plus label when training
    if (response != nullptr)
        n_attr = features.size() + 1;
    else
        n_attr = features.size();
    for (size_t j = 0; j < n_attr; j++) {
        AttrStr att_str = {
            // features
            2, // int AttType // 2 = continuous
            "attr" + std::to_string(j), // string Name
            -std::numeric_limits<double>::max(), // double Max // seeded with lowest value so any observation raises it
            std::numeric_limits<double>::max(), // double Min // seeded with highest value so any observation lowers it
            false, // bool MMSet // Have max and min value been set?
            (int)j, // int OtherPos // position of an attribute
            {} // std::vector<DiscValueStr> Disc // discrete attribute: list of all values
        };
        attributes.push_back(att_str);
        min_set.push_back(false);
    }
    std::vector<DiscValueStr> classes = {};
    // NOTE(review): label metadata (class list + label attribute) is built on the
    // response == nullptr (prediction) path, while the training path above adds the
    // label *column* instead — looks inverted; confirm against libedm's CDataset
    // expectations for training vs. classification datasets.
    if (response == nullptr) {
        DiscValueStr class0;
        class0.Name = "0";
        DiscValueStr class1;
        class1.Name = "1";
        classes = {class0, class1};
        AttrStr att_str = {
            // label
            3, // int AttType // 3 = class label
            "label", // string Name
            1, // double Max // seems to be not in use
            0, // double Min
            false, // bool MMSet // Have max and min value been set?
            (int)features.size(), // int OtherPos // position of an attribute
            classes // std::vector<DiscValueStr> Disc // discrete attribute: list of all values
        };
        attributes.push_back(att_str);
    }
    std::vector<InstanceStr> matrix = {};
    for (auto i = payments.cbegin(); i != payments.cend(); i++) // (size_t i = 0; i < payments.size(); i++)
    {
        std::vector<ValueData> rec = {};
        for (size_t j = 0; j < n_attr; j++) {
            ValueData write_val;
            double read_val;
            if (j < features.size()) {
                read_val = features[j](*i); //(payments[i]);
                write_val.Cont = read_val;
            } else {
                read_val = response(*i) ? 1 : 0; //(payments[i]) ? 1 : 0; // test (int)response
                write_val.Discr = read_val; // read_val; // labels are discrete
            }
            rec.push_back(write_val);
            if (read_val < attributes[j].Min) { // find min, max
                attributes[j].Min = read_val;
                min_set[j] = true;
            }
            if (read_val > attributes[j].Max) {
                attributes[j].Max = read_val;
                if (min_set[j]) // MMSet only once both bounds were observed
                    attributes[j].MMSet = true;
            }
        }
        matrix.push_back(rec);
    }
    CASE_INFO case_info = {
        (int)n_attr, // int ReadWidth // number of attribute in each row(including label)
        (int)n_attr, // int ValidWidth;//number of real attribute (ignored attributes are excluded)
        2, // int ClassNum
        (int)payments.size(), // int Height;//number of instances
        classes, // std::vector<DiscValueStr> Classes
        attributes, // std::vector<AttrStr> ReadAttrs;//all attributes in a row (including label)
        attributes // std::vector<AttrStr> ValidAttrs;//all attributes in a row (ignored attributes are excluded)
    };
    return libedm::CDataset(case_info, matrix);
}
// Stores the C4.5 hyper-parameters used when training builds the tree:
// minimum objects per node, epsilon, confidence factor, pruning switch.
template <typename Record> edmC45<Record>::edmC45(int UMINOBJS_, double UEpsilon_, double UCF_, double WillPrune_)
{
    UMINOBJS = UMINOBJS_;
    UEpsilon = UEpsilon_;
    UCF = UCF_;
    WillPrune = WillPrune_;
}
// Trains a libedm C4.5 decision tree on `payments` with the stored
// hyper-parameters; no-op when there are no features.
template <class Record>
template <typename ConType>
void edmC45<Record>::train(ConType payments, std::vector<std::function<double(Record)>> features,
                           std::function<bool(Record)> response)
{
    if (features.size() == 0)
        return;
    CDataset data = this->read_data(payments, features, response);
    model = std::make_shared<libedm::CC45>(data, UMINOBJS, UEpsilon, UCF, WillPrune);
}
// Delegates to the base-class prediction helper with the C4.5 model.
template <class Record>
template <typename ConType>
void edmC45<Record>::predict(ConType &data, std::vector<std::function<double(Record)>> &features,
                             std::vector<bool> &predictions)
{
    this->get_prediction(data, features, model, predictions);
}
// Returns an untrained copy carrying the same hyper-parameters (no model).
template <class Record> std::shared_ptr<edmC45<Record>> edmC45<Record>::clone()
{
    std::shared_ptr<edmC45<Record>> sptr = std::make_shared<edmC45<Record>>(UMINOBJS, UEpsilon, UCF, WillPrune);
    return sptr;
}
// Stores the libSVM hyper-parameters used when training builds the model.
// NOTE(review): uweight_label/uweight are raw pointers stored as-is — the
// caller presumably must keep them alive for the classifier's lifetime;
// confirm against libedm's CSVM usage.
template <typename Record>
edmSVM<Record>::edmSVM(int usvm_type, int ukernel_type, int udegree, double ucoef0, double ucache_size, double ueps,
                       double uC, int unr_weight, int *uweight_label, double *uweight, double unu, double up,
                       int ushrinking, int uprobability)
{
    svm_type = usvm_type;
    kernel_type = ukernel_type;
    degree = udegree;
    coef0 = ucoef0;
    cache_size = ucache_size;
    eps = ueps;
    C = uC;
    nr_weight = unr_weight;
    weight_label = uweight_label;
    weight = uweight;
    nu = unu;
    p = up;
    shrinking = ushrinking;
    probability = uprobability;
}
// Trains a libedm SVM on `payments` with the stored hyper-parameters;
// no-op when there are no features.
template <typename Record>
template <typename ConType>
void edmSVM<Record>::train(ConType payments, std::vector<std::function<double(Record)>> features,
                           std::function<bool(Record)> response)
{
    if (features.size() == 0)
        return;
    CDataset data = this->read_data(payments, features, response);
    model = std::make_shared<libedm::CSVM>(data, svm_type, kernel_type, degree, coef0, cache_size, eps, C, nr_weight,
                                           weight_label, weight, nu, p, shrinking, probability);
}
// Delegates to the base-class prediction helper with the SVM model.
template <typename Record>
template <typename ConType>
void edmSVM<Record>::predict(ConType &data, std::vector<std::function<double(Record)>> &features,
                             std::vector<bool> &predictions)
{
    this->get_prediction(data, features, model, predictions);
}
// Returns an untrained copy carrying the same hyper-parameters (no model).
template <typename Record> std::shared_ptr<edmSVM<Record>> edmSVM<Record>::clone()
{
    // TODO check if not learned!
    std::shared_ptr<edmSVM<Record>> sptr =
        std::make_shared<edmSVM<Record>>(svm_type, kernel_type, degree, coef0, cache_size, eps, C, nr_weight,
                                         weight_label, weight, nu, p, shrinking, probability);
    return sptr;
}
} // namespace metric
#endif
| 8,358
|
C++
|
.cpp
| 224
| 34.325893
| 118
| 0.688478
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,491
|
Prediction.cpp
|
metric-space-ai_metric/metric/3rdparty/libedm/Prediction.cpp
|
/*
Copyright (c) 2014, Qiangli Zhao and Yanhuang Jiang
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <fstream>
#include <string>
// using namespace std;
//#include "Obj.h"
#include "DataSet.h"
#include "Prediction.h"
using namespace libedm;
// Trivial destructor: all members are self-cleaning standard containers.
inline CPrediction::~CPrediction() {}
// Dataset: data set be predicted
// Probabilities: Probabilities of each instance belong to each class label
// start: start time of predicting
// Derives the hard label of each instance (arg-max over class probabilities),
// records per-instance correctness against the label column of `Dataset`,
// and computes the overall accuracy. PredictTime is currently unused
// (timing code is commented out).
inline CPrediction::CPrediction(const CDataset &Dataset, const std::vector<std::vector<double>> &Probabilities,
                                clock_t PredictTime)
{
    const MATRIX &Data = Dataset.GetData();
    const CASE_INFO &Info = Dataset.GetInfo();
    CaseNum = Info.Height;
    ClassNum = Info.ClassNum;
    Accuracy = 0;
    Probs.assign(Probabilities.begin(), Probabilities.end());
    // the label of an instance is the class with the max probability
    for (int j = 0; j < CaseNum; j++) {
        int Class = 0; // default label 0 when every probability is <= 0
        double MaxProb = 0;
        for (int k = 0; k < ClassNum; k++)
            if (Probs[j][k] > MaxProb) {
                Class = k;
                MaxProb = Probs[j][k];
            }
        PredLabelIndices.push_back(Class);
        // correct prediction count (label is the last valid column)
        int IdealResult = Data[j][Info.ValidWidth - 1].Discr;
        if (IdealResult == Class) {
            IsCorrect.push_back(true);
            Accuracy += 1;
        } else
            IsCorrect.push_back(false);
    }
    // NOTE(review): divides by zero for an empty dataset (CaseNum == 0) — confirm
    // callers never construct predictions over empty data.
    Accuracy /= CaseNum;
    // Total time consumed
    // CreatingTime = (double)PredictTime/CLOCKS_PER_SEC;
}
// Read-only accessors for the prediction results computed in the constructor.
inline const std::vector<std::vector<double>> &CPrediction::GetProbs() const { return Probs; }
inline const std::vector<int> &CPrediction::GetPredictedLabelIndices() const { return PredLabelIndices; }
inline const std::vector<bool> &CPrediction::GetCorrectness() const { return IsCorrect; }
inline double CPrediction::GetAccuracy() const { return Accuracy; }
| 3,178
|
C++
|
.cpp
| 70
| 43.171429
| 111
| 0.77386
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,493
|
DataSet.cpp
|
metric-space-ai_metric/metric/3rdparty/libedm/DataSet.cpp
|
/*
Copyright (c) 2014, Qiangli Zhao and Yanhuang Jiang
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <iostream>
#include <string>
// #include <fstream>
// #include <istream>
// #include <sstream>
// #include <climits>
#include <assert.h> // added by Max, Oct 21, for assert
#include <cstring> // uncommented by Max, Oct 21, for strcpy
// #include <cassert>
// #include <cstdarg>
// #include <ctime>
// using namespace std;
using namespace libedm;
//#include "Obj.h"
//#include "zString.h"
//#include "DateTime.h"
#include "DataSet.h"
#include "RandSequence.h"
#include <time.h>
// Appends all instances of `b` to this dataset; schemas are assumed
// compatible (the width check below is commented out in the original).
inline CDataset &CDataset::operator+=(const CDataset &b)
{
    // same object: self-append is a no-op
    if (this == &b)
        return *this;
    // empty data set: simply adopt b wholesale
    if (CaseInfo.Height == 0) {
        *this = b;
        return *this;
    }
    // if(CaseInfo.ValidWidth!=b.CaseInfo.ValidWidth)
    // throw("Adding different type of data!",701,0);
    CaseInfo.Height += b.CaseInfo.Height; // safe before the loop: self-append excluded above
    for (int i = 0; i < (int)b.CaseInfo.Height; i++)
        Matrix.push_back(b.Matrix[i]);
    return *this;
}
// Copy assignment: copies schema info and the full instance matrix.
inline CDataset &CDataset::operator=(const CDataset &b)
{
    if (this == &b)
        return *this;
    CaseInfo = b.CaseInfo;
    Matrix.assign(b.Matrix.begin(), b.Matrix.end());
    return *this;
}
// Default constructor: an empty dataset (zero instances).
inline CDataset::CDataset()
{
    // CreatingTime=0;
    CaseInfo.Height = 0;
}
// FormatLine() return codes: line holds data vs. line became empty/comment.
inline const int CDataset::LINE_OK = 0;
inline const int CDataset::SKIP_LINE = 1;
// remove all control chars and reserve only space and comma
// Normalizes one raw line of a C4.5/CSV-style file in place: strips quotes
// and '|' / '%' comments, lower-cases letters, trims delimiter-adjacent
// spaces, honors a trailing period as end-of-line, and unifies the
// delimiter to ',' when the line had no commas/colons.
// Returns LINE_OK if the line still holds data, SKIP_LINE if it became empty.
inline int CDataset::FormatLine(std::string &Line) const
{
    int Dot = 0; // position of the last '.' seen
    bool PeriodisLast = false; // true while that '.' remains the final non-space char
    // use space as delimiter?
    bool SpaceAsDelimiter = false;
    if (Line.find(',') == std::string::npos && Line.find(':') == std::string::npos)
        SpaceAsDelimiter = true;
    // tabs are turned into spaces first
    for (int i = 0; i < (int)Line.length(); i++)
        if (Line[i] == '\t')
            Line[i] = ' ';
    // format
    for (int i = 0; i < (int)Line.length();) {
        // all other non-readable chars are treated as delimiter
        if (Line[i] < ' ') {
            Line.erase(Line.begin() + i, Line.end());
            continue;
        }
        // remove leading spaces
        if (i == 0 && Line[i] == ' ') {
            Line.erase(Line.begin());
            continue;
        }
        switch (Line[i]) {
        case '"': // remove quotation mark
        case '\'':
            Line.erase(Line.begin() + i);
            continue;
        case '|': // remove commented
        case '%': // remove commented
            Line.erase(Line.begin() + i, Line.end());
            continue;
        case ';': // heading semicolon as comment
            if (i <= 0) {
                Line.erase(Line.begin() + i, Line.end());
                continue;
            } else // otherwise as delimiter
                Line[i] = ',';
            break;
        case '.': // last character in line is a dot, which means end of line
            Dot = i;
            PeriodisLast = true;
            break;
        case ',': // delimiter
        case ':': // start of attribute description
            // remove preceding and trailing spaces
            // (Line[i + 1] at the string's end reads the terminating '\0' in
            // C++11 std::string, stopping the loop safely)
            while (Line[i + 1] == ' ')
                Line.erase(Line.begin() + i + 1);
            while (i > 0 && Line[i - 1] == ' ') {
                Line.erase(Line.begin() + i - 1);
                i--;
            }
            break;
        case ' ':
            break;
        default: // non-control chars
            // lower case
            if (Line[i] >= 'A' && Line[i] <= 'Z')
                Line[i] -= ('A' - 'a');
            PeriodisLast = false;
            break;
        }
        i++;
    }
    // if last char is a period, the line is ended
    if (PeriodisLast)
        Line.erase(Line.begin() + Dot, Line.end());
    // remove trailing spaces
    for (int i = (int)Line.length() - 1; i >= 0; i--) {
        if (Line[i] == ' ' || Line[i] == '\t')
            Line.erase(Line.begin() + i);
        else
            break;
    }
    // convert space into comma
    for (int i = 0; i < (int)Line.length(); i++)
        if (SpaceAsDelimiter && Line[i] == ' ')
            Line[i] = ',';
    // empty line?
    if (Line.length() <= 0)
        return SKIP_LINE;
    return LINE_OK;
}
// if the first word of the first line is the name of any attribute, this file has a head line
// Fixed: the original used new[]/strcpy/strtok and constructed std::string
// from strtok's result without a null check — undefined behavior on an
// empty or all-comma line. This version extracts the first token with
// std::string operations (same semantics: leading ',' runs are skipped).
inline bool CDataset::HasHeading(std::string &Line) const
{
    // first comma-separated token, mirroring strtok(",") behavior
    const std::string::size_type Start = Line.find_first_not_of(',');
    if (Start == std::string::npos)
        return false; // no token at all: cannot be a heading
    std::string::size_type End = Line.find(',', Start);
    if (End == std::string::npos)
        End = Line.length();
    const std::string Value = Line.substr(Start, End - Start);
    // is it a name for an attribute?
    for (int i = 0; i < CaseInfo.ReadWidth; i++)
        if (CaseInfo.ReadAttrs[i].Name == Value)
            return true;
    return false;
}
// //get training samples from * file to Matrix->data array
// //unknown class labeled is marked as -1
// //instances with unknown values is removed
// //Number=0, read all;>0 read some; <0 read none
// void CDataset::ReadMatrix(ifstream &DataFile,int Number)
// {
// assert(!DataFile.fail());
// // {
// // throw(CError("Open data file failed!",308,0));
// // }
// Matrix.clear();
// if(DataFile.eof()||Number<0)
// return;
// //read a instance
// int InstanceNum=0;
// int IsFinish=LINE_OK;
// while(!DataFile.eof())
// {
// //read a line
// string Line;
// // try
// // {
// getline(DataFile,Line);
// // }
// // catch(...)
// // {
// // //not enough buffer
// // throw(CError("Data file: too long a line!",309,0));
// // }
// if(DataFile.fail())
// {
// assert(!DataFile.eof());
// // continue;
// // throw(CError("Data file: read error!",310,0));
// }
// //format and parse the line
// if((IsFinish=FormatLine(Line))==SKIP_LINE)
// continue;
// //csv format has a heading line, skip it
// if(InstanceNum==0 && HasHeading(Line))
// continue;
// //read data from the line
// int ValueNum=0;
// InstanceStr Inst;
// bool HasMissedValue=false;
// ValueData Label;
// char *DataLine=new char[Line.length()+1];
// strcpy(DataLine,Line.c_str());
// const char *Del=",";
// char *pValue=strtok(DataLine,Del);
// while(pValue!=NULL)
// {
// //read a value
// string Value(pValue);
// ValueData Item;
// //get class label
// if(CaseInfo.ReadAttrs[ValueNum].AttType==ATT_CLASSLABEL)
// {
// // try
// // {
// Which(Label,Value);
// // }
// // catch(CError &Err)
// // {
// // delete [] DataLine;
// // basic_ostringstream<char> OutMsg;
// // OutMsg<<" in line "<<InstanceNum<<ends;
// // Err.Description+=OutMsg.str();
// // throw(Err);
// // }
// ValueNum++;
// //read next value
// pValue=strtok(NULL,Del);
// continue;
// }
// else if(CaseInfo.ReadAttrs[ValueNum].AttType==ATT_IGNORED)
// {
// //for ignored attribute, we just skip the value
// ValueNum++;
// //read next value
// pValue=strtok(NULL,Del);
// continue;
// }
// else if(CaseInfo.ReadAttrs[ValueNum].AttType==ATT_DISCRETE)
// {
// //discrete attribute with int value(ItemNo)
// // try
// // {
// Which(Item,ValueNum,Value);
// // }
// // catch(CError &Err)
// // {
// // delete [] DataLine;
// // basic_ostringstream<char> OutMsg;
// // OutMsg<<" in line "<<InstanceNum<<ends;
// // Err.Description+=OutMsg.str();
// // throw(Err);
// // }
// int k=CaseInfo.ReadAttrs[ValueNum].OtherPos;
// //value is a "?" (unknown)
// if(Item.Discr==-1)
// HasMissedValue=true;
// //maximum and minimum value for attribute
// else if(!CaseInfo.ValidAttrs[k].MMSet)
// {
// CaseInfo.ValidAttrs[k].Max=Item.Discr;
// CaseInfo.ValidAttrs[k].Min=Item.Discr;
// CaseInfo.ValidAttrs[k].MMSet=true;
// CaseInfo.ReadAttrs[ValueNum].Max=Item.Discr;
// CaseInfo.ReadAttrs[ValueNum].Min=Item.Discr;
// CaseInfo.ReadAttrs[ValueNum].MMSet=true;
// }
// else
// {
// if(Item.Discr>CaseInfo.ValidAttrs[k].Max)
// {
// CaseInfo.ValidAttrs[k].Max=Item.Discr;
// CaseInfo.ReadAttrs[ValueNum].Max=Item.Discr;
// }
// if(Item.Discr<CaseInfo.ValidAttrs[k].Min)
// {
// CaseInfo.ValidAttrs[k].Min=Item.Discr;
// CaseInfo.ReadAttrs[ValueNum].Min=Item.Discr;
// }
// }
// }
// //unknown continuous value?
// else if(Value=="?")
// {
// HasMissedValue=true;
// }
// else if(CaseInfo.ReadAttrs[ValueNum].AttType==ATT_CONTINUOUS||
// CaseInfo.ReadAttrs[ValueNum].AttType==ATT_DATETIME)
// {
// //continuous attribute with double value
// if(CaseInfo.ReadAttrs[ValueNum].AttType==ATT_CONTINUOUS)
// {
// basic_istringstream<char> FloatString(Value.c_str());
// FloatString>>Item.Cont;
// if(FloatString.fail())
// break;
// }
// // else//date time
// // {
// // CDateTime DateValue(Value);
// // Item.Cont=(double)DateValue.GetNumeric();
// // }
// //maximum and minimum value for continuous attribute
// int k=CaseInfo.ReadAttrs[ValueNum].OtherPos;
// if(!CaseInfo.ValidAttrs[k].MMSet)
// {
// CaseInfo.ValidAttrs[k].Max=Item.Cont;
// CaseInfo.ValidAttrs[k].Min=Item.Cont;
// CaseInfo.ValidAttrs[k].MMSet=true;
// CaseInfo.ReadAttrs[ValueNum].Max=Item.Cont;
// CaseInfo.ReadAttrs[ValueNum].Min=Item.Cont;
// CaseInfo.ReadAttrs[ValueNum].MMSet=true;
// }
// else
// {
// if(Item.Cont>CaseInfo.ValidAttrs[k].Max)
// {
// CaseInfo.ValidAttrs[k].Max=Item.Cont;
// CaseInfo.ReadAttrs[ValueNum].Max=Item.Cont;
// }
// if(Item.Cont<CaseInfo.ValidAttrs[k].Min)
// {
// CaseInfo.ValidAttrs[k].Min=Item.Cont;
// CaseInfo.ReadAttrs[ValueNum].Min=Item.Cont;
// }
// }
// }
// ValueNum++;
// Inst.push_back(Item);
// //read next value
// pValue=strtok(NULL,Del);
// }//line
// delete [] DataLine;
// //not enough values for all attributes
// assert((int)Inst.size()+1==CaseInfo.ValidWidth);
// // {
// // basic_ostringstream<char> OutMsg;
// // OutMsg<<"Data file: illegal instance data in line "<<InstanceNum<<ends;
// // throw(CError(string(OutMsg.str()),311,0));
// // }
// if(!HasMissedValue)//skip the instance with unknown value
// {
// //put label into instance
// Inst.push_back(Label);
// Matrix.push_back(Inst);
// }
// //number of instances has processed (include the skipped one)
// InstanceNum++;
// //we have enough instances, can now leave
// if(Number>0 && InstanceNum>=Number)
// break;
// // //display
// // if(InstanceNum%200000==0)
// // printf("Instance: %d\n",InstanceNum);
// }//end of file
// //number of instances
// CaseInfo.Height=(int)Matrix.size();
// }
// Locate value
// DiscValueStr equality compares by name only (used by std::find over value lists).
bool libedm::operator==(const DiscValueStr &a, const DiscValueStr &b) { return (a.Name == b.Name); }
// from string to class label
// Maps a textual class label to its index in CaseInfo.Classes.
// '?' means unknown: Item.Discr = -1 and false is returned.
// Unknown (non-'?') labels trip the assert; in NDEBUG builds the end
// iterator would fall through — the original threw instead (see comment).
inline bool CDataset::Which(ValueData &Item, const std::string &Name) const
{
    // unknown label
    if (Name == "?") {
        Item.Discr = -1;
        return false;
    }
    // search
    DiscValueStr tmp;
    tmp.Name = Name;
    std::vector<DiscValueStr>::const_iterator it;
    it = find(CaseInfo.Classes.begin(), CaseInfo.Classes.end(), tmp);
    assert(it != CaseInfo.Classes.end());
    // {
    // string Msg="unexpected class label ";
    // throw(CError(Msg+"'"+Name+"'",312,0));
    // }
    Item.Discr = (int)(it - CaseInfo.Classes.begin());
    return true;
}
// from string to a discrete attribute value
// Maps a textual discrete value of attribute `ValueNum` to its index in
// that attribute's value list. '?' means unknown: Item.Discr = -1, false
// returned. Unknown (non-'?') values trip the assert.
inline bool CDataset::Which(ValueData &Item, int ValueNum, const std::string &Name) const
{
    // unknown value
    if (Name == "?") {
        Item.Discr = -1;
        return false;
    }
    std::vector<DiscValueStr>::const_iterator it;
    DiscValueStr tmp;
    tmp.Name = Name;
    it = find(CaseInfo.ReadAttrs[ValueNum].Disc.begin(), CaseInfo.ReadAttrs[ValueNum].Disc.end(), tmp);
    assert(it != CaseInfo.ReadAttrs[ValueNum].Disc.end());
    // {
    // string Msg="unexpected discrete value ";
    // throw(CError(Msg+"'"+Name+"'"+" of column "+CzString::IntToStr(ValueNum),313,0));
    // }
    Item.Discr = (int)(it - CaseInfo.ReadAttrs[ValueNum].Disc.begin());
    return true;
}
// random select (no duplication)
// IN: DataNum- size of target dataset
// OUT: TrainSet- target dataset
inline bool CDataset::SubSet(int DataNum, CDataset &TrainSet) const
{
    // sub set of a data set; cap the requested size at what we actually have
    int FinalSize = DataNum;
    if (DataNum > CaseInfo.Height)
        FinalSize = CaseInfo.Height;
    TrainSet.Matrix.clear();
    // NOTE(review): the random sequence is constructed over FinalSize, not
    // CaseInfo.Height — if CRandSequence polls indices in [0, FinalSize) this
    // only permutes the first FinalSize instances rather than sampling the
    // whole dataset; confirm CRandSequence's semantics.
    CRandSequence RandSequence(FinalSize);
    for (int i = 0; i < FinalSize; i++) {
        int TrainNum = RandSequence.Poll();
        TrainSet.Matrix.push_back(Matrix[TrainNum]);
    }
    TrainSet.CaseInfo = CaseInfo;
    TrainSet.CaseInfo.Height = FinalSize;
    // TrainSet.CreatingTime=0;
    return true;
}
// caution: when we do bootstrapping, sub-setting or splitting,
// we haven't re-calculate the max or min information of a continuous attribute
// bootstrap re-sampling (sampling WITH replacement)
// IN: DataNum- size of target dataset
// OUT: TrainSet- target dataset
inline bool CDataset::BootStrap(int DataNum, CDataset &TrainSet) const
{
    TrainSet.Matrix.clear();
    for (int i = 0; i < DataNum; i++) {
        int TrainNum = IntRand(CaseInfo.Height); // uniform index; duplicates allowed
        TrainSet.Matrix.push_back(Matrix[TrainNum]);
    }
    TrainSet.CaseInfo = CaseInfo;
    TrainSet.CaseInfo.Height = DataNum;
    // TrainSet.CreatingTime=0;
    return true;
}
// weighted bootstrap: re-sampling with considering instances' weights
// IN: Weights- weights for instances in this data set
// DataNum- size of target dataset
// OUT: TrainSet- target dataset
// OrginalPos- original position for instances of new-created dataset
// Returns false when the weight vector does not match the dataset height.
inline bool CDataset::BootStrap(const std::vector<double> &Weights, int DataNum, std::vector<int> &OrginalPos,
                                CDataset &TrainSet) const
{
    if ((int)Weights.size() != CaseInfo.Height)
        return false;
    int TrainNum = 0;
    TrainSet.Matrix.clear();
    TrainSet.CaseInfo = CaseInfo;
    TrainSet.CaseInfo.Height = DataNum;
    // TrainSet.CreatingTime=0;
    OrginalPos.clear();
    CRoulette Roult(Weights);
    // select instances by roulette (roulette-wheel / fitness-proportional draw)
    for (int i = 0; i < DataNum; i++) {
        TrainNum = Roult.Poll();
        if (TrainNum >= CaseInfo.Height) // clamp defensively against roulette overshoot
            TrainNum = CaseInfo.Height - 1;
        TrainSet.Matrix.push_back(Matrix[TrainNum]);
        OrginalPos.push_back(TrainNum);
    }
    return true;
}
// sampling- the input dataset is randomly split into two parts
// IN: DataNum- size of target dataset
// OUT: TrainSet- target dataset
// TestSet- rest instances
// Returns false when DataNum is not strictly between 0 and the dataset size.
inline bool CDataset::SplitData(int DataNum, CDataset &TrainSet, CDataset &TestSet) const
{
    const int CaseNum = CaseInfo.Height;
    if (DataNum >= CaseNum || DataNum <= 0)
        return false;
    const int TestNum = CaseNum - DataNum;
    // test set first, because it is always less than train set
    TestSet.Matrix.clear();
    TestSet.CaseInfo = CaseInfo;
    TestSet.CaseInfo.Height = TestNum;
    // TestSet.CreatingTime=0;
    // which has been selected
    std::vector<int> SelFlag(CaseNum, 0);
    int WaitForSelect = CaseNum;
    // randomly select instances from original dataset(no duplicate)
    for (int i = 0; i < TestNum; i++) {
        // number of instance wait for selecting; IntRand presumably yields
        // a value in [0, WaitForSelect) so the scan below always terminates
        int Selected = IntRand(WaitForSelect);
        // find the Selected-th still-unselected instance
        int Pos = 0;
        int j;
        for (j = 0; j < CaseNum; j++) {
            if (SelFlag[j] > 0) // is an already-sampled instance?
                continue;
            if (Pos++ >= Selected)
                break;
        }
        // found
        SelFlag[j]++;
        WaitForSelect--;
        TestSet.Matrix.push_back(Matrix[j]);
    }
    // the remaining instances are put into TrainSet
    TrainSet.Matrix.clear();
    TrainSet.CaseInfo = CaseInfo;
    // TrainSet.CreatingTime=0;
    for (int j = 0; j < CaseNum; j++)
        if (SelFlag[j] <= 0)
            TrainSet.Matrix.push_back(Matrix[j]);
    TrainSet.CaseInfo.Height = (int)TrainSet.Matrix.size();
    return true;
}
// sampling- the input dataset is randomly split into several train-sets and one test-set
// IN: DataNum- size of each target dataset
// SetNum- number of target datasets
// OUT: TrainSet- target dataset
// TestSet- rest instances
// NOTE(review): the entry assert(DataNum*SetNum >= Height) combined with the
// exit assert(CaseNum >= 0) (i.e. Height >= DataNum*SetNum) effectively
// requires DataNum*SetNum == Height — one of the two asserts looks inverted
// relative to the original "Not enough instances" throw; confirm intent.
inline bool CDataset::SplitData(int DataNum, int SetNum, std::vector<CDataset> &TrainSets, CDataset &TestSet) const
{
    // Test set is allowed to be null
    assert(DataNum * SetNum >= CaseInfo.Height);
    // throw(CError("Not enough instances!",501,0));
    TrainSets.clear();
    // srand((unsigned)time(NULL));
    // flags identifying instances has been selected
    int CaseNum = CaseInfo.Height;
    std::vector<int> SelFlag(CaseInfo.Height, 0);
    for (int k = 0; k < SetNum; k++) {
        {
            CDataset TrainSet;
            TrainSet.CaseInfo = CaseInfo;
            TrainSet.CaseInfo.Height = DataNum;
            // TrainSet.CreatingTime=0;
            TrainSets.push_back(TrainSet);
        }
        // randomly select instances from original dataset(no dup)
        for (int i = 0; i < DataNum; i++) {
            // number of instance for selecting among the CaseNum unselected ones
            int TrainNum = IntRand(CaseNum);
            // find the TrainNum-th unselected instance
            int Pos = 0;
            int j;
            for (j = 0; j < CaseInfo.Height; j++) {
                if (SelFlag[j] > 0) // is an already-sampled instance?
                    continue;
                if (Pos++ >= TrainNum)
                    break;
            }
            // found
            SelFlag[j]++;
            CaseNum--;
            TrainSets[k].Matrix.push_back(Matrix[j]);
        }
    }
    // the remaining instances are put into TestSet
    TestSet.Matrix.clear();
    TestSet.CaseInfo = CaseInfo;
    // TestSet.CreatingTime=0;
    for (int j = 0; j < CaseInfo.Height; j++)
        if (SelFlag[j] <= 0)
            TestSet.Matrix.push_back(Matrix[j]);
    TestSet.CaseInfo.Height = (int)TestSet.Matrix.size();
    //
    assert(CaseNum >= 0);
    // throw(CError("Not enough instances!",501,0));
    return true;
}
// sampling- the input dataset is split into several new sets, from beginning to end
// IN: SetNum- number of target datasets
// OUT: TrainSets- target datasets (as evenly sized as possible)
inline bool CDataset::DevideBySetNum(int SetNum, std::vector<CDataset> &TrainSets) const
{
    // Parameters
    const int CaseNum = CaseInfo.Height;
    // Fixed: the assertion was inverted (it demanded SetNum >= CaseNum).
    // Each of the SetNum sets needs at least one instance.
    assert(CaseNum >= SetNum);
    // throw(CError("CDataset::DevideBySetNum: Not enough instances!",502,0));
    TrainSets.clear();
    // in case instances can not evenly put into all sets
    for (int k = 0; k < SetNum; k++) {
        {
            CDataset TrainSet;
            TrainSet.CaseInfo = CaseInfo;
            // TrainSet.CreatingTime=0;
            TrainSets.push_back(TrainSet);
        }
        // sometimes data can not evenly put into each set, we just make it as even as possible
        int Start = (int)(1.0 * k * CaseNum / SetNum);
        int End = (int)(1.0 * (k + 1) * CaseNum / SetNum);
        for (int i = Start; i < End; i++)
            TrainSets[k].Matrix.push_back(Matrix[i]);
        TrainSets[k].CaseInfo.Height = (int)TrainSets[k].Matrix.size();
    }
    return true;
}
// sampling- the input dataset is split into several new sets, from beginning to end
// IN: DataNum- number of data in each target dataset
// OUT: TrainSets- target dataset
inline bool CDataset::DevideByDataNum(int DataNum, std::vector<CDataset> &TrainSets) const
{
// Parameters
const int CaseNum = CaseInfo.Height;
TrainSets.clear();
// in case instances can not evenly put into all sets
int DataPos = 0;
while (DataPos < CaseNum) {
{
CDataset TrainSet;
TrainSet.CaseInfo = CaseInfo;
// TrainSet.CreatingTime=0;
TrainSets.push_back(TrainSet);
}
// sometimes data can not evenly put into each set, we just make it as even as possible
for (int i = 0; DataPos < CaseNum && i < DataNum; i++, DataPos++)
TrainSets.back().Matrix.push_back(Matrix[DataPos]);
TrainSets.back().CaseInfo.Height = (int)TrainSets.back().Matrix.size();
}
return true;
}
// IN: a,b- position of two instances to be swapped
// nothing is done for invalid input
inline bool CDataset::SwapInstance(int a, int b)
{
int DataNum = (int)CaseInfo.Height;
if (a == b)
return true;
if (a >= DataNum || b >= DataNum || a < 0 || b < 0)
return false;
InstanceStr Hold;
Hold = Matrix[a];
Matrix[a] = Matrix[b];
Matrix[b] = Hold;
return true;
}
// insert one instance at the end of the dataset
// The instance must already be in internal form: ignored attributes removed,
// discrete values and labels encoded as numbers starting at 0 (-1 = unknown,
// see RemoveUnknownInstance). Continuous/datetime attributes update the
// cached Min/Max statistics of both the valid and the original attribute.
inline void CDataset::Insert(const InstanceStr &Instance)
{
    assert((int)Instance.size() == CaseInfo.ValidWidth);
    // throw(CError("Invalid data!",601,0));
    // attributes
    for (int i = 0; i < CaseInfo.ValidWidth - 1; i++) {
        switch (CaseInfo.ValidAttrs[i].AttType) {
        case ATT_DISCRETE:
            // Fixed: was '||', which made the assertion always true; -1 is
            // accepted as the "unknown value" marker.
            assert(Instance[i].Discr >= -1 && Instance[i].Discr < (int)CaseInfo.ValidAttrs[i].Disc.size());
            // throw(CError("Invalid discrete data!",602,0));
            break;
        case ATT_CONTINUOUS:
        case ATT_DATETIME: {
            // keep Min/Max of the corresponding read attribute in sync as well
            int k = CaseInfo.ValidAttrs[i].OtherPos;
            if (!CaseInfo.ValidAttrs[i].MMSet) {
                CaseInfo.ValidAttrs[i].MMSet = true;
                CaseInfo.ValidAttrs[i].Max = CaseInfo.ValidAttrs[i].Min = Instance[i].Cont;
                CaseInfo.ReadAttrs[k].MMSet = true;
                CaseInfo.ReadAttrs[k].Max = CaseInfo.ReadAttrs[k].Min = Instance[i].Cont;
            } else if (Instance[i].Cont < CaseInfo.ValidAttrs[i].Min) {
                CaseInfo.ValidAttrs[i].Min = Instance[i].Cont;
                CaseInfo.ReadAttrs[k].Min = Instance[i].Cont;
            } else if (Instance[i].Cont > CaseInfo.ValidAttrs[i].Max) {
                CaseInfo.ValidAttrs[i].Max = Instance[i].Cont;
                CaseInfo.ReadAttrs[k].Max = Instance[i].Cont;
            }
        } break;
        default:
            // throw(CError("Invalid data description!",602,0));
            break;
        }
    }
    // label: -1 encodes "unknown"; fixed '||' -> '&&' (was always true)
    assert(Instance[CaseInfo.ValidWidth - 1].Discr >= -1 &&
           Instance[CaseInfo.ValidWidth - 1].Discr < (int)CaseInfo.ClassNum);
    // throw(CError("Invalid label!",601,0));
    Matrix.push_back(Instance);
    CaseInfo.Height++;
}
// remove the instance at position Pos; out-of-range positions are ignored
inline void CDataset::Remove(int Pos)
{
    if (CaseInfo.Height > 0 && Pos >= 0 && Pos < CaseInfo.Height) {
        Matrix.erase(Matrix.begin() + Pos);
        // Fixed: keep the cached height consistent with the matrix
        // (Insert increments it; cf. RemoveUnknownInstance).
        CaseInfo.Height = (int)Matrix.size();
    }
}
// remove every instance while keeping the attribute description intact
inline void CDataset::ClearData()
{
    Matrix.clear();
    CaseInfo.Height = 0;
}
// IN:  Att - index of the attribute in ValidAttrs
//      t   - test value
// OUT: the greatest value of the Att-th attribute that is no larger than t.
//      Falls back to the attribute's recorded minimum when no instance
//      qualifies (e.g. empty dataset) - the previous revision returned an
//      uninitialized double in that case.
inline double CDataset::GreatestValBelow(int Att, const double &t) const
{
    if (t < CaseInfo.ValidAttrs[Att].Min)
        return CaseInfo.ValidAttrs[Att].Min;
    double Best = CaseInfo.ValidAttrs[Att].Min; // safe fallback
    for (int i = 0; i < CaseInfo.Height; i++) {
        const double v = Matrix[i][Att].Cont;
        if (v <= t && v > Best)
            Best = v;
    }
    return Best;
}
// copy
// Copy constructor: delegates to the copy assignment operator, which
// deep-copies both the instance matrix and the dataset description.
inline CDataset::CDataset(const CDataset &Dataset)
{
    // CreatingTime=0;
    *this = Dataset;
}
// create a new dataset, by extending every multi-valued discrete attribute into multi boolean attributes(needed by BPNN
// and/or SVM) notice: new data set don't correspond to any file
// Discrete attributes with <= 2 values become a single continuous 0/1
// attribute; attributes with more values are one-hot encoded (one 0/1
// attribute per discrete value). Non-discrete attributes are copied as-is.
// NOTE(review): the caller owns the returned raw pointer and must delete it.
inline CDataset *CDataset::ExpandDiscrete() const
{
    clock_t start = clock(); // kept for the (disabled) CreatingTime bookkeeping below
    // prepare
    CDataset *newDataSet = new CDataset(*this);
    // only process discrete attributes
    {
        int j;
        // skip the label
        for (j = CaseInfo.ValidWidth - 2; j >= 0; j--)
            if (CaseInfo.ValidAttrs[j].AttType == ATT_DISCRETE)
                break;
        // just copy this one if no discrete attribute
        if (j < 0)
            return newDataSet;
    }
    // need to modify instances and their description
    // remove all single-value attributes to run faster
    newDataSet->CaseInfo.ValidAttrs.clear();
    newDataSet->Matrix.clear();
    // including attributes and label
    for (int j = 0; j < CaseInfo.ValidWidth; j++) {
        if (CaseInfo.ValidAttrs[j].AttType == ATT_DISCRETE) {
            // transform into continuous attributes
            AttrStr Attr;
            Attr.AttType = ATT_CONTINUOUS;
            Attr.Max = 1;
            Attr.Min = 0;
            Attr.MMSet = true;
            int ValueNum = (int)CaseInfo.ValidAttrs[j].Disc.size();
            // transformed into continuous attribute directly
            if (ValueNum <= 2) {
                Attr.Name = CaseInfo.ValidAttrs[j].Name;
                newDataSet->CaseInfo.ValidAttrs.push_back(Attr);
            } else {
                // one boolean attribute per discrete value (one-hot encoding)
                for (int k = 0; k < ValueNum; k++) {
                    Attr.Name = CaseInfo.ValidAttrs[j].Name + "_" + CaseInfo.ValidAttrs[j].Disc[k].Name;
                    newDataSet->CaseInfo.ValidAttrs.push_back(Attr);
                }
            }
        }
        // keep non discrete attribute
        else {
            newDataSet->CaseInfo.ValidAttrs.push_back(CaseInfo.ValidAttrs[j]);
        }
    } // attributes
    newDataSet->CaseInfo.ValidWidth = (int)newDataSet->CaseInfo.ValidAttrs.size();
    // instances
    for (int i = 0; i < CaseInfo.Height; i++) {
        // single instance
        InstanceStr Inst;
        // process all attributes and the label
        for (int j = 0; j < CaseInfo.ValidWidth; j++) {
            if (CaseInfo.ValidAttrs[j].AttType == ATT_DISCRETE) {
                ValueData Value = Matrix[i][j];
                int ValueNum = (int)CaseInfo.ValidAttrs[j].Disc.size();
                // transformed into continuous attribute directly
                if (ValueNum <= 2) {
                    Value.Cont = (double)Value.Discr;
                    Inst.push_back(Value);
                } else {
                    // one-hot encode: 1 at the position of the stored value, else 0
                    for (int k = 0; k < ValueNum; k++) {
                        Value.Cont = 0;
                        if (Matrix[i][j].Discr == k)
                            Value.Cont = 1;
                        Inst.push_back(Value);
                    }
                }
            } else {
                Inst.push_back(Matrix[i][j]);
            }
        } // attributes
        // insert this new instance
        newDataSet->Matrix.push_back(Inst);
    } // instances
    // newDataSet->CreatingTime=(double)(clock()-start)/CLOCKS_PER_SEC;
    return newDataSet;
}
// A training set must not contain instances with an unknown label:
// drop every instance whose label value is -1.
inline void CDataset::RemoveUnknownInstance()
{
    const int LabelCol = CaseInfo.ValidWidth - 1;
    // iterate backwards so erasing does not shift the yet-unvisited rows
    for (int Row = CaseInfo.Height - 1; Row >= 0; --Row) {
        if (Matrix[Row][LabelCol].Discr == -1)
            Matrix.erase(Matrix.begin() + Row);
    }
    CaseInfo.Height = (int)Matrix.size();
}
// remove the attribute which has only a value
// don't use it on a expanded data set
// Removes every attribute whose Min equals Max (constant column), both from
// the description and from every stored instance. Afterwards the dataset can
// no longer be re-read from file (ReadAttrs is cleared).
// NOTE(review): Min/Max are only maintained for continuous/datetime
// attributes (see Insert) - confirm discrete columns are intended to be
// tested the same way.
inline void CDataset::RemoveNullAttribute()
{
    // skip the labels
    bool Changed = false;
    for (int i = CaseInfo.ValidWidth - 2; i >= 0; i--) {
        if (CaseInfo.ValidAttrs[i].Max == CaseInfo.ValidAttrs[i].Min) {
            // erase the i-th value from every instance, then its description
            for (int j = 0; j < CaseInfo.Height; j++)
                Matrix[j].erase(Matrix[j].begin() + i);
            CaseInfo.ValidAttrs.erase(CaseInfo.ValidAttrs.begin() + i);
            Changed = true;
        }
    }
    if (Changed) {
        CaseInfo.ValidWidth = (int)CaseInfo.ValidAttrs.size();
        if (CaseInfo.ValidWidth == 0) {
            Matrix.clear();
            CaseInfo.Height = 0;
        }
        // the data set can't be used to read from file any more
        CaseInfo.ReadAttrs.clear();
        CaseInfo.ReadWidth = 0;
    }
}
// Read-only access to the instance matrix.
inline const MATRIX &CDataset::GetData() const { return Matrix; }
// Read-only access to the dataset description (attributes, sizes, classes).
inline const CASE_INFO &CDataset::GetInfo() const { return CaseInfo; }
// True iff no non-label attribute is discrete.
inline bool CDataset::AllContinuous() const
{
    for (int Col = CaseInfo.ValidWidth - 2; Col >= 0; --Col)
        if (CaseInfo.ValidAttrs[Col].AttType == ATT_DISCRETE)
            return false;
    return true;
}
| 27,259
|
C++
|
.cpp
| 860
| 29.433721
| 120
| 0.674197
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,497
|
energy_encoder.cpp
|
metric-space-ai_metric/metric/transform/energy_encoder.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#include "energy_encoder.hpp"
#include "wavelet.hpp"
#include <stack>
namespace metric {
// recursive split for arbitrary depth
// this code is COPIED from DSPCC, TODO update DSPCC to remove code dubbing if this code becomes common
// Recursively applies one DWT level to every subband in x (doubling the
// number of subbands each step) until at least `subbands_num` subbands exist.
// The pre-split subband length of every level is pushed onto `subband_length`
// so DWT_unsplit can invert the process.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator>
DWT_split(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> x,
          std::stack<std::size_t> &subband_length, int wavelet_type, std::size_t subbands_num)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> out;
    if (x.size() * 2 <= subbands_num) {
        for (std::size_t el = 0; el < x.size(); ++el) {
            // each dwt yields an (approximation, detail) pair of subbands
            auto split = wavelet::dwt(x[el], wavelet_type);
            out.push_back(std::get<0>(split));
            out.push_back(std::get<1>(split));
        }
        subband_length.push(x[0].size());
        return DWT_split(out, subband_length, wavelet_type, subbands_num);
    } else {
        return x;
    }
}
// Inverse of DWT_split: repeatedly merges adjacent (approximation, detail)
// pairs via idwt, popping the recorded subband length of each level from
// `subband_length`, until a single series remains.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator>
DWT_unsplit(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> in,
            std::stack<std::size_t> &subband_length, int wavelet_type)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> x;
    if (in.size() > 1) {
        for (std::size_t el = 0; el < in.size(); el += 2) { // we assume size of deque is even, TODO check
            x.push_back(wavelet::idwt(in[el], in[el + 1], wavelet_type, subband_length.top()));
        }
        subband_length.pop();
        return DWT_unsplit(x, subband_length, wavelet_type);
    } else {
        return in;
    }
}
// Runs the recursive DWT split on a single time series: wraps x into an outer
// container and splits it into `subbands_num` subbands, recording each
// level's subband length in `subband_length` for later inversion.
template <template <typename, typename> class OuterContainer, class InnerContainer, typename OuterAllocator>
OuterContainer<InnerContainer, OuterAllocator> // TODO better use -> for deduction by return value
sequential_DWT(InnerContainer x, std::stack<std::size_t> &subband_length, int wavelet_type, std::size_t subbands_num)
{
    OuterContainer<InnerContainer, OuterAllocator> outer_x = {x}; // TODO also rename deque_x to outer_x in DSPCC
    return DWT_split(outer_x, subband_length, wavelet_type, subbands_num);
}
// Inverse of sequential_DWT: merges the subbands in `in` back into a single
// time series, consuming the lengths stored in `subband_length`.
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
InnerContainer<ValueType, InnerAllocator>
sequential_iDWT(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> in,
                std::stack<std::size_t> &subband_length, int wavelet_type)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> outer_out =
        DWT_unsplit(in, subband_length, wavelet_type);
    return outer_out[0]; // TODO rename deque_out also in DSPCC
}
// `splits` is the number of DWT levels, so the encoder produces 2^splits
// subbands; `bothsided` is stored but one-sided mode is not implemented yet.
EnergyEncoder::EnergyEncoder(int wavelet_type, std::size_t splits, bool bothsided)
    : wavelet_type(wavelet_type), bothsided(bothsided) // not used now, TODO implement one-sided!
{
    subbands = 1;
    std::size_t level = splits;
    while (level > 0) {
        subbands *= 2; // one split doubles the subband count
        --level;
    }
}
// Compresses a time series into one energy value per frequency subband: the
// series is recursively split by DWT into `subbands` subbands and the biased
// variance of each subband is returned, in subband order.
template <template <typename, typename> class Container, typename Allocator, typename ValueType>
auto EnergyEncoder::operator()(Container<ValueType, Allocator> &in) const -> Container<ValueType, Allocator>
{
    using InnerContainer = Container<ValueType, Allocator>;
    std::stack<std::size_t> subband_length;
    auto bands = sequential_DWT<std::vector, InnerContainer, std::allocator<InnerContainer>>(
        in, subband_length, wavelet_type, subbands); // TODO update splits with log2
    InnerContainer energies;
    for (std::size_t b = 0; b < bands.size(); ++b) {
        const std::size_t len = bands[b].size();
        // first pass: mean of the subband
        ValueType mean = 0;
        for (std::size_t j = 0; j < len; ++j)
            mean += bands[b][j];
        mean = mean / len;
        // second pass: mean squared deviation (biased variance)
        ValueType acc = 0;
        for (std::size_t j = 0; j < len; ++j) {
            const ValueType centered = bands[b][j] - mean;
            acc += centered * centered;
        }
        energies.push_back(acc / len);
    }
    return energies;
}
// Evenly spaced subband edges over the first half of a length-`len` spectrum;
// bounds[i] .. bounds[i+1] delimit subband i (subbands+1 entries in total).
auto EnergyEncoder::freq_bounds(std::size_t len) const -> std::vector<std::size_t>
{
    std::vector<std::size_t> edges;
    edges.push_back(0);
    float step = len / (2.0 * (float)subbands);
    std::size_t i = 1;
    while (i <= subbands) {
        edges.push_back(step * i + 0.5); // +0.5 rounds to nearest on truncation
        ++i;
    }
    return edges;
}
// functions for computing sizes
// Length of one subband after `depth` DWT halvings of a signal of
// `original_size` samples with a `wavelet_length`-tap filter.
std::size_t subband_size(std::size_t original_size, std::size_t depth, std::size_t wavelet_length)
{
    std::size_t len = original_size;
    std::size_t level = depth;
    while (level > 0) {
        len = (len + wavelet_length - 1) / 2.0; // truncated, matches dwt output length
        --level;
    }
    return len;
}
// Legacy inverse of subband_size, kept for reference: approximates the
// per-level floor with a float correction term and can be off by one.
std::size_t original_size_old(std::size_t subband_size, std::size_t depth, std::size_t wavelet_length)
{ // rounding issue
    std::size_t scale = 1;
    float correction = 0;
    for (std::size_t level = 1; level <= depth; ++level) {
        scale *= 2;
        correction += (wavelet_length - 2) / (float)scale; // -2 instead of -1 because of floor
    }
    return scale * (subband_size - correction);
}
// Exact inverse of subband_size: reconstructs the original signal length from
// a subband length after `depth` DWT levels with a `wavelet_length`-tap
// filter. (Removed the unused local `n` left over from the legacy version.)
std::size_t original_size(std::size_t subband_size, std::size_t depth, std::size_t wavelet_length)
{
    std::size_t sz = subband_size;
    for (std::size_t i = 1; i <= depth; ++i) {
        sz = sz * 2 - wavelet_length + 2; // undo one (sz + wavelet_length - 1) / 2 halving
    }
    return sz;
}
// Stub: the maximum decomposition level is not implemented yet; always 0.
std::size_t wmaxlevel(std::size_t input_size, int waveletType) { return 0; }
} // namespace metric
| 5,873
|
C++
|
.cpp
| 136
| 40.919118
| 117
| 0.72301
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,498
|
discrete_cosine.cpp
|
metric-space-ai_metric/metric/transform/discrete_cosine.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_TRANSFORM_DISCRETE_COSINE_CPP
#define _METRIC_TRANSFORM_DISCRETE_COSINE_CPP
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#include <cmath>
#include <vector>

#include "metric/3rdparty/DCT/fast-dct-lee.c"
namespace metric {
// apply forward or invese DCT depending on bool flag
// Transforms every column of Slices in place. If the largest absolute value
// exceeds 1 afterwards, the whole matrix is rescaled into [-1, 1].
// Returns false if any column transform fails (all columns are still tried).
template <class BlazeMatrix> bool apply_DCT(BlazeMatrix &Slices, bool inverse)
{
    bool return_value = true;
    std::vector<double> sample(Slices.rows()); // scratch buffer, reused per column (RAII, no leak)
    double maxval = 0;
    for (size_t n = 0; n < Slices.columns(); n++) {
        auto current_slice = column(Slices, n);
        size_t idx = 0;
        for (auto it = current_slice.begin(); it != current_slice.end(); ++it) {
            sample[idx++] = *it;
        }
        bool success = false;
        if (inverse)
            success = FastDctLee_inverseTransform(sample.data(), idx);
        else
            success = FastDctLee_transform(sample.data(), idx);
        if (success) {
            idx = 0;
            for (auto it = current_slice.begin(); it != current_slice.end(); ++it) {
                *it = sample[idx++];
                // std::abs keeps the double overload (plain `abs` can bind the
                // C int version); track the magnitude, not the signed value,
                // which the previous revision stored by mistake.
                if (std::abs(*it) > maxval)
                    maxval = std::abs(*it);
            }
        } else
            return_value = false; // flag is dropped in case of any failure
    }
    if (maxval > 1)
        Slices = evaluate(Slices / maxval);
    return return_value;
}
// STL-container variant of apply_DCT: transforms the first slice_len elements
// of every slice, where slice_len is cutoff_length (if non-zero) or the size
// of the first slice. For the inverse transform, results are rescaled by
// 2/slice_len as required by the DCT-III normalization.
// NOTE(review): cutoff_length is assumed to be <= every slice's size - not checked.
template <template <typename, typename> class OuterContainer, typename OuterAllocator,
          template <typename, typename> class InnerContainer, typename InnerAllocator, typename ValueType>
bool apply_DCT_STL(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> &Slices, bool inverse,
                   size_t cutoff_length = 0)
{
    OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> Slices_out;
    bool return_value = true;
    size_t slice_len = Slices[0].size();
    if (cutoff_length != 0)
        slice_len = cutoff_length;
    std::vector<double> sample(slice_len); // scratch buffer (RAII, replaces raw new/delete)
    double maxval = 0;
    for (size_t n = 0; n < Slices.size(); n++) {
        auto current_slice = Slices[n];
        size_t idx = 0;
        for (auto it = current_slice.begin(); it != current_slice.begin() + slice_len; ++it) {
            sample[idx++] = *it;
        }
        bool success = false;
        if (inverse)
            success = FastDctLee_inverseTransform(sample.data(), idx);
        else
            success = FastDctLee_transform(sample.data(), idx);
        if (success) {
            idx = 0;
            for (auto it = current_slice.begin(); it != current_slice.begin() + slice_len; ++it) {
                *it = sample[idx++];
                // std::abs keeps the double overload; maxval is currently only
                // tracked - the normalization that used it is disabled below.
                if (std::abs(*it) > maxval)
                    maxval = std::abs(*it);
            }
        } else
            return_value = false; // flag is dropped in case of any failure
        Slices_out.push_back(current_slice);
    }
    // (normalization of Slices_out by maxval intentionally disabled)
    if (inverse) {
        for (size_t n = 0; n < Slices_out.size(); n++) { // TODO optimize by using iterators
            for (size_t m = 0; m < slice_len; m++) {
                Slices_out[n][m] /= slice_len / 2.0;
            }
        }
    }
    Slices = Slices_out;
    return return_value;
}
} // namespace metric
#endif
| 3,204
|
C++
|
.cpp
| 99
| 29.575758
| 115
| 0.668392
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,499
|
helper_functions.cpp
|
metric-space-ai_metric/metric/transform/helper_functions.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#ifndef _METRIC_HELPER_FUNCTIONS_CPP
#define _METRIC_HELPER_FUNCTIONS_CPP
#include <algorithm>
namespace helper_functions {
// Rebuild the compressed vector so its capacity matches its non-zero count
// (blaze has no shrink_to_fit; swapping with a freshly built copy achieves it).
template <typename T, bool SO> void shrinkToFit(blaze::CompressedVector<T, SO> &mat)
{
    blaze::CompressedVector<T>(~mat).swap(~mat);
}
// Linear interpolation between a and b: returns a at t == 0, b at t == 1.
template <typename T> T Lerp(T a, T b, T t) { return (1 - t) * a + t * b; }
// Empirical quantiles via linear interpolation of the order statistics.
// `probs` holds probabilities in [0, 1]; the i-th result is the corresponding
// quantile of `data`. The local copy of `data` is sorted; the caller's
// container is untouched.
template <typename Container> Container quantile(Container data, const Container &probs)
{
    using T = typename Container::value_type;
    if (data.empty())
        return Container();
    if (data.size() == 1)
        return Container(1, data[0]);
    std::sort(data.begin(), data.end());
    Container result;
    for (std::size_t i = 0; i < probs.size(); ++i) {
        const T frac = probs[i];
        // fractional index of the requested quantile (inlined Lerp)
        const T hi_val = T(data.size() - 0.5);
        const T poi = (1 - frac) * T(-0.5) + frac * hi_val;
        const int lo = std::max(int(std::floor(poi)), int(0));
        const int hi = std::min(int(std::ceil(poi)), int(data.size() - 1));
        // interpolate between the two bracketing order statistics
        const T w = poi - lo;
        result.push_back((1 - w) * data[lo] + w * data[hi]);
    }
    return result;
}
// linear interpolation
// Piecewise-linear interpolation of the data (x, y) at the points xi.
// x must be ascending with at least 2 knots; when `extrapolate` is false,
// query points outside [x.front(), x.back()] are clamped to the boundary y.
// Fixed: the result was constructed with xi.size() zero elements and then
// push_back'ed onto, which returned 2*n values with a zero-filled prefix.
template <typename Container>
Container lininterp(const Container &x, const Container &y, Container xi, bool extrapolate = true)
{
    using T = typename Container::value_type;
    int n = x.size();
    Container yi; // one output value per query point
    for (int ii = 0; ii < (int)xi.size(); ++ii) {
        int i = 0; // find left end of interval for interpolation
        if (xi[ii] >= x[n - 2]) // special case: beyond right end
        {
            i = n - 2;
        } else {
            while (xi[ii] > x[i + 1])
                i++;
        }
        T xL = x[i], yL = y[i], xR = x[i + 1], yR = y[i + 1]; // points on either side (unless beyond ends)
        if (!extrapolate) // if beyond ends of array and not extrapolating
        {
            if (xi[ii] < xL)
                yR = yL;
            if (xi[ii] > xR)
                yL = yR;
        }
        T dydx = (yR - yL) / (xR - xL); // gradient
        yi.push_back(yL + dydx * (xi[ii] - xL));
    }
    return yi; // linear interpolation
}
// akima interpolation
/*
Ref. : Hiroshi Akima, Journal of the ACM, Vol. 17, No. 4, October 1970,
pages 589-602.
*/
// Akima spline interpolation of the data (x, y) at the query points xi.
// x must be strictly ascending and the same length as y; xi must be
// ascending. With save_Mode the inputs are validated; problems are reported
// on std::cout but the computation proceeds regardless.
template <typename Container>
Container akima(Container const &x, Container const &y, Container const &xi, bool save_Mode = true)
{
    using T = typename Container::value_type;
    // check inputs
    if (save_Mode) {
        auto diff = [](Container const &x) {
            Container v(x.size() - 1);
            for (int i = 1; i < x.size(); ++i) {
                v[i - 1] = x[i] - x[i - 1];
            }
            return v;
        };
        Container d = diff(x);
        if (x.size() != y.size())
            std::cout << "Error in rts::akima ==> input vectors must have the same length" << std::endl;
        if (!std::is_sorted(xi.begin(), xi.end()))
            std::cout << "Error in rts::akima ==> xi values do not have ascending order" << std::endl;
        if (!std::is_sorted(x.begin(), x.end()))
            std::cout << "Error in rts::akima ==> x values do not have ascending order" << std::endl;
        if (std::find_if(d.begin(), d.end(), [](T m) { return (m <= T(0)); }) != d.end())
            std::cout << "Error in rts::akima ==> x values contain distinct values" << std::endl;
    }
    // calculate u vector: secant slopes shifted by 2, padded with two
    // extrapolated slopes at each end (Akima's boundary rule 2*u1 - u2)
    auto uVec = [](Container const &x, Container const &y) {
        int n = x.size();
        Container u((n + 3));
        for (int i = 1; i < n; ++i) {
            u[i + 1] = (y[i] - y[i - 1]) / (x[i] - x[i - 1]); // Shift i to i+2
        }
        auto akima_end = [](const T &u1, const T &u2) { return 2.0 * u1 - u2; };
        u[1] = akima_end(u[2], u[3]);
        u[0] = akima_end(u[1], u[2]);
        u[n + 1] = akima_end(u[n], u[n - 1]);
        u[n + 2] = akima_end(u[n + 1], u[n]);
        return u;
    };
    Container u = uVec(x, y);
    // calculate yp vector: Akima-weighted derivative estimate at every knot
    Container yp(x.size());
    for (int i = 0; i < x.size(); ++i) {
        auto a = std::abs(u[i + 3] - u[i + 2]);
        auto b = std::abs(u[i + 1] - u[i]);
        if ((a + b) != 0) {
            yp[i] = (a * u[i + 1] + b * u[i + 2]) / (a + b);
        } else {
            // both weights vanish (locally linear data): average the slopes
            yp[i] = (u[i + 2] + u[i + 1]) / 2.0;
        }
    }
    // calculte interpolated yi values
    auto kFind = [](const T &xii, const Container &x, int start, int end) {
        int klo = start;
        int khi = end;
        // // Find subinterval by bisection
        while (khi - klo > 1) {
            int k = (khi + klo) / 2;
            x[k] > xii ? khi = k : klo = k;
        }
        return klo;
    };
    Container yi(xi.size());
    for (int i = 0; i < xi.size(); ++i) {
        // Find the right place in the table by means of a bisection.
        int k = kFind(xi[i], x, int(0), x.size() - 1);
        // Evaluate Akima polynomial
        T b = x[k + 1] - x[k];
        T a = xi[i] - x[k];
        yi[i] = y[k] + yp[k] * a + (3.0 * u[k + 2] - 2.0 * yp[k] - yp[k + 1]) * a * a / b +
                (yp[k] + yp[k + 1] - 2.0 * u[k + 2]) * a * a * a / (b * b);
    }
    return yi;
}
// pchip interpolation
/*!
// Reference:
// ==========
//
// F.N. Fritsch, R.E. Carlson:
// Monotone Piecewise Cubic Interpolation,
// SIAM J. Numer. Anal. Vol 17, No. 2, April 1980
//
// F.N. Fritsch and J. Butland:
// A method for constructing local monotone piecewise cubic interpolants,
// SIAM Journal on Scientific and Statistical Computing 5, 2 (June 1984), pp. 300-304.
*/
// Shape-preserving piecewise cubic Hermite interpolation of (x, y) at xi.
// Requires at least 4 ascending knots. Fixes vs. the previous revision:
//  * `delta` was sized and filled from xi instead of x, reading out of
//    bounds whenever xi.size() != x.size();
//  * the interior-slope loop ran to n-1 and read delta[n-1] out of bounds;
//  * pchip_end read its local `d` (sign(d)) before ever assigning it - the
//    three-point estimate is now computed first and then clamped.
template <typename Container>
Container pchip(Container const &x, Container const &y, Container const &xi, bool save_Mode = false)
{
    using T = typename Container::value_type;
    // successive differences of a vector (v[i] - v[i-1])
    auto diff = [](Container const &v) {
        Container out(v.size() - 1);
        for (std::size_t i = 1; i < v.size(); ++i) {
            out[i - 1] = v[i] - v[i - 1];
        }
        return out;
    };
    // optional input validation (reports problems, does not abort)
    if (save_Mode) {
        Container d = diff(x);
        if (x.size() != y.size())
            std::cout << "Error in rts::pchip ==> input vectors must have the same length" << std::endl;
        if (!std::is_sorted(xi.begin(), xi.end()))
            std::cout << "Error in rts::pchip ==> xi values do not have ascending order" << std::endl;
        if (!std::is_sorted(x.begin(), x.end()))
            std::cout << "Error in rts::pchip ==> x values do not have ascending order" << std::endl;
        if (std::find_if(d.begin(), d.end(), [](T m) { return (m <= T(0)); }) != d.end())
            std::cout << "Error in rts::pchip ==> x values contain distinct values" << std::endl;
    }
    // interval widths and secant slopes (one per interval of x)
    Container h = diff(x);
    Container delta(x.size() - 1);
    for (std::size_t i = 1; i < x.size(); ++i) {
        delta[i - 1] = (y[i] - y[i - 1]) / (x[i] - x[i - 1]);
    }
    auto pchip_slopes = [](Container const &h, Container const &delta) {
        // Slopes at interior points: weighted harmonic mean of the adjacent
        // secant slopes where they agree in sign, zero otherwise (this is
        // what preserves monotonicity).
        int n = (int)h.size() + 1;
        Container d(n, 0);
        auto sign = [](T val) { return (T(0) < val) - (val < T(0)); };
        std::vector<int> k;
        for (int i = 0; i < n - 2; ++i) {
            if ((sign(delta[i]) * sign(delta[i + 1])) > 0.0) {
                k.push_back(i);
            }
        }
        for (std::size_t i = 0; i < k.size(); ++i) {
            T hs = h[k[i]] + h[k[i] + 1];
            T w1 = (h[k[i]] + hs) / (3 * hs);
            T w2 = (hs + h[k[i] + 1]) / (3 * hs);
            T dmax = std::max(std::abs(delta[k[i]]), std::abs(delta[k[i] + 1]));
            T dmin = std::min(std::abs(delta[k[i]]), std::abs(delta[k[i] + 1]));
            d[k[i] + 1] = dmin / (w1 * (delta[k[i]] / dmax) + w2 * (delta[k[i] + 1] / dmax));
        }
        auto pchip_end = [](const T &h1, const T &h2, const T &del1, const T &del2) {
            auto sign = [](T val) { return (T(0) < val) - (val < T(0)); };
            // Noncentered, shape-preserving, three-point formula, clamped so
            // the boundary slope cannot break monotonicity.
            T d = ((2.0 * h1 + h2) * del1 - h1 * del2) / (h1 + h2);
            if (sign(d) != sign(del1)) {
                d = 0;
            } else if ((sign(del1) != sign(del2)) && (std::abs(d) > std::abs(3 * del1))) {
                d = 3.0 * del1;
            }
            return d;
        };
        // Slopes at endpoints
        d[0] = pchip_end(h[0], h[1], delta[0], delta[1]);
        d[n - 1] = pchip_end(h[n - 2], h[n - 3], delta[n - 2], delta[n - 3]);
        return d;
    };
    // Derivative values for shape-preserving Piecewise Cubic Hermite Interpolation
    Container d = pchip_slopes(h, delta);
    // Piecewise polynomial coefficients
    Container a(h.size() - 1);
    Container b(h.size() - 1);
    for (std::size_t i = 0; i < h.size() - 1; ++i) {
        a[i] = (3.0 * delta[i] - 2.0 * d[i] - d[i + 1]) / h[i];
        b[i] = (d[i] - 2.0 * delta[i] + d[i + 1]) / (h[i] * h[i]);
    }
    // calculte interpolated yi values
    auto kFind = [](const T &xii, const Container &x, int start, int end) {
        int klo = start;
        int khi = end;
        // // Find subinterval by bisection
        while (khi - klo > 1) {
            int k = (khi + klo) / 2;
            x[k] > xii ? khi = k : klo = k;
        }
        return klo;
    };
    Container yi(xi.size());
    for (std::size_t i = 0; i < xi.size(); ++i) {
        int k = kFind(xi[i], x, int(1), (int)x.size() - 2);
        T s = xi[i] - x[k];
        yi[i] = y[k] + s * (d[k] + s * (a[k] + s * b[k]));
    }
    return yi;
}
// Noise suppression by thresholding: values whose magnitude exceeds `tresh`
// are kept, together with their immediate neighbours (to preserve edges);
// runs of sub-threshold values become structural zeros. Returns the result
// as a sparse vector shrunk to its non-zero count.
template <typename Container>
blaze::CompressedVector<typename Container::value_type> smoothDenoise(Container const &data,
                                                                      typename Container::value_type const &tresh)
{
    // smooth reduces of noise by threshold and gives back a sparse vector.
    // initialize
    using T = typename Container::value_type;
    blaze::CompressedVector<T> svector(data.size());
    svector.reserve(data.size());
    bool lastEqualsZero;
    bool keepNext;
    lastEqualsZero = false;
    keepNext = false;
    for (int i = 0; i < data.size(); i++) {
        if (data[i] != T(0)) {
            if (std::abs(data[i]) > tresh) { // above the threshold
                if (lastEqualsZero == true) {
                    svector.set(
                        i - 1,
                        data[i - 1]); // do not zero the previous value after all, since the current one is non-zero
                    lastEqualsZero = false;
                }
                svector.append(i, data[i]);
                keepNext = true;
            } else { // below the threshold
                if (keepNext == true) {
                    svector.append(i, data[i]);
                }
                lastEqualsZero = true;
                keepNext = false;
            }
        }
    }
    shrinkToFit(svector);
    return svector;
}
// Convert a sparse vector to a dense std::vector in which every run of
// structural zeros is collapsed to at most two literal zeros around the
// non-zero values (the zero-padded representation used by the TWED/sed code).
// NOTE(review): the iterator type is hard-coded to CompressedVector<double>,
// which assumes T == double - confirm before using other value types.
template <typename T> std::vector<T> sparseToVector(blaze::CompressedVector<T> const &data)
{
    std::vector<T> values_zeropadded;
    T value;
    bool addZeroFront;
    bool addZeroLastBack;
    int index;
    int index_last = -1;
    for (blaze::CompressedVector<double>::ConstIterator it = data.cbegin(); it != data.cend(); ++it) {
        index = it->index(); // Read access to the index of the non-zero element.
        value = it->value(); // Read access to the value of the non-zero element.
        // pad in front of this element unless it directly follows the previous one
        if (index == index_last + 1) {
            addZeroFront = false;
        } else {
            addZeroFront = true;
        }
        // also close the previous run with a trailing zero when a gap > 1 follows
        if (index > index_last + 1 && index != 1 && index != index_last + 2) {
            addZeroLastBack = true;
        } else {
            addZeroLastBack = false;
        }
        if (addZeroLastBack == true) {
            values_zeropadded.push_back(0);
        }
        if (addZeroFront == true) {
            values_zeropadded.push_back(0);
        }
        values_zeropadded.push_back(value);
        index_last = index;
    }
    if (index_last < data.size() - 2) // second-to-last element absent
    {
        values_zeropadded.push_back(0);
    }
    if (index_last < data.size() - 1) {
        values_zeropadded.push_back(0);
    }
    return values_zeropadded;
}
// Generic-container variant of sparseToVector: same collapsing of zero runs,
// but the output container type is chosen by the caller.
// NOTE(review): kept in sync with sparseToVector by hand - consider unifying.
// The iterator type is hard-coded to CompressedVector<double>, which assumes
// the value type is double.
template <typename Container>
Container sparseToContainer(blaze::CompressedVector<typename Container::value_type> const &data)
{
    using T = typename Container::value_type;
    Container values_zeropadded;
    T value;
    bool addZeroFront;
    bool addZeroLastBack;
    int index;
    int index_last = -1;
    for (blaze::CompressedVector<double>::ConstIterator it = data.cbegin(); it != data.cend(); ++it) {
        index = it->index(); // Read access to the index of the non-zero element.
        value = it->value(); // Read access to the value of the non-zero element.
        // pad in front of this element unless it directly follows the previous one
        if (index == index_last + 1) {
            addZeroFront = false;
        } else {
            addZeroFront = true;
        }
        // also close the previous run with a trailing zero when a gap > 1 follows
        if (index > index_last + 1 && index != 1 && index != index_last + 2) {
            addZeroLastBack = true;
        } else {
            addZeroLastBack = false;
        }
        if (addZeroLastBack == true) {
            values_zeropadded.push_back(0);
        }
        if (addZeroFront == true) {
            values_zeropadded.push_back(0);
        }
        values_zeropadded.push_back(value);
        index_last = index;
    }
    if (index_last < data.size() - 2) // second-to-last element absent
    {
        values_zeropadded.push_back(0);
    }
    if (index_last < data.size() - 1) {
        values_zeropadded.push_back(0);
    }
    return values_zeropadded;
}
// Surround every run of non-zero entries of a sparse vector with explicit
// zero entries ("zero padding", as preparation for sed); an entirely empty
// vector gets explicit zeros at its first and last position.
template <typename T> blaze::CompressedVector<T> zeroPad(blaze::CompressedVector<T> const &data)
{
    // adds zero pads to blaze::sparsevector (for preparing sed)
    blaze::CompressedVector<T> data_zeropadded(data.size());
    data_zeropadded.reserve(2 + data.nonZeros() * 2);
    T value;
    bool addZeroFront;
    bool addZeroLastBack;
    int index;
    int index_last = -1;
    if (data.nonZeros() == 0) {
        data_zeropadded.set(0, T(0));
        data_zeropadded.set(data.size() - 1, T(0));
    } else {
        for (blaze::CompressedVector<double>::ConstIterator it = data.cbegin(); it != data.cend(); ++it) {
            index = it->index(); // Read access to the index of the non-zero element.
            value = it->value(); // Read access to the value of the non-zero element.
            // pad in front of this element unless it directly follows the previous one
            if (index == index_last + 1)
                addZeroFront = false;
            else
                addZeroFront = true;
            // also close the previous run with a trailing zero when a gap > 1 follows
            if (index > index_last + 1 && index != 1 && index != index_last + 2)
                addZeroLastBack = true;
            else
                addZeroLastBack = false;
            if (addZeroLastBack == true)
                data_zeropadded.append(index_last + 1, T(0));
            if (addZeroFront == true)
                data_zeropadded.append(index - 1, T(0));
            data_zeropadded.append(index, value);
            index_last = index;
        }
        if (index_last < data.size() - 2) // second-to-last element absent
        {
            data_zeropadded.append(index_last + 1, T(0));
        }
        if (index_last < data.size() - 1) {
            data_zeropadded.append(data.size() - 1, T(0));
        }
    }
    shrinkToFit(data_zeropadded);
    return data_zeropadded;
}
// distance measure by time elastic cost matrix.
// Time Warp Edit Distance (TWED, Marteau) between the sparse series As and
// Bs. The indices of the non-zero entries serve as time stamps; `penalty`
// (lambda) is the cost of a delete operation and `elastic` (nu) the
// stiffness penalizing time shifts. Computed row by row with two rolling
// buffers: O(|A|*|B|) time, O(|B|) memory.
// NOTE(review): the iterator type is hard-coded to CompressedVector<double>,
// which assumes T == double - confirm before using other value types.
template <typename T>
T TWED(blaze::CompressedVector<T> const &As, blaze::CompressedVector<T> const &Bs, T const &penalty, T const &elastic)
{
    // calculates the Time Warp Edit Distance (TWED) for the sparse vectors A(time) and B(time)
    //
    // A := values of timeseries A (e.g. [ 10 2 30 4])
    // B := values of timeseries B
    // time := time values
    // initialize
    // build zero padded vectors
    std::vector<T> A;
    A.reserve(As.nonZeros());
    std::vector<T> timeA;
    timeA.reserve(As.nonZeros());
    std::vector<T> B;
    B.reserve(Bs.nonZeros());
    std::vector<T> timeB;
    timeB.reserve(Bs.nonZeros());
    for (blaze::CompressedVector<double>::ConstIterator it = As.cbegin(); it != As.cend(); ++it) {
        timeA.push_back(it->index()); // Read access to the index of the non-zero element.
        A.push_back(it->value()); // Read access to the value of the non-zero element.
    }
    for (blaze::CompressedVector<double>::ConstIterator it = Bs.cbegin(); it != Bs.cend(); ++it) {
        timeB.push_back(it->index()); // Read access to the index of the non-zero element.
        B.push_back(it->value()); // Read access to the value of the non-zero element.
    }
    T C1, C2, C3;
    int sizeB = B.size();
    int sizeA = A.size();
    std::vector<T> D0(sizeB); // previous row of the cost matrix
    std::vector<T> Di(sizeB); // current row of the cost matrix
    // first element
    D0[0] = std::abs(A[0] - B[0]) + elastic * (std::abs(timeA[0] - 0)); // C3
    // first row
    for (int j = 1; j < sizeB; j++) {
        D0[j] = D0[j - 1] + std::abs(B[j - 1] - B[j]) + elastic * (timeB[j] - timeB[j - 1]) + penalty; // C2
    }
    // second-->last row
    for (int i = 1; i < sizeA; i++) {
        // every first element in row
        Di[0] = D0[0] + std::abs(A[i - 1] - A[i]) + elastic * (timeA[i] - timeA[i - 1]) + penalty; // C1
        // remaining elements in row
        for (int j = 1; j < sizeB; j++) {
            // C1: delete in A, C2: delete in B, C3: match both
            C1 = D0[j] + std::abs(A[i - 1] - A[i]) + elastic * (timeA[i] - timeA[i - 1]) + penalty;
            C2 = Di[j - 1] + std::abs(B[j - 1] - B[j]) + elastic * (timeB[j] - timeB[j - 1]) + penalty;
            C3 = D0[j - 1] + std::abs(A[i] - B[j]) + std::abs(A[i - 1] - B[j - 1]) +
                 elastic * (std::abs(timeA[i] - timeB[j]) + std::abs(timeA[i - 1] - timeB[j - 1]));
            Di[j] = (C1 < ((C2 < C3) ? C2 : C3)) ? C1 : ((C2 < C3) ? C2 : C3); // Di[j] = std::min({C1,C2,C3});
        }
        std::swap(D0, Di);
    }
    T rvalue = D0[sizeB - 1];
    return rvalue;
}
// rainflow counting algorithm
// Decomposes a load history into cycles using the three-point rainflow rule.
// Each returned row is {amplitude, mean, count} where count is 1.0 for a full
// cycle and 0.5 for a half cycle. time_in is accepted for interface
// compatibility but is not used by the counting itself.
template <typename Container> std::vector<Container> rfc(Container const &data, Container const &time_in)
{
    using T = typename Container::value_type;
    if (data.size() < 2) // fewer than two samples: no range, no cycles
        return {};
    // returns the indices of the turning points (local extrema) of s;
    // the first and the last sample always count as turning points
    auto findIndexOfTurningPoints = [](Container const &s) {
        std::vector<int> tp;
        tp.push_back(0); // index of the first value
                         // (bugfix: previously the *value* s[0] was pushed into this index list)
        auto diff_last = s[1] - s[0];
        for (size_t i = 1; i < s.size(); ++i) {
            auto diff = s[i] - s[i - 1];
            if (diff * diff_last < 0) { // sign change of the slope -> extremum at i-1
                tp.push_back(i - 1);
            }
            diff_last = diff;
        }
        if (tp[tp.size() - 1] != int(s.size()) - 1) // last value is set as extreme
        {
            tp.push_back(s.size() - 1);
        }
        return tp;
    };
    auto ext_index = findIndexOfTurningPoints(data);
    Container ext; // values at the turning points
    for (size_t i = 0; i < ext_index.size(); ++i) {
        ext.push_back(data[ext_index[i]]);
    }
    std::vector<Container> cycles;
    size_t i = 0;
    size_t j = 1; // j == i + 1 throughout the loop
    // three-point rule: compare range Y = |ext[i+1]-ext[i]| with the
    // following range X = |ext[j+1]-ext[j]|
    // (bugfix: the bound must guarantee ext[j+1] exists; the former
    //  "ext.size() > i + 1" condition read one element past the end)
    while (ext.size() > (j + 1)) {
        T Y = std::abs(ext[i + 1] - ext[i]);
        T X = std::abs(ext[j + 1] - ext[j]);
        if (X >= Y) {
            if (i == 0) // counts a half cycle and deletes the point that is counted
            {
                Container cycle_info = {std::abs(ext[i] - ext[i + 1]) / 2, (ext[i] + ext[i + 1]) / 2, 0.5};
                cycles.push_back(cycle_info);
                ext.erase(ext.begin());
            } else // counts one full cycle and deletes the points that are counted
            {
                Container cycle_info = {std::abs(ext[i] - ext[i + 1]) / 2, (ext[i] + ext[i + 1]) / 2, 1.0};
                cycles.push_back(cycle_info);
                ext.erase(ext.begin() + i, ext.begin() + i + 2);
            }
            i = 0;
            j = 1;
        } else {
            i += 1;
            j += 1;
        }
    }
    // the remaining residue is counted as half cycles
    // (k + 1 < size also stays safe for a residue of fewer than two points)
    for (size_t k = 0; k + 1 < ext.size(); ++k) {
        Container cycle_info = {std::abs(ext[k] - ext[k + 1]) / 2, (ext[k] + ext[k + 1]) / 2, 0.5};
        cycles.push_back(cycle_info);
    }
    return cycles;
}
// get subband stats
// Applies the Chebyshev polynomial of the first kind T_{polynom} element-wise
// to data. Supported orders are 0..7; any other order throws
// std::out_of_range (previously this indexed past the table -> UB).
template <typename Container> Container chebyshev(Container const &data, int polynom)
{
    using T = typename Container::value_type;
    Container r_data = {};
    // Chebyshev polynomials of the first kind, T0..T7
    static std::vector<std::function<T(T)>> chebyshevPolyFun{
        [](auto x) { return 1; },                                                                    // T0
        [](auto x) { return x; },                                                                    // T1
        [](auto x) { return 2 * std::pow(x, 2) - 1; },                                               // T2
        [](auto x) { return 4 * std::pow(x, 3) - 3 * x; },                                           // T3
        [](auto x) { return 8 * std::pow(x, 4) - 8 * std::pow(x, 2) + 1; },                          // T4
        [](auto x) { return 16 * std::pow(x, 5) - 20 * std::pow(x, 3) + 5 * x; },                    // T5
        [](auto x) { return 32 * std::pow(x, 6) - 48 * std::pow(x, 4) + 18 * std::pow(x, 2) - 1; },  // T6
        [](auto x) { return 64 * std::pow(x, 7) - 112 * std::pow(x, 5) + 56 * std::pow(x, 3) - 7 * x; } // T7
    };
    // bounds-checked lookup: throws for polynom < 0 or polynom > 7
    auto const &poly = chebyshevPolyFun.at(polynom);
    for (size_t i = 0; i < data.size(); ++i) {
        r_data.push_back(poly(data[i]));
    }
    return r_data;
}
// Rounds a value up to the nearest power of two; exact powers of two
// (frexp fraction == 0.5) are returned unchanged rather than doubled.
template <typename T> T nextpow2(T value)
{
    int exponent;
    const T fraction = std::frexp(value, &exponent);
    if (fraction == 0.5) {
        // value is already a precise power of two - keep it as is
        return value;
    }
    return std::ldexp(T(1), exponent);
}
// Linearly maps the values of A onto the interval [a, b], mirroring MATLAB's
// rescale(): all intermediate quotients are divided by powers of two
// (r1, r2, r3) to guard against overflow/underflow, and a constant input
// (min == max) is handled through the constReg flag.
template <typename Container>
Container rescale(Container const &A, typename Container::value_type a, typename Container::value_type b)
{
    using T = typename Container::value_type;
    Container out(A.size());
    T lo = *std::min_element(std::begin(A), std::end(A));
    T hi = *std::max_element(std::begin(A), std::end(A));
    bool constReg = (lo == hi); // degenerate case: every element equal
    // shift so that 0 stays inside the (shifted) data range
    T sigma = std::max(std::min(T(0), hi), lo);
    lo = lo - sigma;
    hi = hi - sigma;
    // power-of-two scale factors for numerically safe quotients
    T e1 = nextpow2(std::max(std::abs(hi), std::abs(lo)));
    T r1 = std::pow(2, (e1 - 1));
    T e2 = nextpow2(std::max(std::abs(a), std::abs(b)));
    T r2 = std::pow(2, (e2 - 1));
    T r3 = std::pow(2, (std::floor((e1 + e2) / 2) - 1));
    // intercept and slope of the affine map, expressed in scaled quotients
    T z = ((hi / r1) * (a / r3) - (lo / r1) * (b / r3) + (a / r3) * (T(constReg) / r1)) /
          ((hi / r1) - (lo / r1) + (T(constReg) / r1));
    T slope = ((b / r2) - (a / r2)) / ((hi / r3) - (lo / r3) + (T(constReg) / r3));
    for (size_t i = 0; i < A.size(); ++i) {
        out[i] = r2 * (slope / r3 * (A[i] - sigma) + (r3 / r2) * z);
    }
    return out;
}
} // namespace helper_functions
#endif
| 20,183
|
C++
|
.cpp
| 587
| 31.412266
| 118
| 0.596958
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,500
|
distance_potential_minimization.cpp
|
metric-space-ai_metric/metric/transform/distance_potential_minimization.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Michael Welsch
*/
//#define MEASURE
//#define DEBUG_OUTPUT
#include "distance_potential_minimization.hpp"
#include "metric/transform/wavelet2d.hpp" // for only Convolution2dCustom
#include <algorithm>
#include <cmath>
#include <iostream>
#include <iterator>
#include <set>
#include <tuple>
#ifndef M_PI
// MSVC does not define M_PI
#define M_PI 3.14159265358979323846
#endif
#ifdef MEASURE
#include <chrono>
#endif
namespace metric {
namespace DPM_detail {
/* Adds a one pixel reflective border around A: each new border row/column
   mirrors the second row/column from the corresponding edge (A row 1,
   row m-2, column 1, column n-2). Counterpart of removePad()/updatePad(). */
static blaze::DynamicMatrix<double> addPad(const blaze::DynamicMatrix<double> &A)
{
    const size_t m = A.rows();
    const size_t n = A.columns();
    // sentinel fill; every cell is overwritten below
    blaze::DynamicMatrix<double> B(m + 2, n + 2, -1.0);
    // interior: A shifted by one in both directions, plus left/right border
    for (size_t i = 0; i < m; ++i) {
        B(i + 1, 0) = A(i, 1);
        for (size_t j = 0; j < n; ++j) {
            B(i + 1, j + 1) = A(i, j);
        }
        B(i + 1, n + 1) = A(i, n - 2);
    }
    // top border mirrors row 1, bottom border mirrors row m-2
    for (size_t j = 0; j < n; ++j) {
        B(0, j + 1) = A(1, j);
        B(m + 1, j + 1) = A(m - 2, j);
    }
    // corners
    B(0, 0) = A(1, 1);
    B(0, n + 1) = A(1, n - 2);
    B(m + 1, 0) = A(m - 2, 1);
    B(m + 1, n + 1) = A(m - 2, n - 2);
    return B;
}
/* Strips the one pixel border added by addPad(), returning the interior
   (rows 1..m-2, columns 1..n-2) of A. */
static blaze::DynamicMatrix<double> removePad(const blaze::DynamicMatrix<double> &A)
{
    const size_t rows = A.rows() - 2;
    const size_t cols = A.columns() - 2;
    blaze::DynamicMatrix<double> B(rows, cols);
    for (size_t i = 0; i < rows; ++i) {
        for (size_t j = 0; j < cols; ++j) {
            B(i, j) = A(i + 1, j + 1);
        }
    }
    return B;
}
/* Refreshes the one pixel border of an already padded matrix in place so
   that every border cell again mirrors the value two cells inside
   (reflection across the first interior row/column). Called between GVF
   iterations after the interior has been updated. */
static void updatePad(blaze::DynamicMatrix<double> &A)
{
    const size_t m = A.rows();
    const size_t n = A.columns();
    // corners
    A(0, 0) = A(2, 2);
    A(0, n - 1) = A(2, n - 3);
    A(m - 1, 0) = A(m - 3, 2);
    A(m - 1, n - 1) = A(m - 3, n - 3);
    // top and bottom border rows
    for (size_t j = 1; j < n - 1; ++j) {
        A(0, j) = A(2, j);
        A(m - 1, j) = A(m - 3, j);
    }
    // left and right border columns
    for (size_t i = 1; i < m - 1; ++i) {
        A(i, 0) = A(i, 2);
        A(i, n - 1) = A(i, n - 3);
    }
}
/* Numerical gradient of f: central differences inside, one-sided forward/
   backward differences on the borders. Returns the pair (d/dy, d/dx) -
   note the deliberately swapped order expected by gvf(). */
static std::tuple<blaze::DynamicMatrix<double>, blaze::DynamicMatrix<double>>
gradient(const blaze::DynamicMatrix<double> &f)
{
    const size_t m = f.rows();
    const size_t n = f.columns();
    blaze::DynamicMatrix<double> fx(m, n);
    blaze::DynamicMatrix<double> fy(m, n);
    for (size_t i = 0; i < m; ++i) {
        for (size_t j = 0; j < n; ++j) {
            // horizontal derivative
            if (j == 0)
                fx(i, j) = f(i, 1) - f(i, 0);
            else if (j == n - 1)
                fx(i, j) = f(i, n - 1) - f(i, n - 2);
            else
                fx(i, j) = (f(i, j + 1) - f(i, j - 1)) / 2;
            // vertical derivative
            if (i == 0)
                fy(i, j) = f(1, j) - f(0, j);
            else if (i == m - 1)
                fy(i, j) = f(m - 1, j) - f(m - 2, j);
            else
                fy(i, j) = (f(i + 1, j) - f(i - 1, j)) / 2;
        }
    }
    return std::make_tuple(fy, fx); // (dy, dx) ordering, kept from the original
}
/* Discrete 5-point Laplacian of A; the outermost rows and columns of the
   result are set to zero. */
static blaze::DynamicMatrix<double> laplacian(const blaze::DynamicMatrix<double> &A)
{
    const size_t m = A.rows();
    const size_t n = A.columns();
    blaze::DynamicMatrix<double> B(m, n, 0.0); // border stays zero
    for (size_t i = 1; i + 1 < m; ++i) {
        for (size_t j = 1; j + 1 < n; ++j) {
            B(i, j) = A(i + 1, j) + A(i - 1, j) + A(i, j + 1) + A(i, j - 1) - double(4) * A(i, j);
        }
    }
    return B;
}
/* Estimates an initial circle from an intensity image: the center is the
   intensity-weighted centroid, the radius the intensity-weighted mean
   distance from that centroid. Returns (xc, yc, r). */
static std::tuple<double, double, double> initialCircle(const blaze::DynamicMatrix<double> &A)
{
    const size_t m = A.rows();
    const size_t n = A.columns();
    double xc = 0;
    double yc = 0;
    double sumI = 0; // total intensity (normalization weight)
    for (size_t i = 0; i < m; ++i) {
        for (size_t j = 0; j < n; ++j) {
            xc += (j * A(i, j));
            yc += (i * A(i, j));
            sumI += A(i, j);
        }
    }
    xc /= sumI;
    yc /= sumI;
    double r = 0;
    for (size_t i = 0; i < m; ++i) {
        for (size_t j = 0; j < n; ++j) {
            r += std::sqrt((i - yc) * (i - yc) + (j - xc) * (j - xc)) * A(i, j);
        }
    }
    r /= sumI;
    return std::make_tuple(xc, yc, r);
}
/* Returns n equally spaced values from a to b (inclusive), like MATLAB's
   linspace. For n <= 1 a single element {b} is returned.
   (fix: the loop counter was a signed int compared against size_t n) */
static std::vector<double> linspace(double a, double b, size_t n)
{
    std::vector<double> array;
    if (n > 1) {
        array.reserve(n); // single allocation
        const double step = (b - a) / double(n - 1);
        for (size_t count = 0; count < n; ++count) {
            array.push_back(a + count * step);
        }
    } else {
        array.push_back(b);
    }
    return array;
}
/* Return a 2d-grid of given ellipse parameter.
   the ellipse is represented by the following equation set
   x = xc + a*cos(theta)*cos(phi) - b*sin(theta)*sin(phi)
   y = yc + a*cos(theta)*sin(phi) + b*sin(theta)*cos(phi)
   m:   number of rows
   n:   number of columns
   xc:  center point of x-axis
   yc:  center of y-axis
   a:   elliptic parameter a
   b:   elliptic parameter b
   phi: rotation angle of the ellipse
   returns {x, y, theta}: 1-based pixel coordinates of the rasterized contour
   and the corresponding arc parameters, sorted by theta
*/
static std::vector<blaze::DynamicVector<double>> ellipse2grid(size_t m, size_t n, double xc, double yc, double a,
                                                              double b, double phi)
{
    // sample the arc finely enough that neighbouring samples land on
    // adjacent pixels (~one sample per pixel of circumference)
    auto theta = linspace(0, 2 * M_PI, std::round(2 * M_PI * std::max(a, b)));
    blaze::DynamicVector<double> x(theta.size());
    blaze::DynamicVector<double> y(theta.size());
    // rasterize: rotate/scale the unit circle and round to pixel centers
    for (size_t i = 0; i < theta.size(); ++i) {
        x[i] = std::round(xc + a * std::cos(theta[i]) * std::cos(phi) - b * std::sin(theta[i]) * std::sin(phi));
        y[i] = std::round(yc + a * std::cos(theta[i]) * std::sin(phi) + b * std::sin(theta[i]) * std::cos(phi));
    }
    // filter out non-unique pixel pairs; for duplicates the LAST occurrence
    // is kept (earlier ones are flagged 0 in `liste`)
    std::vector<int> liste(theta.size(), 1);
    size_t sum = theta.size();
    for (size_t i = 0; i < theta.size(); ++i) {
        for (size_t j = i + 1; j < theta.size(); ++j) {
            double d = std::abs(x[i] - x[j]) + std::abs(y[i] - y[j]);
            if (d == 0) {
                liste[i] = 0;
                sum -= 1; // NOTE(review): may decrement repeatedly for the same i; `sum` is unused below
            }
        }
    }
    std::vector<double> x1;
    std::vector<double> y1;
    std::vector<double> theta1;
    for (size_t i = 0; i < theta.size(); ++i) {
        if (liste[i] == 1) {
            x1.push_back(x[i]);
            y1.push_back(y[i]);
            theta1.push_back(theta[i]);
        }
    }
    // sort grid points by arc parameter (indirect stable sort over indices)
    std::vector<size_t> idx(theta1.size());
    std::iota(idx.begin(), idx.end(), 0);
    stable_sort(idx.begin(), idx.end(), [&theta1](size_t i1, size_t i2) { return theta1[i1] < theta1[i2]; });
    blaze::DynamicVector<double> x2(theta1.size());
    blaze::DynamicVector<double> y2(theta1.size());
    blaze::DynamicVector<double> theta2(theta1.size());
    for (size_t i = 0; i < theta1.size(); ++i) {
        x2[i] = x1[idx[i]];
        y2[i] = y1[idx[i]];
        theta2[i] = theta1[idx[i]];
    }
    // check boundaries: coordinates are 1-based and must lie inside the m x n image
    if ((blaze::min(x2) < 1) || (blaze::min(y2) < 1) || (blaze::max(x2) > n) || (blaze::max(y2) > m)) {
        std::cout << "Error: Contour out of image" << std::endl;
    }
    std::vector<blaze::DynamicVector<double>> result = {x2, y2, theta2};
    return result;
}
/* Net torsional moment of the vector field (u, v), sampled along the contour
   points (x, y), about the center (xc, yc). theta holds the arc parameters of
   the contour points, phi the ellipse rotation. */
static double torsion_moment(const blaze::DynamicMatrix<double> &u, const blaze::DynamicMatrix<double> &v,
                             const blaze::DynamicVector<double> &x, const blaze::DynamicVector<double> &y,
                             const blaze::DynamicVector<double> &theta, double xc, double yc, double phi)
{
    const size_t N = x.size();
    double torsional_moment = 0;
    for (size_t i = 0; i < N; i++) {
        // contour coordinates are 1-based; convert to 0-based matrix indices
        const size_t r = (size_t)y[i] - 1;
        const size_t c = (size_t)x[i] - 1;
        const double fx = u(r, c);
        const double fy = v(r, c);
        // lever arm: distance of the contour point from the center
        const double d = std::sqrt((x[i] - xc) * (x[i] - xc) + (y[i] - yc) * (y[i] - yc));
        // tangential force component times lever arm
        torsional_moment += ((-fx * std::sin(theta[i] + phi) + fy * std::cos(theta[i] + phi)) * d);
    }
    // NOTE(review): normalized by N^2, kept from the original — confirm N was not intended
    return torsional_moment / (N * (double)N);
}
/* Samples the vector field (u, v) at the contour points (x, y) and returns an
   N x 2 matrix of force vectors (column 0: x component, column 1: y component). */
static blaze::DynamicMatrix<double> contourForces(const blaze::DynamicMatrix<double> &u,
                                                  const blaze::DynamicMatrix<double> &v,
                                                  const blaze::DynamicVector<double> &x,
                                                  const blaze::DynamicVector<double> &y)
{
    const size_t N = x.size();
    blaze::DynamicMatrix<double> F(N, 2);
    for (size_t i = 0; i < N; i++) {
        // 1-based contour coordinates -> 0-based matrix indices
        const size_t r = (size_t)y[i] - 1;
        const size_t c = (size_t)x[i] - 1;
        F(i, 0) = u(r, c);
        F(i, 1) = v(r, c);
    }
    return F;
}
/* Mean projection of the vector field (u, v), sampled at the points (x, y),
   onto the direction (p_cos, p_sin). */
static double force(const blaze::DynamicMatrix<double> &u, const blaze::DynamicMatrix<double> &v,
                    const std::vector<double> &x, const std::vector<double> &y, double p_cos, double p_sin)
{
    const size_t N = x.size();
    double total = 0;
    for (size_t i = 0; i < N; i++) {
        const size_t r = (size_t)y[i] - 1; // 1-based -> 0-based row
        const size_t c = (size_t)x[i] - 1; // 1-based -> 0-based column
        total += u(r, c) * p_cos + v(r, c) * p_sin;
    }
    return total / N;
}
/* Searches for the equilibrium of the vector partial differential equation:
   one picture is static while a parametric model (an ellipse) is adjusted so
   that the distance potential, i.e. the gradient vector flow (gvf) field,
   along the model contour is minimized. The gvf field is produced by an
   explicit finite difference scheme (see gvf() below):
   v(x,y,t+dt) = v(x,y,t) + dt/(dx dy) g(|grad f|) L v(x,y,t) - h(|grad f|) [v(x,y,t) - grad f].
*/
static std::vector<double> fit_ellipse(const std::vector<double> &init, const blaze::DynamicVector<double> &increment,
                                       const blaze::DynamicVector<double> &threshold, const std::vector<double> &bound,
                                       const blaze::DynamicMatrix<double> &gvf_x,
                                       const blaze::DynamicMatrix<double> &gvf_y, size_t iter)
{
    // init      : starting parameters {xc, yc, a, b, phi}
    // increment : per-parameter step applied each iteration
    // threshold : per-parameter dead band on forces/torsion
    // bound     : {a_min, a_max, b_min, b_max}
    // gvf_x/y   : gradient vector flow field components
    // iter      : number of greedy update iterations
    // returns   : fitted parameters {xc, yc, a, b, phi}
    size_t m = gvf_x.rows();
    size_t n = gvf_x.columns();
    double xc = init[0];
    double yc = init[1];
    double a = init[2];
    double b = init[3];
    double phi = init[4];
    for (size_t it = 0; it < iter; ++it) {
        // compute grid points from ellipse parameter
        std::vector<blaze::DynamicVector<double>> x_y_theta = ellipse2grid(m, n, xc, yc, a, b, phi);
        // torsion along the ellipse about the center
        double torsion = torsion_moment(gvf_x, gvf_y, x_y_theta[0], x_y_theta[1], x_y_theta[2], xc, yc, phi);
#ifdef DEBUG_OUTPUT
        std::cout << "iteration " << it << ":\n";
        std::cout << "gvf_x: min: " << blaze::min(gvf_x) << ", max: " << blaze::max(gvf_x) << "\n";
        std::cout << "gvf_y: min: " << blaze::min(gvf_y) << ", max: " << blaze::max(gvf_y) << "\n";
#endif
        // update phi: rotate against the measured torsion (bang-bang step with dead band)
        if (torsion > threshold[4]) {
            phi = phi + increment[4];
        }
        if (torsion < -threshold[4]) {
            phi = phi - increment[4];
        }
        // F_around: mean force vector over the whole contour
        blaze::DynamicMatrix<double> iresult = contourForces(gvf_x, gvf_y, x_y_theta[0], x_y_theta[1]);
        blaze::DynamicVector<double, blaze::rowVector> F_round = blaze::sum<blaze::columnwise>(iresult);
        for (size_t i = 0; i < F_round.size(); ++i) {
            F_round[i] = F_round[i] / double(x_y_theta[2].size());
        }
        std::vector<double> Fround(F_round.size()); // NOTE(review): Fround is written but never read
        for (size_t i = 0; i < F_round.size(); ++i) {
            Fround[i] = F_round[i];
        }
        // split the contour points into four sectors by arc parameter theta:
        // 1: left (3pi/4..5pi/4), 2: right (<pi/4 or >7pi/4), 3: top, 4: bottom
        std::vector<double> x_index1;
        std::vector<double> y_index1;
        std::vector<double> x_index2;
        std::vector<double> y_index2;
        std::vector<double> x_index3;
        std::vector<double> y_index3;
        std::vector<double> x_index4;
        std::vector<double> y_index4;
        for (size_t i = 0; i < x_y_theta[0].size(); ++i) {
            if (x_y_theta[2][i] > M_PI * 3 / 4 && x_y_theta[2][i] < M_PI * 5 / 4) {
                x_index1.push_back(x_y_theta[0][i]);
                y_index1.push_back(x_y_theta[1][i]);
            }
            if (x_y_theta[2][i] < M_PI / 4 || x_y_theta[2][i] > M_PI * 7 / 4) {
                x_index2.push_back(x_y_theta[0][i]);
                y_index2.push_back(x_y_theta[1][i]);
            }
            if (x_y_theta[2][i] > M_PI / 4 && x_y_theta[2][i] < M_PI * 3 / 4) {
                x_index3.push_back(x_y_theta[0][i]);
                y_index3.push_back(x_y_theta[1][i]);
            }
            if (x_y_theta[2][i] > M_PI * 5 / 4 && x_y_theta[2][i] < M_PI * 7 / 4) {
                x_index4.push_back(x_y_theta[0][i]);
                y_index4.push_back(x_y_theta[1][i]);
            }
        }
        // mean force on each sector projected onto the (rotated) axis direction
        double F_left = force(gvf_x, gvf_y, x_index1, y_index1, std::cos(phi), std::sin(phi));
        double F_right = force(gvf_x, gvf_y, x_index2, y_index2, -std::cos(phi), -std::sin(phi));
        double F_up = force(gvf_x, gvf_y, x_index3, y_index3, std::sin(phi), -std::cos(phi));
        double F_down = force(gvf_x, gvf_y, x_index4, y_index4, -std::sin(phi), std::cos(phi));
        // update xc and yc from the mean contour force
        double F_left_right = F_round[0] * 1 + F_round[1] * 0;
        if (F_left_right > threshold[0]) {
            xc = xc + increment[0];
            ;
        } else if (F_left_right < -threshold[0]) {
            xc = xc - increment[0];
        }
        double F_down_up = F_round[0] * 0 + F_round[1] * 1;
        if (F_down_up > threshold[1]) {
            yc = yc + increment[1];
        } else if (F_down_up < -threshold[1]) {
            yc = yc - increment[1];
        }
        // update xc and yc again according to diagonal force (0.7071 ~= cos 45 deg)
        double F_diag1 = F_round[0] * 0.7071 + F_round[1] * 0.7071;
        if (F_diag1 > threshold[0] + threshold[1]) {
            xc = xc + increment[0];
            yc = yc + increment[1];
        } else if (F_diag1 < -threshold[0] - threshold[1]) {
            xc = xc - increment[0];
            yc = yc - increment[1];
        }
        double F_diag2 = F_round[0] * (-0.7071) + F_round[1] * 0.7071;
        if (F_diag2 > threshold[0] + threshold[1]) {
            xc = xc - increment[0];
            yc = yc + increment[1];
        } else if (F_diag2 < -threshold[0] - threshold[1]) {
            xc = xc + increment[0];
            yc = yc - increment[1];
        }
        // update a and b: net inward force shrinks the axis, outward force grows it
        if (F_left + F_right > threshold[2])
            a = a - increment[2];
        else if (F_left + F_right < -threshold[2])
            a = a + increment[2];
        if (F_up + F_down > threshold[3])
            b = b - increment[3];
        else if (F_up + F_down < -threshold[3])
            b = b + increment[3];
        // keep a as the major axis: swap and rotate by 90 degrees if b outgrew a
        if (b > a) {
            std::swap(a, b);
            phi = std::fmod(phi + M_PI / 2, M_PI);
        }
        // restrict a and b using lower and upper bounds
        if (a > bound[1])
            a = bound[1];
        if (a < bound[0])
            a = bound[0];
        if (b > bound[3])
            b = bound[3];
        if (b < bound[2])
            b = bound[2];
    }
    std::vector<double> result = {xc, yc, a, b, phi};
    return result;
}
/* Gradient Vector Flow (GVF): iteratively diffuses the gradient field of the
   (normalized) input f0 into homogeneous regions via an explicit finite
   difference scheme.
   alpha : time step of the explicit update
   mu    : regularization weight on the Laplacian (smoothing) term
   iter  : number of iterations
   Returns the pair (v, u) — note the deliberately swapped component order. */
static std::tuple<blaze::DynamicMatrix<double>, blaze::DynamicMatrix<double>>
gvf(const blaze::DynamicMatrix<double> &f0, double alpha, double mu, size_t iter)
{
    blaze::DynamicMatrix<double> f(f0);
    size_t m = f.rows();
    size_t n = f.columns();
    // normalization of the input to [0, 1]
    auto fmin = blaze::min(f);
    auto fmax = blaze::max(f);
    if (fmax <= fmin) {
        std::cout << "Error: constant Input Matrix." << std::endl;
    }
    for (size_t i = 0; i < m; i++) {
        for (size_t j = 0; j < n; j++) {
            f(i, j) = (f(i, j) - fmin) / (fmax - fmin);
        }
    }
    // add pads around the grid for processing
#ifdef MEASURE
    auto t1 = std::chrono::steady_clock::now();
#endif
    auto f2 = metric::DPM_detail::addPad(f);
#ifdef MEASURE
    auto t2 = std::chrono::steady_clock::now();
    auto seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
    std::cout << "--- in gvf: call of addPad took " << seconds << " s\n";
#endif
    // compute the gradient field of the padded image
#ifdef MEASURE
    t1 = std::chrono::steady_clock::now();
#endif
    auto [fx, fy] = metric::DPM_detail::gradient(f2);
#ifdef MEASURE
    t2 = std::chrono::steady_clock::now();
    seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
    std::cout << "--- in gvf: call of gradient took " << seconds << " s\n";
#endif
    // square of the gradient magnitude: weight of the data term in the update
    blaze::DynamicMatrix<double> fxy_square((m + 2), (n + 2));
    for (size_t i = 0; i < m + 2; i++) {
        for (size_t j = 0; j < n + 2; ++j) {
            fxy_square(i, j) = fx(i, j) * fx(i, j) + fy(i, j) * fy(i, j);
        }
    }
    // u1/v1: evolving field components, initialized with the raw gradient
    blaze::DynamicMatrix<double> u1(fx);
    blaze::DynamicMatrix<double> v1(fy);
    blaze::DynamicMatrix<double> Lu1;
    blaze::DynamicMatrix<double> Lv1;
#ifdef MEASURE
    seconds = 0;
    auto useconds2 = 0;
#endif
    for (size_t it = 0; it < iter; it++) {
#ifdef MEASURE
        t1 = std::chrono::steady_clock::now();
#endif
        // refresh the reflective border before each diffusion step
        metric::DPM_detail::updatePad(u1);
        metric::DPM_detail::updatePad(v1);
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        seconds += double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        t1 = std::chrono::steady_clock::now();
#endif
        Lu1 = metric::DPM_detail::laplacian(u1);
        Lv1 = metric::DPM_detail::laplacian(v1);
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        useconds2 += double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count());
#endif
        // explicit update: v <- v + alpha*(mu*Laplace(v) - |grad f|^2 * (v - grad f))
        for (size_t i = 0; i < (m + 2); i++) {
            for (size_t j = 0; j < (n + 2); ++j) {
                u1(i, j) = u1(i, j) + alpha * (mu * Lu1(i, j) - fxy_square(i, j) * (u1(i, j) - fx(i, j)));
                v1(i, j) = v1(i, j) + alpha * (mu * Lv1(i, j) - fxy_square(i, j) * (v1(i, j) - fy(i, j)));
            }
        }
    }
#ifdef MEASURE
    std::cout << "--- in gvf: " << iter << "*2 calls of updatePad took " << seconds << " s\n";
    std::cout << "--- in gvf: " << iter << "*2 calls of laplacian took " << useconds2 / 1000000 << " s\n";
    t1 = std::chrono::steady_clock::now();
#endif
    auto u2 = metric::DPM_detail::removePad(u1);
    auto v2 = metric::DPM_detail::removePad(v1);
#ifdef MEASURE
    t2 = std::chrono::steady_clock::now();
    seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
    std::cout << "--- in gvf: 2 calls of removePad took " << seconds << " s\n";
#endif
    return std::make_tuple(v2, u2); // (v, u) order kept from the original ("replaced by Max F")
}
/* Natural logarithm of the gamma function.
   - x < 0       : domain error; reports it and returns NaN
                   (bugfix: the original fell through and returned an
                   uninitialized local -> undefined behavior)
   - 0 < x < 10  : recurrence lnGamma(x) = lnGamma(x+1) - ln(x) lifts the
                   argument into the accurate range of the approximation
   - x >= 10     : asymptotic (Nemes-style) high-value approximation */
static double ln_gamma(double x)
{
    double g;
    if (x < 0) {
        std::cout << "error: ln_gamma(), undefined for z <= 0" << std::endl;
        return std::nan(""); // was: returned uninitialized g
    } else if (x < 10.0) {
        // Use recursive formula: Gamma(x+1) = x * Gamma(x)
        return ln_gamma(x + 1.0) - std::log(x);
    } else {
        // high value approximation
        g = 0.5 * (std::log(2 * M_PI) - std::log(x));
        g += x * (std::log(x + (1 / (12.0 * x - 0.1 / x))) - 1);
    }
    return g;
}
/* Natural log of the modified Bessel function of the first kind I_nu(z),
   from 64 terms of the power series
   I_nu(z) = (z/2)^nu * sum_k (z^2/4)^k / (k! * Gamma(nu+k+1)),
   with each term formed in log space to avoid intermediate overflow. */
static double ln_besseli(double _nu, double _z)
{
    const double log_half_z = std::log(0.5 * _z);
    const double prefactor = _nu * log_half_z; // log( (z/2)^nu )
    double series = 0.0;
    const size_t iterations = 64;
    for (size_t k = 0; k < iterations; ++k) {
        const double log_power = 2.0 * k * log_half_z;            // log( (z^2/4)^k )
        const double log_kfact = ln_gamma((double)k + 1.0);       // log( k! )
        const double log_gamma = ln_gamma(_nu + (double)k + 1.0); // log( Gamma(nu+k+1) )
        // accumulate the series in linear space
        series += std::exp(log_power - log_kfact - log_gamma);
    }
    return prefactor + std::log(series);
}
// I_v(z) : Modified Bessel function of the first kind, evaluated as
// exp(ln I_v(z)) via the log-space series above.
static double besseli(double nu, double z) { return std::exp(ln_besseli(nu, z)); }
/* Correct for the radius of curvature.
   input:
     r     : measured radius
     sigma : std of the Gaussian PSF that blurred the image
     iter  : number of fixed-point iterations
   output:
     R     : corrected radius */
static double correctCurve(double r, double sigma, size_t iter)
{
    const double var = sigma * sigma;
    double R = r;
    // fixed-point iteration; for large arguments (x >= 100) the Bessel
    // quotient I1(x)/I0(x) is replaced by a rational asymptotic approximation
    for (size_t it = 0; it < iter; ++it) {
        const double x = r * R / var;
        if (x < 100) {
            R = (r + var / r) * besseli(1, x) / besseli(0, x);
        } else {
            R = (r + var / r) * (128 * x * x - 48 * x - 15) / (128 * x * x + 16 * x + 9);
        }
    }
    return R;
}
// added by Max F
// Gaussian probability density with mean mu and standard deviation sigma:
// g(x) = exp(-(x-mu)^2 / (2*sigma^2)) / sqrt(2*pi*sigma^2)
template <typename T>
static T gauss(T x, T mu, T sigma)
{
    // bugfix: the exponent denominator must be 2*sigma^2; the previous
    // pow(2 * sigma, 2) evaluated to 4*sigma^2, which is inconsistent with
    // the sqrt(2*pi*sigma^2) normalization below.
    T d = x - mu;
    T expVal = -(d * d) / (2 * sigma * sigma);
    return std::exp(expVal) / std::sqrt(2 * M_PI * sigma * sigma);
}
/* Builds a square Gaussian convolution kernel for standard deviation sigma.
   The side length is round(6*sigma)+2, rounded up to an even number (~ +-3
   sigma of support); only one quadrant is evaluated and then mirrored into
   the other three.
   NOTE(review): the kernel is not normalized to sum 1 — callers appear to
   use the raw values; confirm before changing. */
template <typename T> static blaze::DynamicMatrix<T> gaussianKernel(T sigma)
{
    size_t sz = round(sigma * 6) + 2; // ~3 sigma on each side
    if (sz % 2 != 0)
        ++sz; // force an even side length so the center lies between pixels
    T center = T(sz) / 2.0;
    size_t c = center; // truncated center index: quadrant loops run over [0, c)
    auto kernel = blaze::DynamicMatrix<T>(sz, sz);
    T r, value;
    for (size_t i = 0; i < c; ++i) {
        for (size_t j = 0; j < c; ++j) {
            // radial distance of pixel (i, j) from the fractional center
            r = sqrt(pow(i - center, 2) + pow(j - center, 2));
            value = gauss(r, 0.0, sigma);
            // mirror the value into all four symmetric quadrants
            kernel(i, j) = value;
            kernel(sz - 1 - i, j) = value;
            kernel(i, sz - 1 - j) = value;
            kernel(sz - 1 - i, sz - 1 - j) = value;
        }
    }
    return kernel;
}
// using Convolution2d.hpp
// Thin specialization of metric::Convolution2d configured with zero padding,
// a POST/CIRCULAR pad model and a fixed stride of 1. Used by the blur helpers
// below.
template <typename T, size_t Channels> class Convolution2dCustomStride1 : public metric::Convolution2d<T, Channels> {
public:
    // imageWidth/imageHeight: input image size; kernelWidth/kernelHeight: filter size
    Convolution2dCustomStride1(size_t imageWidth, size_t imageHeight, size_t kernelWidth, size_t kernelHeight)
    {
        this->padWidth = 0;
        this->padHeight = 0;
        metric::PadDirection pd = metric::PadDirection::POST;
        metric::PadType pt = metric::PadType::CIRCULAR;
        size_t stride = 1;
        this->padModel = std::make_shared<metric::PadModel<T>>(pd, pt, 0);
        this->convLayer = std::make_shared<typename metric::Convolution2d<T, Channels>::ConvLayer2d>(
            imageWidth + this->padWidth, imageHeight + this->padHeight, 1, 1, kernelWidth, kernelHeight, stride);
    }
};
// template <typename T> // first version with bad black padding
// static blaze::DynamicMatrix<T> gaussianBlur(const blaze::DynamicMatrix<T> & img, T sigma) {
// auto kernel = gaussianKernel(sigma);
// auto conv = Convolution2dCustomStride1<T, 1>(img.columns(), img.rows(), kernel.columns(), kernel.rows());
// auto blurred = conv({img}, kernel)[0];
// blaze::DynamicMatrix<T> padded (img.rows(), img.columns(), 0);
// blaze::submatrix( // padding with black after conv
// padded,
// (img.rows() - blurred.rows())/2, (img.columns() - blurred.columns())/2,
// blurred.rows(), blurred.columns()
// ) = blurred;
// return padded;
// }
/* Blurs img with a Gaussian kernel of standard deviation sigma. The image is
   embedded, centered, in a black canvas of the full convolution size before
   filtering, so no image content is lost at the borders. */
template <typename T> static blaze::DynamicMatrix<T> gaussianBlur(const blaze::DynamicMatrix<T> &img, T sigma)
{
    auto kernel = gaussianKernel(sigma);
    blaze::DynamicMatrix<T> padded(img.rows() + kernel.rows() - 1, img.columns() + kernel.columns() - 1, 0);
    blaze::submatrix( // padding with black before conv
        padded, (padded.rows() - img.rows()) / 2, (padded.columns() - img.columns()) / 2, img.rows(), img.columns()) =
        img;
    // We can try constant padding here, BUT the black padded area looks more useful
    // since everything except the contour is noise, and we expect that the contour
    // line should not cross the border of the picture. Even if it does, constant
    // padding cannot reconstruct it properly and only distorts it (by adding bright
    // pixels outside the image border), causing irrelevant additional brightness
    // near image borders after the blur (convolution) is applied.
    auto conv = Convolution2dCustomStride1<T, 1>(padded.columns(), padded.rows(), kernel.columns(), kernel.rows());
    auto blurred = conv({padded}, kernel)[0];
    return blurred;
}
/* Convolves img with kernel and re-embeds the (smaller) valid-size result
   centered in a black matrix of the original image size, so the returned
   matrix always has the same dimensions as img. */
template <typename T>
static blaze::DynamicMatrix<T> blackPaddedConv(const blaze::DynamicMatrix<T> &img,
                                               const blaze::DynamicMatrix<T> &kernel)
{
    auto conv = Convolution2dCustomStride1<T, 1>(img.columns(), img.rows(), kernel.columns(), kernel.rows());
    auto blurred = conv({img}, kernel)[0];
    blaze::DynamicMatrix<T> padded(img.rows(), img.columns(), 0);
    blaze::submatrix( // center the convolution result in the black canvas (padding with black after conv)
        padded, (img.rows() - blurred.rows()) / 2, (img.columns() - blurred.columns()) / 2, blurred.rows(),
        blurred.columns()) = blurred;
    return padded;
}
} // end namespace DPM_detail
/* Convenience overload: rasterizes the contour points (x, y) into a
   grid_row x grid_column intensity image (value 100 at each point) and
   delegates to the matrix-based fit_hysteresis. */
static std::vector<double> fit_hysteresis(const blaze::DynamicVector<double> &x, const blaze::DynamicVector<double> &y,
                                          size_t grid_row, size_t grid_column, size_t steps, std::vector<double> sigma)
{
    blaze::DynamicMatrix<double> I = blaze::zero<double>(grid_row, grid_column);
    // NOTE(review): assumes every point lies inside the grid — no bounds check
    for (size_t k = 0; k < x.size(); ++k) {
        I((int)y[k], (int)x[k]) = 100;
    }
    return fit_hysteresis(I, steps, sigma);
}
/* Fits an ellipse to the intensity image I through a Gaussian scale-space
   schedule: for each sigma the image is blurred, a GVF field is computed and
   the ellipse parameters are refined with fit_ellipse.
   xc0, yc0, r0 : initial circle (center and radius)
   steps        : total fit iterations, split evenly across the sigmas
   sigma        : blur levels (coarse-to-fine schedule)
   incr, thresh : base step size and force dead band for fit_ellipse
   returns      : ellipse parameters {xc, yc, a, b, phi}, curvature-corrected */
static std::vector<double> fit_hysteresis(const blaze::DynamicMatrix<double> &I, double xc0, double yc0, double r0,
                                          size_t steps, std::vector<double> sigma, double incr, double thresh)
{
    std::vector<double> ep = {xc0, yc0, r0, r0, 0};  // initial parameter guess
    blaze::DynamicVector<double> increment = {incr, incr, incr, incr, M_PI / 180 * incr}; // increment in each iteration
    blaze::DynamicVector<double> threshold = {thresh, thresh, thresh, thresh,
                                              thresh}; // threshold for forces/torsional moments
    double half_min_size = (I.rows() < I.columns() ? I.rows() : I.columns()) / 2.0;
    std::vector<double> bound = {5, half_min_size, 5, half_min_size}; // the lower/upper bounds of a and b
    for (size_t i = 0; i < sigma.size(); ++i) {
#ifdef MEASURE
        auto t1 = std::chrono::steady_clock::now();
#endif
        auto gk = DPM_detail::gaussianKernel(
            sigma[i]); // kernel may be saved outside the function, TODO optimize this way if needed
#ifdef MEASURE
        auto t2 = std::chrono::steady_clock::now();
        auto seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: construction #" << i << " of gaussian kernel took " << seconds << " s\n";
        t1 = std::chrono::steady_clock::now();
#endif
        // blur the image at the current scale
        blaze::DynamicMatrix<double> I1 = DPM_detail::blackPaddedConv(I, gk);
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: call #" << i << " gaussian blur took " << seconds << " s\n";
        t1 = std::chrono::steady_clock::now();
#endif
#ifdef DEBUG_OUTPUT
        std::cout << "blur input: min: " << blaze::min(I) << ", max: " << blaze::max(I) << "\n";
        std::cout << "GVF input: min: " << blaze::min(I1) << ", max: " << blaze::max(I1) << "\n";
#endif
        // GVF with time step alpha = 0.1, regularization mu = 1, 10 iterations
        auto [u1, v1] = DPM_detail::gvf(I1, 0.1, 1, 10);
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: call #" << i << " of gvf took " << seconds << " s, queried 10 iterations\n";
        t1 = std::chrono::steady_clock::now();
#endif
        // step size and dead band scale with the current blur level
        ep = DPM_detail::fit_ellipse(ep, sigma[i] / 5 * increment, sigma[i] / 5 * threshold, bound, u1, v1,
                                     steps / sigma.size());
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: call #" << i << " of fit_ellipse took " << seconds << " s, queried "
                  << steps / sigma.size() << " iterations\n";
#endif
    }
    // ep == ellipse parameter [xc, yc, a, b, phi]
    ep[4] = std::fmod(ep[4], M_PI);
    // curve correction: compensate the Gaussian-blur bias on the measured radii
    double r1 = DPM_detail::correctCurve(std::pow(ep[3], 2) / ep[2], sigma[sigma.size() - 1], 100);
    double r2 = DPM_detail::correctCurve(std::pow(ep[2], 2) / ep[3], sigma[sigma.size() - 1], 100);
    ep[2] = std::pow(r1 * r2 * r2, (1.0 / 3.0));
    ep[3] = std::pow(r1 * r1 * r2, (1.0 / 3.0));
    return ep;
}
// Fit an ellipse to image I with a coarse-to-fine "hysteresis" over Gaussian
// blur scales: for each sigma the image is blurred, a gradient vector flow
// (GVF) field is computed, and the ellipse parameters are refined by
// fit_ellipse with step sizes and thresholds scaled by sigma.
// @param I     input image (grayscale intensity matrix)
// @param steps total iteration budget, divided evenly across sigma levels
// @param sigma blur scales processed in order (presumably coarse to fine)
// @return ellipse parameters [xc, yc, a, b, phi]
static std::vector<double> fit_hysteresis(const blaze::DynamicMatrix<double> &I, size_t steps,
                                          std::vector<double> sigma)
{
    auto [xc0, yc0, r0] = DPM_detail::initialCircle(I); // initial guess
    std::vector<double> ep = {xc0, yc0, r0, r0, 0}; // initial parameter guess: circle of radius r0, phi = 0
    blaze::DynamicVector<double> increment = {0.2, 0.2, 0.2, 0.2, M_PI / 180 * 0.2}; // increment in each iteration
    blaze::DynamicVector<double> threshold = {1e-6, 1e-6, 1e-6, 1e-6, 1e-6}; // threshold for forces/torsional moments
    std::vector<double> bound = {10, 200, 10, 200}; // the lower/upper bounds of a and b
    for (size_t i = 0; i < sigma.size(); ++i) {
        // blaze::DynamicMatrix<double> I1 = I; // TODO: replace with gaussian filter
        // auto I1=gaussianBlur(I,sigma[i]);
        auto gk = DPM_detail::gaussianKernel(
            sigma[i]); // kernel may be saved outside the function, TODO optimize this way if needed
        blaze::DynamicMatrix<double> I1 = DPM_detail::blackPaddedConv(I, gk);
#ifdef MEASURE
        auto t1 = std::chrono::steady_clock::now();
#endif
        // NOTE(review): a sibling variant of this function calls
        // gvf(I1, 0.1, 1, 10) with the two scalar arguments swapped —
        // confirm which argument order is intended.
        auto [u1, v1] = DPM_detail::gvf(I1, 1, 0.1, 10);
#ifdef MEASURE
        auto t2 = std::chrono::steady_clock::now();
        auto seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: call #" << i << " of gvf took " << seconds << " s, queried 10 iterations\n";
        t1 = std::chrono::steady_clock::now();
#endif
        // Refine the ellipse; per-level iteration budget is steps / sigma.size().
        ep = DPM_detail::fit_ellipse(ep, sigma[i] / 5 * increment, sigma[i] / 5 * threshold, bound, u1, v1,
                                     steps / sigma.size());
#ifdef MEASURE
        t2 = std::chrono::steady_clock::now();
        seconds = double(std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count()) / 1000000;
        std::cout << "-- in fit_hysteresis: call #" << i << " of fit_ellipse took " << seconds << " s, queried "
                  << steps / sigma.size() << " iterations\n";
#endif
    }
    // ep == ellipse parameter [xc, yc, a, b, phi]
    ep[4] = std::fmod(ep[4], M_PI); // wrap the orientation angle modulo pi
    // curve correction: adjust semi-axes using the last (finest) sigma,
    // presumably to compensate blur-induced curvature bias — see correctCurve
    double r1 = DPM_detail::correctCurve(std::pow(ep[3], 2) / ep[2], sigma[sigma.size() - 1], 100);
    double r2 = DPM_detail::correctCurve(std::pow(ep[2], 2) / ep[3], sigma[sigma.size() - 1], 100);
    ep[2] = std::pow(r1 * r2 * r2, (1.0 / 3.0));
    ep[3] = std::pow(r1 * r1 * r2, (1.0 / 3.0));
    return ep;
}
} // end namespace metric
| 30,790
|
C++
|
.cpp
| 826
| 34.474576
| 119
| 0.59849
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,501
|
wavelet.cpp
|
metric-space-ai_metric/metric/transform/wavelet.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#ifndef _METRIC_TRANSFORM_WAVELET_CPP
#define _METRIC_TRANSFORM_WAVELET_CPP
#include "wavelet.hpp"
#include <algorithm>
#include <cmath> // for only sqrt in DaubechiesMat
#include <memory>
#include <type_traits>
namespace wavelet {
// template <typename T, bool SO>
// void shrinkToFit(blaze::CompressedVector<T, SO>& mat)
//{
// blaze::CompressedVector<T>(~mat).swap(~mat);
// } // moved to helper_functions.cpp
// valid convolution
/// Valid-mode convolution: the output covers only positions where the
/// shorter input fully overlaps the longer one, so its length is
/// |f.size() - g.size()| + 1.
template <typename Container> Container conv_valid(Container const &f, Container const &g)
{
    using El = types::index_value_type_t<Container>;
    int const len_f = f.size();
    int const len_g = g.size();
    // Treat the shorter input as the kernel, the longer one as the signal.
    Container const &kernel = (len_f < len_g) ? f : g;
    Container const &signal = (len_f < len_g) ? g : f;
    int const out_len = std::max(len_f, len_g) - std::min(len_f, len_g) + 1;
    Container result(out_len, El());
    int const klen = kernel.size();
    for (int pos = 0; pos < out_len; ++pos) {
        // Kernel is applied reversed, sliding over the signal.
        for (int t = 0; t < klen; ++t)
            result[pos] += kernel[klen - 1 - t] * signal[pos + t];
    }
    return result;
}
// full convolution
/// Full convolution of f and g; output length is f.size() + g.size() - 1.
template <typename Container> Container conv(Container const &f, Container const &g)
{
    using El = types::index_value_type_t<Container>;
    int const len_f = f.size();
    int const len_g = g.size();
    int const out_len = len_f + len_g - 1;
    Container result(out_len, El());
    // Scatter every product f[j] * g[k] into result[j + k]; for a fixed
    // output index this accumulates in ascending j, matching the classic
    // gather formulation term for term.
    for (int j = 0; j < len_f; ++j) {
        for (int k = 0; k < len_g; ++k) {
            result[j + k] += f[j] * g[k];
        }
    }
    return result;
}
// linspace (erzeugt einen linearen Datenvektor)
/// Generate n evenly spaced values from a to b inclusive.
/// For n <= 1 a single-element container holding b is returned
/// (mirroring MATLAB's linspace(a, b, 1)).
template <typename Container>
Container linspace(typename Container::value_type a, typename Container::value_type b, int n)
{
    using V = typename Container::value_type;
    Container points;
    if (n > 1) {
        const V step = (b - a) / V(n - 1);
        for (int k = 0; k < n; ++k)
            points.push_back(a + k * step);
    } else {
        points.push_back(b);
    }
    return points;
}
// upsconv
/// Dyadic upsampling followed by convolution: insert a zero between each
/// pair of samples of x, convolve with filter f, and keep the centered
/// len-sample window of the result.
/// @param x   input coefficients
/// @param f   reconstruction filter
/// @param len number of output samples to keep (centered crop)
/// @return the cropped reconstruction, length len
///
/// Fixes an off-by-one in the crop loop: the original iterated len + 1
/// times and wrote out[len], one element past the end of the output
/// container (the commented-out erase() calls show the intended
/// [first-1, first-1+len) window).
template <typename Container> Container upsconv(Container const &x, Container const &f, int len)
{
    // Upsample by 2: x0 0 x1 0 ... x_{n-1} (no trailing zero).
    Container dyay(x.size() * 2 - 1);
    for (size_t i = 0, j = 0; i < x.size(); ++i, j += 2) {
        dyay[j] = x[i];
        if (j + 1 < dyay.size())
            dyay[j + 1] = 0.0;
    }
    Container cnv = conv(dyay, f);
    // Centered crop: keep cnv[first - 1 .. first - 1 + len - 1].
    int const first = 1 + (static_cast<int>(cnv.size()) - len) / 2; // floor included
    Container out(len);
    for (int i = 0; i < len; ++i)
        out[i] = cnv[first - 1 + i];
    return out;
}
/// Daubechies scaling-filter coefficients (db1 .. db10), matching MATLAB's
/// dbwavf naming. Each filter has 2 * wnum taps that sum to 1; orthfilt
/// later rescales them by sqrt(2) to build the orthonormal filter bank.
/// @param wnum Daubechies order, valid range 1..10 (asserted)
/// @return the filter taps; empty container only on the (unreachable after
///         the asserts) default branch
template <typename Container> Container dbwavf(const int wnum)
{
    assert(wnum <= 10);
    assert(wnum > 0);
    switch (wnum) {
    case 1: // db1 == Haar
        return {0.50000000000000, 0.50000000000000};
    case 2:
        return {0.34150635094622, 0.59150635094587, 0.15849364905378, -0.09150635094587};
    case 3:
        return {0.23523360389270, 0.57055845791731, 0.32518250026371,
                -0.09546720778426, -0.06041610415535, 0.02490874986589};
    case 4:
        return {0.16290171402562, 0.50547285754565, 0.44610006912319, -0.01978751311791,
                -0.13225358368437, 0.02180815023739, 0.02325180053556, -0.00749349466513};
    case 5:
        return {0.11320949129173, 0.42697177135271, 0.51216347213016, 0.09788348067375, -0.17132835769133,
                -0.02280056594205, 0.05485132932108, -0.00441340005433, -0.00889593505093, 0.00235871396920};
    case 6:
        return {0.07887121600145072, 0.3497519070376178, 0.5311318799408691, 0.22291566146501776,
                -0.15999329944606142, -0.09175903203014758, 0.0689440464873723, 0.019461604854164663,
                -0.022331874165094537, 0.0003916255761485779, 0.003378031181463938, -0.0007617669028012533};
    case 7:
        return {0.05504971537285, 0.28039564181304, 0.51557424581833, 0.33218624110566, -0.10175691123173,
                -0.15841750564054, 0.05042323250485, 0.05700172257986, -0.02689122629486, -0.01171997078235,
                0.00887489618962, 0.00030375749776, -0.00127395235906, 0.00025011342658};
    case 8:
        return {0.03847781105406, 0.22123362357624, 0.47774307521438, 0.41390826621166,
                -0.01119286766665, -0.20082931639111, 0.00033409704628, 0.09103817842345,
                -0.01228195052300, -0.03117510332533, 0.00988607964808, 0.00618442240954,
                -0.00344385962813, -0.00027700227421, 0.00047761485533, -0.00008306863060};
    case 9:
        return {0.02692517479416, 0.17241715192471, 0.42767453217028, 0.46477285717278, 0.09418477475112,
                -0.20737588089628, -0.06847677451090, 0.10503417113714, 0.02172633772990, -0.04782363205882,
                0.00017744640673, 0.01581208292614, -0.00333981011324, -0.00302748028715, 0.00130648364018,
                0.00016290733601, -0.00017816487955, 0.00002782275679};
    case 10:
        return {0.01885857879640, 0.13306109139687, 0.37278753574266, 0.48681405536610, 0.19881887088440,
                -0.17666810089647, -0.13855493935993, 0.09006372426666, 0.06580149355070, -0.05048328559801,
                -0.02082962404385, 0.02348490704841, 0.00255021848393, -0.00758950116768, 0.00098666268244,
                0.00140884329496, -0.00048497391996, -0.00008235450295, 0.00006617718320, -0.00000937920789};
    default:
        return {};
    }
}
// orthfilt
/// Build the four orthogonal filter banks from a scaling filter W_in (as
/// returned by dbwavf).
/// @return {Lo_D, Hi_D, Lo_R, Hi_R}: decomposition low/high-pass and
///         reconstruction low/high-pass filters.
///
/// Cleanup: the original computed a sum of W_in into W_in_sum (and declared
/// the El alias for it) but never used the value — the dead code is removed,
/// which also drops the dependency on types::index_value_type_t here.
/// Loop indices are size_t to avoid signed/unsigned comparisons.
template <typename Container> std::tuple<Container, Container, Container, Container> orthfilt(Container const &W_in)
{
    // Quadrature mirror filter: reverse the taps, then negate every second
    // one, starting at index 1 for even lengths and index 2 for odd lengths.
    auto qmf = [](Container const &x) {
        Container y(x.size());
        for (size_t i = 0; i < x.size(); ++i)
            y[i] = x[x.size() - 1 - i];
        size_t const first = (y.size() % 2 == 0) ? 1 : 2;
        for (size_t i = first; i < y.size(); i += 2)
            y[i] = -y[i];
        return y;
    };
    // Rescale by sqrt(2) to obtain the orthonormal reconstruction filter.
    auto scaled = [](Container const &x) {
        Container out(x.size());
        for (size_t i = 0; i < x.size(); ++i)
            out[i] = std::sqrt(2) * (x[i]);
        return out;
    };
    Container Lo_R = scaled(W_in);
    Container Hi_R = qmf(Lo_R);
    // Decomposition filters are the reversed reconstruction filters.
    Container Hi_D(Hi_R.size());
    for (size_t i = 0; i < Hi_R.size(); ++i)
        Hi_D[i] = Hi_R[Hi_R.size() - 1 - i];
    Container Lo_D(Lo_R.size());
    for (size_t i = 0; i < Lo_R.size(); ++i)
        Lo_D[i] = Lo_R[Lo_R.size() - 1 - i];
    return {Lo_D, Hi_D, Lo_R, Hi_R};
}
// dwt
/// Single-level discrete wavelet transform of a 1-D signal: symmetric
/// (mirror) extension of lf - 1 samples at both ends, valid-mode convolution
/// with the decomposition filters, then downsampling by 2.
/// @param x           input signal
/// @param waveletType Daubechies order forwarded to dbwavf
/// @return {approximation, detail} coefficient vectors
///
/// Cleanup: removed the unused `using El = types::index_value_type_t<...>`
/// alias (nothing in the body referenced it).
template <typename Container> std::tuple<Container, Container> dwt(Container const &x, int waveletType)
{
    Container F = dbwavf<Container>(waveletType);
    auto [Lo_D, Hi_D, Lo_R, Hi_R] = orthfilt(F);
    int const lf = Lo_D.size();
    int const lx = x.size();
    int const first = 2;
    int const lenEXT = lf - 1;
    int const last = lx + lf - 1;
    // Symmetric extension: reflect lenEXT samples on each side of x.
    Container x_ext(lx + 2 * lenEXT);
    for (int i = 0; i < lenEXT; ++i)
        x_ext[i] = x[lenEXT - 1 - i];
    for (int i = 0; i < lx; ++i)
        x_ext[lenEXT + i] = x[i];
    for (int i = 0; i < lenEXT; ++i)
        x_ext[lenEXT + lx + i] = x[lx - 1 - i];
    Container z1 = conv_valid(x_ext, Lo_D);
    Container z2 = conv_valid(x_ext, Hi_D);
    // Downsample by 2: keep every second sample starting at index first - 1.
    size_t const len = (last - first + 2) / 2;
    Container a(len);
    Container d(len);
    size_t cnt = 0;
    for (int i = first - 1; i < last; i = i + 2) {
        a[cnt] = z1[i];
        d[cnt] = z2[i];
        ++cnt;
    }
    return {a, d};
}
/// Single-level inverse DWT: upsample-and-convolve the approximation a and
/// detail d with the reconstruction filters, then sum the two branches.
/// @param a  approximation coefficients
/// @param d  detail coefficients
/// @param lx desired output length (length of the next finer level)
/// @return the reconstructed signal of length lx
///
/// Cleanup: removed the unused `using El = types::index_value_type_t<...>`
/// alias (nothing in the body referenced it).
template <typename Container> Container idwt(Container a, Container d, int waveletType, int lx)
{
    Container F = dbwavf<Container>(waveletType);
    auto [Lo_D, Hi_D, Lo_R, Hi_R] = orthfilt(F);
    Container branch_a = upsconv(a, Lo_R, lx);
    Container branch_d = upsconv(d, Hi_R, lx);
    // Element-wise sum of the low-pass and high-pass branches.
    Container result(branch_a.size());
    for (size_t i = 0; i < branch_a.size(); ++i)
        result[i] = branch_a[i] + branch_d[i];
    return result;
}
/// Maximum sensible wavelet decomposition depth for a signal of length
/// sizeX: floor(log2(sizeX / (filter_length - 1))), clamped to be >= 0.
static int wmaxlev(int sizeX, int waveletType)
{
    const auto filters = orthfilt(dbwavf<std::vector<double>>(waveletType));
    const auto &Lo_D = std::get<0>(filters);
    const int lev = (int)(std::log2((double)sizeX / ((double)Lo_D.size() - 1.0)));
    return lev >= 1 ? lev : 0;
}
/// Multi-level wavelet decomposition. The returned deque is laid out as
/// {approximation, detail_order, ..., detail_1, zeros}, where the trailing
/// zero vector has the original signal length (waverec reads the sub-band
/// sizes to reconstruct).
/// @param x           input signal
/// @param order       number of decomposition levels
/// @param waveletType Daubechies order forwarded to dwt
///
/// Cleanup: removed the outer `Container d;` — the structured binding inside
/// the loop declares its own `d`, so the outer one was never used.
template <typename Container> std::deque<Container> wavedec(Container const &x, int order, int waveletType)
{
    std::deque<Container> subBands;
    Container x_tmp = x;
    // Trailing placeholder whose size records the original signal length.
    Container zeros(x.size(), 0);
    subBands.push_front(zeros);
    for (int k = 0; k < order; ++k) {
        auto [approx, detail] = dwt(x_tmp, waveletType);
        x_tmp = approx;
        subBands.push_front(detail);
    }
    subBands.push_front(x_tmp); // final approximation goes in front
    return subBands;
}
/// Reconstruct a signal from the sub-band layout produced by wavedec:
/// {approximation, detail_order, ..., detail_1, zeros(original length)}.
/// Each detail band is merged back into the running approximation via idwt;
/// the size of sub-band p + 1 gives the target length of level p.
///
/// Cleanup: loop indices are size_t (the original compared int against
/// size()), the running approximation replaces the deque that kept every
/// intermediate result, and the misspelled local `l_devide` is renamed.
template <typename Container> Container waverec(std::deque<Container> const &subBands, int waveletType)
{
    // Sub-band lengths; entry p + 1 is the reconstruction length for level p.
    std::vector<int> lengths;
    lengths.reserve(subBands.size());
    for (size_t i = 0; i < subBands.size(); ++i)
        lengths.push_back(static_cast<int>(subBands[i].size()));
    Container approx = subBands[0];
    for (size_t p = 1; p + 1 < lengths.size(); ++p)
        approx = idwt(approx, subBands[p], waveletType, lengths[p + 1]);
    return approx;
}
// 2d functions
/// 2-D single-level DWT for a row-major vector-of-vectors image: every row
/// is first split into low/high halves (l, h), then every column of those
/// halves is split again, yielding the four sub-bands.
/// @param x           image as vector of equally sized rows
/// @param waveletType Daubechies order forwarded to dwt
/// @return tuple (ll, lh, hl, hh); each sub-band keeps the column count of
///         l/h but has the row count produced by the column-wise dwt
template <typename Container>
typename std::enable_if<
    !blaze::IsMatrix<Container>::value,
    std::tuple<std::vector<Container>, std::vector<Container>, std::vector<Container>, std::vector<Container>>>::type
dwt2(std::vector<Container> const &x, int waveletType)
// template <typename Container>
// std::tuple<std::vector<Container>, std::vector<Container>, std::vector<Container>, std::vector<Container>>
// dwt2(std::vector<Container> const & x, int waveletType)
{
    std::vector<Container> ll, lh, hl, hh, l, h;
    l = std::vector<Container>(x.size());
    h = std::vector<Container>(x.size());
    for (size_t row_idx = 0; row_idx < x.size(); ++row_idx) { // top-level split, by rows
        auto row_split = dwt(x[row_idx], waveletType);
        l[row_idx] = std::get<0>(row_split);
        h[row_idx] = std::get<1>(row_split);
    }
    assert(l[0].size() == h[0].size()); // TODO remove after testing
    // bool vector_empty = true;
    for (size_t col_idx = 0; col_idx < l[0].size(); col_idx++) { // 2 lower level splits, by columns
        // Gather column col_idx of both halves into contiguous vectors.
        Container l_col(l.size());
        Container h_col(h.size());
        for (size_t row_idx = 0; row_idx < l.size(); ++row_idx) { // we assume sizes of l and r are equal
            l_col[row_idx] = l[row_idx][col_idx];
            h_col[row_idx] = h[row_idx][col_idx];
        }
        {
            auto col_split_l = dwt(l_col, waveletType);
            assert(std::get<0>(col_split_l).size() == std::get<1>(col_split_l).size()); // TODO remove after testing
            // if (vector_empty) {
            if (col_idx < 1) { // first iteration only
                // init: output row count is only known after the first column dwt
                size_t r_sz = std::get<0>(col_split_l).size();
                ll = std::vector<Container>(r_sz, Container(l[0].size(), 0));
                lh = std::vector<Container>(r_sz, Container(l[0].size(), 0));
                hl = std::vector<Container>(r_sz, Container(l[0].size(), 0));
                hh = std::vector<Container>(r_sz, Container(l[0].size(), 0));
                // vector_empty = false;
            }
            // std::vector<Container> ll_col, lh_col;
            // ll_col.push_back(std::get<0>(col_split_l));
            // lh_col.push_back(std::get<1>(col_split_l));
            for (size_t row_idx = 0; row_idx < std::get<0>(col_split_l).size(); ++row_idx) {
                ll[row_idx][col_idx] = std::get<0>(col_split_l)[row_idx];
                lh[row_idx][col_idx] = std::get<1>(col_split_l)[row_idx];
            }
        } // remove col_split_l from memory
        {
            auto col_split_h = dwt(h_col, waveletType);
            // std::vector<Container> hl_col, hh_col;
            // hl_col.push_back(std::get<0>(col_split_h));
            // hh_col.push_back(std::get<1>(col_split_h));
            assert(std::get<0>(col_split_h).size() == std::get<1>(col_split_h).size()); // TODO remove after testing
            for (size_t row_idx = 0; row_idx < std::get<0>(col_split_h).size(); ++row_idx) {
                hl[row_idx][col_idx] = std::get<0>(col_split_h)[row_idx];
                hh[row_idx][col_idx] = std::get<1>(col_split_h)[row_idx];
            }
        }
    }
    return std::make_tuple(ll, lh, hl, hh);
}
/// 2-D single-level DWT for Blaze matrices: every row is transformed first
/// (producing the low/high halves l, h), then every column of those halves,
/// yielding the four sub-bands.
/// @param x           input matrix
/// @param waveletType Daubechies order forwarded to dwt
/// @return tuple (ll, lh, hl, hh)
///
/// Cleanup: removed a stray empty statement (`;`) left after the h_col
/// declaration.
template <typename Container2d>
typename std::enable_if<blaze::IsMatrix<Container2d>::value,
                        std::tuple<Container2d, Container2d, Container2d, Container2d>>::type
dwt2(Container2d const &x, int waveletType)
{
    using El =
        typename Container2d::ElementType; // now we support only Blaze matrices, TODO add type traits, generalize!!
    Container2d ll, lh, hl, hh, l, h; // TODO use sparsed if input is sparsed
    for (size_t row_idx = 0; row_idx < x.rows(); ++row_idx) { // top-level split, by rows
        blaze::DynamicVector<El, blaze::rowVector> curr_row = blaze::row(x, row_idx);
        auto row_split = dwt(curr_row, waveletType);
        if (row_idx < 1) { // first iteration only: output widths known after first dwt
            l = Container2d(x.rows(), std::get<0>(row_split).size());
            h = Container2d(x.rows(), std::get<1>(row_split).size());
        }
        blaze::row(l, row_idx) = std::get<0>(row_split);
        blaze::row(h, row_idx) = std::get<1>(row_split);
    }
    for (size_t col_idx = 0; col_idx < l.columns(); col_idx++) { // 2 lower level splits, by columns
        blaze::DynamicVector<El> l_col = blaze::column(l, col_idx);
        blaze::DynamicVector<El> h_col = blaze::column(h, col_idx);
        {
            auto col_split_l = dwt(l_col, waveletType);
            if (col_idx < 1) { // first iteration only: output heights known now
                size_t r_sz = std::get<0>(col_split_l).size();
                ll = Container2d(r_sz, l.columns());
                lh = Container2d(r_sz, l.columns());
                hl = Container2d(r_sz, l.columns());
                hh = Container2d(r_sz, l.columns());
            }
            blaze::column(ll, col_idx) = std::get<0>(col_split_l);
            blaze::column(lh, col_idx) = std::get<1>(col_split_l);
        } // remove col_split_l from memory
        {
            auto col_split_h = dwt(h_col, waveletType);
            blaze::column(hl, col_idx) = std::get<0>(col_split_h);
            blaze::column(hh, col_idx) = std::get<1>(col_split_h);
        }
    }
    return std::make_tuple(ll, lh, hl, hh);
}
/// 2-D single-level inverse DWT for vector-of-vectors sub-bands. Columns are
/// reconstructed first (ll + lh -> l, hl + hh -> h, stored column-major),
/// then the rows of the two intermediate halves are merged into the output.
/// @param ll,lh,hl,hh sub-bands of equal dimensions (asserted)
/// @param hx target image height, wx target image width
template <typename Container>
std::vector<Container> idwt2(std::vector<Container> const &ll, std::vector<Container> const &lh,
                             std::vector<Container> const &hl, std::vector<Container> const &hh, int waveletType,
                             int hx, int wx)
{
    assert(ll.size() == lh.size()); // TODO remove after testing and add exception
    assert(ll.size() == hl.size());
    assert(ll.size() == hh.size());
    assert(ll[0].size() == lh[0].size());
    assert(ll[0].size() == hl[0].size());
    assert(ll[0].size() == hh[0].size());
    // Column-major intermediates: element [col][row].
    std::vector<Container> l_colmajor(ll[0].size());
    std::vector<Container> h_colmajor(ll[0].size());
    for (size_t col_idx = 0; col_idx < ll[0].size(); col_idx++) {
        Container col_split_l, col_split_h;
        // Gather column col_idx of all four sub-bands.
        Container col_ll(ll.size());
        Container col_lh(ll.size());
        Container col_hl(ll.size());
        Container col_hh(ll.size());
        for (size_t row_idx = 0; row_idx < ll.size(); ++row_idx) {
            col_ll[row_idx] = ll[row_idx][col_idx];
            col_lh[row_idx] = lh[row_idx][col_idx];
            col_hl[row_idx] = hl[row_idx][col_idx];
            col_hh[row_idx] = hh[row_idx][col_idx];
        }
        col_split_l = wavelet::idwt(col_ll, col_lh, waveletType, hx);
        l_colmajor[col_idx] = col_split_l;
        col_split_h = wavelet::idwt(col_hl, col_hh, waveletType, hx);
        h_colmajor[col_idx] = col_split_h;
    }
    assert(l_colmajor[0].size() == h_colmajor[0].size());
    // transpose back to row-major and apply the second (row-wise) idwt
    std::vector<Container> out(l_colmajor[0].size());
    for (size_t row_idx = 0; row_idx < l_colmajor[0].size(); ++row_idx) {
        Container row_split_l(l_colmajor.size());
        Container row_split_h(l_colmajor.size());
        for (size_t col_idx = 0; col_idx < l_colmajor.size(); col_idx++) {
            row_split_l[col_idx] = l_colmajor[col_idx][row_idx];
            row_split_h[col_idx] = h_colmajor[col_idx][row_idx];
        }
        // Container row = idwt(row_split_l, row_split_h, waveletType, wx);
        out[row_idx] = idwt(row_split_l, row_split_h, waveletType, wx);
    }
    return out;
}
/// Convenience overload: accepts the (ll, lh, hl, hh) tuple produced by dwt2
/// and forwards to the four-argument idwt2.
template <typename Container>
std::vector<Container>
idwt2(std::tuple<std::vector<Container>, std::vector<Container>, std::vector<Container>, std::vector<Container>> in,
      int waveletType, int hx, int wx)
{
    auto &[ll, lh, hl, hh] = in;
    return idwt2(ll, lh, hl, hh, waveletType, hx, wx);
}
/* // sequence 1
template <typename Container2d>
Container2d idwt2(
Container2d const & ll,
Container2d const & lh,
Container2d const & hl,
Container2d const & hh,
int waveletType,
int hx,
int wx)
{
using El = typename Container2d::ElementType; // now we support only Blaze matrices, TODO add type traits,
generalize!!
// TODO use sparsed if input is sparsed
assert(ll.rows()==lh.rows()); // TODO replace with exception of nan return
assert(ll.rows()==hl.rows());
assert(ll.rows()==hh.rows());
assert(ll.columns()==lh.columns());
assert(ll.columns()==hl.columns());
assert(ll.columns()==hh.columns());
Container2d l;
Container2d h;
for (size_t row_idx = 0; row_idx<ll.rows(); row_idx++) {
blaze::DynamicVector<El, blaze::rowVector> row_ll = blaze::row(ll, row_idx);
blaze::DynamicVector<El, blaze::rowVector> row_lh = blaze::row(lh, row_idx);
blaze::DynamicVector<El, blaze::rowVector> row_hl = blaze::row(hl, row_idx);
blaze::DynamicVector<El, blaze::rowVector> row_hh = blaze::row(hh, row_idx);
auto row_split_l = wavelet::idwt(row_ll, row_lh, waveletType, wx);
auto row_split_h = wavelet::idwt(row_hl, row_hh, waveletType, wx);
if (row_idx < 1) {
l = blaze::DynamicMatrix<El>(ll.rows(), row_split_l.size());
h = blaze::DynamicMatrix<El>(ll.rows(), row_split_h.size());
}
blaze::row(l, row_idx) = row_split_l;
blaze::row(h, row_idx) = row_split_h;
}
// second idwt
blaze::DynamicMatrix<El, blaze::columnMajor> out_col_major; // temporary, TODO replace using type trait
for (size_t col_idx = 0; col_idx<l.columns(); ++col_idx) {
blaze::DynamicVector<El> col_split_l (l.rows()); // column vector
blaze::DynamicVector<El> col_split_h (l.rows());
for (size_t row_idx = 0; row_idx<l.rows(); row_idx++) { // row-major to column-major, TODO optimize
col_split_l[row_idx] = l(row_idx, col_idx);
col_split_h[row_idx] = h(row_idx, col_idx);
}
auto curr_column = idwt(col_split_l, col_split_h, waveletType, hx);
if (col_idx < 1) {
out_col_major = blaze::DynamicMatrix<El, blaze::columnMajor> (curr_column.size(), l.columns());
}
blaze::column(out_col_major, col_idx) = curr_column;
}
Container2d out = out_col_major; // col-major to row-major
return out;
}
// */
/// 2-D single-level inverse DWT for Blaze matrices. Columns of the four
/// sub-bands are reconstructed first, working in column-major copies
/// (ll + lh -> l, hl + hh -> h), then the rows of the two halves are merged.
/// @param ll,lh,hl,hh sub-bands of equal dimensions (asserted)
/// @param hx target image height, wx target image width
template <typename Container2d>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
idwt2(Container2d const &ll, Container2d const &lh, Container2d const &hl, Container2d const &hh, int waveletType,
      int hx, int wx)
{
    using El =
        typename Container2d::ElementType; // now we support only Blaze matrices, TODO add type traits, generalize!!
    // TODO use sparsed if input is sparsed
    assert(ll.rows() == lh.rows()); // TODO replace with exception of nan return
    assert(ll.rows() == hl.rows());
    assert(ll.rows() == hh.rows());
    assert(ll.columns() == lh.columns());
    assert(ll.columns() == hl.columns());
    assert(ll.columns() == hh.columns());
    // Column-major copies so blaze::column() access is contiguous.
    blaze::DynamicMatrix<El, blaze::columnMajor> ll_cm =
        ll; // row-major to col-major // type is temporary, TODO add type trait
    blaze::DynamicMatrix<El, blaze::columnMajor> lh_cm = lh;
    blaze::DynamicMatrix<El, blaze::columnMajor> hl_cm = hl;
    blaze::DynamicMatrix<El, blaze::columnMajor> hh_cm = hh;
    blaze::DynamicMatrix<El, blaze::columnMajor> l_cm;
    blaze::DynamicMatrix<El, blaze::columnMajor> h_cm;
    for (size_t col_idx = 0; col_idx < ll_cm.columns(); col_idx++) {
        blaze::DynamicVector<El> col_ll = blaze::column(ll_cm, col_idx);
        blaze::DynamicVector<El> col_lh = blaze::column(lh_cm, col_idx);
        blaze::DynamicVector<El> col_hl = blaze::column(hl_cm, col_idx);
        blaze::DynamicVector<El> col_hh = blaze::column(hh_cm, col_idx);
        auto col_split_l = wavelet::idwt(col_ll, col_lh, waveletType, hx);
        auto col_split_h = wavelet::idwt(col_hl, col_hh, waveletType, hx);
        if (col_idx < 1) { // first iteration only: output height known after first idwt
            l_cm = blaze::DynamicMatrix<El, blaze::columnMajor>(col_split_l.size(), ll_cm.columns());
            h_cm = blaze::DynamicMatrix<El, blaze::columnMajor>(col_split_h.size(), ll_cm.columns());
        }
        blaze::column(l_cm, col_idx) = col_split_l;
        blaze::column(h_cm, col_idx) = col_split_h;
    }
    Container2d l = l_cm; // col-major to row-major
    Container2d h = h_cm;
    // second (row-wise) idwt merges the two halves into the output image
    Container2d out;
    for (size_t row_idx = 0; row_idx < l.rows(); ++row_idx) {
        blaze::DynamicVector<El, blaze::rowVector> row_split_l = blaze::row(l, row_idx);
        blaze::DynamicVector<El, blaze::rowVector> row_split_h = blaze::row(h, row_idx);
        auto curr_row = idwt(row_split_l, row_split_h, waveletType, wx);
        if (row_idx < 1) { // first iteration only
            out = Container2d(l.rows(), curr_row.size());
        }
        blaze::row(out, row_idx) = curr_row;
    }
    return out;
}
// ------------------------------ DWT based on matrix multiplication
/**
 * Construct the sparse orthogonal wavelet transform matrix for a signal of
 * the given size: the top size/2 rows apply the low-pass (decomposition)
 * filter, the bottom size/2 rows the high-pass filter, each row shifted by
 * two columns relative to the previous one.
 * @tparam T float type
 * @param size signal length; must be >= order (asserted even order)
 * @param order filter length (even); dbwavf is called with order / 2
 * @param padding boundary handling: Periodized wraps filter taps around the
 *        signal edges; ZeroDerivative folds out-of-range taps onto the first
 *        and last columns (constant extension). Any other value yields the
 *        reserved-but-empty matrix.
 * @return size x size CompressedMatrix holding the transform
 */
template <typename T>
blaze::CompressedMatrix<T> DaubechiesMat(size_t size, int order = 4, Padding padding = Padding::Periodized)
{
    assert(order % 2 == 0);
    assert(size >= order);
    auto [Lo_D, Hi_D, Lo_R, Hi_R] = orthfilt(dbwavf<std::vector<T>>(order / 2));
    /* Reverse filters for convolution */
    std::reverse(Lo_D.begin(), Lo_D.end());
    std::reverse(Hi_D.begin(), Hi_D.end());
    /* Low filter part */
    auto mat = blaze::CompressedMatrix<T>(size, size);
    mat.reserve(size * Lo_D.size());
    if (padding == Padding::Periodized) {
        for (size_t i = 0; i < size / 2; ++i) {
            // ci: index of the first tap to place, chosen so taps that fall
            // off the right edge wrap around to the left (periodization)
            size_t ci = mat.columns() - 2 * i;
            if (ci > Lo_D.size()) {
                ci = 0;
            }
            for (size_t a = 0; a < Lo_D.size(); ++a) {
                if (ci >= Lo_D.size()) {
                    ci = ci % Lo_D.size();
                }
                size_t j = i * 2 + ci;
                if (j >= mat.columns()) {
                    j = j % mat.columns(); // wrap column index around the edge
                }
                mat.append(i, j, Lo_D[ci]);
                ++ci;
            }
            mat.finalize(i);
        }
        /* Hi filter part */
        for (size_t i = 0; i < size / 2; ++i) {
            size_t ci = mat.columns() - 2 * i;
            if (ci > Hi_D.size()) {
                ci = 0;
            }
            for (size_t a = 0; a < Hi_D.size(); ++a) {
                if (ci >= Hi_D.size()) {
                    ci = ci % Hi_D.size();
                }
                size_t j = i * 2 + ci;
                if (j >= mat.columns()) {
                    j = j % mat.columns();
                }
                mat.append(size / 2 + i, j, Hi_D[ci]);
                ++ci;
            }
            mat.finalize(size / 2 + i);
        }
    } else if (padding == Padding::ZeroDerivative) {
        /* Calculate padding size */
        int paddingSize = Lo_D.size() - 1;
        /* Low filter part */
        for (size_t i = 0; i < size / 2; ++i) {
            int leftPadding = paddingSize / 2;
            int j0 = -leftPadding + 2 * static_cast<int>(i);
            /* Left padding: taps falling before column 0 are summed into lp */
            T lp = 0;
            for (int k = 0; k < -j0; ++k) {
                lp += Lo_D[k];
            }
            /* Right padding: taps falling past the last column are summed into rp */
            T rp = 0;
            int l = j0 + Lo_D.size() - size;
            for (int k = 0; k < l; ++k) {
                rp += Lo_D[Lo_D.size() - 1 - k];
            }
            // In-range taps go in directly; the folded sums are added to the
            // first and last columns respectively.
            for (int k = 0; k < Lo_D.size(); ++k) {
                const int j = j0 + k;
                if (j == 0) {
                    mat.append(i, j, lp + Lo_D[k]);
                } else if (j == size - 1) {
                    mat.append(i, j, rp + Lo_D[k]);
                } else if (j > 0 and j < size - 1) {
                    mat.append(i, j, Lo_D[k]);
                } else {
                }
            }
            mat.finalize(i);
        }
        /* Hi filter part */
        for (size_t i = 0; i < size / 2; ++i) {
            int leftPadding = paddingSize / 2;
            int j0 = -leftPadding + 2 * static_cast<int>(i);
            /* Left padding */
            T lp = 0;
            for (int k = 0; k < -j0; ++k) {
                lp += Hi_D[k];
            }
            /* Right padding */
            T rp = 0;
            int l = j0 + Hi_D.size() - size;
            for (int k = 0; k < l; ++k) {
                rp += Hi_D[Hi_D.size() - 1 - k];
            }
            for (int k = 0; k < Hi_D.size(); ++k) {
                const int j = j0 + k;
                if (j == 0) {
                    mat.append(size / 2 + i, j, lp + Hi_D[k]);
                } else if (j == size - 1) {
                    mat.append(size / 2 + i, j, rp + Hi_D[k]);
                } else if (j > 0 and j < size - 1) {
                    mat.append(size / 2 + i, j, Hi_D[k]);
                } else {
                }
            }
            mat.finalize(size / 2 + i);
        }
    } else {
        // unknown padding mode: return the (empty) reserved matrix
        return mat;
    }
    return mat;
}
/// Block-diagonal Daubechies transform matrix for a serialized (flattened)
/// image: applies DaubechiesMat(vector_size, order) independently to each
/// vector_size-long segment of the serialized vector.
/// @param vector_size  length of one segment; must divide overall_size
/// @param overall_size total length of the serialized image
/// @param order        filter length (even), forwarded to DaubechiesMat
///
/// Cleanup: n_vectors is size_t (the original narrowed the size_t division
/// into int and then compared it against a size_t loop index).
template <typename T> blaze::CompressedMatrix<T> DaubechiesMat_e(size_t vector_size, size_t overall_size, int order = 4)
{ // Daubechies Transform matrix generator for serialized image
    assert(overall_size % vector_size == 0);
    auto mat = DaubechiesMat<T>(vector_size, order);
    blaze::CompressedMatrix<T> out(overall_size, overall_size, 0);
    size_t const n_vectors = overall_size / vector_size;
    for (size_t i = 0; i < n_vectors; ++i) {
        // Place one copy of the per-vector transform on the block diagonal.
        blaze::submatrix(out, i * vector_size, i * vector_size, vector_size, vector_size) =
            mat; // TODO optimize via reserve-append-finalize snippet
    }
    return out;
}
/// Whole-image 2-D DWT via matrix multiplication: every row of x is
/// multiplied by dmat_w, then every column of the intermediate by dmat_h.
/// The result keeps all sub-bands interleaved in one matrix (no quadrant
/// splitting — see the dwt2 wrapper for that).
/// @param x      input image
/// @param dmat_w square transform matrix sized to the image width (asserted)
/// @param dmat_h square transform matrix sized to the image height (asserted)
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
dwt2s(Container2d const &x, Container2ds const &dmat_w, Container2ds const &dmat_h)
{ // whole image transform, no dividing by subbands
    assert(dmat_w.columns() == dmat_w.rows());
    assert(dmat_h.columns() == dmat_h.rows());
    assert(dmat_w.rows() == x.columns());
    assert(dmat_h.rows() == x.rows());
    using El = typename Container2d::ElementType; // now we support only Blaze matrices
    Container2d intermediate(x.rows(), x.columns());
    Container2d out(x.rows(), x.columns());
    for (size_t row_idx = 0; row_idx < x.rows(); ++row_idx) { // split by rows
        blaze::DynamicVector<El, blaze::rowVector> curr_row = blaze::row(x, row_idx);
        blaze::DynamicVector<El> row_split = dmat_w * blaze::trans(curr_row);
        blaze::row(intermediate, row_idx) = blaze::trans(row_split);
    }
    for (size_t col_idx = 0; col_idx < x.columns(); ++col_idx) { // split by columns
        blaze::DynamicVector<El> curr_col = blaze::column(intermediate, col_idx);
        blaze::DynamicVector<El> col_split = dmat_h * curr_col;
        blaze::column(out, col_idx) = col_split;
    }
    return out;
}
// template <typename Container2d, typename Container2ds> // TODO complete!
// typename std::enable_if<
// blaze::IsMatrix<Container2d>::value,
// Container2d
//>::type
// dwt2s_e(Container2d const & x, Container2ds const & dmat_e_w, Container2ds const & dmat_e_h) { // whole image
// transform, no dividing by subbands
// using El = typename Container2d::ElementType; // now we support only Blaze matrices
// // TODO debug
// blaze::DynamicMatrix<El, blaze::columnMajor> intermediate_cm;
// {
// blaze::DynamicVector<El> ser_rows (x.columns()*x.rows());
// for (size_t i=0; i<x.rows(); ++i) {
// blaze::subvector(ser_rows, i*x.columns(), x.columns()) = blaze::trans(blaze::row(x, i));
// }
// blaze::DynamicVector<El> ser_intermed = dmat_e_w * ser_rows;
// blaze::DynamicMatrix<El> intermediate (x.rows(), x.columns());
// for (size_t i=0; i<x.rows(); ++i) {
// blaze::row(intermediate, i) = blaze::trans(blaze::subvector(ser_intermed, i*x.columns(), x.columns()));
// }
// intermediate_cm = intermediate; // to column-major
// }
// Container2d out (x.rows(), x.columns());
// {
// blaze::DynamicVector<El> ser_cols (x.columns()*x.rows());
// for (size_t i=0; i<intermediate_cm.columns(); ++i) {
// blaze::subvector(ser_cols, i*x.rows(), x.rows()) = blaze::column(intermediate_cm, i);
// }
// blaze::DynamicVector<El> ser_intermed = dmat_e_h * ser_cols;
// for (size_t i=0; i<intermediate_cm.columns(); ++i) {
// blaze::column(out, i) = blaze::subvector(ser_intermed, i*x.rows(), x.rows()); // TODO check if efficient
// }
// }
// return out;
//}
/* // working code, basic version
template <typename Container2d, typename Container2ds>
typename std::enable_if<
blaze::IsMatrix<Container2d>::value,
Container2d
>::type
dwt2s_e(Container2d const & x, Container2ds const & dmat_e_w, Container2ds const & dmat_e_h) { // whole image transform,
no dividing by subbands
using El = typename Container2d::ElementType; // now we support only Blaze matrices
// TODO debug
blaze::DynamicMatrix<El, blaze::columnMajor> intermediate_cm;
{
blaze::DynamicVector<El, blaze::rowVector> ser_rows (x.columns()*x.rows());
for (size_t i=0; i<x.rows(); ++i) {
blaze::subvector(ser_rows, i*x.columns(), x.columns()) = blaze::row(x, i);
}
blaze::DynamicVector<El, blaze::rowVector> ser_intermed = blaze::trans(dmat_e_w * blaze::trans(ser_rows));
blaze::DynamicMatrix<El> intermediate (x.rows(), x.columns());
for (size_t i=0; i<x.rows(); ++i) {
blaze::row(intermediate, i) = blaze::subvector(ser_intermed, i*x.columns(), x.columns());
}
intermediate_cm = intermediate; // to column-major
}
Container2d out (x.rows(), x.columns());
{
blaze::DynamicVector<El> ser_cols (x.columns()*x.rows());
for (size_t i=0; i<intermediate_cm.columns(); ++i) {
blaze::subvector(ser_cols, i*x.rows(), x.rows()) = blaze::column(intermediate_cm, i);
}
blaze::DynamicVector<El> ser_intermed = dmat_e_h * ser_cols;
for (size_t i=0; i<intermediate_cm.columns(); ++i) {
blaze::column(out, i) = blaze::subvector(ser_intermed, i*x.rows(), x.rows()); // TODO check if efficient
}
}
return out;
}
// */
/// Whole-image 2-D DWT for a serialized image using the block-diagonal
/// matrices from DaubechiesMat_e: all rows are transformed as one long
/// vector by dmat_e_w, the result is re-viewed column-major, and all
/// columns are transformed in one multiply by dmat_e_h.
/// NOTE(review): marked "TODO test!" by the author — intermediate_cm
/// deliberately aliases ser_cols, so assigning the row-major view fills
/// ser_cols in place; verify this aliasing before relying on it.
template <typename Container2d, typename Container2ds> // alternative implementation with CustomMatrix, TODO test!
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
dwt2s_e(Container2d const &x, Container2ds const &dmat_e_w, Container2ds const &dmat_e_h)
{ // whole image transform, no dividing by subbands
    using El = typename Container2d::ElementType; // now we support only Blaze matrices
    // TODO test well
    // Column-major *view* over ser_cols: writes through the view serialize
    // the intermediate image column by column into ser_cols.
    blaze::DynamicVector<El> ser_cols(x.columns() * x.rows());
    blaze::CustomMatrix<El, blaze::unaligned, blaze::unpadded, blaze::columnMajor> intermediate_cm(
        &ser_cols[0], x.rows(), x.columns());
    {
        // Serialize the rows, transform them in a single multiply, then view
        // the result as a row-major matrix.
        blaze::DynamicVector<El, blaze::rowVector> ser_rows(x.columns() * x.rows());
        for (size_t i = 0; i < x.rows(); ++i) {
            blaze::subvector(ser_rows, i * x.columns(), x.columns()) = blaze::row(x, i);
        }
        blaze::DynamicVector<El, blaze::rowVector> ser_intermed = blaze::trans(dmat_e_w * blaze::trans(ser_rows));
        blaze::CustomMatrix<El, blaze::unaligned, blaze::unpadded, blaze::rowMajor> intermediate(&ser_intermed[0],
                                                                                                x.rows(), x.columns());
        intermediate_cm = intermediate; // to column-major
    }
    blaze::DynamicVector<El> ser_intermed = dmat_e_h * ser_cols;
    Container2d out(x.rows(), x.columns());
    for (size_t i = 0; i < intermediate_cm.columns(); ++i) {
        blaze::column(out, i) = blaze::subvector(ser_intermed, i * x.rows(), x.rows()); // TODO check if efficient
    }
    return out;
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value,
                        std::tuple<Container2d, Container2d, Container2d, Container2d>>::type
dwt2(Container2d const &x, Container2ds const &dmat_w, Container2ds const &dmat_h)
{ // wrapper: transform the whole image, then slice the result into subbands
    // Forward 2d DWT: dwt2s produces the full transformed image; the four
    // quadrants of that image are the LL / LH / HL / HH subbands.
    Container2d whole = dwt2s(x, dmat_w, dmat_h);
    const size_t half_w = dmat_w.columns() / 2;
    const size_t half_h = dmat_h.columns() / 2;
    Container2d ll = blaze::submatrix(whole, 0, 0, half_h, half_w);
    Container2d lh = blaze::submatrix(whole, half_h, 0, half_h, half_w);
    Container2d hl = blaze::submatrix(whole, 0, half_w, half_h, half_w);
    Container2d hh = blaze::submatrix(whole, half_h, half_w, half_h, half_w);
    return std::make_tuple(ll, lh, hl, hh);
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value,
                        std::tuple<Container2d, Container2d, Container2d, Container2d>>::type
dwt2_e(Container2d const &x, Container2ds const &dmat_w_e, Container2ds const &dmat_h_e)
{ // wrapper: transform the whole image, then slice the result into subbands
    // Same as dwt2, but uses the "expanded" transform matrices; subband sizes
    // are derived from the input image rather than from the matrices.
    Container2d whole = dwt2s_e(x, dmat_w_e, dmat_h_e);
    const size_t half_w = x.columns() / 2;
    const size_t half_h = x.rows() / 2;
    Container2d ll = blaze::submatrix(whole, 0, 0, half_h, half_w);
    Container2d lh = blaze::submatrix(whole, half_h, 0, half_h, half_w);
    Container2d hl = blaze::submatrix(whole, 0, half_w, half_h, half_w);
    Container2d hh = blaze::submatrix(whole, half_h, half_w, half_h, half_w);
    return std::make_tuple(ll, lh, hl, hh);
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
idwt2( // wrapper for composing from subbands
    Container2d const &ll, Container2d const &lh, Container2d const &hl, Container2d const &hh,
    Container2ds const &dmat_w, Container2ds const &dmat_h)
{
    // Inverse 2d DWT: tiles the four equally-sized subbands back into a single
    // matrix (LL top-left, LH bottom-left, HL top-right, HH bottom-right) and
    // applies the synthesis transform via dwt2s with the inverse matrices.
    // (Removed an unused `using El` type alias.)
    assert(ll.rows() == lh.rows());
    assert(ll.rows() == hl.rows());
    assert(ll.rows() == hh.rows());
    assert(ll.columns() == lh.columns());
    assert(ll.columns() == hl.columns());
    assert(ll.columns() == hh.columns());
    assert(dmat_w.rows() == ll.columns() * 2);
    assert(dmat_h.rows() == ll.rows() * 2);
    Container2d out(dmat_h.rows(), dmat_w.rows());
    blaze::submatrix(out, 0, 0, ll.rows(), ll.columns()) = ll;
    blaze::submatrix(out, ll.rows(), 0, lh.rows(), lh.columns()) = lh;
    blaze::submatrix(out, 0, ll.columns(), hl.rows(), hl.columns()) = hl;
    blaze::submatrix(out, ll.rows(), ll.columns(), hh.rows(), hh.columns()) = hh;
    return dwt2s(out, dmat_w, dmat_h);
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
idwt2_e( // wrapper for composing from subbands
    Container2d const &ll, Container2d const &lh, Container2d const &hl, Container2d const &hh,
    Container2ds const &dmat_w_e, Container2ds const &dmat_h_e)
{
    // Inverse 2d DWT using the "expanded" transform matrices: tiles the four
    // equally-sized subbands into one matrix and synthesizes via dwt2s_e.
    // (Removed an unused `using El` type alias.)
    assert(ll.rows() == lh.rows());
    assert(ll.rows() == hl.rows());
    assert(ll.rows() == hh.rows());
    assert(ll.columns() == lh.columns());
    assert(ll.columns() == hl.columns());
    assert(ll.columns() == hh.columns());
    // Expanded matrices act on the serialized image, hence rows*columns sizing.
    assert(dmat_w_e.rows() == ll.columns() * ll.rows() * 4);
    assert(dmat_h_e.rows() == dmat_w_e.rows());
    Container2d out(ll.rows() * 2, ll.columns() * 2);
    blaze::submatrix(out, 0, 0, ll.rows(), ll.columns()) = ll;
    blaze::submatrix(out, ll.rows(), 0, lh.rows(), lh.columns()) = lh;
    blaze::submatrix(out, 0, ll.columns(), hl.rows(), hl.columns()) = hl;
    blaze::submatrix(out, ll.rows(), ll.columns(), hh.rows(), hh.columns()) = hh;
    return dwt2s_e(out, dmat_w_e, dmat_h_e);
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
idwt2( // wrapper: unpacks a subband tuple and forwards to the four-argument overload
    std::tuple<Container2d, Container2d, Container2d, Container2d> const &in, Container2ds const &dmat_w,
    Container2ds const &dmat_h)
{
    Container2d const &ll = std::get<0>(in);
    Container2d const &lh = std::get<1>(in);
    Container2d const &hl = std::get<2>(in);
    Container2d const &hh = std::get<3>(in);
    return idwt2(ll, lh, hl, hh, dmat_w, dmat_h);
}
template <typename Container2d, typename Container2ds>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type
idwt2_e( // wrapper: unpacks a subband tuple and forwards to the four-argument overload
    std::tuple<Container2d, Container2d, Container2d, Container2d> const &in, Container2ds const &dmat_w_e,
    Container2ds const &dmat_h_e)
{
    Container2d const &ll = std::get<0>(in);
    Container2d const &lh = std::get<1>(in);
    Container2d const &hl = std::get<2>(in);
    Container2d const &hh = std::get<3>(in);
    return idwt2_e(ll, lh, hl, hh, dmat_w_e, dmat_h_e);
}
// loop-based version
template <typename Container2d>
typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type dwt2_l(Container2d const &x,
                                                                                      int order = 4)
{
    // One-level 2d Daubechies DWT computed with explicit convolution loops:
    // a horizontal pass over rows, then a vertical pass over columns; each pass
    // writes the low-pass result into the first half and the high-pass result
    // into the second half of the respective dimension.
    using El = typename Container2d::ElementType; // now we support only Blaze matrices
    assert(order % 2 == 0);
    std::vector<El> c(order);
    //    c[0] = (1+sqrt(3))/(4*sqrt(2)); // D4
    //    c[1] = (3+sqrt(3))/(4*sqrt(2));
    //    c[2] = (3-sqrt(3))/(4*sqrt(2));
    //    c[3] = (1-sqrt(3))/(4*sqrt(2));
    El coeff = 2 / sqrt(2); // rescales dbwavf output to the normalization used by these loops
    c = dbwavf<std::vector<El>>(order / 2);
    for (size_t i = 0; i < c.size(); ++i) {
        c[i] = c[i] * coeff;
    }
    Container2d intermediate(x.rows(), x.columns(), 0);
    // auto mat = blaze::CompressedMatrix<El>(size, size, 0);
    // Horizontal pass: each output row holds [low-pass | high-pass] halves.
    size_t split_size = x.columns() / 2;
    for (size_t r_idx = 0; r_idx < x.rows(); ++r_idx) { // input row
        for (size_t i = 0; i < split_size; ++i) { // offsets
            int sign = 1;
            for (size_t ci = 0; ci < c.size(); ++ci) { // Daubechies coeffs
                // The % wraps the convolution window around the edge (periodic extension).
                intermediate(r_idx, i) += x(r_idx, (i * 2 + ci) % x.columns()) * c[ci]; // TODO remove %
                // Reversed coefficients with alternating sign form the matching high-pass filter.
                intermediate(r_idx, i + split_size) += x(r_idx, (i * 2 + ci) % x.columns()) * c[order - 1 - ci] * sign;
                sign *= -1;
            }
        }
    }
    // Vertical pass over the horizontally transformed image.
    Container2d out(x.rows(), x.columns(), 0);
    split_size = x.rows() / 2;
    for (size_t c_idx = 0; c_idx < x.columns(); ++c_idx) { // input column
        for (size_t i = 0; i < split_size; ++i) { // offsets
            int sign = 1;
            for (size_t ci = 0; ci < c.size(); ++ci) { // Daubechies coeffs
                out(i, c_idx) += intermediate((i * 2 + ci) % x.rows(), c_idx) * c[ci]; // TODO remove %
                out(i + split_size, c_idx) += intermediate((i * 2 + ci) % x.rows(), c_idx) * c[order - 1 - ci] * sign;
                sign *= -1;
            }
        }
    }
    return out;
}
// ---- vector by vector versions
/* // working code, may be enabled
template <typename Container2d>
typename std::enable_if<
blaze::IsMatrix<Container2d>::value,
std::tuple<Container2d, Container2d, Container2d, Container2d>
>::type
dwt2t(Container2d const & x, Container2d const & dmat_w, Container2d const & dmat_h) { // splitting each vector
assert(dmat_w.columns() == dmat_w.rows());
assert(dmat_h.columns() == dmat_h.rows());
assert(dmat_w.rows() == x.columns());
assert(dmat_h.rows() == x.rows());
using El = typename Container2d::ElementType; // now we support only Blaze matrices, TODO add type traits,
generalize!! Container2d ll, lh, hl, hh, l, h; size_t split_sz_w = dmat_w.columns()/2; size_t split_sz_h =
dmat_h.columns()/2; l = Container2d(x.rows(), split_sz_w); h = Container2d(x.rows(), split_sz_w); ll =
Container2d(split_sz_h, split_sz_w); lh = Container2d(split_sz_h, split_sz_w); hl = Container2d(split_sz_h, split_sz_w);
hh = Container2d(split_sz_h, split_sz_w);
for (size_t row_idx = 0; row_idx<x.rows(); ++row_idx) { // top-level split, by rows
blaze::DynamicVector<El, blaze::rowVector> curr_row = blaze::row(x, row_idx);
blaze::DynamicVector<El> row_split = dmat_w*blaze::trans(curr_row);
blaze::row(l, row_idx) = blaze::trans(blaze::subvector(row_split, 0, split_sz_w));
blaze::row(h, row_idx) = blaze::trans(blaze::subvector(row_split, split_sz_w, split_sz_w));
}
for (size_t col_idx = 0; col_idx<l.columns(); col_idx++) { // 2 lower level splits, by columns
blaze::DynamicVector<El> l_col = blaze::column(l, col_idx);
blaze::DynamicVector<El> h_col = blaze::column(h, col_idx);;
{
blaze::DynamicVector<El> col_split_l = dmat_h*l_col;
blaze::column(ll, col_idx) = blaze::subvector(col_split_l, 0, split_sz_h);
blaze::column(lh, col_idx) = blaze::subvector(col_split_l, split_sz_h, split_sz_h);
} // remove col_split_l from memory
{
blaze::DynamicVector<El> col_split_h = dmat_h*h_col;
blaze::column(hl, col_idx) = blaze::subvector(col_split_h, 0, split_sz_h);
blaze::column(hh, col_idx) = blaze::subvector(col_split_h, split_sz_h, split_sz_h);
}
}
return std::make_tuple(ll, lh, hl, hh);
}
// */
// // TODO debug
// template <typename Container2d>
// typename std::enable_if<blaze::IsMatrix<Container2d>::value, Container2d>::type dwt2_reordered(
// Container2d const & ll,
// Container2d const & lh,
// Container2d const & hl,
// Container2d const & hh,
// Container2d const & dmat_w,
// Container2d const & dmat_h)
//{
// using El = typename Container2d::ElementType; // now we support only Blaze matrices
// assert(ll.rows()==lh.rows());
// assert(ll.rows()==hl.rows());
// assert(ll.rows()==hh.rows());
// assert(ll.columns()==lh.columns());
// assert(ll.columns()==hl.columns());
// assert(ll.columns()==hh.columns());
// blaze::DynamicMatrix<El, blaze::columnMajor> ll_cm = ll; // row-major to col-major
// blaze::DynamicMatrix<El, blaze::columnMajor> lh_cm = lh;
// blaze::DynamicMatrix<El, blaze::columnMajor> hl_cm = hl;
// blaze::DynamicMatrix<El, blaze::columnMajor> hh_cm = hh;
// blaze::DynamicMatrix<El, blaze::columnMajor> col_composed_cm (ll.rows() + lh(rows), ll.columns() + lh.columns());
// //blaze::DynamicMatrix<El, blaze::columnMajor> h_cm;
// for (size_t col_idx = 0; col_idx < ll.columns() + lh.columns(); col_idx++) {
// //blaze::DynamicVector<El> col_ll = blaze::column(ll_cm, col_idx);
// //blaze::DynamicVector<El> col_lh = blaze::column(lh_cm, col_idx);
// //blaze::DynamicVector<El> col_hl = blaze::column(hl_cm, col_idx);
// //blaze::DynamicVector<El> col_hh = blaze::column(hh_cm, col_idx);
// blaze::DynamicVector<El> col_concat (ll.rows() + lh(rows));
// blaze::subvector(col_concat, 0, ll.rows()) = ll_cm;
// blaze::subvector(col_concat, ll.rows(), lh.rows()) = lh_cm;
// //auto col_split_l = wavelet::idwt(col_ll, col_lh, waveletType, hx);
// //auto col_split_h = wavelet::idwt(col_hl, col_hh, waveletType, hx);
// blaze::DynamicVector<El> col_composed_v = dmat_h_t*blaze::trans(col_concat);
// //if (col_idx < 1) {
// //col_composed_cm = blaze::DynamicMatrix<El, blaze::columnMajor>(col_composed.size(), ll_cm.columns());
// //h_cm = blaze::DynamicMatrix<El, blaze::columnMajor>(col_split_h.size(), ll_cm.columns());
// //}
// blaze::column(col_composed_cm, col_idx) = col_composed_v;
// //blaze::column(h_cm, col_idx) = col_split_h;
// }
// Container2d col_composed = col_composed_cm; // col-major to row-major
// //Container2d h = h_cm;
// // second idwt
// Container2d out (ll.rows() + lh(rows), ll.columns() + lh.columns());
// for (size_t row_idx = 0; row_idx<col_composed.rows(); ++row_idx) {
// blaze::DynamicVector<El, blaze::rowVector> row_split = blaze::row(col_composed, row_idx);
// //blaze::DynamicVector<El, blaze::rowVector> row_split_h = blaze::row(h, row_idx);
// //auto curr_row = idwt(row_split, row_split_h, waveletType, wx);
// blaze::DynamicVector<El> curr_row = dmat_w_t*blaze::trans(row_split);
// //if (row_idx < 1) {
// //out = Container2d (col_composed.rows(), curr_row.size());
// //}
// blaze::row(out, row_idx) = blaze::trans(curr_row);
// }
// return out;
//}
} // namespace wavelet
#endif
| 43,813
|
C++
|
.cpp
| 1,051
| 39.021884
| 120
| 0.647103
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,502
|
hog.cpp
|
metric-space-ai_metric/metric/transform/hog.cpp
|
#include "hog.hpp"
namespace metric {
template <typename T>
HOG<T>::HOG(const size_t orientations, const size_t cellSize, const size_t blockSize)
    : orientations(orientations), cellSize(cellSize), blockSize(blockSize)
{
    // Validate the configuration: a histogram needs at least two orientation
    // bins, and cell/block geometry must be non-degenerate.
    assert(blockSize > 0);
    assert(cellSize > 0);
    assert(orientations > 1);
}
template <typename T> typename HOG<T>::Vector HOG<T>::encode(const HOG::Matrix &image) const
{
    // Computes the HOG descriptor of `image`: per-pixel gradient magnitude and
    // orientation, per-cell orientation histograms, block-wise L2
    // normalization, and concatenation of all block histograms into one vector.
    assert(image.rows() > 0);
    assert(image.columns() > 0);
    // Image dimensions must be a whole number of blocks. Fixed precedence bug:
    // `a % blockSize * cellSize == 0` parsed as `(a % blockSize) * cellSize`,
    // accepting images that are not a multiple of the block pixel size.
    assert(image.rows() % (blockSize * cellSize) == 0);
    assert(image.columns() % (blockSize * cellSize) == 0);

    /* Compute dx (central differences; one-sided at the image borders) */
    blaze::DynamicMatrix<T, blaze::columnMajor> imageColumnMajor = image;
    blaze::DynamicMatrix<T, blaze::columnMajor> dxColumnMajor(image.rows(), image.columns());
    blaze::column(dxColumnMajor, 0) = blaze::column(image, 1);
    for (size_t i = 1; i < image.columns() - 1; ++i) {
        blaze::column(dxColumnMajor, i) = blaze::column(image, i + 1) - blaze::column(image, i - 1);
    }
    blaze::column(dxColumnMajor, image.columns() - 1) = -blaze::column(image, image.columns() - 2);
    blaze::DynamicMatrix<T> dx = dxColumnMajor;

    /* Compute dy */
    blaze::DynamicMatrix<T> dy(image.rows(), image.columns());
    blaze::row(dy, 0) = blaze::row(image, 1);
    for (size_t i = 1; i < image.rows() - 1; ++i) {
        blaze::row(dy, i) = blaze::row(image, i + 1) - blaze::row(image, i - 1);
    }
    blaze::row(dy, image.rows() - 1) = -blaze::row(image, image.rows() - 2);

    /* Compute gradient magnitude */
    Matrix dx2 = blaze::pow(dx, 2);
    Matrix dy2 = blaze::pow(dy, 2);
    Matrix magnitude = blaze::sqrt(dx2 + dy2);

    /* Compute gradient orientation in bin units, mapped to [0, orientations) */
    Matrix angle(image.rows(), image.columns());
    dx += 2 * std::numeric_limits<T>::epsilon(); // avoid division by zero
    for (size_t i = 0; i < image.rows(); ++i) {
        blaze::row(angle, i) = blaze::row(dy, i) / blaze::row(dx, i);
    }
    angle = blaze::atan(angle);      // (-pi/2, pi/2)
    angle += M_PI / T(2);            // shift to [0, pi)
    angle /= M_PI / T(orientations); // scale to [0, orientations)

    /* Define HOG features */
    /* Length of histogram of block */
    const size_t blockHistogramSize = blockSize * blockSize * orientations;
    /* Size of block in pixels */
    const size_t blockCellSize = blockSize * cellSize;
    /* Number of blocks in image */
    const size_t blockNumbers = (image.rows() / blockCellSize) * (image.columns() / blockCellSize);
    /* Resulting features vector */
    Vector features(blockNumbers * blockHistogramSize);

    size_t blockCount = 0;
    for (size_t blockStartRow = 0; blockStartRow < image.rows() - blockCellSize + 1; blockStartRow += blockCellSize) {
        for (size_t blockStartColumn = 0; blockStartColumn < image.columns() - blockCellSize + 1;
             blockStartColumn += blockCellSize) {

            /* Compute block histogram: one row per cell, one column per bin */
            blaze::DynamicMatrix<T> blockHistogram = blaze::zero<T>(blockSize * blockSize, orientations);
            for (size_t i = 0; i < blockCellSize; ++i) {
                for (size_t j = 0; j < blockCellSize; ++j) {
                    const size_t cellNumber = (i / cellSize) * blockSize + (j / cellSize);
                    T angleBin = std::floor(angle(blockStartRow + i, blockStartColumn + j));
                    if (angleBin == orientations) { // clamp the right edge into the last bin
                        --angleBin;
                    }
                    blockHistogram(cellNumber, angleBin) += magnitude(blockStartRow + i, blockStartColumn + j);
                }
            }

            /* Normalize block (epsilon keeps an all-zero block finite) */
            T norm = std::sqrt(blaze::sum(blaze::pow(blockHistogram, 2)) + 2 * std::numeric_limits<T>::epsilon());
            blockHistogram /= norm;

            /* Append the block histogram to the feature vector, cell by cell */
            for (size_t k = 0; k < blockHistogram.rows(); ++k) {
                blaze::subvector(features, blockCount * blockHistogramSize + k * orientations, orientations) =
                    blaze::trans(blaze::row(blockHistogram, k));
            }
            ++blockCount;
        }
    }
    return features;
}
template <typename T>
typename HOG<T>::DistanceMatrix HOG<T>::getGroundDistance(const blaze::DynamicMatrix<T> &image, const T rotation_cost,
                                                          const T move_cost, const T threshold)
{
    // Builds a ground-distance matrix between HOG histogram bins as a weighted
    // sum of per-cell spatial distance and per-bin orientation distance
    // (presumably intended as the cost matrix for an EMD-style metric — TODO
    // confirm against callers).
    size_t blockStride = 0; // stride 0 => non-overlapping blocks
    size_t blocks_per_image_rows = (image.rows() / T(cellSize) - blockSize) / (blockSize - blockStride) + 1;
    size_t blocks_per_image_columns = (image.columns() / T(cellSize) - blockSize) / (blockSize - blockStride) + 1;
    size_t totalBinsNumber = blocks_per_image_rows * blocks_per_image_columns * blockSize * blockSize * orientations;
    /* Spatial distance matrix (one entry per cell, i.e. per histogram) */
    DistanceMatrix spatialDistance0 =
        getSpatialDistance(totalBinsNumber, blocks_per_image_rows, blocks_per_image_columns);
    /* Thresholding: cap spatial distances at `threshold` (0 disables capping) */
    if (threshold != 0) {
        spatialDistance0 = blaze::map(spatialDistance0, [threshold](T d) { return (d > threshold) ? threshold : d; });
    }
    // Expand per-cell distances to per-bin: all orientation bins inside a cell
    // share that cell's spatial distance. Only the upper triangle is filled —
    // DistanceMatrix is assumed symmetric (TODO confirm its declared type).
    DistanceMatrix spatialDistance(spatialDistance0.rows() * orientations);
    for (auto i = 0; i < spatialDistance.rows(); ++i) {
        for (auto j = i + 1; j < spatialDistance.columns(); ++j) {
            spatialDistance(i, j) = spatialDistance0(i / orientations, j / orientations);
        }
    }
    /* Orientations distance matrix, tiled across all cells via index modulo */
    DistanceMatrix orientationDistance0 = getOrientationDistance();
    size_t scale = totalBinsNumber / orientations;
    DistanceMatrix orientationDistance(orientationDistance0.rows() * scale);
    for (auto i = 0; i < orientationDistance.rows(); ++i) {
        for (auto j = i + 1; j < orientationDistance.columns(); ++j) {
            orientationDistance(i, j) =
                orientationDistance0(i % orientationDistance0.rows(), j % orientationDistance0.columns());
        }
    }
    /* Total ground distance matrix: weighted sum of the two components */
    DistanceMatrix groundDistance = rotation_cost * orientationDistance + move_cost * spatialDistance;
    return groundDistance;
}
template <typename T>
typename HOG<T>::DistanceMatrix HOG<T>::getSpatialDistance(size_t totalBinsNumber, size_t blocks_per_image_rows,
                                                           size_t blocks_per_image_columns)
{
    // Assigns each cell (one histogram per cell) a (row, column) coordinate,
    // walking blocks in row-major order and the cells inside a block
    // column-first, then returns the pairwise L1 (city-block) distances.
    Vector rowCoord = blaze::zero<T>(totalBinsNumber / orientations);
    Vector colCoord = rowCoord;
    size_t pos = 0;
    for (size_t b_i = 1; b_i <= blocks_per_image_rows; ++b_i) {
        for (size_t b_j = 1; b_j <= blocks_per_image_columns; ++b_j) {
            for (size_t cb_j = 1; cb_j <= blockSize; ++cb_j) {
                for (size_t cb_i = 1; cb_i <= blockSize; ++cb_i) {
                    // NOTE(review): the block offset is (b - 1), not
                    // (b - 1) * blockSize; for b == 1 this reduces to the plain
                    // in-block index, so the original b == 1 special case was
                    // redundant and has been folded in.
                    rowCoord[pos] = cb_i + (b_i - 1);
                    colCoord[pos] = cb_j + (b_j - 1);
                    ++pos;
                }
            }
        }
    }
    DistanceMatrix spatial(rowCoord.size());
    for (size_t i = 0; i < spatial.rows(); ++i) {
        for (size_t j = i + 1; j < spatial.columns(); ++j) {
            spatial(i, j) = std::abs(rowCoord[i] - rowCoord[j]) + std::abs(colCoord[i] - colCoord[j]);
        }
    }
    return spatial;
}
template <typename T> typename HOG<T>::DistanceMatrix HOG<T>::getOrientationDistance(const T angleUnitCost)
{
    // Builds the pairwise circular distance between orientation bin centers,
    // scaled by 1 / angleUnitCost.
    // Removed a dead `isSigned` local that was hard-wired to false: only the
    // unsigned-gradient range [0, 180) is produced here; a signed-gradient
    // variant would use 360 instead.
    const size_t maxAngle = 180;

    /* Bin centers in degrees, evenly spread over [0, maxAngle) */
    Vector orientsVector(orientations);
    for (size_t i = 0; i < orientations; ++i) {
        orientsVector[i] = maxAngle * (T(1) - T(1) / orientations) / (orientations - 1) * i;
    }

    blaze::SymmetricMatrix<Matrix> orientationsDistance(orientations);
    for (size_t i = 0; i < orientsVector.size(); ++i) {
        for (size_t j = i + 1; j < orientsVector.size(); ++j) {
            /* Circular (wrap-around) angular difference */
            T normDeg = std::fabs(std::fmod(orientsVector[i] - orientsVector[j], maxAngle));
            orientationsDistance(i, j) = std::min(maxAngle - normDeg, normDeg);
        }
    }
    // NOTE(review): dividing by angleUnitCost looks inverted for a "cost"
    // parameter (a larger cost shrinks the distance) — confirm intent.
    orientationsDistance = orientationsDistance / angleUnitCost;
    return orientationsDistance;
}
} // namespace metric
| 9,200
|
C++
|
.cpp
| 213
| 40.014085
| 118
| 0.651493
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,503
|
ThreadPool.cpp
|
metric-space-ai_metric/metric/utils/ThreadPool.cpp
|
#include "ThreadPool.hpp"
#include <iostream>
ThreadPool::ThreadPool(size_t maxThreads) : isClosed_(false)
{
    // Spin up the fixed-size worker set; each worker loops on the task queue.
    size_t created = 0;
    while (created < maxThreads) {
        addWorker();
        ++created;
    }
}
void ThreadPool::close()
{
    // Flip the closed flag under the lock and wake every waiter so blocked
    // workers observe the shutdown, then wait for all of them to finish.
    {
        std::lock_guard lock(mutex_);
        isClosed_ = true;
        cvEmpty_.notify_all();
    }
    for (std::thread &worker : threads_)
        worker.join();
}
void ThreadPool::execute(const callable &block)
{
try {
putToQueue(block);
} catch (const CloseException &e) {
std::cerr << "ThreadPool: execute on closed pool is ignored" << std::endl;
}
}
void ThreadPool::addWorker()
{
    // Spawns one worker thread that repeatedly pulls tasks from the queue and
    // runs them until the pool is closed.
    // NOTE(review): isClosed_ is read here without holding mutex_; if it is a
    // plain bool in ThreadPool.hpp this is a data race — confirm it is
    // std::atomic<bool>, or rely solely on getFromQueue()'s CloseException
    // for termination.
    threads_.push_back(std::thread([this]() {
        while (!isClosed_) {
            try {
                // Blocks until a task is available, then invokes it.
                getFromQueue()();
            } catch (const CloseException &x) {
                break; // pool was closed while waiting — shut this worker down
            } catch (const std::exception &e) {
                // A failing task must not kill the worker; log and continue.
                std::cerr << "error in ThreadPool worker: " << e.what() << std::endl;
            } catch (...) {
                std::cerr << "unknown error in ThreadPool worker" << std::endl;
            }
        }
    }));
}
void ThreadPool::putToQueue(const callable &block)
{
std::lock_guard lock(mutex_);
queue_.push_back(block);
if (queue_.size() == 1)
cvEmpty_.notify_all();
}
ThreadPool::callable ThreadPool::getFromQueue()
{
    // Blocks until a task is available or the pool is shut down; throws
    // CloseException on shutdown so workers can exit their loops.
    std::unique_lock lock(mutex_);
    cvEmpty_.wait(lock, [this] { return isClosed_ || !queue_.empty(); });
    if (isClosed_)
        throw CloseException();
    callable task = std::move(queue_.front());
    queue_.pop_front();
    return task;
}
| 1,366
|
C++
|
.cpp
| 61
| 20.032787
| 76
| 0.657935
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,504
|
visualizer.cpp
|
metric-space-ai_metric/metric/utils/visualizer.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "visualizer.hpp"
#include <cstring>
namespace mat2bmp {
template <class BlazeMatrix> void blaze2bmp(BlazeMatrix m, std::string filename)
{
    // Writes matrix `m` to `filename` as a 24-bit BMP. Values are scaled by
    // 255 and clamped; positive entries are rendered cyan (G+B), negative red.
    int w = m.columns();
    int h = m.rows();
    int x, y, r, g, b;

    FILE *f;
    unsigned char *img = NULL;
    int filesize = 54 + 3 * w * h; // 54-byte header + 3 bytes per pixel

    img = (unsigned char *)malloc(3 * w * h);
    if (img == NULL) // fixed: previously memset/indexed a NULL buffer on OOM
        return;
    std::memset(img, 0, 3 * w * h);

    for (int i = 0; i < w; i++) {
        for (int j = 0; j < h; j++) {
            x = i;
            y = j;
            r = 0;
            g = 0;
            b = 0;
            int p = m(j, i) * 255;
            if (p > 0) // cyan for positive, red for negative
            {
                g = p;
                b = p;
            } else
                r = -p;
            if (r > 255)
                r = 255;
            if (g > 255)
                g = 255;
            if (b > 255)
                b = 255;
            img[(x + y * w) * 3 + 2] = (unsigned char)(r);
            img[(x + y * w) * 3 + 1] = (unsigned char)(g);
            img[(x + y * w) * 3 + 0] = (unsigned char)(b);
        }
    }

    /* BMP headers: little-endian file size and image dimensions */
    unsigned char bmpfileheader[14] = {'B', 'M', 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, 0};
    unsigned char bmpinfoheader[40] = {40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 24, 0};
    unsigned char bmppad[3] = {0, 0, 0};

    bmpfileheader[2] = (unsigned char)(filesize);
    bmpfileheader[3] = (unsigned char)(filesize >> 8);
    bmpfileheader[4] = (unsigned char)(filesize >> 16);
    bmpfileheader[5] = (unsigned char)(filesize >> 24);

    bmpinfoheader[4] = (unsigned char)(w);
    bmpinfoheader[5] = (unsigned char)(w >> 8);
    bmpinfoheader[6] = (unsigned char)(w >> 16);
    bmpinfoheader[7] = (unsigned char)(w >> 24);
    bmpinfoheader[8] = (unsigned char)(h);
    bmpinfoheader[9] = (unsigned char)(h >> 8);
    bmpinfoheader[10] = (unsigned char)(h >> 16);
    bmpinfoheader[11] = (unsigned char)(h >> 24);

    f = fopen(filename.c_str(), "wb");
    if (f == NULL) { // fixed: previously wrote through a NULL FILE* and leaked img
        free(img);
        return;
    }
    fwrite(bmpfileheader, 1, 14, f);
    fwrite(bmpinfoheader, 1, 40, f);
    for (int i = 0; i < h; i++) {
        fwrite(img + (w * (h - i - 1) * 3), 3, w, f); // BMP stores rows bottom-up
        fwrite(bmppad, 1, (4 - (w * 3) % 4) % 4, f);  // pad each row to 4 bytes
    }
    free(img);
    fclose(f);
}
template <class BlazeMatrix> void blaze2bmp_norm(BlazeMatrix m, std::string filename)
{
    // Rescales the matrix so that its largest-magnitude entry maps to +/-1,
    // then writes it through the plain BMP writer.
    auto hi = max(m);
    auto lo = min(m);
    auto scale = (hi > -lo) ? hi : -lo;
    auto scaled = m / scale;
    mat2bmp::blaze2bmp(scaled, filename);
}
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
void blaze2bmp_norm(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> m_stl,
                    std::string filename, float magnitude = 1)
{
    // Copies the nested STL container into a dense Blaze matrix (all rows are
    // assumed equal-length and at least one row present), normalizes by the
    // largest-magnitude entry, applies the gain and writes the bitmap.
    const size_t n_rows = m_stl.size();
    const size_t n_cols = m_stl[0].size();
    blaze::DynamicMatrix<ValueType> m(n_rows, n_cols, 0);
    for (size_t r = 0; r < n_rows; ++r)
        for (size_t c = 0; c < n_cols; ++c)
            m(r, c) = m_stl[r][c];
    auto peak = max(m);
    auto trough = min(m);
    auto M = m / (peak > -trough ? peak : -trough) * magnitude;
    mat2bmp::blaze2bmp(M, filename);
}
template <template <typename, typename> class OuterContainer, template <typename, typename> class InnerContainer,
          typename ValueType, typename OuterAllocator, typename InnerAllocator>
void blaze2bmp(OuterContainer<InnerContainer<ValueType, InnerAllocator>, OuterAllocator> m_stl, std::string filename,
               float magnitude = 1)
{
    // Copies the nested STL container into a dense Blaze matrix (all rows are
    // assumed equal-length and at least one row present), applies the gain and
    // writes the bitmap without normalization.
    const size_t n_rows = m_stl.size();
    const size_t n_cols = m_stl[0].size();
    blaze::DynamicMatrix<ValueType> m(n_rows, n_cols, 0);
    for (size_t r = 0; r < n_rows; ++r)
        for (size_t c = 0; c < n_cols; ++c)
            m(r, c) = m_stl[r][c];
    auto M = m * magnitude;
    mat2bmp::blaze2bmp(M, filename);
}
} // namespace mat2bmp
| 3,962
|
C++
|
.cpp
| 108
| 33.814815
| 117
| 0.631483
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,505
|
connected_components.cpp
|
metric-space-ai_metric/metric/utils/graph/connected_components.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_UTILS_GRAPH_CONNECTED_COMPONENTS_CPP
#define _METRIC_UTILS_GRAPH_CONNECTED_COMPONENTS_CPP
#include "connected_components.hpp"
namespace metric {
namespace graph {
template <typename Matrix> void Cracker<Matrix>::ProcessGraph(Matrix &tempGraph)
{
    // One iteration of the component-finding loop: a "min selection" phase
    // builds a directed graph (directH) redirecting every active vertex's
    // neighbourhood to a single representative, and a "pruning" phase rewrites
    // tempGraph, deactivates resolved vertices and records seeds /
    // propagation-tree edges for the later PropagateTrees() pass.
    // Min Selection
    Matrix directH(tempGraph.rows(), tempGraph.rows(), 0);
    for (size_t i = 0; i != tempGraph.rows(); ++i) {
        if (ActiveVertices[i]) {
            // An active vertex
            auto neighbors = blaze::row(tempGraph, i);
            for (size_t j = 0; j != neighbors.size(); ++j) {
                if ((ActiveVertices[j] && neighbors[j]) || (j == i)) {
                    // First active neighbour (or the vertex itself) becomes the
                    // representative all of i's active neighbours point to.
                    size_t minVertex = j;
                    directH(i, minVertex) = true;
                    for (size_t k = 0; k != neighbors.size(); ++k)
                        if (ActiveVertices[k] && neighbors[k])
                            // Add edge
                            directH(k, minVertex) = true;
                    break;
                }
            }
        }
    }
    // Pruning
    tempGraph = 0;
    // Min selection
    for (size_t i = 0; i != directH.rows(); ++i) {
        size_t minVertex = 0;
        if (ActiveVertices[i]) {
            // An active vertex
            auto neighbors = blaze::row(directH, i);
            size_t nbCount = neighborCount(i, neighbors);
            for (size_t j = 0; j != neighbors.size(); ++j) {
                if (ActiveVertices[j] && neighbors[j] && (j != i)) {
                    // First active out-neighbour other than i itself.
                    minVertex = j;
                    if (nbCount > 1) {
                        // Add an edge between i's remaining neighbours and the
                        // representative so connectivity survives pruning.
                        for (size_t k = j + 1; k != neighbors.size(); ++k)
                            if (ActiveVertices[k] && neighbors[k] && (k != minVertex)) {
                                // Undirect graph, symmetrize
                                tempGraph(k, minVertex) = true;
                                tempGraph(minVertex, k) = true;
                            }
                    }
                    break;
                }
            }
            size_t cv = incomingCount(i, directH);
            if (!directH(i, i)) {
                // Not connected to itself. Disable the vertex and remember in
                // the propagation tree which representative absorbed it.
                ActiveVertices[i] = false;
                --ActiveNum;
                PropagationTrees(minVertex, i) = true;
            } else if (cv == 1 && nbCount == 1) {
                // IsSeed: Connected to itself only, it has not any connected (upcoming and outcoming) vertices
                Seeds.push_back(i);
                // Disable the vertex
                ActiveVertices[i] = false;
                --ActiveNum;
            }
        }
    }
}
template <typename Matrix> void Cracker<Matrix>::PropagateTrees()
{
    // Grows one component per seed by walking its propagation tree, then drops
    // the trivial single-node components.
    Components.resize(Seeds.size());
    auto seed = Seeds.begin();
    for (auto component = Components.begin(); component != Components.end(); ++component, ++seed)
        ProcessTreeNode(*seed, *component);
    Components.remove_if([](auto &component) { return component.size() == 1; });
}
template <typename Matrix> void Cracker<Matrix>::ProcessTreeNode(const size_t node, std::vector<size_t> &Nodevector)
{
    // Depth-first collection: record this node, then recurse into every child
    // flagged in its row of the propagation tree.
    Nodevector.push_back(node);
    const auto children = blaze::row(PropagationTrees, node);
    size_t child = 0;
    while (child != children.size()) {
        if (children[child])
            ProcessTreeNode(child, Nodevector);
        ++child;
    }
}
template <typename Matrix>
size_t Cracker<Matrix>::neighborCount(
    const size_t n, const blaze::DynamicVector<typename Matrix::ElementType, blaze::rowVector> &neighbors) const
{
    // Counts active neighbours, capped at 2: callers only ever distinguish
    // "exactly one" from "more than one", so counting stops early.
    size_t found = 0;
    for (size_t i = 0; i != neighbors.size(); ++i) {
        if (!ActiveVertices[i] || !neighbors[i])
            continue;
        ++found;
        if (found == 2)
            return 2;
    }
    return found;
}
template <typename Matrix> size_t Cracker<Matrix>::incomingCount(const size_t n, Matrix &mx) const
{
    // Counts active vertices with an edge into column n, capped at 2: callers
    // only compare the result against 1, so counting stops early.
    // (Removed an unused local typedef `Ty`.)
    size_t count = 0;
    for (size_t i = 0; i != mx.rows(); i++)
        if (ActiveVertices[i] && mx(i, n)) {
            ++count;
            // We compare with 1 only
            if (count == 2)
                return 2;
        }
    return count;
}
template <typename Matrix>
void Cracker<Matrix>::ConvertVertexvectorToMatrix(const std::vector<size_t> &vertices, const Matrix &input,
                                                  Matrix &output)
{
    // Copies into `output` only the edges of `input` whose endpoints are both
    // listed in `vertices`; membership is resolved via a boolean lookup table.
    std::vector<bool> inComponent(input.rows());
    for (const size_t v : vertices)
        inComponent[v] = true;
    for (size_t i = 0; i < input.rows(); ++i) {
        if (!inComponent[i])
            continue;
        for (size_t j = i + 1; j < input.rows(); ++j) {
            if (!inComponent[j])
                continue;
            output(i, j) = input(i, j);
            output(j, i) = input(j, i);
        }
    }
}
template <typename Matrix>
std::vector<Matrix> Cracker<Matrix>::GetAllComponents(const Matrix &input, const size_t Count)
{
    // Runs the component-finding iteration until every vertex is resolved,
    // reconstructs components from the propagation trees, and returns the
    // `Count` largest ones as adjacency matrices (Count == 0 => all).
    // (Removed an unused local typedef `Ty`.)
    ActiveVertices.assign(input.rows(), true);
    ActiveNum = input.rows();
    PropagationTrees.resize(input.rows(), input.rows());
    PropagationTrees = false;
    Matrix tempG = input;
    do {
        ProcessGraph(tempG);
    } while (ActiveNum > 0);
    PropagateTrees();
    // Largest components first.
    Components.sort([](const auto &x, const auto &y) { return x.size() > y.size(); });
    // if Count = 0 - get all components, 1 - largest
    size_t sz, compsz = Components.size();
    sz = Count ? (Count < compsz ? Count : compsz) : compsz;
    Components.resize(sz);
    // Materialize each surviving component as a full-size adjacency matrix.
    std::vector<Matrix> matrices(Components.size(), Matrix(input.rows(), input.rows(), false));
    size_t k = 0;
    for (auto &lst : Components) {
        ConvertVertexvectorToMatrix(lst, input, matrices[k]);
        ++k;
    }
    return matrices;
}
// if Count=0 - get all components, 1 - largest
template <typename Matrix> inline std::vector<Matrix> connected_components(const Matrix &input, const size_t Count)
{
Cracker<Matrix> CA;
return CA.GetAllComponents(input, Count);
}
// if Count=0 - get all components, 1 - largest
template <typename Matrix> inline std::vector<Matrix> all_connected_components(const Matrix &input)
{
Cracker<Matrix> CA;
return CA.GetAllComponents(input, size_t(0));
}
template <typename Matrix> inline std::vector<Matrix> largest_connected_component(const Matrix &input)
{
Cracker<Matrix> CA;
return CA.GetAllComponents(input, size_t(1));
}
} // namespace graph
} // namespace metric
#endif
| 5,684
|
C++
|
.cpp
| 179
| 28.47486
| 116
| 0.669842
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,506
|
graph.cpp
|
metric-space-ai_metric/metric/utils/graph/graph.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#include "metric/utils/graph.hpp"
#include <algorithm>
#include <unordered_map>
namespace metric {
// Graph based on blaze-lib
/// Construct an empty graph reserving the given number of nodes; edges must
/// be added later (the graph starts out invalid).
template <typename WeightType, bool isDense, bool isSymmetric>
Graph<WeightType, isDense, isSymmetric>::Graph(size_t nodesNumber) : nodesNumber(nodesNumber), valid(false)
{
}

/// Default-construct an empty, invalid graph with zero nodes.
template <typename WeightType, bool isDense, bool isSymmetric>
Graph<WeightType, isDense, isSymmetric>::Graph() : nodesNumber(0), valid(false)
{
}

/// Build a graph directly from a list of (from, to) edge pairs; the node
/// count is derived from the largest index seen (see buildEdges).
template <typename WeightType, bool isDense, bool isSymmetric>
Graph<WeightType, isDense, isSymmetric>::Graph(const std::vector<std::pair<size_t, size_t>> &edgesPairs)
{
    buildEdges(edgesPairs);
}

// FIXME: check what to do with other fields
// NOTE(review): nodesNumber/valid are left at their in-class defaults here —
// confirm callers do not rely on them after this constructor.
template <typename WeightType, bool isDense, bool isSymmetric>
Graph<WeightType, isDense, isSymmetric>::Graph(MatrixType &&matrix) : matrix(std::move(matrix))
{
}

/// @return the number of nodes this graph was built for.
template <typename WeightType, bool isDense, bool isSymmetric>
size_t Graph<WeightType, isDense, isSymmetric>::getNodesNumber() const
{
    return nodesNumber;
}

/// @return true once the graph structure has been successfully constructed.
template <typename WeightType, bool isDense, bool isSymmetric>
bool Graph<WeightType, isDense, isSymmetric>::isValid() const
{
    return valid;
}
// left for comparison
/// Legacy DFS-based neighbourhood search: returns, for each depth
/// 0..maxDeep, the node indices reachable from `index`, each node reported
/// at the smallest depth at which it was seen. Unweighted graphs only —
/// returns an empty result when isWeighted is set.
template <typename WeightType, bool isDense, bool isSymmetric>
std::vector<std::vector<size_t>> Graph<WeightType, isDense, isSymmetric>::getNeighboursOld(const size_t index,
                                                                                          const size_t maxDeep)
{
    // return empty if weighted, TODO implement weight-based metric if needed
    if (isWeighted)
        return std::vector<std::vector<size_t>>(0);
    std::vector<std::vector<size_t>> neighboursList(maxDeep + 1);
    // Explicit DFS state: row_stack holds the current path of nodes, the
    // iterator stack holds the position within each node's adjacency row.
    std::stack<typename Graph<WeightType, isDense, isSymmetric>::MatrixType::Iterator> iterator_stack;
    std::stack<size_t> row_stack;
    // indices maps node -> smallest depth at which it has been seen so far.
    std::unordered_map<size_t, size_t> indices;
    row_stack.push(index);
    iterator_stack.push(
        matrix.begin(index)); // stacks are ever processed together (so they are always of the same size)
    indices[row_stack.top()] = 0;
    size_t depth = 1;
    while (true) {
        if (iterator_stack.top() == matrix.end(row_stack.top()) || depth > maxDeep) // end of row or max depth reached
        {
            row_stack.pop(); // return to the previous level
            iterator_stack.pop();
            depth--;
            if (depth < 1) // finish
                break;
        } else {
            row_stack.push(iterator_stack.top()->index()); // enter the next level
            iterator_stack.push(matrix.begin(row_stack.top()));
            depth++;
            auto search = indices.find(row_stack.top());
            if (search == indices.end() ||
                search->second > depth - 1) // node not exists or its depth is greater than current
            {
                indices[row_stack.top()] = depth - 1; // write node into output
            }
            continue; // prevent from a step along the level when entered the new level
        }
        iterator_stack.top()++; // step along the level
    }
    // Bucket every discovered node by its minimal depth.
    for (const auto &[index, deep] : indices) {
        neighboursList[deep].push_back(index);
    }
    return neighboursList;
}
/// Computes (base ^ exponent) % modulus with square-and-multiply,
/// O(log exponent) instead of the previous O(exponent) loop.
/// Fix: anything mod 1 is 0 — the previous code wrongly returned 1.
/// NOTE: intermediate products assume modulus < 2^32 on 64-bit size_t
/// (the previous implementation had the same overflow limitation).
template <typename WeightType, bool isDense, bool isSymmetric>
size_t Graph<WeightType, isDense, isSymmetric>::modularPow(const size_t base, const size_t exponent,
                                                           const size_t modulus)
{
    if (modulus == 1) {
        return 0;
    }
    size_t result = 1;
    size_t b = base % modulus;
    size_t e = exponent;
    while (e > 0) {
        if (e & 1u)
            result = (result * b) % modulus;
        b = (b * b) % modulus;
        e >>= 1;
    }
    return result;
}
/// Sizes the adjacency matrix to fit the highest node id referenced by
/// `edgesPairs` and inserts an edge of weight 1 for every pair. Self-loops
/// are dropped.
template <typename WeightType, bool isDense, bool isSymmetric>
void Graph<WeightType, isDense, isSymmetric>::buildEdges(const std::vector<std::pair<size_t, size_t>> &edgesPairs)
{
    // Matrix dimension = one past the highest node id seen.
    size_t maxNode = 0;
    for (const auto &edge : edgesPairs) {
        if (edge.first > maxNode)
            maxNode = edge.first;
        if (edge.second > maxNode)
            maxNode = edge.second;
    }
    const size_t dim = maxNode + 1;
    if (dim > nodesNumber)
        nodesNumber = dim;
    matrix.resize((unsigned long)dim, (unsigned long)dim);
    matrix.reset();
    // TODO optimize via reserve-insert-finalize idiom for sparse matrices.
    for (const auto &[from, to] : edgesPairs) {
        if (from != to)
            matrix(from, to) = 1;
    }
}
/// Replaces the adjacency matrix wholesale; dimensions must already agree.
template <typename WeightType, bool isDense, bool isSymmetric>
void Graph<WeightType, isDense, isSymmetric>::updateEdges(const MatrixType &edgeMatrix)
{
    assert(edgeMatrix.rows() == matrix.rows());
    assert(edgeMatrix.columns() == matrix.columns());
    matrix = edgeMatrix;
    // Flag that the matrix was replaced (field declared in the header —
    // presumably invalidates cached derived data).
    matrix_changed_ = true;
}
/// Weighted-graph overload: traversal for weighted graphs is not supported
/// yet, so an empty result is returned.
/// TODO implement weight-based traversal if needed.
template <typename WeightType, bool isDense, bool isSymmetric>
template <typename T, bool denseFlag>
typename std::enable_if_t<!std::is_same<T, bool>::value, std::vector<std::vector<size_t>>>
Graph<WeightType, isDense, isSymmetric>::getNeighbours(const size_t index, const size_t maxDeep)
{
    return {};
}
/// Breadth-first neighbourhood search (sparse boolean specialization):
/// returns, for each depth 0..maxDeep, the nodes whose hop distance from
/// `index` equals that depth. neighboursList[0] holds `index` itself.
template <typename WeightType, bool isDense, bool isSymmetric>
template <typename T, bool denseFlag>
typename std::enable_if_t<std::is_same<T, bool>::value && !denseFlag, std::vector<std::vector<size_t>>>
Graph<WeightType, isDense, isSymmetric>::getNeighbours(const size_t index, const size_t maxDeep)
{
    std::vector<std::vector<size_t>> neighboursList(maxDeep + 1);
    if (index >= matrix.columns())
        return neighboursList;
    // Level-synchronous BFS: `parents` is the current frontier, `children`
    // collects the next one, `nodes` marks vertices already visited.
    std::vector<size_t> parents;
    std::vector<size_t> children;
    std::vector<bool> nodes = std::vector<bool>(matrix.columns(), false);
    parents.push_back(index);
    neighboursList[0].push_back(index);
    nodes[index] = true;
    size_t depth = 1;
    while (depth <= maxDeep) {
        typename MatrixType::Iterator it;
        for (auto el : parents) {
            for (it = matrix.begin(el); it != matrix.end(el);
                 it++) // for dense and for sparse matrix.end(...) has different meaning!..
            {
                // tested on sparse only
                if (!nodes[it->index()]) {
                    neighboursList[depth].push_back(it->index()); // write node into output
                    children.push_back(it->index());
                    nodes[it->index()] = true;
                }
            }
        }
        depth++;
        parents.swap(children);
        children = {};
    }
    return neighboursList;
}
/// Breadth-first neighbourhood search (dense boolean specialization): same
/// level-by-level BFS as the sparse variant, but dense row iterators visit
/// every column, so each cell is explicitly checked for an edge value of 1.
template <typename WeightType, bool isDense, bool isSymmetric>
template <typename T, bool denseFlag>
typename std::enable_if_t<std::is_same<T, bool>::value && denseFlag, std::vector<std::vector<size_t>>>
Graph<WeightType, isDense, isSymmetric>::getNeighbours(const size_t index, const size_t maxDeep)
{
    // similar to sparse specialization except the way matrix elements are accessed
    std::vector<std::vector<size_t>> neighboursList(maxDeep + 1);
    if (index >= matrix.columns())
        return neighboursList;
    std::vector<size_t> parents;
    std::vector<size_t> children;
    std::vector<bool> nodes = std::vector<bool>(matrix.columns(), false);
    parents.push_back(index);
    neighboursList[0].push_back(index);
    nodes[index] = true;
    size_t depth = 1;
    while (depth <= maxDeep) {
        typename MatrixType::Iterator it;
        for (auto el : parents) {
            for (it = matrix.begin(el); it != matrix.end(el); it++) {
                // Column index recovered from the iterator offset.
                size_t idx = it - matrix.begin(el);
                if (matrix(el, idx) != 1)
                    continue; // no edge in this cell
                if (!nodes[idx]) {
                    neighboursList[depth].push_back(idx); // write node into output
                    children.push_back(idx);
                    nodes[idx] = true;
                }
            }
        }
        depth++;
        parents.swap(children);
        children = {};
    }
    return neighboursList;
}
/// Read-only access to the underlying blaze adjacency matrix.
template <typename WeightType, bool isDense, bool isSymmetric>
auto Graph<WeightType, isDense, isSymmetric>::get_matrix() const -> const
    typename Graph<WeightType, isDense, isSymmetric>::MatrixType &
{
    return matrix;
}
// end of base class implementation
// Grid4_blaze
/// 4-connected rectangular grid graph (von Neumann neighbourhood). The
/// single-argument form requires a perfect-square node count; otherwise the
/// graph stays invalid.
inline Grid4::Grid4(size_t nodesNumber) : Graph<>(nodesNumber)
{
    size_t s = sqrt(nodesNumber);
    if ((s * s) != nodesNumber) {
        valid = false;
    } else {
        construct(s, s);
    }
}

/// width x height 4-connected grid.
inline Grid4::Grid4(size_t width, size_t height) : Graph<>(width * height) { construct(width, height); }

/// Builds the edge list: each cell (i, j) links to its horizontal and
/// vertical neighbours (the ii/jj offsets where one of them is 0).
inline void Grid4::construct(size_t width, size_t height)
{
    unsigned long n_nodes = width * height;
    matrix.resize(n_nodes, n_nodes);
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    for (size_t i = 0; i < height; ++i) {
        for (size_t j = 0; j < width; ++j) {
            // Clamp offset ranges at the grid borders.
            int ii0 = 0, ii1 = 0;
            int jj0 = 0, jj1 = 0;
            if (i > 0) {
                ii0 = -1;
            }
            if (j > 0) {
                jj0 = -1;
            }
            if (i < height - 1) {
                ii1 = 1;
            }
            if (j < width - 1) {
                jj1 = 1;
            }
            for (int ii = ii0; ii <= ii1; ++ii) {
                for (int jj = jj0; jj <= jj1; ++jj) {
                    // (ii == 0) || (jj == 0) keeps axis-aligned neighbours
                    // (and the cell itself, which buildEdges drops).
                    if ((ii == 0) || (jj == 0)) {
                        edgesPairs.emplace_back(i * width + j, (i + ii) * width + (j + jj));
                    }
                }
            }
        }
    }
    buildEdges(edgesPairs);
    valid = true;
}
// Grid6_blaze
/// 6-connected (hexagonal) grid graph. The single-argument form requires a
/// perfect-square node count; otherwise the graph stays invalid.
inline Grid6::Grid6(size_t nodesNumber) : Graph<>(nodesNumber)
{
    size_t s = sqrt(nodesNumber);
    if ((s * s) != nodesNumber) {
        valid = false;
    } else {
        construct(s, s);
    }
}

/// width x height hexagonal grid.
inline Grid6::Grid6(size_t width, size_t height) : Graph<>(width * height) { construct(width, height); }

/// Builds the hex lattice: every cell links to its 4 axis neighbours plus
/// two diagonal neighbours whose side depends on the row parity (offset
/// coordinates), giving up to 6 connections per cell.
inline void Grid6::construct(size_t width, size_t height)
{
    unsigned long n_nodes = width * height;
    matrix.resize(n_nodes, n_nodes);
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    for (size_t i = 0; i < height; ++i) {
        for (size_t j = 0; j < width; ++j) {
            // Row parity decides which diagonals exist; the border flags
            // clamp neighbours at the grid edges.
            bool odd = true;
            if (i % 2 == 0) {
                odd = false;
            }
            bool up = true;
            if (i == 0) {
                up = false;
            }
            bool down = true;
            if (i == height - 1) {
                down = false;
            }
            bool left = true;
            if (j == 0) {
                left = false;
            }
            bool right = true;
            if (j == width - 1) {
                right = false;
            }
            if (up) {
                edgesPairs.emplace_back(i * width + j, (i - 1) * width + (j + 0));
            }
            if (down) {
                edgesPairs.emplace_back(i * width + j, (i + 1) * width + (j + 0));
            }
            if (left) {
                edgesPairs.emplace_back(i * width + j, (i + 0) * width + (j - 1));
            }
            if (right) {
                edgesPairs.emplace_back(i * width + j, (i + 0) * width + (j + 1));
            }
            if (!odd && left) {
                if (up) {
                    edgesPairs.emplace_back(i * width + j, (i - 1) * width + (j - 1));
                }
                if (down) {
                    edgesPairs.emplace_back(i * width + j, (i + 1) * width + (j - 1));
                }
            }
            if (odd && right) {
                if (up) {
                    edgesPairs.emplace_back(i * width + j, (i - 1) * width + (j + 1));
                }
                if (down)
                    edgesPairs.emplace_back(i * width + j, (i + 1) * width + (j + 1));
            }
        }
    }
    valid = true;
    buildEdges(edgesPairs);
}
// Grid8_blaze
/// 8-connected rectangular grid graph (Moore neighbourhood). The
/// single-argument form requires a perfect-square node count; otherwise the
/// graph stays invalid.
inline Grid8::Grid8(size_t nodesNumber) : Graph<>(nodesNumber)
{
    size_t s = sqrt(nodesNumber);
    if ((s * s) != nodesNumber) {
        valid = false;
    } else {
        construct(s, s);
    }
}

/// width x height 8-connected grid.
inline Grid8::Grid8(size_t width, size_t height) : Graph<>(width * height) { construct(width, height); }

/// Builds the edge list: each cell links to every neighbour in its 3x3
/// surrounding except itself ((ii != 0) || (jj != 0)).
inline void Grid8::construct(size_t width, size_t height)
{
    unsigned long n_nodes = width * height;
    matrix.resize(n_nodes, n_nodes);
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    for (size_t i = 0; i < height; ++i) {
        for (size_t j = 0; j < width; ++j) {
            // Clamp offset ranges at the grid borders.
            int ii0 = 0, ii1 = 0;
            int jj0 = 0, jj1 = 0;
            if (i > 0) {
                ii0 = -1;
            }
            if (j > 0) {
                jj0 = -1;
            }
            if (i < height - 1) {
                ii1 = 1;
            }
            if (j < width - 1) {
                jj1 = 1;
            }
            for (int ii = ii0; ii <= ii1; ++ii) {
                for (int jj = jj0; jj <= jj1; ++jj) {
                    if ((ii != 0) || (jj != 0)) {
                        edgesPairs.emplace_back(i * width + j, (i + ii) * width + (j + jj));
                    }
                }
            }
        }
    }
    buildEdges(edgesPairs);
    valid = true;
}
// Paley_blaze
/// Paley graph: defined for nodesNumber == 1 (mod 4); vertex i connects to
/// i + q (mod n) for every q in the square list below. If the size check
/// fails, the graph stays invalid.
inline Paley::Paley(size_t nodesNumber) : Graph<>(nodesNumber)
{
    if (nodesNumber % 4 != 1) {
        return;
    }
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    // Squares i^2 mod n for i in [0, (n-1)/2) — the quadratic residues.
    // The i == 0 entry yields self-loops, which buildEdges drops.
    std::vector<size_t> squareList;
    size_t l = (nodesNumber - 1) / 2;
    squareList.reserve(l);
    for (size_t i = 0; i < l; ++i) {
        squareList.push_back(i * i % nodesNumber);
    }
    for (size_t i = 0; i < nodesNumber; ++i) {
        for (auto j : squareList) {
            edgesPairs.emplace_back(i, (i + j) % nodesNumber);
        }
    }
    buildEdges(edgesPairs);
    valid = true;
}
// LPS_blaze
/// Ring-with-inverse-chords graph on a (Miller-Rabin-tested) prime number of
/// nodes: every vertex i > 0 connects to its ring neighbours i-1 / i+1 and
/// to i^(p-2) mod p, its modular inverse by Fermat's little theorem. Vertex
/// 0 gets only its two ring neighbours. Non-prime sizes stay invalid.
inline LPS::LPS(size_t nodesNumber) : Graph<>(nodesNumber)
{
    if (!millerRabin(nodesNumber)) {
        return;
    }
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    for (size_t i = 0; i < nodesNumber; ++i) {
        if (i == 0) {
            edgesPairs.emplace_back(0, nodesNumber - 1);
            edgesPairs.emplace_back(0, 1);
        } else {
            edgesPairs.emplace_back(i, i - 1);
            edgesPairs.emplace_back(i, (i + 1) % nodesNumber);
            edgesPairs.emplace_back(i, modularPow(i, nodesNumber - 2, nodesNumber));
        }
    }
    buildEdges(edgesPairs);
    valid = true;
}
/// Probabilistic primality test: 20 Miller-Rabin rounds with random bases.
/// NOTE(review): srand(time(NULL)) reseeds the global C RNG on every call,
/// and rand() % n is biased — consider <random>. Left as-is here.
inline bool LPS::millerRabin(const size_t nodesNumber)
{
    srand(time(NULL));
    // Decompose nodesNumber - 1 = d * 2^s with d odd.
    auto d = nodesNumber - 1;
    auto s = 0;
    while (d % 2 == 0) {
        d >>= 1;
        s += 1;
    }
    for (int repeat = 0; repeat < 20; ++repeat) {
        // Pick a random non-zero base a in [1, nodesNumber).
        size_t a = 0;
        while (a == 0) {
            a = rand() % nodesNumber;
        }
        if (!miller_rabin_pass(a, s, d, nodesNumber)) {
            return false;
        }
    }
    return true;
}
/// One Miller-Rabin witness round for n = nodesNumber, with n - 1 = d * 2^s.
/// Fix: the previous implementation computed std::pow(a, d) in floating
/// point and converted to size_t, which loses precision (and is UB on
/// overflow) for all but tiny a/d, producing wrong primality answers —
/// replaced by integer square-and-multiply. Also the squaring loop used
/// `i < s - 1`, which wraps around and effectively never terminates when
/// s == 0 (even n); the loop bound is now wrap-safe.
/// NOTE: intermediate products assume n < 2^32 on 64-bit size_t.
inline bool LPS::miller_rabin_pass(const size_t a, const size_t s, const size_t d, const size_t nodesNumber)
{
    // p = a^d mod n via square-and-multiply.
    size_t p = 1;
    size_t base = a % nodesNumber;
    size_t e = d;
    while (e > 0) {
        if (e & 1u)
            p = (p * base) % nodesNumber;
        base = (base * base) % nodesNumber;
        e >>= 1;
    }
    if (p == 1) {
        return true;
    }
    // Square p up to s - 1 times, looking for n - 1 (i.e. -1 mod n).
    for (size_t i = 0; i + 1 < s; ++i) {
        if (p == nodesNumber - 1) {
            return true;
        }
        p = (p * p) % nodesNumber;
    }
    return p == nodesNumber - 1;
}
// Margulis_blaze
/// Margulis expander on an s x s torus (nodesNumber must be a perfect
/// square): each vertex (i, j) connects to (i+2j, j), (i, 2i+j),
/// (i, 2i+j+1) and (i+2j+1, j), all taken modulo s.
inline Margulis::Margulis(size_t nodesNumber) : Graph<>(nodesNumber)
{
    size_t s = sqrt(nodesNumber);
    if ((s * s) != nodesNumber) {
        valid = false;
    } else {
        std::vector<std::pair<size_t, size_t>> edgesPairs;
        for (size_t i = 0; i < s; ++i) {
            for (size_t j = 0; j < s; ++j) {
                edgesPairs.emplace_back(i * s + j, ((i + 2 * j) % s) * s + j);
                edgesPairs.emplace_back(i * s + j, i * s + (2 * i + j) % s);
                edgesPairs.emplace_back(i * s + j, i * s + (2 * i + j + 1) % s);
                edgesPairs.emplace_back(i * s + j, ((i + 2 * j + 1) % s) * s + j);
            }
        }
        buildEdges(edgesPairs);
        valid = true;
    }
}
// Random graph with a constant number of connections per node
/// Builds a random weighted graph on nNodes vertices: with nConnections > 0
/// each column gets that many uniformly weighted edges, otherwise every
/// cell is filled (fully dense random matrix).
template <typename WType, bool isDense>
RandomUniform<WType, isDense>::RandomUniform(size_t nNodes, WType lower_bound, WType upper_bound, int nConnections)
    : Graph<WType, isDense, false>(nNodes)
{
    // Pick the matrix type that matches the isDense flag.
    using MType = typename std::conditional<isDense, blaze::DynamicMatrix<WType>, blaze::CompressedMatrix<WType>>::type;
    this->matrix = MType(nNodes, nNodes);
    if (nConnections > 0)
        this->fill(this->matrix, lower_bound, upper_bound, nConnections);
    else
        this->fill(this->matrix, lower_bound, upper_bound);
    this->valid = true;
}
/// Dense overload: place `nConnections` random-weight edges per column,
/// drawing random rows and retrying on collisions (cells already non-zero).
/// NOTE(review): the engine is default-seeded (same graph every run), rows
/// may equal the column (self-loops possible), and nConnections >= nNodes
/// never terminates — confirm these are acceptable for callers.
template <typename WType, bool isDense> //
void RandomUniform<WType, isDense>::fill(blaze::DynamicMatrix<WType> &matrix, WType lower_bound, WType upper_bound,
                                         int nConnections)
{
    std::default_random_engine rgen;
    size_t nNodes = matrix.columns();
    assert(nNodes == matrix.rows());
    auto uniform_int = std::uniform_int_distribution<int>(0, nNodes - 1);
    auto uniform_double = std::uniform_real_distribution<double>(lower_bound, upper_bound);
    int count;
    size_t r_row, r_col;
    for (r_col = 0; r_col < nNodes; r_col++) {
        for (count = 0; count < nConnections; count++) {
            r_row = uniform_int(rgen);
            if (matrix(r_row, r_col) ==
                0) // TODO profile comparison of doubles, try replacing by lookup in vector of bools
                matrix(r_row, r_col) = uniform_double(rgen);
            else
                count--; // retry
        }
    }
}
/// Sparse overload: same sampling as the dense version, but element presence
/// is tested with CompressedMatrix::find and new weights use insert().
/// NOTE(review): shares the dense overload's caveats (default-seeded engine,
/// possible self-loops, no termination when nConnections >= nNodes).
template <typename WType, bool isDense> // optimized overload for compressed matrix
void RandomUniform<WType, isDense>::fill(blaze::CompressedMatrix<WType> &matrix, WType lower_bound, WType upper_bound,
                                         int nConnections)
{
    std::default_random_engine rgen;
    size_t nNodes = matrix.columns();
    assert(nNodes == matrix.rows());
    auto uniform_int = std::uniform_int_distribution<int>(0, nNodes - 1);
    auto uniform_double = std::uniform_real_distribution<double>(lower_bound, upper_bound);
    int count;
    size_t r_row, r_col;
    for (r_col = 0; r_col < nNodes; r_col++) {
        for (count = 0; count < nConnections; count++) {
            r_row = uniform_int(rgen);
            if (matrix.find(r_row, r_col) == matrix.end(r_row)) // find works for compressed matrix only
                matrix.insert(r_row, r_col, uniform_double(rgen));
            else
                count--; // retry
        }
    }
}
/// Full-fill overload for both dense and sparse matrices: assigns an
/// independent uniform random weight to every cell of the square matrix.
/// Note: the engine is default-seeded, so output is identical across runs.
template <typename WType, bool isDense>
template <typename MType>
void RandomUniform<WType, isDense>::fill(MType &matrix, WType lower_bound, WType upper_bound)
{
    std::default_random_engine engine;
    auto weight_dist = std::uniform_real_distribution<double>(lower_bound, upper_bound);
    const size_t n = matrix.columns();
    assert(n == matrix.rows());
    for (size_t row = 0; row < n; ++row) {
        for (size_t col = 0; col < n; ++col) {
            matrix(row, col) = weight_dist(engine);
        }
    }
}
// KNN-Graph
// Graph factory based on Blaze matrix of 4 allowed types

/// Wrap a sparse, non-symmetric blaze matrix into a Graph (takes ownership).
template <class ValueType> Graph<ValueType, false, false> make_graph(blaze::CompressedMatrix<ValueType> &&matrix)
{
    return Graph<ValueType, false, false>(std::move(matrix));
}

/// Wrap a sparse symmetric matrix (takes ownership).
template <class ValueType>
Graph<ValueType, false, true> make_graph(blaze::SymmetricMatrix<blaze::CompressedMatrix<ValueType>> &&matrix)
{
    return Graph<ValueType, false, true>(std::move(matrix));
}

/// Wrap a dense, non-symmetric matrix (takes ownership).
template <class ValueType> Graph<ValueType, true, false> make_graph(blaze::DynamicMatrix<ValueType> &&matrix)
{
    return Graph<ValueType, true, false>(std::move(matrix));
}

/// Wrap a dense symmetric matrix (takes ownership).
template <class ValueType>
Graph<ValueType, true, true> make_graph(blaze::SymmetricMatrix<blaze::DynamicMatrix<ValueType>> &&matrix)
{
    return Graph<ValueType, true, true>(std::move(matrix));
}
} // end namespace metric
| 17,638
|
C++
|
.cpp
| 559
| 28.631485
| 118
| 0.666981
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,507
|
sparsify.cpp
|
metric-space-ai_metric/metric/utils/graph/sparsify.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
Copyright (c) 2019 Oleg Popov
*/
/*
Laplacians is a package containing graph algorithms, with an emphasis on tasks related to
spectral and algebraic graph theory. It contains (and will contain more) code for solving
systems of linear equations in graph Laplacians, low stretch spanning trees, sparsifiation,
clustering, local clustering, and optimization on graphs.
All graphs are represented by sparse adjacency matrices. This is both for speed, and because
our main concerns are algebraic tasks. It does not handle dynamic graphs. It would be very slow
to implement dynamic graphs this way.
https://github.com/danspielman/Laplacians.jl
*/
#ifndef _METRIC_UTILS_GRAPH_SPARSIFY_CPP
#define _METRIC_UTILS_GRAPH_SPARSIFY_CPP
namespace metric {
/// Spectral sparsification by effective resistances (in the spirit of
/// Spielman-Srivastava, ported from Laplacians.jl): edge effective
/// resistances are approximated via a Johnson-Lindenstrauss projection and
/// an approximate Laplacian solver; each edge is then kept with probability
/// proportional to weight x resistance, and survivors are reweighted.
/// @param a                symmetric weighted adjacency matrix
/// @param ep               approximation parameter epsilon
/// @param matrixConcConst  oversampling constant in the keep probability
/// @param JLfac            number of JL dimensions = JLfac * log(n)
template <typename Tv>
blaze::CompressedMatrix<Tv, blaze::columnMajor>
sparsify_effective_resistance(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a, float ep, float matrixConcConst,
                              float JLfac)
{
    Random<double> rnd;
    std::vector<size_t> pcgIts;
    // Approximate Laplacian solver for a.
    SolverB<Tv> f = approxchol_lap(a, pcgIts, 1e-2F);
    size_t n = a.rows();
    size_t k = (size_t)round(JLfac * std::log(n)); // number of dims for JL
    // Weighted edge-vertex incidence matrix; m = number of edges.
    blaze::CompressedMatrix<Tv, blaze::columnMajor> U = wtedEdgeVertexMat(a);
    size_t m = U.rows();
    // Random Gaussian projection matrix R (m x k).
    blaze::DynamicMatrix<double, blaze::columnMajor> R(m, k);
    for (size_t i = 0; i < m; i++) {
        for (size_t j = 0; j < k; j++)
            R(i, j) = rnd.randn();
    }
    blaze::CompressedMatrix<Tv, blaze::columnMajor> UR = adjoint(U) * R;
    // Solve L * V(:,i) = UR(:,i) for every JL dimension.
    blaze::CompressedMatrix<Tv, blaze::columnMajor> V(n, k, 0);
    for (size_t i = 0; i < k; i++) {
        blaze::DynamicVector<Tv> x, b;
        std::vector<size_t> idx = collect(0, UR.rows());
        b = index(UR, idx, i);
        x = f(b);
        idx = collect(0, V.rows());
        index(V, idx, i, x);
    }
    auto [ai, aj, av] = findnz(triu(a));
    // Keep probability per edge: min(1, w * R_eff * C * log(n) / ep^2).
    blaze::DynamicVector<Tv> prs(av.size());
    for (size_t h = 0; h < av.size(); h++) {
        size_t i = ai[h];
        size_t j = aj[h];
        blaze::DynamicVector<Tv> vi, vj, vr;
        std::vector<size_t> idx = collect(0, V.columns());
        vi = index(V, i, idx);
        vj = index(V, j, idx);
        // ||V(i,:) - V(j,:)||^2 / k estimates the effective resistance.
        Tv nr = std::pow(norm(vi - vj), 2) / k;
        Tv tmp = av[h] * nr * matrixConcConst * std::log(n) / std::pow(ep, 2);
        prs[h] = (1 < tmp) ? 1 : tmp;
    }
    // Bernoulli sampling of edges by their keep probabilities.
    std::vector<bool> ind(prs.size());
    blaze::DynamicVector<double> rndvec = rnd.randv(prs.size());
    for (size_t i = 0; i < prs.size(); i++)
        ind[i] = rndvec[i] < prs[i];
    std::vector<size_t> ai_ind = indexbool(ai, ind);
    std::vector<size_t> aj_ind = indexbool(aj, ind);
    blaze::DynamicVector<Tv> av_ind = indexbool(av, ind);
    blaze::DynamicVector<Tv> prs_ind = indexbool(prs, ind);
    // Reweight surviving edges by 1/p to keep the expectation unbiased.
    blaze::DynamicVector<Tv> divs = av_ind / prs_ind;
    blaze::CompressedMatrix<Tv, blaze::columnMajor> as = sparse(ai_ind, aj_ind, divs, n, n);
    as = as + adjoint(as);
    return as;
}
namespace kruskal_sparsify_detail {
/// A candidate edge for Kruskal's algorithm. Edges start disabled and are
/// enabled once selected for the spanning tree.
template <typename Tv> class KruskalEdge {
private:
    size_t node_from;  // one endpoint (row index)
    size_t node_to;    // other endpoint (column index)
    Tv weight;         // weight taken from the adjacency matrix
    bool enabled;      // true once the edge is part of the tree

public:
    KruskalEdge(size_t node_from, size_t node_to, Tv weight)
        : node_from(node_from), node_to(node_to), weight(std::move(weight)), enabled(false)
    {
    }
    // Rule-of-Five cleanup: the hand-written move operations were memberwise
    // anyway, so let the compiler generate them (move assignment also gains
    // the correct noexcept specification this way).
    KruskalEdge(KruskalEdge &&) = default;
    KruskalEdge &operator=(KruskalEdge &&) = default;

    inline const Tv &getWeight() const { return weight; }
    inline size_t getNodeFrom() const { return node_from; }
    inline size_t getNodeTo() const { return node_to; }
    inline void enable() { enabled = true; }
    inline bool isEnabled() const { return enabled; }
};
/// Disjoint-set (union-find) node with union-by-size and path compression,
/// used to detect cycles while building the spanning tree.
class KruskalNode {
private:
    KruskalNode *parent;  // null (0) when this node is a set root
    size_t size;          // nodes in the set (meaningful at roots only)
    // Find the set root, compressing the path along the way.
    KruskalNode *find()
    {
        if (!parent)
            return this;
        KruskalNode *p = parent->find();
        parent = p;
        return p;
    }

public:
    KruskalNode() : parent(0), size(1) {}
    // Two nodes are connected iff they share a root.
    inline bool isConnected(KruskalNode &node) { return find() == node.find(); }
    // Merge the two sets, attaching the smaller tree under the larger root.
    inline void union_with(KruskalNode &node)
    {
        KruskalNode *root0 = find();
        KruskalNode *root1 = node.find();
        if (root1->size > root0->size) {
            KruskalNode *tmp = root0;
            root0 = root1;
            root1 = tmp;
        }
        root0->size += root1->size;
        root1->parent = root0;
    }
    // Debug helper: "this:parent:size".
    std::string toString()
    {
        std::stringstream ss;
        ss << this << ":" << parent << ":" << size;
        return ss.str();
    }
};
} // namespace kruskal_sparsify_detail
/// Kruskal spanning-tree sparsifier: reduces the undirected weighted graph
/// `a` to its minimum (minimum == true) or maximum spanning forest.
/// @throws std::invalid_argument when `a` is not square
template <typename Tv>
blaze::CompressedMatrix<Tv, blaze::columnMajor>
sparsify_spanning_tree(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a, bool minimum)
{
    if (a.columns() != a.rows())
        throw std::invalid_argument("expected square matrix");
    using namespace kruskal_sparsify_detail;
    const size_t node_count = a.columns();
    const size_t edge_count = a.nonZeros();
    // initializing edge list
    std::vector<KruskalEdge<Tv>> edges;
    edges.reserve(edge_count);
    for (size_t node_from = 0; node_from < node_count; node_from++) {
        // not checking elements above diagonal (graph is undirected)
        for (auto i = a.begin(node_from); i != a.end(node_from); ++i) {
            size_t node_to = i->index();
            if (node_to >= node_from)
                break;
            Tv weight = i->value();
            edges.push_back(KruskalEdge<Tv>(node_from, node_to, weight));
        }
    }
    // sorting edge list: ascending for a minimum tree, descending for maximum
    if (minimum) {
        sort(edges.begin(), edges.end(),
             [](const KruskalEdge<Tv> &a, const KruskalEdge<Tv> &b) { return a.getWeight() < b.getWeight(); });
    } else {
        sort(edges.begin(), edges.end(),
             [](const KruskalEdge<Tv> &a, const KruskalEdge<Tv> &b) { return a.getWeight() > b.getWeight(); });
    }
    // initializing node list (needed to have disjoint-set data structure)
    std::vector<KruskalNode> nodes;
    nodes.resize(node_count);
    // traversing edge list, addition happens only if no loops are
    // created in the process
    size_t new_edge_count = 0;
    for (auto &i : edges) {
        KruskalNode &node_from = nodes[i.getNodeFrom()];
        KruskalNode &node_to = nodes[i.getNodeTo()];
        if (!node_from.isConnected(node_to)) {
            node_from.union_with(node_to);
            i.enable();
            new_edge_count++;
        }
    }
    // putting together result: selected edges, written symmetrically
    blaze::CompressedMatrix<Tv, blaze::columnMajor> res(node_count, node_count);
    res.reserve(new_edge_count * 2);
    for (auto &i : edges) {
        if (i.isEnabled()) {
            res(i.getNodeTo(), i.getNodeFrom()) = i.getWeight();
            res(i.getNodeFrom(), i.getNodeTo()) = i.getWeight();
        }
    }
    return res;
}
} // namespace metric
#endif
| 6,709
|
C++
|
.cpp
| 198
| 31.222222
| 120
| 0.684789
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,508
|
auto_detect_metric 2.cpp
|
metric-space-ai_metric/metric/utils/auto_detect_metric/auto_detect_metric 2.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 PANDA Team
*/
namespace metric {
/// Default constructor: no state to initialise here.
MetricAutoDetector::MetricAutoDetector() {}
/// Compares several candidate metrics by training a SOM on `dataset` with
/// each and measuring how well SOM-space distances agree with the direct
/// metric distances. Returns the name of the metric with the smallest mean
/// relative difference.
/// @param graph           SOM topology, reused for every metric
/// @param graph_w,graph_h grid dimensions, used by the EMD ground matrix
/// @param dataset         records to evaluate
/// @param isEstimate      true: fast SOM estimate; false: full training
template <typename Record, typename Graph>
std::string MetricAutoDetector::detect(Graph &graph, int graph_w, int graph_h, std::vector<Record> dataset,
                                       bool isEstimate)
{
    std::vector<std::string> metric_type_names = {
        "Euclidean", "Manhatten", "P_norm", "Euclidean_thresholded", "Cosine", "Chebyshev", "Earth Mover Distance",
        "SSIM", "TWED"};
    // Random updating
    std::vector<size_t> randomized_indexes(dataset.size());
    std::iota(randomized_indexes.begin(), randomized_indexes.end(), 0);
    // shuffle samples after all was processed
    std::shuffle(randomized_indexes.begin(), randomized_indexes.end(), std::mt19937{std::random_device{}()});
    std::vector<double> relative_diff_means;
    for (const auto &metric_type : metric_type_names) { // const ref: avoid copying the strings
        // Fix: initialise to +infinity so a name without a matching branch
        // can never win the comparison (previously the value was read
        // uninitialised in that case).
        double relative_diff_mean = std::numeric_limits<double>::infinity();
        if (metric_type == "Euclidean") {
            // Euclidean
            metric::Euclidean<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Euclidean<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Manhatten") {
            // Manhatten
            metric::Manhatten<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Manhatten<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "P_norm") {
            // P_norm
            metric::P_norm<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::P_norm<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Euclidean_thresholded") {
            // Euclidean_thresholded
            metric::Euclidean_thresholded<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Euclidean_thresholded<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Cosine") {
            // Cosine
            metric::Cosine<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Cosine<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Chebyshev") {
            // Chebyshev
            metric::Chebyshev<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Chebyshev<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Earth Mover Distance") {
            // Earth Mover Distance
            auto cost_mat = metric::EMD_details::ground_distance_matrix_of_2dgrid<double>(graph_w, graph_h);
            auto maxCost = metric::EMD_details::max_in_distance_matrix(cost_mat);
            metric::EMD<double> distance(cost_mat, maxCost);
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::EMD<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "SSIM") {
            // SSIM
            metric::SSIM<double, Record> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::SSIM<double, Record>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "TWED") {
            // TWED
            metric::TWED<double> distance(0, 1);
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::TWED<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        }
        relative_diff_means.push_back(relative_diff_mean);
        if (verbose) {
            std::cout << metric_type << " relative_diff_mean: " << relative_diff_mean << std::endl;
        }
    }
    // Pick the metric with the smallest mean relative difference.
    std::vector<double>::iterator min_result = std::min_element(relative_diff_means.begin(), relative_diff_means.end());
    auto best_index = std::distance(relative_diff_means.begin(), min_result);
    if (verbose) {
        std::cout << std::endl;
        std::cout << "Best metric: " << metric_type_names[best_index] << std::endl;
    }
    return metric_type_names[best_index];
}
/// Trains (or estimates) a SOM with the given metric and returns the mean
/// relative difference between SOM-space distances and direct metric
/// distances over up to 20 shuffled sample pairs. Lower is better.
/// Returns +infinity when fewer than two samples are available (previously
/// this divided by zero and produced NaN).
template <typename Record, typename Graph, typename Metric = metric::Euclidean<double>>
double get_mean_distance_difference(Graph &graph, Metric distance, std::vector<Record> dataset,
                                    std::vector<size_t> randomized_indexes, bool isEstimate)
{
    metric::SOM<Record, Graph, Metric> som(graph, Metric());
    if (isEstimate) {
        som.estimate(dataset, 50);
    } else {
        som.train(dataset);
    }
    // Evaluate on at most 20 samples (fewer if the dataset is smaller).
    // Fix: size_t counter avoids the previous signed/unsigned comparison.
    size_t iterations = 20;
    if (iterations > dataset.size()) {
        iterations = dataset.size();
    }
    std::vector<double> relative_diffs;
    for (size_t i = 0; i < iterations; i++) {
        for (size_t j = 0; j < iterations; j++) {
            if (i != j) {
                auto dimR_1 = som.encode(dataset[randomized_indexes[i]]);
                auto bmu_1 = som.BMU(dataset[randomized_indexes[i]]);
                auto dimR_2 = som.encode(dataset[randomized_indexes[j]]);
                auto bmu_2 = som.BMU(dataset[randomized_indexes[j]]);
                // Distance through each record's best matching unit; keep
                // the shorter of the two paths.
                auto som_distance_1 = dimR_1[bmu_1] + dimR_2[bmu_1];
                auto som_distance_2 = dimR_1[bmu_2] + dimR_2[bmu_2];
                auto som_distance = std::min(som_distance_1, som_distance_2);
                auto direct_distance = distance(dataset[randomized_indexes[i]], dataset[randomized_indexes[j]]);
                // Fix: std::abs/std::min — the unqualified calls could
                // resolve to the integer ::abs and silently truncate the
                // floating-point distances.
                auto diff = std::abs(std::abs(som_distance) - std::abs(direct_distance));
                auto relative_diff = diff / std::abs(direct_distance);
                relative_diffs.push_back(relative_diff);
            }
        }
    }
    // Guard: no usable pairs — report +infinity so a degenerate run can
    // never look like the best metric.
    if (relative_diffs.empty()) {
        return std::numeric_limits<double>::infinity();
    }
    double relative_diff_sum = 0;
    for (const auto item : relative_diffs) {
        relative_diff_sum += item;
    }
    return relative_diff_sum / relative_diffs.size();
}
} // end namespace metric
| 5,915
|
C++
|
.cpp
| 128
| 42.78125
| 117
| 0.706239
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,509
|
auto_detect_metric.cpp
|
metric-space-ai_metric/metric/utils/auto_detect_metric/auto_detect_metric.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 PANDA Team
*/
namespace metric {
/// Default constructor: no state to initialise here.
MetricAutoDetector::MetricAutoDetector() {}
/**
 * @brief Picks the metric that best matches the intrinsic geometry of the dataset.
 *
 * For every candidate metric a SOM is trained (or estimated) on the dataset and
 * the mean relative difference between the Kohonen (SOM-induced) distance and
 * the direct metric distance is computed via get_mean_distance_difference();
 * the candidate with the smallest mean relative difference is returned.
 *
 * @param graph      SOM grid; reused for every candidate metric
 * @param graph_w    grid width  (only used for the EMD ground-distance matrix)
 * @param graph_h    grid height (only used for the EMD ground-distance matrix)
 * @param dataset    records the metrics are evaluated on
 * @param isEstimate when true SOM::estimate() is used instead of a full train
 * @return name of the best candidate metric
 */
template <typename Record, typename Graph>
std::string MetricAutoDetector::detect(Graph &graph, int graph_w, int graph_h, std::vector<Record> dataset,
                                       bool isEstimate)
{
    // Active candidates; the commented list documents further supported metrics.
    // std::vector<std::string> metric_type_names = {"Euclidean", "Manhatten", "P_norm", "Euclidean_thresholded",
    // "Cosine", "Chebyshev", "Earth Mover Distance", "SSIM", "TWED"};
    std::vector<std::string> metric_type_names = {"Euclidean", "Manhatten", "P_norm", "Cosine", "Chebyshev"};

    // Evaluate all metrics on one random shuffle of the sample order.
    std::vector<size_t> randomized_indexes(dataset.size());
    std::iota(randomized_indexes.begin(), randomized_indexes.end(), 0);
    std::shuffle(randomized_indexes.begin(), randomized_indexes.end(), std::mt19937{std::random_device{}()});

    std::vector<double> relative_diff_means;
    for (auto metric_type : metric_type_names) {
        // Fix: initialise per candidate — previously a function-scope double was
        // left uninitialised, so an unhandled metric name would have pushed an
        // indeterminate value (undefined behaviour).
        double relative_diff_mean = 0;
        if (metric_type == "Euclidean") {
            metric::Euclidean<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Euclidean<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Manhatten") {
            metric::Manhatten<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Manhatten<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "P_norm") {
            metric::P_norm<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::P_norm<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Euclidean_thresholded") {
            metric::Euclidean_thresholded<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Euclidean_thresholded<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Cosine") {
            metric::Cosine<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Cosine<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Chebyshev") {
            metric::Chebyshev<double> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::Chebyshev<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "Earth Mover Distance") {
            // EMD needs a ground-distance matrix over the 2-D grid.
            auto cost_mat = metric::EMD_details::ground_distance_matrix_of_2dgrid<double>(graph_w, graph_h);
            auto maxCost = metric::EMD_details::max_in_distance_matrix(cost_mat);
            metric::EMD<double> distance(cost_mat, maxCost);
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::EMD<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "SSIM") {
            metric::SSIM<double, Record> distance;
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::SSIM<double, Record>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        } else if (metric_type == "TWED") {
            metric::TWED<double> distance(0, 1);
            relative_diff_mean = get_mean_distance_difference<Record, Graph, metric::TWED<double>>(
                graph, distance, dataset, randomized_indexes, isEstimate);
        }
        relative_diff_means.push_back(relative_diff_mean);
        if (verbose) {
            std::cout << metric_type << " relative_diff_mean: " << relative_diff_mean << std::endl;
        }
    }

    // The smallest mean relative difference wins; indexes stay aligned with
    // metric_type_names because exactly one value is pushed per candidate.
    std::vector<double>::iterator min_result = std::min_element(relative_diff_means.begin(), relative_diff_means.end());
    auto best_index = std::distance(relative_diff_means.begin(), min_result);
    if (verbose) {
        std::cout << std::endl;
        std::cout << "Best metric: " << metric_type_names[best_index] << std::endl;
    }
    return metric_type_names[best_index];
}
/**
 * @brief Mean relative difference between SOM-induced and direct distances.
 *
 * Trains (or estimates) a SOM for the given metric, wraps it in a Kohonen
 * distance object and compares that distance against the direct metric
 * distance for up to 20x20 shuffled sample pairs.
 *
 * @return mean of |(|kohonen| - |direct|)| / |direct| over all compared pairs
 *         with non-zero direct distance; NaN when no pair qualified
 *         (preserves the original 0/0 result of the empty case).
 */
template <typename Record, typename Graph, typename Metric>
double MetricAutoDetector::get_mean_distance_difference(Graph &graph, Metric distance, std::vector<Record> dataset,
                                                        std::vector<size_t> randomized_indexes, bool isEstimate)
{
    metric::SOM<Record, Graph, Metric> som(graph, distance);
    if (isEstimate) {
        som.estimate(dataset, 50);  // quick approximation instead of a full training run
    } else {
        som.train(dataset);
    }
    metric::Kohonen<double, Record, Graph, Metric> Kohonen_object(som, dataset);

    // Compare at most 20x20 sample pairs (size_t avoids the previous
    // signed/unsigned comparison against dataset.size()).
    size_t iterations = 20;
    if (iterations > dataset.size()) {
        iterations = dataset.size();
    }

    std::vector<double> relative_diffs;
    for (size_t i = 0; i < iterations; i++) {
        for (size_t j = 0; j < iterations; j++) {
            if (i == j)
                continue;
            auto Kohonen = Kohonen_object(dataset[randomized_indexes[i]], dataset[randomized_indexes[j]]);
            auto direct_distance = distance(dataset[randomized_indexes[i]], dataset[randomized_indexes[j]]);
            // Fix: guard BEFORE dividing — the old code computed the ratio
            // first and only then discarded it, relying on IEEE inf/NaN.
            if (direct_distance != 0) {
                auto diff = abs(abs(Kohonen) - abs(direct_distance));
                relative_diffs.push_back(diff / abs(direct_distance));
            }
        }
    }

    double relative_diff_sum = 0;
    for (auto item : relative_diffs) {
        relative_diff_sum += item;
    }
    // NOTE: yields NaN (0.0 / 0) when relative_diffs is empty — same observable
    // behaviour as the original implementation.
    double relative_diff_mean = relative_diff_sum / relative_diffs.size();
    return relative_diff_mean;
}
} // end namespace metric
| 5,883
|
C++
|
.cpp
| 126
| 43.309524
| 117
| 0.707194
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,510
|
math_functions.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/math_functions.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_MATH_FUNCTIONS_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_MATH_FUNCTIONS_CPP
#include <algorithm>
#include <vector>
namespace metric {
/// Returns n evenly spaced values from a to b inclusive.
/// For n < 2 a single-element vector containing b is returned.
template <typename T> std::vector<T> linspace(T a, T b, int n)
{
    std::vector<T> points;
    if (n <= 1) {
        points.push_back(b);
        return points;
    }
    const T step = (b - a) / T(n - 1);
    points.reserve(n);
    for (int k = 0; k < n; ++k) {
        points.push_back(a + k * step);
    }
    return points;
}
/// Akima spline interpolation of the sample points (x, y) at query positions xi.
/// Assumes x is strictly increasing and x.size() == y.size() (not checked).
/// NOTE(review): `save_Mode` is unused here — presumably kept for signature
/// compatibility with a header declaration carrying a default; confirm before removing.
template <typename T>
std::vector<T> akimaInterp1(std::vector<T> const &x, std::vector<T> const &y, std::vector<T> const &xi, bool save_Mode)
{
    // check inputs

    // calculate u vector: segment slopes, padded with two linearly
    // extrapolated slopes on each side (classic Akima end handling)
    auto uVec = [](std::vector<T> const &x, std::vector<T> const &y) {
        size_t n = x.size();
        std::vector<T> u((n + 3));
        for (size_t i = 1; i < n; ++i) {
            u[i + 1] = (y[i] - y[i - 1]) / (x[i] - x[i - 1]); // Shift i to i+2
        }

        // linear extrapolation for the padded end slopes
        auto akima_end = [](const T &u1, const T &u2) { return 2.0 * u1 - u2; };

        u[1] = akima_end(u[2], u[3]);
        u[0] = akima_end(u[1], u[2]);
        u[n + 1] = akima_end(u[n], u[n - 1]);
        u[n + 2] = akima_end(u[n + 1], u[n]);

        return u;
    };
    std::vector<T> u = uVec(x, y);

    // calculate yp vector: Akima-weighted derivative at every sample point
    std::vector<T> yp(x.size());
    for (size_t i = 0; i < x.size(); ++i) {
        auto a = std::abs(u[i + 3] - u[i + 2]);
        auto b = std::abs(u[i + 1] - u[i]);
        if ((a + b) != 0) {
            yp[i] = (a * u[i + 1] + b * u[i + 2]) / (a + b);
        } else {
            // both slope differences vanish -> fall back to the mean slope
            yp[i] = (u[i + 2] + u[i + 1]) / 2.0;
        }
    }

    // calculte interpolated yi values
    // locate the knot interval containing xii by bisection on the sorted x
    auto kFind = [](const T &xii, const std::vector<T> &x, int start, int end) {
        int klo = start;
        int khi = end;
        // // Find subinterval by bisection
        while (khi - klo > 1) {
            int k = (khi + klo) / 2;
            x[k] > xii ? khi = k : klo = k;
        }
        return klo;
    };

    std::vector<T> yi(xi.size());
    for (size_t i = 0; i < xi.size(); ++i) {
        // Find the right place in the table by means of a bisection.
        int k = kFind(xi[i], x, int(0), x.size() - 1);

        // Evaluate Akima polynomial (cubic Hermite form on the interval)
        T b = x[k + 1] - x[k];
        T a = xi[i] - x[k];
        yi[i] = y[k] + yp[k] * a + (3.0 * u[k + 2] - 2.0 * yp[k] - yp[k + 1]) * a * a / b +
                (yp[k] + yp[k + 1] - 2.0 * u[k + 2]) * a * a * a / (b * b);

        // Differentiate to find the second-order interpolant
        // ypi[i] = yp[k] + (3.0u[k+2] - 2.0yp[k] - yp[k+1])2a/b + (yp[k] + yp[k+1] - 2.0u[k+2])3aa/(b*b);

        // Differentiate to find the first-order interpolant
        // yppi[i] = (3.0u[k+2] - 2.0yp[k] - yp[k+1])2/b + (yp[k] + yp[k+1] - 2.0u[k+2])6a/(b*b);
    }
    return yi;
}
/// Resamples the (sorted) values of y onto n quantile positions via Akima
/// interpolation; both grids are midpoint probability grids.
template <typename T> std::vector<T> resize(std::vector<T> y, size_t n)
{
    std::sort(y.begin(), y.end());
    const auto m = y.size();
    auto source_grid = linspace(T(0.5) / T(m), T(1) - T(0.5) / T(m), m);
    auto target_grid = linspace(T(0.5) / T(n), T(1) - T(0.5) / T(n), n);
    return akimaInterp1(source_grid, y, target_grid);
}
template <typename T> std::vector<std::vector<T>> transpose(std::vector<std::vector<T>> &a)
{
size_t rows = a.size();
size_t cols = a[0].size();
std::vector<std::vector<T>> array(cols, std::vector<T>(rows));
for (size_t i = 0; i < cols; ++i) {
for (size_t j = 0; j < rows; ++j) {
array[i][j] = a[j][i];
}
}
return array;
}
/// Linear interpolation between v0 (t = 0) and v1 (t = 1).
template <typename T> T Lerp(T v0, T v1, T t)
{
    const T w0 = (1 - t) * v0;
    const T w1 = t * v1;
    return w0 + w1;
}
/**
 * Empirical quantile of `data` at probability `probs` (0..1) with linear
 * interpolation between the two neighbouring order statistics.
 * Takes `data` by value on purpose — selection mutates the copy.
 * Returns 0 for an empty input, the single element for size 1.
 */
template <typename T> T quickQuantil(std::vector<T> data, T probs)
{
    if (!(data.size() > 0))
        return 0;
    if (1 == data.size())
        return data[0];
    // Fractional position of the requested order statistic
    // (same value as Lerp(-0.5, size - 0.5, probs)).
    T poi = (T(1) - probs) * T(-0.5) + probs * (T(data.size()) - T(0.5));
    int left = std::max(int(std::floor(poi)), int(0));
    int right = std::min(int(std::ceil(poi)), int(data.size() - 1));
    // Select the left order statistic; afterwards every element to the right
    // of position `left` is >= data[left].
    std::nth_element(data.begin(), data.begin() + left, data.end());
    T datLeft = data[left];
    // Bug fix: after a single nth_element at `left`, data[right] is NOT the
    // (left+1)-th order statistic — it is an arbitrary tail element. The true
    // right neighbour is the minimum of the remaining tail.
    T datRight = (right > left) ? *std::min_element(data.begin() + left + 1, data.end()) : datLeft;
    // Linear interpolation, same formula as Lerp(datLeft, datRight, poi - left).
    T t = poi - T(left);
    return (T(1) - t) * datLeft + t * datRight;
}
} // namespace metric
#endif // header guard
| 4,098
|
C++
|
.cpp
| 123
| 30.747967
| 119
| 0.568205
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,511
|
poor_mans_quantum.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/poor_mans_quantum.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Michael Welsch
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_POOR_MANS_QUANTUM_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_POOR_MANS_QUANTUM_CPP
#include "../poor_mans_quantum.hpp"
#include <blaze/Math.h>
#include <limits>
#include <map>
#include <random>
#ifdef USE_VECTOR_SORT
#include "3dparty/vector_sort.hpp"
#endif
#include "distributions/Normal.hpp"
#include "distributions/Weibull.hpp"
#include "math_functions.hpp"
//#include "distributions/Binomial.hpp"
#include "distributions/Discrete.hpp"
namespace metric {
/*** constructor for univariate distribution ***/
// Builds a PMQ holding n representative samples of the parametric
// distribution d(par1, par2), taken at an equidistant midpoint
// probability grid via the inverse CDF.
template <typename Distribution, typename T>
PMQ<Distribution, T>::PMQ(T par1, T par2, size_t n, Distribution d) : _dist(d), _generator(std::random_device{}())
{
    _dist._p1 = par1;
    _dist._p2 = par2;
    // NOTE(review): min/max are computed but never used — presumably leftover
    // from an earlier clipping step; confirm before removing.
    T min = quantil(T(1) / T(n));
    T max = quantil(T(1) - T(1) / T(n));
    // equidistant probability grid and the matching inverse-CDF samples
    _dist._prob = linspace(T(0.5) / T(n), T(1) - T(0.5) / T(n), n);
    _dist._data.resize(n);
    for (size_t i = 0; i < n; ++i) {
        _dist._data[i] = icdf(_dist._prob[i]);
    }
}
/*** constructor for discrete samples ***/
// Builds an empirical PMQ from raw samples: copies and sorts them, then
// attaches an equidistant midpoint probability grid of the same length.
template <typename Distribution, typename T>
PMQ<Distribution, T>::PMQ(std::vector<T> data, Distribution d) : _dist(d), _generator(std::random_device{}())
{
    _dist._data.resize(data.size());
    for (size_t i = 0; i < data.size(); ++i) {
        _dist._data[i] = T(data[i]);
    }
#if USE_VECTOR_SORT
    vector_sort::sort(_dist._data);
#else
    std::sort(_dist._data.begin(), _dist._data.end());
#endif
    // midpoint probabilities: (i + 0.5) / size for i = 0..size-1
    auto prob = linspace(T(0.5) / T(data.size()), T(1) - T(0.5) / T(data.size()), data.size());
    _dist._prob.resize(prob.size());
    for (size_t i = 0; i < prob.size(); ++i) {
        _dist._prob[i] = T(prob[i]);
    }
}
// Number of stored samples.
template <typename Distribution, typename T> size_t PMQ<Distribution, T>::size() { return _dist._data.size(); }
// Fuses this variable's samples with evidence from rv ("belief update").
// The Monte-Carlo budget is split proportionally to the two sample counts:
// the first part keeps plain draws from this distribution, the second part
// keeps, of two own draws, the one closer to a draw from rv — biasing the
// result toward the evidence. Returns a resampled variable of half the
// combined size. (Exact statistical interpretation not documented here.)
template <typename Distribution, typename T> template <typename mT> mT PMQ<Distribution, T>::believe(mT rv)
{
    size_t n1 = _dist._data.size();
    size_t n2 = rv._dist._data.size();
    size_t n = n1 + n2;
    // split the RV_SAMPLES budget proportionally to the two sample counts
    size_t it1 = std::round((n1) / float(n) * float(RV_SAMPLES));
    size_t it2 = std::round((n2) / float(n) * float(RV_SAMPLES));
    std::vector<float> y1(it1);
    std::vector<float> y2(it2);
    // prior part: unconditioned draws from this distribution
    for (size_t i = 0; i < it1; ++i) {
        y1[i] = rnd();
    }
    // evidence part: of two own draws keep the one nearer to rv's draw
    for (size_t i = 0; i < it2; ++i) {
        auto value1 = rnd();
        auto value2 = rnd();
        auto value3 = rv.rnd();
        if (std::abs(value3 - value1) < std::abs(value3 - value2))
            y2[i] = value1;
        else
            y2[i] = value2;
    }
    y1.insert(y1.end(), y2.begin(), y2.end()); // concat vectors
    mT out(resize(y1, n / 2));
    return out;
}
// Thin delegations to the underlying distribution object (_dist).
template <typename Distribution, typename T> T PMQ<Distribution, T>::rnd() { return _dist.rnd(); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::mean() { return _dist.mean(); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::variance() { return _dist.variance(); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::median() { return _dist.median(); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::quantil(T p) { return _dist.quantil(p); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::cdf(T x) { return _dist.cdf(x); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::icdf(T x) { return _dist.icdf(x); }
template <typename Distribution, typename T> T PMQ<Distribution, T>::pdf(T x) { return _dist.pdf(x); }
/*** addition ***/
// rv + rv: Monte-Carlo sum of independent draws (RV_SAMPLES of them),
// resampled down to the average of the two input sizes.
template <typename D1, typename D2, typename T1, typename T2>
PMQ<Discrete<float>> operator+(PMQ<D1, T1> &rv1, PMQ<D2, T2> &rv2)
{
    size_t n = (rv1._dist._data.size() + rv2._dist._data.size()) / 2;
    std::vector<float> y(RV_SAMPLES);
    for (size_t i = 0; i < y.size(); ++i) {
        y[i] = rv1.rnd() + rv2.rnd();
    }
    PMQ<Discrete<float>> out(resize(y, n));
    return out;
}
// Scalar overloads shift every stored sample deterministically; float and
// double variants exist on both sides to avoid ambiguous conversions.
template <typename D, typename T> PMQ<Discrete<float>> operator+(PMQ<D, T> &rv, float x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] + x;
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator+(float x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] + x;
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator+(PMQ<D, T> &rv, double x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] + float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator+(double x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] + float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
/*** subtraction ***/
// rv - rv: Monte-Carlo difference of independent draws, resampled down to
// the average of the two input sizes.
template <typename D1, typename D2, typename T1, typename T2>
PMQ<Discrete<float>> operator-(PMQ<D1, T1> &rv1, PMQ<D2, T2> &rv2)
{
    size_t n = (rv1._dist._data.size() + rv2._dist._data.size()) / 2;
    std::vector<float> y(RV_SAMPLES);
    for (size_t i = 0; i < y.size(); ++i) {
        y[i] = rv1.rnd() - rv2.rnd();
    }
    PMQ<Discrete<float>> out(resize(y, n));
    return out;
}
// Scalar overloads shift every stored sample; operand order matters
// (rv - x versus x - rv).
template <typename D, typename T> PMQ<Discrete<float>> operator-(PMQ<D, T> &rv, float x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] - x;
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator-(float x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = x - rv._dist._data[i];
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator-(PMQ<D, T> &rv, double x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] - float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator-(double x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = float(x) - rv._dist._data[i];
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
/*** multiplication ***/
// rv * rv: Monte-Carlo product of independent draws, resampled down to the
// average of the two input sizes.
template <typename D1, typename D2, typename T1, typename T2>
PMQ<Discrete<float>> operator*(PMQ<D1, T1> &rv1, PMQ<D2, T2> &rv2)
{
    size_t n = (rv1._dist._data.size() + rv2._dist._data.size()) / 2;
    std::vector<float> y(RV_SAMPLES);
    for (size_t i = 0; i < y.size(); ++i) {
        y[i] = rv1.rnd() * rv2.rnd();
    }
    PMQ<Discrete<float>> out(resize(y, n));
    return out;
}
// Scalar overloads scale every stored sample deterministically.
template <typename D, typename T> PMQ<Discrete<float>> operator*(PMQ<D, T> &rv, float x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] * x;
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator*(float x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = x * rv._dist._data[i];
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator*(PMQ<D, T> &rv, double x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] * float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator*(double x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] * float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
/*** division ***/
// rv / rv: Monte-Carlo quotient of independent draws, resampled down to the
// average of the two input sizes. No guard against zero denominators — a
// draw of 0 from rv2 produces inf/NaN samples.
template <typename D1, typename D2, typename T1, typename T2>
PMQ<Discrete<float>> operator/(PMQ<D1, T1> &rv1, PMQ<D2, T2> &rv2)
{
    size_t n = (rv1._dist._data.size() + rv2._dist._data.size()) / 2;
    std::vector<float> y(RV_SAMPLES);
    for (size_t i = 0; i < y.size(); ++i) {
        y[i] = rv1.rnd() / rv2.rnd();
    }
    PMQ<Discrete<float>> out(resize(y, n));
    return out;
}
// Scalar overloads divide every stored sample; operand order matters
// (rv / x versus x / rv).
template <typename D, typename T> PMQ<Discrete<float>> operator/(PMQ<D, T> &rv, float x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] / x;
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator/(float x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = x / rv._dist._data[i];
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator/(PMQ<D, T> &rv, double x)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = rv._dist._data[i] / float(x);
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
template <typename D, typename T> PMQ<Discrete<float>> operator/(double x, PMQ<D, T> &rv)
{
    size_t n = rv._dist._data.size();
    std::vector<float> y(n);
    for (size_t i = 0; i < n; ++i) {
        y[i] = float(x) / rv._dist._data[i];
    }
    PMQ<Discrete<float>> out(y);
    return out;
}
/*** equality ***/
// significance test
// Symmetric overlap score in [0, 1]: the fraction of each variable's samples
// that fall INSIDE the other's merged confidence band (at level 1 - RV_ERROR),
// averaged over both directions.
template <typename D1, typename D2, typename T1, typename T2>
float operator==(const PMQ<D1, T1> &rv0, const PMQ<D2, T2> &rv1)
{
    auto [rv0_l, rv0_r] = rv0.confidence(1 - RV_ERROR);
    auto [rv01_l, rv01_r] = rv1.merged_confidence(rv0_l, rv0_r, 1 - RV_ERROR);
    auto l = rv1.in_confidence(rv01_l, rv01_r);

    auto [rv1_l, rv1_r] = rv1.confidence(1 - RV_ERROR);
    auto [rv10_l, rv10_r] = rv0.merged_confidence(rv1_l, rv1_r, 1 - RV_ERROR);
    auto r = rv0.in_confidence(rv10_l, rv10_r);

    return (l + r) / 2;
}
/*** inequality ***/
// significance test
// Symmetric disjointness score in [0, 1]: the fraction of each variable's
// samples that fall OUTSIDE the other's merged confidence band (at level
// 1 - RV_ERROR), averaged over both directions.
template <typename D1, typename D2, typename T1, typename T2>
float operator!=(const PMQ<D1, T1> &rv0, const PMQ<D2, T2> &rv1)
{
    auto [rv0_l, rv0_r] = rv0.confidence(1 - RV_ERROR);
    auto [rv01_l, rv01_r] = rv1.merged_confidence(rv0_l, rv0_r, 1 - RV_ERROR);
    auto l = rv1.out_confidence(rv01_l, rv01_r);

    auto [rv1_l, rv1_r] = rv1.confidence(1 - RV_ERROR);
    auto [rv10_l, rv10_r] = rv0.merged_confidence(rv1_l, rv1_r, 1 - RV_ERROR);
    auto r = rv0.out_confidence(rv10_l, rv10_r);

    return (l + r) / 2;
}
/**
 * Merges the samples of two random variables into one empirical PMQ.
 * The concatenated samples are handed to the PMQ sample constructor, which
 * sorts them and attaches the probability grid.
 * (Fix: removed the unused local `n`.)
 */
template <typename T1, typename T2> PMQ<Discrete<float>> merge(T1 rv1, T2 rv2)
{
    std::vector<float> y(rv1._dist._data);
    y.insert(y.end(), rv2._dist._data.begin(), rv2._dist._data.end());
    PMQ<Discrete<float>> rv(y);
    return rv;
}
// Splits rv into a "lower" and an "upper" random variable: each Monte-Carlo
// step draws two samples and routes the smaller one to the first output and
// the larger one to the second; both are resampled to half rv's size.
template <typename T1> std::tuple<PMQ<Discrete<float>>, PMQ<Discrete<float>>> split(T1 rv)
{
    size_t n = rv._dist._data.size() / 2;
    std::vector<float> y1(RV_SAMPLES);
    std::vector<float> y2(RV_SAMPLES);
    for (size_t i = 0; i < y1.size(); ++i) {
        auto value1 = rv.rnd();
        auto value2 = rv.rnd();
        // order the pair: value1 <= value2
        if (value1 > value2) {
            std::swap(value1, value2);
        }
        y1[i] = value1;
        y2[i] = value2;
    }
    PMQ<Discrete<float>> out1(resize(y1, n));
    PMQ<Discrete<float>> out2(resize(y2, n));
    return {out1, out2};
}
// Bootstrap confidence band of the empirical distribution at the given level.
// Draws RV_SAMPLES random realisations through the fitted quantile curve
// (sorted uniforms interpolated via akimaInterp1), then takes per-position
// lower/upper quantiles as the left/right band edges.
template <typename Distribution, typename T>
std::tuple<PMQ<Discrete<float>>, PMQ<Discrete<float>>> PMQ<Distribution, T>::confidence(const T &confidencelevel) const
{
    std::random_device rd;  // seed for the random number engine
    std::mt19937 gen(rd());  // Standard mersenne_twister_engine seeded with rd()
    std::uniform_real_distribution<T> dis(T(0), T(1));

    size_t samples = RV_SAMPLES;
    // NOTE(review): windowSize is unused here (the loops use _dist._data.size()
    // directly); presumably leftover — confirm before removing.
    size_t windowSize = _dist._data.size();

    // compute probability matrix of set_0
    std::vector<std::vector<T>> m_0(samples, std::vector<T>(_dist._data.size()));
    for (size_t i = 0; i < samples; ++i) {
        for (size_t j = 0; j < _dist._data.size(); ++j) {
            m_0[i][j] = T(dis(gen));  // fill with random numbers
        }
#if USE_VECTOR_SORT
        vector_sort::sort(m_0[i]);
#else
        std::sort(m_0[i].begin(), m_0[i].end());  // sort the row
#endif
        m_0[i] = akimaInterp1(_dist._prob, _dist._data, m_0[i]);  // interpolate the random numbers
    }
    auto m_0t = transpose(m_0);
    // free the row-major copy early to reduce peak memory
    m_0.clear();
    m_0.shrink_to_fit();

    // compute left and right confidence boundaries of set_0
    std::vector<T> set_0_left(_dist._data.size());
    std::vector<T> set_0_right(_dist._data.size());
    for (size_t i = 0; i < _dist._data.size(); ++i) {
        set_0_left[i] = quickQuantil(m_0t[i], (T(1) - confidencelevel) / T(2));
        set_0_right[i] = quickQuantil(m_0t[i], confidencelevel + (T(1) - confidencelevel) / T(2));
    }
    m_0t.clear();
    m_0t.shrink_to_fit();
    PMQ<Discrete<float>> left(set_0_left);
    PMQ<Discrete<float>> right(set_0_right);
    return {left, right};
}
// Confidence band of this variable merged with an existing band
// (set_0_left / set_0_right): bootstrap uniform rows are interpolated through
// BOTH band edges and the outer per-position quantiles are returned.
template <typename Distribution, typename T>
template <typename mT1, typename mT2>
std::tuple<PMQ<Discrete<float>>, PMQ<Discrete<float>>>
PMQ<Distribution, T>::merged_confidence(const mT1 &set_0_left, const mT2 &set_0_right, const T confidencelevel) const
{
    std::random_device rd;  // seed for the random number engine
    std::mt19937 gen(rd());  // Standard mersenne_twister_engine seeded with rd()
    std::uniform_real_distribution<T> dis(T(0), T(1));
    size_t samples = RV_SAMPLES;
    size_t windowSize = _dist._data.size();

    // compute probability matrix of left and right and medians of set_0
    std::vector<std::vector<T>> m_prop_1(samples, std::vector<T>(windowSize));
    for (size_t i = 0; i < samples; ++i) {
        for (size_t j = 0; j < windowSize; ++j) {
            m_prop_1[i][j] = T(dis(gen));  // fill with random numbers
        }
#if USE_VECTOR_SORT
        vector_sort::sort(m_prop_1[i]);
#else
        std::sort(m_prop_1[i].begin(), m_prop_1[i].end());  // sort the row
#endif
    }

    std::vector<std::vector<T>> quants(2, std::vector<T>(windowSize));
    // left edge: lower outer quantile of rows interpolated through set_0_left
    std::vector<std::vector<T>> m(samples, std::vector<T>(windowSize));
    for (size_t i = 0; i < samples; ++i) {
        m[i] =
            akimaInterp1(set_0_left._dist._prob, set_0_left._dist._data, m_prop_1[i]);  // interpolate the random numbers
    }
    auto mt = transpose(m);
    for (size_t i = 0; i < windowSize; ++i) {
        quants[0][i] = quickQuantil(mt[i], (T(1.0) - confidencelevel) / T(2.0));
    }
    // right edge: upper outer quantile of rows interpolated through set_0_right
    for (size_t i = 0; i < samples; ++i) {
        m[i] = akimaInterp1(set_0_right._dist._prob, set_0_right._dist._data, m_prop_1[i]);
    }
    mt = transpose(m);
    for (size_t i = 0; i < windowSize; ++i) {
        quants[1][i] = quickQuantil(mt[i], confidencelevel + (T(1.0) - confidencelevel) / T(2.0));
    }
    PMQ<Discrete<float>> left(quants[0]);
    PMQ<Discrete<float>> right(quants[1]);
    return {left, right};
}
// Significance that the sample is normally distributed: builds a synthetic
// Normal PMQ with matching mean/variance/size and compares it against the
// empirical samples via the operator== overlap score.
template <typename Distribution, typename T> float PMQ<Distribution, T>::is_normal_distributed()
{
    PMQ<Normal> control(mean(), variance(), size());
    PMQ<Normal> var(_dist._data);  // TODO: use self reference
    return (var == control);
}
// Significance that the sample is Weibull distributed: fits Weibull
// parameters to the data, builds a synthetic Weibull PMQ, and compares it
// against the empirical samples via the operator== overlap score.
template <typename Distribution, typename T> float PMQ<Distribution, T>::is_weibull_distributed()
{
    auto [x0, x1] = weibull_fit(_dist._data);
    PMQ<Weibull> control(x0, x1, size());
    // NOTE(review): `var` is declared PMQ<Normal> even in the Weibull test —
    // possibly intentional (the empirical samples dominate the comparison),
    // but PMQ<Weibull> would look more consistent; confirm.
    PMQ<Normal> var(_dist._data);  // TODO: use self reference
    return (var == control);
}
// Fraction of this sample that lies strictly INSIDE the band
// [set_left, set_right] (compared element-wise, same length required).
// Returns the sentinel -999999999 on a size mismatch (kept for compatibility).
template <typename Distribution, typename T>
template <typename mT1, typename mT2>
float PMQ<Distribution, T>::in_confidence(const mT1 &set_left, const mT2 &set_right) const
{
    const size_t n = _dist._data.size();
    if (set_left._dist._data.size() != n || set_right._dist._data.size() != n) {
        return float(-999999999);
    }
    int inside = 0;
    for (size_t i = 0; i < n; ++i) {
        const auto v = _dist._data[i];
        if (v > set_left._dist._data[i] && v < set_right._dist._data[i]) {
            ++inside;
        }
    }
    return float(inside) / float(n);
}
// Fraction of this sample that lies strictly OUTSIDE the band
// [set_left, set_right] (compared element-wise, same length required).
// Returns the sentinel -999999999 on a size mismatch (kept for compatibility).
template <typename Distribution, typename T>
template <typename mT1, typename mT2>
float PMQ<Distribution, T>::out_confidence(const mT1 &set_left, const mT2 &set_right) const
{
    const size_t n = _dist._data.size();
    if (set_left._dist._data.size() != n || set_right._dist._data.size() != n) {
        return float(-999999999);
    }
    int outside = 0;
    for (size_t i = 0; i < n; ++i) {
        const auto v = _dist._data[i];
        if (v < set_left._dist._data[i] || v > set_right._dist._data[i]) {
            ++outside;
        }
    }
    return float(outside) / float(n);
}
} // namespace metric
#endif
| 16,384
|
C++
|
.cpp
| 476
| 32.428571
| 119
| 0.644613
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,512
|
Weibull.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/distributions/Weibull.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_WEIBULL_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_WEIBULL_CPP
#include <cassert>
#include <cmath>
#include <random>
#include <vector>
namespace metric {
// Seeds the per-instance random engine from std::random_device.
inline Weibull::Weibull() : _generator(std::random_device{}()) {}
/*** random sampling ***/
// Draws one Weibull variate with scale = _p1 and shape = _p2.
inline float Weibull::rnd()
{
    // Fix: std::weibull_distribution's constructor takes (a = shape, b = scale),
    // while the rest of this class (pdf/cdf/icdf) treats _p1 as the scale and
    // _p2 as the shape — so the parameters must be passed as (_p2, _p1).
    // The old (_p1, _p2) order sampled a distribution with swapped parameters.
    std::weibull_distribution<float> weibull_dist(_p2, _p1);
    return weibull_dist(_generator);
}
// Forward declaration; defined below in terms of ln_gamma.
template <typename T> T gamma(T z);

// Mean of Weibull(scale = _p1, shape = _p2): E[X] = scale * Gamma(1 + 1/shape).
// Fix: the old code multiplied by 1/_p1, but _p1 is used as the scale in
// pdf/cdf/icdf of this class, so the factor must be _p1 itself.
inline float Weibull::mean() { return _p1 * gamma(float(1) + float(1) / _p2); }

// Quantile is simply the inverse CDF.
inline float Weibull::quantil(float p) { return icdf(p); }
/*** pdf ***/
// Density f(x) = (k/s) * (x/s)^(k-1) * exp(-(x/s)^k) with s = _p1, k = _p2.
inline float Weibull::pdf(const float x)
{
    const float z = x / _p1;
    const float w = std::exp(-(std::pow(z, _p2)));
    if (w == 0)
        return 0;  // underflow of the exponential tail
    return std::pow(z, (_p2 - 1)) * w * _p2 / _p1;
}
/*** cdf ***/
// P(X <= x) = 1 - exp(-(x/scale)^shape), with scale = _p1 and shape = _p2.
inline float Weibull::cdf(const float x)
{
    float z = -std::pow(x / _p1, _p2);
    float p;
    if (std::abs(z) < float(1e-5))
        // Taylor expansion of 1 - exp(z) for tiny |z| to avoid cancellation:
        // 1 - exp(z) = -z - z^2/2 + O(z^3).
        // Fix: the old branch returned z + 0.5*z*z, which is the NEGATIVE of
        // the CDF here (z <= 0), producing negative probabilities near x = 0.
        p = -z - float(0.5) * z * z;
    else
        p = -(std::exp(z) - float(1.0));
    return p;
}
/*** icdf ***/
// Inverse CDF: x = scale * (-ln(1 - p))^(1/shape), scale = _p1, shape = _p2.
inline float Weibull::icdf(const float x) { return _p1 * std::pow(-std::log(1 - x), 1 / _p2); }
// Natural log of the gamma function via a 9-term Lanczos approximation
// (g = 7.5); arguments below 0.5 go through the reflection formula.
template <typename T> T ln_gamma(T z)
{
    const T coeff[9 + 1] = {
        0.9999999999998099322768470047347, 676.520368121885098567009190444019, -1259.13921672240287047156078755283,
        771.3234287776530788486528258894, -176.61502916214059906584551354, 12.507343278686904814458936853,
        -0.13857109526572011689554707, 9.984369578019570859563e-6, 1.50563273514931155834e-7};
    const T g_pi = 3.14159265358979323846;
    const T ln_sqrt_2_pi = 0.91893853320467274178;

    if (z < 0.5) {
        // Reflection: Gamma(z) * Gamma(1 - z) = pi / sin(pi * z)
        return std::log(g_pi / std::sin(g_pi * z)) - ln_gamma(1.0 - z);
    }

    z = z - 1.0;
    const T base = z + 7.5;
    T acc = 0;
    for (int i = 9; i >= 1; i--) {
        acc += coeff[i] / (z + ((T)i));
    }
    acc += coeff[0];
    return ((ln_sqrt_2_pi + std::log(acc)) - base) + std::log(base) * (z + 0.5);
}
// Gamma function via exp(ln_gamma); overflows for large z as expected.
template <typename T> T gamma(T z) { return std::exp(ln_gamma(z)); }
/* Profile-likelihood equation for the Weibull scale parameter (log space):
   v(sigma) = sigma + xbar - sum(w_i * x_i) / sum(w_i) with
   w_i = w[i] * exp(x[i] / sigma). Its root over sigma is the scale MLE. */
static float weibull_scale_likelihood(float sigma, std::vector<float> &x, std::vector<float> &w, float xbar, int size)
{
    float sumxw = 0;
    float sumw = 0;
    for (int i = 0; i < size; i++) {
        const float wi = w[i] * std::exp(x[i] / sigma);
        sumxw += wi * x[i];
        sumw += wi;
    }
    return sigma + xbar - sumxw / sumw;
}
/* Based on dfzero from FORTRAN: a bracketing root finder (bisection combined
   with secant / inverse-quadratic interpolation, Brent style). Finds the zero
   of weibull_scale_likelihood inside search_bands and stops once the bracket
   is within tolerance. Outputs are written through sigmahat (the root),
   likelihood_value (function value at the root) and err (exit flag). */
static void wdfzero(float *sigmahat, float *likelihood_value, float *err, float *search_bands, float tol,
                    std::vector<float> &x0, std::vector<float> &frequency, float meanUncensored, int size)
{
    float exitflag;
    float a, b, c = 0.0, d = 0.0, e = 0.0, m, p, q, r, s;
    float fa, fb, fc;
    float fval;
    float tolerance;
    exitflag = 1;
    *err = exitflag;
    a = search_bands[0];
    b = search_bands[1];

    fa = weibull_scale_likelihood(a, x0, frequency, meanUncensored, size);
    fb = weibull_scale_likelihood(b, x0, frequency, meanUncensored, size);

    // either endpoint already being a root short-circuits the search
    if (fa == 0) {
        b = a;
        *sigmahat = b;
        fval = fa;
        *likelihood_value = fval;
        return;
    } else if (fb == 0) {
        fval = fb;
        *likelihood_value = fval;
        *sigmahat = b;
        return;
    } else  // if ((fa > 0) == (fb > 0))
    {
        // the initial bracket must straddle the root (opposite signs)
        assert(fa * fb <= 0);
        // std::cout << "ERROR: wdfzero says function values at the interval endpoints must differ in sign\n";
    }

    fc = fb;
    /*Main loop, exit from middle of the loop */
    while (fb != 0) {
        /* Insure that b is the best result so far, a is the previous */
        /* value of b, and that c is on the opposite size of the zero from b. */
        if ((fb > 0) == (fc > 0)) {
            c = a;
            fc = fa;
            d = b - a;
            e = d;
        }

        float absFC;
        float absFB;
        absFC = std::abs(fc);
        absFB = std::abs(fb);
        // swap so that b always holds the smaller (better) function value
        if (absFC < absFB) {
            a = b;
            b = c;
            c = a;
            fa = fb;
            fb = fc;
            fc = fa;
        }

        /*set up for test of Convergence, is the interval small enough? */
        m = 0.5 * (c - b);

        float absB, absM, absFA, absE;  //, absFB
        absB = std::abs(b);
        absM = std::abs(m);
        absFA = std::abs(fa);
        absFB = std::abs(fb);
        absE = std::abs(e);

        // tolerance scales with |b| so the test stays meaningful for large roots
        tolerance = 2.0 * tol * ((absB > 1.0) ? absB : 1.0);
        if ((absM <= tolerance) | (fb == 0.0))
            break;

        /*Choose bisection or interpolation */
        if ((absE < tolerance) | (absFA <= absFB)) {
            /*Bisection */
            d = m;
            e = m;
        } else {
            /*Interpolation */
            s = fb / fa;
            if (a == c) {
                /*Linear interpolation */
                p = 2.0 * m * s;
                q = 1.0 - s;
            } else {
                /*Inverse quadratic interpolation */
                q = fa / fc;
                r = fb / fc;
                p = s * (2.0 * m * q * (q - r) - (b - a) * (r - 1.0));
                q = (q - 1.0) * (r - 1.0) * (s - 1.0);
            }

            // normalise signs so that p >= 0
            if (p > 0)
                q = -1.0 * q;
            else
                p = -1.0 * p;
        }

        float tempTolerance = tolerance * q;
        float absToleranceQ;
        float absEQ;
        float tempEQ = (0.5 * e * q);
        absToleranceQ = std::abs(tempTolerance);
        absEQ = std::abs(tempEQ);

        /*Is interpolated point acceptable */
        if ((2.0 * p < 3.0 * m * q - absToleranceQ) & (p < absEQ)) {
            e = d;
            d = p / q;
        } else {
            // interpolation rejected -> fall back to bisection
            d = m;
            e = m;
        }
        /*Interpolation */

        /*Next point */
        a = b;
        fa = fb;
        // step by d, but never less than the tolerance toward c
        if (std::abs(d) > tolerance)
            b = b + d;
        else if (b > c)
            b = b - tolerance;
        else
            b = b + tolerance;

        fb = weibull_scale_likelihood(b, x0, frequency, meanUncensored, size);
    } /*Main loop (While) */

    fval = weibull_scale_likelihood(b, x0, frequency, meanUncensored, size);
    *likelihood_value = fval;
    *sigmahat = b;
    return;
}
/**
 * Fit a two-parameter Weibull distribution to strictly positive samples by
 * maximum likelihood (MATLAB wblfit-style extreme-value fit on log data).
 *
 * Steps: log-transform the data, shift/scale to [-1, 0] for numerical
 * stability, bracket the scale MLE, find it as the root of the profile
 * likelihood (wdfzero), then recover the location in closed form and map
 * both parameters back to the original data scale.
 *
 * @param inputData strictly positive samples (each is passed to std::log)
 * @return tuple of the two fitted parameters on the original scale:
 *         {exp(range*muHat + maxx), 1/(range*sigmahat)} — presumably the
 *         Weibull scale (A) and shape (B); confirm against callers.
 */
inline std::tuple<float, float> weibull_fit(const std::vector<float> &inputData)
{
    size_t size = inputData.size();
    std::vector<float> data(size);
    float PI = 3.141592653589793238462;
    float tol =
        1e-6; /* this impacts the non-linear estimation..
   if your problem is highly unstable (small scale) this might be made larger
   but we never recommend anything greater than 10e-5.
   Also if larger it will converge faster, so if yo can live with lower accuracy, you can change it */
    float n;
    float nuncensored = 0;
    float ncensored = 0;
    std::vector<float> censoring(size);
    std::vector<float> frequency(size);
    std::vector<float> var(size);
    std::vector<float> x0(size);
    /*set frequency to all 1.0's */
    /*and censoring to 0.0's */
    for (size_t i = 0; i < size; i++) {
        frequency[i] = 1.0;
        censoring[i] = 0.0;
    }
    /* work in log space: Weibull(data) <=> extreme-value(log data) */
    for (size_t i = 0; i < size; i++) {
        data[i] = std::log(inputData[i]);
    }
    float mySum;
    mySum = 0;
    for (size_t i = 0; i < size; i++) {
        mySum += frequency[i];
    }
    n = mySum;
    {
        mySum = 0;
        for (size_t i = 0; i < size; i++) {
            mySum += (frequency[i] * censoring[i]);
        }
        ncensored = mySum;
        nuncensored = n - ncensored;
    }
    /* declare locals for max/range computation */
    float maxVal, minVal;
    float range, maxx;
    float tempVal;
    maxVal = -1000000000;
    minVal = 1000000000;
    for (size_t i = 0; i < size; i++) {
        tempVal = data[i];
        if (tempVal < minVal)
            minVal = tempVal;
        if (tempVal > maxVal)
            maxVal = tempVal;
    }
    range = maxVal - minVal;
    maxx = maxVal;
    /*Shift x to max(x) == 0, min(x) = -1 to make likelihood eqn more stable. */
    float mean, myStd;
    float sigmahat;
    float meanUncensored;
    float upper, lower;
    float search_band[2];
    for (size_t i = 0; i < size; i++) {
        x0[i] = (data[i] - maxx) / range;
    }
    mean = 0;
    myStd = 0;
    for (size_t i = 0; i < size; i++) {
        mean += x0[i];
    }
    mean /= n;
    for (size_t i = 0; i < size; i++) {
        var[i] = x0[i] - mean;
    }
    for (size_t i = 0; i < size; i++) {
        myStd += var[i] * var[i];
    }
    myStd /= (n - 1);
    myStd = std::sqrt(myStd);
    /* method-of-moments start value: sigma = sqrt(6)*std/pi for the
       extreme-value distribution */
    sigmahat = (std::sqrt((float)(6.0)) * myStd) / PI;
    meanUncensored = 0;
    for (size_t i = 0; i < size; i++) {
        meanUncensored += (frequency[i] * x0[i]) / n;
    }
    /* bracket the root of the profile likelihood by geometric expansion */
    if ((tempVal = weibull_scale_likelihood(sigmahat, x0, frequency, meanUncensored, size)) > 0) {
        upper = sigmahat;
        lower = 0.5 * upper;
        while ((tempVal = weibull_scale_likelihood(lower, x0, frequency, meanUncensored, size)) > 0) {
            upper = lower;
            lower = 0.5 * upper;
        }
    } else {
        lower = sigmahat;
        upper = 2.0 * lower;
        while ((tempVal = weibull_scale_likelihood(upper, x0, frequency, meanUncensored, size)) < 0) {
            lower = upper;
            upper = 2 * lower;
            /* check for overflow, no finite root */
        }
    }
    search_band[0] = lower;
    search_band[1] = upper;
    /* ... Next we go find the root (zero) of the likelihood eqn which wil be the MLE for sigma. */
    /* then the MLE for mu has an explicit formula from that. */
    float err;
    float likelihood_value;
    wdfzero(&sigmahat, &likelihood_value, &err, search_band, tol, x0, frequency, meanUncensored, size);
    float muHat;
    float sumfrequency;
    muHat = 0;
    sumfrequency = 0;
    for (size_t i = 0; i < size; i++) {
        tempVal = std::exp(x0[i] / sigmahat);
        sumfrequency += (frequency[i] * tempVal);
    }
    sumfrequency = sumfrequency / nuncensored;
    muHat = sigmahat * std::log(sumfrequency);
    /*Those were parameter estimates for the shifted, scaled data, now */
    /*transform the parameters back to the original location and scale. */
    return {std::exp((range * muHat) + maxx), 1 / (range * sigmahat)};
}
} // end namespace metric
#endif // header guard
| 9,437
|
C++
|
.cpp
| 330
| 25.939394
| 118
| 0.62409
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,513
|
Binomial.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/distributions/Binomial.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_BINOMIAL_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_BINOMIAL_CPP
#include "Binomial.hpp"
namespace metric {
// Seed the RNG once from a non-deterministic source.
Binomial::Binomial() : _generator(std::random_device{}()) {}
/*** random sampling ***/
// Draw one binomial variate.
// std::binomial_distribution requires an integral result type — using
// <float> is undefined behavior per the standard's [rand.dist.bern.bin]
// requirements — so sample with long long and convert to float for callers.
// NOTE(review): _p1 is forwarded as the trial count and _p2 as the success
// probability, mirroring the original argument order — confirm against the
// member semantics in Binomial.hpp (cdf() below uses _p1 as a probability).
float Binomial::rnd()
{
    std::binomial_distribution<long long> Binomial_dist(static_cast<long long>(_p1), _p2);
    return static_cast<float>(Binomial_dist(_generator));
}
// float Binomial::mean()
// {
// return float(1)/_p1*gamma(float(1)+float(1)/_p2);
// }
// float Binomial::quantil(float p)
// {
// return icdf(p);
// }
// /*** pdf ***/
// float Binomial::pdf(const float x)
// {
// float z = x / _p1;
// float w = std::exp(-(std::pow(z,_p2)));
// float y = std::pow(z,(_p2-1)) * w * _p2 / _p1;
// if (w==0) return 0;
// else return y;
// }
// /*** icdf ***/
// float Binomial::icdf(const float x)
// {
// return _p1 * std::pow(-std::log(1-x),1/_p2);
// }
// template <typename float>
// Natural logarithm of the Gamma function, ln Γ(z), via the Lanczos
// approximation (g = 7, nine coefficients), using the reflection formula
// ln Γ(z) = ln(π / sin(πz)) − ln Γ(1 − z) for z < 0.5.
float lngamma(float z)
{
    static const float kLanczos[9 + 1] = {
        0.9999999999998099322768470047347, 676.520368121885098567009190444019, -1259.13921672240287047156078755283,
        771.3234287776530788486528258894,  -176.61502916214059906584551354,    12.507343278686904814458936853,
        -0.13857109526572011689554707,     9.984369578019570859563e-6,         1.50563273514931155834e-7};
    const float kPi = 3.14159265358979323846;
    const float kLnSqrt2Pi = 0.91893853320467274178;

    if (z < 0.5) {
        // Reflection into the z >= 0.5 half-line.
        return std::log(kPi / std::sin(kPi * z)) - lngamma(1.0 - z);
    }

    const float zm1 = z - 1.0;
    const float base = zm1 + 7.5;
    // Accumulate the Lanczos series from the smallest terms upward
    // (same descending-index order as the reference implementation).
    float series = 0;
    for (int k = 9; k >= 1; k--) {
        series += kLanczos[k] / (zm1 + ((float)k));
    }
    series += kLanczos[0];
    return ((kLnSqrt2Pi + std::log(series)) - base) + std::log(base) * (zm1 + 0.5);
}
// template <typename T>
// Continued-fraction kernel for the regularized incomplete beta function,
// evaluated with the modified Lentz method (cf. Numerical Recipes betacf).
// Used by betai(); converges fast for x < (a+1)/(a+b+2).
float betacf(float a, float b, float x)
{
    const int kMaxIter = 100;
    const float kTiny = 1.0e-30;  // floor to avoid division by ~zero
    const float kEps = 3.0e-7;    // relative convergence threshold
    auto guard = [kTiny](float v) { return std::abs(v) < kTiny ? kTiny : v; };

    const float qab = a + b;
    const float qap = a + 1.0;
    const float qam = a - 1.0;

    float c = 1.0;
    float d = 1.0 / guard(1.0 + (-qab * x / qap));
    float h = d;

    for (int m = 1; m <= kMaxIter; m++) {
        const int m2 = 2 * m;
        // Even step of the continued fraction.
        float aa = m * (b - m) * x / ((qam + m2) * (a + m2));
        d = 1.0 / guard(1.0 + aa * d);
        c = guard(1.0 + aa / c);
        h *= d * c;
        // Odd step.
        aa = -(a + m) * (qab + m) * x / ((a + m2) * (qap + m2));
        d = 1.0 / guard(1.0 + aa * d);
        c = guard(1.0 + aa / c);
        const float delta = d * c;
        h *= delta;
        if (std::abs(delta - 1.0) < kEps) {
            break;  // converged
        }
    }
    return h;
}
// template <typename T>
float betai(float a, float b, float x)
{
float betacf(float a, float b, float x);
float lngamma(float x);
float first_term;
assert(x >= 0.0 && x <= 1.0);
// if (x < 0.0 || x > 1.0) {
// std::cout << "x parameter = " << x << " (p parameter of binomial) is out of [0, 1].";
// }
if (x == 0.0 || x == 1.0) {
first_term = 0.0;
} else {
float log_complete_beta = lngamma(a) + lngamma(b) - lngamma(a + b);
first_term = std::exp(a * log(x) + b * log(1.0 - x) - log_complete_beta);
}
if (x < (a + 1.0) / (a + b + 2.0)) {
return first_term * betacf(a, b, x) / a;
} else {
return 1.0 - first_term * betacf(b, a, 1.0 - x) / b;
}
}
/*** cdf ***/
// param n total number of trials
// param k number of successful trials
// param p success probability of single trial
// Cumulative distribution value of the binomial model at kk successes,
// with n = number of stored samples and p = _p1 (single-trial success
// probability), via the regularized incomplete beta function.
// NOTE(review): betai(k, n-k+1, p) equals the UPPER tail P(X >= k) of
// Binomial(n, p), not P(X <= k) — confirm the intended tail convention
// against the callers of cdf().
float Binomial::cdf(int kk)
{
    float k = float(kk);            // success count, as float for betai
    float n = float(_data.size());  // total number of trials
    float p = _p1;                  // success probability of a single trial
    return betai(k, n - k + 1, p);
}
} // namespace metric
#endif
| 4,008
|
C++
|
.cpp
| 148
| 24.952703
| 109
| 0.599478
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,514
|
Normal.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/distributions/Normal.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTION_NORMAL_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTION_NORMAL_CPP
#include "Normal.hpp"

#include <cassert>
#include <cmath>
namespace metric {
// Seed the RNG once from a non-deterministic source.
inline Normal::Normal() : _generator(std::random_device{}()) {}
/*** random sampling ***/
// Draw one sample; _p1 is the mean and _p2 the standard-deviation
// parameter of std::normal_distribution.
inline float Normal::rnd()
{
    std::normal_distribution<float> norm_dist(_p1, _p2);
    return norm_dist(_generator);
}
// For a normal distribution the median coincides with the mean.
inline float Normal::median() { return _p1; }
// Quantile function: delegates to the inverse CDF.
inline float Normal::quantil(float p) { return icdf(p); }
inline float Normal::mean() { return _p1; }
// NOTE(review): rnd() uses _p2 as the standard deviation, but variance()
// returns _p2 unsquared — confirm whether _p2 stores sigma or sigma^2.
inline float Normal::variance() { return _p2; }
/*** pdf ***/
// NOTE(review): despite the name, this returns the LOG of the normal
// density: −ln√(2π) − ln σ − z²/2 (the constant −0.9189385332 is
// −ln√(2π)). Confirm that callers expect log-density rather than density.
inline float Normal::pdf(const float x)
{
    float z = (x - _p1) / _p2;  // standardized deviation
    return -0.9189385332 - std::log(_p2) - z * z / float(2);
}
/*** cdf ***/
// Normal cumulative distribution function:
//   Φ(x) = ½ · (1 + erf((x − μ) / (σ·√2)))
// The previous version omitted the erf() call, which produced an unbounded
// linear ramp instead of a probability in [0, 1].
inline float Normal::cdf(const float x)
{
    return (float(0.5) * (float(1) + std::erf((x - _p1) / (_p2 * float(1.41421356237309504880)))));
}
/*** icdf ***/
// Forward declaration; the template is defined later in this file.
template <typename T> T erfcinv(T z);
// Quantile of N(_p1, _p2): μ − √2·σ·erfcinv(2p).
inline float Normal::icdf(const float x) { return _p1 + -1.41421356237309504880 * erfcinv(2 * x) * _p2; }
// Evaluate the polynomial poly[0] + poly[1]·z + poly[2]·z² + … using
// Horner's scheme; coefficients are stored lowest order first.
template <class T> T polyeval(const std::vector<T> &poly, const T &z)
{
    auto it = poly.rbegin();  // highest-order coefficient first
    T acc = *it;
    for (++it; it != poly.rend(); ++it) {
        acc *= z;
        acc += *it;
    }
    return acc;
}
/*** originally by John Maddock 2006 under Boost Software License, Version 1.0, only refactored for use with this lib
* **/
// Shared kernel for erfinv()/erfcinv(): rational (Padé-style) minimax
// approximations on six argument sub-domains, selected by p = erf result
// and q = 1 − p. Callers guarantee p + q == 1, p, q in [0, 1], and handle
// the sign. Each branch computes result = Y·g + g·(P(t)/Q(t)) for a
// branch-specific pivot Y and polynomial pair P, Q.
template <typename T> T erfinv_imp(const T &p, const T &q)
{
    T result = 0;
    // Central region |erf| <= 0.5.
    if (p <= 0.5) {
        static const float Y = 0.0891314744949340820313f;
        static const std::vector<T> P = {-0.000508781949658280665617L, -0.00836874819741736770379L,
                                         0.0334806625409744615033L,    -0.0126926147662974029034L,
                                         -0.0365637971411762664006L,   0.0219878681111168899165L,
                                         0.00822687874676915743155L,   -0.00538772965071242932965L};
        static const std::vector<T> Q = {1,
                                         -0.970005043303290640362L,
                                         -1.56574558234175846809L,
                                         1.56221558398423026363L,
                                         0.662328840472002992063L,
                                         -0.71228902341542847553L,
                                         -0.0527396382340099713954L,
                                         0.0795283687341571680018L,
                                         -0.00233393759374190016776L,
                                         0.000886216390456424707504L};
        T g = p * (p + 10);
        T r = polyeval(P, p) / polyeval(Q, p);
        result = g * Y + g * r;
    // Mid region 0.5 < |erf| <= 0.75.
    } else if (q >= 0.25) {
        static const float Y = 2.249481201171875f;
        static const std::vector<T> P = {-0.202433508355938759655L, 0.105264680699391713268L,  8.37050328343119927838L,
                                         17.6447298408374015486L,   -18.8510648058714251895L,  -44.6382324441786960818L,
                                         17.445385985570866523L,    21.1294655448340526258L,   -3.67192254707729348546L};
        static const std::vector<T> Q = {1L,
                                         6.24264124854247537712L,
                                         3.9713437953343869095L,
                                         -28.6608180499800029974L,
                                         -20.1432634680485188801L,
                                         48.5609213108739935468L,
                                         10.8268667355460159008L,
                                         -22.6436933413139721736L,
                                         1.72114765761200282724L};
        T g = std::sqrt(-2 * std::log(q));
        T xs = q - 0.25;
        T r = polyeval(P, xs) / polyeval(Q, xs);
        result = g / (Y + r);
    // Tail region: argument transformed to x = sqrt(-ln q), then split
    // into four bands of increasing x.
    } else {
        T x = std::sqrt(-std::log(q));
        if (x < 3) {
            static const float Y = 0.807220458984375f;
            static const std::vector<T> P = {
                -0.131102781679951906451L,   -0.163794047193317060787L,   0.117030156341995252019L,
                0.387079738972604337464L,    0.337785538912035898924L,    0.142869534408157156766L,
                0.0290157910005329060432L,   0.00214558995388805277169L,  -0.679465575181126350155e-6L,
                0.285225331782217055858e-7L, -0.681149956853776992068e-9L};
            static const std::vector<T> Q = {1,
                                             3.46625407242567245975L,
                                             5.38168345707006855425L,
                                             4.77846592945843778382L,
                                             2.59301921623620271374L,
                                             0.848854343457902036425L,
                                             0.152264338295331783612L,
                                             0.01105924229346489121L};
            T xs = x - 1.125;
            T R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 6) {
            static const float Y = 0.93995571136474609375f;
            static const std::vector<T> P = {
                -0.0350353787183177984712L,  -0.00222426529213447927281L,  0.0185573306514231072324L,
                0.00950804701325919603619L,  0.00187123492819559223345L,   0.000157544617424960554631L,
                0.460469890584317994083e-5L, -0.230404776911882601748e-9L, 0.266339227425782031962e-11L};
            static const std::vector<T> Q = {1L,
                                             1.3653349817554063097L,
                                             0.762059164553623404043L,
                                             0.220091105764131249824L,
                                             0.0341589143670947727934L,
                                             0.00263861676657015992959L,
                                             0.764675292302794483503e-4L};
            T xs = x - 3;
            T R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 18) {
            static const float Y = 0.98362827301025390625f;
            static const std::vector<T> P = {
                -0.0167431005076633737133L,  -0.00112951438745580278863L,   0.00105628862152492910091L,
                0.000209386317487588078668L, 0.149624783758342370182e-4L,   0.449696789927706453732e-6L,
                0.462596163522878599135e-8L, -0.281128735628831791805e-13L, 0.99055709973310326855e-16L};
            static const std::vector<T> Q = {1L,
                                             0.591429344886417493481L,
                                             0.138151865749083321638L,
                                             0.0160746087093676504695L,
                                             0.000964011807005165528527L,
                                             0.275335474764726041141e-4L,
                                             0.282243172016108031869e-6L};
            T xs = x - 6;
            T R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 44) {
            static const float Y = 0.99714565277099609375f;
            static const std::vector<T> P = {-0.0024978212791898131227L,   -0.779190719229053954292e-5L,
                                             0.254723037413027451751e-4L,  0.162397777342510920873e-5L,
                                             0.396341011304801168516e-7L,  0.411632831190944208473e-9L,
                                             0.145596286718675035587e-11L, -0.116765012397184275695e-17L};
            static const std::vector<T> Q = {1L,
                                             0.207123112214422517181L,
                                             0.0169410838120975906478L,
                                             0.000690538265622684595676L,
                                             0.145007359818232637924e-4L,
                                             0.144437756628144157666e-6L,
                                             0.509761276599778486139e-9L};
            T xs = x - 18;
            T R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else {
            static const float Y = 0.99941349029541015625f;
            static const std::vector<T> P = {-0.000539042911019078575891L, -0.28398759004727721098e-6L,
                                             0.899465114892291446442e-6L,  0.229345859265920864296e-7L,
                                             0.225561444863500149219e-9L,  0.947846627503022684216e-12L,
                                             0.135880130108924861008e-14L, -0.348890393399948882918e-21L};
            static const std::vector<T> Q = {1L,
                                             0.0845746234001899436914L,
                                             0.00282092984726264681981L,
                                             0.468292921940894236786e-4L,
                                             0.399968812193862100054e-6L,
                                             0.161809290887904476097e-8L,
                                             0.231558608310259605225e-11L};
            T xs = x - 44;
            T R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        }
    }
    return result;
}
// Inverse complementary error function for z in [0, 2].
// Folds the argument into (p, q, sign) form around z = 1 and delegates to
// the shared rational-approximation kernel erfinv_imp().
template <typename T> T erfcinv(T z)
{
    assert((z >= 0) && (z <= 2));
    // if((z < 0) || (z > 2))
    //	std::cout << "Argument outside range [0,2] in inverse erfc function (got p=%1%)." << std::endl;
    const bool upper_half = (z > 1);
    const T q = upper_half ? 2 - z : z;
    const T p = 1 - q;
    const T sign = upper_half ? T(-1) : T(1);
    return sign * erfinv_imp(p, q);
}
// Inverse error function for z in [-1, 1]; odd symmetry is handled by
// folding the sign out and calling the shared kernel erfinv_imp().
template <typename T> T erfinv(T z)
{
    assert((z >= -1) && (z <= 1));
    // std::cout << "Argument outside range [-1, 1] in inverse erf function (got p=%1%)." << std::endl;
    const bool negative = (z < 0);
    const T p = negative ? -z : z;
    const T q = 1 - p;
    const T sign = negative ? T(-1) : T(1);
    return sign * erfinv_imp(p, q);
}
} // namespace metric
#endif
| 7,911
|
C++
|
.cpp
| 211
| 31.990521
| 117
| 0.652463
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,515
|
Discrete.cpp
|
metric-space-ai_metric/metric/utils/poor_mans_quantum/distributions/Discrete.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_DISCRETE_CPP
#define _METRIC_UTILS_POOR_MANS_QUANTUM_DISTRIBUTIONS_DISCRETE_CPP
#include <random>
#include <vector>
#include "../math_functions.hpp"
namespace metric {
// Seed the RNG once from a non-deterministic source.
template <typename T> Discrete<T>::Discrete() : _generator(std::random_device{}()) {}
// Draw one sample by inverse-transform sampling: pick u ~ U(0,1) and map
// it through the interpolated inverse CDF (cumulative grid _prob -> values
// _data) via akimaInterp1 (declared in math_functions.hpp).
template <typename T> T Discrete<T>::rnd()
{
    std::uniform_real_distribution<T> Discrete_dist(T(0), T(1));
    std::vector<T> values;
    values.push_back(Discrete_dist(_generator));
    return akimaInterp1(_prob, _data, values)[0];
}
// Median: inverse CDF evaluated at probability 0.5.
template <typename T> T Discrete<T>::median()
{
    std::vector<T> values;
    values.push_back(0.5);
    return akimaInterp1(_prob, _data, values)[0];
}
// Quantile: inverse CDF evaluated at probability p (no range check —
// presumably callers pass p in [0, 1]; verify).
template <typename T> T Discrete<T>::quantil(T p)
{
    std::vector<T> values;
    values.push_back(p);
    return akimaInterp1(_prob, _data, values)[0];
}
// Expected value: sum of x·P(x), where each point mass is recovered as the
// successive difference of the stored cumulative probabilities (_prob[0]
// is the mass of the first point).
// https://en.wikipedia.org/wiki/Mean#Mean_of_a_probability_distribution
template <typename T> T Discrete<T>::mean()
{
    T expectation = _data[0] * _prob[0];
    const size_t count = _data.size();
    for (size_t idx = 1; idx < count; ++idx) {
        expectation += _data[idx] * (_prob[idx] - _prob[idx - 1]);
    }
    return expectation;
}
// Variance: sum of (x − mean)²·P(x), with the same cumulative-difference
// recovery of point masses as in mean().
// https://en.wikipedia.org/wiki/Variance#Discrete_random_variable
template <typename T> T Discrete<T>::variance()
{
    const T mu = mean();
    T deviation = _data[0] - mu;
    T total = deviation * deviation * _prob[0];
    const size_t count = _data.size();
    for (size_t idx = 1; idx < count; ++idx) {
        deviation = _data[idx] - mu;
        total += deviation * deviation * (_prob[idx] - _prob[idx - 1]);
    }
    return total;
}
/*** pdf ***/
// Probability mass at x.
// NOTE(review): not implemented — always returns 0.
template <typename T> T Discrete<T>::pdf(const T x)
{
    // TODO:NEED TO IMPLEMENT!!!
    return (T)0;
}
/*** cdf ***/
// Cumulative probability at x: interpolates the stored cumulative grid
// (values _data -> probabilities _prob) at x, then clamps to [0, 1]
// outside the data range.
template <typename T> T Discrete<T>::cdf(const T x)
{
    // Updated cdf calculation by Stepan Mamontov 04.02.2020
    // Returns the interpolated cumulative probability for the given value.
    std::vector<T> values;
    values.push_back(x);
    T result = akimaInterp1(_data, _prob, values)[0];
    // cut probs over the _data values
    if (x < _data[0]) {
        result = 0;
    }
    if (x > _data[_data.size() - 1]) {
        result = 1;
    }
    return result;
}
/*** icdf ***/
// Inverse CDF at x.
// NOTE(review): not implemented — always returns 0; quantil() above
// already provides the interpolated inverse CDF.
template <typename T> T Discrete<T>::icdf(const T x)
{
    // TODO:NEED TO IMPLEMENT!!!
    return (T)0;
}
} // end namespace metric
#endif // header guard
| 2,711
|
C++
|
.cpp
| 86
| 29.639535
| 88
| 0.677543
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,516
|
convolution.cpp
|
metric-space-ai_metric/metric/utils/image_processing/convolution.cpp
|
#include "convolution.hpp"
namespace metric {
// Build the underlying convolution layer for a fixed image/kernel geometry.
// NOTE(review): the layer is created with 1 input and 1 output channel and
// is applied per image channel in operator(); the Channels template
// parameter only sizes the Image container — confirm against ConvLayer2d.
template <typename T, size_t Channels>
Convolution2d<T, Channels>::Convolution2d(size_t imageWidth, size_t imageHeight, size_t kernelWidth,
                                          size_t kernelHeight)
{
    convLayer = std::make_shared<ConvLayer2d>(imageWidth, imageHeight, 1, 1, kernelWidth, kernelHeight, 1, true);
}
// Load a 2-D kernel into the layer: flatten it row-major and pass it as
// the layer's weights with a single zero bias.
template <typename T, size_t Channels> void Convolution2d<T, Channels>::setKernel(const FilterKernel &kernel)
{
    /* Convert kernel */
    std::vector<T> kernelData(kernel.rows() * kernel.columns());
    size_t e = 0;
    for (size_t i = 0; i < kernel.rows(); ++i) {
        for (size_t j = 0; j < kernel.columns(); ++j) {
            kernelData[e++] = kernel(i, j);
        }
    }
    /* Set kernel */
    convLayer->setParameters({kernelData, {0}});
}
// Convolve each channel independently: flatten the channel row-major into
// a 1-row matrix, run the layer forward, and reshape the layer output back
// into a channel of the layer's reported output shape.
template <typename T, size_t Channels>
typename Convolution2d<T, Channels>::Image Convolution2d<T, Channels>::operator()(const Image &image)
{
    /* Create output image */
    Image output;
    for (size_t c = 0; c < image.size(); ++c) {
        const auto &channel = image[c];
        /* Convert image */
        Matrix imageData(1, channel.rows() * channel.columns());
        size_t e = 0;
        for (size_t i = 0; i < channel.rows(); ++i) {
            for (size_t j = 0; j < channel.columns(); ++j) {
                imageData(0, e++) = channel(i, j);
            }
        }
        /* Process */
        convLayer->forward(imageData);
        const auto outputData = convLayer->output();
        /* Convert output */
        e = 0;
        auto outputChannel = Channel(convLayer->getOutputShape()[1], convLayer->getOutputShape()[0]);
        for (size_t i = 0; i < outputChannel.rows(); ++i) {
            for (size_t j = 0; j < outputChannel.columns(); ++j) {
                outputChannel(i, j) = outputData(0, e++);
            }
        }
        output[c] = outputChannel;
    }
    return output;
}
// Convenience overload: install the kernel, then convolve.
template <typename T, size_t Channels>
typename Convolution2d<T, Channels>::Image Convolution2d<T, Channels>::operator()(const Image &image,
                                                                                  const FilterKernel &kernel)
{
    setKernel(kernel);
    return operator()(image);
}
} // namespace metric
| 1,975
|
C++
|
.cpp
| 59
| 30.474576
| 110
| 0.668592
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,517
|
image_filter.cpp
|
metric-space-ai_metric/metric/utils/image_processing/image_filter.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Created by Aleksey Timin on 3/29/20.
*/
#include "image_filter.hpp"
constexpr double PI = 3.14159265358979323846;
namespace metric {
using namespace metric::image_processing_details;
// Create an N-channel image whose every pixel of every channel equals
// initValue.
template <typename T, size_t N> Image<T, N> iminit(size_t rows, size_t columns, T initValue)
{
    blaze::DynamicMatrix<T> filled(rows, columns, initValue);
    return Image<T, N>(filled);
}
// Pad src by `shape` rows/columns according to the model's direction
// (PRE/POST = one side, BOTH = both sides) and type (CONSTANT, REPLICATE,
// CIRCULAR, SYMMETRIC). Returns the padded matrix together with the
// offset of the original image inside it.
template <typename T>
std::pair<blaze::DynamicMatrix<T>, Shape> PadModel<T>::pad(const Shape &shape, const blaze::DynamicMatrix<T> &src) const
{
    using namespace blaze;
    size_t padRow = shape[0];
    size_t padCol = shape[1];
    // Init padded matrix
    DynamicMatrix<T> dst;
    switch (_padDirection) {
    case PadDirection::PRE:
    case PadDirection::POST:
        dst = blaze::DynamicMatrix<T>(src.rows() + padRow, src.columns() + padCol, _initValue);
        break;
    case PadDirection::BOTH:
        dst = blaze::DynamicMatrix<T>(src.rows() + padRow * 2, src.columns() + padCol * 2, _initValue);
        break;
    }
    // Fill the padded matrix: for POST padding the source sits at (0, 0),
    // otherwise it is shifted by the pad offset.
    if (_padDirection == PadDirection::POST) {
        padRow = 0;
        padCol = 0;
    }
    blaze::submatrix(dst, padRow, padCol, src.rows(), src.columns()) = src;
    // Padding: visit every destination cell; (si, sj) is the corresponding
    // source coordinate (negative or past-the-end when inside the pad band).
    for (size_t i = 0; i < dst.rows(); ++i) {
        for (size_t j = 0; j < dst.columns(); ++j) {
            int si = i - padRow;
            int sj = j - padCol;
            // NOTE(review): si/sj are int while rows()/columns() are
            // size_t — the comparisons rely on implicit conversions;
            // verify for very large images.
            bool inside = si >= 0 && si < src.rows() && sj >= 0 && sj < src.columns();
            if (inside) {
                j += src.columns(); // work only in pad area
            } else {
                switch (_padType) {
                case PadType::CONSTANT:
                    // already filled with _initValue at construction
                    break;
                case PadType::REPLICATE:
                    // clamp to the nearest edge pixel
                    si = std::max<int>(0, si);
                    si = std::min<int>(src.rows() - 1, si);
                    sj = std::max<int>(0, sj);
                    sj = std::min<int>(src.columns() - 1, sj);
                    dst(i, j) = src(si, sj);
                    break;
                case PadType::CIRCULAR:
                    // wrap around the source periodically
                    // NOTE(review): the "+ padRow + 1" / "+ padCol + 1"
                    // offset looks suspicious for general pad sizes —
                    // confirm against the intended circular alignment.
                    si = (i + padRow + 1) % src.rows();
                    sj = (j + padCol + 1) % src.columns();
                    dst(i, j) = src(si, sj);
                    break;
                case PadType::SYMMETRIC: {
                    // mirror-reflect across the borders, flipping on every
                    // full traversal of the source extent
                    int distX = padRow - i;
                    int distY = padCol - j;
                    int xN = std::ceil((float)distX / src.rows());
                    int yN = std::ceil((float)distY / src.columns());
                    int cordMx = padRow - xN * src.rows();
                    int cordMy = padCol - yN * src.columns();
                    int xi = (i - cordMx) % src.rows();
                    int xj = (j - cordMy) % src.columns();
                    si = xN % 2 == 0 ? xi : src.rows() - xi - 1;
                    sj = yN % 2 == 0 ? xj : src.columns() - xj - 1;
                    dst(i, j) = src(si, sj);
                    break;
                }
                }
            }
        }
    }
    return std::make_pair(dst, Shape{padRow, padCol});
}
// template <typename ImgT, typename Filter, PadDirection PadDir, PadType PadType>
// ImgT imfilter<ImgT, Filter, PadDir, PadType>::operator()(const ImgT& input) {
// return ::metric::image_processing_details::filter(input, _filter, _padModel);
// }
// Apply the stored filter to a single channel using the stored pad model.
template <typename ChannelType, size_t N, typename Filter, PadDirection PadDir, PadType PadType>
Channel<ChannelType> imfilter<ChannelType, N, Filter, PadDir, PadType>::operator()(const Channel<ChannelType> &input)
{
    return ::metric::image_processing_details::filter(input, _filter, _padModel);
}
// Apply the stored filter to every channel of an N-channel image.
template <typename ChannelType, size_t N, typename Filter, PadDirection PadDir, PadType PadType>
Image<ChannelType, N> imfilter<ChannelType, N, Filter, PadDir, PadType>::operator()(const Image<ChannelType, N> &input)
{
    return ::metric::image_processing_details::filter(input, _filter, _padModel);
}
// Box (averaging) kernel: all ones, normalized by the element count
// (analogous to MATLAB fspecial('average') — presumably; verify).
inline FilterType::AVERAGE::AVERAGE(size_t rows, size_t columns)
{
    FilterKernel f(rows, columns, 1.0);
    _kernel = f / blaze::prod(Shape{rows, columns});
}
// Rotationally symmetric Gaussian kernel exp(-(x²+y²)/(2σ²)), built on a
// centered meshgrid, with near-zero entries (below max·eps) truncated and
// the result normalized to unit sum. _xMat/_yMat are kept for LOG below.
inline FilterType::GAUSSIAN::GAUSSIAN(size_t rows, size_t columns, double sigma)
{
    Shape shape{rows, columns};
    blaze::StaticVector<FilterKernel::ElementType, 2> halfShape =
        (static_cast<blaze::StaticVector<FilterKernel::ElementType, 2>>(shape) - 1) / 2;
    auto xrange = range<FilterKernel::ElementType, blaze::rowVector>(-halfShape[1], halfShape[1]);
    auto yrange = range<FilterKernel::ElementType, blaze::columnVector>(-halfShape[0], halfShape[0]);
    auto [xMat, yMat] = meshgrid(xrange, yrange);
    auto arg = -(xMat % xMat + yMat % yMat) / (2 * sigma * sigma);
    _kernel = blaze::exp(arg);
    FilterKernel::ElementType max = blaze::max(_kernel);
    for (int i = 0; i < _kernel.rows(); ++i) {
        for (int j = 0; j < _kernel.columns(); ++j) {
            _kernel(i, j) =
                _kernel(i, j) < max * std::numeric_limits<FilterKernel::ElementType>::epsilon() ? 0 : _kernel(i, j);
        }
    }
    auto sumh = blaze::sum(_kernel);
    if (sumh != 0) {
        _kernel = _kernel / sumh;
    }
    _xMat = xMat;
    _yMat = yMat;
}
// 3x3 discrete Laplacian with shape parameter alpha clamped to [0, 1]
// (alpha blends the cross-shaped and diagonal stencils).
inline FilterType::LAPLACIAN::LAPLACIAN(double alpha)
{
    alpha = std::max<double>(0, std::min<double>(alpha, 1));
    auto h1 = alpha / (alpha + 1);
    auto h2 = (1 - alpha) / (alpha + 1);
    _kernel = FilterKernel{{h1, h2, h1}, {h2, -4 / (alpha + 1), h2}, {h1, h2, h1}};
}
// Laplacian-of-Gaussian kernel: gaussian·(x²+y²−2σ²)/σ⁴, shifted to have
// zero mean so flat regions produce zero response.
inline FilterType::LOG::LOG(size_t rows, size_t columns, double sigma)
{
    Shape shape{rows, columns};
    auto std2 = sigma * sigma;
    GAUSSIAN gausFilter(rows, columns, sigma);
    auto h = gausFilter();
    _kernel =
        h % (gausFilter._xMat % gausFilter._xMat + gausFilter._yMat % gausFilter._yMat - 2 * std2) / (std2 * std2);
    _kernel -= blaze::sum(_kernel) / blaze::prod(shape);
}
// Linear motion-blur kernel of length `len` at angle `theta` degrees:
// builds one quadrant of a rasterized line weighted by distance to the
// line, mirrors it by two 90° rotations to the full kernel, normalizes,
// and flips vertically for positive cos(phi) (fspecial('motion')-style —
// presumably; verify against reference output).
inline FilterType::MOTION::MOTION(double len, int theta)
{
    len = std::max<double>(1, len);
    auto half = (len - 1) / 2;
    auto phi = static_cast<double>(theta % 180) / 180 * PI;
    double cosphi = std::cos(phi);
    double sinphi = std::sin(phi);
    int xsign = cosphi > 0 ? 1 : -1;
    double linewdt = 1;
    auto eps = std::numeric_limits<double>::epsilon();
    auto sx = std::trunc(half * cosphi + linewdt * xsign - len * eps);
    auto sy = std::trunc(half * sinphi + linewdt - len * eps);
    auto xrange = range<FilterKernel::ElementType, blaze::rowVector>(0, sx, xsign);
    auto yrange = range<FilterKernel::ElementType, blaze::columnVector>(0, sy);
    auto [xMat, yMat] = meshgrid(xrange, yrange);
    FilterKernel dist2line = (yMat * cosphi - xMat * sinphi);
    auto rad = blaze::sqrt(xMat % xMat + yMat % yMat);
    // find points beyond the line's end-point but within the line width
    blaze::DynamicMatrix<bool> cond = blaze::map(rad, [half](const auto &x) { return x >= half; }) &&
                                      blaze::map(abs(dist2line), [linewdt](const auto &x) { return x <= linewdt; });
    auto lastpix = mfind(static_cast<FilterKernel>(dist2line), cond);
    for (auto [i, j] : lastpix) {
        auto v = dist2line(i, j);
        auto pix = half - abs((xMat(i, j) + v * sinphi) / cosphi);
        dist2line(i, j) = std::sqrt(v * v + pix * pix);
    }
    dist2line = linewdt + eps - abs(dist2line);
    // zero out anything beyond line width
    dist2line = blaze::map(dist2line, [](const FilterKernel::ElementType &v) { return v < 0 ? 0 : v; });
    auto h = rot90(rot90<FilterKernel::ElementType>(dist2line));
    _kernel = FilterKernel(h.rows() * 2 - 1, h.columns() * 2 - 1);
    blaze::submatrix(_kernel, 0, 0, h.rows(), h.columns()) = h;
    blaze::submatrix(_kernel, h.rows() - 1, h.columns() - 1, dist2line.rows(), dist2line.columns()) = dist2line;
    _kernel /= blaze::sum(_kernel) + eps * len * len;
    if (cosphi > 0) {
        _kernel = flipud(_kernel);
    }
}
// Unsharp-masking kernel: identity minus a Laplacian of strength alpha.
inline FilterType::UNSHARP::UNSHARP(double alpha)
{
    _kernel = FilterKernel{{0, 0, 0}, {0, 1, 0}, {0, 0, 0}} - LAPLACIAN(alpha)();
}
namespace image_processing_details {
// MATLAB-style meshgrid: xMat repeats x along every row, yMat repeats y
// along every column; both results are size(y) x size(x).
template <typename T>
std::pair<blaze::DynamicMatrix<T>, blaze::DynamicMatrix<T>>
meshgrid(const blaze::DynamicVector<T, blaze::rowVector> &x, const blaze::DynamicVector<T, blaze::columnVector> &y)
{
    blaze::DynamicMatrix<T, blaze::rowMajor> xMat(blaze::size(y), blaze::size(x));
    blaze::DynamicMatrix<T, blaze::columnMajor> yMat(blaze::size(y), blaze::size(x));
    for (int i = 0; i < xMat.rows(); ++i) {
        blaze::row(xMat, i) = x;
    }
    for (int i = 0; i < yMat.columns(); ++i) {
        blaze::column(yMat, i) = y;
    }
    return std::make_pair(xMat, yMat);
}
// Inclusive arithmetic progression start, start+step, ..., with
// abs((stop-start)/step)+1 elements (both endpoints included when the
// step divides the span evenly).
template <typename T, bool P> blaze::DynamicVector<T, P> range(T start, T stop, T step)
{
    blaze::DynamicVector<T, P> vec(std::abs((stop - start) / step) + 1);
    for (auto &val : vec) {
        val = start;
        start += step;
    }
    return vec;
}
// MATLAB find(): collect the (row, col) indices of all cells where cond is
// true. NOTE(review): the return relies on a blaze DynamicVector(size,
// pointer) constructor copying from indecies.data() — confirm this blaze
// overload copies the array contents.
template <typename T>
blaze::DynamicVector<std::pair<size_t, size_t>, blaze::columnVector> mfind(const blaze::DynamicMatrix<T> &input,
                                                                           const blaze::DynamicMatrix<bool> &cond)
{
    std::vector<std::pair<size_t, size_t>> indecies;
    for (auto i = 0; i < input.rows(); ++i) {
        for (auto j = 0; j < input.columns(); ++j) {
            if (cond(i, j)) {
                indecies.push_back(std::make_pair(i, j));
            }
        }
    }
    return blaze::DynamicVector<std::pair<size_t, size_t>, blaze::columnVector>(indecies.size(), indecies.data());
}
// Flip a matrix upside down (reverse the row order).
template <typename T> blaze::DynamicMatrix<T> flipud(const blaze::DynamicMatrix<T> &input)
{
    blaze::DynamicMatrix<T> out(input.rows(), input.columns());
    for (int i = 0; i < input.rows(); ++i) {
        blaze::row(out, input.rows() - i - 1) = blaze::row(input, i);
    }
    return out;
}
// Sliding-window correlation of input with kernel (no kernel flip, so this
// is correlation rather than true convolution): each output cell is the
// rounded sum of the element-wise product of the window with the kernel,
// clamped at zero. NOTE(review): the result matrix is allocated with
// ceil(size/2) margins but filled over input.rows()-funcRows iterations —
// confirm the intended output extent for even/odd kernel sizes.
static blaze::DynamicMatrix<double> imgcov2(const blaze::DynamicMatrix<double> &input, const FilterKernel &kernel)
{
    size_t funcRows = kernel.rows();
    size_t funcCols = kernel.columns();
    blaze::DynamicMatrix<double> resultMat(input.rows() - std::ceil((double)funcRows / 2),
                                           input.columns() - std::ceil((double)funcCols / 2));
    for (auto i = 0; i < input.rows() - funcRows; ++i) {
        for (auto j = 0; j < input.columns() - funcCols; ++j) {
            auto bwProd = blaze::submatrix(input, i, j, funcRows, funcCols) % kernel;
            auto filteredVal = blaze::sum(bwProd);
            double val = blaze::round(filteredVal);
            resultMat(i, j) = val > 0 ? val : 0;
        }
    }
    return resultMat;
}
// Filter a single channel: pad by kernel-size-1 according to padmodel, run
// the sliding-window correlation, and either return the full result or
// crop back to the original channel extent.
template <typename Filter, typename ChannelType>
Channel<ChannelType> filter(const Channel<ChannelType> &channel, const Filter &impl,
                            const PadModel<ChannelType> &padmodel, bool full)
{
    auto kernel = impl();
    Channel<ChannelType> result;
    Shape padShape{kernel.rows() - 1, kernel.columns() - 1};
    auto [paddedCh, imgCord] = padmodel.pad(padShape, channel);
    auto filteredChannel = imgcov2(paddedCh, kernel);
    if (full) {
        result = filteredChannel;
    } else {
        result = blaze::submatrix(filteredChannel, std::max<size_t>(0, imgCord[0] - 1),
                                  std::max<size_t>(0, imgCord[1] - 1), channel.rows(), channel.columns());
    }
    return result;
}
// Filter every channel of an N-channel image with the same kernel and pad
// model; per-channel logic mirrors the single-channel overload above.
template <typename Filter, typename ChannelType, size_t ChannelNumber>
Image<ChannelType, ChannelNumber> filter(const Image<ChannelType, ChannelNumber> &img, const Filter &impl,
                                         const PadModel<ChannelType> &padmodel, bool full)
{
    auto kernel = impl();
    Shape padShape{kernel.rows() - 1, kernel.columns() - 1};
    Image<ChannelType, ChannelNumber> result;
    for (size_t ch = 0; ch < img.size(); ++ch) {
        auto [paddedCh, imgCord] = padmodel.pad(padShape, img[ch]);
        auto filteredChannel = imgcov2(paddedCh, kernel);
        if (full) {
            result[ch] = filteredChannel;
        } else {
            result[ch] = blaze::submatrix(filteredChannel, std::max<size_t>(0, imgCord[0] - 1),
                                          std::max<size_t>(0, imgCord[1] - 1), img[ch].rows(), img[ch].columns());
        }
    }
    return result;
}
} // namespace image_processing_details
} // namespace metric
| 11,171
|
C++
|
.cpp
| 283
| 36.572438
| 120
| 0.664173
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,518
|
lapwrappers.cpp
|
metric-space-ai_metric/metric/utils/solver/helper/lapwrapper/lapwrappers.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
Copyright (c) 2019 Oleg Popov
*/
/*
Laplacians is a package containing graph algorithms, with an emphasis on tasks related to
spectral and algebraic graph theory. It contains (and will contain more) code for solving
systems of linear equations in graph Laplacians, low stretch spanning trees, sparsifiation,
clustering, local clustering, and optimization on graphs.
All graphs are represented by sparse adjacency matrices. This is both for speed, and because
our main concerns are algebraic tasks. It does not handle dynamic graphs. It would be very slow
to implement dynamic graphs this way.
https://github.com/danspielman/Laplacians.jl
*/
#ifndef _METRIC_UTILS_SOLVER_HELPER_LAPWRAPPERS_CPP
#define _METRIC_UTILS_SOLVER_HELPER_LAPWRAPPERS_CPP
namespace metric {
template <typename Tv>
inline blaze::DynamicVector<Tv> nullSolver(const blaze::DynamicVector<Tv> &a, std::vector<size_t> &pcg)
{
return blaze::DynamicVector<Tv>(1, 0);
}
template <typename Tv>
inline blaze::DynamicVector<Tv> chol_subst(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &Lower,
const blaze::CompressedMatrix<Tv, blaze::columnMajor> &B)
{
blaze::DynamicVector<Tv> res(B.rows());
blaze::DynamicMatrix<Tv, blaze::columnMajor> B1 = B, L = Lower;
potrs(L, B1, 'L');
res = column(B1, 0);
return res;
}
template <typename Tv>
blaze::DynamicVector<Tv> chol_subst(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &Lower,
const blaze::DynamicVector<Tv> &b)
{
blaze::DynamicMatrix<Tv, blaze::columnMajor> L = Lower;
blaze::DynamicVector<Tv> b1 = b;
potrs(L, b1, 'L');
return b1;
}
// Dense Cholesky factorization of a sparse SPD matrix: densify, factor
// A = L * L^H via blaze::llh, and return the lower factor wrapped in a
// Factorization record for later back-substitution.
template <typename Tv> inline Factorization<Tv> cholesky(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &A)
{
    blaze::DynamicMatrix<Tv, blaze::columnMajor> dense(A);
    blaze::DynamicMatrix<Tv, blaze::columnMajor> lower;
    blaze::llh(dense, lower);
    Factorization<Tv> fact;
    fact.Lower = lower;
    return fact;
}
// Adapts a Laplacian solver to SDDM systems. The SDDM matrix is split into an
// adjacency part `a` plus excess diagonal `d`; an extra grounded vertex
// absorbing `d` turns it into a proper Laplacian (extendMatrix), which the
// wrapped lapSolver can handle. Each solve augments b with -sum(b) so the
// right-hand side sums to zero, then drops the grounded vertex again.
template <typename Tv>
SubSolver<Tv> sddmWrapLap(SolverA<Tv> lapSolver, const blaze::CompressedMatrix<Tv, blaze::columnMajor> &sddm,
                          std::vector<size_t> &pcgIts, float tol, double maxits, double maxtime, bool verbose,
                          ApproxCholParams params)
{
    auto [a, d] = adj(sddm);
    blaze::CompressedMatrix<Tv, blaze::columnMajor> a1 = extendMatrix(a, d);
    SubSolver<Tv> F = lapSolver(a1, pcgIts, tol, maxits, maxtime, verbose, params);
    return [a = a, F, tol, maxits, maxtime, verbose, params](const blaze::DynamicVector<Tv> &b,
                                                             std::vector<size_t> &pcgIts) mutable {
        blaze::DynamicVector<Tv> sb(b.size() + 1);
        subvector(sb, 0, b.size()) = b;
        sb[b.size()] = -blaze::sum(b);  // make the augmented RHS sum to zero
        blaze::DynamicVector<Tv> xaug = F(sb, pcgIts, tol, maxits, maxtime, verbose, params);
        // Pin the grounded vertex's potential to zero
        xaug = xaug - blaze::DynamicVector<Tv>(xaug.size(), xaug[xaug.size() - 1]);
        // BUG FIX: return all a.rows() == b.size() entries. The original
        // returned subvector(xaug, 0, a.rows() - 1), silently dropping the
        // last component (Laplacians.jl returns xaug[1:a.n]).
        return subvector(xaug, 0, a.rows());
    };
}
// Wraps a Laplacian solver so it works on graphs with several connected
// components: split the graph into components, build one sub-solver per
// component, and stitch the per-component solutions together (BlockSolver).
template <typename Tv>
SubSolver<Tv> lapWrapComponents(SolverA<Tv> solver, const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a,
                                std::vector<size_t> &pcgIts, float tol, double maxits, double maxtime, bool verbose,
                                ApproxCholParams params)
{
    auto t1 = std::chrono::high_resolution_clock::now();
    std::vector<size_t> co = components(a);
    if (*max_element(co.begin(), co.end()) == 1) {
        // Single connected component: delegate directly.
        SubSolver<Tv> s = solver(a, pcgIts, tol, maxits, maxtime, verbose, params);
        if (verbose) {
            auto t2 = std::chrono::high_resolution_clock::now();
            auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
            std::cout << "Solver build time: " << msec << " ms.";
        }
        return s;
    } else {
        std::vector<std::vector<size_t>> comps = vecToComps(co);
        std::vector<SubSolver<Tv>> solvers;
        for (size_t i = 0; i < comps.size(); ++i) {
            std::vector<size_t> ind = comps[i];
            blaze::CompressedMatrix<Tv> asub = index<Tv>(a, ind, ind);
            SubSolver<Tv> subSolver;
            if (ind.size() == 1) {
                // Isolated vertex: nothing to solve.
                subSolver = SubSolver<Tv>(nullSolver<Tv>);
            } else if (ind.size() < 50) {
                // Tiny component: a direct Cholesky solve is cheaper.
                std::vector<size_t> pcgits;
                subSolver = lapWrapConnected<Tv>(chol_sddm<Tv>(), asub, pcgits);
            } else {
                // BUG FIX: build the sub-solver on the component submatrix
                // `asub` (the original passed the whole matrix `a`) and
                // forward the solver options instead of dropping them.
                subSolver = solver(asub, pcgIts, tol, maxits, maxtime, verbose, params);
            }
            solvers.push_back(subSolver);
        }
        if (verbose) {
            auto t2 = std::chrono::high_resolution_clock::now();
            auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
            std::cout << "Solver build time: " << msec << " ms.";
        }
        return BlockSolver(comps, solvers, pcgIts, tol, maxits, maxtime, verbose);
    }
}
// Builds a Laplacian solver for a CONNECTED graph out of an SDDM solver.
// The Laplacian is singular (constant nullspace), so the row/column of the
// maximum-degree vertex is removed, the reduced nonsingular system is solved,
// and both b and x are re-centered to mean zero.
template <typename Tv>
SubSolver<Tv> lapWrapConnected(SolverA<Tv> solver, const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a,
                               std::vector<size_t> &pcgIts, float tol, double maxits, double maxtime, bool verbose,
                               ApproxCholParams params)
{
    blaze::CompressedMatrix<Tv, blaze::columnMajor> la = forceLap(a);
    size_t N = la.rows();
    size_t ind = findmax(diag(la)).second;  // grounded vertex: largest diagonal (degree)
    std::vector<size_t> leave;
    // Keep every index except the grounded vertex `ind`
    for (size_t i = 0; i < N; ++i) {
        if (i != ind)
            leave.push_back(i);
    }
    blaze::CompressedMatrix<Tv, blaze::columnMajor> lasub = index<Tv>(la, leave, leave);
    SubSolver<Tv> subSolver = solver(lasub, pcgIts, tol, maxits, maxtime, verbose, params);
    return SubSolver<Tv>([=](const blaze::DynamicVector<Tv> &b, std::vector<size_t> &pcgIts) mutable {
        // Project b onto the mean-zero subspace before solving the reduced system
        blaze::DynamicVector<Tv> bs = index(b, leave) - blaze::DynamicVector<Tv>(leave.size(), mean(b));
        blaze::DynamicVector<Tv> xs = subSolver(bs, pcgIts);
        blaze::DynamicVector<Tv> x(b.size(), 0);
        index(x, leave, xs);
        // Potentials are defined up to a constant: re-center to mean zero
        x = x - blaze::DynamicVector<Tv>(x.size(), mean(x));
        return x;
    });
}
// Builds a solve-function from a direct factorization routine: the matrix is
// factored once up front; each subsequent call only back-substitutes.
// The tol parameter is unused (direct method), hence unnamed.
template <typename Tv>
SubSolver<Tv> wrapInterface(const FactorSolver<Tv> solver, const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a,
                            std::vector<size_t> &pcgIts, float, double maxits, double maxtime, bool verbose,
                            ApproxCholParams params)
{
    auto t1 = std::chrono::high_resolution_clock::now();
    Factorization<Tv> sol = solver(a);
    if (verbose) {
        auto t2 = std::chrono::high_resolution_clock::now();
        auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
        std::cout << "Solver build time: " << msec << " ms.";
    }
    return SubSolver<Tv>(
        [=](const blaze::DynamicVector<Tv> &b, std::vector<size_t> &pcgIts) -> blaze::DynamicVector<Tv> {
            if (pcgIts.size())
                pcgIts[0] = 0;  // direct solve: no PCG iterations
            auto t1 = std::chrono::high_resolution_clock::now();
            blaze::DynamicVector<Tv> x = chol_subst(sol.Lower, b);
            if (verbose) {
                auto t2 = std::chrono::high_resolution_clock::now();
                auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
                // BUG FIX: this path measures a solve, not the factorization;
                // the original printed "Solver build time" here as well.
                std::cout << "Solve time: " << msec << " ms.";
            }
            return x;
        });
}
// Combines per-component sub-solvers into a single solver over the full
// vertex set: scatter b into per-component pieces, solve each, and gather the
// results. Reports the worst PCG iteration count over all components.
template <typename Tv>
SubSolver<Tv> BlockSolver(const std::vector<std::vector<size_t>> &comps, std::vector<SubSolver<Tv>> &solvers,
                          std::vector<size_t> &pcgIts, float tol, double maxits, double maxtime, bool verbose)
{
    return SubSolver<Tv>([=](const blaze::DynamicVector<Tv> &b, std::vector<size_t> &pcgIts) mutable {
        std::vector<size_t> pcgTmp;  // scratch iteration log shared by sub-solves
        if (pcgIts.size()) {
            pcgIts[0] = 0;
            pcgTmp.push_back(0);
        }
        blaze::DynamicVector<Tv> x(b.size(), 0);
        for (size_t i = 0; i < comps.size(); ++i) {
            std::vector<size_t> ind = comps[i];
            blaze::DynamicVector<Tv> bi = index(b, ind);  // restrict b to this component
            blaze::DynamicVector<Tv> solution = (solvers[i])(bi, pcgTmp);
            index(x, ind, solution);                      // scatter back into x
            if (pcgIts.size())
                pcgIts[0] = pcgIts[0] > pcgTmp[0] ? pcgIts[0] : pcgTmp[0];  // keep the max
        }
        return x;
    });
}
// Matrix-RHS variant of wrapInterface: factor once, back-substitute per call.
// NOTE(review): chol_subst's matrix overload solves only column 0 of b and
// returns a DynamicVector — confirm this matches SubSolverMat's contract.
template <typename Tv>
SubSolverMat<Tv> wrapInterfaceMat(const FactorSolver<Tv> solver,
                                  const blaze::CompressedMatrix<Tv, blaze::columnMajor> &a, std::vector<size_t> &pcgIts,
                                  float tol, double maxits, double maxtime, bool verbose, ApproxCholParams params)
{
    auto t1 = std::chrono::high_resolution_clock::now();
    Factorization<Tv> sol = solver(a);
    if (verbose) {
        auto t2 = std::chrono::high_resolution_clock::now();
        auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
        std::cout << "Solver build time: " << msec << " ms.";
    }
    return SubSolverMat<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &b, std::vector<size_t> &pcgIts) {
        if (pcgIts.size())
            pcgIts[0] = 0;  // direct solve: no PCG iterations
        auto t1 = std::chrono::high_resolution_clock::now();
        blaze::DynamicVector<Tv> x = chol_subst(sol.Lower, b);
        if (verbose) {
            auto t2 = std::chrono::high_resolution_clock::now();
            auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count();
            // BUG FIX: this path measures a solve, not the factorization;
            // the original printed "Solver build time" here as well.
            std::cout << "Solve time: " << msec << " ms.";
        }
        return x;
    });
}
// Curried form: bind the factorization routine now, accept the matrix later.
template <typename Tv> SolverAMat<Tv> wrapInterfaceMat(const FactorSolver<Tv> solver)
{
    return SolverAMat<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat, std::vector<size_t> &iters,
                              float tolerance = 1e-6, double iterCap = HUGE_VAL, double timeCap = HUGE_VAL,
                              bool chatty = false, ApproxCholParams cfg = ApproxCholParams()) {
        return wrapInterfaceMat(solver, mat, iters, tolerance, iterCap, timeCap, chatty, cfg);
    });
}
// Curried form: bind the factorization routine now, accept the matrix later.
template <typename Tv> SolverA<Tv> wrapInterface(const FactorSolver<Tv> solver)
{
    return SolverA<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat, std::vector<size_t> &iters,
                           float tolerance = 1e-6, double iterCap = HUGE_VAL, double timeCap = HUGE_VAL,
                           bool chatty = false, ApproxCholParams cfg = ApproxCholParams()) {
        return wrapInterface(solver, mat, iters, tolerance, iterCap, timeCap, chatty, cfg);
    });
}
// Direct (dense Cholesky) SDDM solvers: factor once, back-substitute per solve.
template <typename Tv> SolverAMat<Tv> chol_sddm_mat() { return wrapInterfaceMat<Tv>(cholesky<Tv>); }
template <typename Tv> SolverA<Tv> chol_sddm() { return wrapInterface<Tv>(cholesky<Tv>); }
// Curried overload of lapWrapConnected: fix the SDDM solver, defer the matrix.
template <typename Tv> inline SolverA<Tv> lapWrapConnected(const SolverA<Tv> solver)
{
    return SolverA<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat, std::vector<size_t> &iters,
                           float tolerance = 1e-6, double iterCap = HUGE_VAL, double timeCap = HUGE_VAL,
                           bool chatty = false, const ApproxCholParams cfg = ApproxCholParams()) {
        return lapWrapConnected(solver, mat, iters, tolerance, iterCap, timeCap, chatty, cfg);
    });
}
// Curried overload of lapWrapComponents: fix the solver, defer the matrix.
template <typename Tv> inline SolverA<Tv> lapWrapComponents(const SolverA<Tv> solver)
{
    return SolverA<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat, std::vector<size_t> &iters,
                           float tolerance = 1e-6, double iterCap = HUGE_VAL, double timeCap = HUGE_VAL,
                           bool chatty = false, const ApproxCholParams cfg = ApproxCholParams()) {
        return lapWrapComponents(solver, mat, iters, tolerance, iterCap, timeCap, chatty, cfg);
    });
}
// Full Laplacian solver from an SDDM solver: handle the connected case via
// lapWrapConnected, then lift to arbitrary graphs via lapWrapComponents.
template <typename Tv> inline SolverA<Tv> lapWrapSDDM(SolverA<Tv> sddmSolver)
{
    return lapWrapComponents(lapWrapConnected(sddmSolver));
}
// Curried overload of sddmWrapLap: fix the Laplacian solver, defer the matrix.
template <typename Tv> inline SolverA<Tv> sddmWrapLap(const SolverA<Tv> solver)
{
    return SolverA<Tv>([=](const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat, std::vector<size_t> &iters,
                           float tolerance = 1e-6, double iterCap = HUGE_VAL, double timeCap = HUGE_VAL,
                           bool chatty = false, const ApproxCholParams cfg = ApproxCholParams()) {
        return sddmWrapLap(solver, mat, iters, tolerance, iterCap, timeCap, chatty, cfg);
    });
}
} // namespace metric
#endif
| 11,371
|
C++
|
.cpp
| 256
| 41.046875
| 117
| 0.698241
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,519
|
ijvstruct.cpp
|
metric-space-ai_metric/metric/utils/solver/helper/ijvstruct/ijvstruct.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
Copyright (c) 2019 Oleg Popov
*/
/*
Laplacians is a package containing graph algorithms, with an emphasis on tasks related to
spectral and algebraic graph theory. It contains (and will contain more) code for solving
systems of linear equations in graph Laplacians, low stretch spanning trees, sparsifiation,
clustering, local clustering, and optimization on graphs.
All graphs are represented by sparse adjacency matrices. This is both for speed, and because
our main concerns are algebraic tasks. It does not handle dynamic graphs. It would be very slow
to implement dynamic graphs this way.
https://github.com/danspielman/Laplacians.jl
*/
#ifndef _METRIC_UTILS_SOLVER_HELPER_IJVSTRUCT_CPP
#define _METRIC_UTILS_SOLVER_HELPER_IJVSTRUCT_CPP
namespace metric {
// Concatenates the triplet lists of *this and b; the dimension n is taken
// from the left operand. (v may be a blaze vector without insert(), so its
// tail is copied by index.)
template <typename Tv> IJV<Tv> IJV<Tv>::operator+(const IJV &b) const
{
    IJV<Tv> out;
    out.n = n;
    out.nnz = nnz + b.nnz;
    out.i = i;
    out.j = j;
    out.v = v;
    out.i.insert(out.i.end(), b.i.begin(), b.i.end());
    out.j.insert(out.j.end(), b.j.begin(), b.j.end());
    out.v.resize(out.nnz);
    const size_t offset = v.size();
    for (size_t k = offset; k < out.nnz; ++k)
        out.v[k] = b.v[k - offset];
    return out;
}
// Builds the triplet (i, j, v) representation from a blaze compressed matrix.
template <typename Tv> IJV<Tv>::IJV(const blaze::CompressedMatrix<Tv, blaze::columnMajor> &mat)
{
    n = mat.columns();
    nnz = mat.nonZeros();
    i.resize(nnz);
    j.resize(nnz);
    v.resize(nnz);
    size_t k = 0;
    // Walk column by column; blaze iterators yield (row index, value) pairs
    for (size_t l = 0UL; l < mat.columns(); ++l) {
        for (typename blaze::CompressedMatrix<Tv, blaze::columnMajor>::ConstIterator it = mat.cbegin(l);
             it != mat.cend(l); ++it) {
            i[k] = it->index();  // row
            j[k] = l;            // column
            v[k] = it->value();
            ++k;
        }
    }
}
// Builds the triplet (i, j, v) representation from CSC storage by expanding
// each column's [colptr[l], colptr[l+1]) slice.
template <typename Tv> IJV<Tv>::IJV(const SparseMatrixCSC<Tv> &cscm)
{
    n = cscm.n;
    nnz = cscm.nzval.size();
    i.resize(nnz);
    j.resize(nnz);
    v.resize(nnz);
    size_t k = 0;
    for (size_t l = 0; l != n; l++) {
        size_t colbegin = cscm.colptr[l];
        size_t colend = cscm.colptr[l + 1];
        for (size_t row = colbegin; row != colend; row++) {
            i[k] = cscm.rowval[row];
            j[k] = l;
            v[k] = cscm.nzval[row];
            ++k;
        }
    }
}
// Debug dump: prints the dimension, nnz and the three triplet arrays of
// matrix number `ijvn` to stdout.
template <typename Tv> void IJV<Tv>::dump_ijv(int ijvn) const
{
    std::cout << "ijv" << ijvn << " matrix dump:\n";
    std::cout << "\n"
              << "n= " << n;
    std::cout << "\n"
              << "nnz= " << nnz;
    std::cout << "\ni=";
    for (size_t k = 0; k < nnz; ++k)
        std::cout << i[k] << " ";
    std::cout << "\n"
              << "j=";
    for (size_t k = 0; k < nnz; ++k)
        std::cout << j[k] << " ";
    std::cout << "\n"
              << "v= ";
    for (size_t k = 0; k < nnz; ++k)
        std::cout << v[k] << " ";
    std::cout << std::endl << std::endl;
}
// Reorders the triplets into column-major order: primary key j, secondary
// key i. This is the precondition for sparseCSC().
template <typename Tv> void IJV<Tv>::sortByCol()
{
    std::vector<size_t> order = collect(0, nnz);
    std::sort(order.begin(), order.end(), [this](size_t a, size_t b) {
        if (j[a] != j[b])
            return j[a] < j[b];
        return i[a] < i[b];
    });
    // Apply the permutation to all three arrays
    std::vector<size_t> si(nnz), sj(nnz);
    blaze::DynamicVector<Tv> sv(nnz);
    for (size_t k = 0; k < nnz; ++k) {
        si[k] = i[order[k]];
        sj[k] = j[order[k]];
        sv[k] = v[order[k]];
    }
    i = std::move(si);
    j = std::move(sj);
    v = std::move(sv);
}
// Converts a column-sorted IJV triplet list into CSC storage.
// Precondition: ijv must already be sorted by column (see IJV::sortByCol);
// entries are consumed strictly in increasing column order.
template <typename Tv> SparseMatrixCSC<Tv> sparseCSC(const IJV<Tv> &ijv)
{
    SparseMatrixCSC<Tv> res;
    res.m = ijv.n;  // square matrix: rows == cols == ijv.n
    res.n = res.m;
    size_t nnz = ijv.nnz;
    res.colptr.resize(res.n + 1);
    res.rowval.resize(nnz);
    res.nzval.resize(nnz);
    res.colptr[0] = 0;
    res.colptr[res.n] = nnz;
    size_t k = 0;
    // Fill colptr, rowval and nzval; t walks the sorted triplets once
    std::size_t totalnz = 0, t = 0;
    for (size_t l = 0UL; l < res.n; ++l) {
        std::size_t rownz = 0;  // entries in column l
        while (t < nnz && l == ijv.j[t]) {
            res.nzval[k] = ijv.v[t];
            res.rowval[k] = ijv.i[t];
            ++k;
            ++rownz;
            ++t;
        }
        totalnz += rownz;
        res.colptr[l + 1] = totalnz;  // running prefix sum of column sizes
    }
    return res;
}
} // namespace metric
#endif
| 3,962
|
C++
|
.cpp
| 141
| 25.524823
| 112
| 0.621987
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,524
|
RandomEMD.cpp
|
metric-space-ai_metric/metric/distance/k-random/RandomEMD.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
#include "RandomEMD.hpp"
#include "../../utils/poor_mans_quantum.hpp"
namespace metric {
// Random-sample Earth Mover's Distance: the area between the two empirical
// CDFs (fitted via PMQ), integrated over the pooled support of both samples
// with a Riemann sum of width (max - min) * precision.
template <typename Sample, typename D>
auto RandomEMD<Sample, D>::operator()(const Sample &sample_1, const Sample &sample_2) const -> distance_type
{
    PMQ pmq_1(sample_1);
    PMQ pmq_2(sample_2);
    // Pool both samples to obtain the common support [lo, hi]
    Sample concat_data;
    for (size_t i = 0; i < sample_1.size(); i++) {
        concat_data.push_back(sample_1[i]);
    }
    for (size_t i = 0; i < sample_2.size(); i++) {
        concat_data.push_back(sample_2[i]);
    }
    std::sort(concat_data.begin(), concat_data.end());
    if (concat_data.empty())
        return 0;  // no data: distance defined as zero (was undefined behavior)
    const double lo = concat_data.front();
    const double hi = concat_data.back();
    const double step = (hi - lo) * precision;
    if (step <= 0)
        return 0;  // degenerate support: the original loop never terminated
    D area = 0;
    for (double value = lo; value <= hi; value += step) {
        double diff = pmq_1.cdf(value) - pmq_2.cdf(value);
        if (diff < 0)
            diff = -diff;  // manual |.|: a plain abs() call may bind ::abs(int) and truncate
        area += diff * step;
    }
    return area;
}
} // namespace metric
| 1,108
|
C++
|
.cpp
| 32
| 32.625
| 108
| 0.688263
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,525
|
CramervonMises.cpp
|
metric-space-ai_metric/metric/distance/k-random/CramervonMises.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
#include "CramervonMises.hpp"
#include "../../utils/poor_mans_quantum.hpp"
namespace metric {
// Cramér–von Mises distance: square root of the integral of the squared
// difference between the two empirical CDFs (fitted via PMQ) over the pooled
// support, approximated by a Riemann sum of width (max - min) * precision.
template <typename Sample, typename D>
auto CramervonMises<Sample, D>::operator()(const Sample &sample_1, const Sample &sample_2) const -> distance_type
{
    PMQ pmq_1(sample_1);
    PMQ pmq_2(sample_2);
    // Pool both samples to obtain the common support [lo, hi]
    Sample concat_data;
    for (size_t i = 0; i < sample_1.size(); i++) {
        concat_data.push_back(sample_1[i]);
    }
    for (size_t i = 0; i < sample_2.size(); i++) {
        concat_data.push_back(sample_2[i]);
    }
    std::sort(concat_data.begin(), concat_data.end());
    if (concat_data.empty())
        return 0;  // no data: distance defined as zero (was undefined behavior)
    const double lo = concat_data.front();
    const double hi = concat_data.back();
    const double step = (hi - lo) * precision;
    if (step <= 0)
        return 0;  // degenerate support: the original loop never terminated
    D area = 0;
    for (double value = lo; value <= hi; value += step) {
        // hoisted: the original evaluated each cdf() twice per step
        const double diff = pmq_1.cdf(value) - pmq_2.cdf(value);
        area += diff * diff * step;
    }
    return sqrt(area);
}
} // namespace metric
| 1,182
|
C++
|
.cpp
| 32
| 34.9375
| 113
| 0.688323
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,526
|
KolmogorovSmirnov.cpp
|
metric-space-ai_metric/metric/distance/k-random/KolmogorovSmirnov.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
#include "KolmogorovSmirnov.hpp"
#include "../../utils/poor_mans_quantum.hpp"
namespace metric {
// Kolmogorov–Smirnov distance: the largest absolute gap between the two
// empirical CDFs (fitted via PMQ), evaluated at every pooled sample point.
template <typename Sample, typename D>
auto KolmogorovSmirnov<Sample, D>::operator()(const Sample &sample_1, const Sample &sample_2) const -> distance_type
{
    PMQ pmq_1(sample_1);
    PMQ pmq_2(sample_2);
    Sample concat_data;
    for (size_t i = 0; i < sample_1.size(); i++) {
        concat_data.push_back(sample_1[i]);
    }
    for (size_t i = 0; i < sample_2.size(); i++) {
        concat_data.push_back(sample_2[i]);
    }
    std::sort(concat_data.begin(), concat_data.end());
    D max_difference = 0;
    for (size_t i = 0; i < concat_data.size(); i++) {
        // hoisted: the original evaluated each cdf() up to twice per point
        double diff = pmq_1.cdf(concat_data[i]) - pmq_2.cdf(concat_data[i]);
        if (diff < 0)
            diff = -diff;  // manual |.|: a plain abs() call may bind ::abs(int) and truncate
        if (diff > max_difference)
            max_difference = diff;
    }
    return max_difference;
}
} // namespace metric
| 1,124
|
C++
|
.cpp
| 32
| 33.09375
| 116
| 0.695291
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,527
|
Riemannian.cpp
|
metric-space-ai_metric/metric/distance/d-spaced/Riemannian.cpp
|
//#include "metric/distance/k-related/Standards.hpp"
#include "../../utils/wrappers/lapack.hpp"
#include "metric/utils/type_traits.hpp"
namespace metric {
namespace riemannian_details {
// averaged estimation: code copied from mgc.*pp with only mgc replaced by a functor reference; TODO refactor to
// avoid code duplication
// n evenly spaced points from a to b inclusive; n <= 1 yields just {b}.
inline std::vector<double> linspace(double a, double b, int n)
{
    std::vector<double> pts;
    if (n <= 1) {
        pts.push_back(b);
        return pts;
    }
    const double step = (b - a) / double(n - 1);
    pts.reserve(n);
    for (int k = 0; k < n; ++k)
        pts.push_back(a + k * step);
    return pts;
}
// Horner evaluation of a polynomial; poly[k] is the coefficient of z^k.
// Precondition: poly is non-empty.
inline double polyeval(const std::vector<double> &poly, const double z)
{
    double acc = poly.back();
    for (int k = int(poly.size()) - 2; k >= 0; --k)
        acc = acc * z + poly[k];
    return acc;
}
// Core of the inverse error function: rational approximations on a sequence
// of argument ranges (the layout and coefficients follow Boost.Math's
// erf_inv implementation). Expects p + q == 1, with the caller choosing the
// pairing so the branch argument stays well-conditioned.
inline double erfinv_imp(const double p, const double q)
{
    double result = 0;
    if (p <= 0.5) {
        // Central region: rational approximation directly in p.
        static const float Y = 0.0891314744949340820313f;
        static const std::vector<double> P = {-0.000508781949658280665617L, -0.00836874819741736770379L,
                                              0.0334806625409744615033L, -0.0126926147662974029034L,
                                              -0.0365637971411762664006L, 0.0219878681111168899165L,
                                              0.00822687874676915743155L, -0.00538772965071242932965L};
        static const std::vector<double> Q = {1,
                                              -0.970005043303290640362L,
                                              -1.56574558234175846809L,
                                              1.56221558398423026363L,
                                              0.662328840472002992063L,
                                              -0.71228902341542847553L,
                                              -0.0527396382340099713954L,
                                              0.0795283687341571680018L,
                                              -0.00233393759374190016776L,
                                              0.000886216390456424707504L};
        double g = p * (p + 10);
        double r = polyeval(P, p) / polyeval(Q, p);
        result = g * Y + g * r;
    } else if (q >= 0.25) {
        // Mid region: approximation in xs = q - 0.25, scaled by sqrt(-2 log q).
        static const float Y = 2.249481201171875f;
        static const std::vector<double> P = {
            -0.202433508355938759655L, 0.105264680699391713268L, 8.37050328343119927838L,
            17.6447298408374015486L, -18.8510648058714251895L, -44.6382324441786960818L,
            17.445385985570866523L, 21.1294655448340526258L, -3.67192254707729348546L};
        static const std::vector<double> Q = {1L,
                                              6.24264124854247537712L,
                                              3.9713437953343869095L,
                                              -28.6608180499800029974L,
                                              -20.1432634680485188801L,
                                              48.5609213108739935468L,
                                              10.8268667355460159008L,
                                              -22.6436933413139721736L,
                                              1.72114765761200282724L};
        double g = std::sqrt(-2 * std::log(q));
        double xs = q - 0.25;
        double r = polyeval(P, xs) / polyeval(Q, xs);
        result = g / (Y + r);
    } else {
        // Tail region: approximations in x = sqrt(-log q), split into
        // sub-ranges of increasing x.
        double x = std::sqrt(-std::log(q));
        if (x < 3) {
            static const float Y = 0.807220458984375f;
            static const std::vector<double> P = {
                -0.131102781679951906451L, -0.163794047193317060787L, 0.117030156341995252019L,
                0.387079738972604337464L, 0.337785538912035898924L, 0.142869534408157156766L,
                0.0290157910005329060432L, 0.00214558995388805277169L, -0.679465575181126350155e-6L,
                0.285225331782217055858e-7L, -0.681149956853776992068e-9L};
            static const std::vector<double> Q = {1,
                                                  3.46625407242567245975L,
                                                  5.38168345707006855425L,
                                                  4.77846592945843778382L,
                                                  2.59301921623620271374L,
                                                  0.848854343457902036425L,
                                                  0.152264338295331783612L,
                                                  0.01105924229346489121L};
            double xs = x - 1.125;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 6) {
            static const float Y = 0.93995571136474609375f;
            static const std::vector<double> P = {
                -0.0350353787183177984712L, -0.00222426529213447927281L, 0.0185573306514231072324L,
                0.00950804701325919603619L, 0.00187123492819559223345L, 0.000157544617424960554631L,
                0.460469890584317994083e-5L, -0.230404776911882601748e-9L, 0.266339227425782031962e-11L};
            static const std::vector<double> Q = {1L,
                                                  1.3653349817554063097L,
                                                  0.762059164553623404043L,
                                                  0.220091105764131249824L,
                                                  0.0341589143670947727934L,
                                                  0.00263861676657015992959L,
                                                  0.764675292302794483503e-4L};
            double xs = x - 3;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 18) {
            static const float Y = 0.98362827301025390625f;
            static const std::vector<double> P = {
                -0.0167431005076633737133L, -0.00112951438745580278863L, 0.00105628862152492910091L,
                0.000209386317487588078668L, 0.149624783758342370182e-4L, 0.449696789927706453732e-6L,
                0.462596163522878599135e-8L, -0.281128735628831791805e-13L, 0.99055709973310326855e-16L};
            static const std::vector<double> Q = {1L,
                                                  0.591429344886417493481L,
                                                  0.138151865749083321638L,
                                                  0.0160746087093676504695L,
                                                  0.000964011807005165528527L,
                                                  0.275335474764726041141e-4L,
                                                  0.282243172016108031869e-6L};
            double xs = x - 6;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 44) {
            static const float Y = 0.99714565277099609375f;
            static const std::vector<double> P = {-0.0024978212791898131227L, -0.779190719229053954292e-5L,
                                                  0.254723037413027451751e-4L, 0.162397777342510920873e-5L,
                                                  0.396341011304801168516e-7L, 0.411632831190944208473e-9L,
                                                  0.145596286718675035587e-11L, -0.116765012397184275695e-17L};
            static const std::vector<double> Q = {1L,
                                                  0.207123112214422517181L,
                                                  0.0169410838120975906478L,
                                                  0.000690538265622684595676L,
                                                  0.145007359818232637924e-4L,
                                                  0.144437756628144157666e-6L,
                                                  0.509761276599778486139e-9L};
            double xs = x - 18;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else {
            static const float Y = 0.99941349029541015625f;
            static const std::vector<double> P = {-0.000539042911019078575891L, -0.28398759004727721098e-6L,
                                                  0.899465114892291446442e-6L, 0.229345859265920864296e-7L,
                                                  0.225561444863500149219e-9L, 0.947846627503022684216e-12L,
                                                  0.135880130108924861008e-14L, -0.348890393399948882918e-21L};
            static const std::vector<double> Q = {1L,
                                                  0.0845746234001899436914L,
                                                  0.00282092984726264681981L,
                                                  0.468292921940894236786e-4L,
                                                  0.399968812193862100054e-6L,
                                                  0.161809290887904476097e-8L,
                                                  0.231558608310259605225e-11L};
            double xs = x - 44;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        }
    }
    return result;
}
// Inverse complementary error function; meaningful for z in (0, 2).
// Mirrors z > 1 onto the z < 1 case and restores the sign afterwards.
inline double erfcinv(const double z)
{
    double p, q, sign;
    if (z > 1) {
        q = 2 - z;
        p = 1 - q;
        sign = -1;
    } else {
        p = 1 - z;
        q = z;
        sign = 1;
    }
    return sign * erfinv_imp(p, q);
}
// Normal quantile function applied elementwise:
//   x_p = mu - sqrt(2) * erfcinv(2p) * sigma
inline std::vector<double> icdf(const std::vector<double> &prob, const double mu, const double sigma)
{
    std::vector<double> quantiles;
    quantiles.reserve(prob.size());
    for (const double p : prob)
        quantiles.push_back(mu - 1.41421356237309504880 * erfcinv(2 * p) * sigma);
    return quantiles;
}
// Sum of squared deviations from `mean`.
// NOTE(review): this is NOT divided by n (an unnormalized "variance");
// callers currently rely on this value as-is.
inline double variance(const std::vector<double> &data, const double mean)
{
    double acc = 0;
    for (const double x : data) {
        const double d = x - mean;
        acc += d * d;
    }
    return acc;
}
// Arithmetic mean; an empty input divides by zero (caller's responsibility).
inline double mean(const std::vector<double> &data)
{
    double total = 0;
    for (const double x : data)
        total += x;
    return total / data.size();
}
// Peak-to-RMS ratio: max(|x|) / sqrt(mean(x^2)).
inline double peak2ems(const std::vector<double> &data)
{
    double maxAbs = -1;
    double rms = 0;
    for (const auto v : data) {
        // manual |v|: a plain abs() call may bind the ::abs(int) overload
        // when <cmath>'s double overloads are not in scope, truncating v
        const double absV = v < 0 ? -v : v;
        if (absV > maxAbs) {
            maxAbs = absV;
        }
        rms += v * v;
    }
    rms /= data.size();
    rms = std::sqrt(rms);
    return maxAbs / rms;
}
// Monte-Carlo estimator (shared with MGC): repeatedly evaluates functor f on
// random same-index subsamples of (a, b) and stops early once the sorted
// sample of f-values is close to a fitted normal (convergence measured by the
// peak-to-RMS of the deviation from the normal quantiles). Returns the
// running mean of the per-sample f-values.
// NOTE(review): quirks inherited from the mgc implementation — confirm intent:
//  - each subsample holds sampleSize - 1 elements (the `j < end` loop with
//    end = i * sampleSize - 1 drops one element per window);
//  - variance() returns an unnormalized sum of squares, yet is used directly
//    as `sigma` in icdf();
//  - the signed `auto i` counters are compared against size_t bounds.
template <typename Container, typename Functor>
double estimate(const Container &a, const Container &b, const Functor &f, const size_t sampleSize,
                const double threshold, size_t maxIterations)
{
    using T = type_traits::underlying_type_t<Container>;  // (unused)
    using V = type_traits::index_value_type_t<Container>;
    assert(a.size() == b.size());
    const size_t dataSize = a.size();
    /* Update maxIterations: never draw more windows than the data allows */
    if (maxIterations == 0) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations > dataSize / sampleSize) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations < 1) {
        // Data smaller than one sample window: evaluate on everything
        return f(a, b);
    }
    /* Create shuffle indexes (deterministic default-seeded engine) */
    std::vector<size_t> indexes(dataSize);
    std::iota(indexes.begin(), indexes.end(), 0);
    auto rng = std::default_random_engine();
    std::shuffle(indexes.begin(), indexes.end(), rng);
    /* Create vector container for fast random access */
    const std::vector<V> vectorA(a.begin(), a.end());
    const std::vector<V> vectorB(b.begin(), b.end());
    /* Create samples */
    std::vector<V> sampleA;
    std::vector<V> sampleB;
    sampleA.reserve(sampleSize);
    sampleB.reserve(sampleSize);
    std::vector<double> mgcValues;
    double mu = 0;
    for (auto i = 1; i <= maxIterations; ++i) {
        // Window [start, end) of the shuffled index list
        size_t start = (i - 1) * sampleSize;
        size_t end = std::min(i * sampleSize - 1, dataSize - 1);
        /* Create samples */
        sampleA.clear();
        sampleB.clear();
        for (auto j = start; j < end; ++j) {
            sampleA.push_back(vectorA[indexes[j]]);
            sampleB.push_back(vectorB[indexes[j]]);
        }
        /* Get sample mgc value */
        double mgc = f(sampleA, sampleB);
        mgcValues.push_back(mgc);
        std::sort(mgcValues.begin(), mgcValues.end());
        // Compare the empirical distribution of f-values against a normal
        // with the same mean/spread via its quantiles
        const size_t n = mgcValues.size();
        const auto p0 = riemannian_details::linspace(0.5 / n, 1 - 0.5 / n, n);
        mu = riemannian_details::mean(mgcValues);
        double sigma = riemannian_details::variance(mgcValues, mu);
        const std::vector<double> synth = riemannian_details::icdf(p0, mu, sigma);
        std::vector<double> diff;
        diff.reserve(n);
        for (auto i = 0; i < n; ++i) {
            diff.push_back(mgcValues[i] - synth[i]);
        }
        auto convergence = peak2ems(diff) / n;
        if (convergence < threshold) {
            return mu;  // distribution of estimates has stabilized
        }
    }
    return mu;
}
/*
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
code dubbing template <typename Container, typename Functor> double estimate( const Container & data, const Functor&
entropy, const size_t sampleSize, const double threshold, size_t maxIterations
){
using T = type_traits::underlying_type_t<Container>;
using V = type_traits::index_value_type_t<Container>;
const size_t dataSize = data.size();
// Update maxIterations
if (maxIterations == 0) {
maxIterations = dataSize / sampleSize;
}
if (maxIterations > dataSize / sampleSize) {
maxIterations = dataSize / sampleSize;
}
if (maxIterations < 1) {
return entropy(data);
}
// Create shuffle indexes
std::vector<size_t> indexes(dataSize);
std::iota(indexes.begin(), indexes.end(), 0);
auto rng = std::default_random_engine();
std::shuffle(indexes.begin(), indexes.end(), rng);
// Create vector container for fast random access
const std::vector<V> vectorA(data.begin(), data.end());
// Create samples
std::vector<V> sampleA;
sampleA.reserve(sampleSize);
std::vector<double> entropyValues;
double mu = 0;
for (auto i = 1; i <= maxIterations; ++i) {
size_t start = (i - 1) * sampleSize;
size_t end = std::min(i * sampleSize - 1, dataSize - 1);
// Create samples
sampleA.clear();
for (auto j = start; j < end; ++j) {
sampleA.push_back(vectorA[indexes[j]]);
}
// Get sample mgc value
double sample_entopy = entropy(sampleA);
entropyValues.push_back(sample_entopy);
std::sort(entropyValues.begin(), entropyValues.end());
const size_t n = entropyValues.size();
const auto p0 = riemannian_details::linspace(0.5 / n, 1 - 0.5 / n, n);
mu = riemannian_details::mean(entropyValues);
double sigma = riemannian_details::variance(entropyValues, mu);
const std::vector<double> synth = riemannian_details::icdf(p0, mu, sigma);
std::vector<double> diff;
diff.reserve(n);
for (auto i = 0; i < n; ++i) {
diff.push_back(entropyValues[i] - synth[i]);
}
auto convergence = entropy_details::peak2ems(diff) / n;
std::cout << n << " " << convergence << " " << sample_entopy << " " << mu << std::endl;
if (convergence < threshold) {
return mu;
}
}
return mu;
}
// */
} // namespace riemannian_details
// Riemannian distance between two datasets: build a graph-Laplacian-style
// matrix from all pairwise distances within each dataset, then compare the
// two matrices via their generalized eigenvalues (matDistance).
template <typename RecType, typename Metric>
template <typename C>
double RiemannianDistance<RecType, Metric>::operator()(const C &Xc, const C &Yc) const
{
    using V = metric::type_traits::underlying_type_t<C>;
    blaze::DynamicMatrix<V> distancesX(Xc.size(), Xc.size(), 0);
    blaze::DynamicMatrix<V> distancesY(Yc.size(), Yc.size(), 0);
    for (size_t i = 0; i < Xc.size(); ++i) {
        for (size_t j = i + 1; j < Xc.size(); ++j) {  // upper triangle only
            auto d = metric(Xc[i], Xc[j]);
            distancesX(i, j) = -d;  // Laplacian: negative off-diagonal
            distancesX(i, i) += d;  // degree accumulates on the diagonal
            distancesX(j, j) += d;
        }
    }
    for (size_t i = 0; i < Yc.size(); ++i) {
        for (size_t j = i + 1; j < Yc.size(); ++j) {
            auto d = metric(Yc[i], Yc[j]);
            // BUG FIX: store -d as in the X matrix; the original stored +d,
            // making the two Laplacians structurally inconsistent.
            distancesY(i, j) = -d;
            distancesY(i, i) += d;
            distancesY(j, j) += d;
        }
    }
    // (the j = i + 1 loops also skip the redundant metric() calls the
    // original made for the unused i >= j pairs)
    return matDistance(distancesX, distancesY);
}
// Riemannian (affine-invariant) distance between two symmetric matrices:
// sqrt( sum_k log(lambda_k)^2 ) over the generalized eigenvalues of (A, B).
template <typename RecType, typename Metric>
template <typename T>
T RiemannianDistance<RecType, Metric>::matDistance(blaze::DynamicMatrix<T> A, blaze::DynamicMatrix<T> B) const
{
    blaze::DynamicVector<T> eigenValues;
    sygv(A, B, eigenValues);  // LAPACK generalized symmetric eigenproblem
    // Clamp non-positive/underflowed eigenvalues to 1 so log() contributes 0
    // for numerically singular directions instead of NaN/Inf
    for (T &e : eigenValues) {
        if (e <= std::numeric_limits<T>::epsilon()) {
            e = 1;
        }
    }
    return sqrt(blaze::sum(blaze::pow(blaze::log(eigenValues), 2)));
}
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
// Averaged (subsampled) estimate of the Riemannian distance: delegates to the
// shared Monte-Carlo estimator with *this as the distance functor.
template <typename RecType, typename Metric>
template <typename Container>
double RiemannianDistance<RecType, Metric>::estimate(const Container &a, const Container &b, const size_t sampleSize,
                                                     const double threshold, size_t maxIterations) const
{
    return riemannian_details::estimate(a, b, *this, sampleSize, threshold, maxIterations);
}
} // namespace metric
| 14,346
|
C++
|
.cpp
| 396
| 31.722222
| 118
| 0.670173
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,528
|
L1.cpp
|
metric-space-ai_metric/metric/distance/k-related/L1.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "L1.hpp"
namespace metric {
// the UNFINISHED code for L1 metrics from the review paper
// Sørensen distance: sum_i |a_i - b_i| / sum_i (a_i + b_i).
// The shorter input is implicitly zero-padded, preserving the intent of the
// previous (marked UNFINISHED) zero-substitution scheme, whose iterator
// juggling compared iterators of different containers and summed SIGNED
// differences before taking a single absolute value.
template <typename V>
template <typename Container>
auto Sorensen<V>::operator()(const Container &a, const Container &b) const ->
    typename std::enable_if<!std::is_same<Container, V>::value, distance_type>::type
{
    double diff_sum = 0;
    double total_sum = 0;
    auto it1 = a.begin();
    auto it2 = b.begin();
    while (it1 != a.end() || it2 != b.end()) {
        // Treat a missing element of the shorter vector as zero
        const double x = (it1 != a.end()) ? double(*it1) : 0.0;
        const double y = (it2 != b.end()) ? double(*it2) : 0.0;
        diff_sum += (x >= y) ? (x - y) : (y - x);  // element-wise |x - y|
        total_sum += x + y;
        if (it1 != a.end())
            ++it1;
        if (it2 != b.end())
            ++it2;
    }
    if (total_sum == 0)
        return 0;  // both inputs all-zero: identical (avoids 0/0)
    return diff_sum / total_sum;
}
// Sørensen distance over blaze sparse vectors.
// NOTE(review): this overload shares the unfinished iteration scheme of the
// dense overload: `it1 == it2` compares iterators of two DIFFERENT
// containers, the advance-condition for it2 tests it1 (copy-paste), signed
// differences are summed before a single abs, and sparse element skipping is
// not implemented (see the TODO below). Treat results as provisional.
template <typename V>
auto Sorensen<V>::operator()(const blaze::CompressedVector<V> &a, const blaze::CompressedVector<V> &b) const
    -> distance_type
{
    double sum1 = 0;
    double sum2 = 0;
    blaze::CompressedVector<V> z{0};
    auto it1 = a.begin();
    auto it2 = b.begin();
    while (true) {
        if (it1 == it2) // both ends reached
            break;
        sum1 += (it1->value() - it2->value());
        sum2 += (it1->value() + it2->value());
        // TODO implement element skipping!!
        if (it1 + 1 != a.end() && it1 != z.begin())
            ++it1;
        else
            it1 = z.begin(); // end reached, using zero against values of other vector
        if (it2 + 1 != b.end() && it1 != z.begin())
            ++it2;
        else
            it2 = z.begin();
    }
    if (sum1 < 0)
        sum1 = -sum1;
    return sum1 / sum2; // here the type is changed
}
/**
 * @brief Hassanat distance (Hassanat, 2014). Per dimension:
 *          D_i = 1 - (1 + min) / (1 + max)                   if min >= 0
 *          D_i = 1 - (1 + min + |min|) / (1 + max + |min|)   if min <  0
 *        For min < 0, |min| == -min, so the numerator collapses to 1 and the
 *        denominator becomes (1 + max - min). The original draft added `min`
 *        instead of `|min|` in the negative branch.
 *
 * @param A first feature vector
 * @param B second feature vector
 * @return accumulated per-dimension Hassanat dissimilarity
 */
template <typename V>
template <typename Container>
auto Hassanat<V>::operator()(const Container &A, const Container &B) const -> distance_type
{
    value_type sum = 0;
    // '&&' stops at the shorter container; the original '||' dereferenced
    // past-the-end iterators when the lengths differed.
    for (auto it1 = A.begin(), it2 = B.begin(); it1 != A.end() && it2 != B.end(); ++it1, ++it2) {
        const value_type min = std::min(*it1, *it2);
        const value_type max = std::max(*it1, *it2);
        if (min >= 0) {
            sum += 1 - (1 + min) / (1 + max);
        } else {
            // (1 + min + |min|) == 1 and (1 + max + |min|) == 1 + max - min.
            sum += 1 - 1 / (1 + max - min);
        }
    }
    return sum;
}
/**
 * @brief Ruzicka distance: 1 - sum(min(a_i, b_i)) / sum(max(a_i, b_i)),
 *        i.e. the complement of the weighted-Jaccard (Ruzicka) similarity.
 *
 * The original draft divided a per-element max by an assignment expression
 * (`max / min = std::min(...)`), which neither compiles for built-in types
 * nor matches the Ruzicka formula.
 *
 * @param A first non-negative feature vector
 * @param B second non-negative feature vector
 * @return Ruzicka dissimilarity in [0, 1] for non-negative input
 */
template <typename V>
template <typename Container>
auto Ruzicka<V>::operator()(const Container &A, const Container &B) const -> distance_type
{
    value_type sum_min = 0;
    value_type sum_max = 0;
    // '&&' stops at the shorter container to avoid past-the-end dereference.
    for (auto it1 = A.begin(), it2 = B.begin(); it1 != A.end() && it2 != B.end(); ++it1, ++it2) {
        sum_min += std::min(*it1, *it2);
        sum_max += std::max(*it1, *it2);
    }
    return double(1) - double(sum_min) / double(sum_max);
}
} // namespace metric
| 2,743
|
C++
|
.cpp
| 96
| 26.208333
| 108
| 0.629278
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,529
|
Standards.cpp
|
metric-space-ai_metric/metric/distance/k-related/Standards.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#include "Standards.hpp"
#include <blaze/Blaze.h>
#include <algorithm>
#include <cmath>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace metric {
/*** Euclidean (L2) distance between two iterable containers; iteration stops
     at the end of the shorter container. ***/
template <typename V>
template <typename Container>
auto Euclidean<V>::operator()(const Container &a, const Container &b) const ->
    typename std::enable_if<!std::is_same<Container, V>::value, distance_type>::type
{
    distance_type accum = 0;
    auto ia = a.begin();
    auto ib = b.begin();
    while (ia != a.end() && ib != b.end()) {
        const auto diff = *ia - *ib;
        accum += diff * diff;
        ++ia;
        ++ib;
    }
    return std::sqrt(accum);
}
/*** Euclidean distance between two plain scalars: |a - b|. ***/
template <typename V> auto Euclidean<V>::operator()(const V &a, const V &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    const distance_type diff = a - b;
    return std::sqrt(diff * diff);
}
// Overload selected for Blaze vector types (recognised by the
// Container<ValueType, TransposeFlag> template signature); delegates to
// Blaze's optimized Euclidean norm of the difference vector.
template <typename V>
template <template <typename, bool> class Container, typename ValueType, bool F> // detect Blaze object by signature
double Euclidean<V>::operator()(const Container<ValueType, F> &a, const Container<ValueType, F> &b) const
{
    return blaze::norm(a - b);
}
/**
 * @brief Scaled Euclidean distance clipped from above:
 *        min(thres, factor * ||a - b||).
 *
 * @param a first feature vector
 * @param b second feature vector
 * @return thresholded scaled Euclidean distance
 */
template <typename V>
template <typename Container>
auto Euclidean_thresholded<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    distance_type sum = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
        sum += (*it1 - *it2) * (*it1 - *it2);
    }
    return std::min(thres, value_type(factor * std::sqrt(sum)));
}
/**
 * @brief Scaled Euclidean distance hard-clipped at max_distance_:
 *        min(max_distance_, scal_ * ||a - b||).
 *
 * @param a first feature vector
 * @param b second feature vector
 * @return hard-clipped scaled Euclidean distance
 */
template <typename V>
template <typename Container>
auto Euclidean_hard_clipped<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    distance_type sum = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
        sum += (*it1 - *it2) * (*it1 - *it2);
    }
    return std::min(max_distance_, value_type(scal_ * std::sqrt(sum)));
}
/**
 * @brief Scaled Euclidean distance with a soft (exponential) saturation:
 *        linear (scal_ * d) up to x_, then an exponential approach toward
 *        the asymptote F_ + y_.
 *
 * @param a first feature vector
 * @param b second feature vector
 * @return soft-clipped scaled Euclidean distance
 */
template <typename V>
template <typename Container>
auto Euclidean_soft_clipped<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    distance_type sum = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
        sum += (*it1 - *it2) * (*it1 - *it2);
    }
    auto distance = std::sqrt(sum);
    if (distance > x_) {
        // saturating branch: smooth exponential decay toward F_ + y_
        return F_ * (value_type(1) - std::exp((-distance + x_) / T_)) + y_;
    } else {
        // linear branch below the knee point x_
        return scal_ * distance;
    }
}
/**
 * @brief Precomputes the per-dimension mean and standard deviation of dataset
 *        A (one sample per row); operator() later z-scores samples with these
 *        statistics before measuring the Euclidean distance.
 *
 * Fixes over the original draft: elements are read as (*it)[i] — the original
 * *it[i] does not compile for nested containers — and both the mean and the
 * variance are averaged over the number of samples A.size(), not over the
 * number of dimensions.
 *
 * @param A dataset, A[k][i] = value of dimension i in sample k
 */
template <typename V>
template <typename Container>
Euclidean_standardized<V>::Euclidean_standardized(const Container &A) : mean(A[0].size(), 0), sigma(A[0].size(), 0)
{
    const size_t n_samples = A.size();
    const size_t n_dims = A[0].size();
    // accumulate per-dimension sums
    for (auto it = A.begin(); it != A.end(); ++it) {
        for (size_t i = 0; i < n_dims; ++i) {
            mean[i] += (*it)[i];
        }
    }
    for (size_t i = 0; i < n_dims; ++i) {
        mean[i] /= value_type(n_samples);
    }
    // accumulate per-dimension squared deviations
    for (auto it = A.begin(); it != A.end(); ++it) {
        for (size_t i = 0; i < n_dims; ++i) {
            sigma[i] += std::pow((*it)[i] - mean[i], 2);
        }
    }
    for (size_t i = 0; i < n_dims; ++i) {
        // population standard deviation per dimension
        sigma[i] = std::sqrt(sigma[i] / value_type(n_samples));
    }
}
/*** Euclidean distance between z-scored samples; standardization uses the
     per-dimension mean/sigma captured at construction. ***/
template <typename V>
template <typename Container>
auto Euclidean_standardized<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    distance_type acc = 0;
    for (size_t k = 0; k < a.size(); ++k) {
        const auto za = (a[k] - mean[k]) / sigma[k];
        const auto zb = (b[k] - mean[k]) / sigma[k];
        acc += std::pow(za - zb, 2);
    }
    return std::sqrt(acc);
}
/**
 * @brief Precomputes the per-dimension mean and standard deviation of dataset
 *        A (one sample per row); operator() later z-scores samples with these
 *        statistics before measuring the Manhattan distance.
 *
 * Fixes over the original draft: elements are read as (*it)[i] — the original
 * *it[i] does not compile for nested containers — and both the mean and the
 * variance are averaged over the number of samples A.size(), not over the
 * number of dimensions.
 *
 * @param A dataset, A[k][i] = value of dimension i in sample k
 */
template <typename V>
template <typename Container>
Manhatten_standardized<V>::Manhatten_standardized(const Container &A) : mean(A[0].size(), 0), sigma(A[0].size(), 0)
{
    const size_t n_samples = A.size();
    const size_t n_dims = A[0].size();
    // accumulate per-dimension sums
    for (auto it = A.begin(); it != A.end(); ++it) {
        for (size_t i = 0; i < n_dims; ++i) {
            mean[i] += (*it)[i];
        }
    }
    for (size_t i = 0; i < n_dims; ++i) {
        mean[i] /= value_type(n_samples);
    }
    // accumulate per-dimension squared deviations
    for (auto it = A.begin(); it != A.end(); ++it) {
        for (size_t i = 0; i < n_dims; ++i) {
            sigma[i] += std::pow((*it)[i] - mean[i], 2);
        }
    }
    for (size_t i = 0; i < n_dims; ++i) {
        // population standard deviation per dimension
        sigma[i] = std::sqrt(sigma[i] / value_type(n_samples));
    }
}
/*** Manhattan distance between z-scored samples; standardization uses the
     per-dimension mean/sigma captured at construction. ***/
template <typename V>
template <typename Container>
auto Manhatten_standardized<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    distance_type acc = 0;
    for (size_t k = 0; k < a.size(); ++k) {
        const auto za = (a[k] - mean[k]) / sigma[k];
        const auto zb = (b[k] - mean[k]) / sigma[k];
        acc += std::abs(za - zb);
    }
    return acc;
}
/**
 * @brief Manhattan (L1, city-block) distance: sum(|a_i - b_i|).
 *
 * @param a first feature vector
 * @param b second feature vector
 * @return L1 distance
 */
template <typename V>
template <typename Container>
auto Manhatten<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    distance_type sum = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ; this also
    // matches the loop condition of the Euclidean container overload above.
    for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
        sum += std::abs(*it1 - *it2);
    }
    return sum;
}
/**
 * @brief Minkowski (p-norm) distance: (sum(|a_i - b_i|^p))^(1/p),
 *        with the exponent taken from the member `p`.
 *
 * @param a first feature vector
 * @param b second feature vector
 * @return p-norm distance
 */
template <typename V>
template <typename Container>
auto P_norm<V>::operator()(const Container &a, const Container &b) const -> distance_type
{
    static_assert(std::is_floating_point<value_type>::value, "T must be a float type");
    distance_type sum = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
        sum += std::pow(std::abs(*it1 - *it2), p);
    }
    return std::pow(sum, 1 / p);
}
/**
 * @brief Angular (cosine) distance: acos(cosine similarity) / pi,
 *        normalized into [0, 1].
 *
 * @param A first feature vector
 * @param B second feature vector
 * @return normalized angular distance
 */
template <typename V>
template <typename Container>
auto Cosine<V>::operator()(const Container &A, const Container &B) const -> distance_type
{
    value_type dot = 0, denom_a = 0, denom_b = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = A.begin(), it2 = B.begin(); it1 != A.end() && it2 != B.end(); ++it1, ++it2) {
        dot += *it1 * *it2;
        denom_a += *it1 * *it1;
        denom_b += *it2 * *it2;
    }
    return std::acos(dot / (std::sqrt(denom_a) * std::sqrt(denom_b))) / M_PI;
}
/**
 * @brief Weierstrass (hyperbolic) distance:
 *        acosh(sqrt(1 + <A,A>) * sqrt(1 + <B,B>) - <A,B>).
 *
 * @param A first feature vector
 * @param B second feature vector
 * @return hyperbolic distance
 */
template <typename V>
template <typename Container>
auto Weierstrass<V>::operator()(const Container &A, const Container &B) const -> distance_type
{
    value_type dot_ab = 0, dot_a = 0, dot_b = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = A.begin(), it2 = B.begin(); it1 != A.end() && it2 != B.end(); ++it1, ++it2) {
        dot_ab += *it1 * *it2;
        dot_a += *it1 * *it1;
        dot_b += *it2 * *it2;
    }
    return std::acosh(std::sqrt(1 + dot_a) * std::sqrt(1 + dot_b) - dot_ab);
}
/**
 * @brief Inverted cosine distance: |1 - cosine similarity|,
 *        i.e. 0 for parallel vectors, up to 2 for opposite vectors.
 *
 * @param A first feature vector
 * @param B second feature vector
 * @return inverted cosine distance
 */
template <typename V>
template <typename Container>
auto CosineInverted<V>::operator()(const Container &A, const Container &B) const -> distance_type
{
    value_type dot = 0, denom_a = 0, denom_b = 0;
    // '&&' (the original used '||') stops at the shorter container and avoids
    // dereferencing a past-the-end iterator when the lengths differ.
    for (auto it1 = A.begin(), it2 = B.begin(); it1 != A.end() && it2 != B.end(); ++it1, ++it2) {
        dot += *it1 * *it2;
        denom_a += *it1 * *it1;
        denom_b += *it2 * *it2;
    }
    return std::abs(1 - dot / (std::sqrt(denom_a) * std::sqrt(denom_b)));
}
/*** Chebyshev (L-infinity) distance: the largest absolute per-coordinate
     difference between the two vectors. ***/
template <typename V>
template <typename Container>
auto Chebyshev<V>::operator()(const Container &lhs, const Container &rhs) const -> distance_type
{
    distance_type best = 0;
    for (std::size_t k = 0; k < lhs.size(); k++) {
        const auto gap = std::abs(lhs[k] - rhs[k]);
        if (gap > best)
            best = gap;
    }
    return best;
}
} // namespace metric
| 7,567
|
C++
|
.cpp
| 212
| 33.801887
| 116
| 0.633597
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,530
|
Kohonen.cpp
|
metric-space-ai_metric/metric/distance/k-structured/Kohonen.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 PANDA Team
*/
#include "Kohonen.hpp"

#include "metric/utils/poor_mans_quantum.hpp"

#include <blaze/Blaze.h>

#include <cmath>
#include <utility>
#include <vector>
namespace metric {
/**
 * @brief Builds a Kohonen distance from an already trained SOM, taken by
 *        rvalue reference, and precomputes the node-to-node shortest-path
 *        distance matrix over the SOM graph.
 *
 * The original copied the SOM despite taking it by rvalue reference; the
 * init list now moves it, and the body reads the metric from the member
 * (the parameter is moved-from at that point).
 *
 * @param som_model trained SOM whose nodes span the data manifold (consumed)
 * @param samples   training samples forwarded to the distance-matrix setup
 * @param use_sparsification       drop the longest graph edges before routing
 * @param sparsification_coef      fraction of edges kept by sparcify_graph
 * @param use_reverse_diffusion    smooth node weights via metric::Redif first
 * @param reverse_diffusion_neighbors neighbourhood size for reverse diffusion
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
Kohonen<D, Sample, Graph, Metric, Distribution>::Kohonen(metric::SOM<Sample, Graph, Metric, Distribution> &&som_model,
                                                         const std::vector<Sample> &samples, bool use_sparsification,
                                                         double sparsification_coef, bool use_reverse_diffusion,
                                                         size_t reverse_diffusion_neighbors)
    : som_model(std::move(som_model)), use_sparsification_(use_sparsification),
      sparsification_coef_(sparsification_coef), use_reverse_diffusion_(use_reverse_diffusion),
      reverse_diffusion_neighbors_(reverse_diffusion_neighbors)
{
    // The parameter shadows the member here; read the member explicitly.
    this->metric = this->som_model.get_metric();
    calculate_distance_matrix(samples);
}
/**
 * @brief Builds a Kohonen distance from an already trained SOM (copied) and
 *        precomputes the node-to-node shortest-path distance matrix over the
 *        SOM graph.
 *
 * @param som_model trained SOM whose nodes span the data manifold
 * @param samples   training samples forwarded to the distance-matrix setup
 * @param use_sparsification       drop the longest graph edges before routing
 * @param sparsification_coef      fraction of edges kept by sparcify_graph
 * @param use_reverse_diffusion    smooth node weights via metric::Redif first
 * @param reverse_diffusion_neighbors neighbourhood size for reverse diffusion
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
Kohonen<D, Sample, Graph, Metric, Distribution>::Kohonen(
    const metric::SOM<Sample, Graph, Metric, Distribution> &som_model, const std::vector<Sample> &samples,
    bool use_sparsification, double sparsification_coef, bool use_reverse_diffusion, size_t reverse_diffusion_neighbors)
    : som_model(som_model), use_sparsification_(use_sparsification), sparsification_coef_(sparsification_coef),
      use_reverse_diffusion_(use_reverse_diffusion), reverse_diffusion_neighbors_(reverse_diffusion_neighbors)
{
    this->metric = som_model.get_metric();
    calculate_distance_matrix(samples);
}
/**
 * @brief Convenience constructor: trains a fresh nodesWidth x nodesHeight SOM
 *        on the samples (learn rate 0.8 -> 0.2, 20 iterations) and then
 *        precomputes the node-to-node shortest-path distance matrix.
 *
 * @param samples      training samples
 * @param nodesWidth   SOM grid width (nodes)
 * @param nodesHeight  SOM grid height (nodes)
 * @param use_sparsification       drop the longest graph edges before routing
 * @param sparsification_coef      fraction of edges kept by sparcify_graph
 * @param use_reverse_diffusion    smooth node weights via metric::Redif first
 * @param reverse_diffusion_neighbors neighbourhood size for reverse diffusion
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
Kohonen<D, Sample, Graph, Metric, Distribution>::Kohonen(const std::vector<Sample> &samples, size_t nodesWidth,
                                                         size_t nodesHeight, bool use_sparsification,
                                                         double sparsification_coef, bool use_reverse_diffusion,
                                                         size_t reverse_diffusion_neighbors)
    : som_model(Graph(nodesWidth, nodesHeight), Metric(), 0.8, 0.2, 20), use_sparsification_(use_sparsification),
      sparsification_coef_(sparsification_coef), use_reverse_diffusion_(use_reverse_diffusion),
      reverse_diffusion_neighbors_(reverse_diffusion_neighbors)
{
    this->metric = som_model.get_metric();
    som_model.train(samples);
    calculate_distance_matrix(samples);
}
/**
 * @brief Fully parameterized constructor: trains a SOM with the given graph,
 *        metric, learn-rate schedule, iteration count and neighbourhood
 *        distribution, then precomputes the shortest-path distance matrix.
 *
 * @param samples           training samples
 * @param graph             SOM topology
 * @param metric            ground metric used for training and edge weights
 * @param start_learn_rate  initial learning rate
 * @param finish_learn_rate final learning rate
 * @param iterations        number of training iterations
 * @param distribution      neighbourhood distribution for SOM training
 * @param use_sparsification       drop the longest graph edges before routing
 * @param sparsification_coef      fraction of edges kept by sparcify_graph
 * @param use_reverse_diffusion    smooth node weights via metric::Redif first
 * @param reverse_diffusion_neighbors neighbourhood size for reverse diffusion
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
Kohonen<D, Sample, Graph, Metric, Distribution>::Kohonen(const std::vector<Sample> &samples, Graph graph, Metric metric,
                                                         double start_learn_rate, double finish_learn_rate,
                                                         size_t iterations, Distribution distribution,
                                                         bool use_sparsification, double sparsification_coef,
                                                         bool use_reverse_diffusion, size_t reverse_diffusion_neighbors)
    : som_model(graph, metric, start_learn_rate, finish_learn_rate, iterations, distribution),
      use_sparsification_(use_sparsification), sparsification_coef_(sparsification_coef),
      use_reverse_diffusion_(use_reverse_diffusion), reverse_diffusion_neighbors_(reverse_diffusion_neighbors)
{
    this->metric = som_model.get_metric();
    som_model.train(samples);
    calculate_distance_matrix(samples);
}
/**
 * @brief Kohonen distance between two samples: distance from each sample to
 *        its best-matching SOM node plus the precomputed shortest-path
 *        distance between those nodes; falls back to the plain ground metric
 *        when that is already shorter.
 *
 * @param sample_1 first sample
 * @param sample_2 second sample
 * @return graph-aware distance between the samples
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
auto Kohonen<D, Sample, Graph, Metric, Distribution>::operator()(const Sample &sample_1, const Sample &sample_2) const
    -> distance_type
{
    // then we calculate distributions over SOM space for samples
    // Best-matching unit (closest SOM node) for each sample.
    auto bmu_1 = som_model.BMU(sample_1);
    auto bmu_2 = som_model.BMU(sample_2);

    std::vector<Sample> nodes = som_model.get_weights();

    auto direct_distance = metric(sample_1, sample_2);
    double to_nearest_1 = metric(sample_1, nodes[bmu_1]);
    double to_nearest_2 = metric(nodes[bmu_2], sample_2);

    // Shortcut: the path through the SOM graph can never beat the direct
    // ground distance when even its two endpoints already exceed it.
    if (direct_distance < to_nearest_1 + to_nearest_2) {
        return direct_distance;
    }

    return to_nearest_1 + distance_matrix[bmu_1][bmu_2] + to_nearest_2;
}
/**
 * @brief Precomputes all-pairs shortest-path distances between SOM nodes.
 *
 * Steps: optionally smooth the node weights by reverse diffusion, weight
 * every graph edge with the ground-metric distance between its endpoint
 * nodes, optionally sparsify the edge set, then run single-source shortest
 * paths (Dijkstra) from every node, filling distance_matrix / predecessors.
 *
 * @param samples training samples (used only by the reverse-diffusion step)
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
void Kohonen<D, Sample, Graph, Metric, Distribution>::calculate_distance_matrix(const std::vector<Sample> &samples)
{
    std::vector<Sample> nodes = som_model.get_weights();

    if (use_reverse_diffusion_) {
        make_reverese_diffusion(samples);
    }

    // Edge weights: ground-metric distance between weight vectors of adjacent
    // nodes; only the upper triangle is scanned and mirrored to keep symmetry.
    auto matrix = som_model.get_graph().get_matrix();
    blaze::CompressedMatrix<D> blaze_matrix(matrix.rows(), matrix.columns());
    for (size_t i = 0; i < matrix.rows(); ++i) {
        for (size_t j = i + 1; j < matrix.columns(); ++j) {
            if (matrix(i, j) > 0) {
                blaze_matrix(i, j) = metric(nodes[i], nodes[j]);
                blaze_matrix(j, i) = metric(nodes[i], nodes[j]);
            }
        }
    }

    if (use_sparsification_) {
        sparcify_graph(blaze_matrix);
    }

    // Re-read the (possibly sparsified) adjacency; only its row count is used below.
    matrix = som_model.get_graph().get_matrix();

    std::vector<D> distances;
    std::vector<int> predecessor;
    // NOTE(review): loop index is int (auto i = 0) compared with
    // nodes.size() — fine for realistic node counts, size_t would be cleaner.
    for (auto i = 0; i < nodes.size(); i++) {
        // Dijkstra from node i over the weighted adjacency.
        std::tie(distances, predecessor) = calculate_distance(blaze_matrix, i, matrix.rows());
        distance_matrix.push_back(distances);
        predecessors.push_back(predecessor);
    }
}
/**
 * @brief Removes the share (1 - sparsification_coef_) of graph edges with the
 *        largest ground distance, both from the SOM graph and from the edge
 *        weight matrix, before shortest paths are computed.
 *
 * @param direct_distance_matrix symmetric matrix of edge weights; pruned in place
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
void Kohonen<D, Sample, Graph, Metric, Distribution>::sparcify_graph(blaze::CompressedMatrix<D> &direct_distance_matrix)
{
    auto matrix = som_model.get_graph().get_matrix();

    // sort_indexes orders edges by descending weight, so the front of the
    // list holds the longest (most expensive) edges — those get removed.
    auto sorted_pairs = sort_indexes(direct_distance_matrix);
    for (size_t i = 0; i < sorted_pairs.size() * (1 - sparsification_coef_); ++i) {
        auto p = sorted_pairs[i];
        matrix(p.first, p.second) = 0;
        direct_distance_matrix(p.first, p.second) = 0;
        direct_distance_matrix(p.second, p.first) = 0;
    }
    som_model.get_graph().updateEdges(matrix);
}
/**
 * @brief Replaces the SOM node weights with their reverse-diffusion encoding
 *        (metric::Redif, 10 iterations) before edge distances are measured.
 *
 * @param samples unused here — the diffusion runs on the SOM node weights
 *                only; the parameter is kept for interface symmetry.
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
void Kohonen<D, Sample, Graph, Metric, Distribution>::make_reverese_diffusion(const std::vector<Sample> &samples)
{
    metric::Redif redif(som_model.get_weights(), reverse_diffusion_neighbors_, 10, metric);
    som_model.updateWeights(redif.get_train_encoded());
}
/**
 * @brief Prints the node sequence of the shortest path between two nodes to
 *        stdout as "a -> b -> ... -> ", or a "No path" message when to_node
 *        is unreachable from from_node.
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
void Kohonen<D, Sample, Graph, Metric, Distribution>::print_shortest_path(int from_node, int to_node) const
{
    // Unreachable target: report and stop without printing any arrow.
    if (to_node != from_node && predecessors[from_node][to_node] == -1) {
        std::cout << "No path from " << from_node << " to " << to_node << std::endl;
        return;
    }
    // Walk the predecessor chain first so nodes print in path order.
    if (to_node != from_node) {
        print_shortest_path(from_node, predecessors[from_node][to_node]);
    }
    std::cout << to_node << " -> ";
}
/**
 * @brief Returns the node sequence of the shortest path from from_node to
 *        to_node (empty when to_node is unreachable).
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
std::vector<int> Kohonen<D, Sample, Graph, Metric, Distribution>::get_shortest_path(int from_node, int to_node) const
{
    std::vector<int> route;
    get_shortest_path_(route, from_node, to_node);
    return route;
}
/**
 * @brief Mean relative stretch of the Kohonen distance against the Euclidean
 *        distance over all ordered sample pairs, minus 1 — so 0 means the
 *        graph distance matches Euclidean on average.
 *
 * Pairs with zero Euclidean distance or an infinite Kohonen distance
 * (disconnected BMU nodes) are excluded from the average.
 *
 * @param samples samples to evaluate the distortion on
 * @return mean(kohonen / euclidean) - 1 over the admissible pairs
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
double Kohonen<D, Sample, Graph, Metric, Distribution>::distortion_estimate(const std::vector<Sample> &samples)
{
    metric::Euclidean<D> euclidean_distance;

    double sum = 0;
    int count = 0;
    for (size_t i = 0; i < samples.size(); ++i) {
        for (size_t j = 0; j < samples.size(); ++j) {
            if (i != j) {
                auto euclidean = euclidean_distance(samples[i], samples[j]);
                auto kohonen = operator()(samples[i], samples[j]);
                // skip degenerate (zero-baseline) and disconnected pairs
                if (euclidean != 0 && !std::isinf(kohonen)) {
                    sum += kohonen / euclidean;
                    count++;
                }
            }
        }
    }
    double mean = sum / count;

    return mean - 1;
}
/**
 * @brief Recursive helper: appends the node sequence from from_node to
 *        to_node to `path` by walking the predecessor table backwards.
 *        Leaves `path` untouched when to_node is unreachable.
 *
 * @param path      accumulator the route is appended to (also returned)
 * @param from_node source node of the precomputed predecessor table
 * @param to_node   destination node
 * @return the accumulated path (a copy of `path`)
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
std::vector<int> Kohonen<D, Sample, Graph, Metric, Distribution>::get_shortest_path_(std::vector<int> &path,
                                                                                     int from_node, int to_node) const
{
    if (to_node == from_node) {
        path.push_back(to_node);
    } else if (predecessors[from_node][to_node] == -1) {
        // unreachable: deliberately append nothing
    } else {
        // prepend the route to the predecessor, then this node
        get_shortest_path_(path, from_node, predecessors[from_node][to_node]);
        path.push_back(to_node);
    }
    return path;
}
/**
 * @brief Linear scan for the unsettled node with the smallest tentative
 *        distance (the selection step of the O(V^2) Dijkstra loop).
 * @return index of that node, or -1 when every node is already marked
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
int Kohonen<D, Sample, Graph, Metric, Distribution>::get_closest_unmarked_node(const std::vector<D> &distance,
                                                                               const std::vector<bool> &mark,
                                                                               int nodes_count) const
{
    int best_node = -1;
    D best_distance = INFINITY;
    for (int node = 0; node < nodes_count; node++) {
        if (mark[node])
            continue;
        if (best_distance >= distance[node]) {
            best_distance = distance[node];
            best_node = node;
        }
    }
    return best_node;
}
/**
 * @brief Single-source shortest paths (classic O(V^2) Dijkstra) from
 *        from_node over the weighted adjacency matrix.
 *
 * @param adjust_matrix symmetric edge-weight matrix (0 = no edge)
 * @param from_node     source node index
 * @param nodes_count   number of nodes in the graph
 * @return tuple of (distance to every node — INFINITY when unreachable,
 *         predecessor of every node on its shortest path — -1 when none)
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
auto Kohonen<D, Sample, Graph, Metric, Distribution>::calculate_distance(
    const blaze::CompressedMatrix<D> &adjust_matrix, int from_node, int nodes_count) const
    -> std::tuple<std::vector<D>, std::vector<int>>
{
    std::vector<bool> mark(nodes_count);
    std::vector<D> distances(nodes_count);
    std::vector<int> predecessor(nodes_count);

    // initialize
    for (int i = 0; i < nodes_count; i++) {
        mark[i] = false;
        predecessor[i] = -1;
        distances[i] = INFINITY;
    }
    distances[from_node] = 0;

    //
    int closestUnmarkedNode;
    int count = 0;
    while (count < nodes_count) {
        // Settle the nearest unsettled node; in a disconnected component the
        // remaining nodes are settled at INFINITY and keep predecessor -1.
        closestUnmarkedNode = get_closest_unmarked_node(distances, mark, nodes_count);
        mark[closestUnmarkedNode] = true;
        // Relax every edge leaving the just-settled node.
        for (int i = 0; i < nodes_count; i++) {
            if (!mark[i] && adjust_matrix(closestUnmarkedNode, i) > 0) {
                if (distances[i] > distances[closestUnmarkedNode] + adjust_matrix(closestUnmarkedNode, i)) {
                    distances[i] = distances[closestUnmarkedNode] + adjust_matrix(closestUnmarkedNode, i);
                    predecessor[i] = closestUnmarkedNode;
                }
            }
        }
        count++;
    }
    return {distances, predecessor};
}
/**
 * @brief Collects every populated upper-triangle entry of the edge-weight
 *        matrix and returns its (row, col) positions ordered by descending
 *        weight; ties keep their scan (row-major) order.
 */
template <typename D, typename Sample, typename Graph, typename Metric, typename Distribution>
std::vector<std::pair<size_t, size_t>>
Kohonen<D, Sample, Graph, Metric, Distribution>::sort_indexes(const blaze::CompressedMatrix<D> &matrix)
{
    // Pair each stored edge weight with its matrix position.
    std::vector<std::pair<D, std::pair<size_t, size_t>>> entries;
    for (size_t row = 0; row < matrix.rows(); ++row) {
        for (size_t col = row + 1; col < matrix.columns(); ++col) {
            if (matrix(row, col) > 0) {
                entries.push_back({matrix(row, col), {row, col}});
            }
        }
    }

    // Descending by weight; stable so equal weights preserve scan order.
    stable_sort(entries.begin(), entries.end(),
                [](const auto &lhs, const auto &rhs) { return lhs.first > rhs.first; });

    std::vector<std::pair<size_t, size_t>> ordered;
    ordered.reserve(entries.size());
    for (const auto &entry : entries) {
        ordered.push_back(entry.second);
    }
    return ordered;
}
} // namespace metric
| 11,145
|
C++
|
.cpp
| 254
| 40.748031
| 120
| 0.716183
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,531
|
SSIM.cpp
|
metric-space-ai_metric/metric/distance/k-structured/SSIM.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#ifndef _METRIC_DISTANCE_K_STRUCTURED_SSIM_CPP
#define _METRIC_DISTANCE_K_STRUCTURED_SSIM_CPP
#include "SSIM.hpp"
#include <cmath>
#include <vector>
#ifndef M_PI
#define M_PI 3.14159326
#endif
namespace metric {
namespace SSIM_details {
// Builds an n x n Gaussian weighting window centred on cell (n/2, n/2),
// normalized so the coefficients sum to 1 (2.25 is the fixed shape constant
// of the original implementation).
inline std::vector<std::vector<double>> gaussian_blur(size_t n)
{
    std::vector<std::vector<double>> window(n, std::vector<double>(n));
    const double centre = static_cast<double>(n / 2);
    double total = 0.0;
    for (size_t row = 0; row < n; row++) {
        for (size_t col = 0; col < n; col++) {
            const double dr = static_cast<double>(row) - centre;
            const double dc = static_cast<double>(col) - centre;
            const double weight = std::exp(-((dr * dr + dc * dc) / 2.25));
            total += weight;
            window[row][col] = weight;
        }
    }
    // normalize so the window integrates to 1
    for (size_t row = 0; row < n; row++) {
        for (size_t col = 0; col < n; col++) {
            window[row][col] = window[row][col] / total;
        }
    }
    return window;
}
} // namespace SSIM_details
namespace detail {
// SFINAE probe: the int overload is viable only when T supports double
// indexing (t[0][0]); the long overload is the lower-ranked fallback.
template <class> struct sfinae_true : std::true_type {
};
template <typename T> static auto test_vec_of_vec(int) -> sfinae_true<decltype(std::declval<T>()[0][0])>;
template <typename> static auto test_vec_of_vec(long) -> std::false_type;
} // namespace detail

// Trait: true when T behaves like a 2D container (vector of vectors / image).
template <typename T> struct is_vec_of_vec : decltype(detail::test_vec_of_vec<T>(0)) {
};
/*** distance measure for images by structural similarity. ***/
/**
 * @brief SSIM-based distance between two equally sized 2D images.
 *
 * Slides an 11x11 Gaussian window over both images, computes the local
 * luminance index S1 and contrast/structure index S2, and accumulates
 * sqrt(2 - S1 - S2) as the local distance; the result is the average over
 * all fully fitting window positions. `dynamic_range` scales the C1/C2
 * stabilizer constants; `masking` < 2 enables the visibility-weight branch.
 */
template <typename D, typename V>
template <typename Container>
auto SSIM<D, V>::operator()(const Container &img1, const Container &img2) const -> distance_type
{
    if constexpr (is_vec_of_vec<Container>() != true) {
        // NOTE(review): static_assert(true, ...) can never fire, so a non-2D
        // container silently yields the zero distance below instead of a
        // compile error; a dependent-false assertion was likely intended.
        static_assert(true, "container should be 2D");
    } else {
        double sum = 0.0;
        bool is_visibility = (masking < 2.0); // use stabilizer

        // create gaussian filter matrix
        size_t n = 11;
        auto gauss = SSIM_details::gaussian_blur(n);

        // SSIM stabilizers: C1 = (K1*L)^2, C2 = (K2*L)^2 with L = dynamic_range.
        double C1 = std::pow(0.01 /*K1*/ * dynamic_range, 2);
        double C2 = std::pow(0.03 /*K2*/ * dynamic_range, 2);

        // Slide the window over every position where it fully fits.
        for (size_t i = 0; i < img1.size() - n + 1; ++i) {
            for (size_t j = 0; j < img1[0].size() - n + 1; ++j) {
                // initialize values
                double mu1 = 0.0, mu2 = 0.0;     // Gaussian-weighted window means
                double sigma1 = 0.0, sigma2 = 0; // second moments (become variances below)
                double corr = 0.0, sigma12 = 0;  // cross-moment / sqrt of variance product
                double S1 = 0;
                double S2 = 0;

                // Weighted first and second moments over the window.
                for (size_t y = 0; y < n; y++) {
                    for (size_t x = 0; x < n; x++) {
                        double k1 = img1[i + y][j + x];
                        double k2 = img2[i + y][j + x];
                        double valv = gauss[y][x];
                        mu1 += k1 * valv;
                        mu2 += k2 * valv;
                        sigma1 += k1 * k1 * valv;
                        sigma2 += k2 * k2 * valv;
                        corr += k1 * k2 * valv;
                    }
                }

                double visibility = 1; // default
                if (is_visibility) {
                    // Contrast-masking visibility weight: ratio of L2 energy to
                    // Lp energy of the mean-removed window, clamped to [0, 1].
                    double l2norm1 = 0.0;
                    double l2norm2 = 0.0;
                    double lpnorm1 = 0.0;
                    double lpnorm2 = 0.0;
                    double sscale = n * n;
                    double C3 = C2 * std::pow(sscale, 2.0 / masking - 1.0); // scaling

                    for (size_t y = 0; y < n; y++) {
                        for (size_t x = 0; x < n; x++) {
                            double k1 = img1[i + y][j + x];
                            double k2 = img2[i + y][j + x];
                            double valv = gauss[y][x] * sscale;
                            double v1 = k1 - mu1;
                            double v2 = k2 - mu2;
                            l2norm1 += v1 * v1 * valv;
                            l2norm2 += v2 * v2 * valv;
                            lpnorm1 += std::pow(std::abs(v1), masking) * valv;
                            lpnorm2 += std::pow(std::abs(v2), masking) * valv;
                        }
                    }
                    lpnorm1 = std::pow(lpnorm1, 2.0 / masking);
                    lpnorm2 = std::pow(lpnorm2, 2.0 / masking);
                    visibility = (l2norm1 + l2norm2 + C3) / (lpnorm1 + lpnorm2 + C3);
                    visibility = std::pow(visibility, masking / 2.0);
                    if (visibility > 1) {
                        visibility = 1;
                    } else if (visibility < 0) {
                        visibility = 0;
                    }
                    // NOTE(review): `visibility` is computed and clamped but not
                    // applied to the local distance below — presumably
                    // unfinished; confirm against the intended SSIM variant.
                }

                // Convert raw moments into (biased) variances; guard tiny
                // negative values caused by floating-point rounding.
                sigma1 -= mu1 * mu1;
                sigma2 -= mu2 * mu2;
                corr -= mu1 * mu2;
                if (sigma1 < 0) {
                    sigma1 = 0;
                }
                if (sigma2 < 0) {
                    sigma2 = 0;
                }
                // NOTE(review): standard SSIM uses the covariance (`corr`) in
                // S2; here sigma12 = sqrt(sigma1 * sigma2) and `corr` ends up
                // unused — confirm whether this variant is intentional.
                sigma12 = std::sqrt(sigma1 * sigma2);

                // Structural Indicies
                S1 = (2.0 * mu1 * mu2 + C1) / (mu1 * mu1 + mu2 * mu2 + C1);
                S2 = (2.0 * sigma12 + C2) / (sigma1 + sigma2 + C2);

                // sum up the local ssim_distance
                double value = 2.0 - S1 - S2;
                if (value > 0.0) {
                    sum += std::sqrt(value);
                }
            }
        }
        return sum / ((img1.size() - n + 1) * (img1[0].size() - n + 1)); // normalize the sum
    }
    return distance_type{};
}
} // namespace metric
#endif
| 4,340
|
C++
|
.cpp
| 138
| 27.398551
| 105
| 0.576895
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| true
| false
| true
| false
|
1,531,532
|
EMD.cpp
|
metric-space-ai_metric/metric/distance/k-structured/EMD.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#ifndef _METRIC_DISTANCE_K_STRUCTURED_EMD_CPP
#define _METRIC_DISTANCE_K_STRUCTURED_EMD_CPP
#include "EMD.hpp"
/*Fast and Robust Earth Mover's Distances
Ofir Pele, Michael Werman
ICCV 2009
Original implementation by Ofir Pele 2009-2012,
Refactoring, API change and performance optimization by Michael Welsch, 2018
All rights reserved.
*/
#include <set>
// #include <limits>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <list>
#include <numeric>
#include <type_traits>
#include <utility>
#include <vector>
#include <cassert> // uncommented by Max F in order to build under Clang & QMake
namespace {
// Debug helper: writes the vector to stdout as "[ v1 v2 ... ]".
template <typename T> inline void print_vector(const std::vector<T> &v)
{
    std::cout << "[ ";
    for (auto it = v.begin(); it != v.end(); ++it) {
        std::cout << *it << " ";
    }
    std::cout << "]";
}
// Debug helper: writes the matrix to stdout, one indented row per line.
template <typename T> inline void print_matrix(const std::vector<std::vector<T>> &m)
{
    std::cout << "[ " << std::endl;
    for (auto it = m.begin(); it != m.end(); ++it) {
        std::cout << " ";
        print_vector(*it);
        std::cout << std::endl;
    }
    std::cout << "]";
}
} // namespace
namespace metric {
namespace EMD_details {
// // disabled by Max F because of no need after rolling back to original code
// Largest entry of a 2D cost matrix; every row is scanned over C[0].size()
// columns, matching the layout produced by the grid builders below.
template <typename Container> typename Container::value_type::value_type max_in_distance_matrix(const Container &C)
{
    typedef typename Container::value_type::value_type T;
    T largest = 0;
    for (size_t row = 0; row < C.size(); ++row) {
        for (size_t col = 0; col < C[0].size(); ++col) {
            if (C[row][col] > largest)
                largest = C[row][col];
        }
    }
    return largest;
}
// Default ground metric for EMD: Euclidean distance scaled by `factor` and
// clipped from above at `thres` (a thresholded ground metric keeps the EMD
// robust against outlier bins).
template <typename Container> struct Euclidean_thresholded_EMD_default {
    typedef typename Container::value_type T;
    static_assert(std::is_floating_point<T>::value, "T must be a float type");

    T thres, factor;

    // min(thres, factor * ||a - b||); iteration now stops at the shorter
    // input — the original '||' condition walked past the end when the
    // lengths differed.
    T operator()(const Container &a, const Container &b) const
    {
        T sum = 0;
        for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
            sum += (*it1 - *it2) * (*it1 - *it2);
        }
        return std::min(thres, T(factor * std::sqrt(sum)));
    }

    Euclidean_thresholded_EMD_default(T thres = 1000, T factor = 3000) : thres(thres), factor(factor) {}
};
// Pairwise ground-distance matrix for every cell of a cols x rows 2D grid.
// Cells are enumerated column-major (index = c * rows + r) and Metric is
// applied to the (row, col) coordinate pairs of every two cells.
template <typename T, typename Metric = Euclidean_thresholded_EMD_default<std::vector<double>>>
std::vector<std::vector<T>> ground_distance_matrix_of_2dgrid(size_t cols, size_t rows)
{
    const size_t n = rows * cols;
    Metric distance;
    std::vector<std::vector<T>> distM(n, std::vector<T>(n));
    for (size_t c1 = 0; c1 < cols; ++c1) {
        for (size_t r1 = 0; r1 < rows; ++r1) {
            // flat index of the (r1, c1) cell, replacing the original
            // wrap-around "size_t j = -1; ++j" counter idiom
            const size_t j = c1 * rows + r1;
            for (size_t c2 = 0; c2 < cols; ++c2) {
                for (size_t r2 = 0; r2 < rows; ++r2) {
                    const size_t i = c2 * rows + r2;
                    const std::vector<double> p = {static_cast<double>(r1), static_cast<double>(c1)};
                    const std::vector<double> q = {static_cast<double>(r2), static_cast<double>(c2)};
                    distM[i][j] = distance(p, q);
                }
            }
        }
    }
    return distM;
}
// Pairwise ground-distance matrix between arbitrary grid points: Metric is
// applied to every ordered pair of rows of `grid`.
template <typename T, typename Metric = Euclidean_thresholded_EMD_default<std::vector<double>>>
std::vector<std::vector<T>> ground_distance_matrix_of_2dgrid(std::vector<std::vector<T>> grid)
{
    const size_t n = grid.size();
    Metric distance;
    std::vector<std::vector<T>> distM(n, std::vector<T>(n));
    for (size_t row = 0; row < n; ++row) {
        for (size_t col = 0; col < n; ++col) {
            distM[row][col] = distance(grid[row], grid[col]);
        }
    }
    return distM;
}
enum FLOW_TYPE_T { NO_FLOW = 0, WITHOUT_TRANSHIPMENT_FLOW, WITHOUT_EXTRA_MASS_FLOW };
/// returns the flow from/to transhipment vertex given flow F which was computed using
/// FLOW_TYPE_T of kind WITHOUT_TRANSHIPMENT_FLOW.
/// On return, flow_from_P_to_transhipment[i] is the mass of P[i] routed via
/// the transhipment vertex and flow_from_transhipment_to_Q[j] the mass Q[j]
/// received from it (supply/demand minus the direct P->Q flow).
template <typename T, typename C>
void return_flow_from_to_transhipment_vertex(const std::vector<std::vector<T>> &F, const C &P, const C &Q,
                                             std::vector<T> &flow_from_P_to_transhipment,
                                             std::vector<T> &flow_from_transhipment_to_Q)
{
    // Start from the full supplies/demands, then subtract everything that
    // flowed directly from P to Q.
    flow_from_P_to_transhipment.assign(std::begin(P), std::end(P));
    flow_from_transhipment_to_Q.assign(std::begin(Q), std::end(Q));
    for (size_t i = 0; i < P.size(); ++i) {
        // NOTE(review): inner bound is P.size(); assumes |P| == |Q| — confirm.
        for (size_t j = 0; j < P.size(); ++j) {
            flow_from_P_to_transhipment[i] -= F[i][j];
            flow_from_transhipment_to_Q[j] -= F[i][j];
        }
    }
} // return_flow_from_to_transhipment_vertex
/// Transforms the given flow F which was computed using FLOW_TYPE_T of kind WITHOUT_TRANSHIPMENT_FLOW,
/// to a flow which can be computed using WITHOUT_EXTRA_MASS_FLOW. If you want the flow to the extra mass,
/// you can use return_flow_from_to_transhipment_vertex on the returned F.
template <typename T, typename C> void transform_flow_to_regular(std::vector<std::vector<T>> &F, const C &P, const C &Q)
{
    const size_t N = P.size();
    std::vector<T> flow_from_P_to_transhipment(N);
    std::vector<T> flow_from_transhipment_to_Q(N);
    return_flow_from_to_transhipment_vertex(F, P, Q, flow_from_P_to_transhipment, flow_from_transhipment_to_Q);

    // Greedy two-pointer matching: re-route each unit of mass that went via
    // the transhipment vertex directly from the next source i with leftover
    // outflow to the next sink j with leftover inflow.
    size_t i = 0;
    size_t j = 0;
    while (true) {
        // advance to the next source/sink that still has unmatched mass
        while (i < N && flow_from_P_to_transhipment[i] == 0)
            ++i;
        while (j < N && flow_from_transhipment_to_Q[j] == 0)
            ++j;
        if (i == N || j == N)
            break;

        // transfer the smaller of the two residuals
        if (flow_from_P_to_transhipment[i] < flow_from_transhipment_to_Q[j]) {
            F[i][j] += flow_from_P_to_transhipment[i];
            flow_from_transhipment_to_Q[j] -= flow_from_P_to_transhipment[i];
            flow_from_P_to_transhipment[i] = 0;
        } else {
            F[i][j] += flow_from_transhipment_to_Q[j];
            flow_from_P_to_transhipment[i] -= flow_from_transhipment_to_Q[j];
            flow_from_transhipment_to_Q[j] = 0;
        }
    }
} // transform_flow_to_regular
// template <typename T, FLOW_TYPE_T FLOW_TYPE = NO_FLOW>
// struct EMD
// {
// //typedef typename Container::value_type T;
// T operator()(const std::vector<T> &P, const std::vector<T> &Q,
// const std::vector<std::vector<T>> &C,
// T extra_mass_penalty = -1,
// std::vector<std::vector<T>> *F = NULL);
// };
//------------------------------------------------------------------------------
// POD edge types used by the min-cost-flow solver; the variants differ only
// in which per-edge attributes they carry.

// Directed edge with a cost (input-graph representation).
template <typename T> struct edge {
    edge(size_t to, T cost) : _to(to), _cost(cost) {}
    size_t _to;
    T _cost;
};

// Comparator ordering edges by ascending cost.
template <typename T> struct edgeCompareByCost {
    bool operator()(const edge<T> &a, const edge<T> &b) { return a._cost < b._cost; }
};

// Edge that additionally carries the flow currently assigned to it.
template <typename T> struct edge0 {
    edge0(size_t to, T cost, T flow) : _to(to), _cost(cost), _flow(flow) {}
    size_t _to;
    T _cost;
    T _flow;
};

// Forward residual edge: reduced cost only (capacity is unbounded).
template <typename T> struct edge1 {
    edge1(size_t to, T reduced_cost) : _to(to), _reduced_cost(reduced_cost) {}
    size_t _to;
    T _reduced_cost;
};

// Backward residual edge: reduced cost plus remaining residual capacity.
template <typename T> struct edge2 {
    edge2(size_t to, T reduced_cost, T residual_capacity)
        : _to(to), _reduced_cost(reduced_cost), _residual_capacity(residual_capacity)
    {
    }
    size_t _to;
    T _reduced_cost;
    T _residual_capacity;
};

// Edge annotated with a shortest-path distance.
template <typename T> struct edge3 {
    edge3(size_t to = 0, T dist = 0) : _to(to), _dist(dist) {}
    size_t _to;
    T _dist;
};
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Minimum-cost-flow solver based on successive shortest augmenting paths
// (Dijkstra with node potentials / reduced costs, over a binary heap).
// This is the computational core of the EMD transportation problem: given
// node supplies/demands `e` and a cost adjacency list `c`, it finds the
// cheapest flow `x` that exhausts all supply, and returns the total cost.
template <typename T> class min_cost_flow {
    size_t _num_nodes;
    std::vector<size_t> _nodes_to_Q;  // node id -> current index inside the Dijkstra heap Q

public:
    // e - supply(positive) and demand(negative).
    // c[i] - edges that goes from node i. first is the second nod
    // x - the flow is returned in it
    T operator()(std::vector<T> &e, const std::vector<std::list<edge<T>>> &c, std::vector<std::list<edge0<T>>> &x)
    {
        assert(e.size() == c.size());
        assert(x.size() == c.size());
        _num_nodes = e.size();
        _nodes_to_Q.resize(_num_nodes);
        // init flow: every input edge gets a zero-flow entry plus a mirrored
        // reverse entry with negated cost (residual-graph representation)
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge<T>>::const_iterator it = c[from].begin(); it != c[from].end(); ++it) {
                        x[from].push_back(edge0<T>(it->_to, it->_cost, 0));
                        x[it->_to].push_back(edge0<T>(from, -it->_cost, 0));
                    }
                } // it
            }
        } // from
        // reduced costs for forward edges (c[i,j]-pi[i]+pi[j])
        // Note that for forward edges the residual capacity is infinity
        std::vector<std::list<edge1<T>>> r_cost_forward(_num_nodes);
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge<T>>::const_iterator it = c[from].begin(); it != c[from].end(); ++it) {
                        r_cost_forward[from].push_back(edge1<T>(it->_to, it->_cost));
                    }
                }
            }
        }
        // reduced costs and capacity for backward edges (c[j,i]-pi[j]+pi[i])
        // Since the flow at the beginning is 0, the residual capacity is also zero
        std::vector<std::list<edge2<T>>> r_cost_cap_backward(_num_nodes);
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge<T>>::const_iterator it = c[from].begin(); it != c[from].end(); ++it) {
                        r_cost_cap_backward[it->_to].push_back(edge2<T>(from, -it->_cost, 0));
                    }
                } // it
            }
        } // from
        // Max supply TODO:demand?, given U?, optimization-> min out of demand,supply
        T U = 0;
        {
            for (size_t i = 0; i < _num_nodes; ++i) {
                if (e[i] > U)
                    U = e[i];
            }
        }
        // NOTE(review): this scaling value is computed but immediately
        // overwritten by `delta = 1` below, and again by `delta = maxSupply`
        // inside the loop — it appears to be a leftover of a capacity-scaling
        // variant; confirm before removing.
        T delta = static_cast<T>(std::pow(2.0l, std::ceil(std::log(static_cast<long double>(U)) / std::log(2.0))));
        std::vector<T> d(_num_nodes);
        std::vector<size_t> prev(_num_nodes);
        delta = 1;
        while (true) { // until we break when S or T is empty
            // pick the node k with the largest remaining supply
            T maxSupply = 0;
            size_t k = 0;
            for (size_t i = 0; i < _num_nodes; ++i) {
                if (e[i] > 0) {
                    if (maxSupply < e[i]) {
                        maxSupply = e[i];
                        k = i;
                    }
                }
            }
            if (maxSupply == 0) {
                break;
            }
            delta = maxSupply;
            // shortest path from k to the nearest demand node l (set inside)
            size_t l = 0;
            compute_shortest_path(d, prev, k, r_cost_forward, r_cost_cap_backward, e, l);
            //---------------------------------------------------------------
            // find delta (minimum on the path from k to l)
            // delta= e[k];
            // if (-e[l]<delta) delta= e[k];
            size_t to = l;
            do {
                size_t from = prev[to];
                assert(from != to);
                // residual: backward edges bound delta by their residual capacity
                typename std::list<edge2<T>>::iterator itccb = r_cost_cap_backward[from].begin();
                while ((itccb != r_cost_cap_backward[from].end()) && (itccb->_to != to)) {
                    ++itccb;
                }
                if (itccb != r_cost_cap_backward[from].end()) {
                    if (itccb->_residual_capacity < delta)
                        delta = itccb->_residual_capacity;
                }
                to = from;
            } while (to != k);
            //---------------------------------------------------------------
            //---------------------------------------------------------------
            // augment delta flow from k to l (backwards actually...)
            to = l;
            do {
                size_t from = prev[to];
                assert(from != to);
                // TODO - might do here O(n) can be done in O(1)
                typename std::list<edge0<T>>::iterator itx = x[from].begin();
                while (itx->_to != to) {
                    ++itx;
                }
                itx->_flow += delta;
                // update residual for backward edges
                typename std::list<edge2<T>>::iterator itccb = r_cost_cap_backward[to].begin();
                while ((itccb != r_cost_cap_backward[to].end()) && (itccb->_to != from)) {
                    ++itccb;
                }
                if (itccb != r_cost_cap_backward[to].end()) {
                    itccb->_residual_capacity += delta;
                }
                itccb = r_cost_cap_backward[from].begin();
                while ((itccb != r_cost_cap_backward[from].end()) && (itccb->_to != to)) {
                    ++itccb;
                }
                if (itccb != r_cost_cap_backward[from].end()) {
                    itccb->_residual_capacity -= delta;
                }
                // update e: supply moves from `from` to `to`
                e[to] += delta;
                e[from] -= delta;
                to = from;
            } while (to != k);
            //---------------------------------------------------------------------------------
        } // while true (until we break when S or T is empty)
        // compute distance from x: total cost = sum over edges of cost * flow
        T dist = 0;
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge0<T>>::const_iterator it = x[from].begin(); it != x[from].end(); ++it) {
                        // if (it->_flow!=0) cout << from << "->" << it->_to << ": " << it->_flow
                        // << "x" << it->_cost << endl;
                        dist += (it->_cost * it->_flow);
                    }
                } // it
            }
        } // from
        return dist;
    } // operator()

private:
    // Dijkstra over reduced costs: fills d (distances) and prev (path tree),
    // stops as soon as a demand node (e[u] < 0) is finalized and reports it in
    // `l`. Afterwards it re-normalizes the reduced costs of both edge sets so
    // they stay non-negative for the next iteration (potential update).
    void compute_shortest_path(std::vector<T> &d, std::vector<size_t> &prev,
        size_t from, std::vector<std::list<edge1<T>>> &cost_forward,
        std::vector<std::list<edge2<T>>> &cost_backward,
        const std::vector<T> &e, size_t &l)
    {
        //----------------------------------------------------------------
        // Making heap (all inf except 0, so we are saving comparisons...)
        //----------------------------------------------------------------
        std::vector<edge3<T>> Q(_num_nodes);
        Q[0]._to = from;
        _nodes_to_Q[from] = 0;
        Q[0]._dist = 0;
        size_t j = 1;
        // TODO: both of these into a function?
        {
            for (size_t i = 0; i < from; ++i) {
                Q[j]._to = i;
                _nodes_to_Q[i] = j;
                Q[j]._dist = std::numeric_limits<T>::max();
                ++j;
            }
        }
        {
            for (size_t i = from + 1; i < _num_nodes; ++i) {
                Q[j]._to = i;
                _nodes_to_Q[i] = j;
                Q[j]._dist = std::numeric_limits<T>::max();
                ++j;
            }
        }
        //----------------------------------------------------------------
        //----------------------------------------------------------------
        // main loop
        //----------------------------------------------------------------
        std::vector<size_t> finalNodesFlg(_num_nodes, false);
        do {
            size_t u = Q[0]._to;
            d[u] = Q[0]._dist; // final distance
            finalNodesFlg[u] = true;
            if (e[u] < 0) {
                // reached a demand node: stop early, report it via l
                l = u;
                break;
            }
            heap_remove_first(Q, _nodes_to_Q);
            // neighbors of u: relax forward edges (infinite capacity)
            {
                for (typename std::list<edge1<T>>::const_iterator it = cost_forward[u].begin();
                     it != cost_forward[u].end(); ++it) {
                    assert(it->_reduced_cost >= 0);
                    T alt = d[u] + it->_reduced_cost;
                    size_t v = it->_to;
                    if ((_nodes_to_Q[v] < Q.size()) && (alt < Q[_nodes_to_Q[v]]._dist)) {
                        // cout << "u to v==" << u << " to " << v << " " << alt << endl;
                        heap_decrease_key(Q, _nodes_to_Q, v, alt);
                        prev[v] = u;
                    }
                }
            } // it
            // relax backward edges, only those with positive residual capacity
            {
                for (typename std::list<edge2<T>>::const_iterator it = cost_backward[u].begin();
                     it != cost_backward[u].end(); ++it) {
                    if (it->_residual_capacity > 0) {
                        assert(it->_reduced_cost >= 0);
                        T alt = d[u] + it->_reduced_cost;
                        size_t v = it->_to;
                        if ((_nodes_to_Q[v] < Q.size()) && (alt < Q[_nodes_to_Q[v]]._dist)) {
                            // cout << "u to v==" << u << " to " << v << " " << alt << endl;
                            heap_decrease_key(Q, _nodes_to_Q, v, alt);
                            prev[v] = u;
                        }
                    }
                }
            } // it
        } while (!Q.empty());
        //---------------------------------------------------------------------------------
        // reduced costs for forward edges (c[i,j]-pi[i]+pi[j])
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge1<T>>::iterator it = cost_forward[from].begin();
                         it != cost_forward[from].end(); ++it) {
                        if (finalNodesFlg[from]) {
                            it->_reduced_cost += d[from] - d[l];
                        }
                        if (finalNodesFlg[it->_to]) {
                            it->_reduced_cost -= d[it->_to] - d[l];
                        }
                    }
                }
            }
        }
        // reduced costs and capacity for backward edges (c[j,i]-pi[j]+pi[i])
        {
            for (size_t from = 0; from < _num_nodes; ++from) {
                {
                    for (typename std::list<edge2<T>>::iterator it = cost_backward[from].begin();
                         it != cost_backward[from].end(); ++it) {
                        if (finalNodesFlg[from]) {
                            it->_reduced_cost += d[from] - d[l];
                        }
                        if (finalNodesFlg[it->_to]) {
                            it->_reduced_cost -= d[it->_to] - d[l];
                        }
                    }
                } // it
            }
        }
        //---------------------------------------------------------------------------------
        //----------------------------------------------------------------
    } // compute_shortest_path

    // Standard binary-heap decrease-key: lower entry v's distance to alt and
    // sift it up, keeping nodes_to_Q consistent.
    void heap_decrease_key(std::vector<edge3<T>> &Q, std::vector<size_t> &nodes_to_Q, size_t v, T alt)
    {
        size_t i = nodes_to_Q[v];
        Q[i]._dist = alt;
        while (i > 0 && Q[PARENT(i)]._dist > Q[i]._dist) {
            swap_heap(Q, nodes_to_Q, i, PARENT(i));
            i = PARENT(i);
        }
    } // heap_decrease_key

    // Pop the minimum element (root): swap with the last entry, shrink, sift down.
    void heap_remove_first(std::vector<edge3<T>> &Q, std::vector<size_t> &nodes_to_Q)
    {
        swap_heap(Q, nodes_to_Q, 0, Q.size() - 1);
        Q.pop_back();
        heapify(Q, nodes_to_Q, 0);
    } // heap_remove_first

    // Sift entry i down until the min-heap property holds.
    void heapify(std::vector<edge3<T>> &Q, std::vector<size_t> &nodes_to_Q, size_t i)
    {
        do {
            // TODO: change to loop
            size_t l = LEFT(i);
            size_t r = RIGHT(i);
            size_t smallest;
            if ((l < Q.size()) && (Q[l]._dist < Q[i]._dist)) {
                smallest = l;
            } else {
                smallest = i;
            }
            if ((r < Q.size()) && (Q[r]._dist < Q[smallest]._dist)) {
                smallest = r;
            }
            if (smallest == i)
                return;
            swap_heap(Q, nodes_to_Q, i, smallest);
            i = smallest;
        } while (true);
    } // end heapify

    // Swap two heap entries and update the node -> heap-index map for both.
    void swap_heap(std::vector<edge3<T>> &Q, std::vector<size_t> &nodes_to_Q, size_t i, size_t j)
    {
        edge3<T> tmp = Q[i];
        Q[i] = Q[j];
        Q[j] = tmp;
        nodes_to_Q[Q[j]._to] = j;
        nodes_to_Q[Q[i]._to] = i;
    } // swap_heapify

    // 0-based binary-heap index arithmetic.
    size_t LEFT(size_t i) { return 2 * (i + 1) - 1; }
    size_t RIGHT(size_t i)
    {
        return 2 * (i + 1); // 2*(i+1)+1-1
    }
    size_t PARENT(size_t i) { return (i - 1) / 2; }
}; // end min_cost_flow
// Resets every element of the (possibly ragged) flow matrix F to zero.
// Shape and capacity of F are preserved; only the stored values change.
template <typename T> void fillFWithZeros(std::vector<std::vector<T>> &F)
{
    for (auto &row : F) {
        std::fill(row.begin(), row.end(), T(0));
    }
}
// Forward declarations
template <typename T, FLOW_TYPE_T FLOW_TYPE> struct emd_impl;
/*
Main implementation
*/
// Integral-type implementation of EMD-hat. Builds a transportation problem
// over 2N+2 nodes (N sources, N sinks, a threshold node that absorbs extra
// mass at cost maxC, and an artificial node), prunes nodes that can only
// route through the threshold, and solves it with min_cost_flow. Returns the
// EMD-hat value including the extra-mass penalty. Optionally materializes
// the flow matrix F when FLOW_TYPE != NO_FLOW.
template <typename Container, FLOW_TYPE_T FLOW_TYPE> struct emd_impl_integral_types {
    typedef typename Container::value_type T;

    // Pc, Qc: the two histograms (same length N). Cc: N x N ground-distance
    // matrix. extra_mass_penalty: cost per unit of unmatched mass; -1 means
    // "use maxC". F: optional output flow matrix (used when FLOW_TYPE != NO_FLOW).
    T operator()(const Container &POrig, const Container &QOrig, const std::vector<T> &Pc,
        const std::vector<T> &Qc, // P, Q, C replaced with Pc, Qc, Cc by Max F
        const std::vector<std::vector<T>> &Cc,
        // T maxC, // disabled by MaxF //now updated inside
        T extra_mass_penalty,
        std::vector<std::vector<T>> *F //,
        // T abs_diff_sum_P_sum_Q // disabled by MaxF //now updated inside
    )
    {
        //-------------------------------------------------------
        size_t N = Pc.size();
        assert(Qc.size() == N);
        // bool needToSwapFlow = false; // commented by Max F
        // re-inserted by MAx F from the original code
        // Ensuring that the supplier - P, have more mass.
        std::vector<T> P;
        std::vector<T> Q;
        std::vector<std::vector<T>> C(Cc);
        T abs_diff_sum_P_sum_Q;
        T sum_P = 0;
        T sum_Q = 0;
        sum_P = std::accumulate(Pc.begin(), Pc.end(), T{0});
        sum_Q = std::accumulate(Qc.begin(), Qc.end(), T{0});
        // {for (std::size_t i=0; i<N; ++i) sum_P+= Pc[i];}
        // {for (std::size_t i=0; i<N; ++i) sum_Q+= Qc[i];}
        bool needToSwapFlow = false;
        if (sum_Q > sum_P) {
            // swap roles of P and Q so the supplier side always has more mass
            needToSwapFlow = true;
            P.assign(std::begin(Qc), std::end(Qc));
            Q.assign(std::begin(Pc), std::end(Pc));
            // transpose C
            for (std::size_t i = 0; i < N; ++i) {
                for (std::size_t j = 0; j < N; ++j) {
                    C[i][j] = Cc[j][i];
                }
            }
            abs_diff_sum_P_sum_Q = sum_Q - sum_P;
        } else {
            P.assign(std::begin(Pc), std::end(Pc));
            Q.assign(std::begin(Qc), std::end(Qc));
            abs_diff_sum_P_sum_Q = sum_P - sum_Q;
        }
        // if (needToSwapFlow) cout << "needToSwapFlow" << endl;
        // end of re-insertion
        // creating the b vector that contains all vertexes:
        // [0, N)   -> sources (supply P)
        // [N, 2N)  -> sinks (demand Q, negated below)
        // 2N       -> threshold node, 2N+1 -> artificial node
        std::vector<T> b(2 * N + 2);
        const size_t THRESHOLD_NODE = 2 * N;
        const size_t ARTIFICIAL_NODE = 2 * N + 1; // need to be last !
        for (size_t i = 0; i < N; ++i) {
            b[i] = P[i];
        }
        for (size_t i = N; i < 2 * N; ++i) {
            b[i] = (Q[i - N]);
        }
        // remark*) I put here a deficit of the extra mass, as mass that flows to the threshold node
        // can be absorbed from all sources with cost zero (this is in reverse order from the paper,
        // where incoming edges to the threshold node had the cost of the threshold and outgoing
        // edges had the cost of zero)
        // This also makes sum of b zero.
        b[THRESHOLD_NODE] = -abs_diff_sum_P_sum_Q;
        b[ARTIFICIAL_NODE] = 0;
        //-------------------------------------------------------
        // original code restored by Max F: maxC = largest ground distance
        T maxC = 0;
        {
            for (std::size_t i = 0; i < N; ++i) {
                {
                    for (std::size_t j = 0; j < N; ++j) {
                        assert(C[i][j] >= 0);
                        if (C[i][j] > maxC)
                            maxC = C[i][j];
                    }
                }
            }
        }
        // end of restored code
        if (extra_mass_penalty == -1)
            extra_mass_penalty = maxC;
        //-------------------------------------------------------
        //=============================================================
        std::set<size_t> sources_that_flow_not_only_to_thresh;
        std::set<size_t> sinks_that_get_flow_not_only_from_thresh;
        T pre_flow_cost = 0;
        //=============================================================
        //=============================================================
        // regular edges between sinks and sources without threshold edges
        // (edges at cost maxC are omitted: routing via the threshold node is equivalent)
        std::vector<std::list<edge<T>>> c(b.size());
        {
            for (size_t i = 0; i < N; ++i) {
                if (b[i] == 0)
                    continue;
                {
                    for (size_t j = 0; j < N; ++j) {
                        if (b[j + N] == 0)
                            continue;
                        if (C[i][j] == maxC)
                            continue;
                        c[i].push_back(edge<T>(j + N, C[i][j]));
                    }
                } // j
            }
        } // i
        // checking which are not isolated
        {
            for (size_t i = 0; i < N; ++i) {
                if (b[i] == 0)
                    continue;
                {
                    for (size_t j = 0; j < N; ++j) {
                        if (b[j + N] == 0)
                            continue;
                        if (C[i][j] == maxC)
                            continue;
                        sources_that_flow_not_only_to_thresh.insert(i);
                        sinks_that_get_flow_not_only_from_thresh.insert(j + N);
                    }
                } // j
            }
        } // i
        // converting all sinks to negative (demands)
        {
            for (size_t i = N; i < 2 * N; ++i) {
                b[i] = -b[i];
            }
        }
        // add edges from/to threshold node,
        // note that costs are reversed to the paper (see also remark* above)
        // It is important that it will be this way because of remark* above.
        {
            for (size_t i = 0; i < N; ++i) {
                c[i].push_back(edge<T>(THRESHOLD_NODE, 0));
            }
        }
        {
            for (size_t j = 0; j < N; ++j) {
                c[THRESHOLD_NODE].push_back(edge<T>(j + N, maxC));
            }
        }
        // artificial arcs - Note the restriction that only one edge i,j is artificial so I ignore it...
        {
            for (size_t i = 0; i < ARTIFICIAL_NODE; ++i) {
                c[i].push_back(edge<T>(ARTIFICIAL_NODE, maxC + 1));
                c[ARTIFICIAL_NODE].push_back(edge<T>(i, maxC + 1));
            }
        }
        //=============================================================
        //====================================================
        // remove nodes with supply demand of 0
        // and vertexes that are connected only to the
        // threshold vertex
        //====================================================
        size_t current_node_name = 0;
        // Note here it should be vector<int> and not vector<size_t>
        // as I'm using -1 as a special flag !!!
        const int REMOVE_NODE_FLAG = -1;
        std::vector<int> nodes_new_names(b.size(), REMOVE_NODE_FLAG);
        std::vector<int> nodes_old_names;
        nodes_old_names.reserve(b.size());
        {
            for (size_t i = 0; i < N * 2; ++i) {
                if (b[i] != 0) {
                    if (sources_that_flow_not_only_to_thresh.find(i) != sources_that_flow_not_only_to_thresh.end() ||
                        sinks_that_get_flow_not_only_from_thresh.find(i) !=
                            sinks_that_get_flow_not_only_from_thresh.end()) {
                        nodes_new_names[i] = current_node_name;
                        nodes_old_names.push_back(i);
                        ++current_node_name;
                    } else {
                        // node only reachable via the threshold: account its cost
                        // up-front ("pre-flow") and fold its mass into the threshold node
                        if (i >= N) { // sink
                            pre_flow_cost -= (b[i] * maxC);
                        }
                        b[THRESHOLD_NODE] += b[i]; // add mass(i<N) or deficit (i>=N)
                    }
                }
            }
        } // i
        nodes_new_names[THRESHOLD_NODE] = current_node_name;
        nodes_old_names.push_back(THRESHOLD_NODE);
        ++current_node_name;
        nodes_new_names[ARTIFICIAL_NODE] = current_node_name;
        nodes_old_names.push_back(ARTIFICIAL_NODE);
        ++current_node_name;
        // compact b and c into bb and cc using the new node numbering
        std::vector<T> bb(current_node_name);
        size_t j = 0;
        {
            for (size_t i = 0; i < b.size(); ++i) {
                if (nodes_new_names[i] != REMOVE_NODE_FLAG) {
                    bb[j] = b[i];
                    ++j;
                }
            }
        }
        std::vector<std::list<edge<T>>> cc(bb.size());
        {
            for (size_t i = 0; i < c.size(); ++i) {
                if (nodes_new_names[i] == REMOVE_NODE_FLAG)
                    continue;
                {
                    for (typename std::list<edge<T>>::const_iterator it = c[i].begin(); it != c[i].end(); ++it) {
                        if (nodes_new_names[it->_to] != REMOVE_NODE_FLAG) {
                            cc[nodes_new_names[i]].push_back(edge<T>(nodes_new_names[it->_to], it->_cost));
                        }
                    }
                }
            }
        }
        // solve the compacted transportation problem
        min_cost_flow<T> mcf;
        T my_dist;
        std::vector<std::list<edge0<T>>> flows(bb.size());
        T mcf_dist = mcf(bb, cc, flows);
        if (FLOW_TYPE != NO_FLOW) {
            // translate the solver's flow (in compacted numbering) back into the
            // N x N flow matrix F, undoing the P/Q swap if one was made
            for (size_t new_name_from = 0; new_name_from < flows.size(); ++new_name_from) {
                for (typename std::list<edge0<T>>::const_iterator it = flows[new_name_from].begin();
                     it != flows[new_name_from].end(); ++it) {
                    if (new_name_from == nodes_new_names[THRESHOLD_NODE] || it->_to == nodes_new_names[THRESHOLD_NODE])
                        continue;
                    size_t i, j;
                    T flow = it->_flow;
                    bool reverseEdge = it->_to < new_name_from;
                    if (!reverseEdge) {
                        i = nodes_old_names[new_name_from];
                        j = nodes_old_names[it->_to] - N;
                    } else {
                        i = nodes_old_names[it->_to];
                        j = nodes_old_names[new_name_from] - N;
                    }
                    if (flow != 0 && new_name_from != nodes_new_names[THRESHOLD_NODE] &&
                        it->_to != nodes_new_names[THRESHOLD_NODE]) {
                        assert(i < N && j < N);
                        if (needToSwapFlow)
                            std::swap(i, j);
                        if (!reverseEdge) {
                            (*F)[i][j] += flow;
                        } else {
                            (*F)[i][j] -= flow;
                        }
                    }
                }
            }
        }
        if (FLOW_TYPE == WITHOUT_EXTRA_MASS_FLOW)
            transform_flow_to_regular(*F, POrig, QOrig);
        my_dist = pre_flow_cost + // pre-flowing on cases where it was possible
            mcf_dist + // solution of the transportation problem
            (abs_diff_sum_P_sum_Q * extra_mass_penalty); // emd-hat extra mass penalty
        return my_dist;
        //-------------------------------------------------------
    } // emd_impl_integral_types (main implementation) operator()
};
//=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
/*** check types and convert float to integral type before continueing with emd_impl_integral_types()***/
/*** check types and convert float to integral type before continueing with emd_impl_integral_types()***/
// Dispatcher: integral T goes straight to emd_impl_integral_types; floating
// point T is scaled by MULT_FACTOR, rounded to long long, solved in integer
// arithmetic, and the result (and optionally the flow) is scaled back.
template <typename Container, FLOW_TYPE_T FLOW_TYPE> struct emd_impl {
    typedef typename Container::value_type T;
    typedef long long int CONVERT_TO_T;
    // typedef int T;
    T operator()(const Container &POrig, const Container &QOrig, const std::vector<T> &P, const std::vector<T> &Q,
        const std::vector<std::vector<T>> &C,
        // T maxC, // disabled by Max F
        T extra_mass_penalty,
        std::vector<std::vector<T>> *F //,
        // T abs_diff_sum_P_sum_Q // disabled by Max F
    )
    {
        /*** integral types ***/
        if (std::is_integral<T>::value) {
            // return emd_impl_integral_types<Container, FLOW_TYPE>()(POrig, QOrig, P, Q, C, maxC, extra_mass_penalty,
            // F, abs_diff_sum_P_sum_Q);
            return emd_impl_integral_types<Container, FLOW_TYPE>()(POrig, QOrig, P, Q, C, extra_mass_penalty,
                F); // replaced by Max F
        }
        /*** floating types ***/
        else if (std::is_floating_point<T>::value) {
            // TODO: static assert
            assert(sizeof(CONVERT_TO_T) >= 8);
            // This condition should hold:
            // ( 2^(sizeof(CONVERT_TO_T*8)) >= ( MULT_FACTOR^2 )
            // Note that it can be problematic to check it because
            // of overflow problems. I simply checked it with Linux calc
            // which has arbitrary precision.
            const double MULT_FACTOR = 1000000;
            // Constructing the input
            const size_t N = P.size();
            std::vector<CONVERT_TO_T> iPOrig(N);
            std::vector<CONVERT_TO_T> iQOrig(N);
            std::vector<CONVERT_TO_T> iP(N);
            std::vector<CONVERT_TO_T> iQ(N);
            std::vector<std::vector<CONVERT_TO_T>> iC(N, std::vector<CONVERT_TO_T>(N));
            std::vector<std::vector<CONVERT_TO_T>> iF(N, std::vector<CONVERT_TO_T>(N));
            // Converting to CONVERT_TO_T: find the normalization factors first
            double sumP = 0.0;
            double sumQ = 0.0;
            double imaxC = C[0][0];
            for (size_t i = 0; i < N; ++i) {
                sumP += POrig[i];
                sumQ += QOrig[i];
                for (size_t j = 0; j < N; ++j) {
                    if (C[i][j] > imaxC)
                        imaxC = C[i][j];
                }
            }
            double minSum = std::min(sumP, sumQ);
            double maxSum = std::max(sumP, sumQ);
            double PQnormFactor = MULT_FACTOR / maxSum;
            double CnormFactor = MULT_FACTOR / imaxC;
            // scale and round-to-nearest (floor(x + 0.5)) into integer buffers
            for (size_t i = 0; i < N; ++i) {
                iPOrig[i] = static_cast<CONVERT_TO_T>(floor(POrig[i] * PQnormFactor + 0.5));
                iQOrig[i] = static_cast<CONVERT_TO_T>(floor(QOrig[i] * PQnormFactor + 0.5));
                iP[i] = static_cast<CONVERT_TO_T>(floor(P[i] * PQnormFactor + 0.5));
                iQ[i] = static_cast<CONVERT_TO_T>(floor(Q[i] * PQnormFactor + 0.5));
                for (size_t j = 0; j < N; ++j) {
                    iC[i][j] = static_cast<CONVERT_TO_T>(floor(C[i][j] * CnormFactor + 0.5));
                    if (FLOW_TYPE != NO_FLOW) {
                        iF[i][j] = static_cast<CONVERT_TO_T>(floor(((*F)[i][j]) * PQnormFactor + 0.5));
                    }
                }
            }
            // computing distance without extra mass penalty
            // double dist = emd_impl<std::vector<CONVERT_TO_T>, FLOW_TYPE>()(iPOrig, iQOrig, iP, iQ, iC,
            // imaxC, 0, &iF, abs_diff_sum_P_sum_Q);
            double dist = emd_impl<std::vector<CONVERT_TO_T>, FLOW_TYPE>()(iPOrig, iQOrig, iP, iQ, iC, 0,
                &iF); // replaced by Max F
            // unnormalize
            dist = dist / PQnormFactor;
            dist = dist / CnormFactor;
            // adding extra mass penalty
            if (extra_mass_penalty == -1)
                extra_mass_penalty = imaxC;
            dist += (maxSum - minSum) * extra_mass_penalty;
            // converting flow to double
            if (FLOW_TYPE != NO_FLOW) {
                for (size_t i = 0; i < N; ++i) {
                    for (size_t j = 0; j < N; ++j) {
                        (*F)[i][j] = (iF[i][j] / PQnormFactor);
                    }
                }
            }
            return dist;
        }
        // NOTE(review): a T that is neither integral nor floating point falls
        // off the end of this non-void function (undefined behavior); confirm
        // whether such instantiations are possible and add a static_assert.
    }
}; // emd_impl
} // namespace EMD_details
/*
____| \ | __ \
__| |\/ | | |
| | | | |
_____| _| _| ____/ */
/*
input:
Pc: vector like container
Qc: vector like container
C: ground distance matrix
for images: serialize the T typed matricies in a vector and compute the ground distance matrix of the serialized grid
with auto C = metric::ground_distance_matrix_of_2dgrid<T>(cols, rows);
*/
// template <typename V>
// auto
// EMD<V>::operator()(const Container &Pc,
// const Container &Qc,
// //const std::vector<std::vector<typename Container::value_type>> &C,
// //typename Container::value_type maxC, // disabled my Max F
// typename Container::value_type extra_mass_penalty,
// std::vector<std::vector<typename Container::value_type>> *F) const
// {
// //std::vector<std::vector<typename Container::value_type>> C =
// EMD_details::ground_distance_matrix_of_2dgrid<typename Container::value_type>(Pc.size(), Qc.size()); // TODO
// replace with proper call
// // add default matrix
// std::vector<std::vector<typename Container::value_type>> C(Pc.size(), std::vector<typename
// Container::value_type>(Qc.size(), 0)); int t = std::min(Pc.size(), Qc.size()) / 2; // by default, ground distance
// saturates at the half of maximum distance possible for (size_t i=0; i<Pc.size(); i++)
// for (size_t j=0; j<Qc.size(); j++)
// C[i][j] = std::min(t, std::abs((int)(i - j))); // non-square matrix is supported here, BUT IS NOT
// SUPPORTED IN THE EMD IMPL
// return (*this)(Pc, Qc, C, extra_mass_penalty, F);
// }
// Builds the default ground-distance matrix used when the caller did not
// supply one: entry (i, j) is |i - j| saturated at half of the shorter
// dimension. The degenerate 1x1 case returns {{1}} so the distance is not
// identically zero. Non-square matrices are produced here, but note the EMD
// implementation itself only supports square C.
template <typename V>
inline auto EMD<V>::default_ground_matrix(std::size_t rows, std::size_t cols) const
    -> std::vector<std::vector<value_type>>
{
    std::vector<std::vector<value_type>> matrix(rows, std::vector<value_type>(cols, 0));
    if (rows == 1 && cols == 1) {
        matrix[0][0] = 1;
        return matrix;
    }
    // by default, ground distance saturates at the half of maximum distance possible
    const int t = static_cast<int>(std::min(rows, cols) / 2);
    for (std::size_t i = 0; i < rows; i++) {
        for (std::size_t j = 0; j < cols; j++) {
            // Compute |i - j| in well-defined arithmetic. The previous
            // (int)(i - j) relied on unsigned wrap-around followed by an
            // implementation-defined narrowing conversion when i < j.
            const int diff = static_cast<int>(i > j ? i - j : j - i);
            matrix[i][j] = std::min(t, diff);
        }
    }
    return matrix;
}
// Computes the EMD(-hat) distance between two equally sized histograms.
// Lazily builds the default ground matrix C on first use, pre-flows the
// zero-cost diagonal (assuming C is a metric), then delegates to
// EMD_details::emd_impl. FLOW_TYPE is fixed to NO_FLOW here, so the flow
// branches below are compile-time dead.
template <typename V>
template <typename Container>
auto EMD<V>::operator()(const Container &Pc, const Container &Qc) const -> distance_type
// const std::vector<std::vector<typename Container::value_type>> &C,
// //typename Container::value_type maxC, // disabled my Max F
// typename Container::value_type extra_mass_penalty,
// std::vector<std::vector<typename Container::value_type>> *F) const
{
    // NOTE(review): lazily initializes the member ground matrix C inside a
    // const call on first use, sized by Pc only — not thread-safe if one EMD
    // instance is shared across threads, and Qc is assumed to have Pc's size;
    // confirm both against callers.
    if (!is_C_initialized) {
        C = default_ground_matrix(Pc.size(), Pc.size());
        is_C_initialized = true;
    }
    using T = value_type;
    const EMD_details::FLOW_TYPE_T FLOW_TYPE = EMD_details::NO_FLOW;
    // // if maxC is not given seperatly // disabled by Max F when rolled back to original version
    // if (maxC == std::numeric_limits<T>::min())
    // {
    //     maxC = EMD_details::max_in_distance_matrix(C);
    // }
    if (FLOW_TYPE != EMD_details::NO_FLOW) {
        EMD_details::fillFWithZeros(*F);
    }
    assert((F != NULL) || (FLOW_TYPE == EMD_details::NO_FLOW));
    std::vector<T> P(std::begin(Pc), std::end(Pc));
    std::vector<T> Q(std::begin(Qc), std::end(Qc));
    // Assuming metric property we can pre-flow 0-cost edges: match each bin
    // with itself first, leaving only the surplus mass for the solver.
    {
        for (size_t i = 0; i < P.size(); ++i) {
            if (P[i] < Q[i]) {
                if (FLOW_TYPE != EMD_details::NO_FLOW) {
                    ((*F)[i][i]) = P[i];
                }
                Q[i] -= P[i];
                P[i] = 0;
            } else {
                if (FLOW_TYPE != EMD_details::NO_FLOW) {
                    ((*F)[i][i]) = Q[i];
                }
                P[i] -= Q[i];
                Q[i] = 0;
            }
        }
    }
    // // need to swap? // disabled by Max F
    // T sum_P = 0;
    // T sum_Q = 0;
    // {
    //     for (size_t i = 0; i < Pc.size(); ++i)
    //         sum_P += Pc[i];
    // }
    // {
    //     for (size_t i = 0; i < Qc.size(); ++i)
    //         sum_Q += Qc[i];
    // }
    // // need to swap?
    // if (sum_Q > sum_P)
    // {
    //     std::swap(P, Q);
    // }
    // T abs_diff_sum_P_sum_Q = std::abs(sum_P - sum_Q);
    // T abs_diff_sum_P_sum_Q = 0; // temporary added by MAx F // TODO remove
    // return EMD_details::emd_impl<std::vector<T>, FLOW_TYPE>()(Pc, Qc, P, Q, C, maxC, extra_mass_penalty, F,
    // abs_diff_sum_P_sum_Q);
    return EMD_details::emd_impl<Container, FLOW_TYPE>()(Pc, Qc, P, Q, C, extra_mass_penalty,
        F); // turned to original state by Max F
} // EMD
} // namespace metric
#endif
| 34,480
|
C++
|
.cpp
| 990
| 31.028283
| 141
| 0.554603
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,533
|
Edit.cpp
|
metric-space-ai_metric/metric/distance/k-structured/Edit.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#include "Edit.hpp"
#include <algorithm>
#include <vector>
namespace metric {
template <typename V>
template <typename Container>
auto Edit<V>::operator()(const Container &str1, const Container &str2) const -> distance_type
{
size_t sizeA = str1.size();
size_t sizeB = str2.size();
// TODO: check empty strings.
std::vector<int> D0(sizeB + 1);
std::vector<int> Di(sizeB + 1);
int C1, C2, C3;
// first element
D0[0] = 0;
// first row
for (std::size_t j = 0; j < sizeB + 1; j++) {
// editDistance[0][j] = j;
D0[j] = j;
}
// second-->last row
for (std::size_t i = 1; i < sizeA + 1; i++) {
// every first element in row
Di[0] = i;
// remaining elements in row
for (std::size_t j = 1; j < sizeB + 1; j++) {
if (str1[i - 1] == str2[j - 1]) {
Di[j] = D0[j - 1];
} else {
C1 = D0[j];
C2 = Di[j - 1];
C3 = D0[j - 1];
Di[j] = (C1 < ((C2 < C3) ? C2 : C3)) ? C1 : ((C2 < C3) ? C2 : C3); // Di[j] = std::min({C1,C2,C3});
Di[j] += 1;
}
}
std::swap(D0, Di);
}
distance_type rvalue = D0[sizeB]; // +1 -1
return rvalue;
}
} // namespace metric
| 1,339
|
C++
|
.cpp
| 49
| 24.714286
| 103
| 0.598746
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,534
|
TWED.cpp
|
metric-space-ai_metric/metric/distance/k-structured/TWED.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#ifndef _METRIC_DISTANCE_K_STRUCTURED_TWED_CPP
#define _METRIC_DISTANCE_K_STRUCTURED_TWED_CPP
#include "TWED.hpp"
#include <algorithm>
#include <vector>
namespace metric {
/*** distance measure with time elastic cost matrix. ***/
template <typename V>
template <typename Container>
auto TWED<V>::operator()(const Container &As, const Container &Bs) const -> distance_type
{
std::vector<value_type> A;
A.reserve(As.size());
std::vector<value_type> timeA;
timeA.reserve(As.size());
std::vector<value_type> B;
B.reserve(Bs.size());
std::vector<value_type> timeB;
timeB.reserve(Bs.size());
for (auto it = As.cbegin(); it != As.cend(); ++it) {
if constexpr (std::is_same<blaze::CompressedVector<V>, Container>::value) {
timeA.push_back(it->index()); // Read access to the index of the non-zero element.
A.push_back(it->value()); // Read access to the value of the non-zero element.
} else {
timeA.push_back(std::distance(As.begin(), it)); // Read access to the index of the non-zero element.
A.push_back(*it); // Read access to the value of the non-zero element.
}
}
for (auto it = Bs.cbegin(); it != Bs.cend(); ++it) {
if constexpr (std::is_same<blaze::CompressedVector<V>, Container>::value) {
timeB.push_back(it->index()); // Read access to the index of the non-zero element.
B.push_back(it->value()); // Read access to the value of the non-zero element.
} else {
timeB.push_back(std::distance(Bs.begin(), it)); // Read access to the index of the non-zero element.
B.push_back(*it); // Read access to the value of the non-zero element.
}
}
value_type C1, C2, C3;
int sizeB = B.size();
int sizeA = A.size();
std::vector<value_type> D0(sizeB);
std::vector<value_type> Di(sizeB);
// first element
D0[0] = std::abs(A[0] - B[0]) + elastic * (std::abs(timeA[0] - 0)); // C3
// first row
for (int j = 1; j < sizeB; j++) {
D0[j] = D0[j - 1] + std::abs(B[j - 1] - B[j]) + elastic * (timeB[j] - timeB[j - 1]) + penalty; // C2
}
// second-->last row
for (int i = 1; i < sizeA; i++) {
// every first element in row
Di[0] = D0[0] + std::abs(A[i - 1] - A[i]) + elastic * (timeA[i] - timeA[i - 1]) + penalty; // C1
// remaining elements in row
for (int j = 1; j < sizeB; j++) {
C1 = D0[j] + std::abs(A[i - 1] - A[i]) + elastic * (timeA[i] - timeA[i - 1]) + penalty;
C2 = Di[j - 1] + std::abs(B[j - 1] - B[j]) + elastic * (timeB[j] - timeB[j - 1]) + penalty;
C3 = D0[j - 1] + std::abs(A[i] - B[j]) + std::abs(A[i - 1] - B[j - 1]) +
elastic * (std::abs(timeA[i] - timeB[j]) + std::abs(timeA[i - 1] - timeB[j - 1]));
Di[j] = (C1 < ((C2 < C3) ? C2 : C3)) ? C1 : ((C2 < C3) ? C2 : C3); // Di[j] = std::min({C1,C2,C3});
// std::cout << Di[j] << " [" << C1 << " " << C2 << " " << C3 << "] | "; // code for debug, added by Max F
}
// std::cout << "\n"; // code for debug, added by Max F
std::swap(D0, Di);
}
distance_type rvalue = D0[sizeB - 1];
return rvalue;
}
namespace TWED_details {
/** add zero padding to sparsed vector (preprocessing for time elatic distance) **/
/** add zero padding to sparsed vector (preprocessing for time elatic distance) **/
// Surrounds every run of non-zeros in a blaze sparse vector with explicit
// stored zeros (one before the run, one after), plus explicit zeros at the
// very first and last positions, so TWED sees the transitions to zero.
template <typename T> blaze::CompressedVector<T> addZeroPadding(blaze::CompressedVector<T> const &data)
{
    // adds zero pads to blaze::sparsevector (for preparing sed)
    blaze::CompressedVector<T> data_zeropadded(data.size());
    // worst case: one leading and one trailing pad per stored element
    data_zeropadded.reserve(2 + data.nonZeros() * 2);
    T value;
    bool addZeroFront;    // a zero must be stored right before the current element
    bool addZeroLastBack; // a zero must be stored right after the previous run
    int index;
    int index_last = -1;  // index of the last element written, -1 = none yet
    if (data.nonZeros() == 0) {
        // all-zero input: just pin the two end positions
        data_zeropadded.set(0, T(0));
        data_zeropadded.set(data.size() - 1, T(0));
    } else {
        for (blaze::CompressedVector<double>::ConstIterator it = data.cbegin(); it != data.cend(); ++it) {
            index = it->index(); // Read access to the index of the non-zero element.
            value = it->value(); // Read access to the value of the non-zero element.
            // contiguous with the previous element -> no pad needed in front
            if (index == index_last + 1)
                addZeroFront = false;
            else
                addZeroFront = true;
            // gap of more than one position -> also close the previous run
            if (index > index_last + 1 && index != 1 && index != index_last + 2)
                addZeroLastBack = true;
            else
                addZeroLastBack = false;
            if (addZeroLastBack == true)
                data_zeropadded.append(index_last + 1, T(0));
            if (addZeroFront == true)
                data_zeropadded.append(index - 1, T(0));
            data_zeropadded.append(index, value);
            index_last = index;
        }
        if (index_last < int(data.size()) - 2) // second-to-last position not stored yet
        {
            data_zeropadded.append(index_last + 1, T(0));
        }
        if (index_last < int(data.size()) - 1) {
            data_zeropadded.append(data.size() - 1, T(0));
        }
    }
    shrinkToFit(data_zeropadded);
    return data_zeropadded;
}
} // namespace TWED_details
} // namespace metric
#endif // header guard
| 4,873
|
C++
|
.cpp
| 121
| 37.471074
| 110
| 0.63569
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,535
|
tree.cpp
|
metric-space-ai_metric/metric/space/tree.cpp
|
/*This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#ifndef _METRIC_SPACE_TREE_CPP
#define _METRIC_SPACE_TREE_CPP
#include "tree.hpp" // back reference for header only use
#include <algorithm>
#include <cmath>
#include <functional>
#include <sstream>
#include <stdexcept>
#include <type_traits>
#include <vector>
#if defined BOOST_SERIALIZATION_NVP
#define SERIALIZATION_NVP2(name, variable) boost::serialization::make_nvp(name, variable)
#define SERIALIZATION_NVP(variable) BOOST_SERIALIZATION_NVP(variable)
#else
#define SERIALIZATION_NVP2(name, variable) (variable)
#define SERIALIZATION_NVP(variable) (variable)
#endif
#ifdef BOOST_SERIALIZATION_SPLIT_MEMBER
#define SERIALIZE_SPLIT_MEMBERS BOOST_SERIALIZATION_SPLIT_MEMBER
#else
#ifndef SERIALIZE_SPLIT_MEMBERS
#define SERIALIZE_SPLIT_MEMBERS()
#endif
#endif
namespace metric {
/*
__ \ _| | | \ | | _)
| | _ \ | _` | | | | __| |\/ | _ \ __| __| | __|
| | __/ __| ( | | | | | | | __/ | | | (
____/ \___| _| \__,_| \__,_| _| \__| _| _| \___| \__| _| _| \___|
*/
/// Euclidean (L2) distance functor over any STL-like container of floats.
template <typename Container> struct L2_Metric_STL {
    typedef typename Container::value_type result_type;
    static_assert(std::is_floating_point<result_type>::value, "T must be a float type");
    /// Returns sqrt(sum((a_i - b_i)^2)) over the common prefix of a and b.
    result_type operator()(const Container &a, const Container &b) const
    {
        result_type sum = 0;
        // Fix: use && instead of ||. With ||, containers of different
        // lengths kept iterating after the shorter one was exhausted and
        // dereferenced a past-the-end iterator (undefined behavior).
        // For equal-length inputs the result is identical.
        for (auto it1 = a.begin(), it2 = b.begin(); it1 != a.end() && it2 != b.end(); ++it1, ++it2) {
            sum += (*it1 - *it2) * (*it1 - *it2);
        }
        return std::sqrt(sum);
    }
};
/*
\ | |
\ | _ \ _` | _ \
|\ | ( | ( | __/
_| \_| \___/ \__,_| \___|
nodes of the tree
*/
/// One node of the cover tree. A node does not own its payload record:
/// records live in the owning Tree's storage and are addressed via `ID`;
/// `tree_ptr` points back to the owning tree for metric evaluation.
/// Each node OWNS its children (raw pointers, freed recursively in ~Node).
template <class RecType, class Metric> class Node {
    Tree<RecType, Metric> *tree_ptr = nullptr; // non-owning back reference to the owning tree
public:
    using Distance = typename Tree<RecType, Metric>::Distance;
    explicit Node(Tree<RecType, Metric> *ptr, Distance base = Tree<RecType, Metric>().base) : tree_ptr(ptr), base(base)
    {
    }
    // Non-copyable / non-movable: children are raw owning pointers deleted
    // in the destructor, so implicit copies would double-free.
    Node() = delete;
    Node(const Node &) = delete;
    Node(Node &&) = delete;
    Node &operator=(const Node &) = delete;
    Node &operator=(Node &&) = delete;
    ~Node()
    {
        // Deleting a node cascades through the whole subtree.
        for (auto p : children) {
            delete p;
        }
    }
    typedef Node<RecType, Metric> *Node_ptr;
    // private:
    Distance base;
    Node_ptr parent = nullptr; // parent of current node
    std::vector<Node_ptr> children; // list of children
    int level = 0; // current level of the node
    Distance parent_dist = 0; // upper bound of distance to any of descendants
    std::size_t ID = 0; // unique ID of current node
public:
    // NOTE(review): returns unsigned although ID is std::size_t — narrowing on 64-bit.
    [[nodiscard]] unsigned get_ID() const { return ID; }
    void set_ID(const unsigned v) { ID = v; }
    const RecType &get_data() const { return tree_ptr->get_data(ID); }
    Node_ptr get_parent() const { return parent; }
    void set_parent(Node_ptr node) { parent = node; }
    std::vector<Node_ptr> &get_children() { return children; }
    [[nodiscard]] int get_level() const { return level; }
    void set_level(int l) { level = l; }
    Distance get_parent_dist() const { return parent_dist; }
    void set_parent_dist(const Distance &d) { parent_dist = d; }
    Distance covdist(); // covering distance of subtree at current node
    Distance sepdist(); // separating distance between nodes at current level
    Distance dist(const RecType &pp) const; // distance between this node and point pp
    Distance dist(Node_ptr n) const; // distance between this node and node n
    Node_ptr setChild(const RecType &p,
                      int new_id = -1); // insert a new child of current node with point p
    Node_ptr setChild(Node_ptr p,
                      int new_id = -1); // // insert the subtree p as child of
                                        // current node (erase or reordering)
    // setting iterators for children access in loops
    typename std::vector<Node_ptr>::const_iterator begin() const { return children.cbegin(); }
    typename std::vector<Node_ptr>::const_iterator end() const { return children.cend(); }
    typename std::vector<Node_ptr>::iterator begin() { return children.begin(); }
    typename std::vector<Node_ptr>::iterator end() { return children.end(); }
    // Flatten this subtree (including `this`) into a vector via iterative DFS.
    std::vector<Node_ptr> descendants()
    {
        std::vector<Node_ptr> result;
        Node_ptr curNode = this;
        std::stack<Node_ptr> nodeStack;
        nodeStack.push(curNode);
        while (nodeStack.size() > 0) {
            curNode = nodeStack.top();
            nodeStack.pop();
            // f(curNode); // .. and callback each node.
            result.push_back(curNode);
            for (const auto &child : *curNode) {
                nodeStack.push(child);
            }
        }
        return result;
    }
    // Serializes bookkeeping only; payload and topology are written by Tree.
    template <typename Archive> void serialize(Archive &ar, const unsigned int)
    {
        ar &SERIALIZATION_NVP(base) & SERIALIZATION_NVP(level) & SERIALIZATION_NVP(parent_dist) & SERIALIZATION_NVP(ID);
        // & SERIALIZATION_NVP(data);
    }
};
/// Wire-format wrapper around a single Node used during (de)serialization.
/// `is_null == true` acts as the "child list finished" sentinel in the
/// stream; `has_children` tells the reader whether following records
/// belong under this node.
template <typename RecType, typename Metric> struct SerializedNode {
    using NodeType = Node<RecType, Metric>;
    NodeType *node = nullptr;
    bool is_null = true;
    bool has_children = false;
    // Fix: default-initialize to nullptr. The NodeType* constructor below
    // never set tree_ptr, leaving the member uninitialized (reading it was
    // indeterminate-value UB).
    Tree<RecType, Metric> *tree_ptr = nullptr;
    SerializedNode(Tree<RecType, Metric> *tree_ptr_)
        : node(nullptr), is_null(true), has_children(false), tree_ptr(tree_ptr_)
    {
    }
    ~SerializedNode() {}
    SerializedNode(NodeType *n)
        : node(n), is_null((node == nullptr) ? true : false),
          has_children(!is_null && node->children.empty() ? false : true)
    {
    }
    // Writes the null flag, then (for real nodes) the node body + child flag.
    template <typename Archive> void save(Archive &ar, const unsigned int) const
    {
        ar << SERIALIZATION_NVP(is_null);
        if (!is_null) {
            ar << SERIALIZATION_NVP2("node", *node) << SERIALIZATION_NVP(has_children);
        }
    }
    // Reads the null flag; for real nodes allocates a Node (ownership passes
    // to the caller, who links it into the rebuilt tree).
    template <typename Archive> void load(Archive &ar, const unsigned int)
    {
        ar >> SERIALIZATION_NVP(is_null);
        if (!is_null) {
            try {
                node = new NodeType(tree_ptr);
                ar >> SERIALIZATION_NVP2("node", *node);
                ar >> SERIALIZATION_NVP(has_children);
            } catch (...) {
                delete node;
                node = nullptr;
                throw;
            }
        }
    }
    SERIALIZE_SPLIT_MEMBERS();
};
/*
\ | | _ _| | | | _)
\ | _ \ _` | _ \ | __ `__ \ __ \ | _ \ __ `__ \ _ \ __ \ __| _` | __| | _ \ __ \
|\ | ( | ( | __/ | | | | | | | __/ | | | __/ | | | ( | | | ( | | |
_| \_| \___/ \__,_| \___| ___| _| _| _| .__/ _| \___| _| _| _| \___| _| _| \__| \__,_| \__| _| \___/ _| _|
_|
*/
/*** covering distance of subtree at current node: base^level ***/
template <class RecType, class Metric> typename Node<RecType, Metric>::Distance Node<RecType, Metric>::covdist()
{
    // The covering radius shrinks geometrically with tree depth.
    const auto radius = std::pow(base, level);
    return radius;
}
/*** separating distance between nodes at current level: 2 * base^(level-1) ***/
template <class RecType, class Metric> typename Node<RecType, Metric>::Distance Node<RecType, Metric>::sepdist()
{
    const auto half_separation = std::pow(base, level - 1);
    return 2 * half_separation;
}
/*** distance between current node and point pp ***/
template <class RecType, class Metric>
typename Node<RecType, Metric>::Distance Node<RecType, Metric>::dist(const RecType &pp) const
{
    // Delegate to the owning tree's metric, comparing our payload with pp.
    const RecType &own = get_data();
    return tree_ptr->metric(own, pp);
}
/*** distance between current node and node n ***/
template <class RecType, class Metric>
typename Node<RecType, Metric>::Distance Node<RecType, Metric>::dist(Node_ptr n) const
{
    // ID-based lookup lets the tree reuse cached pairwise distances.
    return tree_ptr->metric_by_id(this->ID, n->ID);
}
/*** insert a new child of current node with point p ***/
template <class RecType, class Metric>
Node<RecType, Metric> *Node<RecType, Metric>::setChild(const RecType &p, int new_id)
{
    // Allocate a fresh node one level below this one; ownership passes to
    // this node's `children` vector (freed recursively in ~Node).
    Node_ptr temp(new Node<RecType, Metric>(tree_ptr));
    temp->level = level - 1;
    temp->parent_dist = 0; // NOTE(review): distance to parent is not computed here — confirm callers set it
    temp->ID = new_id; // new_id defaults to -1, stored into an unsigned/size_t ID field
    temp->set_data(p);
    temp->parent = this;
    children.push_back(temp);
    return temp;
}
/*** insert the subtree p as child of current node ***/
template <class RecType, class Metric>
Node<RecType, Metric> *Node<RecType, Metric>::setChild(Node_ptr p, int /*new_id*/)
{
    // If the subtree root is not exactly one level below us, renumber the
    // entire subtree so every child sits one level under its parent.
    if (p->level != level - 1) {
        p->level = level - 1;
        std::stack<Node_ptr> pending;
        pending.push(p);
        while (!pending.empty()) {
            Node_ptr n = pending.top();
            pending.pop();
            for (const auto &c : *n) {
                c->level = n->level - 1;
                pending.push(c);
            }
        }
    }
    p->parent = this;
    children.push_back(p);
    return p;
}
/*
__ __| _ _| | | | _)
| __| _ \ _ \ | __ `__ \ __ \ | _ \ __ `__ \ _ \ __ \ __| _` | __| | _ \ __ \
| | __/ __/ | | | | | | | __/ | | | __/ | | | ( | | | ( | | |
_| _| \___| \___| ___| _| _| _| .__/ _| \___| _| _| _| \___| _| _| \__| \__,_| \__| _| \___/ _| _|
_|
*/
/*
__| | |
( _ \ \ (_-< _| _| | | _| _| _ \ _|
\___| \___/ _| _| ___/ \__| _| \_,_| \__| \__| \___/ _|
*/
/*** constructor: empty tree **/
template <class RecType, class Metric> Tree<RecType, Metric>::Tree(int truncate /*=-1*/, Metric d) : metric_(d)
{
    root = nullptr; // fix: nullptr instead of the NULL macro (modern idiom)
    min_scale = 1000;
    max_scale = 0;
    truncate_level = truncate;
}
/*** constructor: with a signal data record **/
// Builds a one-node tree: p becomes the root at level 0.
template <class RecType, class Metric>
Tree<RecType, Metric>::Tree(const RecType &p, int truncateArg /*=-1*/, Metric d) : metric_(d)
{
    min_scale = 1000;
    max_scale = 0;
    truncate_level = truncateArg;
    // root = std::make_unique<NodeType>();
    root = new NodeType(this); // replaced by Max F
    root->level = 0;
    root->parent_dist = 0;
    // root->ID = add_value(p);
    root->ID = add_data(p, root); // replaced by Max F
}
/*** constructor: with a vector data records **/
// Builds the tree from a random-access container: p[0] becomes the root,
// remaining records are inserted one by one.
template <class RecType, class Metric>
template <typename C>
Tree<RecType, Metric>::Tree(const C &p, int truncateArg /*=-1*/, Metric d) : metric_(d)
{
    min_scale = 1000;
    max_scale = 0;
    truncate_level = truncateArg;
    // Fix: guard against an empty container. The original indexed p[0]
    // unconditionally, which is undefined behavior for an empty range;
    // an empty input now yields the same state as the default constructor.
    if (p.size() == 0) {
        root = nullptr;
        return;
    }
    root = new NodeType(this);
    root->level = 0;
    root->parent_dist = 0;
    root->ID = add_data(p[0], root);
    for (std::size_t i = 1; i < p.size(); ++i) {
        insert(p[i]);
    }
}
/*** destructor: deleting the root cascades through the whole tree ***/
template <class RecType, class Metric> Tree<RecType, Metric>::~Tree()
{
    delete root; // ~Node recursively frees every child subtree
}
/*
| __| | _) | | _ ) _ \ _) |
(_-< _ \ _| _| ( \ | | _` | _| -_) \ _ \ | | | | | (_-< _| _` | \ _| -_)
___/ \___/ _| \__| \___| _| _| _| _| \__,_| _| \___| _| _| ___/ \_, | ___/ _| ___/ \__| \__,_| _| _| \__| \___|
___/
*/
// Compute the distance from x to every child of p and return an index
// permutation of the children ordered by increasing distance, together
// with the (unsorted) per-child distances.
template <class RecType, class Metric>
template <typename pointOrNodeType>
std::tuple<std::vector<int>, std::vector<typename Tree<RecType, Metric>::Distance>>
Tree<RecType, Metric>::sortChildrenByDistance(Node_ptr p, pointOrNodeType x) const
{
    const auto n = p->children.size();
    std::vector<Distance> distances(n);
    for (std::size_t k = 0; k < n; ++k) {
        distances[k] = p->children[k]->dist(x);
    }
    std::vector<int> order(n);
    std::iota(order.begin(), order.end(), 0);
    std::sort(order.begin(), order.end(),
              [&distances](int lhs, int rhs) { return distances[lhs] < distances[rhs]; });
    return std::make_tuple(order, distances);
}
/*
_ _| |
| \ (_-< -_) _| _|
___| _| _| ___/ \___| _| \__|
*/
// Insert only the records farther than `treshold` from the root record;
// returns how many were inserted.
template <class RecType, class Metric>
std::size_t Tree<RecType, Metric>::insert_if(const std::vector<RecType> &p, Distance treshold)
{
    std::size_t count = 0;
    for (const auto &record : p) {
        if (root->dist(record) > treshold) {
            insert(record);
            ++count;
        }
    }
    return count;
}
// Insert p only if its nearest neighbour is farther than `treshold`.
// Returns (ID of inserted or nearest existing node, whether inserted).
template <class RecType, class Metric>
std::tuple<std::size_t, bool> Tree<RecType, Metric>::insert_if(const RecType &p, Distance treshold)
{
    // An empty tree always accepts the record.
    if (empty())
        return std::make_tuple(insert(p), true);
    std::pair<Node_ptr, Distance> nearest(root, root->dist(p));
    nn_(root, nearest.second, p, nearest);
    if (nearest.second > treshold)
        return std::make_tuple(insert(p), true);
    return std::make_pair(nearest.first->ID, false);
}
/*** vector of data record insertion: inserts every record, always succeeds **/
template <class RecType, class Metric> bool Tree<RecType, Metric>::insert(const std::vector<RecType> &p)
{
    for (const auto &record : p)
        insert(record);
    return true;
}
/*** data record insertion; returns the new record's ID **/
template <class RecType, class Metric> std::size_t Tree<RecType, Metric>::insert(const RecType &x)
{
    // Exclusive lock: insertion mutates the tree structure.
    std::unique_lock<std::shared_timed_mutex> lk(global_mut);
    (void)lk; // prevent AppleCLang warning;
    auto node = new NodeType(this);
    node->set_level(0);
    node->set_parent_dist(0);
    node->ID = add_data(x, node);
    node->set_parent(nullptr);
    // root insertion; fix: compare against nullptr instead of the NULL macro
    if (root == nullptr) {
        root = node;
    } else {
        root = insert(root, node);
    }
    return node->ID;
}
/*** data record insertion **/
// Insert node x into the subtree rooted at p; returns the (possibly new)
// root. If x lies outside p's covering ball, the tree is grown upward:
// a leaf is promoted above p (or p's level raised) until x can become the
// new root with p as its child. Otherwise a normal recursive descent is done.
template <class RecType, class Metric> Node<RecType, Metric> *Tree<RecType, Metric>::insert(Node_ptr p, Node_ptr x)
{
    Node_ptr result;
    // normal insertion
    if (p->dist(x) > p->covdist()) {
        // Raise the root until base*covdist/(base-1) — the maximum reach of
        // the tree — covers x.
        while (p->dist(x) > base * p->covdist() / (base - 1)) {
            // Walk down the last-child chain to find a leaf and its parent.
            Node_ptr current = p;
            Node_ptr parent = NULL;
            while (current->children.size() > 0) {
                parent = current;
                current = current->children.back();
            }
            if (parent != NULL) {
                // Detach the leaf and promote it to be the new root, one
                // level above the old root p.
                parent->children.pop_back();
                current->set_level(p->get_level() + 1);
                current->children.push_back(p);
                p->parent = current;
                p->parent_dist = p->dist(current);
                p = current;
                p->parent = nullptr;
                p->parent_dist = 0;
            } else {
                // p is a lone node: simply raise its level.
                p->level += 1;
            }
        }
        // x becomes the new root, covering the old root p.
        x->level = p->level + 1;
        x->parent = nullptr;
        x->children.push_back(p);
        p->parent_dist = p->dist(x);
        p->parent = x;
        p = x;
        max_scale = p->level;
        result = p;
    } else {
        result = insert_(p, x);
    }
    return result;
}
// Walk down the last-child chain from the root until a childless node
// is reached; used by erase() to pick a replacement root.
template <class RecType, class Metric> inline auto Tree<RecType, Metric>::findAnyLeaf() -> Node_ptr
{
    Node_ptr n = root;
    while (!n->children.empty())
        n = n->children.back();
    return n;
}
// Detach `node` from its parent's child list using swap-with-last erase.
// The root (no parent) is left untouched except that a parentless node
// keeps parent == nullptr.
template <class RecType, class Metric> void Tree<RecType, Metric>::extractNode(Node_ptr node)
{
    Node_ptr parent = node->parent;
    if (parent == nullptr)
        return;
    auto &siblings = parent->children;
    auto it = std::find(siblings.begin(), siblings.end(), node);
    if (it != siblings.end()) {
        *it = siblings.back();
        siblings.pop_back();
    }
    node->parent = nullptr;
}
/*
-_) _| _` | (_-< -_)
\___| _| \__,_| ___/ \___|
*/
// Remove the record equal to p (nearest-neighbour distance <= 0, i.e. an
// exact match under the metric) from the tree. Returns true when a node
// was actually deleted.
template <class RecType, class Metric> bool Tree<RecType, Metric>::erase(const RecType &p)
{
    bool ret_val = false;
    // Find the node storing p via nearest-neighbour search.
    std::unique_lock<std::shared_timed_mutex> lk(global_mut);
    (void)lk; // prevent AppleCLang warning
    std::pair<Node_ptr, Distance> result(root, root->dist(p));
    nn_(root, result.second, p, result);
    if (result.second <= 0.0) {
        Node_ptr node_p = result.first;
        Node_ptr parent_p = node_p->get_parent();
        if (node_p == root) {
            // Deleting the root: either the tree becomes empty, or an
            // arbitrary leaf is promoted to be the new root and adopts
            // the old root's children.
            if (node_p->get_children().empty()) {
                delete root;
                root = nullptr;
                data.clear();
                index_map.clear();
                return true;
            }
            auto leaf = findAnyLeaf();
            extractNode(leaf);
            leaf->set_level(root->get_level());
            root = leaf;
            leaf->children.assign(node_p->children.begin(), node_p->children.end());
            for (auto l : leaf->get_children()) {
                l->set_parent(leaf);
            }
            ret_val = true;
            remove_data(node_p->get_ID());
            // The children were re-homed above; clear before delete so
            // ~Node does not recursively free them.
            node_p->children.clear();
            delete node_p;
        }
        else {
            // erase node from parent's list of child
            unsigned num_children = parent_p->children.size();
            for (unsigned i = 0; i < num_children; ++i) {
                if (parent_p->children[i] == node_p) {
                    parent_p->children[i] = parent_p->children.back();
                    parent_p->children.pop_back();
                    break;
                }
            }
            // insert each child of the node in new root again.
            for (Node_ptr q : node_p->children) {
                root = Tree<RecType, Metric>::insert_(root, q);
            }
            node_p->children.clear();
            remove_data(node_p->get_ID());
            delete node_p;
            ret_val = true;
        }
    }
    return ret_val;
}
/*
\ \
_| _| _| _|
Nearest Neighbour
*/
// Nearest-neighbour query: returns the node whose record is closest to p.
template <class RecType, class Metric>
typename Tree<RecType, Metric>::Node_ptr Tree<RecType, Metric>::nn(const RecType &p) const
{
    // Shared lock: read-only traversal.
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    std::pair<Node_ptr, Distance> best(root, root->dist(p));
    nn_(root, best.second, p, best);
    return best.first;
}
// Recursive worker for nn(): depth-first descent with branch-and-bound
// pruning. `nn` carries the best (node, distance) pair found so far.
template <class RecType, class Metric>
void Tree<RecType, Metric>::nn_(Node_ptr current, Distance dist_current, const RecType &p,
                                std::pair<Node_ptr, Distance> &nn) const
{
    // Adopt the current node if it beats the running best.
    if (dist_current < nn.second) {
        nn.first = current;
        nn.second = dist_current;
    }
    // Visit children nearest-first; skip subtrees whose covering ball
    // cannot contain anything closer than the current best.
    auto sorted = sortChildrenByDistance(current, p);
    const auto &order = std::get<0>(sorted);
    const auto &dists = std::get<1>(sorted);
    for (int ci : order) {
        Node_ptr child = current->children[ci];
        Distance d_child = dists[ci];
        if (nn.second > d_child - 2 * child->covdist())
            nn_(child, d_child, p, nn);
    }
}
/*
|
| / \ \
_ \_ \ _| _| _| _|
k-Nearest Neighbours */
// k-nearest-neighbour query: returns up to numNbrs (node, distance) pairs
// sorted by increasing distance to queryPt.
template <class RecType, class Metric>
std::vector<std::pair<typename Tree<RecType, Metric>::Node_ptr, typename Tree<RecType, Metric>::Distance>>
Tree<RecType, Metric>::knn(const RecType &queryPt, unsigned numNbrs) const
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    using NodePtr = typename Tree<RecType, Metric>::Node_ptr;
    // Seed the candidate list with "infinitely far" placeholders.
    std::vector<std::pair<NodePtr, Distance>> nnList(
        numNbrs, std::pair<NodePtr, Distance>(nullptr, std::numeric_limits<Distance>::max()));
    const Distance dist_root = root->dist(queryPt);
    const std::size_t found = knn_(root, dist_root, queryPt, nnList, 0);
    // Trim unused placeholders when the tree holds fewer than numNbrs nodes.
    if (found < nnList.size())
        nnList.resize(found);
    return nnList;
}
// Recursive worker for knn(). nnList is kept sorted by distance at a fixed
// capacity: eligible nodes are inserted in order and the worst entry is
// dropped. nnSize counts real (non-placeholder) insertions so the caller
// can trim unused slots when the tree is smaller than the request.
template <class RecType, class Metric>
std::size_t Tree<RecType, Metric>::knn_(Node_ptr current, Distance dist_current, const RecType &p,
                                        std::vector<std::pair<Node_ptr, Distance>> &nnList, std::size_t nnSize) const
{
    if (dist_current < nnList.back().second) // If the current node is eligible to get into the list
    {
        auto comp_x = [](std::pair<Node_ptr, Distance> a, std::pair<Node_ptr, Distance> b) {
            return a.second < b.second;
        };
        std::pair<Node_ptr, Distance> temp(current, dist_current);
        // Ordered insert keeps nnList sorted; pop_back keeps the size fixed.
        nnList.insert(std::upper_bound(nnList.begin(), nnList.end(), temp, comp_x), temp);
        nnList.pop_back();
        nnSize++;
    }
    // Visit children nearest-first; prune subtrees whose covering ball
    // cannot beat the current worst candidate.
    auto idx__dists = sortChildrenByDistance(current, p);
    auto idx = std::get<0>(idx__dists);
    auto dists = std::get<1>(idx__dists);
    for (const auto &child_idx : idx) {
        Node_ptr child = current->children[child_idx];
        Distance dist_child = dists[child_idx];
        if (nnList.back().second > dist_child - 2 * child->covdist())
            nnSize = knn_(child, dist_child, p, nnList, nnSize);
    }
    return nnSize;
}
/*
_| _` | \ _` | -_)
_| \__,_| _| _| \__, | \___| ____/
Range Neighbours Search
*/
// Range query: collect every node whose record lies strictly within
// `distance` of queryPt.
template <class RecType, class Metric>
std::vector<std::pair<typename Tree<RecType, Metric>::Node_ptr, typename Tree<RecType, Metric>::Distance>>
Tree<RecType, Metric>::rnn(const RecType &queryPt, Distance distance) const
{
    std::vector<std::pair<Node_ptr, Distance>> neighbours;
    rnn_(root, root->dist(queryPt), queryPt, distance, neighbours);
    return neighbours;
}
// Recursive worker for rnn(): depth-first descent, pruning subtrees whose
// covering ball cannot intersect the query ball.
template <class RecType, class Metric>
void Tree<RecType, Metric>::rnn_(Node_ptr current, Distance dist_current, const RecType &p, Distance distance,
                                 std::vector<std::pair<Node_ptr, Distance>> &nnList) const
{
    // Accept the current node when it lies inside the query ball.
    if (dist_current < distance)
        nnList.emplace_back(current, dist_current);
    auto sorted = sortChildrenByDistance(current, p);
    const auto &order = std::get<0>(sorted);
    const auto &dists = std::get<1>(sorted);
    for (int ci : order) {
        Node_ptr child = current->children[ci];
        Distance d_child = dists[ci];
        if (d_child < distance + 2 * child->covdist())
            rnn_(child, d_child, p, distance, nnList);
    }
}
/*
_)
(_-< | _ / -_)
___/ _| ___| \___|
tree size
*/
// Number of records currently stored, read under a shared (reader) lock.
template <class RecType, class Metric> size_t Tree<RecType, Metric>::size()
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    return data.size();
}
/*
| \ \ / |
_| _ \ \ \ / -_) _| _| _ \ _|
\__| \___/ \_/ \___| \__| \__| \___/ _|
export to vector
*/
// Export all records as a vector ordered by insertion ID.
template <class RecType, class Metric> std::vector<RecType> Tree<RecType, Metric>::toVector()
{
    std::shared_lock<std::shared_timed_mutex> lk(global_mut);
    (void)lk;
    // Collect (record, ID) pairs via iterative depth-first traversal.
    std::vector<std::pair<RecType, int>> zipped;
    std::stack<Node_ptr> stack;
    stack.push(root);
    while (!stack.empty()) {
        Node_ptr current = stack.top();
        stack.pop();
        zipped.push_back(std::make_pair(current->get_data(), current->ID));
        for (const auto &child : *current)
            stack.push(child);
    }
    // Sort by ID so output order matches insertion order.
    std::sort(std::begin(zipped), std::end(zipped), [](const auto &a, const auto &b) { return a.second < b.second; });
    // Fixes: the local was named `data`, shadowing the member of the same
    // name; and the loop index was `int` compared against size() (signed/
    // unsigned mismatch). Renamed to `records` and indexed with std::size_t.
    std::vector<RecType> records(zipped.size());
    for (std::size_t i = 0; i < zipped.size(); ++i) {
        records[i] = zipped[i].first;
    }
    return records;
}
// Look up a record by its external ID; throws for unknown IDs.
template <class RecType, class Metric> RecType Tree<RecType, Metric>::operator[](size_t id) const
{
    const auto it = index_map.find(id);
    if (it == index_map.end())
        throw std::runtime_error("tree has no such ID:" + std::to_string(id));
    return data[it->second].first;
}
/*
_ \ __ __| |
| | -_) \ \ / | _ \ _ \ | (_-<
___/ \___| \_/ _| \___/ \___/ _| ___/
debugging functions
*/
// get root level == max_level (the root carries the highest level)
template <class RecType, class Metric> int Tree<RecType, Metric>::levelSize()
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    return root->level;
}
// Debug helper: print every node's level, ID and child IDs to stdout and
// return a per-level node count.
template <class RecType, class Metric> std::map<int, unsigned> Tree<RecType, Metric>::print_levels()
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    std::map<int, unsigned> level_count;
    std::stack<Node_ptr> pending;
    pending.push(root);
    while (!pending.empty()) {
        Node_ptr n = pending.top();
        pending.pop();
        std::cout << "level: " << n->level << ", node ID: " << n->ID << ", child ids: ";
        for (std::size_t i = 0; i < n->children.size(); ++i) {
            std::cout << n->children[i]->ID << ", ";
        }
        std::cout << std::endl;
        ++level_count[n->level];
        for (const auto &child : *n)
            pending.push(child);
    }
    return level_count;
}
// Verify the cover-tree invariant for every parent/child pair:
// dist(parent, child) must not exceed the parent's covering distance.
// Violations are reported to stdout; returns false if any were found.
template <class RecType, class Metric> bool Tree<RecType, Metric>::check_covering() const
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    if (root == nullptr)
        return true;
    bool ok = true;
    std::stack<Node_ptr> pending;
    pending.push(root);
    while (!pending.empty()) {
        Node_ptr n = pending.top();
        pending.pop();
        for (const auto &child : *n) {
            pending.push(child);
            if (n->dist(child) > n->covdist()) {
                std::cout << "covering ill here (" << n->get_ID() << ") --> (" << child->get_ID()
                          << ") dist < covdist " << n->dist(child) << " < " << n->covdist()
                          << " level:" << n->get_level() << std::endl;
                ok = false;
            }
        }
    }
    return ok;
}
// Convenience overload: pretty-print the tree to stdout.
// (Unlike the stream overload, takes no lock — matches original behavior.)
template <class RecType, class Metric> void Tree<RecType, Metric>::print() const
{
    if (root == nullptr) {
        std::cout << "Empty tree" << std::endl;
        return;
    }
    print_(root, std::cout);
}
// Pretty-print the tree structure to an arbitrary output stream.
template <class RecType, class Metric> void Tree<RecType, Metric>::print(std::ostream &ostr) const
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    if (root == nullptr) {
        ostr << "Empty tree" << std::endl;
        return;
    }
    print_(root, ostr);
}
// Recursive pretty-printer. The indentation prefix lives in function-local
// static state shared across recursive calls, which makes this function
// non-reentrant and not thread-safe (unchanged from the original).
// Fix: removed the unused `static std::string depth2` local.
template <class RecType, class Metric> void Tree<RecType, Metric>::print_(NodeType *node_p, std::ostream &ostr) const
{
    static char depth[2056];
    static int di = 0;
    // Append one 4-character indentation cell ("| " or blanks).
    auto push = [&](char c) {
        depth[di++] = ' ';
        depth[di++] = c;
        depth[di++] = ' ';
        depth[di++] = ' ';
        depth[di] = 0;
    };
    // Drop the last indentation cell.
    auto pop = [&]() { depth[di -= 4] = 0; };
    ostr << "(" << node_p->ID << ")" << std::endl;
    if (node_p->children.empty())
        return;
    for (std::size_t i = 0; i < node_p->children.size(); ++i) {
        ostr << depth;
        if (i < node_p->children.size() - 1) {
            ostr << " ├──";
            push('|');
        } else {
            ostr << " └──";
            push(' ');
        }
        print_(node_p->children[i], ostr);
        pop();
    }
}
// Pre-order list of every node in the tree, root first.
template <class RecType, class Metric> auto Tree<RecType, Metric>::get_all_nodes() -> std::vector<Node_ptr>
{
    std::vector<Node_ptr> nodes;
    nodes.push_back(root);
    get_all_nodes_(root, nodes);
    return nodes;
}
// Recursive worker: append each child, then descend into it (pre-order).
template <class RecType, class Metric>
void Tree<RecType, Metric>::get_all_nodes_(Node_ptr node_p, std::vector<Node_ptr> &output)
{
    for (const auto &child : node_p->children) {
        output.push_back(child);
        get_all_nodes_(child, output);
    }
}
/*** traverse the tree from root and do something with every node ***/
template <class RecType, class Metric> void Tree<RecType, Metric>::traverse(const std::function<void(Node_ptr)> &f)
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    // Iterative depth-first walk; f runs once per node, root first.
    std::stack<Node_ptr> pending;
    pending.push(root);
    while (!pending.empty()) {
        Node_ptr n = pending.top();
        pending.pop();
        f(n);
        for (const auto &child : *n)
            pending.push(child);
    }
}
// Like traverse(), but the callback runs for every node EXCEPT the root.
template <class RecType, class Metric>
void Tree<RecType, Metric>::traverse_child(const std::function<void(Node_ptr)> &f)
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    std::stack<Node_ptr> pending;
    pending.push(root);
    while (!pending.empty()) {
        Node_ptr n = pending.top();
        pending.pop();
        for (const auto &child : *n) {
            pending.push(child);
            f(child);
        }
    }
}
// Pre-order serialization. After a node's children are written, a null
// sentinel (SerializedNode built from the Tree*, is_null == true) marks
// "end of children" so deserialize() can rebuild the topology from the
// flat stream.
template <class RecType, class Metric>
template <class Archive>
inline void Tree<RecType, Metric>::serialize_aux(Node_ptr node, Archive &archive)
{
    SerializedNode<RecType, Metric> sn(node);
    if (node->children.size() > 0) {
        archive << SERIALIZATION_NVP2("node", sn);
        for (auto &c : node->children) {
            serialize_aux(c, archive);
        }
        // Closing sentinel for this node's child list.
        SerializedNode<RecType, Metric> snn(this);
        archive << SERIALIZATION_NVP2("node", snn);
    } else {
        archive << SERIALIZATION_NVP2("node", sn);
    }
}
// Serialize the whole tree: payload storage first, then the node topology.
template <class RecType, class Metric>
template <class Archive>
inline void Tree<RecType, Metric>::serialize(Archive &archive)
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    archive << data << index_map;
    serialize_aux(root, archive);
}
// Rebuild the tree from a stream written by serialize(): payload storage
// first, then nodes in pre-order with null sentinels closing each child
// list (see serialize_aux).
template <class RecType, class Metric>
template <class Archive, class Stream>
inline void Tree<RecType, Metric>::deserialize(Archive &input, Stream &stream)
{
    SerializedNode<RecType, Metric> node(this);
    std::unique_lock<std::shared_timed_mutex> lk(global_mut);
    (void)lk;
    input >> data >> index_map;
    try {
        // First record is the root.
        input >> SERIALIZATION_NVP2("node", node);
        std::stack<Node_ptr> parentstack;
        parentstack.push(node.node);
        while (!stream.eof()) {
            // Deliberately shadows the outer `node`: each iteration reads
            // one record into a fresh wrapper.
            SerializedNode<RecType, Metric> node(this);
            input >> SERIALIZATION_NVP2("node", node);
            if (!node.is_null) {
                parentstack.top()->children.push_back(node.node);
                node.node->parent = parentstack.top();
                if (node.has_children) {
                    parentstack.push(node.node);
                }
            } else {
                // Null sentinel: the current parent's child list is done.
                // NOTE(review): assumes sentinels are balanced; a malformed
                // stream could pop an empty stack — confirm stream format.
                parentstack.pop();
            }
        }
    } catch (...) { /* hack to catch end of stream */
    }
    root = node.node;
}
// Structural equality of two subtrees: same IDs, levels, parent distances
// and payloads, with children matching pairwise in order.
template <class RecType, class Metric>
inline bool Tree<RecType, Metric>::same_tree(const Node_ptr lhs, const Node_ptr rhs) const
{
    std::shared_lock<std::shared_timed_mutex> guard(global_mut);
    (void)guard;
    if (lhs == rhs)
        return true;
    // Compare the nodes themselves.
    const bool same_node = lhs->ID == rhs->ID && lhs->level == rhs->level &&
                           lhs->parent_dist == rhs->parent_dist && lhs->get_data() == rhs->get_data();
    if (!same_node)
        return false;
    if (lhs->children.size() != rhs->children.size())
        return false;
    // Recurse into the children pairwise.
    for (std::size_t i = 0; i < lhs->children.size(); i++) {
        if (!same_tree(lhs->children[i], rhs->children[i]))
            return false;
    }
    return true;
}
// Recursive descent insertion: hand x to the nearest child whose covering
// ball contains it; if none does, attach x directly under p, one level below.
template <typename RecType, class Metric>
inline Node<RecType, Metric> *Tree<RecType, Metric>::insert_(Node_ptr p, Node_ptr x)
{
    auto children = sortChildrenByDistance(p, x);
    auto child_idx = std::get<0>(children);
    for (auto qi : child_idx) {
        auto &q = p->children[qi];
        // Nearest-first: the first child that covers x takes it.
        if (q->dist(x) <= q->covdist()) {
            auto q1 = insert_(q, x);
            p->children[qi] = q1;
            q1->parent = p;
            q1->parent_dist = p->dist(q1);
            return p;
        }
    }
    // No child covers x: make it a direct child of p.
    p->children.push_back(x);
    x->parent = p;
    x->parent_dist = p->dist(x);
    x->level = p->level - 1;
    return p;
}
// Attach x under p, first migrating any of p's descendants that are closer
// to x than to their current position (cover-tree rebalance operation).
template <typename RecType, class Metric>
inline auto Tree<RecType, Metric>::rebalance(Node_ptr p, Node_ptr x) -> Node_ptr
{
    x->level = p->level - 1;
    auto p1 = p;
    for (std::size_t i = 0; i < p->children.size(); i++) {
        // rebalance_ may rebuild the child subtree and return the set of
        // nodes (moveset) that should migrate into x's subtree.
        rset_t res = rebalance_(p, p->children[i], x);
        Node_ptr q1 = std::get<0>(res);
        if (q1 != nullptr) {
            p1->children[i] = q1;
        }
        auto moveset = std::get<1>(res);
        for (auto r : moveset) {
            x = insert(x, r);
        }
    }
    p1->setChild(x);
    return p1;
}
// Recursive helper for rebalance(): for subtree q under p, decide which
// nodes stay near p and which migrate towards x. Returns
// (new subtree root or nullptr if the whole subtree dissolved, moveset, stayset).
template <typename RecType, class Metric>
inline auto Tree<RecType, Metric>::rebalance_(Node_ptr p, Node_ptr q, Node_ptr x) -> rset_t
{
    if (p->dist(q) > q->dist(x)) {
        // q as a whole is closer to x: split all its descendants by which
        // side (p or x) they are closer to.
        std::vector<Node_ptr> moveset;
        std::vector<Node_ptr> stayset;
        auto descendants = q->descendants();
        for (auto &r : descendants) {
            if (r->dist(p) > r->dist(x)) {
                moveset.push_back(r);
            } else {
                stayset.push_back(r);
            }
        }
        return std::make_tuple(nullptr, moveset, stayset);
    } else {
        // q stays: recurse into its children and aggregate their verdicts.
        std::vector<Node_ptr> moveset1;
        std::vector<Node_ptr> stayset1;
        auto q1 = q;
        for (auto it = q->children.begin(); it != q->children.end();) {
            auto r = *it;
            auto res = rebalance_(p, r, x);
            auto r1 = std::get<0>(res);
            auto moveset = std::get<1>(res);
            auto stayset = std::get<2>(res);
            moveset1.insert(moveset1.end(), moveset.begin(), moveset.end());
            stayset1.insert(stayset1.end(), stayset.begin(), stayset.end());
            if (r1 == nullptr) {
                // Child subtree dissolved entirely: drop it from the list.
                it = q->children.erase(it);
            } else {
                *it = r1;
                it++;
            }
        }
        // Re-adopt staying nodes that q can still cover; the rest bubble
        // up to the caller.
        for (auto it = stayset1.begin(); it != stayset1.end();) {
            auto r = *it;
            if (r->dist(q1) <= q1->covdist()) {
                q1 = insert(q1, r);
                it = stayset1.erase(it);
            } else {
                it++;
            }
        }
        return std::make_tuple(q1, moveset1, stayset1);
    }
}
// Radius = maximum distance from points[0] to any other point in the list.
template <typename RecType, typename Metric>
inline double Tree<RecType, Metric>::find_neighbour_radius(const std::vector<RecType> &points)
{
    // Fix: guard the empty case — the original dereferenced points[0]
    // unconditionally (UB on an empty vector).
    if (points.empty())
        return 0.0;
    // NOTE(review): numeric_limits<double>::min() is the smallest POSITIVE
    // double, not the most negative value; this works only because metric
    // distances are expected to be non-negative — confirm, or use lowest().
    double radius = std::numeric_limits<double>::min();
    auto &p1 = points[0];
    for (std::size_t i = 1; i < points.size(); i++) {
        double distance = metric(p1, points[i]);
        if (distance > radius)
            radius = distance;
    }
    return radius;
}
// Radius = maximum distance from points[IDS[0]] to the other selected points.
template <typename RecType, typename Metric>
inline double Tree<RecType, Metric>::find_neighbour_radius(const std::vector<std::size_t> &IDS,
                                                           const std::vector<RecType> &points)
{
    // Fix: guard the empty case — the original indexed IDS[0]
    // unconditionally (UB on an empty ID list).
    if (IDS.empty())
        return 0.0;
    // NOTE(review): min() is the smallest positive double, not lowest();
    // valid only because distances are expected non-negative.
    double radius = std::numeric_limits<double>::min();
    auto &p1 = points[IDS[0]];
    for (std::size_t i = 1; i < IDS.size(); i++) {
        double distance = metric(p1, points[IDS[i]]);
        if (distance > radius)
            radius = distance;
    }
    return radius;
}
// Radius = maximum distance from the record with ID IDS[0] to the records
// of the other listed IDs (records fetched from the tree itself).
template <typename RecType, typename Metric>
inline double Tree<RecType, Metric>::find_neighbour_radius(const std::vector<std::size_t> &IDS)
{
    // Fix: guard the empty case — the original indexed IDS[0]
    // unconditionally (UB on an empty ID list).
    if (IDS.empty())
        return 0.0;
    // NOTE(review): min() is the smallest positive double, not lowest();
    // valid only because distances are expected non-negative.
    double radius = std::numeric_limits<double>::min();
    auto &p1 = (*this)[IDS[0]];
    for (std::size_t i = 1; i < IDS.size(); i++) {
        double distance = metric(p1, (*this)[IDS[i]]);
        if (distance > radius)
            radius = distance;
    }
    return radius;
}
// Advance to the next cluster once the current one reached its quota;
// returns true when every cluster is full.
template <typename RecType, typename Metric>
inline bool Tree<RecType, Metric>::update_idx(std::size_t &cur_idx, const std::vector<std::size_t> &distribution_sizes,
                                              std::vector<std::vector<std::size_t>> &result)
{
    if (result[cur_idx].size() == distribution_sizes[cur_idx])
        ++cur_idx;
    return cur_idx == result.size();
}
// Harvest node IDs from the subtree rooted at proot into result[cur_idx],
// visiting children in order of increasing distance to `center` and filling
// each cluster up to its quota (distribution_sizes). The subtree root
// itself is emitted before the first child that is farther from center
// than the root is, or last if all children are closer. Returns true once
// every quota is met.
template <typename RecType, typename Metric>
inline bool Tree<RecType, Metric>::grab_sub_tree(Node_ptr proot, const RecType &center,
                                                 std::unordered_set<std::size_t> &parsed_points,
                                                 const std::vector<std::size_t> &distribution_sizes,
                                                 std::size_t &cur_idx, std::vector<std::vector<std::size_t>> &result)
{
    auto childs = this->sortChildrenByDistance(proot, center);
    auto idx = std::get<0>(childs);
    auto dists = std::get<1>(childs);
    bool is_root_added = false;
    // Root first when it is a leaf or closer to center than its nearest child.
    if (dists.empty() || dists[0] > proot->dist(center)) {
        if (parsed_points.find(proot->ID) == parsed_points.end()) {
            result[cur_idx].push_back(proot->ID);
            parsed_points.insert(proot->ID);
            is_root_added = true;
            if (update_idx(cur_idx, distribution_sizes, result))
                return true;
        }
    }
    std::size_t index = 0;
    for (auto i : idx) {
        // Emit the root as soon as the next (sorted) child is farther than it.
        if (!is_root_added && dists[index] > proot->dist(center)) {
            if (parsed_points.find(proot->ID) == parsed_points.end()) {
                result[cur_idx].push_back(proot->ID);
                parsed_points.insert(proot->ID);
                is_root_added = true;
                if (update_idx(cur_idx, distribution_sizes, result))
                    return true;
            }
        }
        // Recurse into unvisited children, nearest first.
        if (parsed_points.find(proot->children[i]->ID) == parsed_points.end())
            if (grab_sub_tree(proot->children[i], center, parsed_points, distribution_sizes, cur_idx, result))
                return true;
        index++;
    }
    // Root was farther than every child: emit it last.
    if (!is_root_added) {
        if (parsed_points.find(proot->ID) == parsed_points.end()) {
            result[cur_idx].push_back(proot->ID);
            parsed_points.insert(proot->ID);
            if (update_idx(cur_idx, distribution_sizes, result))
                return true;
        }
    }
    return false;
}
// Harvest the subtree rooted at start_point, then climb towards the root,
// harvesting each ancestor's subtree, until all cluster quotas are met.
template <typename RecType, typename Metric>
inline bool Tree<RecType, Metric>::grab_tree(Node_ptr start_point, const RecType &center,
                                             std::unordered_set<std::size_t> &parsed_points,
                                             const std::vector<std::size_t> &distribution_sizes, std::size_t &cur_idx,
                                             std::vector<std::vector<std::size_t>> &result)
{
    if (grab_sub_tree(start_point, center, parsed_points, distribution_sizes, cur_idx, result))
        return true;
    parsed_points.insert(start_point->ID);
    for (Node_ptr ancestor = start_point->parent; ancestor != nullptr; ancestor = ancestor->parent) {
        if (grab_sub_tree(ancestor, center, parsed_points, distribution_sizes, cur_idx, result))
            return true;
    }
    return false;
}
// Validate a cumulative cluster-size distribution: entries must be
// non-decreasing and every entry must lie in [0, 1].
// Fix: the original loop started at index 1 and never range-checked
// distribution[0], so a first entry outside [0, 1] was silently accepted.
inline void is_distribution_ok(const std::vector<double> &distribution)
{
    if (distribution.empty())
        return;
    double d0 = distribution[0];
    if (d0 < 0.0 || d0 > 1.0)
        throw bad_distribution_exception{};
    for (std::size_t i = 1; i < distribution.size(); i++) {
        if (distribution[i] < d0)
            throw unsorted_distribution_exception{};
        if (distribution[i] < 0.0 || distribution[i] > 1.0)
            throw bad_distribution_exception{};
        d0 = distribution[i];
    }
}
// Cluster the tree around points[IDS[0]]: validates the distribution,
// derives the covering radius from the selected points, and delegates to
// the shared implementation.
template <typename RecType, typename Metric>
inline std::vector<std::vector<std::size_t>> Tree<RecType, Metric>::clustering(const std::vector<double> &distribution,
                                                                               const std::vector<std::size_t> &IDS,
                                                                               const std::vector<RecType> &points)
{
    is_distribution_ok(distribution);
    const double radius = find_neighbour_radius(IDS, points);
    const RecType &center = points[IDS[0]];
    return clustering_impl(distribution, center, radius);
}
// Overload resolving the cluster center by node ID instead of raw records.
template <typename RecType, typename Metric>
inline std::vector<std::vector<std::size_t>> Tree<RecType, Metric>::clustering(const std::vector<double> &distribution,
                                                                               const std::vector<std::size_t> &IDS)
{
    is_distribution_ok(distribution);
    double radius = find_neighbour_radius(IDS);
    Node_ptr center = this->get(IDS[0]);
    // NOTE(review): `center` is a Node_ptr, but the only clustering_impl
    // visible in this file takes `const RecType &`. Unless tree.hpp declares
    // a Node_ptr overload, this call cannot compile when instantiated —
    // possibly center->get_data() was intended; confirm against the header.
    return clustering_impl(distribution, center, radius);
}
// Clusters the tree according to `distribution`, seeding from the first of
// the supplied external `points`.
template <typename RecType, typename Metric>
inline std::vector<std::vector<std::size_t>> Tree<RecType, Metric>::clustering(const std::vector<double> &distribution,
                                                                               const std::vector<RecType> &points)
{
    is_distribution_ok(distribution);
    const double r = find_neighbour_radius(points);
    return clustering_impl(distribution, points[0], r);
}
// Core clustering routine. Converts the cumulative `distribution` into
// per-cluster point counts, locates a tree level whose covering radius
// encloses `radius`, and greedily grabs points into the result buckets
// starting from the node nearest to `center`.
template <typename RecType, typename Metric>
inline std::vector<std::vector<std::size_t>>
Tree<RecType, Metric>::clustering_impl(const std::vector<double> &distribution, const RecType &center, double radius)
{
    std::vector<std::size_t> distribution_sizes;
    distribution_sizes.reserve(distribution.size());
    auto tree_size = size();
    // Scale each cumulative fraction to an absolute point count (truncated).
    for (auto d : distribution) {
        distribution_sizes.push_back(static_cast<double>(tree_size) * d);
    }
    // Turn cumulative counts into per-bucket counts (first differences).
    std::size_t ls = distribution_sizes[0];
    for (std::size_t i = 1; i < distribution_sizes.size(); i++) {
        auto ls1 = distribution_sizes[i];
        distribution_sizes[i] -= ls;
        ls = ls1;
    }
    auto proot = nn(center);
    int level = proot->level;
    double level_radius = std::pow(base, level);
    // find level covering all points
    // NOTE(review): assumes an ancestor with a sufficient covering radius is
    // reached before `parent` becomes null — confirm for very large radii.
    while (level_radius < radius) {
        proot = proot->parent;
        level_radius = std::pow(base, proot->level);
    }
    std::size_t cur_distrib_idx = 0;
    std::vector<std::vector<std::size_t>> result(distribution.size());
    // Skip buckets that were rounded down to zero points.
    while (cur_distrib_idx < distribution_sizes.size() && distribution_sizes[cur_distrib_idx] == 0) {
        cur_distrib_idx++;
    }
    if (cur_distrib_idx == distribution_sizes.size())
        return result;
    std::unordered_set<std::size_t> parsed_points;
    grab_tree(proot, center, parsed_points, distribution_sizes, cur_distrib_idx, result);
    return result;
}
template <typename T> std::string convert_to_string(const T &t) { return std::to_string(t); }
// Specializes convert_to_string for std::vector<type>, rendering the vector
// as a "[ a,b,c ]" list for the JSON output. Kept as a macro so one explicit
// specialization per element type can be stamped out below.
#define DECLARE_CONVERT(type)                                                                                          \
    template <> inline std::string convert_to_string<std::vector<type>>(const std::vector<type> &v)                    \
    {                                                                                                                  \
        std::ostringstream ostr;                                                                                       \
        ostr << "[ ";                                                                                                  \
        for (std::size_t i = 0; i < v.size(); i++) {                                                                   \
            ostr << std::to_string(v[i]);                                                                              \
            if (i != v.size() - 1)                                                                                     \
                ostr << ",";                                                                                           \
        }                                                                                                              \
        ostr << " ]";                                                                                                  \
        return ostr.str();                                                                                             \
    }
DECLARE_CONVERT(int)
DECLARE_CONVERT(unsigned int)
DECLARE_CONVERT(int64_t)
DECLARE_CONVERT(uint64_t)
DECLARE_CONVERT(double)
DECLARE_CONVERT(float)
DECLARE_CONVERT(char)
// Serializes the tree to JSON using the default record printer
// (convert_to_string) for node values.
template <typename RecType, typename Metric> inline std::string Tree<RecType, Metric>::to_json()
{
    auto default_printer = [](const auto &rec) { return convert_to_string(rec); };
    return to_json(default_printer);
}
// Serializes the whole tree to a JSON document with a "nodes" array
// (id + printed record value) and an "edges" array (parent -> child with
// the stored edge length). `printer` converts one record to its JSON value
// text and must emit valid JSON itself.
template <typename RecType, typename Metric>
inline std::string Tree<RecType, Metric>::to_json(std::function<std::string(const RecType &)> printer)
{
    struct node_t {
        std::size_t ID;
        RecType value;
    };
    struct edge_t {
        std::size_t source;
        std::size_t target;
        Distance distance;
    };
    std::vector<node_t> nodes;
    std::vector<edge_t> edges;
    // One traversal gathers both node records and parent->child edges.
    traverse([&nodes, &edges](auto p) {
        nodes.emplace_back(node_t{p->ID, p->get_data()});
        if (p->parent != nullptr) {
            edges.emplace_back(edge_t{p->parent->ID, p->ID, p->parent_dist});
        }
    });
    std::ostringstream ostr;
    ostr << "{" << std::endl;
    ostr << "\"nodes\": [" << std::endl;
    for (std::size_t i = 0; i < nodes.size(); i++) {
        auto &n = nodes[i];
        ostr << "{ \"id\":" << n.ID << ", \"values\":" << printer(n.value) << "}";
        // JSON forbids a trailing comma after the last element.
        if (i != nodes.size() - 1)
            ostr << ",";
        ostr << std::endl;
    }
    ostr << "]," << std::endl;
    ostr << "\"edges\": [" << std::endl;
    for (std::size_t i = 0; i < edges.size(); i++) {
        auto &n = edges[i];
        ostr << "{ \"source\":" << n.source << ", \"target\":" << n.target << ", \"distance\":" << std::fixed
             << n.distance << "}";
        if (i != edges.size() - 1)
            ostr << ",";
        ostr << std::endl;
    }
    ostr << "]}" << std::endl;
    return ostr.str();
}
// Accumulates edge lengths and hop count along the path from `p` up to the
// tree root. Returns {total length, number of edges}.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::distance_to_root(Node_ptr p) const -> std::pair<Distance, std::size_t>
{
    Distance total = 0;
    std::size_t hops = 0;
    for (; p->parent != nullptr; p = p->parent) {
        total += p->parent_dist;
        ++hops;
    }
    return std::pair{total, hops};
}
// Walks `p` upwards until its level is at least `level`, accumulating edge
// lengths and hop count. NOTE: `p` is taken by reference and advanced in
// place — on return it points at the ancestor that was reached.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::distance_to_level(Node_ptr &p, int level) const -> std::pair<Distance, std::size_t>
{
    Distance dist = 0;
    std::size_t cnt = 0;
    while (p->level < level) {
        dist += p->parent_dist;
        cnt++;
        p = p->parent;
    }
    return std::pair{dist, cnt};
}
// Path length and edge count between two nodes of equal level. Siblings go
// through their shared parent (two edges); otherwise the two root paths are
// combined as an approximation.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::graph_distance(Node_ptr p1, Node_ptr p2) const -> std::pair<Distance, std::size_t>
{
    if (p1->parent == p2->parent)
        return std::pair{p1->parent_dist + p2->parent_dist, 2};
    const auto up1 = distance_to_root(p1);
    const auto up2 = distance_to_root(p2);
    return std::pair{up1.first + up2.first, up1.second + up2.second};
}
// Average edge length along the path connecting two nodes: the deeper node
// is first lifted to the level of the shallower one, then the remaining
// graph path is added and the total is averaged over the edge count.
// Fix: when the nodes already coincide at the same level no edges were
// walked and the original divided 0 by 0 (NaN / UB depending on Distance);
// that case now returns 0.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::distance_by_node(Node_ptr p1, Node_ptr p2) const -> Distance
{
    std::pair<Distance, std::size_t> dist1{0, 0};
    if (p1->level < p2->level) {
        dist1 = distance_to_level(p1, p2->level);
    } else if (p2->level < p1->level) {
        dist1 = distance_to_level(p2, p1->level);
    }
    if (p1 == p2) {
        if (dist1.second == 0)
            return 0;   // same node, nothing walked — avoid 0/0
        return dist1.first / dist1.second;
    }
    auto dist = graph_distance(p1, p2);
    return (dist1.first + dist.first) / (dist1.second + dist.second);
}
// Average-edge-length distance between the records stored under two IDs.
// Throws std::runtime_error if either ID is unknown; identical IDs yield 0.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::distance_by_id(std::size_t id1, std::size_t id2) const -> Distance
{
    // Resolve both IDs up front so missing IDs are reported before any work.
    const auto it1 = index_map.find(id1);
    if (it1 == index_map.end())
        throw std::runtime_error("tree has no such ID: " + std::to_string(id1));
    const auto it2 = index_map.find(id2);
    if (it2 == index_map.end())
        throw std::runtime_error("tree has no such ID: " + std::to_string(id2));
    if (id1 == id2)
        return 0;
    return distance_by_node(data[it1->second].second, data[it2->second].second);
}
// Distance between two arbitrary records: each is mapped to its nearest
// tree node and the node-level distance is measured between those.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::distance(const RecType &r1, const RecType &r2) const -> Distance
{
    const auto node1 = nn(r1);
    const auto node2 = nn(r2);
    if (node1 == node2)
        return 0;   // both records map to the same node
    return distance_by_node(node1, node2);
}
// Builds the full pairwise distance matrix over all stored records as an
// upper-triangular blaze compressed matrix. Direct parent/child pairs reuse
// the cached edge length instead of re-evaluating the metric.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::matrix() const -> blaze::CompressedMatrix<Distance, blaze::rowMajor>
{
    auto N = data.size();
    blaze::CompressedMatrix<Distance, blaze::rowMajor> m(N, N);
    // Only the strict upper triangle is populated.
    m.reserve(N * (N - 1) / 2);
    for (std::size_t i = 0; i < data.size(); i++) {
        for (std::size_t j = i + 1; j < data.size(); j++) {
            if (data[i].second->parent == data[j].second) {
                // node J is a parent for node I, so we can use parent_dist
                m.append(i, j, data[i].second->parent_dist);
            } else if (data[j].second->parent == data[i].second) {
                // node I is a parent for node J, so we can use parent_dist
                m.append(i, j, data[j].second->parent_dist);
            } else {
                m.append(i, j, metric(data[i].first, data[j].first));
            }
        }
        // blaze low-level append protocol requires finalizing each row.
        m.finalize(i);
    }
    return m;
}
// Distance between the records at two storage indices. When one node is the
// direct parent of the other, the stored edge length is returned without
// re-evaluating the metric.
template <typename RecType, typename Metric>
auto Tree<RecType, Metric>::operator()(std::size_t id1, std::size_t id2) const -> Distance
{
    const auto &n1 = data[id1].second;
    const auto &n2 = data[id2].second;
    if (n1->parent == n2)
        return n1->parent_dist;
    if (n2->parent == n1)
        return n2->parent_dist;
    return metric(data[id1].first, data[id2].first);
}
} // namespace metric
#endif
| 44,624
|
C++
|
.cpp
| 1,336
| 30.772455
| 120
| 0.620939
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,536
|
knn_graph.cpp
|
metric-space-ai_metric/metric/space/knn_graph.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
#include "knn_graph.hpp"
namespace metric {
// Builds a k-nearest-neighbour graph over `samples`.
// neighbors_num       - target number of neighbours per node (k)
// max_bruteforce_size - subsets up to this size are solved exactly
// max_iterations      - cap on the randomized refinement passes
// update_range        - refinement stops once the fraction of newly added
//                       edges falls below this threshold
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::KNNGraph(const Container &samples, size_t neighbors_num,
                                                                       size_t max_bruteforce_size, int max_iterations,
                                                                       double update_range)
    : Graph<WeightType, isDense, isSymmetric>(samples.size()), _nodes(samples), _neighbors_num(neighbors_num),
      _max_bruteforce_size(max_bruteforce_size), _max_iterations(max_iterations), _update_range(update_range)
{
    construct(samples);
}
// Builds the full pairwise distance matrix (O(n^2) metric evaluations).
// Fixes: the matrix is now cleared first — construct() is re-run after every
// insert/erase, and the original kept appending rows, leaving stale rows
// from previous builds; samples are also borrowed by reference instead of
// being copied per loop iteration, and the loops use std::size_t to avoid
// the signed/unsigned comparison.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
void KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::calculate_distance_matrix(const Container &samples)
{
    _distance_matrix.clear();
    Distance distance;
    const std::size_t n = samples.size();
    _distance_matrix.reserve(n);
    for (std::size_t i = 0; i < n; i++) {
        // distances from sample i to every sample (including itself)
        const auto &i_point = samples[i];
        std::vector<distance_type> row;
        row.reserve(n);
        for (std::size_t j = 0; j < n; j++) {
            row.push_back(distance(i_point, samples[j]));
        }
        _distance_matrix.push_back(std::move(row));
    }
}
// Builds the graph's edge list with randomized divide-and-conquer passes,
// merging newly discovered (undirected) edges into the accumulated set
// until the update rate drops below _update_range or _max_iterations hits.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
void KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::make_edge_pairs(const Container &samples)
{
    this->matrix.resize(samples.size(), samples.size());
    std::vector<int> ids(samples.size());
    std::iota(ids.begin(), ids.end(), 0);
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    double updated_percent = 1.0;
    int iterations = 0;
    // we iterate until we can found new edges (updated edges between nodes)
    while (updated_percent > _update_range) {
        // create or update approximated knn graph
        auto newEdgesPairs = random_pair_division(samples, ids, _max_bruteforce_size);
        // then update edge pair and check how many was updated
        int was_size = edgesPairs.size();
        // O(new * existing) duplicate scan; both orientations of an edge
        // count as the same edge.
        for (int j = 0; j < newEdgesPairs.size(); j++) {
            bool already_exist = false;
            for (int k = 0; k < edgesPairs.size(); k++) {
                if (edgesPairs[k] == newEdgesPairs[j] || (edgesPairs[k].first == newEdgesPairs[j].second &&
                                                          edgesPairs[k].second == newEdgesPairs[j].first)) {
                    already_exist = true;
                    break;
                }
            }
            if (!already_exist) {
                edgesPairs.push_back(newEdgesPairs[j]);
            }
        }
        // Relative growth of the edge set drives the convergence test.
        if (was_size > 0) {
            updated_percent = (double)(edgesPairs.size() - was_size) / was_size;
        }
        iterations++;
        if (iterations >= _max_iterations) {
            break;
        }
    }
    // finish graph
    this->buildEdges(edgesPairs);
}
// Full (re)build of the graph for the given samples: pairwise distances
// first, then the approximate kNN edge set; marks the graph usable.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
void KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::construct(const Container &samples)
{
    calculate_distance_matrix(samples);
    make_edge_pairs(samples);
    this->valid = true;
}
// Randomized divide-and-conquer step: subsets up to `max_size` are solved
// exactly by brute_force ("conquer"); larger sets are split around two
// randomly chosen pivot samples and recursed ("divide"). `ids` carries the
// global indices of `samples` so edges always refer to original node numbers.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
std::vector<std::pair<size_t, size_t>>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::random_pair_division(const Container &samples,
                                                                                   const std::vector<int> &ids,
                                                                                   int max_size)
{
    Distance d;
    std::vector<Sample> A;
    std::vector<Sample> B;
    std::vector<int> A_ids;
    std::vector<int> B_ids;
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    std::vector<std::pair<size_t, size_t>> edgesPairsResult;
    auto n = samples.size();
    if (n > 0) {
        std::random_device rnd;
        std::mt19937 mt(rnd());
        std::uniform_int_distribution<int> dist(0, n - 1);
        if (n <= max_size) {
            // conquer stage
            edgesPairs = brute_force(samples, ids);
        } else {
            // divide stage
            // take random nodes(samples)
            // NOTE(review): a and b may be drawn as the same sample, in which
            // case every point lands in B and this level recurses on the full
            // set again — termination then relies on a luckier redraw.
            auto a = samples[dist(mt)];
            auto b = samples[dist(mt)];
            Sample x;
            // and divide all nodes to two groups, where each node is close to one of two initial points
            for (int i = 0; i < n; i++) {
                x = samples[i];
                if (d(x, a) < d(x, b)) {
                    A.push_back(x);
                    A_ids.push_back(ids[i]);
                } else {
                    B.push_back(x);
                    B_ids.push_back(ids[i]);
                }
            }
            // and recursively divide both groups again
            edgesPairsResult = random_pair_division(A, A_ids, max_size);
            edgesPairs.insert(edgesPairs.end(), edgesPairsResult.begin(), edgesPairsResult.end());
            edgesPairsResult = random_pair_division(B, B_ids, max_size);
            edgesPairs.insert(edgesPairs.end(), edgesPairsResult.begin(), edgesPairsResult.end());
        }
    }
    return edgesPairs;
}
// Exact kNN edges for a subset of samples. `ids` holds the subset's global
// node indices.
// Fix: the cached distances are now looked up through those global indices
// (_distance_matrix[ids[i]][ids[j]]); the original used the local subset
// positions (_distance_matrix[i][j]), pairing the subset with distances of
// unrelated samples whenever it was called on a divided partition.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
std::vector<std::pair<size_t, size_t>>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::brute_force(const Container &samples,
                                                                          const std::vector<int> &ids)
{
    std::vector<std::pair<size_t, size_t>> edgesPairs;
    int update_count = 0;
    // Local copy of the subset's distance sub-matrix.
    std::vector<std::vector<distance_type>> distances;
    std::vector<distance_type> distance_row;
    for (int i = 0; i < ids.size(); i++) {
        distance_row.clear();
        for (int j = 0; j < ids.size(); j++) {
            // map local subset positions to global node indices
            distance_row.push_back(_distance_matrix[ids[i]][ids[j]]);
        }
        distances.push_back(distance_row);
    }
    for (int i = 0; i < ids.size(); i++) {
        auto idxs = sort_indexes(distances[i]);
        for (int j = 0; j < idxs.size(); j++) {
            // omit first item because it is pair between the same item
            if (j == 0) {
                continue;
            }
            // and break if we get '_neighbors_num' edges
            if (j >= _neighbors_num) {
                break;
            }
            bool already_exist = false;
            std::vector<int>::const_iterator max_index = std::max_element(ids.begin(), ids.end());
            // here we keep number of edges from each node
            std::vector<int> num_edges_by_node(ids[std::distance(ids.begin(), max_index)] + 1, 0);
            for (int k = 0; k < edgesPairs.size(); k++) {
                num_edges_by_node[edgesPairs[k].first]++;
                num_edges_by_node[edgesPairs[k].second]++;
                if (edgesPairs[k] == std::pair<size_t, size_t>(ids[i], ids[idxs[j]]) ||
                    edgesPairs[k] == std::pair<size_t, size_t>(ids[idxs[j]], ids[i])) {
                    already_exist = true;
                    break;
                }
                // if we want to keep neighbours strickt not more then _neighbors_num
                if (_not_more_neighbors) {
                    if (num_edges_by_node[edgesPairs[k].first] >= _neighbors_num ||
                        num_edges_by_node[edgesPairs[k].second] >= _neighbors_num) {
                        already_exist = true;
                        break;
                    }
                }
            }
            // add current node and closest to result
            if (!already_exist) {
                edgesPairs.emplace_back(ids[i], ids[idxs[j]]);
            }
        }
    }
    return edgesPairs;
}
// Greedy nearest-neighbour graph search (GNNS): from several random start
// nodes, repeatedly hops to the neighbour closest to `query`, recording all
// evaluated candidates, and finally returns the `max_closest_num` best.
// A walk stops early when it revisits its previous node (local minimum).
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::vector<size_t> KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::gnnn_search(
    const Sample &query, int max_closest_num, int iterations, int num_greedy_moves, int num_expansions)
{
    std::vector<size_t> result;
    // variables for choosen nodes during search
    std::vector<distance_type> choosen_distances;
    std::vector<int> choosen_nodes;
    // temp variables
    std::vector<distance_type> distances;
    distance_type distance;
    Distance distancer;
    // num_expansions should be less then k(neighbors_num) of the graph
    if (num_expansions > _neighbors_num) {
        num_expansions = _neighbors_num;
    }
    // if params missed
    if (num_expansions < 0) {
        num_expansions = _neighbors_num;
    }
    // if still negative
    if (num_expansions < 0) {
        num_expansions = 1;
    }
    if (num_greedy_moves < 0) {
        // if param is missed we choose 20% of all nodes as number of mooves
        num_greedy_moves = round(_nodes.size() * 0.2);
    }
    //
    if (_nodes.size() > 0) {
        for (int i = 0; i < iterations; i++) {
            std::random_device rnd;
            std::mt19937 mt(rnd());
            std::uniform_int_distribution<int> dist(0, _nodes.size() - 1);
            // get initial random node from the graph
            int checking_node = dist(mt);
            int prev_node = -1;
            int new_node;
            // walk from initial node on distance 'num_greedy_moves' steps
            for (int j = 0; j < num_greedy_moves; j++) {
                distances.clear();
                // 0 index is for node itself, 1 - is first circle of neighbours
                auto neighbours = this->getNeighbours(checking_node, 1)[1];
                // get first num_expansions neighbours for the checking node and calculate distances to the query
                for (int p = 0; p < num_expansions; p++) {
                    if (p < neighbours.size()) {
                        distance = distancer(_nodes[neighbours[p]], query);
                        distances.push_back(distance);
                        // remember every candidate exactly once
                        if (std::find(choosen_nodes.begin(), choosen_nodes.end(), neighbours[p]) ==
                            choosen_nodes.end()) {
                            choosen_distances.push_back(distance);
                            choosen_nodes.push_back(neighbours[p]);
                        }
                    }
                }
                // NOTE(review): if a node has no neighbours `distances` is
                // empty and min_element dereferences end() — confirm nodes
                // always have at least one edge.
                auto min_index = std::min_element(distances.begin(), distances.end());
                new_node = neighbours[std::distance(distances.begin(), min_index)];
                // if we back to the visited node then we fall in loop and search is complete
                if (new_node == prev_node) {
                    break;
                }
                prev_node = checking_node;
                checking_node = new_node;
            }
        }
        // sort distances and return corresopnding nodes from choosen
        auto idxs = sort_indexes(choosen_distances);
        for (int i = 0; i < max_closest_num; i++) {
            if (i < idxs.size()) {
                result.push_back(choosen_nodes[idxs[i]]);
            }
        }
    }
    return result;
}
// Argsort helper: returns the permutation of indices that orders `v`
// ascending; `v` itself is left untouched.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename T1>
std::vector<size_t> KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::sort_indexes(const std::vector<T1> &v)
{
    std::vector<size_t> order(v.size());
    std::iota(order.begin(), order.end(), 0);
    auto by_value = [&v](size_t a, size_t b) { return v[a] < v[b]; };
    std::sort(order.begin(), order.end(), by_value);
    return order;
}
// Appends one sample and rebuilds the whole graph; returns the new node's
// index (always the last position).
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::size_t KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::insert(const Sample &p)
{
    _nodes.push_back(p);
    construct(_nodes);
    return _nodes.size() - 1;
}
// Bulk insert: appends all items, rebuilds the graph once, and returns the
// consecutive indices assigned to the new nodes.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
std::vector<std::size_t> KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::insert(const Container &p)
{
    const std::size_t first_new = _nodes.size();
    for (const auto &sample : p)
        _nodes.push_back(sample);
    construct(_nodes);
    std::vector<std::size_t> ids(p.size());
    std::iota(ids.begin(), ids.end(), first_new);
    return ids;
}
// Inserts `p` only if no existing node lies closer than `threshold`.
// Returns {new index, true} on insert, {0, false} when rejected.
// Fix: on an empty graph gnnn_search returns no candidates and the original
// indexed nn[0] out of bounds; an empty result now inserts unconditionally.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::pair<std::size_t, bool>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::insert_if(const Sample &p,
                                                                        typename Distance::distance_type threshold)
{
    auto nn = gnnn_search(p, 1);
    if (!nn.empty()) {
        Distance metric;
        if (metric(_nodes[nn[0]], p) < threshold)
            return std::pair{0, false};
    }
    auto id = insert(p);
    return std::pair{id, true};
}
// Bulk conditional insert: each item's nearest existing node is checked
// against `threshold`. IDs are assigned up front assuming accepted items
// are appended in order; the actual appends happen in a second pass so all
// searches run against the unmodified graph, and the graph is rebuilt once.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
template <typename Container, typename>
std::vector<std::pair<std::size_t, bool>>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::insert_if(const Container &items,
                                                                        typename Distance::distance_type threshold)
{
    Distance metric;
    std::vector<std::pair<std::size_t, bool>> v;
    v.reserve(items.size());
    std::size_t id = _nodes.size();
    for (auto &i : items) {
        auto nn = gnnn_search(i, 1);
        if (metric(_nodes[nn[0]], i) < threshold) {
            v.emplace_back(0, false);
        } else {
            v.emplace_back(id, true);
            // _nodes.push_back(i);
            id++;
        }
    }
    // Second pass: append only the accepted items, in the original order.
    for (std::size_t i = 0; i < items.size(); i++) {
        if (v[i].second == true) {
            _nodes.push_back(items[i]);
        }
    }
    construct(_nodes);
    return v;
}
// Removes the node at `idx` and rebuilds the graph from the remaining
// samples.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
void KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::erase(std::size_t idx)
{
    auto victim = _nodes.begin();
    std::advance(victim, idx);
    _nodes.erase(victim);
    construct(_nodes);
}
// Approximate single nearest neighbour: greedy graph search asked for one
// result.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::size_t KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::nn(const Sample &p)
{
    return gnnn_search(p, 1)[0];
}
// Returns up to K approximate nearest neighbours of `p` via greedy graph
// search.
// NOTE(review): declared const but delegates to the non-const gnnn_search;
// this will fail to compile if this specialization is instantiated —
// confirm intended const-ness of gnnn_search.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::vector<std::size_t> KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::knn(const Sample &p,
                                                                                           std::size_t K) const
{
    return gnnn_search(p, K);
}
// Range query: greedy graph walks (same strategy as gnnn_search, largely
// duplicated here with fixed iteration parameters) collect candidate nodes,
// then all candidates within `threshold` of `query` are returned as
// (node, distance) pairs sorted by distance.
template <typename Sample, typename Distance, typename WeightType, bool isDense, bool isSymmetric>
std::vector<std::pair<std::size_t, typename Distance::distance_type>>
KNNGraph<Sample, Distance, WeightType, isDense, isSymmetric>::rnn(const Sample &query,
                                                                  typename Distance::distance_type threshold) const
{
    std::vector<std::pair<size_t, typename Distance::distance_type>> result;
    // variables for choosen nodes during search
    std::vector<distance_type> choosen_distances;
    std::vector<int> choosen_nodes;
    // temp variables
    std::vector<distance_type> distances;
    distance_type distance;
    Distance distancer;
    // Fixed search parameters: expand all neighbours, walk 20% of the graph.
    auto num_expansions = _nodes.size();
    auto num_greedy_moves = round(_nodes.size() * 0.2);
    int iterations = 10;
    //
    if (_nodes.size() > 0) {
        for (int i = 0; i < iterations; i++) {
            std::random_device rnd;
            std::mt19937 mt(rnd());
            std::uniform_int_distribution<int> dist(0, _nodes.size() - 1);
            // get initial random node from the graph
            int checking_node = dist(mt);
            int prev_node = -1;
            int new_node;
            // walk from initial node on distance 'num_greedy_moves' steps
            for (int j = 0; j < num_greedy_moves; j++) {
                distances.clear();
                // 0 index is for node itself, 1 - is first circle of neighbours
                auto neighbours = this->getNeighbours(checking_node, 1)[1];
                // get first num_expansions neighbours for the checking node and calculate distances to the query
                for (int p = 0; p < num_expansions; p++) {
                    if (p < neighbours.size()) {
                        distance = distancer(_nodes[neighbours[p]], query);
                        distances.push_back(distance);
                        // record each candidate exactly once
                        if (std::find(choosen_nodes.begin(), choosen_nodes.end(), neighbours[p]) ==
                            choosen_nodes.end()) {
                            choosen_distances.push_back(distance);
                            choosen_nodes.push_back(neighbours[p]);
                        }
                    }
                }
                // NOTE(review): empty `distances` (isolated node) would
                // dereference end() here — confirm graph connectivity.
                auto min_index = std::min_element(distances.begin(), distances.end());
                new_node = neighbours[std::distance(distances.begin(), min_index)];
                // if we back to the visited node then we fall in loop and search is complete
                if (new_node == prev_node) {
                    break;
                }
                prev_node = checking_node;
                checking_node = new_node;
            }
        }
        // sort distances and return corresopnding nodes from choosen
        auto idxs = sort_indexes(choosen_distances);
        // Candidates are sorted ascending, so stop at the first one out of range.
        for (std::size_t i = 0; i < idxs.size(); i++) {
            if (choosen_distances[idxs[i]] <= threshold) {
                result.push_back(std::pair{choosen_nodes[idxs[i]], choosen_distances[idxs[i]]});
            } else {
                break;
            }
        }
    }
    return result;
}
} // namespace metric
| 15,718
|
C++
|
.cpp
| 418
| 33.990431
| 120
| 0.689306
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,537
|
matrix.cpp
|
metric-space-ai_metric/metric/space/matrix.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_SPACE_MATRIX_CPP
#define _METRIC_SPACE_MATRIX_CPP
#include "matrix.hpp"
#include <stdexcept>
#include <type_traits>
namespace metric {
// Cached distance between records i and j. Only the upper triangle of D_
// is stored, so the indices are ordered before the lookup.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::operator()(size_t i, size_t j) const -> distType
{
    check_index(i);
    check_index(j);
    return (i < j) ? D_(i, j) : D_(j, i);
}
// Returns a copy of the record stored at `index` (no bounds check).
template <typename RecType, typename Metric> auto Matrix<RecType, Metric>::operator[](size_t index) const -> RecType
{
    return data_[index];
}
// Number of records currently stored.
template <typename RecType, typename Metric> auto Matrix<RecType, Metric>::size() const -> std::size_t
{
    return data_.size();
}
// Appends a record: grows the upper-triangular distance matrix by one
// row/column and fills the new column with distances to every existing
// record. Returns the new record's index.
template <typename RecType, typename Metric> auto Matrix<RecType, Metric>::insert(const RecType &item) -> std::size_t
{
    const std::size_t n = data_.size();
    D_.resize(n + 1, n + 1, true);
    for (std::size_t row = 0; row < n; row++)
        D_.insert(row, n, metric_(data_[row], item));
    data_.push_back(item);
    return n;
}
// Inserts every item of the container in order and returns the indices
// assigned to them.
template <typename RecType, typename Metric>
template <typename Container, typename>
auto Matrix<RecType, Metric>::insert(const Container &items) -> std::vector<std::size_t>
{
    std::vector<std::size_t> assigned;
    assigned.reserve(items.size());
    for (const auto &item : items)
        assigned.push_back(insert(item));
    return assigned;
}
// Inserts `item` only if no stored record lies within `treshold` of it.
// Returns {index, true} on insert, {0, false} when rejected.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::insert_if(const RecType &item, distType treshold) -> std::pair<std::size_t, bool>
{
    if (size() == 0)
        return std::pair{insert(item), true};
    const auto nearest = nn_(item);
    if (nearest.second <= treshold)
        return std::pair{0, false};
    return std::pair{insert(item), true};
}
// Applies the single-item insert_if to every element, collecting each
// {index, inserted} result in order.
template <typename RecType, typename Metric>
template <typename Container, typename>
auto Matrix<RecType, Metric>::insert_if(const Container &items, distType treshold)
    -> std::vector<std::pair<std::size_t, bool>>
{
    std::vector<std::pair<std::size_t, bool>> results;
    results.reserve(items.size());
    for (const auto &item : items)
        results.push_back(insert_if(item, treshold));
    return results;
}
// Removes the record at `index` and compacts the upper-triangular distance
// matrix by shifting the trailing rows/columns up-left before shrinking it.
template <typename RecType, typename Metric> auto Matrix<RecType, Metric>::erase(std::size_t index) -> bool
{
    check_index(index);
    auto rows = D_.rows();
    if (index != rows - 1) {
        // Shift the lower-right block one step up and left over the erased
        // row/column.
        auto src = blaze::submatrix(D_, index + 1, index + 1, rows - index - 1, rows - index - 1);
        auto dst = blaze::submatrix(D_, index, index, rows - index - 1, rows - index - 1);
        dst = src;
        if (index != 0) {
            // auto src1 = blaze::submatrix(D_, 0, index + 1, id, rows - index - 1);
            // auto dst1 = blaze::submatrix(D_, 0, index, index, rows - index - 1);
            // In the rows above `index`, shift the columns right of it one
            // step left.
            auto src1 = blaze::submatrix(D_, 0, index + 1, index, rows - index - 1);
            auto dst1 = blaze::submatrix(D_, 0, index, index, rows - index - 1);
            dst1 = src1;
        }
    }
    D_.resize(rows - 1, rows - 1, true);
    remove_data(index);
    return true;
}
// Replaces the record at `index` and refreshes its cached distances to all
// other records (upper triangle only, so the stored side depends on index
// ordering).
template <typename RecType, typename Metric> void Matrix<RecType, Metric>::set(std::size_t index, const RecType &p)
{
    check_index(index);
    const std::size_t n = D_.rows();
    for (std::size_t other = 0; other < n; other++) {
        if (other == index)
            continue;   // no self-distance stored
        if (other < index)
            D_(other, index) = metric_(data_[other], p);
        else
            D_(index, other) = metric_(data_[other], p);
    }
    data_[index] = p;
}
// Index of the stored record nearest to `p` (brute-force linear scan).
template <typename RecType, typename Metric> auto Matrix<RecType, Metric>::nn(const RecType &p) const -> std::size_t
{
    return nn_(p).first;
}
// Brute-force k-nearest-neighbour search: maintains a distance-sorted list
// of the k best (index, distance) pairs seen so far and returns it.
// Fix: the original also ran a full nn_() scan whose result (nn_index) was
// never used — that dead O(n) pass over the metric is removed.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::knn(const RecType &query, unsigned k) const
    -> std::vector<std::pair<std::size_t, distType>>
{
    std::vector<std::pair<std::size_t, distType>> result;
    result.reserve(k);
    for (std::size_t i = 0; i < D_.columns(); ++i) {
        std::pair<std::size_t, distType> temp{i, metric_(query, data_[i])};
        // keep the list sorted ascending by distance
        auto ins = std::upper_bound(result.begin(), result.end(), temp,
                                    [](auto lhs, auto rhs) { return lhs.second < rhs.second; });
        result.insert(ins, temp);
        if (result.size() > k) {
            result.pop_back();   // drop the current worst
        }
    }
    return result;
}
// Range search: all records within `range` of `query`, sorted ascending by
// distance. Fixes: the unused nn_index variable is removed, and the main
// loop now reuses the distances nn_ already recorded in metric_cache
// instead of re-evaluating the metric for every record.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::rnn(const RecType &query, distType range) const
    -> std::vector<std::pair<std::size_t, distType>>
{
    std::unordered_map<std::size_t, distType> metric_cache;
    auto nnp = nn_(query, metric_cache);
    std::vector<std::pair<std::size_t, distType>> result;
    // If even the nearest neighbour is out of range, nothing can match.
    if (nnp.second > range)
        return result;
    for (std::size_t idx = 0; idx < D_.columns(); idx++) {
        auto cached = metric_cache.find(idx);
        distType dist = (cached != metric_cache.end()) ? cached->second : metric_(query, data_[idx]);
        if (dist > range)
            continue;
        std::pair<std::size_t, distType> temp{idx, dist};
        // keep the result sorted ascending by distance
        auto ins = std::upper_bound(result.begin(), result.end(), temp,
                                    [](auto lhs, auto rhs) { return lhs.second < rhs.second; });
        result.insert(ins, temp);
    }
    return result;
}
// Brute-force scan for the closest stored record; returns {index, distance}.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::nn_(const RecType &p) const -> std::pair<std::size_t, distType>
{
    std::size_t best = 0;
    distType best_dist = std::numeric_limits<distType>::max();
    for (std::size_t i = 0; i < data_.size(); i++) {
        const auto d = metric_(p, data_[i]);
        if (d < best_dist) {
            best_dist = d;
            best = i;
        }
    }
    return std::pair{best, best_dist};
}
// Same brute-force scan as nn_(p), additionally recording every computed
// distance in `metric_cache` so callers (e.g. rnn) can reuse them.
template <typename RecType, typename Metric>
auto Matrix<RecType, Metric>::nn_(const RecType &p, std::unordered_map<std::size_t, distType> &metric_cache) const
    -> std::pair<std::size_t, distType>
{
    std::size_t best = 0;
    distType best_dist = std::numeric_limits<distType>::max();
    for (std::size_t i = 0; i < data_.size(); i++) {
        const auto d = metric_(p, data_[i]);
        metric_cache[i] = d;
        if (d < best_dist) {
            best_dist = d;
            best = i;
        }
    }
    return std::pair{best, best_dist};
}
} // namespace metric
#endif
| 6,097
|
C++
|
.cpp
| 185
| 30.794595
| 117
| 0.672721
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,538
|
estimator_helpers.cpp
|
metric-space-ai_metric/metric/correlation/estimator_helpers.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#include "estimator_helpers.hpp"
#include <cmath>
#include <iostream>
#include <vector>
namespace metric {
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
// Returns n evenly spaced values from a to b inclusive; for n <= 1 the
// result is a single-element vector containing b (matching the original).
inline std::vector<double> linspace(double a, double b, int n)
{
    std::vector<double> result;
    if (n <= 1) {
        result.push_back(b);
        return result;
    }
    const double step = (b - a) / double(n - 1);
    result.reserve(n);
    for (int k = 0; k < n; ++k)
        result.push_back(a + k * step);
    return result;
}
// Evaluates poly[0] + poly[1]*z + poly[2]*z^2 + ... with Horner's scheme.
// Fix: an empty coefficient vector made the original read poly[-1]
// (undefined behaviour); it now returns 0 for an empty polynomial.
inline double polyeval(const std::vector<double> &poly, const double z)
{
    if (poly.empty())
        return 0.0;
    double sum = poly.back();
    for (int i = static_cast<int>(poly.size()) - 2; i >= 0; --i) {
        sum *= z;
        sum += poly[i];
    }
    return sum;
}
inline double erfinv_imp(const double p, const double q)
{
double result = 0;
if (p <= 0.5) {
static const float Y = 0.0891314744949340820313f;
static const std::vector<double> P = {-0.000508781949658280665617L, -0.00836874819741736770379L,
0.0334806625409744615033L, -0.0126926147662974029034L,
-0.0365637971411762664006L, 0.0219878681111168899165L,
0.00822687874676915743155L, -0.00538772965071242932965L};
static const std::vector<double> Q = {1,
-0.970005043303290640362L,
-1.56574558234175846809L,
1.56221558398423026363L,
0.662328840472002992063L,
-0.71228902341542847553L,
-0.0527396382340099713954L,
0.0795283687341571680018L,
-0.00233393759374190016776L,
0.000886216390456424707504L};
double g = p * (p + 10);
double r = polyeval(P, p) / polyeval(Q, p);
result = g * Y + g * r;
} else if (q >= 0.25) {
static const float Y = 2.249481201171875f;
static const std::vector<double> P = {
-0.202433508355938759655L, 0.105264680699391713268L, 8.37050328343119927838L,
17.6447298408374015486L, -18.8510648058714251895L, -44.6382324441786960818L,
17.445385985570866523L, 21.1294655448340526258L, -3.67192254707729348546L};
static const std::vector<double> Q = {1L,
6.24264124854247537712L,
3.9713437953343869095L,
-28.6608180499800029974L,
-20.1432634680485188801L,
48.5609213108739935468L,
10.8268667355460159008L,
-22.6436933413139721736L,
1.72114765761200282724L};
double g = std::sqrt(-2 * std::log(q));
double xs = q - 0.25;
double r = polyeval(P, xs) / polyeval(Q, xs);
result = g / (Y + r);
} else {
double x = std::sqrt(-std::log(q));
if (x < 3) {
static const float Y = 0.807220458984375f;
static const std::vector<double> P = {
-0.131102781679951906451L, -0.163794047193317060787L, 0.117030156341995252019L,
0.387079738972604337464L, 0.337785538912035898924L, 0.142869534408157156766L,
0.0290157910005329060432L, 0.00214558995388805277169L, -0.679465575181126350155e-6L,
0.285225331782217055858e-7L, -0.681149956853776992068e-9L};
static const std::vector<double> Q = {1,
3.46625407242567245975L,
5.38168345707006855425L,
4.77846592945843778382L,
2.59301921623620271374L,
0.848854343457902036425L,
0.152264338295331783612L,
0.01105924229346489121L};
double xs = x - 1.125;
double R = polyeval(P, xs) / polyeval(Q, xs);
result = Y * x + R * x;
} else if (x < 6) {
static const float Y = 0.93995571136474609375f;
static const std::vector<double> P = {
-0.0350353787183177984712L, -0.00222426529213447927281L, 0.0185573306514231072324L,
0.00950804701325919603619L, 0.00187123492819559223345L, 0.000157544617424960554631L,
0.460469890584317994083e-5L, -0.230404776911882601748e-9L, 0.266339227425782031962e-11L};
static const std::vector<double> Q = {1L,
1.3653349817554063097L,
0.762059164553623404043L,
0.220091105764131249824L,
0.0341589143670947727934L,
0.00263861676657015992959L,
0.764675292302794483503e-4L};
double xs = x - 3;
double R = polyeval(P, xs) / polyeval(Q, xs);
result = Y * x + R * x;
} else if (x < 18) {
static const float Y = 0.98362827301025390625f;
static const std::vector<double> P = {
-0.0167431005076633737133L, -0.00112951438745580278863L, 0.00105628862152492910091L,
0.000209386317487588078668L, 0.149624783758342370182e-4L, 0.449696789927706453732e-6L,
0.462596163522878599135e-8L, -0.281128735628831791805e-13L, 0.99055709973310326855e-16L};
static const std::vector<double> Q = {1L,
0.591429344886417493481L,
0.138151865749083321638L,
0.0160746087093676504695L,
0.000964011807005165528527L,
0.275335474764726041141e-4L,
0.282243172016108031869e-6L};
double xs = x - 6;
double R = polyeval(P, xs) / polyeval(Q, xs);
result = Y * x + R * x;
} else if (x < 44) {
static const float Y = 0.99714565277099609375f;
static const std::vector<double> P = {-0.0024978212791898131227L, -0.779190719229053954292e-5L,
0.254723037413027451751e-4L, 0.162397777342510920873e-5L,
0.396341011304801168516e-7L, 0.411632831190944208473e-9L,
0.145596286718675035587e-11L, -0.116765012397184275695e-17L};
static const std::vector<double> Q = {1L,
0.207123112214422517181L,
0.0169410838120975906478L,
0.000690538265622684595676L,
0.145007359818232637924e-4L,
0.144437756628144157666e-6L,
0.509761276599778486139e-9L};
double xs = x - 18;
double R = polyeval(P, xs) / polyeval(Q, xs);
result = Y * x + R * x;
} else {
static const float Y = 0.99941349029541015625f;
static const std::vector<double> P = {-0.000539042911019078575891L, -0.28398759004727721098e-6L,
0.899465114892291446442e-6L, 0.229345859265920864296e-7L,
0.225561444863500149219e-9L, 0.947846627503022684216e-12L,
0.135880130108924861008e-14L, -0.348890393399948882918e-21L};
static const std::vector<double> Q = {1L,
0.0845746234001899436914L,
0.00282092984726264681981L,
0.468292921940894236786e-4L,
0.399968812193862100054e-6L,
0.161809290887904476097e-8L,
0.231558608310259605225e-11L};
double xs = x - 44;
double R = polyeval(P, xs) / polyeval(Q, xs);
result = Y * x + R * x;
}
}
return result;
}
// Inverse complementary error function: erfcinv(erfc(x)) == x for z in [0, 2].
// Fix: the out-of-range warning printed a Boost-style "%1%" placeholder
// literally (it was never substituted) and went to stdout; it now reports the
// actual argument on stderr. As before, the computation still proceeds
// best-effort after the warning.
inline double erfcinv(const double z)
{
    if ((z < 0) || (z > 2))
        std::cerr << "Argument outside range [0,2] in inverse erfc function (got p=" << z << ")." << std::endl;
    // Map onto erfinv_imp's (p, q = 1 - p) parametrisation; s is the sign.
    double p, q, s;
    if (z > 1) {
        q = 2 - z;
        p = 1 - q;
        s = -1;
    } else {
        p = 1 - z;
        q = z;
        s = 1;
    }
    return s * erfinv_imp(p, q);
}
// Inverse CDF (quantile function) of the normal distribution N(mu, sigma),
// evaluated at every probability in `prob`.
// Uses the identity norminv(p) = -sqrt(2) * erfcinv(2p).
inline std::vector<double> icdf(const std::vector<double> &prob, const double mu, const double sigma)
{
    std::vector<double> quantiles;
    quantiles.reserve(prob.size());
    for (std::size_t i = 0; i < prob.size(); ++i) {
        quantiles.push_back(mu + -1.41421356237309504880 * erfcinv(2 * prob[i]) * sigma);
    }
    return quantiles;
}
// Population variance of `data` around the supplied (precomputed) mean.
// Fix: the previous version returned the raw sum of squared deviations --
// it never divided by the sample size, so it was not a variance at all --
// and an empty sample would leave callers with an unnormalized 0/garbage mix.
// Returns 0 for an empty sample.
inline double variance(const std::vector<double> &data, const double mean)
{
    if (data.empty()) {
        return 0;
    }
    double sum = 0;
    for (size_t i = 0; i < data.size(); ++i) {
        sum += (data[i] - mean) * (data[i] - mean);
    }
    return sum / data.size();
}
// Arithmetic mean of `data` (0/0 == NaN for an empty container, as before).
inline double mean(const std::vector<double> &data)
{
    double total = 0;
    for (const double v : data) {
        total += v;
    }
    return total / data.size();
}
// Peak-to-RMS ratio (crest factor): max(|x|) / sqrt(mean(x^2)).
// Fix: the unqualified abs()/sqrt() calls are fragile -- depending on which
// headers leak in, ::abs can bind to the C integer overload and truncate the
// values; both calls are now explicitly std::-qualified.
inline double peak2ems(const std::vector<double> &data)
{
    double maxAbs = -1;
    double meanSquare = 0;
    for (const double v : data) {
        const double absV = std::abs(v);
        if (absV > maxAbs) {
            maxAbs = absV;
        }
        meanSquare += v * v;
    }
    meanSquare /= data.size();
    return maxAbs / std::sqrt(meanSquare);
}
} // namespace metric
| 8,143
|
C++
|
.cpp
| 225
| 30.417778
| 116
| 0.658898
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| true
| false
|
1,531,539
|
mgc.cpp
|
metric-space-ai_metric/metric/correlation/mgc.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2018 Michael Welsch
*/
#include "mgc.hpp"
#include <algorithm>
#include <assert.h>
#include <complex>
#include <functional>
#include <iterator>
#include <limits>
#include <numeric>
#include <vector>
#if defined(_MSC_VER)
/* Microsoft C/C++-compatible compiler */
#include <intrin.h>
#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
/* GCC-compatible compiler, targeting x86/x86-64 */
#include <x86intrin.h>
#elif defined(__GNUC__) && defined(__ARM_NEON__)
/* GCC-compatible compiler, targeting ARM with NEON */
#include <arm_neon.h>
#elif defined(__GNUC__) && defined(__IWMMXT__)
/* GCC-compatible compiler, targeting ARM with WMMX */
#include <mmintrin.h>
#elif (defined(__GNUC__) || defined(__xlC__)) && (defined(__VEC__) || defined(__ALTIVEC__))
/* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
#include <altivec.h>
#elif defined(__GNUC__) && defined(__SPE__)
/* GCC-compatible compiler, targeting PowerPC with SPE */
#include <spe.h>
#endif
#include <math.h>
#include <random>
#include <chrono>
#include <blaze/Math.h>
#include "../distance.hpp"
#include "../utils/graph/connected_components.hpp"
#include "estimator_helpers.hpp"
namespace metric {
// computes the (pairwise) distance matrix for arbitrary random access matrix like containers.
// Builds the full pairwise Euclidean distance matrix for any random-access,
// matrix-like container of records; the result is symmetric with a zero
// diagonal.
template <typename Container> Container distance_matrix(const Container &data)
{
    using Row = typename Container::value_type;
    using T = typename Row::value_type;
    const size_t n = data.size();
    Container result(n, Row(n));
    auto dist = metric::Euclidean<T>();
    for (size_t row = 0; row < n; ++row) {
        result[row][row] = 0;
        // Compute the upper triangle once and mirror it.
        for (size_t col = row + 1; col < n; ++col) {
            const T d = dist(data[row], data[col]);
            result[row][col] = d;
            result[col][row] = d;
        }
    }
    return result;
}
// For every row of the distance matrix, replaces each entry by its rank
// within that row (0 = closest neighbor).
// Fix: the row loop counter was a signed `int` compared against the unsigned
// blaze row count (signed/unsigned mismatch); it is now size_t.
template <typename T> blaze::DynamicMatrix<size_t> MGC_direct::rank_distance_matrix(const DistanceMatrix<T> &data)
{
    blaze::DynamicMatrix<size_t> matrix(data.rows(), data.columns());
    std::vector<size_t> indexes(data.rows());
    std::iota(indexes.begin(), indexes.end(), 0);
    for (size_t i = 0; i < data.rows(); ++i) {
        auto row = blaze::row(data, i);
        // Order column indexes by ascending distance within this row.
        std::sort(indexes.begin(), indexes.end(), [&row](auto i1, auto i2) { return row[i1] < row[i2]; });
        /* Fill result row */
        auto outRow = blaze::row(matrix, i);
        for (size_t iter = 0; iter < row.size(); ++iter) {
            outRow[indexes[iter]] = iter;
        }
    }
    return matrix;
}
// Thin wrapper kept for API symmetry with center_distance_matrix(); currently
// a plain per-row rank transform (the transpose step remains disabled).
template <typename T> blaze::DynamicMatrix<size_t> MGC_direct::center_ranked_distance_matrix(const DistanceMatrix<T> &X)
{
    return rank_distance_matrix(X);
}
// Column-centers the distance matrix: subtracts from every entry the mean of
// its column (normalized by n-1), then zeroes the diagonal.
// Fix: the loops used signed `auto i = 0` / `int` counters against blaze's
// unsigned extents (signed/unsigned comparison); they are now size_t.
template <typename T> blaze::DynamicMatrix<T> MGC_direct::center_distance_matrix(const DistanceMatrix<T> &X)
{
    blaze::DynamicVector<T, blaze::rowVector> columnMeans = blaze::sum<blaze::columnwise>(X);
    columnMeans /= X.rows() - 1;
    blaze::DynamicMatrix<T> centered(X.rows(), X.columns());
    for (size_t i = 0; i < X.rows(); ++i) {
        for (size_t j = 0; j < X.rows(); ++j) {
            centered(i, j) = X(i, j) - columnMeans[j];
        }
    }
    for (size_t i = 0; i < X.rows(); ++i) {
        centered(i, i) = 0;
    }
    return centered;
}
template <typename T>
blaze::DynamicMatrix<T> MGC_direct::local_covariance(const blaze::DynamicMatrix<T> &A, const blaze::DynamicMatrix<T> &B,
                                                     const blaze::DynamicMatrix<size_t> &RX,
                                                     const blaze::DynamicMatrix<size_t> &RY)
{
    // Computes the grid of local covariances between the centered distance
    // matrices A and B, one entry per pair of neighborhood scales (k, l) taken
    // from the rank matrices RX and RY.
    const size_t n = A.rows();
    const size_t nX = blaze::max(RX) + 1; // number of distinct X ranks (scales)
    const size_t nY = blaze::max(RY) + 1; // number of distinct Y ranks (scales)
    blaze::DynamicMatrix<T> covXY(nX, nY, 0);
    blaze::DynamicMatrix<T, blaze::columnMajor> EX(nX, 1, 0); // per-scale sums of A entries
    blaze::DynamicMatrix<T> EY(1, nY, 0);                     // per-scale sums of B entries
    // summing up the entrywise product of A and B based on the ranks EX and EY
    for (size_t i = 0; i < n; ++i) {
        for (size_t j = 0; j < n; ++j) {
            const auto a = A(i, j);
            const auto b = B(i, j);
            const auto k = RX(i, j);
            const auto l = RY(i, j);
            covXY(k, l) += a * b;
            EX(k, 0) += a;
            EY(0, l) += b;
        }
    }
    // Prefix sums down the first column so each scale accumulates all
    // contributions from smaller scales.
    for (size_t k = 0; k < nX - 1; ++k) {
        covXY(k + 1, 0) = covXY(k, 0) + covXY(k + 1, 0);
        EX(k + 1, 0) += EX(k, 0);
    }
    // NOTE(review): covXY0 is a DynamicVector constructed FROM the row view,
    // i.e. a copy, not a view -- the cumulative sums written into it below do
    // not propagate back into covXY's row 0. Confirm whether that is intended.
    blaze::DynamicVector<T, blaze::rowVector> covXY0 = blaze::row(covXY, 0);
    for (size_t l = 0; l < nY - 1; ++l) {
        covXY0[l + 1] += covXY0[l];
        EY(0, l + 1) += EY(0, l);
    }
    // 2-D inclusion-exclusion turns the per-rank sums into cumulative sums
    // over the (k, l) rectangle.
    for (size_t k = 0; k < nX - 1; ++k) {
        for (size_t l = 0; l < nY - 1; ++l) {
            covXY(k + 1, l + 1) += covXY(k + 1, l) + covXY(k, l + 1) - covXY(k, l);
        }
    }
    // Center: subtract the product of the accumulated expectations.
    covXY -= EX * EY / n / n;
    return covXY;
}
// Abramowitz & Stegun 26.2.23 rational approximation used by the inverse
// normal CDF.
template <typename T> T MGC_direct::rational_approximation(const T t)
{
    // The absolute value of the error should be less than 4.5 e-4.
    const std::vector<T> c = {2.515517, 0.802853, 0.010328};
    const std::vector<T> d = {1.432788, 0.189269, 0.001308};
    const T numerator = (c[2] * t + c[1]) * t + c[0];
    const T denominator = ((d[2] * t + d[1]) * t + d[0]) * t + 1.0;
    return t - numerator / denominator;
}
// Inverse of the standard normal CDF via the rational approximation, using
// the symmetry F^-1(p) = -G^-1(p) / F^-1(p) = G^-1(1-p) around p = 0.5.
template <typename T> T MGC_direct::normal_CDF_inverse(const T p)
{
    const bool lowerTail = p < 0.5;
    const T tail = lowerTail ? p : 1 - p;
    const T g = rational_approximation(std::sqrt(-2.0 * std::log(tail)));
    return lowerTail ? -g : g;
}
// Convenience alias for the inverse normal CDF (quantile function).
template <typename T> T MGC_direct::icdf_normal(const T p)
{
    return normal_CDF_inverse(p);
}
template <typename T>
blaze::DynamicMatrix<bool> MGC_direct::significant_local_correlation(const blaze::DynamicMatrix<T> &localCorr, T p)
{
    // Thresholds the local-correlation grid at significance level p and
    // returns the largest connected region of significant scales
    // (an all-false matrix when no component is found).
    /* Sample size minus one */
    T sz = T(localCorr.rows() - 1);
    /* Normal approximation, which is equivalent to beta approximation for n larger than 10 */
    T thres = icdf_normal(1 - p / sz) / sqrt(sz * (sz - 3) / 2 - 1);
    /* Take the maximal of threshold and local correlation at the maximal scale */
    thres = std::max(thres, localCorr(localCorr.rows() - 1, localCorr.rows() - 1));
    /* Threshold R = (localCorr > thres) */
    blaze::DynamicMatrix<bool> R;
    R = blaze::map(localCorr, [thres](T e) { return e > thres; });
    // Restrict to the largest connected component of significant cells.
    auto components = metric::graph::largest_connected_component(R);
    if (components.empty()) {
        return blaze::DynamicMatrix<bool>(R.rows(), R.columns(), false);
    } else {
        return components[0];
    }
}
// Frobenius norm: the square root of the sum of squared entries.
// Fix: the previous implementation summed blaze::invsqrt(matrix)
// (element-wise 1/sqrt), which is not a norm at all and yields +inf as soon
// as the matrix contains a zero entry -- so the caller's
// `frobeniusNorm(R) != 0` guard could never reject an all-false mask.
// The explicit double accumulation also keeps the math exact when T is bool.
template <typename T> T MGC_direct::frobeniusNorm(const blaze::DynamicMatrix<T> &matrix)
{
    double sumOfSquares = 0;
    for (size_t i = 0; i < matrix.rows(); ++i) {
        for (size_t j = 0; j < matrix.columns(); ++j) {
            const double v = static_cast<double>(matrix(i, j));
            sumOfSquares += v * v;
        }
    }
    return std::sqrt(sumOfSquares);
}
// Maximum of m1 restricted to the positions where the boolean mask m2 is set.
template <typename T>
T MGC_direct::max_in_matrix_regarding_second_boolean_matrix(const blaze::DynamicMatrix<T> &m1,
                                                            const blaze::DynamicMatrix<bool> &m2)
{
    const auto masked = m2 % m1; // Schur product zeroes the unmasked entries
    return blaze::max(masked);
}
template <typename T>
T MGC_direct::optimal_local_generalized_correlation(const blaze::DynamicMatrix<T> &corr,
                                                    const blaze::DynamicMatrix<bool> &R)
{
    // Selects the final MGC statistic: the largest local correlation inside
    // the significant region R, falling back to the correlation at the
    // maximal scale when R is empty, trivial (all true), or too small.
    size_t m = corr.rows();
    size_t n = corr.columns();
    auto MGC = corr(corr.rows() - 1, corr.rows() - 1); // default sample mgc to local corr at maximal scale
    auto optimalScale = m * n; // default the optimal scale to maximal scale
    size_t R_sum = blaze::nonZeros(R);
    // Skip degenerate masks: zero norm (all false) or full area (all true).
    if (frobeniusNorm(R) != double(0) && R_sum != R.rows() * R.columns())
    // if (frobeniusNorm(R) != double(0) )
    {
        if (R_sum >= 2 * std::min(m, n)) {
            // proceed only when the region area is sufficiently large
            auto tmp = max_in_matrix_regarding_second_boolean_matrix(corr, R);
            // find all scales within R that maximize the local correlation
            size_t k = 0, l = 0;
            for (size_t i = 0; i < corr.rows(); ++i) {
                // ?
                for (size_t j = 0; j < corr.rows(); ++j) {
                    if (corr(i, j) >= tmp && R(i, j) == true) {
                        k = i;
                        l = j;
                    }
                }
            }
            if (tmp >= MGC) {
                MGC = tmp;
                optimalScale = l * m + (k + 1); // take the scale of maximal stat and change to single index
                // NOTE(review): optimalScale is computed but never returned or
                // stored anywhere visible here -- confirm whether callers were
                // meant to receive it.
            }
        }
    }
    return MGC;
}
// Turns the local covariances into correlations in place: each entry is
// divided by the geometric mean of the corresponding local variances; NaN
// results (zero variance) are mapped to 0 and values above 1 are clamped.
// Fix: qualified isnan as std::isnan (the unqualified call relied on the C
// macro leaking from <math.h>, which is not portable) and sqrt as std::sqrt.
template <typename T>
void MGC_direct::normalize_generalized_correlation(blaze::DynamicMatrix<T> &corr, const blaze::DynamicMatrix<T> &varX,
                                                   const blaze::DynamicMatrix<T> &varY)
{
    for (size_t i = 0; i < corr.rows(); ++i) {
        for (size_t j = 0; j < corr.rows(); ++j) {
            corr(i, j) = corr(i, j) / std::sqrt(varX(i, i) * varY(j, j));
            if (std::isnan(corr(i, j))) {
                corr(i, j) = 0;
            } else if (corr(i, j) > 1) {
                corr(i, j) = 1;
            }
        }
    }
}
template <typename T> T MGC_direct::operator()(const DistanceMatrix<T> &X, const DistanceMatrix<T> &Y)
{
    // Full direct-MGC pipeline on two precomputed distance matrices:
    // center -> rank -> local covariances -> normalize -> significance mask ->
    // optimal local correlation. The large intermediates are cleared eagerly
    // (clear + shrinkToFit) to keep peak memory down.
    assert(X.rows() == Y.rows());
    // center distance matrix
    blaze::DynamicMatrix<T> A = center_distance_matrix(X);
    blaze::DynamicMatrix<T> B = center_distance_matrix(Y);
    auto RXt = center_ranked_distance_matrix(X);
    auto RYt = center_ranked_distance_matrix(Y);
    // transpose copies
    blaze::DynamicMatrix<T> At = blaze::trans(A);
    blaze::DynamicMatrix<T> Bt = blaze::trans(B);
    blaze::DynamicMatrix<size_t> RX = blaze::trans(RXt);
    blaze::DynamicMatrix<size_t> RY = blaze::trans(RYt);
    // compute generalized correlation
    auto corr = local_covariance(A, Bt, RX, RYt); // compute all local covariances
    auto varX = local_covariance(A, At, RX, RXt); // compute local variances for first data
    auto varY = local_covariance(B, Bt, RY, RYt); // compute local variances for second data
    // Release the centered/ranked matrices; only corr/varX/varY are needed now.
    blaze::clear(A);
    A.shrinkToFit();
    blaze::clear(At);
    At.shrinkToFit();
    blaze::clear(B);
    B.shrinkToFit();
    blaze::clear(Bt);
    Bt.shrinkToFit();
    blaze::clear(RXt);
    RXt.shrinkToFit();
    blaze::clear(RX);
    RX.shrinkToFit();
    blaze::clear(RY);
    RY.shrinkToFit();
    blaze::clear(RYt);
    RYt.shrinkToFit();
    // normalize the generalized correlation
    normalize_generalized_correlation(corr, varX, varY);
    blaze::clear(varX);
    varX.shrinkToFit();
    blaze::clear(varY);
    varY.shrinkToFit();
    /* Find the largest connected region of significant local correlations */
    auto R = significant_local_correlation(corr /*,p=0.02*/);
    /* Find the maximal scaled correlation within the significant region (the Multiscale Graph Correlation) */
    return optimal_local_generalized_correlation(corr, R);
}
// Cross-correlation of two distance matrices: evaluates the MGC statistic on
// equally sized principal submatrices of `a` and `b` for every shift in
// [-n, n], returning the 2n+1 values in shift order.
// Fixes: removed leftover debug code (`int s = -n; if (...) { auto g = 9; }`)
// and made the unsigned-to-signed negation explicit (`-n` on an unsigned int
// relied on wraparound before the int conversion).
template <typename T>
std::vector<double> MGC_direct::xcorr(const DistanceMatrix<T> &a, const DistanceMatrix<T> &b, const unsigned int n)
{
    assert(a.rows() == b.rows());
    assert(n <= std::numeric_limits<int>::max());
    std::vector<double> result;
    result.reserve(2 * n + 1);
    for (int shift = -static_cast<int>(n); shift <= static_cast<int>(n); ++shift) {
        DistanceMatrix<T> aShifted;
        DistanceMatrix<T> bShifted;
        const auto start = std::abs(shift);
        const auto length = a.rows() - start;
        if (shift < 0) {
            // Negative shift: drop the first |shift| rows/columns of `a`.
            aShifted = blaze::submatrix(a, start, start, length, length);
            bShifted = blaze::submatrix(b, 0, 0, length, length);
        } else {
            // Non-negative shift: drop the first `shift` rows/columns of `b`.
            aShifted = blaze::submatrix(a, 0, 0, length, length);
            bShifted = blaze::submatrix(b, start, start, length, length);
        }
        result.push_back(operator()(aShifted, bShifted));
    }
    return result;
}
// Multiscale Graph Correlation between two equally long datasets: builds each
// dataset's pairwise distance matrix under its configured metric, then runs
// the direct MGC estimator on the two matrices.
template <class RecType1, class Metric1, class RecType2, class Metric2>
template <typename Container1, typename Container2>
double MGC<RecType1, Metric1, RecType2, Metric2>::operator()(const Container1 &a, const Container2 &b) const
{
    assert(a.size() == b.size());
    const auto distancesA = computeDistanceMatrix<Container1>(a, metric1);
    const auto distancesB = computeDistanceMatrix<Container2>(b, metric2);
    return MGC_direct()(distancesA, distancesB);
}
template <class RecType1, class Metric1, class RecType2, class Metric2>
template <typename Container, typename Metric>
DistanceMatrix<double> MGC<RecType1, Metric1, RecType2, Metric2>::computeDistanceMatrix(const Container &c,
                                                                                        const Metric &metric) const
{
    // Pairwise distance matrix of the records in `c` under `metric`.
    // Only the upper triangle (j > i) is written explicitly; presumably
    // DistanceMatrix is a symmetric-matrix adaptor that mirrors X(i, j) into
    // X(j, i) -- TODO(review): confirm against the DistanceMatrix definition.
    DistanceMatrix<double> X(c.size());
    for (size_t i = 0; i < X.rows(); ++i) {
        X(i, i) = 0;
        for (size_t j = i + 1; j < X.columns(); ++j) {
            double distance = metric(c[i], c[j]);
            X(i, j) = distance;
        }
    }
    return X;
}
template <class RecType1, class Metric1, class RecType2, class Metric2>
template <typename Container1, typename Container2>
double MGC<RecType1, Metric1, RecType2, Metric2>::estimate(const Container1 &a, const Container2 &b,
                                                           const size_t sampleSize, const double threshold,
                                                           size_t maxIterations) const
{
    // Monte-Carlo estimate of MGC: averages operator() over disjoint random
    // subsamples of `sampleSize` records, stopping early once the per-sample
    // values are close enough to a normal fit (convergence < threshold).
    // Returns the running mean of the per-sample MGC values.
    assert(a.size() == b.size());
    const size_t dataSize = a.size();
    /* Update maxIterations */
    if (maxIterations == 0) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations > dataSize / sampleSize) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations < 1) {
        // Not enough data for even a single sample: run on the whole dataset.
        return operator()(a, b);
    }
    /* Create shuffle indexes */
    std::vector<size_t> indexes(dataSize);
    std::iota(indexes.begin(), indexes.end(), 0);
    auto rng = std::default_random_engine(); // default-seeded: runs are deterministic
    std::shuffle(indexes.begin(), indexes.end(), rng);
    /* Create vector container for fast random access */
    const std::vector<typename Container1::value_type> vectorA(a.begin(), a.end());
    const std::vector<typename Container2::value_type> vectorB(b.begin(), b.end());
    /* Create samples */
    std::vector<typename Container1::value_type> sampleA;
    std::vector<typename Container2::value_type> sampleB;
    sampleA.reserve(sampleSize);
    sampleB.reserve(sampleSize);
    std::vector<double> mgcValues;
    double mu = 0;
    // NOTE(review): the loop counters below are signed ints compared against
    // size_t bounds, and the inner `i` shadows the outer iteration counter.
    for (auto i = 1; i <= maxIterations; ++i) {
        size_t start = (i - 1) * sampleSize;
        size_t end = std::min(i * sampleSize - 1, dataSize - 1);
        /* Create samples */
        sampleA.clear();
        sampleB.clear();
        for (auto j = start; j < end; ++j) {
            sampleA.push_back(vectorA[indexes[j]]);
            sampleB.push_back(vectorB[indexes[j]]);
        }
        /* Get sample mgc value */
        double mgc = operator()(sampleA, sampleB);
        mgcValues.push_back(mgc);
        std::sort(mgcValues.begin(), mgcValues.end());
        const size_t n = mgcValues.size();
        // Compare the empirical values with a normal fit (mu, sigma) through
        // the inverse CDF evaluated at the plotting positions p0.
        const auto p0 = linspace(0.5 / n, 1 - 0.5 / n, n);
        mu = mean(mgcValues);
        double sigma = variance(mgcValues, mu);
        const std::vector<double> synth = icdf(p0, mu, sigma);
        std::vector<double> diff;
        diff.reserve(n);
        for (auto i = 0; i < n; ++i) {
            diff.push_back(mgcValues[i] - synth[i]);
        }
        // Peak-to-RMS of the residuals, scaled by n, is the convergence score.
        auto convergence = peak2ems(diff) / n;
        if (convergence < threshold) {
            return mu;
        }
    }
    return mu;
}
// Shifted MGC cross-correlation over the lags [-n, n]: builds the two distance
// matrices and delegates to MGC_direct::xcorr.
template <class RecType1, class Metric1, class RecType2, class Metric2>
template <typename Container1, typename Container2>
std::vector<double> MGC<RecType1, Metric1, RecType2, Metric2>::xcorr(const Container1 &a, const Container2 &b,
                                                                     const int n) const
{
    assert(a.size() == b.size());
    /* Compute distance matrices */
    const auto distancesA = computeDistanceMatrix<Container1>(a, metric1);
    const auto distancesB = computeDistanceMatrix<Container2>(b, metric2);
    return MGC_direct().xcorr(distancesA, distancesB, n);
}
} // namespace metric
| 14,629
|
C++
|
.cpp
| 404
| 33.435644
| 120
| 0.670279
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,540
|
entropy.cpp
|
metric-space-ai_metric/metric/correlation/entropy.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
#ifndef _METRIC_DISTANCE_K_RANDOM_ENTROPY_CPP
#define _METRIC_DISTANCE_K_RANDOM_ENTROPY_CPP
//#include "metric/utils/type_traits.hpp"
#include "epmgp.hpp"
#include "estimator_helpers.hpp"
#include "metric/space/tree.hpp"
#include <cmath>
#include <limits>
#include <vector>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace metric {
namespace entropy_details {
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
// Evenly spaced grid of n points covering [a, b]; for n <= 1 the result is
// the single point {b}.
std::vector<double> linspace(double a, double b, int n)
{
    std::vector<double> grid;
    if (n <= 1) {
        grid.push_back(b);
        return grid;
    }
    const double step = (b - a) / double(n - 1);
    for (int i = 0; i < n; ++i) {
        grid.push_back(a + i * step);
    }
    return grid;
}
// Evaluates the polynomial with coefficients poly[0..n-1] (poly[i] multiplies
// z^i) at the point z using Horner's scheme.
inline double polyeval(const std::vector<double> &poly, const double z)
{
    double acc = poly.back();
    for (auto it = poly.rbegin() + 1; it != poly.rend(); ++it) {
        acc = acc * z + *it;
    }
    return acc;
}
// Core of the inverse error function, Boost.Math-style: the caller supplies
// both p and q = 1 - p so precision is preserved near both tails. The domain
// is split into bands; each band uses a float anchor Y plus a minimax rational
// correction polyeval(P, x) / polyeval(Q, x).
inline double erfinv_imp(const double p, const double q)
{
    double result = 0;
    if (p <= 0.5) {
        // Central band: erfinv(p) ~= p*(p+10)*(Y + P(p)/Q(p)).
        static const float Y = 0.0891314744949340820313f;
        static const std::vector<double> P = {-0.000508781949658280665617L, -0.00836874819741736770379L,
                                              0.0334806625409744615033L, -0.0126926147662974029034L,
                                              -0.0365637971411762664006L, 0.0219878681111168899165L,
                                              0.00822687874676915743155L, -0.00538772965071242932965L};
        static const std::vector<double> Q = {1,
                                              -0.970005043303290640362L,
                                              -1.56574558234175846809L,
                                              1.56221558398423026363L,
                                              0.662328840472002992063L,
                                              -0.71228902341542847553L,
                                              -0.0527396382340099713954L,
                                              0.0795283687341571680018L,
                                              -0.00233393759374190016776L,
                                              0.000886216390456424707504L};
        double g = p * (p + 10);
        double r = polyeval(P, p) / polyeval(Q, p);
        result = g * Y + g * r;
    } else if (q >= 0.25) {
        // Intermediate band (0.5 < p <= 0.75), parametrised by sqrt(-2 log q).
        static const float Y = 2.249481201171875f;
        static const std::vector<double> P = {
            -0.202433508355938759655L, 0.105264680699391713268L, 8.37050328343119927838L,
            17.6447298408374015486L, -18.8510648058714251895L, -44.6382324441786960818L,
            17.445385985570866523L, 21.1294655448340526258L, -3.67192254707729348546L};
        static const std::vector<double> Q = {1L,
                                              6.24264124854247537712L,
                                              3.9713437953343869095L,
                                              -28.6608180499800029974L,
                                              -20.1432634680485188801L,
                                              48.5609213108739935468L,
                                              10.8268667355460159008L,
                                              -22.6436933413139721736L,
                                              1.72114765761200282724L};
        double g = std::sqrt(-2 * std::log(q));
        double xs = q - 0.25;
        double r = polyeval(P, xs) / polyeval(Q, xs);
        result = g / (Y + r);
    } else {
        // Tail region: switch on x = sqrt(-log q), with one polynomial pair
        // per interval [1.125, 3), [3, 6), [6, 18), [18, 44), [44, inf).
        double x = std::sqrt(-std::log(q));
        if (x < 3) {
            static const float Y = 0.807220458984375f;
            static const std::vector<double> P = {
                -0.131102781679951906451L, -0.163794047193317060787L, 0.117030156341995252019L,
                0.387079738972604337464L, 0.337785538912035898924L, 0.142869534408157156766L,
                0.0290157910005329060432L, 0.00214558995388805277169L, -0.679465575181126350155e-6L,
                0.285225331782217055858e-7L, -0.681149956853776992068e-9L};
            static const std::vector<double> Q = {1,
                                                  3.46625407242567245975L,
                                                  5.38168345707006855425L,
                                                  4.77846592945843778382L,
                                                  2.59301921623620271374L,
                                                  0.848854343457902036425L,
                                                  0.152264338295331783612L,
                                                  0.01105924229346489121L};
            double xs = x - 1.125;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 6) {
            static const float Y = 0.93995571136474609375f;
            static const std::vector<double> P = {
                -0.0350353787183177984712L, -0.00222426529213447927281L, 0.0185573306514231072324L,
                0.00950804701325919603619L, 0.00187123492819559223345L, 0.000157544617424960554631L,
                0.460469890584317994083e-5L, -0.230404776911882601748e-9L, 0.266339227425782031962e-11L};
            static const std::vector<double> Q = {1L,
                                                  1.3653349817554063097L,
                                                  0.762059164553623404043L,
                                                  0.220091105764131249824L,
                                                  0.0341589143670947727934L,
                                                  0.00263861676657015992959L,
                                                  0.764675292302794483503e-4L};
            double xs = x - 3;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 18) {
            static const float Y = 0.98362827301025390625f;
            static const std::vector<double> P = {
                -0.0167431005076633737133L, -0.00112951438745580278863L, 0.00105628862152492910091L,
                0.000209386317487588078668L, 0.149624783758342370182e-4L, 0.449696789927706453732e-6L,
                0.462596163522878599135e-8L, -0.281128735628831791805e-13L, 0.99055709973310326855e-16L};
            static const std::vector<double> Q = {1L,
                                                  0.591429344886417493481L,
                                                  0.138151865749083321638L,
                                                  0.0160746087093676504695L,
                                                  0.000964011807005165528527L,
                                                  0.275335474764726041141e-4L,
                                                  0.282243172016108031869e-6L};
            double xs = x - 6;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else if (x < 44) {
            static const float Y = 0.99714565277099609375f;
            static const std::vector<double> P = {-0.0024978212791898131227L, -0.779190719229053954292e-5L,
                                                  0.254723037413027451751e-4L, 0.162397777342510920873e-5L,
                                                  0.396341011304801168516e-7L, 0.411632831190944208473e-9L,
                                                  0.145596286718675035587e-11L, -0.116765012397184275695e-17L};
            static const std::vector<double> Q = {1L,
                                                  0.207123112214422517181L,
                                                  0.0169410838120975906478L,
                                                  0.000690538265622684595676L,
                                                  0.145007359818232637924e-4L,
                                                  0.144437756628144157666e-6L,
                                                  0.509761276599778486139e-9L};
            double xs = x - 18;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        } else {
            static const float Y = 0.99941349029541015625f;
            static const std::vector<double> P = {-0.000539042911019078575891L, -0.28398759004727721098e-6L,
                                                  0.899465114892291446442e-6L, 0.229345859265920864296e-7L,
                                                  0.225561444863500149219e-9L, 0.947846627503022684216e-12L,
                                                  0.135880130108924861008e-14L, -0.348890393399948882918e-21L};
            static const std::vector<double> Q = {1L,
                                                  0.0845746234001899436914L,
                                                  0.00282092984726264681981L,
                                                  0.468292921940894236786e-4L,
                                                  0.399968812193862100054e-6L,
                                                  0.161809290887904476097e-8L,
                                                  0.231558608310259605225e-11L};
            double xs = x - 44;
            double R = polyeval(P, xs) / polyeval(Q, xs);
            result = Y * x + R * x;
        }
    }
    return result;
}
// Inverse complementary error function: erfcinv(erfc(x)) == x for z in [0, 2].
// Fix: the out-of-range warning printed a Boost-style "%1%" placeholder
// literally (it was never substituted) and went to stdout; it now reports the
// actual argument on stderr. As before, the computation still proceeds
// best-effort after the warning.
inline double erfcinv(const double z)
{
    if ((z < 0) || (z > 2))
        std::cerr << "Argument outside range [0,2] in inverse erfc function (got p=" << z << ")." << std::endl;
    // Map onto erfinv_imp's (p, q = 1 - p) parametrisation; s is the sign.
    double p, q, s;
    if (z > 1) {
        q = 2 - z;
        p = 1 - q;
        s = -1;
    } else {
        p = 1 - z;
        q = z;
        s = 1;
    }
    return s * erfinv_imp(p, q);
}
// Inverse CDF (quantile function) of the normal distribution N(mu, sigma),
// evaluated at every probability in `prob`.
// Uses the identity norminv(p) = -sqrt(2) * erfcinv(2p).
std::vector<double> icdf(const std::vector<double> &prob, const double mu, const double sigma)
{
    std::vector<double> quantiles;
    quantiles.reserve(prob.size());
    for (std::size_t i = 0; i < prob.size(); ++i) {
        quantiles.push_back(mu + -1.41421356237309504880 * erfcinv(2 * prob[i]) * sigma);
    }
    return quantiles;
}
// Population variance of `data` around the supplied (precomputed) mean.
// Fix: the previous version returned the raw sum of squared deviations --
// it never divided by the sample size, so it was not a variance at all.
// Returns 0 for an empty sample instead of dividing by zero.
double variance(const std::vector<double> &data, const double mean)
{
    if (data.empty()) {
        return 0;
    }
    double sum = 0;
    for (size_t i = 0; i < data.size(); ++i) {
        sum += (data[i] - mean) * (data[i] - mean);
    }
    return sum / data.size();
}
// Arithmetic mean of `data` (0/0 == NaN for an empty container, as before).
double mean(const std::vector<double> &data)
{
    double total = 0;
    for (const double v : data) {
        total += v;
    }
    return total / data.size();
}
// Peak-to-RMS ratio (crest factor): max(|x|) / sqrt(mean(x^2)).
// Fix: the unqualified abs()/sqrt() calls are fragile -- depending on which
// headers leak in, ::abs can bind to the C integer overload and truncate the
// values; both calls are now explicitly std::-qualified.
double peak2ems(const std::vector<double> &data)
{
    double maxAbs = -1;
    double meanSquare = 0;
    for (const double v : data) {
        const double absV = std::abs(v);
        if (absV > maxAbs) {
            maxAbs = absV;
        }
        meanSquare += v * v;
    }
    meanSquare /= data.size();
    return maxAbs / std::sqrt(meanSquare);
}
// Maps a differential-entropy value onto a positive scale: inputs below 1 are
// exponentially squashed via exp(in)/e, inputs >= 1 pass through unchanged.
template <typename T> T conv_diff_entropy(T in)
{
    return in < 1 ? 1 / std::exp(1) * std::exp(in) : in;
}
// Inverse of conv_diff_entropy: log-expands inputs below 1, identity otherwise.
template <typename T> T conv_diff_entropy_inv(T in)
{
    return in < 1 ? std::log(in) + 1 : in;
}
// Logarithm of x to an arbitrary base, via the change-of-base identity.
template <typename T1, typename T2> T1 log(T1 logbase, T2 x)
{
    return std::log(x) / std::log(logbase);
}
// Multivariate normal probability density of x under N(mu, Sigma).
double mvnpdf(blaze::DynamicVector<double> x, blaze::DynamicVector<double> mu, blaze::DynamicMatrix<double> Sigma)
{
    size_t n = x.size();
    assert(mu.size() == n && Sigma.columns() == n && Sigma.rows() == n);
    auto centered = x - mu;
    // Mahalanobis quadratic form (x - mu)^T Sigma^-1 (x - mu).
    auto p = blaze::trans(centered) * blaze::inv(Sigma) * centered;
    // return std::exp(-p/2) / ( std::sqrt(blaze::det(Sigma)) * std::pow(2*M_PI, (double)n/2.0) );
    return std::exp(-p / 2) / std::sqrt(blaze::det(Sigma) * std::pow(2 * M_PI, n));
}
// Standard multivariate normal density: zero mean, identity covariance.
double mvnpdf(blaze::DynamicVector<double> x)
{
    const blaze::DynamicVector<double> zeroMean(x.size(), 0);
    const blaze::IdentityMatrix<double> unitCovariance(x.size());
    return mvnpdf(x, zeroMean, unitCovariance);
}
/* from https://github.com/masakazu-ishihata/irand.git
The digamma function is the derivative of gammaln.
Reference:
J Bernardo,
Psi ( Digamma ) Function,
Algorithm AS 103,
Applied Statistics,
Volume 25, Number 3, pages 315-317, 1976.
From http://www.psc.edu/~burkardt/src/dirichlet/dirichlet.f
(with modifications for negative numbers and extra precision)
*/
// Digamma (psi) function: the logarithmic derivative of the gamma function.
// Bernardo's AS 103 algorithm, extended with the reflection formula for
// negative arguments and a de Moivre expansion for extra precision.
double digamma(double x)
{
    double result;
    static const double neginf = -std::numeric_limits<double>::infinity(), c = 12, s = 1e-6, d1 = -0.57721566490153286,
                        d2 = 1.6449340668482264365, /* pi^2/6 */
        s3 = 1. / 12, s4 = 1. / 120, s5 = 1. / 252, s6 = 1. / 240, s7 = 1. / 132;
    // s8 = 691/32760,
    // s9 = 1/12,
    // s10 = 3617/8160;
    /* Illegal arguments */
    if ((x == neginf) || std::isnan(x)) {
        return -std::numeric_limits<double>::infinity();
    }
    /* Singularities */
    if ((x <= 0) && (floor(x) == x)) {
        return neginf;
    }
    /* Negative values */
    /* Use the reflection formula (Jeffrey 11.1.6):
     * digamma(-x) = digamma(x+1) + pi*cot(pi*x)
     *
     * This is related to the identity
     * digamma(-x) = digamma(x+1) - digamma(z) + digamma(1-z)
     * where z is the fractional part of x
     * For example:
     * digamma(-3.1) = 1/3.1 + 1/2.1 + 1/1.1 + 1/0.1 + digamma(1-0.1)
     * = digamma(4.1) - digamma(0.1) + digamma(1-0.1)
     * Then we use
     * digamma(1-z) - digamma(z) = pi*cot(pi*z)
     */
    if (x < 0) {
        return digamma(1 - x) + M_PI / tan(-M_PI * x);
    }
    /* Use Taylor series if argument <= S */
    if (x <= s)
        return d1 - 1 / x + d2 * x;
    /* Reduce to digamma(X + N) where (X + N) >= C */
    result = 0;
    while (x < c) {
        result -= 1 / x;
        x++;
    }
    /* Use de Moivre's expansion if argument >= C */
    /* This expansion can be computed in Maple via asympt(Psi(x),x) */
    if (x >= c) {
        double r = 1 / x;
        result += std::log(x) - 0.5 * r; // added "std::"
        r *= r;
        result -= r * (s3 - r * (s4 - r * (s5 - r * (s6 - r * s7))));
    }
    return result;
}
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
// Subsampled entropy estimate: averages the `entropy` functor over disjoint
// random samples of `sampleSize` records, stopping early once the per-sample
// values are close enough to a normal fit (convergence < threshold).
// Returns the running mean of the per-sample entropies.
// Fixes: removed a leftover std::cout debug print (library code must not
// write to stdout), made the loop counters size_t (they were signed ints
// compared against size_t bounds), and dropped the unused alias T.
template <typename Container, typename Functor>
double estimate(const Container &data, const Functor &entropy, const size_t sampleSize, const double threshold,
                size_t maxIterations)
{
    using V = type_traits::index_value_type_t<Container>;
    const size_t dataSize = data.size();
    // Clamp maxIterations to the number of disjoint samples available.
    if (maxIterations == 0 || maxIterations > dataSize / sampleSize) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations < 1) {
        // Not enough data for even a single sample: run on the whole dataset.
        return entropy(data);
    }
    // Shuffle indexes so consecutive samples are disjoint random subsets
    // (default-seeded engine: runs are deterministic).
    std::vector<size_t> indexes(dataSize);
    std::iota(indexes.begin(), indexes.end(), 0);
    auto rng = std::default_random_engine();
    std::shuffle(indexes.begin(), indexes.end(), rng);
    // Vector copy for O(1) random access regardless of the Container type.
    const std::vector<V> vectorA(data.begin(), data.end());
    std::vector<V> sampleA;
    sampleA.reserve(sampleSize);
    std::vector<double> entropyValues;
    double mu = 0;
    for (size_t i = 1; i <= maxIterations; ++i) {
        const size_t start = (i - 1) * sampleSize;
        const size_t end = std::min(i * sampleSize - 1, dataSize - 1);
        sampleA.clear();
        for (size_t j = start; j < end; ++j) {
            sampleA.push_back(vectorA[indexes[j]]);
        }
        // Entropy of this sample, folded into the sorted history.
        entropyValues.push_back(entropy(sampleA));
        std::sort(entropyValues.begin(), entropyValues.end());
        const size_t n = entropyValues.size();
        // Compare the empirical values with a normal fit (mu, sigma) through
        // the inverse CDF evaluated at the plotting positions p0.
        const auto p0 = entropy_details::linspace(0.5 / n, 1 - 0.5 / n, n);
        mu = entropy_details::mean(entropyValues);
        const double sigma = entropy_details::variance(entropyValues, mu);
        const std::vector<double> synth = entropy_details::icdf(p0, mu, sigma);
        std::vector<double> diff;
        diff.reserve(n);
        for (size_t k = 0; k < n; ++k) {
            diff.push_back(entropyValues[k] - synth[k]);
        }
        // Peak-to-RMS of the residuals, scaled by n, is the convergence score.
        const auto convergence = entropy_details::peak2ems(diff) / n;
        if (convergence < threshold) {
            return mu;
        }
    }
    return mu;
}
// Subsampled estimate of a two-container statistic f: draws paired disjoint
// random samples from a and b, averages f over them, and stops early once the
// per-sample values are close enough to a normal fit (convergence < threshold).
// Returns the running mean of the per-sample values of f.
template <typename Container, typename Functor>
double estimate(const Container &a, const Container &b, const Functor &f, const size_t sampleSize,
                const double threshold, size_t maxIterations)
{
    using T = type_traits::underlying_type_t<Container>; // NOTE(review): unused
    using V = type_traits::index_value_type_t<Container>;
    assert(a.size() == b.size());
    const size_t dataSize = a.size();
    /* Update maxIterations */
    if (maxIterations == 0) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations > dataSize / sampleSize) {
        maxIterations = dataSize / sampleSize;
    }
    if (maxIterations < 1) {
        // Not enough data for even a single sample: run on the whole dataset.
        return f(a, b);
    }
    /* Create shuffle indexes */
    std::vector<size_t> indexes(dataSize);
    std::iota(indexes.begin(), indexes.end(), 0);
    auto rng = std::default_random_engine(); // default-seeded: runs are deterministic
    std::shuffle(indexes.begin(), indexes.end(), rng);
    /* Create vector container for fast random access */
    const std::vector<V> vectorA(a.begin(), a.end());
    const std::vector<V> vectorB(b.begin(), b.end());
    /* Create samples */
    std::vector<V> sampleA;
    std::vector<V> sampleB;
    sampleA.reserve(sampleSize);
    sampleB.reserve(sampleSize);
    std::vector<double> mgcValues;
    double mu = 0;
    // NOTE(review): the loop counters below are signed ints compared against
    // size_t bounds, and the inner `i` shadows the outer iteration counter.
    for (auto i = 1; i <= maxIterations; ++i) {
        size_t start = (i - 1) * sampleSize;
        size_t end = std::min(i * sampleSize - 1, dataSize - 1);
        /* Create samples */
        sampleA.clear();
        sampleB.clear();
        for (auto j = start; j < end; ++j) {
            sampleA.push_back(vectorA[indexes[j]]);
            sampleB.push_back(vectorB[indexes[j]]);
        }
        /* Get sample mgc value */
        double mgc = f(sampleA, sampleB);
        mgcValues.push_back(mgc);
        std::sort(mgcValues.begin(), mgcValues.end());
        const size_t n = mgcValues.size();
        // Compare the empirical values with a normal fit (mu, sigma) through
        // the inverse CDF evaluated at the plotting positions p0.
        const auto p0 = linspace(0.5 / n, 1 - 0.5 / n, n);
        mu = mean(mgcValues);
        double sigma = variance(mgcValues, mu);
        const std::vector<double> synth = icdf(p0, mu, sigma);
        std::vector<double> diff;
        diff.reserve(n);
        for (auto i = 0; i < n; ++i) {
            diff.push_back(mgcValues[i] - synth[i]);
        }
        // Peak-to-RMS of the residuals, scaled by n, is the convergence score.
        auto convergence = peak2ems(diff) / n;
        if (convergence < threshold) {
            return mu;
        }
    }
    return mu;
}
} // namespace entropy_details
// ----------------------------------- entropy
// updated version, for different metric
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
template <typename RecType, typename Metric>
template <typename Container>
double EntropySimple<RecType, Metric>::operator()( // non-kpN version, DEPRECATED
    const Container &data
    // bool avoid_repeated
) const
{
    /// Kozachenko-Leonenko style k-nearest-neighbour differential entropy estimator.
    /// @param data container of records (each record is an indexable point of dimension d)
    /// @return entropy estimate in units of log(logbase); values below 1 are converted
    ///         to exp scale when the `exp` member flag is set
    /// @throws std::invalid_argument when data has fewer than k + 1 points
    using T = type_traits::underlying_type_t<Container>;
    using V = type_traits::index_value_type_t<Container>;

    if (data.empty() || data[0].empty()) {
        return 0;
    }
    if (data.size() < k + 1)
        throw std::invalid_argument("number of points in dataset must be larger than k");

    double d = data[0].size(); // dimensionality of a record

    double entropyEstimate = 0;
    metric::Tree<V, Metric> tree(data[0], -1, metric);
    for (std::size_t i = 1; i < data.size(); ++i) {
        // skip (near-)duplicate points so the k-NN distances stay strictly positive
        tree.insert_if(data[i], std::numeric_limits<T>::epsilon());
    }
    auto n = tree.size(); // may be smaller than data.size() due to insert_if
    for (std::size_t i = 0; i < n; i++) {
        auto res = tree.knn(data[i], k + 1);
        entropyEstimate += std::log(res.back().second); // log distance to the k-th neighbour
    }
    double N = (double)n;
    entropyEstimate = entropyEstimate * d / N; // mean log * d
    entropyEstimate += entropy_details::digamma(N) - entropy_details::digamma(k) + d * std::log(2.0);

    /* Unit-ball volume correction for metrics other than Chebyshev */
    if constexpr (!std::is_same<Metric, typename metric::Chebyshev<T>>::value) {
        double p = 1; // Manhattan and other metrics (TODO check if it is correct for them!)
        if constexpr (std::is_same<Metric, typename metric::Euclidean<T>>::value) {
            p = 2; // Euclidean
        } else if constexpr (std::is_same<Metric, typename metric::P_norm<T>>::value) {
            p = metric.p; // general Minkowsky
        }
        entropyEstimate += d * std::log(tgamma(1 + 1 / p)) - std::log(tgamma(1 + d / p));
    }
    entropyEstimate /= std::log(logbase); // rebase the logarithm
    if (exp)
        return entropy_details::conv_diff_entropy(entropyEstimate); // conversion of values below 1 to exp scale
    else
        return entropyEstimate;
}
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
template <typename RecType, typename Metric>
template <typename Container>
double EntropySimple<RecType, Metric>::estimate(const Container &a, const size_t sampleSize, const double threshold,
                                                size_t maxIterations) const
{
    // Thin forwarder: averages this entropy functor over random subsamples of `a`,
    // delegating the sampling/convergence machinery to entropy_details::estimate.
    const double averaged = entropy_details::estimate(a, *this, sampleSize, threshold, maxIterations);
    return averaged;
}
// ----------------------------------- entropy with kpN approximation
template <typename RecType, typename Metric>
template <typename Container>
double Entropy<RecType, Metric>::operator()(const Container &data) const
{
    /// kpN entropy estimator: k-NN entropy with a local Gaussian correction computed
    /// via expectation propagation over an axis-aligned box around each point.
    /// @param data container of records (each record indexable, dimension d)
    /// @return entropy estimate, or NaN when the estimation fails (too few points or
    ///         too few finite local corrections)
    using T = type_traits::underlying_type_t<Container>;
    using V = type_traits::index_value_type_t<Container>;

    size_t n = data.size();
    size_t d = data[0].size();

    /* Clamp k and p to values the dataset size can support */
    size_t k_ = k;
    size_t p_ = p;
    if (p_ >= n)
        p_ = n - 1; // TODO we need to signal somehow that parameters are altered
    if (k_ >= p_)
        k_ = p_ - 1;
    if (p_ < 3)
        p_ = 3;
    if (k_ < 2)
        k_ = 2;
    if (n < 4)
        return std::nan("estimation failed");

    double h = 0;
    int got_results = 0; // count of points with a finite local correction (absents in Matlab original code)
    metric::Tree<V, Metric> tree(data, -1, metric);
    blaze::DynamicMatrix<double> Nodes(p_, d, 0);
    blaze::DynamicVector<double> mu(d, 0);
    blaze::DynamicVector<double> lb(d, 0);
    blaze::DynamicVector<double> ub(d, 0);
    blaze::DynamicVector<double> x_vector(d, 0);
    for (size_t i = 0; i < n; ++i) {
        auto res = tree.knn(data[i], p_);
        auto eps = res[k_ - 1].second; // distance to the k-th neighbour defines the local box

        /* Collect the p_ nearest neighbours and their mean */
        blaze::reset(mu);
        for (size_t p_idx = 0; p_idx < p_; ++p_idx) { // p_ realizations from the tree
            for (size_t d_idx = 0; d_idx < d; ++d_idx) { // dimensions
                Nodes(p_idx, d_idx) = res[p_idx].first->get_data()[d_idx];
                mu[d_idx] += res[p_idx].first->get_data()[d_idx];
            }
        }
        mu = mu / p_;
        Nodes = Nodes - blaze::expand(blaze::trans(mu), Nodes.rows());

        /* Local covariance, regularized so it stays invertible */
        double offset = 1e-8; // TODO consider dependence on machine epsilon
        auto K =
            blaze::evaluate((blaze::trans(Nodes) * Nodes) * p_ / (p_ - 1) + blaze::IdentityMatrix<double>(d) * offset);

        /* Axis-aligned box of half-width eps around the query point */
        blaze::reset(lb);
        blaze::reset(ub);
        for (size_t d_idx = 0; d_idx < d; ++d_idx) { // dimensions
            lb[d_idx] = data[i][d_idx] - eps;
            ub[d_idx] = data[i][d_idx] + eps;
            x_vector[d_idx] = data[i][d_idx];
        }
        auto g_local = epmgp::local_gaussian_axis_aligned_hyperrectangles<double>(mu, K, lb, ub);
        double logG = std::get<0>(g_local);
        if (!std::isnan(logG)) { // UNLIKE original Matlab code, we exclude points that result in NaN
            double g = entropy_details::mvnpdf(x_vector, mu, K);
            h += logG - std::log(g);
            got_results++;
        }
    }
    // Bug fix: the NaN result used to be unconditionally overwritten by the next
    // assignment; now too few valid points really yields NaN.
    if (got_results <= 20) // this absents in Matlab original code. TODO adjust min number of points
        return std::nan("estimation failed");
    // NOTE(review): uses the original k, not the clamped k_ — confirm this is intended
    double result = entropy_details::digamma(n) - entropy_details::digamma(k) + h / n;
    if (exp)
        return entropy_details::conv_diff_entropy(result); // conversion of values below 1 to exp scale
    return result;
}
// averaged entropy estimation: code COPIED from mgc.*pp with only mgc replaced with entropy, TODO refactor to avoid
// code dubbing
template <typename RecType, typename Metric>
template <typename Container>
double Entropy<RecType, Metric>::estimate(const Container &a, const size_t sampleSize, const double threshold,
                                          size_t maxIterations) const
{
    // Thin forwarder: averages this kpN entropy functor over random subsamples of
    // `a`, delegating the sampling/convergence machinery to entropy_details::estimate.
    const double averaged = entropy_details::estimate(a, *this, sampleSize, threshold, maxIterations);
    return averaged;
}
// --------------------------- VMixing
template <typename RecType, typename Metric>
template <typename C>
typename std::enable_if_t<!type_traits::is_container_of_integrals_v<C>, type_traits::underlying_type_t<C>>
VMixing_simple<RecType, Metric>::operator()(const C &Xc, const C &Yc) const
{ // non-kpN version, DEPRECATED
    /// Entropy-based mixing score: 2*H(X++Y) - H(X) - H(Y) with the simple estimator.
    using T = type_traits::underlying_type_t<C>;

    if (Xc.size() < k + 1 || Yc.size() < k + 1)
        throw std::invalid_argument("number of points in dataset must be larger than k");

    // Record-wise concatenation of both datasets (instead of combine(X, Y))
    std::vector<std::vector<T>> joint;
    joint.reserve(Xc.size() + Yc.size());
    for (const auto &rec : Xc)
        joint.emplace_back(std::begin(rec), std::end(rec)); // TODO optimize
    for (const auto &rec : Yc)
        joint.emplace_back(std::begin(rec), std::end(rec));

    auto entropy = EntropySimple<void, Metric>(metric, k);
    return 2 * entropy(joint) - entropy(Xc) - entropy(Yc);
}
template <typename RecType, typename Metric>
template <typename C>
double VMixing_simple<RecType, Metric>::estimate(const C &a, const C &b, const size_t sampleSize,
                                                 const double threshold, size_t maxIterations) const
{
    // Thin forwarder: averages the mixing functor over paired random subsamples of
    // a and b via entropy_details::estimate.
    const double averaged = entropy_details::estimate(a, b, *this, sampleSize, threshold, maxIterations);
    return averaged;
}
template <typename RecType, typename Metric>
template <typename C>
typename std::enable_if_t<!type_traits::is_container_of_integrals_v<C>, type_traits::underlying_type_t<C>>
VMixing<RecType, Metric>::operator()(const C &Xc, const C &Yc) const
{
    /// Entropy-based mixing score: 2*H(X++Y) - H(X) - H(Y) with the kpN estimator.
    using T = type_traits::underlying_type_t<C>;

    if (Xc.size() < k + 1 || Yc.size() < k + 1)
        throw std::invalid_argument("number of points in dataset must be larger than k");

    // Record-wise concatenation of both datasets (instead of combine(X, Y))
    std::vector<std::vector<T>> joint;
    joint.reserve(Xc.size() + Yc.size());
    for (const auto &rec : Xc)
        joint.emplace_back(std::begin(rec), std::end(rec)); // TODO optimize
    for (const auto &rec : Yc)
        joint.emplace_back(std::begin(rec), std::end(rec));

    auto entropy = Entropy<void, Metric>(metric, k, p);
    return 2 * entropy(joint) - entropy(Xc) - entropy(Yc);
}
template <typename RecType, typename Metric>
template <typename C>
double VMixing<RecType, Metric>::estimate(const C &a, const C &b, const size_t sampleSize, const double threshold,
                                          size_t maxIterations) const
{
    // Thin forwarder: averages the kpN mixing functor over paired random subsamples
    // of a and b via entropy_details::estimate.
    const double averaged = entropy_details::estimate(a, b, *this, sampleSize, threshold, maxIterations);
    return averaged;
}
/* // VOI code, works and may be enabled
namespace voi_details {
template <typename C1, typename C2, typename T=type_traits::underlying_type_t<C1>>
std::vector<std::vector<T>> combine(const C1& X, const C2& Y)
{
std::size_t N = X.size();
std::size_t dx = X[0].size();
std::size_t dy = Y[0].size();
std::vector<std::vector<T>> XY(N);
for (std::size_t i = 0; i < N; i++) {
XY[i].resize(dx + dy);
std::size_t k = 0;
for (std::size_t j = 0; j < dx; j++, k++) {
XY[i][k] = X[i][j];
}
for (std::size_t j = 0; j < dy; j++, k++) {
XY[i][k] = Y[i][j];
}
}
return XY;
}
}
template <typename C, typename Metric>
typename std::enable_if_t<!type_traits::is_container_of_integrals_v<C>, type_traits::underlying_type_t<C>>
VOI_simple(const C& Xc, const C& Yc, int k)
{
using T = type_traits::underlying_type_t<C>;
auto N = Xc.size();
if (N < k + 1 || Yc.size() < k + 1)
throw std::invalid_argument("number of points in dataset must be larger than k");
std::vector<std::vector<T>> X;
for (const auto& e: Xc)
X.push_back(std::vector<T>(std::begin(e), std::end(e))); // TODO optimize
std::vector<std::vector<T>> Y;
for (const auto& e: Yc)
Y.push_back(std::vector<T>(std::begin(e), std::end(e)));
std::vector<std::vector<T>> XY = voi_details::combine(X, Y);
auto e = EntropySimple<void, Metric>(Metric(), k);
auto result = 2 * e(XY) - e(Xc) - e(Yc);
return result;
}
template <typename C, typename Metric>
typename std::enable_if_t<!type_traits::is_container_of_integrals_v<C>, type_traits::underlying_type_t<C>>
VOI(const C& Xc, const C& Yc, int k, int p)
{
using T = type_traits::underlying_type_t<C>;
auto N = Xc.size();
if (N < k + 1 || Yc.size() < k + 1)
throw std::invalid_argument("number of points in dataset must be larger than k");
std::vector<std::vector<T>> X;
for (const auto& e: Xc)
X.push_back(std::vector<T>(std::begin(e), std::end(e))); // TODO optimize
std::vector<std::vector<T>> Y;
for (const auto& e: Yc)
Y.push_back(std::vector<T>(std::begin(e), std::end(e)));
std::vector<std::vector<T>> XY = voi_details::combine(X, Y);
auto e = Entropy<void, Metric>(Metric(), k, p);
auto result = 2 * e(XY) - e(Xc) - e(Yc);
return result;
}
// */
} // namespace metric
#endif
| 26,469
|
C++
|
.cpp
| 714
| 33.526611
| 116
| 0.661396
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,541
|
epmgp.cpp
|
metric-space-ai_metric/metric/correlation/epmgp.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2019 Panda Team
*/
// approximation of probability for multidimensional normal distribution bounded by (hyper)rectangle
// https://arxiv.org/pdf/1111.6832.pdf
// based on local_gaussian.m Matlab code
#include "epmgp.hpp"
#include <blaze/Blaze.h>
#include <cassert>
#include <cmath>
#include <limits>
#include <tuple>
#include <vector>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
namespace epmgp {
const double inf = std::numeric_limits<double>::infinity();
// https://stackoverflow.com/questions/39777360/accurate-computation-of-scaled-complementary-error-function-erfcx
// Scaled complementary error function erfcx(x) = exp(x*x) * erfc(x) in double
// precision, avoiding the overflow of the naive exp(x*x)*erfc(x) formula.
// The evaluation order and fma usage are part of the accuracy contract — do not
// reorder or "simplify" the arithmetic.
double erfcx_double(double x)
{
    double a, d, e, m, p, q, r, s, t;
    a = fmax(x, 0.0 - x); // NaN preserving absolute value computation

    /* Compute q = (a-4)/(a+4) accurately. [0,INF) -> [-1,1] */
    m = a - 4.0;
    p = a + 4.0;
    r = 1.0 / p;
    q = m * r;
    t = fma(q + 1.0, -4.0, a); // division residual, folded back via fma
    e = fma(q, -a, t);
    q = fma(r, e, q);

    /* Approximate (1+2*a)*exp(a*a)*erfc(a) as p(q)+1 for q in [-1,1] */
    // Horner evaluation with exact hex-float coefficients (decimal value in comment)
    p = 0x1.edcad78fc8044p-31; //  8.9820305531190140e-10
    p = fma(p, q, 0x1.b1548f14735d1p-30); //  1.5764464777959401e-09
    p = fma(p, q, -0x1.a1ad2e6c4a7a8p-27); // -1.2155985739342269e-08
    p = fma(p, q, -0x1.1985b48f08574p-26); // -1.6386753783877791e-08
    p = fma(p, q, 0x1.c6a8093ac4f83p-24); //  1.0585794011876720e-07
    p = fma(p, q, 0x1.31c2b2b44b731p-24); //  7.1190423171700940e-08
    p = fma(p, q, -0x1.b87373facb29fp-21); // -8.2040389712752056e-07
    p = fma(p, q, 0x1.3fef1358803b7p-22); //  2.9796165315625938e-07
    p = fma(p, q, 0x1.7eec072bb0be3p-18); //  5.7059822144459833e-06
    p = fma(p, q, -0x1.78a680a741c4ap-17); // -1.1225056665965572e-05
    p = fma(p, q, -0x1.9951f39295cf4p-16); // -2.4397380523258482e-05
    p = fma(p, q, 0x1.3be1255ce180bp-13); //  1.5062307184282616e-04
    p = fma(p, q, -0x1.a1df71176b791p-13); // -1.9925728768782324e-04
    p = fma(p, q, -0x1.8d4aaa0099bc8p-11); // -7.5777369791018515e-04
    p = fma(p, q, 0x1.49c673066c831p-8); //  5.0319701025945277e-03
    p = fma(p, q, -0x1.0962386ea02b7p-6); // -1.6197733983519948e-02
    p = fma(p, q, 0x1.3079edf465cc3p-5); //  3.7167515521269866e-02
    p = fma(p, q, -0x1.0fb06dfedc4ccp-4); // -6.6330365820039094e-02
    p = fma(p, q, 0x1.7fee004e266dfp-4); //  9.3732834999538536e-02
    p = fma(p, q, -0x1.9ddb23c3e14d2p-4); // -1.0103906603588378e-01
    p = fma(p, q, 0x1.16ecefcfa4865p-4); //  6.8097054254651804e-02
    p = fma(p, q, 0x1.f7f5df66fc349p-7); //  1.5379652102610957e-02
    p = fma(p, q, -0x1.1df1ad154a27fp-3); // -1.3962111684056208e-01
    p = fma(p, q, 0x1.dd2c8b74febf6p-3); //  2.3299511862555250e-01

    /* Divide (1+p) by (1+2*a) ==> exp(a*a)*erfc(a) */
    d = a + 0.5;
    r = 1.0 / d;
    r = r * 0.5;
    q = fma(p, r, r); // q = (p+1)/(1+2*a)
    t = q + q;
    e = (p - q) + fma(t, -a, 1.0); // residual: (p+1)-q*(1+2*a)
    r = fma(e, r, q);

    /* Handle argument of infinity */
    if (a > 0x1.fffffffffffffp1023)
        r = 0.0;

    /* Handle negative arguments: erfcx(x) = 2*exp(x*x) - erfcx(|x|) */
    if (x < 0.0) {
        s = x * x;
        d = fma(x, x, -s); // low-order bits of x*x (error of the product)
        e = exp(s);
        r = e - r;
        r = fma(e, d + d, r); // correct for the rounding of x*x
        r = r + e;
        if (e > 0x1.fffffffffffffp1023)
            r = e; // avoid creating NaN
    }
    return r;
}
// Element-wise moments of 1-D normal distributions truncated to [lowerB, upperB].
// For each index i, computes logZhat (log of the probability mass of
// N(muIN[i], sigmaIN[i]) inside the interval) and the mean/variance of the
// truncated distribution. Based on the corresponding Matlab routine; the many
// branches pick numerically stable erfcx-based formulas per sign/magnitude case.
// NOTE(review): sigmaIN appears to be a variance (the code scales by
// sqrt(2*sigma)) — confirm against callers.
template <typename T>
auto truncNormMoments(std::vector<T> lowerBIN, std::vector<T> upperBIN, std::vector<T> muIN, std::vector<T> sigmaIN)
    -> std::tuple<std::vector<T>, std::vector<T>, std::vector<T>>
{
    size_t n = lowerBIN.size();
    assert(upperBIN.size() == n && muIN.size() == n && sigmaIN.size() == n);
    std::vector<T> logZhatOUT(n, 0);
    std::vector<T> muHatOUT(n, 0);
    std::vector<T> sigmaHatOUT(n, 0);
    for (size_t i = 0; i < n; ++i) {
        auto lowerB = lowerBIN[i];
        auto upperB = upperBIN[i];
        auto mu = muIN[i];
        auto sigma = sigmaIN[i];
        assert(lowerB <= upperB);
        T logZhat, meanConst, varConst;
        // standardized bounds: a, b in units of sqrt(2*sigma)
        auto a = (lowerB - mu) / (std::sqrt(2 * sigma));
        auto b = (upperB - mu) / (std::sqrt(2 * sigma));
        if (std::isinf(a) && std::isinf(b)) {
            if (sgn(a) == sgn(b)) {
                // both bounds at the same infinity: empty interval, zero mass
                logZhatOUT[i] = -inf;
                muHatOUT[i] = a;
                sigmaHatOUT[i] = 0;
                continue;
            } else {
                // (-inf, inf): no truncation at all
                logZhatOUT[i] = 0;
                muHatOUT[i] = mu;
                sigmaHatOUT[i] = sigma;
                continue;
            }
        } else {
            if (a > b) {
                // inverted interval: zero mass by convention
                logZhatOUT[i] = -inf;
                muHatOUT[i] = mu;
                sigmaHatOUT[i] = 0;
                continue;
            } else {
                if (a == -inf) {
                    // one-sided truncation (-inf, upperB]
                    if (b > 26) {
                        // far right tail: compute via the complementary mass
                        auto logZhatOtherTail = std::log(0.5) + std::log(erfcx(b)) - b * b;
                        logZhat = std::log(1 - std::exp(logZhatOtherTail));
                    } else
                        logZhat = std::log(0.5) + std::log(erfcx(-b)) - b * b;
                    meanConst = -2 / erfcx(-b);
                    varConst = -2 / erfcx(-b) * (upperB + mu);
                } else {
                    if (b == inf) {
                        // one-sided truncation [lowerB, inf)
                        if (a < -26) {
                            auto logZhatOtherTail = std::log(0.5) + std::log(erfcx(-a)) - a * a;
                            logZhat = std::log(1 - std::exp(logZhatOtherTail));
                            // logZhat = 0; // lim[-Inf]logZhat = 0
                        } else {
                            logZhat = std::log(0.5) + std::log(erfcx(a)) - a * a;
                        }
                        meanConst = 2 / erfcx(a);
                        varConst = 2 / erfcx(a) * (lowerB + mu);
                    } else {
                        // two-sided truncation
                        if (sgn(a) == sgn(b)) {
                            // both bounds on the same side of the mean: stable
                            // formulation in terms of min/max of |a|, |b|
                            auto abs_a = std::abs(a);
                            auto abs_b = std::abs(b);
                            auto maxab = (abs_a < abs_b) ? abs_b : abs_a;
                            auto minab = (abs_a < abs_b) ? abs_a : abs_b;
                            logZhat = std::log(0.5) - minab * minab +
                                      std::log(std::abs(std::exp(-(maxab * maxab - minab * minab)) * erfcx(maxab) -
                                                        erfcx(minab)));
                            meanConst = 2 * sgn(a) *
                                        (1 / ((erfcx(std::abs(a)) - std::exp(a * a - b * b) * erfcx(std::abs(b)))) -
                                         1 / ((std::exp(b * b - a * a) * erfcx(std::abs(a)) - erfcx(std::abs(b)))));
                            varConst =
                                2 * sgn(a) *
                                ((lowerB + mu) / ((erfcx(std::abs(a)) - std::exp(a * a - b * b) * erfcx(std::abs(b)))) -
                                 (upperB + mu) / ((std::exp(b * b - a * a) * erfcx(std::abs(a)) - erfcx(std::abs(b)))));
                        } else {
                            // bounds straddle the mean
                            if (std::abs(b) >= std::abs(a)) {
                                if (a >= -26) {
                                    logZhat = std::log(0.5) - a * a +
                                              std::log(erfcx(a) - std::exp(-(b * b - a * a)) * erfcx(b));
                                    meanConst = 2 * (1 / ((erfcx(a) - std::exp(a * a - b * b) * erfcx(b))) -
                                                     1 / ((std::exp(b * b - a * a) * erfcx(a) - erfcx(b))));
                                    varConst = 2 * ((lowerB + mu) / ((erfcx(a) - std::exp(a * a - b * b) * erfcx(b))) -
                                                    (upperB + mu) / ((std::exp(b * b - a * a) * erfcx(a) - erfcx(b))));
                                } else {
                                    // a extremely negative: lower tail contributes ~0
                                    logZhat = std::log(0.5) + std::log(2 - std::exp(-(b * b)) * erfcx(b) -
                                                                       std::exp(-(a * a)) * erfcx(-a));
                                    meanConst = 2 * (1 / ((erfcx(a) - std::exp(a * a - b * b) * erfcx(b))) -
                                                     1 / (std::exp(b * b) * 2 - erfcx(b)));
                                    varConst = 2 * ((lowerB + mu) / ((erfcx(a) - std::exp(a * a - b * b) * erfcx(b))) -
                                                    (upperB + mu) / (std::exp(b * b) * 2 - erfcx(b)));
                                }
                            } else {
                                if (b <= 26) {
                                    logZhat = std::log(0.5) - b * b +
                                              std::log(erfcx(-b) - std::exp(-(a * a - b * b)) * erfcx(-a));
                                    meanConst = -2 * (1 / ((erfcx(-a) - std::exp(a * a - b * b) * erfcx(-b))) -
                                                      1 / ((std::exp(b * b - a * a) * erfcx(-a) - erfcx(-b))));
                                    varConst =
                                        -2 * ((lowerB + mu) / ((erfcx(-a) - std::exp(a * a - b * b) * erfcx(-b))) -
                                              (upperB + mu) / ((std::exp(b * b - a * a) * erfcx(-a) - erfcx(-b))));
                                } else {
                                    // b extremely positive: upper tail contributes ~0
                                    logZhat = std::log(0.5) + std::log(2 - std::exp(-(a * a)) * erfcx(-a) -
                                                                       std::exp(-(b * b)) * erfcx(b));
                                    meanConst = -2 * (1 / (erfcx(-a) - std::exp(a * a) * 2) -
                                                      1 / (std::exp(b * b - a * a) * erfcx(-a) - erfcx(-b)));
                                    varConst = -2 * ((lowerB + mu) / (erfcx(-a) - std::exp(a * a) * 2) -
                                                     (upperB + mu) / (std::exp(b * b - a * a) * erfcx(-a) - erfcx(-b)));
                                }
                            }
                        }
                    }
                }
            }
        }
        // moments of the truncated distribution from the branch-specific constants
        auto muHat = mu + meanConst * std::sqrt(sigma / (2 * M_PI));
        auto sigmaHat = sigma + varConst * std::sqrt(sigma / (2 * M_PI)) + mu * mu - muHat * muHat;
        logZhatOUT[i] = logZhat;
        muHatOUT[i] = muHat;
        sigmaHatOUT[i] = sigmaHat;
    }
    return std::make_tuple(logZhatOUT, muHatOUT, sigmaHatOUT);
}
// Expectation Propagation approximation of the mass of N(m, K) inside the
// axis-aligned hyperrectangle [lowerB, upperB] (arXiv:1111.6832, port of
// local_gaussian.m). Returns (logZ, mu, sigma): log of the approximated mass
// and the moments of the fitted local Gaussian.
template <typename T>
auto local_gaussian_axis_aligned_hyperrectangles(blaze::DynamicVector<T> m, blaze::DynamicMatrix<T> K,
                                                 blaze::DynamicVector<T> lowerB, blaze::DynamicVector<T> upperB)
    -> std::tuple<T, blaze::DynamicVector<T>, blaze::DynamicMatrix<T>>
{
    size_t n = m.size();
    assert(lowerB.size() == n && upperB.size() == n && K.rows() == n);
    size_t maxSteps = 200; // EP iteration cap
    T epsConverge = 1e-8;  // convergence threshold on ||mu - muLast||
    blaze::DynamicVector<T> tauSite(K.rows(), 0); // EP site precisions
    blaze::DynamicVector<T> nuSite(K.rows(), 0);  // EP site precision-adjusted means
    T logZ = 0;
    blaze::DynamicVector<T> mu = (lowerB + upperB) / 2.0; // start at the box center
    blaze::DynamicMatrix<T> sigma = K;
    blaze::DynamicVector<T> KinvM = blaze::evaluate(blaze::inv(K) * m);
    blaze::DynamicVector<T> muLast(mu.size(), 1);
    muLast = muLast * -inf; // force at least one iteration
    bool converged = false;
    size_t k = 1;
    // here we only define expressions, calculations are made inside the loop below
    // (blaze expression templates: tauCavity/nuCavity/deltatauSite are lazy views
    // over sigma/mu/tauSite/nuSite/sighat and re-evaluate on every use)
    auto tauCavity = 1 / blaze::diagonal(sigma) - tauSite;
    auto nuCavity = mu / blaze::diagonal(sigma) - nuSite;
    blaze::DynamicVector<T> sighat(n, 0);
    auto deltatauSite = 1.0 / sighat - tauCavity - tauSite;
    auto logZhat = blaze::DynamicVector<T>(n, 0);
    blaze::DynamicMatrix<T> L; // Cholesky factor used in the final logZ as well
    std::vector<T> muInSTL(n, 0);
    std::vector<T> sigmaInSTL(n, 0);
    std::vector<T> lowerbSTL(lowerB.size(), 0);
    std::vector<T> upperbSTL(upperB.size(), 0);
    while (!converged && k < maxSteps) {
        // cavity distribution parameters, converted to STL vectors for truncNormMoments
        blaze::DynamicVector<T> muInBlaze(nuCavity * (1 / tauCavity));
        blaze::DynamicVector<T> sigmaInBlaze = 1 / tauCavity;
        for (size_t i = 0; i < n; ++i) {
            muInSTL[i] = muInBlaze[i];
            sigmaInSTL[i] = sigmaInBlaze[i];
            lowerbSTL[i] = lowerB[i];
            upperbSTL[i] = upperB[i];
        }
        // per-dimension moments of the cavity truncated to the box
        auto hat = truncNormMoments(lowerbSTL, upperbSTL, muInSTL, sigmaInSTL);
        auto logZhatSTL = std::get<0>(hat);
        auto muhatSTL = std::get<1>(hat);
        auto sighatSTL = std::get<2>(hat);
        blaze::DynamicVector<T> muhat(muhatSTL.size(), 0);
        // blaze::DynamicVector<double> sighat (sighatSTL.size(), 0); // moved outside loop
        assert(logZhat.size() == n && muhat.size() == n && sighat.size() == n); // TODO remove after testing
        for (size_t i = 0; i < n; ++i) {
            logZhat[i] = logZhatSTL[i];
            muhat[i] = muhatSTL[i];
            sighat[i] = sighatSTL[i];
        }
        // site updates (deltatauSite is a lazy expression over the freshly filled sighat)
        // auto deltatauSite = 1.0/sighat - tauCavity - tauSite; // definition moved out of loop
        tauSite = blaze::evaluate(tauSite + deltatauSite);
        nuSite = blaze::evaluate(muhat / sighat - nuCavity);
        blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> sSiteHalf(tauSite.size(), 0);
        for (size_t i = 0; i < tauSite.size(); ++i) {
            if (tauSite[i] < 0 || std::isnan(tauSite[i])) // this differs from Matlab code
                sSiteHalf(i, i) = 0;
            else
                sSiteHalf(i, i) = std::sqrt(tauSite[i]);
        }
        // recompute the posterior moments via a Cholesky factorization
        blaze::IdentityMatrix<T> eye(K.rows());
        blaze::llh(eye + sSiteHalf * K * sSiteHalf, L);
        L = blaze::trans(L); // get lower from upper
        // L = eye + sSiteHalf*K*sSiteHalf; // TODO remove
        // std::cout << "L:\n" << L << "\n";
        // blaze::potrf(L, 'U'); // LAPACK issue
        // std::cout << "L_chol:\n" << L << "\n";
        auto V = blaze::inv(blaze::trans(L)) * (sSiteHalf * K);
        sigma = K - blaze::trans(V) * V;
        mu = sigma * (nuSite + KinvM);
        blaze::DynamicVector<T> diff = muLast - mu;
        if (std::sqrt(blaze::trans(diff) * diff) < epsConverge) // (norm(muLast-mu)) < epsConverge
            converged = true;
        else
            muLast = mu;
        k++;
    }
    // assemble logZ from the four standard EP terms plus the site log-masses
    T lZ1 = 0;
    blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> tau(n, 0);
    blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> diagTauSite(n, 0);
    blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> diagTauCavity(n, 0);
    // blaze::DynamicMatrix<double> diagTauSite (n, n, 0);
    // blaze::DynamicMatrix<double> diagTauCavity (n, n, 0);
    for (size_t i = 0; i < n; ++i) {
        lZ1 += std::log(1 + tauSite[i] / tauCavity[i]) * 0.5 - std::log(L(i, i));
        tau(i, i) = 1 / (tauCavity[i] + tauSite[i]);
        diagTauSite(i, i) = tauSite[i];
        diagTauCavity(i, i) = tauCavity[i];
    }
    blaze::DynamicVector<T> diffSite(nuSite - tauSite * m);
    T lZ2 = 0.5 * (blaze::trans(diffSite) * (sigma - tau) * diffSite);
    auto lZ3 = 0.5 * (blaze::trans(nuCavity) *
                      (blaze::inv(diagTauSite + diagTauCavity) * (tauSite * nuCavity / tauCavity - 2 * nuSite)));
    auto lZ4 =
        -0.5 * (blaze::trans(tauCavity * m) * (blaze::inv(diagTauSite + diagTauCavity) * (tauSite * m - 2 * nuSite)));
    logZ = lZ1 + lZ2 + lZ3 + lZ4 + blaze::sum(logZhat);
    return std::make_tuple(logZ, mu, sigma);
}
// complex value version, needs debugging
/*
template <typename T>
std::tuple<T, blaze::DynamicVector<T>, blaze::DynamicMatrix<T>>
local_gaussian_axis_aligned_hyperrectangles(
blaze::DynamicVector<T> m,
blaze::DynamicMatrix<T> K,
blaze::DynamicVector<T> lowerB,
blaze::DynamicVector<T> upperB
)
{
size_t n = m.size();
assert(lowerB.size() == n && upperB.size() == n && K.rows() == n);
size_t maxSteps = 200;
T epsConverge = 1e-8;
blaze::DynamicVector<T> tauSite (K.rows(), 0);
blaze::DynamicVector<T> nuSite (K.rows(), 0);
T logZ = 0;
blaze::DynamicVector<T> mu = (lowerB + upperB) / 2.0;
blaze::DynamicMatrix<T> sigma = K;
blaze::DynamicVector<T> KinvM = blaze::evaluate(blaze::inv(K) * m);
blaze::DynamicVector<T> muLast (mu.size(), 1);
muLast = muLast * -inf;
bool converged = false;
size_t k = 1;
// here we only define expressions, calculations are made inside the loop below
auto tauCavity = 1/blaze::diagonal(sigma) - tauSite;
auto nuCavity = mu/blaze::diagonal(sigma) - nuSite;
blaze::DynamicVector<T> sighat (n, 0);
auto deltatauSite = 1.0/sighat - tauCavity - tauSite;
auto logZhat = blaze::DynamicVector<T>(n, 0);
blaze::DynamicMatrix<T> L;
blaze::DynamicMatrix<std::complex<T>> Lc; // complex
std::vector<T> muInSTL (n, 0);
std::vector<T> sigmaInSTL (n, 0);
std::vector<T> lowerbSTL (lowerB.size(), 0);
std::vector<T> upperbSTL (upperB.size(), 0);
while (!converged && k < maxSteps) {
blaze::DynamicVector<T> muInBlaze ( nuCavity * (1/tauCavity) );
blaze::DynamicVector<T> sigmaInBlaze = 1/tauCavity;
for (size_t i = 0; i < n; ++i) {
muInSTL[i] = muInBlaze[i];
sigmaInSTL[i] = sigmaInBlaze[i];
lowerbSTL[i] = lowerB[i];
upperbSTL[i] = upperB[i];
}
auto hat = truncNormMoments(lowerbSTL, upperbSTL, muInSTL, sigmaInSTL);
auto logZhatSTL = std::get<0>(hat);
auto muhatSTL = std::get<1>(hat);
auto sighatSTL = std::get<2>(hat);
blaze::DynamicVector<T> muhat (muhatSTL.size(), 0);
//blaze::DynamicVector<double> sighat (sighatSTL.size(), 0); // moved outside loop
assert(logZhat.size() == n && muhat.size() == n && sighat.size() == n); // TODO remove after testing
for (size_t i = 0; i < n; ++i) {
logZhat[i] = logZhatSTL[i];
muhat[i] = muhatSTL[i];
sighat[i] = sighatSTL[i];
}
//auto deltatauSite = 1.0/sighat - tauCavity - tauSite; // definition moved out of loop
tauSite = blaze::evaluate(tauSite + deltatauSite);
nuSite = blaze::evaluate(muhat/sighat - nuCavity);
// blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> sSiteHalf (tauSite.size(), 0);
// for (size_t i = 0; i<tauSite.size(); ++i) {
// if (tauSite[i] < 0 || std::isnan(tauSite[i]))
// sSiteHalf(i, i) = 0; // TODO check well
// else
// sSiteHalf(i, i) = std::sqrt(tauSite[i]);
// }
// blaze::IdentityMatrix<T> eye (K.rows());
// blaze::llh(eye + sSiteHalf*K*sSiteHalf, L);
// L = blaze::trans(L); // get lower from upper
bool negative_exists = false;
blaze::DiagonalMatrix<blaze::DynamicMatrix<std::complex<T>>> sSiteHalf (tauSite.size(), 0);
for (size_t i = 0; i<tauSite.size(); ++i) {
if (tauSite[i] < 0) {
negative_exists = true;
sSiteHalf(i, i) = std::sqrt(tauSite[i]);
break;
}
}
if (negative_exists) {
for (size_t i = 0; i<tauSite.size(); ++i) {
if (tauSite[i] > -1e-8) {
tauSite[i] = 0;
sSiteHalf(i, i) = 0; // TODO check epsilon!!
}
}
}
blaze::IdentityMatrix<std::complex<T>> eye (K.rows());
blaze::DynamicMatrix<std::complex<T>> Kc = K;
Lc = K; // TODO create empty // blaze::DynamicMatrix<std::complex<T>>(K.rows(), 0);
auto llh_arg = eye + sSiteHalf*Kc*sSiteHalf;
//blaze::llh(eye + sSiteHalf*Kc*sSiteHalf, Lc);
blaze::llh(llh_arg, Lc);
Lc = blaze::trans(Lc); // get lower from upper
L = blaze::real(Lc);
//L = eye + sSiteHalf*K*sSiteHalf; // TODO remove
//std::cout << "L:\n" << L << "\n";
//blaze::potrf(L, 'U'); // LAPACK issue
//std::cout << "L_chol:\n" << L << "\n";
// auto V = blaze::inv(blaze::trans(L)) * (sSiteHalf*K);
auto V = blaze::inv(blaze::trans(Lc)) * (sSiteHalf*Kc);
//sigma = K - blaze::trans(V)*V;
sigma = blaze::real(Kc - blaze::trans(V)*V);
mu = sigma*(nuSite + KinvM);
blaze::DynamicVector<T> diff = muLast - mu;
if (std::sqrt(blaze::trans(diff) * diff) < epsConverge) // (norm(muLast-mu)) < epsConverge
converged = true;
else
muLast = mu;
k++;
}
T lZ1 = 0;
blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> tau (n, 0);
blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> diagTauSite (n, 0);
blaze::DiagonalMatrix<blaze::DynamicMatrix<T>> diagTauCavity (n, 0);
//blaze::DynamicMatrix<double> diagTauSite (n, n, 0);
//blaze::DynamicMatrix<double> diagTauCavity (n, n, 0);
for (size_t i = 0; i<n; ++i) {
lZ1 += std::log(1 + tauSite[i]/tauCavity[i])*0.5 - std::log(L(i, i));
tau(i, i) = 1/(tauCavity[i] + tauSite[i]);
diagTauSite(i, i) = tauSite[i];
diagTauCavity(i, i) = tauCavity[i];
}
blaze::DynamicVector<T> diffSite (nuSite - tauSite*m);
T lZ2 = 0.5*(blaze::trans(diffSite)*(sigma-tau)*diffSite);
auto lZ3 = 0.5*( blaze::trans(nuCavity)*( blaze::inv(diagTauSite + diagTauCavity)*(tauSite*nuCavity/tauCavity -
2*nuSite) ) ); auto lZ4 = - 0.5*( blaze::trans(tauCavity*m)*( blaze::inv(diagTauSite + diagTauCavity)*(tauSite*m -
2*nuSite) ) ); logZ = lZ1 + lZ2 + lZ3 + lZ4 + blaze::sum(logZhat);
return std::make_tuple(logZ, mu, sigma);
}
// */
} // namespace epmgp
| 18,157
|
C++
|
.cpp
| 435
| 37.83908
| 116
| 0.595683
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,542
|
init.cpp
|
metric-space-ai_metric/python/src/init.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
//#include "metric_converters.hpp"
//#include "stl_wrappers.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>
#include <vector>
#include <iostream>
namespace py = pybind11;
/*void export_converters()
{
IterableConverter()
.from_python<std::vector<double>>()
.from_python<std::vector<int>>()
.from_python<std::vector<std::vector<double>>>()
.from_python<std::vector<std::vector<int>>>()
;
NumpyScalarConverter()
.from_python<signed char>()
.from_python<short>()
.from_python<int>()
.from_python<long>()
.from_python<long long>()
.from_python<unsigned char>()
.from_python<unsigned short>()
.from_python<unsigned int>()
.from_python<unsigned long>()
.from_python<unsigned long long>()
.from_python<float>()
.from_python<double>();
NumpyArrayConverter()
.from_python<WrapStlVector<double>>()
.from_python<WrapStlMatrix<double>>()
.from_python<WrapStlVector<WrapStlVector<double>>>();
}*/
/*void export_containers() {
typedef std::vector<double> VectorDouble;
typedef std::vector<int> VectorInt;
typedef std::vector<bool> VectorBool;
typedef std::vector<unsigned char> VectorUChar;
typedef std::vector<unsigned int> VectorUInt;
typedef std::vector<unsigned long> VectorULong;
typedef std::vector<VectorDouble> VectorVectorDouble;
typedef std::vector<VectorInt> VectorVectorInt;
typedef std::vector<VectorULong> VectorVectorULong;
py::class_<VectorDouble>("VectorDouble").def(py::vector_indexing_suite<VectorDouble>());
py::class_<VectorInt>("VectorInt").def(py::vector_indexing_suite<VectorInt>());
py::class_<VectorBool>("VectorBool").def(py::vector_indexing_suite<VectorBool>());
py::class_<VectorUChar>("VectorUChar").def(py::vector_indexing_suite<VectorUChar>());
py::class_<VectorUInt>("VectorUInt").def(py::vector_indexing_suite<VectorUInt>());
py::class_<VectorULong>("VectorULong").def(py::vector_indexing_suite<VectorULong>());
py::class_<VectorVectorDouble>("VectorVectorDouble").def(py::vector_indexing_suite<VectorVectorDouble>());
py::class_<VectorVectorInt>("VectorVectorInt").def(py::vector_indexing_suite<VectorVectorInt>());
py::class_<VectorVectorULong>("VectorVectorULong").def(py::vector_indexing_suite<VectorVectorULong>());
}*/
void export_blaze_matrices(py::module& m);
// Demo/test helper for the Python bindings: logs the addresses of the arguments
// and the result, returns {1, 2, 3} followed by a copy of ro_a, and mutates the
// first element of rw_a in place (proves pass-by-reference across the binding).
std::vector<long> data(const std::vector<long>& ro_a, std::vector<long>& rw_a) {
    std::vector<long> out{1, 2, 3};
    std::cout << &ro_a << " x " << &rw_a << " -> " << &out << std::endl;
    for (const long value : ro_a)
        out.push_back(value);
    rw_a.front() = 777; // visible to the caller
    return out;
}
PYBIND11_MAKE_OPAQUE(std::vector<long>);
PYBIND11_MODULE(metric, m) {
    // Python module "metric". std::vector<long> is declared opaque above, so it is
    // exposed as a dedicated LongVector class (with buffer protocol) instead of
    // being converted to/from Python lists.
    py::bind_vector<std::vector<long>>(m, "LongVector", py::buffer_protocol())
        .def("ptr", [](std::vector<long>& self){ return (unsigned long)&self; }); // raw address, for identity checks
    py::bind_vector<std::vector<double>>(m, "DoubleVector", py::buffer_protocol());
    //export_converters();
    // exposing C++ return types
    //export_containers();
    export_blaze_matrices(m);
    // binds the demo helper `data` (defined above) as metric.test(a, b)
    m.def("test", &data,
        py::arg("a"),
        py::arg("b")
    );
}
| 3,518
|
C++
|
.cpp
| 81
| 38.148148
| 110
| 0.670275
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,543
|
blaze.cpp
|
metric-space-ai_metric/python/src/blaze.cpp
|
#include <blaze/Blaze.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
#include <string>
namespace py = pybind11;
// Binds blaze::CompressedMatrix<Value> as Python class "CompressedMatrix":
// default, (m, n), (m, n, nonzeros) and per-row-reservation constructors, plus
// construction from an N x 3 numpy array of (row, col, value) triplets.
template <typename Value>
void wrap_blaze_compressed_matrix(py::module& m) {
    using Matrix = blaze::CompressedMatrix<Value>;
    auto cls = py::class_<Matrix>(m, "CompressedMatrix");
    cls.def(py::init<>());
    cls.def(py::init<size_t, size_t>(),
        py::arg("m"),
        py::arg("n")
    );
    cls.def(py::init<size_t, size_t, size_t>(),
        py::arg("m"),
        py::arg("n"),
        py::arg("nonzeros")
    );
    cls.def(py::init<size_t, size_t, std::vector<size_t>>(),
        py::arg("m"),
        py::arg("n"),
        py::arg("nonzeros") // per-row nonzero reservation
    );
    // construction from a dense numpy triplet array
    cls.def(py::init([](py::array_t<Value, py::array::c_style> array) {
        /* Request a buffer descriptor from Python */
        py::buffer_info info = array.request();
        auto data = array.template unchecked<2>();
        /* Some sanity checks ... */
        if (info.shape[1] != 3)
            throw std::runtime_error("Incompatible buffer dimension!");
        const size_t nr_points = data.shape(0);
        size_t rows = 0;
        size_t cols = 0;
        // calculate max row and col
        // NOTE(review): with 0-based indices a point in row r needs r+1 rows, so the
        // plain max looks off by one unless the input is 1-based — confirm convention.
        for (size_t i = 0; i < nr_points; i++) {
            rows = std::max(rows, size_t(data(i, 0)));
            cols = std::max(cols, size_t(data(i, 1)));
        }
        auto matrix = new Matrix(rows, cols, nr_points); // ownership passes to pybind11
        // we can't guarantee order in input data, so we use insert here
        for (size_t i = 0; i < nr_points; i++) {
            matrix->insert(data(i, 0), data(i, 1), data(i, 2));
        }
        return matrix;
    }), py::arg().noconvert()); // noconvert: require a real numpy array of matching dtype
    cls.def("__repr__", [](Matrix& self) {
        return "CompressedMatrix<" + std::to_string(self.rows()) + "x" + std::to_string(self.columns()) + ">";
    });
}
template <typename Value>
void wrap_blaze_dynamic_matrix(py::module& m) {
using Matrix = blaze::DynamicMatrix<Value>;
py::class_<Matrix>(m, "DynamicMatrix");
}
void export_blaze_matrices(py::module& m) {
wrap_blaze_compressed_matrix<double>(m);
wrap_blaze_dynamic_matrix<double>(m);
// py::implicitly_convertible<py::array_t<double>, blaze::CompressedMatrix<double>>();
}
| 2,286
|
C++
|
.cpp
| 62
| 30.580645
| 110
| 0.581826
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,544
|
kmeans.cpp
|
metric-space-ai_metric/python/src/mapping/kmeans.cpp
|
#include <chrono> // FIXME
#include "metric/mapping/kmeans.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
namespace py = pybind11;
template <typename T>
void register_wrapper_kmeans(py::module& m) {
m.def("kmeans", &metric::kmeans<T>,
py::arg("data"),
py::arg("k") = 0.5,
py::arg("maxiter") = 200,
py::arg("metric") = "Euclidean", // TODO: fix typo
py::arg("random_see") = -1
);
}
void export_metric_kmeans(py::module& m) {
register_wrapper_kmeans<double>(m);
}
| 582
|
C++
|
.cpp
| 20
| 25.3
| 60
| 0.634409
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,545
|
init.cpp
|
metric-space-ai_metric/python/src/mapping/init.cpp
|
/*
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
Copyright (c) 2020 Panda Team
*/
#include <pybind11/pybind11.h>
namespace py = pybind11;
void export_metric_autoencoder(py::module& m);
void export_metric_DSPCC(py::module& m);
void export_metric_kmeans(py::module& m);
void export_metric_kmedoids(py::module& m);
PYBIND11_MODULE(mapping, m) {
export_metric_autoencoder(m);
export_metric_DSPCC(m);
export_metric_kmeans(m);
export_metric_kmedoids(m);
}
| 625
|
C++
|
.cpp
| 18
| 32.166667
| 69
| 0.737977
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,546
|
autoencoder.cpp
|
metric-space-ai_metric/python/src/mapping/autoencoder.cpp
|
#include "metric/mapping/autoencoder.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
#include <string>
namespace py = pybind11;
template<typename InputDataType, typename Scalar>
void register_wrapper_autoencoder(py::module& m) {
using Mapping = metric::Autoencoder<InputDataType, Scalar>;
auto encoder = py::class_<Mapping>(m, "Autoencoder");
encoder.def(py::init<const std::string&>(), py::arg("jsonString"));
encoder.def("train", &Mapping::train);
encoder.def("encode", &Mapping::encode);
encoder.def("decode", &Mapping::decode);
encoder.def("predict", &Mapping::predict);
}
void export_metric_autoencoder(py::module& m) {
register_wrapper_autoencoder<uint8_t, double>(m);
}
| 773
|
C++
|
.cpp
| 20
| 35.85
| 71
| 0.728972
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,547
|
DSPCC.cpp
|
metric-space-ai_metric/python/src/mapping/DSPCC.cpp
|
#include "metric/mapping/DSPCC.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
#include <deque>
namespace py = pybind11;
template <typename RecType, typename Metric>
void register_wrapper_DSPCC(py::module& m) {
using Mapping = metric::DSPCC<RecType, Metric>;
using Matrix = std::vector<std::vector<RecType>>;
using Queue = std::deque<std::vector<RecType>>;
auto dspcc = py::class_<Mapping>(m, "DSPCC");
dspcc.def(py::init<const std::vector<RecType>&, size_t, size_t, float, size_t>(),
py::arg("training_dataset"),
py::arg("n_features") = 1,
py::arg("n_subbands") = 4,
py::arg("time_freq_balance") = 0.5,
py::arg("n_top_features") = 16
);
Matrix (Mapping::*encode) (const std::vector<RecType>&) = &Mapping::time_freq_PCFA_encode;
dspcc.def("time_freq_PCFA_encode", encode);
dspcc.def("time_freq_PCFA_decode", &Mapping::time_freq_PCFA_decode);
dspcc.def("encode", &Mapping::encode);
dspcc.def("decode", &Mapping::decode);
}
void export_metric_DSPCC(py::module& m) {
register_wrapper_DSPCC<std::vector<double>, void>(m);
}
| 1,173
|
C++
|
.cpp
| 29
| 36.241379
| 94
| 0.666374
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,548
|
kmedoids.cpp
|
metric-space-ai_metric/python/src/mapping/kmedoids.cpp
|
#include "metric/mapping/kmedoids.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
namespace py = pybind11;
template <typename RecType, typename Metric>
void register_wrapper_kmedoids(py::module& m) {
m.def("kmedoids", &metric::kmedoids<RecType, Metric>,
py::arg("dm"),
py::arg("k")
);
}
void export_metric_kmedoids(py::module& m) {
register_wrapper_kmedoids<std::vector<double>, metric::Euclidean<double>>(m);
}
| 504
|
C++
|
.cpp
| 16
| 28.5
| 81
| 0.71281
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,549
|
ESN.cpp
|
metric-space-ai_metric/python/src/mapping/subs/ESN.cpp
|
#include "metric/distance/k-related/Standards.hpp"
#include "metric/mapping/ESN.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
namespace py = pybind11;
template <typename RecType, typename Metric>
void register_wrapper_ESN(py::module& m) {
using Class = metric::ESN<RecType, Metric>;
using Container = std::vector<RecType>;
auto esn = py::class_<Class>(m, "ESN");
esn.def(py::init<size_t, double, double, double, size_t, double>(),
py::arg("w_size") = 500,
py::arg("w_connections") = 10,
py::arg("w_sr") = 0.6,
py::arg("alpha") = 0.5,
py::arg("washout") = 1,
py::arg("beta") = 0.5
);
void (Class::*train)(const Container&, const Container&) = &Class::train;
Container (Class::*predict)(const Container&) = &Class::predict;
esn.def("train", train);
esn.def("predict", predict);
}
void export_metric_ESN(py::module& m) {
using Value = double;
using RecType = std::vector<Value>;
register_wrapper_ESN<RecType, metric::Euclidean<Value>>(m);
}
PYBIND11_MODULE(esn, m) {
export_metric_ESN(m);
}
| 1,154
|
C++
|
.cpp
| 33
| 30.818182
| 77
| 0.64906
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,550
|
redif.cpp
|
metric-space-ai_metric/python/src/mapping/subs/redif.cpp
|
#include "metric/mapping/Redif.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
namespace py = pybind11;
template <typename Value, typename Metric>
void register_wrapper_Redif(py::module& m) {
using Class = metric::Redif<Value, Metric>;
using Container = std::vector<std::vector<Value>>;
auto cls = py::class_<Class>(m, "Redif");
cls.def(py::init<const Container&, size_t, size_t, Metric>(),
py::arg("train_data"),
py::arg("n_neighbors") = 10,
py::arg("n_iter") = 15,
py::arg("metric") = Metric()
);
Container (Class::*encode_vector)(const Container&) = &Class::encode;
cls.def("encode", encode_vector, py::arg("x"));
Container (Class::*decode_vector)(const Container&) = &Class::decode;
cls.def("decode", decode_vector, py::arg("xEncoded"));
}
void export_metric_Redif(py::module& m) {
using Value = double;
register_wrapper_Redif<Value, metric::Euclidean<Value>>(m);
}
PYBIND11_MODULE(redif, m) {
export_metric_Redif(m);
}
| 1,069
|
C++
|
.cpp
| 29
| 32.931034
| 73
| 0.663768
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,551
|
dt.cpp
|
metric-space-ai_metric/python/src/mapping/subs/dt.cpp
|
#include "metric/mapping/ensembles/DT.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <pybind11/functional.h>
#include <functional>
#include <vector>
#include <variant>
#include <algorithm>
namespace py = pybind11;
// TODO: Python Distance wrapper, Python Accessor wrapper
class Dimension {
public:
/**
* @brief Construct a new Dimension object
*
* @param accessor_ field accessor
* @param m metric object
*/
Dimension(py::object accessor, py::object distance)
: _accessor(accessor), _distance(distance)
{
}
/**
* @brief Calculate distance between fields in records
*
* @param r1 data record
* @param r2 data record
* @return distance between fileds in records r1 and r2
*/
double get_distance(py::object r1, py::object r2) const
{
return this->_distance(this->_accessor(r1), this->_accessor(r2)).cast<double>();
}
private:
py::object _accessor;
py::object _distance;
};
//template<typename Record, typename ReturnType>
//class FunctionWrapper
//{
// py::object obj;
//public:
// FunctionWrapper(py::object obj)
// : obj(obj)
// {
// }
//
// ReturnType operator()(const Record& record) {
// return this->obj(record).cast<ReturnType>();
// }
//};
template <typename Record>
void register_wrapper_DT(py::module& m) {
using Class = metric::DT<Record>;
using Container = std::vector<Record>;
using Callback = std::function<int(const Record&)>;
using Dims = std::vector<std::variant<Dimension>>;
void (Class::*train)(const Container&, Dims, Callback&) = &Class::train;
void (Class::*predict)(const Container&, Dims, std::vector<int>&) = &Class::predict;
auto dt = py::class_<Class>(m, "DT");
dt.def(py::init<double, double>(),
py::arg("entropy_threshold") = 0,
py::arg("gain_threshold") = 0
);
dt.def("train", +[](Class& self, const Container& data, const std::vector<Dimension>& dims, Callback& response){
std::vector<std::variant<Dimension>> variantDims; // FIXME: better way to convert variants (fix CPP maybe?)
for (auto& dim : dims) {
variantDims.push_back(dim);
}
return self.train(data, variantDims, response);
},
py::arg("payments"),
py::arg("dimensions"),
py::arg("response")
);
dt.def("predict", +[](Class& self, const Container& data, const std::vector<Dimension>& dims){
std::vector<int> output;
std::vector<std::variant<Dimension>> variantDims; // FIXME: better way to convert variants (fix CPP maybe?)
for (auto& dim : dims) {
variantDims.push_back(dim);
}
self.predict(data, variantDims, output);
return output;
},
py::arg("data"),
py::arg("dimensions")
);
py::class_<Dimension>(m, "Dimension")
.def(py::init<py::object, py::object>(), py::arg("accessor"), py::arg("distance"));
}
void export_metric_DT(py::module& m) {
register_wrapper_DT<py::object>(m);
// IterableConverter()
// .from_python<std::vector<Dimension>>()
// .from_python<std::vector<py::object>>()
// ;
}
PYBIND11_MODULE(dt, m) {
export_metric_DT(m);
}
| 3,338
|
C++
|
.cpp
| 101
| 28.108911
| 121
| 0.621823
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,552
|
dbscan.cpp
|
metric-space-ai_metric/python/src/mapping/subs/dbscan.cpp
|
#include "metric/mapping/dbscan.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <vector>
namespace py = pybind11;
template<typename RecType, typename Metric, typename T>
void register_wrapper_dbscan(py::module& m) {
m.def("dbscan", &metric::dbscan<RecType, Metric, T>);
}
void export_metric_dbscan(py::module& m) {
register_wrapper_dbscan<std::vector<double>, metric::Euclidean<double>, double>(m);
}
PYBIND11_MODULE(dbscan, m) {
export_metric_dbscan(m);
}
| 524
|
C++
|
.cpp
| 16
| 30.75
| 87
| 0.746032
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,553
|
ensembles.cpp
|
metric-space-ai_metric/python/src/mapping/subs/ensembles.cpp
|
#include "metric/mapping/ensembles.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <functional>
#include <vector>
#include <variant>
namespace py = pybind11;
template <typename Record, typename WeakLearner, typename Subsampler>
void register_wrapper_Boosting(py::module& m) {
using Mapping = metric::Boosting<Record, WeakLearner, Subsampler>;
using Container = std::vector<Record>;
using Features = std::vector<std::function<double(Record)>>;
using Callback = std::function<bool(Record)>;
void (Mapping::*train)(Container&, Features&, Callback&, bool) = &Mapping::train;
void (Mapping::*predict)(Container&, Features&, std::vector<bool>&) = &Mapping::predict;
py::class_<Mapping>(m, "Boosting")
.def(py::init<int, double, double, WeakLearner>(),
py::arg("ensemble_size_"),
py::arg("share_overall"),
py::arg("share_minor"),
py::arg("weak_classifier")
)
.def("train", train)
.def("predict", predict);
}
template <typename Record, typename WeakLearnerVariant, typename Subsampler>
void register_wrapper_Bagging(py::module& m) {
using Mapping = metric::Bagging<Record, WeakLearnerVariant, Subsampler>;
using Container = std::vector<Record>;
using Features = std::vector<std::function<double(Record)>>;
using Callback = std::function<bool(Record)>;
void (Mapping::*train)(Container&, Features&, Callback&, bool) = &Mapping::train;
void (Mapping::*predict)(Container&, Features&, std::vector<bool>&) = &Mapping::predict;
py::class_<Mapping>(m, "Bagging")
.def(py::init<int, double, double, std::vector<double>, std::vector<WeakLearnerVariant>>(),
py::arg("ensemble_size"),
py::arg("share_overall"),
py::arg("share_minor"),
py::arg("type_weight"),
py::arg("weak_classifiers")
)
.def("train", train)
.def("predict", predict);
}
// TODO: implement WeakLearner interface class to provide in python
template <class Record>
class PythonWeekLearner {
public:
/**
* @brief train model on test dataset
*
* @param payments test dataset
* @param features
* @param response
*/
virtual void train(
py::array& payments,
std::vector<std::function<double(Record)>>& features,
std::function<bool(Record)>& response
);
/**
* @brief use model to classify input data
*
* @param data input data
* @param features
* @param predictions[out] prediction result
*/
virtual void predict(
py::array& data,
std::vector<std::function<double(Record)>>& features,
std::vector<bool>& predictions
);
/**
* @brief clone object
*/
virtual std::shared_ptr<PythonWeekLearner<Record>> clone();
};
void export_metric_ensembles(py::module& m) {
using Record = std::vector<double>;
using WeakLearner = metric::edmClassifier<Record, CSVM>;
using WeakLearnerVariant = std::variant<metric::edmSVM<Record>, metric::edmClassifier<Record, CSVM>>;
register_wrapper_Boosting<Record, WeakLearner, metric::SubsampleRUS<Record>>(m);
register_wrapper_Bagging<Record, WeakLearnerVariant, metric::SubsampleRUS<Record>>(m);
}
PYBIND11_MODULE(ensembles, m) {
export_metric_ensembles(m);
}
| 3,384
|
C++
|
.cpp
| 88
| 32.818182
| 105
| 0.663825
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,554
|
affprop.cpp
|
metric-space-ai_metric/python/src/mapping/subs/affprop.cpp
|
#include "metric/mapping/affprop.hpp"
#include "metric_types.hpp"
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <type_traits>
namespace py = pybind11;
template <typename RecType, typename Metric>
void wrap_affprop(py::module& m) {
using Class = metric::AffProp<RecType, Metric>;
using Value = typename std::invoke_result<Metric, const RecType&, const RecType&>::type;
auto cls = py::class_<Class>(m, "AffProp", "Affinity Propagation");
cls.def(py::init<>());
cls.def(py::init<Value, int, Value, Value>(),
py::arg("preference") = 0.5,
py::arg("maxiter") = 200,
py::arg("tol") = 1.0e-6,
py::arg("damp") = 0.5
);
cls.def("__call__", &Class::operator(),
py::arg("dm")
);
}
void export_metric_affprop(py::module& m) {
using Value = double;
using RecType = std::vector<Value>;
using Functor = std::function<Value(const RecType&, const RecType&)>;
using Metric = metric::Euclidean<Value>;
// boost::mpl::for_each<metric::MetricTypes, boost::mpl::make_identity<boost::mpl::_1>>([&](auto metr) {
// using Metric = typename decltype(metr)::type;
wrap_affprop<RecType, Metric>(m);
// });
// wrap_affprop<RecType, Functor>;
}
PYBIND11_MODULE(affprop, m) {
export_metric_affprop(m);
}
| 1,342
|
C++
|
.cpp
| 37
| 32.405405
| 107
| 0.647421
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
1,531,555
|
KOC.cpp
|
metric-space-ai_metric/python/src/mapping/subs/KOC.cpp
|
#include "metric_types.hpp"
#include "distance/custom.hpp"
#include "metric/mapping/KOC.hpp"
#include <boost/mpl/for_each.hpp>
#include <boost/mpl/vector.hpp>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/numpy.h>
#include <pybind11/functional.h>
#include <typeindex>
#include <random>
#include <optional>
namespace py = pybind11;
template <typename recType, typename Graph, typename Metric, typename Distribution>
auto create_KOC(
Graph graph,
Metric metric,
Distribution distribution,
double anomaly_sigma = 1.0,
double start_learn_rate = 0.8,
double finish_learn_rate = 0.0,
size_t iterations = 20,
std::optional<double> nbh_start_size = std::nullopt,
double nbh_range_decay = 2.0,
std::optional<long long> random_seed = std::nullopt
) -> metric::KOC_details::KOC<recType, Graph, Metric, Distribution>
{
return metric::KOC_details::KOC<recType, Graph, Metric, Distribution>(
graph,
metric,
anomaly_sigma,
start_learn_rate,
finish_learn_rate,
iterations,
distribution,
nbh_start_size.value_or(std::sqrt(double(graph.getNodesNumber()))),
nbh_range_decay,
random_seed.value_or(std::chrono::system_clock::now().time_since_epoch().count())
);
}
template <typename Record, class Graph, class Metric, class Distribution = std::normal_distribution<double>>
void wrap_metric_KOC(py::module& m) {
using Factory = metric::KOC_factory<Record, Graph, Metric, Distribution>;
using Class = typename Factory::KOC;
using value_type = typename Factory::T;
// KOC factory
m.def("KOC", &create_KOC<Record, Graph, Metric, Distribution>,
py::arg("graph"),
py::arg("metric"),
py::arg("distribution"),
py::arg("anomaly_sigma") = 1.0,
py::arg("start_learn_rate") = 0.8,
py::arg("finish_learn_rate") = 0.0,
py::arg("iterations") = 20,
py::arg("nbh_start_size") = (double *) nullptr,
py::arg("nbh_range_decay") = 2.0,
py::arg("random_seed") = (long long *) nullptr
);
// KOC implementation
const std::string className = "KOC_" + metric::getTypeName<Graph>() + "_" + metric::getTypeName<Metric>();
auto koc = py::class_<Class>(m, className.c_str());
std::vector<bool> (Class::*check_if_anomaly1)(const std::vector<Record>&) = &Class::check_if_anomaly;
bool (Class::*check_if_anomaly2)(const Record&) = &Class::check_if_anomaly;
koc.def("train", &Class::train,
py::arg("samples"),
py::arg("num_clusters"),
py::arg("min_cluster_size") = 1
);
koc.def("top_outliers", &Class::top_outliers,
py::arg("samples"),
py::arg("count") = 10
);
koc.def("assign_to_clusters", &Class::assign_to_clusters, (py::arg("samples")));
koc.def("check_if_anomaly", check_if_anomaly1, (py::arg("samples")));
koc.def("check_if_anomaly", check_if_anomaly2, (py::arg("sample")));
}
// TODO: add distribution
// TODO: add python graphs and distribution
void export_metric_KOC(py::module& m) {
using Value = double;
using Container = std::vector<Value>;
using MetricTypes = boost::mpl::vector<
metric::Euclidean<Value>
,metric::Manhatten<Value>
,metric::Chebyshev<Value>
,metric::P_norm<Value>
,std::function<Value(const Container&, const Container&)>
>;
using GraphTypes = boost::mpl::vector<
metric::Grid4
,metric::Grid6
,metric::Grid8
>;
boost::mpl::for_each<MetricTypes, boost::mpl::make_identity<boost::mpl::_1>>([&](auto metr) {
using MetricType = typename decltype(metr)::type;
boost::mpl::for_each<GraphTypes, boost::mpl::make_identity<boost::mpl::_1>>([&](auto graph) {
using GraphType = typename decltype(graph)::type;
wrap_metric_KOC<Container, GraphType, MetricType>(m);
});
});
}
PYBIND11_MODULE(koc, m) {
export_metric_KOC(m);
}
| 4,000
|
C++
|
.cpp
| 104
| 32.817308
| 110
| 0.644679
|
metric-space-ai/metric
| 34
| 14
| 44
|
MPL-2.0
|
9/20/2024, 10:43:29 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.