| text | meta |
|---|---|
/*
* MyIMUAgent.cpp
*
* Created on: 23 Dec 2015
* Author: andrea
*/
#include <agents/MyIMUAgent.h>
#include <iostream>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <boost/log/trivial.hpp>
#include <boost/math/quaternion.hpp>
#include <boost/date_time/posix_time/posix_time.hpp> // for microsec_clock used below
#include <events/MyIMUSample.h>
#include <events/MyBaroSample.h>
#include <syslog.h>
MyIMUAgent::MyIMUAgent(boost::shared_ptr<MyEventBus> bus, vector<MyEvent::EventType> acceptedEventTypes) : MyAgent(bus, acceptedEventTypes), initialized(false), lastTickMicros(0), lastDTimeMicros(0) {
}
MyIMUAgent::~MyIMUAgent() {
}
bool MyIMUAgent::initialize() {
if(imu.dmpBegin()) {
initialized = true;
}
return initialized;
}
void MyIMUAgent::calcTickTimestamp() {
uint32_t now = boost::posix_time::microsec_clock::local_time().time_of_day().total_microseconds();
lastDTimeMicros = uint16_t(now - lastTickMicros); // elapsed microseconds since last tick, truncated to 16 bits
lastTickMicros = now;
}
void MyIMUAgent::processEvent(boost::shared_ptr<MyEvent> event) {
if(!initialized) {
initialize();
}
if(this->getState() == MyAgentState::Active) {
if(event->getType() == MyEvent::EventType::Tick) {
this->calcTickTimestamp();
if(imu.pulse()) {
const MPU6050::SensorData& md = imu.getData();
// syslog(LOG_INFO, "YPR: y(%3.2f), p(%3.2f), r(%3.2f) - Accel: x(%d), y(%d), z(%d)", md.ypr[0], md.ypr[1], md.ypr[2], md.accel.x, md.accel.y, md.accel.z);
boost::math::quaternion<float> q(md.q.w,md.q.x,md.q.y,md.q.z);
// ypr arrives in radians; the constant approximates 180/pi to convert to degrees
boost::shared_ptr<MyEvent> evOut(boost::make_shared<MyIMUSample>(this->getUuid(), q, md.ypr[0]*57.324840764f, md.ypr[1]*57.324840764f, md.ypr[2]*57.324840764f, md.gravity, md.accel, md.linearAccel));
m_signal(evOut);
}
if(bmp.pulse()) {
const BMP085::SensorData& md = bmp.getData();
boost::shared_ptr<MyBaroSample> evOut(boost::make_shared<MyBaroSample>(this->getUuid(), md.altitude, md.estimatedAltitude, md.pressure, md.seaLevelPressure, md.temperature, md.rawPressure, md.rawTemperature, md.dtimeMillis));
m_signal(evOut);
}
}
}
}
|
{"hexsha": "e46dfff5f4f1f3dec51132045f0074cc5b22fe11", "size": 2052, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/agents/MyIMUAgent.cpp", "max_stars_repo_name": "LinuxDroneLab/MyDrone", "max_stars_repo_head_hexsha": "33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-05-31T09:46:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T12:33:43.000Z", "max_issues_repo_path": "src/agents/MyIMUAgent.cpp", "max_issues_repo_name": "LinuxDroneLab/MyLinuxDrone", "max_issues_repo_head_hexsha": "33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 17.0, "max_issues_repo_issues_event_min_datetime": "2018-09-03T05:41:37.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-15T07:48:20.000Z", "max_forks_repo_path": "src/agents/MyIMUAgent.cpp", "max_forks_repo_name": "LinuxDroneLab/MyLinuxDrone", "max_forks_repo_head_hexsha": "33b8e9f15cebf79da0141e4d8aa5f4d57da73b3e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7796610169, "max_line_length": 229, "alphanum_fraction": 0.6959064327, "num_tokens": 645}
|
//
// Created by Martin Drewes on 13/05/2020.
//
#include <filesystem>
#include <fstream>
#include <iostream>
#include <sstream>
#include <regex>
#include <boost/algorithm/string.hpp>
#include "template.hpp"
#include "utils.hpp"
namespace Templates {
std::string Template::Render() {
return text;
}
std::string Template::Render(tree t) {
std::regex r("\\{\\{(.*?)\\}\\}");
token_vector tokens = utils::tokenize(text, r);
// TODO: tokens are extracted but not yet substituted; returns the raw text for now
return text;
}
std::string Template::Render(std::map<std::string, std::string> t) {
std::string n = text;
for (auto x : t) {
// replace every occurrence; guard against std::string::npos when a placeholder is absent
std::string key = "{{" + x.first + "}}";
for (auto pos = n.find(key); pos != std::string::npos; pos = n.find(key, pos + x.second.length())) {
n.replace(pos, key.length(), x.second);
}
}
return n;
}
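// Usage sketch (hypothetical values): Template("Hi {{name}}").Render({{"name", "Bob"}})
// would yield "Hi Bob"; map entries with no matching placeholder are skipped by the npos guard.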
TemplateStore::TemplateStore(std::string path) {
std::cout << "Load templates in " << path << std::endl;
for (const auto &entry : std::filesystem::directory_iterator(path)) {
auto fname = entry.path().filename().string();
auto data = utils::read_file(entry.path().string());
std::cout << entry.path().string() << " , " << fname << " " << data.size() << std::endl;
templates.insert(
std::pair<std::string, Template>(fname, Template(data)));
}
}
Template TemplateStore::Find(std::string name) {
auto t = templates.find(name);
if (t != templates.end()) {
return t->second;
}
return Template("404 Template");
}
Template *TemplateStore::FindPtr(std::string name) {
auto t = templates.find(name);
if (t != templates.end()) {
return &(t->second);
}
// a static fallback avoids returning a dangling pointer to a stack-local temporary
static Template not_found("404 Template");
return &not_found;
}
TemplateStore *Store;
void LoadTemplates(std::string path) {
Store = new TemplateStore(path);
}
void write_json(std::basic_ostream<char> &o, tree t) {
boost::property_tree::write_json(o, t);
}
}
|
{"hexsha": "06ebab4479230c4611b8c40f01c96ad9c17af9e3", "size": 2062, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/framework/template.cpp", "max_stars_repo_name": "martinskou/wolf", "max_stars_repo_head_hexsha": "7fa0b6a99ce2fa5086f665cac9e78806648c0517", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/framework/template.cpp", "max_issues_repo_name": "martinskou/wolf", "max_issues_repo_head_hexsha": "7fa0b6a99ce2fa5086f665cac9e78806648c0517", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/framework/template.cpp", "max_forks_repo_name": "martinskou/wolf", "max_forks_repo_head_hexsha": "7fa0b6a99ce2fa5086f665cac9e78806648c0517", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5476190476, "max_line_length": 101, "alphanum_fraction": 0.5455868089, "num_tokens": 488}
|
import torch
import torch.nn.functional as F
from torch import nn, optim
import numpy as np
from src.utils.generators.shapenet_generater import Generator
from src.utils.generators.mixed_len_generator import MixedGenerateData
from src.utils.generators.wake_sleep_gen import WakeSleepGen
from globals import device
class FIDModel(nn.Module):
def __init__(self):
super(FIDModel, self).__init__()
self.conv1 = nn.Conv2d(1, 8, 3, padding=(1, 1))
self.conv2 = nn.Conv2d(8, 16, 3, padding=(1, 1))
self.conv3 = nn.Conv2d(16, 32, 3, padding=(1, 1))
self.dense = nn.Linear(2048, 3)  # three classes (synthetic / real / generated); a single logit would break cross_entropy below
def forward(self, x):
batch_size = x.shape[0]
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
x = self.dense(x.view((batch_size, -1)))
return x
def encode(self, x):
batch_size = x.shape[0]
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
x = F.max_pool2d(F.relu(self.conv3(x)), (2, 2))
return x
def loss_function(self, logits, labels):
return F.cross_entropy(logits, labels)
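# Shape sketch (assumes the 64x64 single-channel images produced by the generators below):
# x (N, 1, 64, 64) -> three conv+pool stages -> (N, 32, 8, 8) -> flatten to 2048 -> dense -> (N, 3)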
if __name__ == '__main__':
inference_train_size = 10000
inference_test_size = 3000
vocab_size = 400
batch_size = 300
sub_batch_size = batch_size // 3
epochs = 50
data_labels_paths = {3: "data/synthetic/one_op/expressions.txt",
5: "data/synthetic/two_ops/expressions.txt",
7: "data/synthetic/three_ops/expressions.txt",
9: "data/synthetic/four_ops/expressions.txt",
11: "data/synthetic/five_ops/expressions.txt",
13: "data/synthetic/six_ops/expressions.txt"}
dataset_sizes = {
3: [30000, 5000],
5: [110000, 50000],
7: [170000, 50000],
9: [270000, 50000],
11: [370000, 100000],
13: [370000, 100000]
}
syn_batch_size = sub_batch_size // len(dataset_sizes)
syn_gen = MixedGenerateData(data_labels_paths=data_labels_paths,
batch_size=syn_batch_size)
syn_gen_train = {}
syn_gen_test = {}
for k in data_labels_paths.keys():
syn_gen_train[k] = syn_gen.get_train_data(
syn_batch_size,
k,
num_train_images=dataset_sizes[k][0],
jitter_program=True)
syn_gen_test[k] = syn_gen.get_test_data(
syn_batch_size,
k,
num_train_images=dataset_sizes[k][0],
num_test_images=dataset_sizes[k][1],
jitter_program=True)
def get_syn_batch(gen):
sub_batches = []
for k in dataset_sizes.keys():
sub_batches.append(torch.from_numpy(next(gen[k])[0][-1, :, 0:1, :, :]).to(device))
return torch.cat(sub_batches)
# inf_gen = WakeSleepGen(f"wake_sleep_data/inference/best_simple_labels/labels/labels.pt",
# f"wake_sleep_data/inference/best_simple_labels/labels/val/labels.pt",
# batch_size=sub_batch_size,
# train_size=inference_train_size,
# test_size=inference_test_size)
# inf_gen_train = inf_gen.get_train_data()
# inf_gen_test = inf_gen.get_test_data()
cad_generator = Generator()
real_gen_train = cad_generator.train_gen(
batch_size=sub_batch_size,
path="data/cad/cad.h5",
if_augment=False)
real_gen_test = cad_generator.val_gen(
batch_size=sub_batch_size,
path="data/cad/cad.h5",
if_augment=False)
fake_gen = WakeSleepGen(f"wake_sleep_data/generator/best_gen_labels/labels.pt",
f"wake_sleep_data/generator/best_gen_labels/val/labels.pt",
batch_size=sub_batch_size,
train_size=inference_train_size,
test_size=inference_test_size)
fake_gen_train = fake_gen.get_train_data()
fake_gen_test = fake_gen.get_test_data()
model = FIDModel().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
actual_batch_size = ((sub_batch_size * 3) + (syn_batch_size * 6))
# class labels: 1 = synthetic, 0 = real CAD, 2 = generated ("fake")
labels = torch.cat([torch.ones(syn_batch_size * 6), torch.zeros(sub_batch_size),
torch.full((sub_batch_size,), 2)]).long().to(device)
for epoch in range(epochs):
train_loss = 0
acc = 0
for batch_idx in range(inference_train_size // batch_size):
optimizer.zero_grad()
syn_batch = get_syn_batch(syn_gen_train)
real_batch = torch.from_numpy(next(real_gen_train)[-1, :, 0:1, :, :]).to(device)
fake_batch = next(fake_gen_train)[0][-1, :, 0:1, :, :].to(device)
batch = torch.cat([syn_batch, real_batch, fake_batch])
logits = model(batch)
loss = model.loss_function(logits, labels)
acc += (logits.max(dim=1)[1] == labels).float().sum() / len(labels)
train_loss += float(loss)
print(f"epoch {epoch}, batch {batch_idx}, train loss {loss.data}")
loss.backward()
optimizer.step()
print(f"average train loss {epoch}: {train_loss / (inference_train_size // batch_size)}, acc {acc / (inference_train_size // batch_size)}")
test_loss = 0
acc = 0
for batch_idx in range(inference_test_size // batch_size):
with torch.no_grad():
syn_batch = get_syn_batch(syn_gen_test)
real_batch = torch.from_numpy(next(real_gen_test)[-1, :, 0:1, :, :]).to(device)
fake_batch = next(fake_gen_test)[0][-1, :, 0:1, :, :].to(device)
batch = torch.cat([syn_batch, real_batch, fake_batch])
logits = model(batch)
loss = model.loss_function(logits, labels)
acc += (logits.max(dim=1)[1] == labels).float().sum() / len(labels)
test_loss += float(loss)
print(f"average test loss {epoch}: {test_loss / (inference_test_size // batch_size)}, acc {acc / (inference_train_size // batch_size)}")
torch.save(model.state_dict(), f"trained_models/fid-model-three.pth")
|
{"hexsha": "86fdae8717fc68d9a92d3182e767dd5a5f7e20a7", "size": 6448, "ext": "py", "lang": "Python", "max_stars_repo_path": "fid_model.py", "max_stars_repo_name": "HomerW/CSGNet", "max_stars_repo_head_hexsha": "4ecc7f3e836867118dba3d5f220ed5e74a536b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fid_model.py", "max_issues_repo_name": "HomerW/CSGNet", "max_issues_repo_head_hexsha": "4ecc7f3e836867118dba3d5f220ed5e74a536b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fid_model.py", "max_forks_repo_name": "HomerW/CSGNet", "max_forks_repo_head_hexsha": "4ecc7f3e836867118dba3d5f220ed5e74a536b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5675675676, "max_line_length": 147, "alphanum_fraction": 0.59941067, "include": true, "reason": "import numpy", "num_tokens": 1621}
|
#!/usr/local/bin/python
import pandas as pd
from datetime import timedelta
import numpy as np
import networkx as nx
from MarkovChain import *
import os
DATA_DIR = "~/Projects/markov_traffic/data/"
PLOTS_DIR = "~/Projects/markov_traffic/Plots_data/"
def read_trips_file(filename):
filename = DATA_DIR + filename
data = pd.read_csv(filename, sep=",", header=0, index_col=0)
data['start_date'] = pd.to_datetime(data['start_date'], format="%m/%d/%Y %H:%S:%f")
data['end_date'] = pd.to_datetime(data['end_date'], format="%m/%d/%Y %H:%S:%f")
data = data[['start_date','strt_statn','end_date','end_statn']]
data = data.dropna(axis=0, how='any')
cols=['strt_statn','end_statn']
data[cols] = data[cols].applymap(np.int64)
return data
def read_stations_file(filename):
filename = DATA_DIR + filename
data = pd.read_csv(filename, sep=",", header=0)
data = data[data['status'] == 'Existing']
return data
def read_status_file(filename):
filename = DATA_DIR + filename
data = pd.read_csv(filename, sep=",", header=0, index_col=0)
data['update'] = pd.to_datetime(data['update'], format="%Y-%m-%d %H:%S:%f")
return data
def get_mc_attributes(start_time="2012-04-01 10:00:00", duration=120):
# Create csv read iterator
data = read_trips_file("hubway_trips_2012.csv")
start_time = pd.to_datetime(start_time)
end_time = start_time + timedelta(minutes=duration)
df = data[(data['start_date'] >= start_time) & (data['end_date'] <= end_time)]
stations = read_stations_file("hubway_stations.csv")
status = read_status_file("stationstatus_2012_4.csv")
status_df = status[status['update'] == start_time]
# Remove trips starting or ending in the stations not present in stations dataframe
# or stations not present in the status file
station_ids = set(stations['id'])
status_df = status_df[status_df['station_id'].isin(station_ids)]
df = df[(df['strt_statn'].isin(station_ids)) & (df['end_statn'].isin(station_ids))]
trips_df = pd.DataFrame({'weight' : df.groupby(['strt_statn','end_statn']).size()})
trips_df = trips_df.reset_index()
print "Creating networkx graph"
G = nx.from_pandas_dataframe(trips_df, 'strt_statn', 'end_statn', 'weight', create_using=nx.DiGraph())
G = nx.stochastic_graph(G, weight='weight')
# Add stations that are present in status_ids but not in trips_df
status_ids = set(status['station_id'])
for node in status_ids - set(G.nodes()):
G.add_node(node)
print "Creating transition matrix"
transition_matrix = nx.to_numpy_matrix(G, weight='weight')
transition_matrix = np.squeeze(np.asarray(transition_matrix))
print "Creating object assignment and distribution"
object_assignment = {}
object_distribution = {}
for node in G.nodes():
try:
object_assignment[node] = status_df[status_df['station_id'] == node].get('nbBikes').item()
except:
object_assignment[node] = 0
num_objects = sum(object_assignment.values())
for node in G.nodes():
object_distribution[node] = 1.0 *object_assignment[node]/num_objects
return (num_objects, transition_matrix, G, object_distribution)
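def _stochastic_graph_sketch():
    # Hedged toy example of the row-normalization step in get_mc_attributes
    # (hypothetical trip counts; same networkx 1.x API as used above).
    G = nx.DiGraph()
    G.add_weighted_edges_from([(1, 2, 3.0), (1, 3, 1.0), (2, 1, 2.0)])
    G = nx.stochastic_graph(G, weight='weight')  # out-edge weights of each node now sum to 1
    P = np.squeeze(np.asarray(nx.to_numpy_matrix(G, weight='weight')))
    return P  # row for node 1 -> [0.0, 0.75, 0.25]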
def get_station_coordinate_dataframe(nodes_set):
stations = read_stations_file("hubway_stations.csv")
df = stations[stations['id'].isin(nodes_set)]
return df
def get_paths_coordinate_dataframe(edges_set):
nodes = []
for edge in edges_set:
nodes.append(edge[0])
nodes.append(edge[1])
nodes = set(nodes)
stations = read_stations_file("hubway_stations.csv")
stations_df = stations[stations['id'].isin(nodes)]
df = []
for i in range(len(edges_set)):
edge = edges_set[i]
source = edge[0]
target = edge[1]
temp = {}
temp['edge_id'] = i
temp['node'] = source
temp['node_type'] = 'source'
temp['lng'] = stations_df[stations_df['id'] == source].get('lng').item()
temp['lat'] = stations_df[stations_df['id'] == source].get('lat').item()
df.append(temp)
temp = {}
temp['edge_id'] = i
temp['node'] = target
temp['node_type'] = 'target'
temp['lng'] = stations_df[stations_df['id'] == target].get('lng').item()
temp['lat'] = stations_df[stations_df['id'] == target].get('lat').item()
df.append(temp)
df = pd.DataFrame(df)
return df
if __name__ == "__main__":
k = 10
num_objects, transition_matrix, G, object_distribution = get_mc_attributes(duration=600)
nc = MCNodeObjectives(len(G.nodes()), num_objects, 10, transition_matrix, object_distribution, G)
ec = MCEdgeObjectives(len(G.nodes()), num_objects, 10, transition_matrix, object_distribution, G)
nodes_set = nc.smart_greedy(k)[0]
edges_set = ec.smart_greedy(k)[0]
nodes_df = get_station_coordinate_dataframe(nodes_set)
edges_df = get_paths_coordinate_dataframe(edges_set)
# Export nodes_df
nodes_df.to_csv(PLOTS_DIR + "hubway_plot_nodes.csv.gz", sep=",", header=True, index=False, compression='gzip')
edges_df.to_csv(PLOTS_DIR + "hubway_plot_edges.csv.gz", sep=",", header=True, index=False, compression='gzip')
# Call the R script to make the plots
os.system("~/Projects/markov_traffic/src/R/plot_hubway_stations.r")
|
{"hexsha": "3c70dd0e0bc093e76b357331281adb4dbe8ff96b", "size": 5431, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python_old/plot_hubway_stations.py", "max_stars_repo_name": "chdhr-harshal/MCMonitor", "max_stars_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-11-04T20:35:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T09:06:43.000Z", "max_issues_repo_path": "src/python_old/plot_hubway_stations.py", "max_issues_repo_name": "chdhr-harshal/MCMonitor", "max_issues_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python_old/plot_hubway_stations.py", "max_forks_repo_name": "chdhr-harshal/MCMonitor", "max_forks_repo_head_hexsha": "330fc1a8f8cf83620fd6b0e503707c91e97af16d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-05T09:10:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-05T09:10:41.000Z", "avg_line_length": 36.4496644295, "max_line_length": 114, "alphanum_fraction": 0.6709629902, "include": true, "reason": "import numpy,import networkx", "num_tokens": 1390}
|
import numpy as np
import scipy.sparse as sp
import copy
import warnings
import pandas as pd
import sys
import math
from sklearn.metrics.pairwise import euclidean_distances,pairwise_distances_argmin_min
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.utils import check_random_state, check_array,gen_batches
from sklearn.utils.extmath import stable_cumsum, row_norms,squared_norm
from sklearn.utils.validation import FLOAT_DTYPES, _num_samples
from sklearn.exceptions import ConvergenceWarning
from sklearn.cluster import KMeans
from scipy.sparse.csgraph import connected_components
from collections import Counter
from joblib import Parallel, delayed, effective_n_jobs
def _k_estimation(X, n_clusters, x_squared_norms, random_state, n_local_trials=None, delta = 0.0001):
''' Estimate the cluster number for dataset X by retaining (1 - delta) of the information (modified from sklearn.cluster._kmeans._k_init)
Parameters
-----------------------
X: dataset for cluster number estimation
n_clusters: maximum cluster number
x_squared_norms
random_state
n_local_trials
delta: threshold for D(x) cut down
Return
------------------------
estimate_cluster_num: estimated cluster number k
centers[0:estimate_cluster_num]: seeds from original dataset X
'''
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_estimate'
random_state = check_random_state(random_state)
if n_local_trials is None:
n_local_trials = 2 + int(np.log(n_clusters))
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
max_diff_pot = 0
form_pot = current_pot
estimate_cluster_num = n_clusters
for c in range(1, n_clusters):
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),rand_vals)
distance_to_candidates = euclidean_distances(X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
form_pot = current_pot
current_pot = best_pot
closest_dist_sq = best_dist_sq
if c >= 2:
if c == 2:
max_diff_pot = form_pot - current_pot
elif (form_pot - current_pot)/(max_diff_pot+0.0)<delta:
estimate_cluster_num = c
break
return centers[0:estimate_cluster_num], estimate_cluster_num
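def _k_estimation_sketch():
    # Hedged toy illustration of the stopping rule above (made-up potentials):
    # record the largest potential drop at c == 2, then stop once the marginal
    # drop relative to it falls below delta.
    pots = [100.0, 40.0, 15.0, 14.999]   # potential after each added seed
    delta = 0.0001
    max_diff_pot = pots[1] - pots[2]
    for c in range(3, len(pots)):
        if (pots[c - 1] - pots[c]) / max_diff_pot < delta:
            return c                     # estimated cluster number
    return len(pots)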
class k_means_center:
''' Structure for centers in k-means
Parameter
-----------------------
X: points in cluster
Attributes
-----------------------
center_: mean of X
size_: number of points in X
pfunv_: sum of squared distances between the points in X and center_
'''
def __init__(self, **kargs):
if 'X' in kargs:
X = kargs.get('X')
self.center_ = np.mean(X, axis = 0)
self.size_ = np.size(X, axis = 0)
self.pfunv_ = np.sum(np.linalg.norm(X - self.center_, axis=1) ** 2)  # sum of squared distances, matching the docstring
if 'center' in kargs:
self.center_=kargs.get('center')
if 'size' in kargs:
self.size_=kargs.get('size')
if 'pfunv' in kargs:
self.pfunv_=kargs.get('pfunv')
class edge_center(k_means_center):
''' Structure for center and its summary from edge
Function
add_centers: combine centers
'''
def __init__(self, **kargs):
if 'kcenter' in kargs:
kcenter = kargs.get('kcenter')
self.center_ = kcenter.center_
self.size_ = kcenter.size_
self.pfunv_ = kcenter.pfunv_
if ('center' in kargs) and ('pfunv' in kargs):
self.center_ = kargs.get('center')
self.size_ = kargs.get('size')
self.pfunv_ = kargs.get('pfunv')
def copy(self):
return edge_center(center = self.center_, size = self.size_,
pfunv = self.pfunv_)
def add_centers(self, centers):
for c in centers:
center = (c.center_*c.size_ + self.center_*self.size_)\
/(self.size_+ c.size_+0.0)
size = self.size_+ c.size_
pfunv = c.pfunv_ + self.pfunv_\
+ self.size_*np.linalg.norm(center - self.center_)**2\
+ c.size_*np.linalg.norm(center - c.center_)**2
self.center_ = center
self.size_ = size
self.pfunv_ = pfunv
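# The merge above follows the standard pooled-SSE identity (assumed intent):
# SSE_merged = SSE_a + SSE_b + n_a*||c - c_a||^2 + n_b*||c - c_b||^2,
# where c is the size-weighted mean of the two centers.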
def edge_summarize(X, n_clusters, delta, copy_data=False):
''' Run k-means at the edge server, including cluster number estimation, \
k-means clustering and center calculation
Parameters
--------------------------
X: dataset for clustering
n_clusters: maximum cluster number for k-means
delta: parameter for cluster number k estimation
copy_data: Return original data(True) or labels(False) of cluster
Return
--------------------------
centers: array of k_means_center
datasets: depends on copy_data, original data(True) or labels(False) of clusters
'''
# cluster number estimation by _k_init with 1-delta information remaining
Xnorm = row_norms(X, squared=True)
centers,n_clusters = _k_estimation(X, n_clusters, x_squared_norms=Xnorm, random_state=True, delta=delta)
# Note: switch to the commented init=centers variant below when only the
# communication size is being measured
# k_means = KMeans(init=centers, n_clusters=n_clusters)
k_means = KMeans(init='k-means++', n_clusters=n_clusters)
k_means.fit(X)
centers = []
X_df = pd.DataFrame(X)
X_df['label'] = k_means.labels_
for c_i in range(0, n_clusters):
items = X_df[X_df['label']==c_i]
items = items.drop(columns = 'label')
centers.append(k_means_center(X = np.array(items)))
return np.array(centers), k_means.labels_
def _k_cluster_init(X, x_weights, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init weighted n_clusters seeds according to k-means++(modified from sklearn.cluster._kmeans._k_init)
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/cluster/_kmeans.py
Parameters
----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
x_weights : size of each cluster
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : int, RandomState instance
The generator used to initialize the centers. Use an int to make the
randomness deterministic.
See :term:`Glossary <random_state>`.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
random_state = check_random_state(random_state)
assert x_squared_norms is not None, 'x_squared_norms None in _k_cluster_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick the first center randomly, weighted by the cluster sizes
center_id = random_state.randint(x_weights.sum())
cumsum = 0
for cluster_i, size in enumerate(x_weights):
    cumsum += size
    if cumsum > center_id:
        center_id = cluster_i
        break
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = (closest_dist_sq*x_weights).sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq*x_weights),
rand_vals)
# XXX: numerical imprecision can result in a candidate_id out of range
np.clip(candidate_ids, None, closest_dist_sq.size - 1,
out=candidate_ids)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
#**# update closest distances squared and potential for each candidate
np.minimum(closest_dist_sq, distance_to_candidates,
out=distance_to_candidates)
candidates_pot = (distance_to_candidates*x_weights).sum(axis=1)
# Decide which candidate is the best
best_candidate = np.argmin(candidates_pot)
current_pot = candidates_pot[best_candidate]
closest_dist_sq = distance_to_candidates[best_candidate]
best_candidate = candidate_ids[best_candidate]
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
return centers
def cloud_clustering(centers, k):
''' Combine clusters from edges with k-means method
Parameters
-------------------------
centers: array of edge_center
k: number of cluster
Return
-------------------------
new_centers: k centers after combination
'''
center_vs =np.array([c.center_ for c in centers])
center_we = np.array([c.size_ for c in centers])
Xnorm = row_norms(center_vs, squared=True)
cluster_center = _k_cluster_init(X = center_vs, x_weights=center_we , n_clusters=k,
x_squared_norms=Xnorm, random_state=True, n_local_trials=None)
kmeans = KMeans(init=cluster_center, n_clusters=k, n_init=1)  # seed with the weighted k-means++ centers computed above
kmeans.fit(center_vs, sample_weight=center_we)
new_centers = []
for l in set(kmeans.labels_):
indexs = np.where(kmeans.labels_ == l)
centerlist = indexs[0]
# centerlist = list(set(labels[indexs[0]]))
if len(centerlist) > 1:
new_center = centers[centerlist[0]].copy()
new_center.add_centers([centers[i] for i in centerlist[1:len(centerlist)]])
new_centers.append(new_center)
else:
new_centers.append(centers[centerlist[0]].copy())
return new_centers
def f1(results, labels):
''' Function to measure the f1-score of clustering results
Parameter:
-----------------------------------------
results- (Nx1 array)clustering results
labels - (Nx1 array)labels
Return:
------------------------------------------
f1-score
(print f1-score of each cluster)
'''
results = np.array(results)
labels = np.array(labels)
f1s =0
for k_i in set(results):
indexs = np.where(results == k_i)
counter = Counter(labels[indexs[0]]).most_common(1)
c_len = len(np.where(labels == counter[0][0])[0])
kc_len = counter[0][1]
k_len = len(indexs[0])
p = kc_len/(k_len+0.0)
r = kc_len/(c_len+0.0)
f1 = 2*p*r/(p+r)
f1s+= f1*k_len
# print(f1, p,r)
f1s = f1s/(len(results)+0.0)
return f1s
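def _f1_sketch():
    # Hedged toy usage of f1(): cluster 0 is pure, cluster 1 mixes two labels,
    # so the size-weighted score falls below 1.0.
    results = np.array([0, 0, 0, 1, 1, 1])
    labels = np.array([0, 0, 0, 1, 1, 2])
    return f1(results, labels)  # 0.9 for this toy case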
|
{"hexsha": "43c2c3aa9242e54b562f4e32fdf6a50bc4597ad2", "size": 13066, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "IoTDATALab/EC-Clustering", "max_stars_repo_head_hexsha": "2a5dfaca0198728f5e80963e5ad07023363e80fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-24T03:04:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-24T03:04:22.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "IoTDATALab/EC-Clustering", "max_issues_repo_head_hexsha": "2a5dfaca0198728f5e80963e5ad07023363e80fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "IoTDATALab/EC-Clustering", "max_forks_repo_head_hexsha": "2a5dfaca0198728f5e80963e5ad07023363e80fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2046783626, "max_line_length": 132, "alphanum_fraction": 0.6389101485, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3062}
|
from allensdk.brain_observatory.ecephys.ecephys_project_cache import EcephysProjectCache
from sqlalchemy import delete
from sqlalchemy.orm import sessionmaker
import json
import numpy as np
import pandas as pd
from datetime import date,datetime,timedelta
import ast
import sqla_schema as sch
import ingest
import numbers
def clean_string(v):
if v is None:
return None
if isinstance(v, numbers.Number):
return v
v = ast.literal_eval(v.strip())
if isinstance(v, list):
return v[0]
return v
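# Hedged examples of clean_string (illustrative values):
#   clean_string(None)       -> None
#   clean_string(3.5)        -> 3.5
#   clean_string(" [0.25] ") -> 0.25    # literal_eval yields a list; first element returned
#   clean_string("'4000'")   -> '4000'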
def ingest_stimulus_types(sessions, engine):
stim_table = pd.concat([s.stimulus_presentations for s in sessions])
# stimulus types
stim_types = pd.DataFrame(data={'name': stim_table['stimulus_name'].unique()})
stim_types.to_sql('stimulus_type', engine, index_label='id', if_exists='append')
def ingest_stimulus_presentations(session, engine):
# stimulus types
stim_types = pd.read_sql('stimulus_type', engine, index_col='id')
# stimulus presentations
stim_table = session.stimulus_presentations
stim_table = stim_table.replace({'null':None})
for k in ['phase','size','spatial_frequency']:
stim_table[k] = stim_table[k].apply(clean_string)
stim_table = stim_table.merge(stim_types.reset_index(), left_on='stimulus_name', right_on='name', how='left').rename(columns={'id':'stimulus_type_id'}).drop(columns=['stimulus_name','name'])
stim_table['session_id'] = pd.Series([session.ecephys_session_id]*len(stim_table))
stim_table.to_sql('stimulus_presentation', engine, index=False, if_exists='append')
def ingest_spike_times(session, engine):
with sessionmaker(engine)() as dbsession:
# spike times
for unit_id, unit_spike_times in session.spike_times.items():
dbst = sch.UnitSpikeTimes(unit_id=unit_id, spike_times=unit_spike_times)
print(unit_id)
dbsession.add(dbst)
dbsession.commit()
def main():
engine = ingest.connect_to_db()
print("cleaning tables")
tables = (sch.UnitSpikeTimes.__table__, sch.StimulusType.__table__, sch.StimulusPresentation.__table__)
sch.Base.metadata.drop_all(engine, tables=tables)
sch.Base.metadata.create_all(engine, tables=tables)
cache = ingest.get_ecephys_cache()
print("loading session metadata")
sessions = cache.get_session_table(suppress=[]).head(3)
all_session_data = [ cache.get_session_data(idx) for idx, row in sessions.iterrows() ]
print("ingesting stimulus types")
ingest_stimulus_types(all_session_data, engine)
for session in all_session_data:
print("ingesting stim table")
ingest_stimulus_presentations(session, engine)
print("ingesting spike times")
ingest_spike_times(session, engine)
if __name__ == '__main__': main()
|
{"hexsha": "ab196f4f9b616f1cefb75342d11f6fa12d73ebc5", "size": 2853, "ext": "py", "lang": "Python", "max_stars_repo_path": "pgaf/ingest_spikes.py", "max_stars_repo_name": "AllenNeuralDynamics/ephys-framework-tests", "max_stars_repo_head_hexsha": "ee940afeab54e5e25765a903a6b65f2e95be4c48", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pgaf/ingest_spikes.py", "max_issues_repo_name": "AllenNeuralDynamics/ephys-framework-tests", "max_issues_repo_head_hexsha": "ee940afeab54e5e25765a903a6b65f2e95be4c48", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-22T04:34:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T02:14:21.000Z", "max_forks_repo_path": "pgaf/ingest_spikes.py", "max_forks_repo_name": "AllenNeuralDynamics/ephys-framework-tests", "max_forks_repo_head_hexsha": "ee940afeab54e5e25765a903a6b65f2e95be4c48", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-01-21T22:38:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T01:30:09.000Z", "avg_line_length": 32.0561797753, "max_line_length": 194, "alphanum_fraction": 0.7153873116, "include": true, "reason": "import numpy", "num_tokens": 669}
|
# Carlos Morato, PhD.
# cwmorato@wpi.edu
# Deep Learning for Advanced Robot Perception
#
# MLP for Pima Indians Dataset serialize to YAML and HDF5
import os
import yaml
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers import Dense
from keras.models import model_from_yaml
import numpy
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# load pima indians dataset
dataset = numpy.loadtxt("pima-indians-diabetes.csv", delimiter=",")
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(16, input_dim=8, kernel_initializer='normal', activation='relu'))
# model.add(Dense(12, input_dim=8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(8, kernel_initializer='normal', activation='relu'))
# model.add(Dense(8, kernel_initializer='uniform', activation='relu'))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# model.add(Dense(1, kernel_initializer='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model (Keras 2 renamed nb_epoch to epochs)
model.fit(X, Y, epochs=150, batch_size=10, verbose=0)
# evaluate the model
scores = model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# serialize model to YAML
model_yaml = model.to_yaml()
with open("model.yaml", "w") as yaml_file:
yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")
# later...
# load YAML and create model
yaml_file = open('model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
score = loaded_model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1]*100))
|
{"hexsha": "33c507c8cd9d7361431d42bac4730ad42f81d0e5", "size": 2125, "ext": "py", "lang": "Python", "max_stars_repo_path": "week4/Save your models for later using serialization/serialize_yaml.py", "max_stars_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion", "max_stars_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "week4/Save your models for later using serialization/serialize_yaml.py", "max_issues_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion", "max_issues_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "week4/Save your models for later using serialization/serialize_yaml.py", "max_forks_repo_name": "JackHaoyingZhou/RBE595_DL_Discussion", "max_forks_repo_head_hexsha": "db16141b1cd18a03f182d418a2cf092f57fe4a6b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.196969697, "max_line_length": 91, "alphanum_fraction": 0.7477647059, "include": true, "reason": "import numpy", "num_tokens": 536}
|
# -*- coding: utf-8 -*-
from wide_resnet import WideResNet
import numpy as np
import cv2
import dlib
depth = 16
width = 8
img_size = 64
# face gender/age prediction model
model = WideResNet(img_size, depth=depth, k=width)()
model.load_weights('weights.hdf5')
def draw_label(image, point, label, font=cv2.FONT_HERSHEY_SIMPLEX, font_scale=1, thickness=2):
# draw the prediction label onto the video frame
size = cv2.getTextSize(label, font, font_scale, thickness)[0]
x, y = point
cv2.rectangle(image, (x, y - size[1]), (x + size[0], y), (255, 0, 0), cv2.FILLED)
cv2.putText(image, label, point, font, font_scale, (255, 255, 255), thickness)
# dlib reads the video stream and detects faces
detector = dlib.get_frontal_face_detector()
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
while True:
ret, image_np = cap.read()
image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)
img_h = image_np.shape[0]
img_w = image_np.shape[1]
detected = detector(image_np, 1)
faces = []
# faces were detected
if len(detected) > 0:
for i, d in enumerate(detected):
x0, y0, x1, y1, w, h = d.left(), d.top(), d.right(), d.bottom(), d.width(), d.height()
cv2.rectangle(image_np, (x0, y0), (x1, y1), (255, 0, 0), 2)
# expand the detection box
x0 = max(int(x0 - 0.25 * w), 0)
y0 = max(int(y0 - 0.45 * h), 0)
x1 = min(int(x1 + 0.25 * w), img_w - 1)
y1 = min(int(y1 + 0.05 * h), img_h - 1)
w = x1 - x0
h = y1 - y0
if w > h:
x0 = x0 + w // 2 - h // 2
w = h
x1 = x0 + w
else:
y0 = y0 + h // 2 - w // 2
h = w
y1 = y0 + h
# 64 * 64
faces.append(cv2.resize(image_np[y0: y1, x0: x1, :], (img_size, img_size)))
faces = np.array(faces)
if len(faces) > 0:  # only run the model when at least one face was found
    results = model.predict(faces)
    predicted_genders = results[0]
    ages = np.arange(0, 101).reshape(101, 1)
    predicted_ages = results[1].dot(ages).flatten()
    for i, d in enumerate(detected):
        label = '{}, {}'.format(int(predicted_ages[i]), 'F' if predicted_genders[i][0] > 0.5 else 'M')
        draw_label(image_np, (d.left(), d.top()), label)
cv2.imshow('gender and age', cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
if cv2.waitKey(25) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
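# Hedged standalone sketch of the square-crop arithmetic used in the loop above:
# shrink the longer side so the expanded box becomes a centered square.
def square_crop_box(x0, y0, x1, y1):
    w, h = x1 - x0, y1 - y0
    if w > h:
        x0 = x0 + w // 2 - h // 2
        x1 = x0 + h
    else:
        y0 = y0 + h // 2 - w // 2
        y1 = y0 + w
    return x0, y0, x1, y1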
|
{"hexsha": "f86fb42b2c80519f95492c22ec72115e316709b3", "size": 2451, "ext": "py", "lang": "Python", "max_stars_repo_path": "CycleGAN/video_gender/age-gender-estimation/gender_age_detect_guide.py", "max_stars_repo_name": "RacleRay/-Have_Fun_Doing", "max_stars_repo_head_hexsha": "8ebb7fcabc6148571d38f2f51eac47952ce54424", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-12T05:57:47.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-12T05:57:47.000Z", "max_issues_repo_path": "CycleGAN/video_gender/age-gender-estimation/gender_age_detect_guide.py", "max_issues_repo_name": "RacleRay/-Have_Fun_Doing", "max_issues_repo_head_hexsha": "8ebb7fcabc6148571d38f2f51eac47952ce54424", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CycleGAN/video_gender/age-gender-estimation/gender_age_detect_guide.py", "max_forks_repo_name": "RacleRay/-Have_Fun_Doing", "max_forks_repo_head_hexsha": "8ebb7fcabc6148571d38f2f51eac47952ce54424", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0253164557, "max_line_length": 106, "alphanum_fraction": 0.5503875969, "include": true, "reason": "import numpy", "num_tokens": 827}
|
See my concert information under Bill Wagman. I have been involved with KDVS for nearly 15 years, alternating weeks on The Saturday Morning Folk Show, Saturdays from 9:00 to noon.
|
{"hexsha": "af5f8a502baa878db7308dfae6a1ae790be3463a", "size": 181, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/WjWagman.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/WjWagman.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/WjWagman.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.3333333333, "max_line_length": 179, "alphanum_fraction": 0.8011049724, "num_tokens": 41}
|
#coding=utf-8
from __future__ import print_function
import os,logging,math,time
import argparse
import mxnet as mx
from mxnet import gluon,nd
import numpy as np
from mxnet.gluon.data.vision import transforms
from mxnet.gluon.data import DataLoader
from mxnet.gluon.data import Dataset
import mxnet.autograd as autograd
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
parser = argparse.ArgumentParser(description='Mxnet digest Training')
parser.add_argument('--lr', default=0.005, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true',
help='resume from checkpoint')
parser.add_argument('--model', default="resnext101", type=str,
help='model type (default: resnet101)')
parser.add_argument('--name', default='0', type=str, help='name of run')
parser.add_argument('--seed', default=0, type=int, help='random seed')
parser.add_argument('--batchsize', default=64, type=int, help='batch size')
parser.add_argument('--epoch', default=200, type=int,
help='total epochs to run')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='use standard augmentation (default: True)')
parser.add_argument('--decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--mixup_alpha', default=0.2, type=float,
help='mixup interpolation coefficient (default: 1)')
args = parser.parse_args()
filehandler = logging.FileHandler('log/train_resnext101.log')
streamhandler = logging.StreamHandler()
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
logger.info(args)
BATCH_SIZE=args.batchsize
NUM_CLASSES=8
NUM_WORKERS=4
if args.seed != 0:
mx.random.seed(args.seed)
data_dirs={"train":"/home/gfkd/bito8/train_dir","val":"/home/gfkd/bito8/validation_dir"}
def default_loader(path):
try:
img=mx.image.imread(path,to_rgb=1)
return img
except:
print("Cannot read image: {}".format(path))
data_transforms= {
"train":transforms.Compose([
# Resize behaves differently between mxnet v1.2 and v1.3
transforms.RandomResizedCrop((224,224)),
transforms.RandomFlipLeftRight(),
transforms.ToTensor()
]),
"val":transforms.Compose([
transforms.CenterCrop((224,224)),
transforms.ToTensor()
])}
# define your Dataset. Assume each line in your .txt file is [name/tab/label], for example:0001.jpg 1
class customData(Dataset):
def __init__(self, img_path, txt_path,mode='', data_transform=None, loader = default_loader):
with open(os.path.join(img_path,txt_path)) as input_file:
lines = input_file.readlines()
self.img_name = [os.path.join(img_path, line.strip('\n').rstrip().split()[0]) for line in lines]
#self.img_label = [labels_map[line.strip('\n').rstrip().split()[1]] for line in lines]
self.img_label = [int(line.strip('\n').rstrip().split()[1]) for line in lines]
self._data_transform = data_transform
self._loader = loader
self._mode = mode
def __len__(self):
return len(self.img_name)
def __getitem__(self, index):
img_name = self.img_name[index]
label = self.img_label[index]
img = self._loader(img_name)
if self._data_transform is not None:
try:
if self._mode != 'train':
img = mx.image.resize_short(img,256,1)
img = self._data_transform(img)
except Exception as e:
print(e)
print("Cannot transform image: {}".format(img_name))
return img, label
###############################################################
#define train data and validation data
###############################################################
data_sets = {x:customData(img_path=data_dirs[x],txt_path="labels",mode=x,data_transform=data_transforms[x]) for x in ["train","val"]}
train_loader = gluon.data.DataLoader(data_sets["train"], batch_size=BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,last_batch='discard')
val_loader = gluon.data.DataLoader(data_sets["val"], batch_size=BATCH_SIZE,
shuffle=False, num_workers=NUM_WORKERS,last_batch='discard')
dataset_sizes = {x: len(data_sets[x]) for x in ["train","val"]}
sym, arg_params, aux_params = mx.model.load_checkpoint('resnext-101-64x4d', 0)
print("Loaded checkpoint!")
data_names = [graph_input for graph_input in sym.list_inputs()
if graph_input not in arg_params and graph_input not in aux_params]
print(data_names)
ctx=[mx.gpu(i) for i in range(4)]
pre_trained = gluon.nn.SymbolBlock(outputs=sym, inputs=mx.sym.var(data_names[0]))
pre_trained.collect_params().initialize(ctx=ctx)
net_params = pre_trained.collect_params()
for param in arg_params:
if param in net_params:
net_params[param]._load_init(arg_params[param], ctx=ctx)
for param in aux_params:
if param in net_params:
net_params[param]._load_init(aux_params[param], ctx=ctx)
# replace the final layer
dense_layer=gluon.nn.Dense(NUM_CLASSES)
dense_layer.initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx)
net = gluon.nn.HybridSequential()
net.add(pre_trained)
net.add(dense_layer)
LEARNING_RATE = 0.005
WDECAY = 0.0001
MOMENTUM = 0.9
best_acc=0.0
#softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)
net.hybridize()
def batch_fn(d,l, ctx):
data = gluon.utils.split_and_load(d, ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(l, ctx_list=ctx, batch_axis=0)
return data, label
acc_top1 = mx.metric.Accuracy()
acc_top5 = mx.metric.TopKAccuracy(5)
train_metric = mx.metric.RMSE()
def evaluate(model, data_iterator,ctx):
acc_top1.reset()
acc_top5.reset()
for i, (data,label) in enumerate(data_iterator):
data, label = batch_fn(data,label, ctx)
outputs = [net(X.astype(np.float32, copy=False)) for X in data]
acc_top1.update(label, outputs)
acc_top5.update(label, outputs)
_, top1 = acc_top1.get()
_, top5 = acc_top5.get()
return (1-top1, 1-top5)
#def evaluate(model,data_iterator):
# num_instance = nd.zeros(1, ctx=ctx)
# sum_metric = nd.zeros(1,ctx=ctx, dtype=np.int32)
# for i, (data, label) in enumerate(data_iterator):
# data = data.astype(np.float32).as_in_context(ctx)
# label = label.astype(np.int32).as_in_context(ctx)
# #data, label = batch_fn(data,label, ctx)
# output = model(data)
# prediction = nd.argmax(output, axis=1).astype(np.int32)
# num_instance += len(prediction)
# sum_metric += (prediction==label).sum()
# accuracy = (sum_metric.astype(np.float32)/num_instance.astype(np.float32))
# return accuracy.asscalar()
def mixup_data(data,label,classes=NUM_CLASSES):
if isinstance(data,nd.NDArray):
data = [data]
if args.mixup_alpha > 0:
lam = np.random.beta(args.mixup_alpha, args.mixup_alpha)
else:
lam = 1.
data = [lam*X + (1-lam)*X[::-1] for X in data]
if isinstance(label,nd.NDArray):
label=[label]
res = []
for l in label:
y1 = l.one_hot(classes, on_value = 1., off_value = 0.)
y2 = l[::-1].one_hot(classes, on_value = 1., off_value = 0.)
res.append(lam*y1 + (1-lam)*y2)
return data,res
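def _mixup_sketch():
    # Hedged numpy-only illustration of mixup_data above: draw lam ~ Beta(alpha, alpha)
    # and mix each sample (and its one-hot label) with the reversed batch.
    alpha = 0.2
    x = np.arange(8, dtype=np.float32).reshape(4, 2)   # toy batch of 4 samples
    y = np.array([0, 1, 2, 1])
    lam = np.random.beta(alpha, alpha)
    x_mix = lam * x + (1 - lam) * x[::-1]
    onehot = np.eye(NUM_CLASSES)[y]
    y_mix = lam * onehot + (1 - lam) * onehot[::-1]
    return x_mix, y_mix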
def adjust_learning_rate(epoch):
"""decrease the learning rate at 100 and 150 epoch"""
lr = args.lr
if epoch >= 20:
lr /= 10
if epoch >= 40:
lr /= 10
if epoch >= 60:
lr /= 10
if epoch >= 80:
lr /= 10
if epoch >= 100:
lr /= 10
return lr
# Train the given model on the loaded dataset
def train(model,epoch,loss_fn,ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
best_val_score = 1.0
# Use Adam optimizer
trainer = gluon.Trainer(model.collect_params(), 'sgd',
{'learning_rate': args.lr,
'wd':WDECAY,
'momentum':MOMENTUM})
since=time.time() # trainning start time
# Train for one epoch
for i in range(epoch):
begin=time.time()
train_metric.reset()
# Iterate through the images and labels in the training data
for batch_idx, (data, label) in enumerate(train_loader):
# get the images and labels
#data = data.as_in_context(ctx)
#label = label.as_in_context(ctx)
data, label = batch_fn(data,label, ctx)
data,label = mixup_data(data,label)
# Ask autograd to record the forward pass
with autograd.record():
# Run the forward pass
#output = model(data)
# Compute the loss
#loss = loss_fn(output, label)
# Run the forward pass
outputs = [model(X.astype(np.float32, copy=False)) for X in data]
# Compute the loss
loss = [loss_fn(yhat, y.astype(np.float32, copy=False)) for yhat, y in zip(outputs, label)]
# Compute gradients
for l in loss:
l.backward()
# Update parameters
trainer.step(BATCH_SIZE)
trainer.set_learning_rate(adjust_learning_rate(i))
output_softmax = [mx.nd.SoftmaxActivation(out.astype('float32', copy=False)) for out in outputs]
train_metric.update(label,outputs)
# Print loss once in a while
if batch_idx%10==0 and batch_idx >0:
print('Epoch :[{0}]; Batch [{1}] loss: {2:.4f}'.format(i,batch_idx, nd.concatenate(loss).mean(0).asscalar()))
#nd.waitall()
train_metric_name, train_metric_score = train_metric.get()
err_top1_val,err_top5_val=evaluate(model,val_loader,ctx)
print('Epoch [{0}] cost {1:.0f}s '.format(i,time.time()-begin))
print("Epoch [{0}] Test Accuracy {1:.4f} ".format(i, 1-err_top1_val))
logger.info('[Epoch %d] training: %s=%f'%(i, train_metric_name, train_metric_score))
logger.info('[Epoch %d] validation: err-top1=%f err-top5=%f'%(i, err_top1_val, err_top5_val))
if err_top1_val < best_val_score:
checkpoint(model,i)
best_val_score = err_top1_val
#train finished time
time_elapsed=time.time()-since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
def checkpoint(net,epoch):
net.export('pdl-resnext',epoch=epoch)
def main():
print ("start trainning")
train(net,args.epoch,softmax_cross_entropy,ctx)
print ("finish trainning")
if __name__ == '__main__':
main()
|
{"hexsha": "7c1352366a8155a4ac52014c833a7d4b5d910ace", "size": 10886, "ext": "py", "lang": "Python", "max_stars_repo_path": "mxnet/train_mixup_resnext101.py", "max_stars_repo_name": "nipeone/dl", "max_stars_repo_head_hexsha": "5dec328077accc18adac05ffd1ea27cd474b176c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mxnet/train_mixup_resnext101.py", "max_issues_repo_name": "nipeone/dl", "max_issues_repo_head_hexsha": "5dec328077accc18adac05ffd1ea27cd474b176c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mxnet/train_mixup_resnext101.py", "max_forks_repo_name": "nipeone/dl", "max_forks_repo_head_hexsha": "5dec328077accc18adac05ffd1ea27cd474b176c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1661129568, "max_line_length": 133, "alphanum_fraction": 0.6345765203, "include": true, "reason": "import numpy", "num_tokens": 2688}
|
import numpy as np
import matplotlib
matplotlib.use("agg")  # select the backend before pyplot is imported
import matplotlib.pyplot as plt
import seaborn as sns
import string
import pandas as pd
import os
from scipy.stats import ttest_ind
from matplotlib.ticker import FormatStrFormatter
types = ["MoA validation", "Multiple cell types", "Unseen cell type", "shRNA for LoF"]
os.chdir(open("../data_dir").read().strip())
sns.set(font_scale=1.3, style='ticks')
fig, axs = plt.subplots(2,2,figsize=(12,8))
axs = axs.flat
df = pd.read_csv("figures_data/all_results_supp.csv", sep=",")
for n, ax in enumerate(axs):
ax.text(-0.1, 1.05, string.ascii_lowercase[n], transform=ax.transAxes, size=20, weight='bold')
df2 = df[(df['Validation'] == types[n])]
df2 = df2[['Baseline', 'DeepCellState']]
sns.violinplot(data=df2, ax=ax, palette="Set2")
ax.set(ylabel='PCC')
ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.xaxis.set_ticks_position('none')
if n == 1:
df_2cell = pd.read_csv("figures_data/2cell_all_results.tsv", sep="\t")
t, p = ttest_ind(df2["DeepCellState"].to_list(), df_2cell["DeepCellState"].to_list())
else:
t, p = ttest_ind(df2["DeepCellState"].to_list(), df2["Baseline"].to_list())
print(types[n] + ": " + str(p))
print(str(np.mean(df2["Baseline"].to_numpy())) + " " + str(np.mean(df2["DeepCellState"].to_numpy())))
print()
plt.subplots_adjust(hspace=0.3, wspace=0.3)
plt.savefig("figures/multi.png")
|
{"hexsha": "f0154281c3b005d36195ae87cb4f5a10cc29b409", "size": 1446, "ext": "py", "lang": "Python", "max_stars_repo_path": "figures/multi_violin.py", "max_stars_repo_name": "umarov90/DeepFake", "max_stars_repo_head_hexsha": "e65c72f255817532e8a8a3afe2138ae270477601", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-28T08:08:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-30T02:15:54.000Z", "max_issues_repo_path": "figures/multi_violin.py", "max_issues_repo_name": "umarov90/DeepCellState", "max_issues_repo_head_hexsha": "e65c72f255817532e8a8a3afe2138ae270477601", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figures/multi_violin.py", "max_forks_repo_name": "umarov90/DeepCellState", "max_forks_repo_head_hexsha": "e65c72f255817532e8a8a3afe2138ae270477601", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-09T14:56:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T14:56:49.000Z", "avg_line_length": 37.0769230769, "max_line_length": 105, "alphanum_fraction": 0.6818810512, "include": true, "reason": "import numpy,from scipy", "num_tokens": 420}
|
Lemma silly_implication : (1 + 1) = 2 -> 0 * 3 = 0.
Proof. intros H. simpl. reflexivity. Qed.
Inductive and (P Q : Prop) : Prop :=
conj : P -> Q -> (and P Q).
Notation "P /\ Q" := (and P Q) : type_scope.
Theorem and_example : (0 = 0) /\ (4 = mult 2 2).
Proof.
apply conj.
reflexivity.
simpl.
reflexivity.
Qed.
Theorem proj1 : forall P Q : Prop, P /\ Q -> P.
Proof.
intros.
destruct H.
apply H.
Qed.
Theorem proj2 : forall P Q : Prop, P /\ Q -> Q.
Proof.
intros.
destruct H.
apply H0.
Qed.
Theorem and_commut : forall P Q : Prop, P /\ Q -> Q /\ P.
Proof.
intros.
split.
destruct H.
apply H0.
destruct H.
apply H.
Qed.
Theorem and_assoc : forall P Q R : Prop, P /\ (Q /\ R) -> (P /\ Q) /\ R.
Proof.
intros.
split.
destruct H.
split.
apply H.
destruct H0.
apply H0.
destruct H as [H1 H2].
destruct H2.
apply H0.
Qed.
Definition iff (P Q : Prop) := (P -> Q) /\ (Q -> P).
Notation "P <-> Q" := (iff P Q)
(at level 95, no associativity)
: type_scope.
Theorem iff_implies : forall P Q : Prop, (P <-> Q) -> P -> Q.
Proof.
intros P Q H.
destruct H.
apply H.
Qed.
Theorem iff_sym : forall P Q : Prop, (P <-> Q) -> (Q <-> P).
Proof.
intros.
destruct H.
split.
apply H0.
apply H.
Qed.
Theorem iff_refl : forall P : Prop, P <-> P.
Proof.
intros.
split.
intros H1.
apply H1.
intros H2.
apply H2.
Qed.
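(* A small usage example (added for illustration): [iff_refl]
   specializes to any concrete proposition. *)
Example iff_refl_example : (1 + 1 = 2) <-> (1 + 1 = 2).
Proof. apply iff_refl. Qed.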
Theorem conj_trans : forall P Q R : Prop, P /\ Q -> Q /\ R -> P /\ R.
Proof.
intros.
split.
destruct H.
apply H.
destruct H0.
apply H1.
Qed.
Theorem iff_trans : forall P Q R : Prop, (P <-> Q) -> (Q <-> R) -> (P <-> R).
Proof.
intros P Q R HEPQ HEQR.
inversion HEPQ as [HPQ HQP].
inversion HEQR as [HQR HRQ].
split.
intros HP.
apply HQR.
apply HPQ.
apply HP.
intros HR.
apply HQP.
apply HRQ.
apply HR.
Qed.
Inductive or (P Q : Prop) : Prop :=
| or_introl : P -> or P Q
| or_intror : Q -> or P Q.
Notation "P \/ Q" := (or P Q) : type_scope.
Theorem or_commut : forall P Q : Prop, P \/ Q -> Q \/ P.
Proof.
intros.
destruct H as [H1 | H2].
apply or_intror.
apply H1.
apply or_introl.
apply H2.
Qed.
Theorem or_distributes_over_and_1 : forall P Q R : Prop,
P \/ (Q /\ R) -> (P \/ Q) /\ (P \/ R).
Proof.
intros P Q R H.
inversion H as [ HP | [ HQ HR ] ].
split.
left. apply HP.
left. apply HP.
split.
right.
apply HQ.
right.
apply HR.
Qed.
Theorem or_distributes_over_and_2 : forall P Q R : Prop, (P \/ Q) /\ (P \/ R) -> P \/ (Q /\ R).
Proof.
intros P Q R H.
inversion H as [HPQ HPR].
inversion HPQ as [ HP | HQ ].
left.
apply HP.
inversion HPR as [HP | HR].
left.
apply HP.
right.
apply conj.
apply HQ.
apply HR.
Qed.
Theorem or_distributes_over_and : forall P Q R : Prop, P \/ (Q /\ R) <-> (P \/ Q) /\ (P \/ R).
Proof.
split.
apply or_distributes_over_and_1.
apply or_distributes_over_and_2.
Qed.
Theorem andb_prop : forall b c, andb b c = true -> b = true /\ c = true.
Proof.
intros b c H.
destruct b.
destruct c.
apply conj. reflexivity. reflexivity.
inversion H.
inversion H.
Qed.
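(* Illustration (added): [andb_prop] splits a boolean conjunction
   into two propositional facts. *)
Example andb_prop_example : andb true true = true -> true = true /\ true = true.
Proof. apply andb_prop. Qed.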
Theorem andb_true_intro : forall b c, b = true /\ c = true -> andb b c = true.
Proof.
intros b c H.
destruct b.
destruct c.
simpl.
reflexivity.
simpl.
inversion H.
apply H1.
destruct c.
simpl.
inversion H.
apply H0.
simpl.
inversion H.
apply H0.
Qed.
Theorem andb_false : forall b c, andb b c = false -> b = false \/ c = false.
Proof.
intros b c H.
destruct b. destruct c.
left.
inversion H.
right.
reflexivity.
left.
reflexivity.
Qed.
Theorem orb_prop : forall b c, orb b c = true -> b = true \/ c = true.
Proof.
intros b c H.
destruct b.
left.
reflexivity.
destruct c.
right.
reflexivity.
simpl.
left.
apply H.
Qed.
Theorem orb_false_elim : forall b c, orb b c = false -> b = false /\ c = false.
Proof.
intros b c H.
destruct b.
split.
apply H.
destruct c.
apply H.
reflexivity.
split.
reflexivity.
destruct c.
apply H.
reflexivity.
Qed.
Inductive False : Prop := .
Theorem False_implies_nonsense : False -> 2 + 2 = 5.
Proof.
intros.
inversion H.
Qed.
Theorem contradiction_implies_anything : forall P Q : Prop, (P /\ ~P) -> Q.
Proof.
intros.
destruct H.
unfold not in H0.
apply H0 in H.
inversion H.
Qed.
Theorem double_neg : forall P : Prop, P -> ~~P.
Proof.
intros.
unfold not.
intros.
apply H0 in H.
apply H.
Qed.
Theorem contrapositive : forall P Q : Prop, (P -> Q) -> (~Q -> ~P).
Proof.
intros P Q H. unfold not. intros notQ Pass.
apply notQ. apply H. apply Pass.
Qed.
(* Peirce's law is not provable in constructive logic: after [apply H]
   the remaining goal [P -> Q] cannot be closed without a classical
   axiom such as the excluded middle, so the proof is left admitted. *)
Theorem peirce : forall P Q : Prop,
  ((P -> Q) -> P) -> P.
Proof.
  intros.
  apply H.
Admitted.
Theorem first_eq : forall P Q : Prop, ((P -> Q) -> P) -> P -> ~~P -> P.
Proof.
intros.
apply H0.
Qed.
Theorem excluded_middle_irrefutable: forall (P:Prop), ~~(P \/ ~ P).
Proof.
unfold not.
intros P f.
apply f.
right.
intro p.
apply f.
left.
apply p.
Defined.
|
{"author": "DanielRrr", "repo": "Coq-Studies", "sha": "a7cd6bd7f61e91ca118a615e62dfe8fec50b70d3", "save_path": "github-repos/coq/DanielRrr-Coq-Studies", "path": "github-repos/coq/DanielRrr-Coq-Studies/Coq-Studies-a7cd6bd7f61e91ca118a615e62dfe8fec50b70d3/computational_logic9.v"}
|
#=
Author: Shunsuke Hori
=#
"""
This holds the results for Harrison Kreps. In particular, it
accepts two matrices Qa and Qb and compares the single belief,
optimistic belief, and pessimistic belief prices
"""
struct PriceHolder{TF<:AbstractFloat}
qa::Matrix{TF}
qb::Matrix{TF}
qpess::Matrix{TF}
qopt::Matrix{TF}
dividend_payoff::Vector{TF}
qaprice::Vector{TF}
qbprice::Vector{TF}
qpessprice::Vector{TF}
qoptprice::Vector{TF}
optimisticprice::Vector{TF}
pessimisticprice::Vector{TF}
phat_a::Vector{TF}
phat_b::Vector{TF}
end
function PriceHolder(qa::Matrix{TF},
                     qb::Matrix{TF},
                     dividend_payoff::Vector{TF};
                     beta::TF=0.75,
                     max_iters::Integer=10000,
                     tolerance::TF=1e-16) where TF<:AbstractFloat
    # Create the pessimistic and optimistic beliefs
    qpess = Matrix{TF}(undef, 2, 2)
    qpess[1, :] = ifelse(qa[1, 2] < qb[1, 2], qa[1, :], qb[1, :])
    qpess[2, :] = ifelse(qa[2, 2] < qb[2, 2], qa[2, :], qb[2, :])
    qopt = Matrix{TF}(undef, 2, 2)
    qopt[1, :] = ifelse(qa[1, 2] > qb[1, 2], qa[1, :], qb[1, :])
    qopt[2, :] = ifelse(qa[2, 2] > qb[2, 2], qa[2, :], qb[2, :])
    # Price everything
    p_singlebelief, p_optimistic, phat_a, phat_b, p_pessimistic =
        create_prices(qa, qb, qpess, qopt, dividend_payoff,
                      beta, max_iters, tolerance)
qaprice = p_singlebelief[1]
qbprice = p_singlebelief[2]
qpessprice = p_singlebelief[3]
qoptprice = p_singlebelief[4]
return PriceHolder(qa,
qb,
qpess,
qopt,
dividend_payoff,
qaprice,
qbprice,
qpessprice,
qoptprice,
p_optimistic,
p_pessimistic,
phat_a,
phat_b)
end
"""
Computes prices under all belief systems
"""
function create_prices(qa::Matrix, qb::Matrix, qpess::Matrix, qopt::Matrix,
dividend_payoff::Vector, bet::AbstractFloat,
max_iters::Integer, tolerance::AbstractFloat)
transitionmatrix = [qa, qb, qpess, qopt]
# Single Belief Prices
p_singlebelief = [price_singlebeliefs(q, dividend_payoff, beta=bet)
for q in transitionmatrix]
# Compute Optimistic and Pessimistic beliefs
p_optimistic, phat_a, phat_b =
price_optimisticbeliefs([qa, qb], dividend_payoff,
beta=bet, max_iter=max_iters, tol=tolerance)
p_pessimistic =
price_pessimisticbeliefs([qa, qb], dividend_payoff,
beta=bet, max_iter=max_iters, tol=tolerance)
return p_singlebelief, p_optimistic, phat_a, phat_b, p_pessimistic
end
function print_prices(ph::PriceHolder)
qaprice, qbprice, qpessprice, qoptprice =
ph.qaprice, ph.qbprice, ph.qpessprice, ph.qoptprice
optimisticprice, pessimisticprice =
ph.optimisticprice, ph.pessimisticprice
phata, phatb = ph.phat_a, ph.phat_b
println("The Single Belief Price Vectors are:")
println(" P(Qa) = $(qaprice)\n P(Qb) = $(qbprice)\n P(Qopt) = $(qoptprice)\n P(Qpess) = $(qpessprice)\n")
println("The Optimistic Belief Price Vector is:")
println(" P(Optimistic) = $(optimisticprice)\n Phat(a) = $(phata)\n Phat(b) = $(phatb)\n")
println("The Pessimistic Belief Price Vector is:")
println(" P(Pessimistic) = $(pessimisticprice)")
end
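# qa and qb are the two agents' subjective transition matrices; in the
# lecture this script accompanies they are defined beforehand. The values
# below are illustrative placeholders so the script can run standalone.
qa = [0.5 0.5; 2/3 1/3]
qb = [2/3 1/3; 0.25 0.75]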
ph = PriceHolder(qa, qb, [0.0, 1.0], beta=0.75)
print_prices(ph)
|
{"hexsha": "f71e49caecb2d5dc0c21d626b406566f98097cbc", "size": 3740, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "harrison_kreps/harrison_kreps_code.jl", "max_stars_repo_name": "chenwang/QuantEcon.lectures.code", "max_stars_repo_head_hexsha": "8832a74acd219a71cb0a99dc63c5e976598ac999", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 56, "max_stars_repo_stars_event_min_datetime": "2017-05-09T10:45:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-20T20:33:27.000Z", "max_issues_repo_path": "harrison_kreps/harrison_kreps_code.jl", "max_issues_repo_name": "chenwang/QuantEcon.lectures.code", "max_issues_repo_head_hexsha": "8832a74acd219a71cb0a99dc63c5e976598ac999", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-06-30T01:52:46.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-01T20:09:47.000Z", "max_forks_repo_path": "harrison_kreps/harrison_kreps_code.jl", "max_forks_repo_name": "QuantEcon/QuantEcon.lectures.code", "max_forks_repo_head_hexsha": "d61ac7bc54529dd5c77470c17539eb2418b047c9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 117, "max_forks_repo_forks_event_min_datetime": "2017-04-25T16:09:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T02:30:29.000Z", "avg_line_length": 34.3119266055, "max_line_length": 109, "alphanum_fraction": 0.5679144385, "num_tokens": 1055}
|
I graduated from Davis and no longer live nearby to try out all the restaurants. Everything below is from my time in Davis, Sept 2003 to August 2007. Appreciate the DavisWiki; I wish more cities would have their own with an active community.
I love to promote diversity and native languages so please feel free to change the names of the restaurants here on my page!
Restaurants I have eaten at... (My rating out of 5)
علي بابا Ali Baba (4) Delicious gyros but fries are < Sam's, but I'll return
Burger King (3) No bad stories, just like any other fast food place
Caffe Italia (5) Great service, great food, good price, CRAYONS
Cafe Casablanca (3) WOW EXPENSIVE! The setting was completely cute with the movie going for people who wait to be seated. The service was excellent. The food was decent but definitely not worth the price. I ended up paying $20 for an entree, drink, tax and tip. If they bring down their prices, then I'll return.
Cafe Roma (4) Very good coffee, nice service, kinda shabby looking, will return again when there's music and it's more busy
Carl's Jr. (2) Pricy but on campus location is convenient
Cenario's Pizza (3) Decent pizza, I'd eat it again
Chuy's Taqueria (4) Now I don't have to drive hella far :) Love the tacos, the horchata was room temperature tho :(
Cindy's (2) Only thing good is the service and the malts
Ciocolat (3) Extremely delicious but very pricey!
Coffee House (3) Average, customer service could be better
Cold Stone Creamery (4) Friendly customer service and tasty ice cream. Expensive tho
Crepeville La ville des crêpes (5) Tuna Melt Sandwich mmm Crepes mmm A tad bit pricy but the food is worth it
Davis Noodle City (4) Very friendly and best dumplings ever!
Dos Coyotes (1) Expensive and ewww
El Mariachi (5) mmm Burrito… and I love the salsa selection!
富士のシェフ Fuji Chef (4) Good lunch buffet but wait was forever
Fuzio (3) Food was decent but not stellar. Price is ok.
Fluffy Donuts (4) mmm Donut holes mmm
Hibachi (3) Very much like Teriyaki Express but you get less food. But better seating
Hoa Viet (3) OK food, quick serve, cheap price
The Hotdogger (4) Delicious, quick service, skinny hot dog but tasty
House of Chang (5) Excellent Honey Walnut Prawns, good food, fast delivery
Hunan (0) I got the shits
Huong Lan Sandwiches (4) Delicious, tough bread, cute cashier :D Definitely will return
IHOP Internationales Haus der Pfannkuchen (2) Same generic food but always quick service and decent variety
In-N-Out (4) Grilled Cheese every time, Root Beer Floats, Fries with no salt
Jack in the Box (3) Why did they change the pita? :( and the one downtown creeps me out
Jade Garden (4) Huge portions for the price, good food, will return
Muta Fart Mnywo Jamba Juice (3) Gift card + friend working there = cheap and worth it
Kabul Afghan Cuisine (4) The service was excellent; I've never eaten Afghan food before so the server helped me. The food was delicious and I'm very happy with the choice. Not a 5 because there is a belly dancer and I know that belly dancing is not Afghan; I've heard they brought one in to attract more customers.
KFC (4) I love the sides… Chicken and Corn Tuesdays for $2.99
La Esperanza (4) The chips were fresh, quick service, food came out quickly and was good, horchata excellent
Lamppost Pizza (2) Overpriced and greasy. Won't eat here again unless it's free
London Fish'n Chips (4) Excellent service, delicious calamari, will return
Manna Korean Restaurant (3) Had the BBQ Beef and didn't think it was anything special. But I love the potstickers. I'll return for those!
Marble Slab Creamery (3) Good tasting but not as good as Cold Stone, just a tad cheaper
McDonald's (1) 2 apple pies for $1 is the only thing I'll eat
Mocha Joe (5) The couple running the place is extremely kind, good coffee and it's cheap!
Mountain Mike's Pizza (3) Average, Decent price with coupon
Mr. Chan's (0) I got the shits
New Delhi Chaat Cafe (5) Excellent food, good service and good prices
Nobu Hiro (5) Excellent, quick service at the bar, get there at 11
Norm's Pizza (4) DELICIOUS Philly Cheesesteak, cooked when I ordered it. Alas, they closed :(
老茶馆 Old Teahouse (4) Love the boba, shredded chicken fried rice, crispy chicken, no room to sit, wait is sometimes forever, check your change
Osaka Sushi (4) Pricy but you get quality and quantity for it; good service
Papa John's Padre John's (5) Delivery is awesome, gotta love the cheese sticks
Papa Murphy's (5) Awesome pizza for a much cheaper price. And cooking it at home is worth the wait and the money I saved.
Κοίλωμα Pita Pita Pit (3) Pricy but the late hours are wonderful
Pizza Guys (3) Good price, was stingy with the meat on my pizza and had to wait for the wings to cook (they said my order would be ready in 20 minutes and when I got there 30 minutes later, my wings were not even in the oven)
Pluto's (5) Won me over several times. Love those garlic fries
Redrum Burger (3) Expensive but good food
Quiznos (3) Too pricy but decent sandwiches
Sam's Mediterranean Cuisine (4) Decent price for good food, loved the fries / (1) I went a second time and was sick for 3 days afterwards. :( I don't know what happened!
上海镇 Shanghai Town (4) Prices may seem expensive but you receive LOTS of food. One ordered has turned into two meals.
Sophia's Thai Kitchen (4) Cute little place, good food, too much spicy food though
Starbucks (2) Maybe I've just had too much
Steve's Place Pizza (5) Garlic Chicken and sun dried tomatoes. A bit expensive but I think it's worth it
Caffè Dell'Italiano Delle Stringhe Strings Italian Cafe (3) Soltanto degno esso durante i buffet di mercoledì (Translation: Only worth it during Wednesday's buffets)
Sugar and Spice (3) Very friendly, tastes good, limited selection, incredibly expensive. I'll return when I'm too lazy to cook my own Pilipino food.
Symposium (4) (?) Enjoyed the pizza and loved the crust. Haven't tried the restaurant yet.
Taqueria Davis (5) Hands down the best burritos in Davis. Love the salsa and the horchata. Very friendly.
Taqueria Guadalajara (4) Would have a perfect 5 if they had not messed up my order :(
明白な Teriyaki Teriyaki Express (5) 私は2 つの食事に teriyaki の1 つの版を回す。 veggies を愛しなさい (Translation: I turn one plate of teriyaki into two meals. Love the veggies)
2K Thai Food (5) Inexpensive, clean, friendly, excellent food, wonderful service
Thai Bistro (4) Best customer service of all the Thai restaurants but not my fav
Thai Recipes (3) Decent food but was priced too high; get better tasting food for cheaper elsewhere
Village Bakery (4) Delicious surprise
Wendy's (4) Pricy but open late
Wok n Roll (4) Cheap, good, tasty
Woodstock's Pizza La pizza del Woodstock (1) che cosa è così grande circa questo posto? (Translation: What's so great about this place?)
Zen Toro (5) My highest rating. You get quality and quantity for the price. This is my favorite Japanese restaurant in town. Only down side: there is no ice in the water.
Outside of Davis
Chuy's Taqueria in Winters (4) Worth driving out there and loved the tacos
California Pizza Kitchen in Sacramento (5) I've never ordered anything bad, everything is delicious!
The Cheesecake Factory in Sacramento (3) Everything on the menu is fried, need more variety
Hooters in Sacramento (4) Good Philly Cheesesteak. OK chicken wings, I ordered medium and they were not spicy at all. Next time I'm getting 3 Mile Island or 911
Hot Dog on a Stick in Sacramento (3) Good everything but way too expensive: $7 for 1 hot dog on a stick, fries and a drink
Manila Restaurant in Elk Grove (3) 8 pages Chinese and 2 pages Filipino (as in the language); need more Filipino and Pilipino food (because they serve Americanized food (Filipino) and not the traditional Pilipino food), pricy for small quantity
Tahoe Joe in Vacaville (5) Excellent. Delicious food for a good price and excellent service. I love their wood steaks and root beer.
BJ's Restaurant and Brewery in Vacaville (4) You get the quality for a good price. I was just sad that they didn't have any root beer. And the pizookie is scrumptious.
Nation's in Vacaville (5) Nation's has never let me down. I grew up with Nation's and I love coming here. They cook your food when you order it, the portions are huge, and the pies are the best.
Zachary's Pizza in Berkeley (3) OK but definitely not worth driving down to have some. Sucks they don't take credit card. But for the quality, the price is fine. Does not compare at all to Chicago.
We both feel very strongly on this issue. My edits to your page were over the top, intended to make a point, but they went way too far after the point was made. My sincere apologies. Users/JabberWokky
I like how you have some of the restaurant names listed in their native characters on this page along with the English words. The restaurants page should be done this way as well. Unfortunately, I tried to use Chinese characters (中文), but the wiki headline macro won't display them correctly; it just shows the escape sequences (& #20013; & #25991;). Users/CarlMcCabe CM this should work now.
I like the native language efforts, too. We're too isolated in the U.S. Users/GrahamFreeman
Too bad no local restaurants serve Roman food... oh well, scratch another thing off the list of Things Latin Speakers Can't Translate. Users/CindySperry
According to their website, Jamba Juice's name derives from the African word jama, meaning to celebrate. That's nice of them to be multicultural and stuff, but last time I checked African wasn't a language. I did a bit of digging and they say elsewhere it's Swahili. Jama typed into a http://africanlanguages.com/kdp/index.php?len Swahili dictionary translates to family or companions, so I can't corroborate their story. Interestingly, Jamba translates to fart. I'm certain it's syntactically wrong, but above I've haphazardly (mis)translated Jamba Juice to muta fart mnywo, where Jamba is fart, and juice is muta mnywo (sweet drink). Users/CraigBrozinsky
20060517 01:04:15 Hi JoAnna, I love this page! I have eaten at every restaurant in Davis, but never really had the energy to build a page about it. Users/ChrisLambertus
20060701 18:42:21 They didn't have root beer or they were out of it? BJ's brews their own root beer; it's delicious. Maybe they were just out of the batch? And I know it's also on the dessert menu as a float. Users/EdWins ES
They were out of the root beer :( Well, I did end up going like an hour before they closed. JR
20060829 00:57:13 what about Village Bakery? i recommend asking for pesto + tomato sauce :) Users/EmilyFrançais Frenchie
20060917 16:09:01 I'm glad to see so many Horchata recommendations. My plan for the next few years: find the best horchata in Davis. Users/MoTorres
20061007 10:22:59 The problem is that he was messing around with his bio page, which isn't his. His personal page is his baby. His bio page (as a public figure) belongs to the community. Users/WilliamLewis
20070126 13:25:23 I love this page :) Oh and if you go the other way (opposite Vacaville) you get to Folsom, which also has a BJ's Users/TusharRawat
20070129 02:23:49 you need to eat at the educated eatery once Users/StevenDaubert
20070222 10:42:03 i love this page. It inspired me to create my own a while back. I wish everyone would do something similar. It's fun to see how people rank different restaurants. I especially like the native characters Users/MattHh
20070302 00:03:16 Hey Joanna, come and visit my restaurant in Woodland. We serve Japanese and Hawaiian food. Our address is 10 N East St. Email me at rtaura@yahoo.com and I will send you a menu. Users/RobertTaura
20070302 09:44:04 Hey, JoAnna, you're very good at being sensitive to pigeonholing by race. I'm trying to avoid that in Ethnic and Cultural Organizations. Do you have any thoughts on how not to lay it out so divisively but at the same time indexed for people looking for a particular group? Users/JabberWokky
20070302 10:08:29 JoAnna see Ethnic and Cultural Organizations. There's a centralization going on, not deletion. Your content was also moved to Demographics as well. Users/JabberWokky
20070302 11:35:26 Thanks for the comment JoAnna. Certainly if a page exists, it shouldn't be deleted without some sort of discussion about its importance, as long as it is not blatant spam of course. I was mainly trying to curb the continuous deleting and reviving of some pages that was going on. Most of those thoughts could have been hashed out in a talk page before action was taken. My comments weren't directed at you specifically, just the situation in general. Thanks. Users/DavidGrundler
20070302 12:06:31 I am going to be away for the weekend so I don't have much time to do edits, but if possible can u find some ASA and NAS people interested in the wiki to add to the South Asian and Native American pages? Users/EricWu
20070302 12:33:24 I feel almost like the edit confirmation: Thank you for your attention to detail :) Users/EricWu
20070302 12:46:44 JoAnna, you don't need to be so paranoid about people deleting the Mixed Race entry. It's okay, we're all in this together. It actually hurts me that you'd think anyone would be against you in this. We're all working together to try and add more information. Content was moved around, but nothing you ever wrote was ever deleted. It's put me in an odd mood that you'd think I (or anyone here on the wiki) would even consider silencing your voice. This morning was about balancing and figuring out where to locate types of information, not silencing people or groups. Fantastic entry, by the way. You might want to even split out Mixed Race Issues at UC Davis in terms of scholarships and demographics... that's why I moved some of your earlier content to the Demographics entry, because it illustrates how poorly they cover some people. I'd still like your feedback on how best to organize Ethnic and Cultural Organizations so they don't exclude or minimize multicultural groups and organizations while still having a structure allowing people to look up various groups. Users/JabberWokky
20070302 14:47:51 You're right... I started jumping around and moving stuff around without explaining first and giving you a chance to agree or disagree with the edits. Please give Ethnic and Cultural Organizations/Talk a read to see what I was trying to do. Users/JabberWokky
20070420 09:22:12 the pilipino/filipino debate is opening up again on the restaurants page. do you remember where the original discussion of the issue came up, so we can link there and avoid future revert wars? Users/CraigBrozinsky
20070508 18:29:41 Just ask them at la crepe to cook it on the third griddle or second griddle a bit longer and they will for sure. Users/StevenDaubert P.S. have you been by Icekrimski cafe? Try the gelato and tell me how it stacks up to Cold Stone!
20070524 18:57:23 Awesome catch on Aggie Barbershop! http://cheshirehall.net/link/ws/smile.png It is not like it is in an obscure corner of town, either. Users/JabberWokky
20070524 23:13:18 aggie barber shop already exists, pretty sure Users/StevenDaubert
20070525 02:00:00 The aggie barber shop? Users/StevenDaubert
PS The aggie barbershop / the aggie barber shop <- there is the difference. I don't go around deleting pages without a reason; perhaps you should look closer at recent changes edit comments, and your question would have answered itself :D ... P.P.S. you still need to go to the educated eatery
JoAnna is right; I thought it already existed too, so I did a search and it didn't come up. I would have made the same duplication error. It's because the search has been acting up for a couple weeks. I know Philip has been working on fixing it, but it looks like there are still problems left. Users/JabberWokky
I do not think this was a bug. I searched for barber shop and found the result immediately. I'm guessing you searched for the single word barbershop.
I searched for the aggie barber shop and found it instantly. I know this 'cause I had to link it to carrier's carpet & floor care a couple days ago, because he is the one who cleans the store and I was the one setting up the wiki page for it, and this was of course before the redirect, but the redirect is fine. Users/StevenDaubert
Ah, yes... not a bug in search. A bug in the user. I did do a search for Aggie Barbershop. Regardless, it comes up under both now. Not a biggie. jw
20070704 13:26:34 I've seen a few changes you made, like in fluffy donuts. It seems that you rephrased things that were very well written but could possibly be misconstrued by sensitive people that are offended by raindrops, and replaced them with colorless descriptive narration. I think there is a vast canyon of difference between stylistic differences and the factually erroneous, with only one needing modification. And please don't pass wikipedia guidelines as some sort of rebuke, because they are to writing as the DMV is to driving. Users/ChristopherMckenzie
20081019 09:00:07 you should give the putah creek cafe a shot next time you're headed outside of Davis westbound... Users/StevenDaubert
20081026 22:14:41 It probably would have been impossible to try every restaurant anyway, since the latest ones have seemed to appear and disappear within a matter of months. Users/ElleWeber
|
{"hexsha": "39420f66d207ecd20ff13a9ffc84bf6760e258a7", "size": 17662, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/JoAnnaRich.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/JoAnnaRich.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/JoAnnaRich.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 110.3875, "max_line_length": 1110, "alphanum_fraction": 0.7716000453, "num_tokens": 4635}
|
import pandas as pd
import numpy as np
import copy
import math
import os
import sys
from tqdm import tqdm
from math import radians
import sklearn.metrics
from decouple import config
from config.construction_config import *
from heuristic.construction.insertion_generator import InsertionGenerator
from datetime import datetime, timedelta
from sklearn.metrics.pairwise import haversine_distances
pd.options.mode.chained_assignment = None
class ConstructionHeuristic:
def __init__(self, requests, vehicles):
self.vehicles = [i for i in range(vehicles)]
self.n = len(requests.index)
self.num_nodes_and_depots = 2 * vehicles + 2 * self.n
self.temp_requests = self.compute_pickup_time(requests)
self.requests = self.temp_requests.sort_values(
"Requested Pickup Time").reset_index(drop=True)
self.requests["Requested Pickup Time"] = pd.to_datetime(
self.requests["Requested Pickup Time"], format="%Y-%m-%d %H:%M:%S"
)
self.requests["Requested Dropoff Time"] = pd.to_datetime(
self.requests["Requested Dropoff Time"], format="%Y-%m-%d %H:%M:%S"
)
self.requests["Request Creation Time"] = pd.to_datetime(
self.requests["Request Creation Time"], format="%Y-%m-%d %H:%M:%S"
)
self.current_objective = timedelta(0)
self.T_ij = self.travel_matrix(self.requests)
self.introduced_vehicles = set()
self.infeasible_set = []
self.insertion_generator = InsertionGenerator(self)
self.preprocessed = self.preprocess_requests()
def compute_pickup_time(self, requests):
requests["Requested Pickup Time"] = pd.to_datetime(
requests["Requested Pickup Time"], format="%Y-%m-%d %H:%M:%S"
)
requests["Requested Dropoff Time"] = pd.to_datetime(
requests["Requested Dropoff Time"], format="%Y-%m-%d %H:%M:%S"
)
temp_T_ij = self.travel_matrix(requests)
nat_pickup = np.isnat(requests["Requested Pickup Time"])
for i in range(self.n):
if nat_pickup.iloc[i]:
requests["Requested Pickup Time"].iloc[i] = requests["Requested Dropoff Time"].iloc[i] - self.temp_travel_time(
i, self.n + i, True, temp_T_ij)
return requests
def temp_travel_time(self, to_id, from_id, fraction, temp_T_ij):
return timedelta(seconds=(1+F/2) * temp_T_ij[to_id, from_id]) if fraction else timedelta(seconds=temp_T_ij[to_id, from_id])
def preprocess_requests(self):
# link requests that are too close in time and space for the same vehicle to serve both requests:
travel_time = self.T_ij
request_time = self.requested_time_matrix()
P_ij = [set() for _ in range(self.n)]
for i in range(2 * self.n):
n_i = i - self.n if i >= self.n else i
for j in range(2 * self.n):
if request_time[i][j] is not None and timedelta(seconds=travel_time[i][j]) - 2*U_D > request_time[i][j]:
n_j = j - self.n if j >= self.n else j
P_ij[n_i].add(n_j+1)
P_ij[n_j].add(n_i+1)
return np.array(P_ij)
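    # Note (added): P_ij[i] collects the 1-based ids of requests whose
    # requested times are so close that the travel time between their
    # nodes (minus the allowed deviation 2*U_D) cannot be covered, i.e.
    # pairs of requests that one vehicle cannot feasibly serve together.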
def construct_initial(self):
rid = 1
unassigned_requests = self.requests.copy()
self.introduced_vehicles.add(self.vehicles.pop(0))
route_plan = [[]]
for i in tqdm(range(unassigned_requests.shape[0]), colour='#39ff14'):
# while not unassigned_requests.empty:
request = unassigned_requests.iloc[i]
route_plan, new_objective = self.insertion_generator.generate_insertions(
route_plan=route_plan, request=request, rid=rid)
# update current objective
self.current_objective = new_objective
rid += 1
return route_plan, self.current_objective, self.infeasible_set
def new_objective(self, new_routeplan, new_infeasible_set):
total_deviation = timedelta(minutes=0)
total_travel_time = timedelta(minutes=0)
total_infeasible = timedelta(minutes=len(new_infeasible_set))
for vehicle_route in new_routeplan:
if len(vehicle_route) >= 2:
diff = (pd.to_datetime(
vehicle_route[-1][1]) - pd.to_datetime(vehicle_route[0][1])) / pd.Timedelta(minutes=1)
total_travel_time += timedelta(minutes=diff)
for n, t, d, p, w, _ in vehicle_route:
if d is not None:
d = d if d > timedelta(0) else -d
pen_dev = d - P_S if d > P_S else timedelta(0)
total_deviation += pen_dev
updated = alpha*total_travel_time + beta * \
total_deviation + gamma*total_infeasible
return updated
def print_new_objective(self, new_routeplan, new_infeasible_set):
total_deviation = timedelta(minutes=0)
total_travel_time = timedelta(minutes=0)
total_infeasible = timedelta(minutes=len(new_infeasible_set))
for vehicle_route in new_routeplan:
diff = (pd.to_datetime(
vehicle_route[-1][1]) - pd.to_datetime(vehicle_route[0][1])) / pd.Timedelta(minutes=1)
total_travel_time += timedelta(minutes=diff)
for n, t, d, p, w, _ in vehicle_route:
if d is not None:
d = d if d > timedelta(0) else -d
total_deviation += d
print("Total travel time", total_travel_time)
print("Total deviation", total_deviation)
print("Total infeasible", total_infeasible)
def travel_matrix(self, df):
# Lat and lon for each request
origin_lat_lon = list(
zip(np.deg2rad(df["Origin Lat"]), np.deg2rad(df["Origin Lng"]))
)
destination_lat_lon = list(
zip(np.deg2rad(df["Destination Lat"]),
np.deg2rad(df["Destination Lng"]))
)
request_lat_lon = origin_lat_lon + destination_lat_lon
vehicle_lat_lon = []
# Origins for each vehicle
for i in range(len(self.vehicles)):
vehicle_lat_lon.append(
(radians(59.946829115276145), radians(10.779841653639243))
)
# Destinations for each vehicle
for i in range(len(self.vehicles)):
vehicle_lat_lon.append(
(radians(59.946829115276145), radians(10.779841653639243))
)
# Positions
lat_lon = request_lat_lon + vehicle_lat_lon
# Distance matrix
D_ij = haversine_distances(lat_lon, lat_lon) * 6371
# Travel time matrix
speed = 20
T_ij = np.empty(
shape=(self.num_nodes_and_depots,
self.num_nodes_and_depots), dtype=timedelta
)
for i in range(self.num_nodes_and_depots):
for j in range(self.num_nodes_and_depots):
T_ij[i][j] = (
timedelta(hours=(D_ij[i][j] / speed)
).total_seconds()
)
# rush hour modelling:
if not (df.iloc[0]["Requested Pickup Time"].weekday() == 5):
for k in range(self.n):
for l in range(self.n):
if df.iloc[k]["Requested Pickup Time"].hour >= 15 and df.iloc[k]["Requested Pickup Time"].hour < 17 and df.iloc[l]["Requested Pickup Time"].hour >= 15 and df.iloc[l]["Requested Pickup Time"].hour < 17:
T_ij[k][l] = T_ij[k][l]*R_F
T_ij[k+self.n][l] = T_ij[k+self.n][l]*R_F
T_ij[k][l+self.n] = T_ij[k][l+self.n]*R_F
T_ij[k+self.n][l+self.n] = T_ij[k+self.n][l+self.n]*R_F
return T_ij
def requested_time_matrix(self):
# Requested time matrix
R_ij = np.empty(
shape=(2*self.n,
2*self.n), dtype=timedelta
)
nat_pickup = np.isnat(self.requests["Requested Pickup Time"])
nat_dropoff = np.isnat(self.requests["Requested Dropoff Time"])
for i in range(2*self.n):
for j in range(2*self.n):
if i == j:
continue
if j == i + self.n:
continue
if i == j - self.n:
continue
if i < self.n:
i_time = self.requests.iloc[i]["Requested Pickup Time"] if not nat_pickup.iloc[
i] else self.requests.iloc[i]["Requested Dropoff Time"] - timedelta(seconds=self.T_ij[i][i+self.n])
else:
i_time = self.requests.iloc[i - self.n]["Requested Dropoff Time"] if not nat_dropoff.iloc[
i - self.n] else self.requests.iloc[i - self.n]["Requested Pickup Time"] + timedelta(seconds=self.T_ij[i][i-self.n])
if j < self.n:
j_time = self.requests.iloc[j]["Requested Pickup Time"] if not nat_pickup.iloc[
j] else self.requests.iloc[j]["Requested Dropoff Time"] - timedelta(seconds=self.T_ij[j][j+self.n])
else:
j_time = self.requests.iloc[j - self.n]["Requested Dropoff Time"] if not nat_dropoff.iloc[
j - self.n] else self.requests.iloc[j - self.n]["Requested Pickup Time"] + timedelta(seconds=self.T_ij[j][j-self.n])
if pd.to_datetime(j_time) >= pd.to_datetime(i_time):
R_ij[i][j] = (pd.to_datetime(j_time) -
pd.to_datetime(i_time))
else:
R_ij[i][j] = None
return R_ij
def travel_time(self, to_id, from_id, fraction):
return timedelta(seconds=(1+F/2) * self.T_ij[to_id, from_id]) if fraction else timedelta(seconds=self.T_ij[to_id, from_id])
def get_max_travel_time(self, to_id, from_id):
return timedelta(seconds=(1+F) * self.T_ij[to_id, from_id])
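

# ----------------------------------------------------------------------
# Illustration (added; not part of the original heuristic): a minimal,
# self-contained sketch of the travel-time computation performed in
# travel_matrix() above. haversine_distances expects coordinates in
# radians and returns unit-sphere distances, which are scaled by the
# Earth's radius (~6371 km) and a fixed vehicle speed of 20 km/h; the
# coordinates below are arbitrary example points.
if __name__ == "__main__":
    points = [
        (radians(59.9468), radians(10.7798)),  # example origin (Oslo area)
        (radians(59.9115), radians(10.7579)),  # example destination
    ]
    D = haversine_distances(points, points) * 6371  # distances in km
    speed = 20  # km/h, the same assumption as in travel_matrix()
    travel_seconds = D[0][1] / speed * 3600
    print(f"distance: {D[0][1]:.2f} km, travel time: {travel_seconds:.0f} s")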
|
{"hexsha": "a87eaef60b44cbff43f1daefc269175e17e91e63", "size": 9993, "ext": "py", "lang": "Python", "max_stars_repo_path": "heuristic/construction/construction.py", "max_stars_repo_name": "annalunde/master", "max_stars_repo_head_hexsha": "2552d43713e8ebca0b0e57bc5bebd1eaeeac1875", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-17T15:40:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T15:40:00.000Z", "max_issues_repo_path": "heuristic/construction/construction.py", "max_issues_repo_name": "annalunde/master", "max_issues_repo_head_hexsha": "2552d43713e8ebca0b0e57bc5bebd1eaeeac1875", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "heuristic/construction/construction.py", "max_forks_repo_name": "annalunde/master", "max_forks_repo_head_hexsha": "2552d43713e8ebca0b0e57bc5bebd1eaeeac1875", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0732758621, "max_line_length": 221, "alphanum_fraction": 0.5862103472, "include": true, "reason": "import numpy", "num_tokens": 2294}
|
program phaml_master
use phaml
implicit none
type(phaml_solution_type) :: soln
call phaml_create(soln,nproc=4)
call phaml_solve_pde(soln,print_grid_when=FINAL,print_grid_who=MASTER, &
print_error_when=FINAL,print_error_who=MASTER, &
print_errest_what=L2_ERREST, &
term_L2_err=2.0e-4_my_real)
call phaml_destroy(soln)
end program phaml_master
|
{"hexsha": "edb2001170592348eed14be90669d68f3ce6c946", "size": 405, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "testdir/test_termination/test07.f90", "max_stars_repo_name": "qsnake/phaml", "max_stars_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_stars_repo_licenses": ["mpich2"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-07T15:46:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-07T15:46:34.000Z", "max_issues_repo_path": "testdir/test_termination/test07.f90", "max_issues_repo_name": "qsnake/phaml", "max_issues_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_issues_repo_licenses": ["mpich2"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testdir/test_termination/test07.f90", "max_forks_repo_name": "qsnake/phaml", "max_forks_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_forks_repo_licenses": ["mpich2"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.75, "max_line_length": 72, "alphanum_fraction": 0.7135802469, "num_tokens": 111}
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
def set_serialize_factor(serialize_factor):
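    # Note (added): setting 'serialize_factor' on the preceding matmul op
    # asks the IPU backend to split the matmul into that many sequential
    # slices, trading throughput for lower peak memory.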
main_prog = paddle.static.default_main_program()
op = main_prog.current_block().ops[-1]
op._set_attr('serialize_factor', serialize_factor)
@unittest.skipIf(not paddle.is_compiled_with_ipu(),
"core is not compiled with IPU")
class TestBase(IPUOpTest):
def setUp(self):
self.set_atol()
self.set_training()
self.set_data_feed()
self.set_feed_attr()
self.set_op_attrs()
def set_data_feed(self):
self.feed = {
"x": np.random.uniform(size=[16, 32]).astype('float32'),
"y": np.random.uniform(size=[32, 16]).astype('float32'),
}
def set_feed_attr(self):
self.feed_shape = [x.shape for x in self.feed.values()]
self.feed_list = list(self.feed.keys())
self.feed_dtype = [x.dtype for x in self.feed.values()]
def set_op_attrs(self):
self.attrs = {"transpose_x": False, "transpose_y": False}
@IPUOpTest.static_graph
def build_model(self):
x = paddle.static.data(
name=self.feed_list[0],
shape=self.feed_shape[0],
dtype=self.feed_dtype[0])
y = paddle.static.data(
name=self.feed_list[1],
shape=self.feed_shape[1],
dtype=self.feed_dtype[1])
        # a decorator may be the best choice here, but that would require modifying the API
out = paddle.matmul(x, y, **self.attrs)
set_serialize_factor(4)
self.fetch_list = [out.name]
def run_model(self, run_ipu):
self.build_model()
if run_ipu:
place = paddle.IPUPlace()
else:
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(self.startup_prog)
if run_ipu:
feed_list = self.feed_list
ipu_strategy = paddle.static.IpuStrategy()
ipu_strategy.set_graph_config(is_training=self.is_training)
program = paddle.static.IpuCompiledProgram(
self.main_prog,
ipu_strategy=ipu_strategy).compile(feed_list, self.fetch_list)
else:
program = self.main_prog
result = exe.run(program, feed=self.feed, fetch_list=self.fetch_list)
return result[0]
def test_base(self):
res0 = self.run_model(False)
res1 = self.run_model(True)
self.assertTrue(
np.allclose(
res0.flatten(), res1.flatten(), atol=self.atol))
self.assertTrue(res0.shape == res1.shape)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "0a273e91dd5716b14cbd942d248fa51afcfa01d7", "size": 3334, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py", "max_stars_repo_name": "RangeKing/Paddle", "max_stars_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-08-15T07:02:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-24T09:34:00.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py", "max_issues_repo_name": "RangeKing/Paddle", "max_issues_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/paddle/fluid/tests/unittests/ipu/test_matmul_serilize_ipu.py", "max_forks_repo_name": "RangeKing/Paddle", "max_forks_repo_head_hexsha": "2d87300809ae75d76f5b0b457d8112cb88dc3e27", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6767676768, "max_line_length": 78, "alphanum_fraction": 0.6415716857, "include": true, "reason": "import numpy", "num_tokens": 754}
|
//
// Copyright (c) 2019-2020 Kris Jusiak (kris at jusiak dot net)
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/ut.hpp>
#include <stdexcept>
namespace ut = boost::ut;
namespace cfg {
class runner {
public:
template <class... Ts>
auto on(ut::events::test<Ts...> test) {
test();
}
template <class... Ts>
auto on(ut::events::skip<Ts...>) {}
template <class TExpr>
auto on(ut::events::assertion<TExpr>) -> bool {
return true;
}
auto on(ut::events::fatal_assertion) {}
template <class TMsg>
auto on(ut::events::log<TMsg>) {}
};
} // namespace cfg
template <>
auto ut::cfg<ut::override> = cfg::runner{};
int main() {
using namespace ut;
"should be ignored"_test = [] {
expect(throws([] { throw std::runtime_error{"exception!"}; }));
expect(1_i == 2) << "doesn't fire";
};
}
|
{"hexsha": "26f03ee206914117579db79af843c71b8cda1506", "size": 953, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example/cfg/runner.cpp", "max_stars_repo_name": "ambushed/ut", "max_stars_repo_head_hexsha": "248df4dd091781b45b2cde7332774226d6a459b3", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 567.0, "max_stars_repo_stars_event_min_datetime": "2020-06-30T20:16:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:22:41.000Z", "max_issues_repo_path": "example/cfg/runner.cpp", "max_issues_repo_name": "ambushed/ut", "max_issues_repo_head_hexsha": "248df4dd091781b45b2cde7332774226d6a459b3", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 85.0, "max_issues_repo_issues_event_min_datetime": "2020-07-01T02:21:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T22:12:35.000Z", "max_forks_repo_path": "example/cfg/runner.cpp", "max_forks_repo_name": "ambushed/ut", "max_forks_repo_head_hexsha": "248df4dd091781b45b2cde7332774226d6a459b3", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 63.0, "max_forks_repo_forks_event_min_datetime": "2020-07-08T06:47:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T15:08:34.000Z", "avg_line_length": 21.6590909091, "max_line_length": 67, "alphanum_fraction": 0.6316894019, "num_tokens": 284}
|
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.models import load_model
from keras.utils import to_categorical
from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense, Flatten
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
import numpy as np
# Load the saved model
model = load_model('model1.h5')
# Run the images through inception-resnet and extract the features without the classification layer
IR2 = InceptionResNetV2(weights='imagenet', include_top=False)
# Initialize the function that will create our vocabulary
tokenizer = Tokenizer(filters='', split=" ", lower=False)
# Read a document and return a string
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
# Load all the HTML files
X = []
all_filenames = listdir('html/')
all_filenames.sort()
for filename in all_filenames:
X.append(load_doc('html/'+filename))
# Create the vocabulary from the html files
tokenizer.fit_on_texts(X)
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
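# Note (added): this linear scan over word_index is O(vocabulary size);
# newer Keras versions also expose the reverse mapping directly as
# tokenizer.index_word, which could replace this helper.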
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'START'
    # iterate up to a fixed maximum number of generated tokens
for i in range(900):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0][-100:]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo,sequence], verbose=0)
# convert probability to integer
yhat = np.argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# Print the prediction
print(' ' + word, end='')
# stop if we predict the end of the sequence
if word == 'END':
break
return
# TODO: Create a separate main.py with below:
# Load and image, preprocess it for IR2, extract features and generate the HTML
test_image = img_to_array(load_img('images/90.jpg', target_size=(299, 299)))
test_image = np.array(test_image, dtype=float)
test_image = preprocess_input(test_image)
test_features = IR2.predict(np.array([test_image]))
generate_desc(model, tokenizer, np.array(test_features), 100)
|
{"hexsha": "acfc27c9ba7d6a34d4aa7bd82ec9887e791cedc4", "size": 2869, "ext": "py", "lang": "Python", "max_stars_repo_path": "local/HTML/HTML_write.py", "max_stars_repo_name": "landongw/pdf-to-code", "max_stars_repo_head_hexsha": "fa7612ec7b1364310d4686b731773cd4e201c7c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-11-14T13:21:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-04T04:54:32.000Z", "max_issues_repo_path": "local/HTML/HTML_write.py", "max_issues_repo_name": "landongw/pdf-to-code", "max_issues_repo_head_hexsha": "fa7612ec7b1364310d4686b731773cd4e201c7c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "local/HTML/HTML_write.py", "max_forks_repo_name": "landongw/pdf-to-code", "max_forks_repo_head_hexsha": "fa7612ec7b1364310d4686b731773cd4e201c7c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8625, "max_line_length": 117, "alphanum_fraction": 0.7148832346, "include": true, "reason": "import numpy,from numpy", "num_tokens": 646}
|
import logging
import anndata as ad
import scipy.spatial
import scipy.sparse
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.neighbors import NearestNeighbors
## VIASH START
# Anything within this block will be removed by `viash` and will be
# replaced with the parameters as specified in your config.vsh.yaml.
par = {
"input_train_mod1": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod1.h5ad",
"input_train_mod2": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod2.h5ad",
"input_train_sol": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_sol.h5ad",
"input_test_mod1": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod1.h5ad",
"input_test_mod2": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod2.h5ad",
"output": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.prediction.h5ad",
"n_svd": 100,
}
## VIASH END
logging.basicConfig(level=logging.INFO)
logging.info("Load datasets")
input_train_mod1 = ad.read_h5ad(par["input_train_mod1"])
input_train_mod2 = ad.read_h5ad(par["input_train_mod2"])
# input_train_sol = ad.read_h5ad(par["input_train_sol"])
input_test_mod1 = ad.read_h5ad(par["input_test_mod1"])
input_test_mod2 = ad.read_h5ad(par["input_test_mod2"])
# This method runs PCA on each modality individually, then uses the Procrustes method to identify
# a linear transform that best superimposes the points from modality 1 onto modality 2.
# concatenate train and test data
mod1 = ad.concat(
{
"train": input_train_mod1,
"test": input_test_mod1
},
index_unique="-",
label="group"
)
mod2 = ad.concat(
{
"train": input_train_mod2,
"test": input_test_mod2
},
index_unique="-",
label="group"
)
# Create helper views to access the test data later
mod1te = mod1[mod1.obs["group"] == "test", :]
mod2te = mod2[mod2.obs["group"] == "test", :]
logging.info("Running PCA")
n_svd = min(par["n_svd"], mod1.n_obs, mod2.n_obs, mod1.n_vars, mod1.n_vars)
# Use TruncatedSVD for fast decomposition of the data
mod1.obsm["X_pca"] = TruncatedSVD(n_svd).fit_transform(mod1.X)
mod2.obsm["X_pca"] = TruncatedSVD(n_svd).fit_transform(mod2.X)
logging.info("Running Procrustes Alignment")
# This function takes in two matrices of points A and B, standardizes both, and applies a linear to
# matrix B to minimize the disparity measured as the sum of the squares of the pointwise distances
# between the two input datasets
mod1.obsm["X_pro"], mod2.obsm["X_pro"], disparity = scipy.spatial.procrustes(
mod1.obsm["X_pca"],
mod2.obsm["X_pca"],
)
logging.info("> Disparity value is: %0.3f" % disparity)
logging.info("Perform nearest neighbors")
# To get the matching matrix, for each point in mod1_test, we take the 1000 nearest neighbors of that
# point in the transformed mod2_test dataset
n_neighbors = min(1000, mod1te.n_obs, mod1te.n_vars, mod2te.n_obs, mod2te.n_vars)
nn = NearestNeighbors(n_neighbors=n_neighbors).fit(mod1te.obsm["X_pro"])
distances, indices = nn.kneighbors(X=mod2te.obsm["X_pro"])
logging.info("Create pairing matrix")
# Translate the neighborhood assignments to a pairing matrix that is (n_obs, n_obs)
# NOTE: `pairing_matrix` must have NO MORE than 1000*n_obs non-zero entries for fast metric computation
ind_i = np.tile(np.arange(mod1te.n_obs), (n_neighbors, 1)).T.flatten()
ind_j = indices.flatten()
ind_dist = distances.flatten()
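# Convert distances into similarity-like weights (added note): subtracting
# each distance from twice the maximum keeps all weights positive and makes
# nearer neighbours score higher.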
ind_x = 2 * max(ind_dist) - ind_dist
pairing_matrix = scipy.sparse.csr_matrix(
(ind_x, (ind_i, ind_j)),
shape=(input_test_mod1.n_obs, input_test_mod2.n_obs)
)
# row normalise
prob_matrix = normalize(pairing_matrix, norm="l1")
print("Write prediction output")
prediction = ad.AnnData(
X=prob_matrix,
uns={
"dataset_id": input_train_mod1.uns["dataset_id"],
"method_id": "baseline_procrustes_knn"
}
)
prediction.write_h5ad(par["output"])
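
# ----------------------------------------------------------------------
# Illustration (added; not part of the original pipeline): on toy data,
# scipy.spatial.procrustes standardizes both point sets and reports the
# residual disparity, which is ~0 when the sets differ only by rotation,
# translation and uniform scaling.
_a = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
_theta = np.pi / 4
_rot = np.array([[np.cos(_theta), -np.sin(_theta)],
                 [np.sin(_theta), np.cos(_theta)]])
_b = 3.0 * _a @ _rot + 5.0  # rotated, scaled and shifted copy of _a
_, _, _toy_disparity = scipy.spatial.procrustes(_a, _b)
logging.info("> Toy disparity (expected ~0): %0.3e" % _toy_disparity)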
|
{"hexsha": "f8bad3cffd19bccc2ca84f1754e8d8e3c711d7f9", "size": 4253, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/match_modality/methods/baseline_procrustes_knn/script.py", "max_stars_repo_name": "dburkhardt/neurips2021_multimodal_viash", "max_stars_repo_head_hexsha": "e3449af07749bac6faf32613f91fd149a23250a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-08-04T23:20:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-16T09:12:06.000Z", "max_issues_repo_path": "src/match_modality/methods/baseline_procrustes_knn/script.py", "max_issues_repo_name": "dburkhardt/neurips2021_multimodal_viash", "max_issues_repo_head_hexsha": "e3449af07749bac6faf32613f91fd149a23250a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-08-03T09:17:46.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-16T12:31:54.000Z", "max_forks_repo_path": "src/match_modality/methods/baseline_procrustes_knn/script.py", "max_forks_repo_name": "dburkhardt/neurips2021_multimodal_viash", "max_forks_repo_head_hexsha": "e3449af07749bac6faf32613f91fd149a23250a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-08-04T23:21:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T09:13:27.000Z", "avg_line_length": 39.0183486239, "max_line_length": 142, "alphanum_fraction": 0.766282624, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1145}
|
#!/usr/bin/env python
import os
import sys
import numpy as np
from setuptools import setup, Extension
# Get the version number from ModelInterface.h
__version__ = None
with open("cthreeML/ModelInterface.h") as f:
for line in f:
        if line.find("#define INTERFACE_VERSION") == 0:
            __version__ = "%i.0.3" % int(line.split(" ")[2])
            print(__version__)
            break
if __version__ is None:
raise RuntimeError("Could not probe version from ModelInterface.h")
# Now a global __version__ is available
# A list to store final messages to print at the end
final_messages = []
# Probe whether the user has specified its own boost directory through the BOOSTROOT
# environment variable
boost_root = os.environ.get("BOOSTROOT")
if boost_root:
# Check that the directory provided actually exists
if not os.path.exists(boost_root):
print("\nERROR: the directory %s specified in BOOSTROOT does not exist!" % boost_root)
sys.exit(-1)
# The user want to override pre-defined location of boost
print("\n\n **** Using boost.python from the env. variable $BOOSTROOT (%s)" % boost_root)
include_dirs = [os.path.join(boost_root, 'include')]
library_dirs = [os.path.join(boost_root, 'lib')]
# Check that the include and library directories exist
if not os.path.exists(include_dirs[0]):
print("\nERROR: the include directory %s for boost.python does not exist!" % include_dirs[0])
sys.exit(-1)
if not os.path.exists(library_dirs[0]):
print("\nERROR: the library directory %s for boost.python does not exist!" % library_dirs[0])
sys.exit(-1)
final_messages.append("Used boost.python from the env. variable BOOSTROOT")
final_messages.append(" Include dir: %s" % include_dirs)
final_messages.append(" Library dir: %s" % library_dirs)
else:
include_dirs = []
library_dirs = []
final_messages.append("Using boost.python from the system path.")
# Now add the numpy headers
include_dirs.append(np.get_include())
# Configure the variables to build the external module with the C/C++ wrapper
ext_modules_configuration = [
Extension("cthreeML.pyModelInterfaceCache",
["cthreeML/pyToCppModelInterfaceCache.cxx",],
libraries=["boost_python"],
include_dirs=include_dirs,
library_dirs=library_dirs,
extra_compile_args = [])]
headers_configuration = ["cthreeML/ModelInterface.h",
"cthreeML/pyToCppModelInterfaceCache.h"]
setup(
name="cthreeML",
packages=["cthreeML"],
version=__version__,
description="The C/C++ bridge for the Multi-Mission Maximum Likelihood framework (github.com/giacomov/3ML)",
long_description="The C/C++ bridge for the Multi-Mission Maximum Likelihood framework (github.com/giacomov/3ML)",
license='BSD-3',
author='Giacomo Vianello',
author_email='giacomo.vianello@gmail.com',
url='https://github.com/giacomov/cthreeML',
download_url='https://github.com/giacomov/cthreeML/archive/%s' % __version__,
keywords=['Likelihood', 'Multi-mission', '3ML', 'HAWC', 'Fermi', 'HESS', 'joint', 'fit', 'bayesian',
'multi-wavelength'],
classifiers=[],
ext_modules=ext_modules_configuration,
headers=headers_configuration,
install_requires=[])
# Now print the final messages if there are any
if len(final_messages) > 0:
print("\n#############")
print("FINAL NOTES:")
print("#############")
print("\n".join(final_messages))
|
{"hexsha": "8af020fff44ae707440bda7c0d2e97e7b64b4f58", "size": 3672, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "giacomov/c_threeML", "max_stars_repo_head_hexsha": "1bce7ba11309ebf327720f3a614c578ed85d4726", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "setup.py", "max_issues_repo_name": "giacomov/c_threeML", "max_issues_repo_head_hexsha": "1bce7ba11309ebf327720f3a614c578ed85d4726", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "setup.py", "max_forks_repo_name": "giacomov/c_threeML", "max_forks_repo_head_hexsha": "1bce7ba11309ebf327720f3a614c578ed85d4726", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-10-16T11:14:59.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-10T14:02:11.000Z", "avg_line_length": 26.2285714286, "max_line_length": 117, "alphanum_fraction": 0.6565904139, "include": true, "reason": "import numpy", "num_tokens": 848}
|
from dipsim import util
from dipsim import multiframe
import numpy as np
import matplotlib.pyplot as plt
import os; import time; start = time.time(); print('Running...')
# Main input parameters
row_labels = ['Geometry', r'$\sigma_{\Omega}$']
col_labels = [r'Single-view (NA${}_\textrm{ill}$=0, NA${}_\textrm{det}$=1.1)', r'Dual-view oblique symmetric widefield (NA=0.6, $\beta$=53${}^{\circ}$)', 'Dual-view orthogonal symmetric light-sheet (NA=0.8)', 'Dual-view orthogonal symmetric light-sheet (NA=0.94)']
n_pts = 10000
n_cols = len(col_labels)
n_rows = 2
inch_fig = 5
dpi = 200
m1 = multiframe.MultiFrameMicroscope(ill_thetas=[0], det_thetas=[0],
ill_nas=[0.8], det_nas=[1.1],
ill_types=['sheet'], det_types=['lens'],
colors=['(1,0,0)'], n_frames=4,
n_pts=n_pts, max_photons=1000, n_samp=1.33)
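# Tilt of each oblique view from the optical axis, via NA = n*sin(theta) with
# n = n_samp = 1.33: angle = pi/2 - arcsin(NA/n).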
angle = np.pi/2 - np.arcsin(0.6/1.33)
m2 = multiframe.MultiFrameMicroscope(ill_thetas=[angle, -angle], det_thetas=[-angle, angle],
ill_nas=2*[0.6], det_nas=2*[0.6],
ill_types=2*['wide'], det_types=2*['lens'],
colors=['(1,0,0)','(0,0,1)'], n_frames=4,
n_pts=n_pts, max_photons=500, n_samp=1.33)
m3 = multiframe.MultiFrameMicroscope(ill_thetas=[np.pi/4, -np.pi/4], det_thetas=[-np.pi/4, np.pi/4],
ill_nas=2*[0], det_nas=2*[0.8],
ill_types=2*['sheet'], det_types=2*['lens'],
colors=['(1,0,0)','(0,0,1)'], n_frames=4,
n_pts=n_pts, max_photons=500, n_samp=1.33)
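# m4: same orthogonal dual-view geometry as m3, but with the detection NA of a
# full 45-degree cone in the medium: 1.33*sin(pi/4) ~ 0.94.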
m4 = multiframe.MultiFrameMicroscope(ill_thetas=[np.pi/4, -np.pi/4], det_thetas=[-np.pi/4, np.pi/4],
ill_nas=2*[0], det_nas=2*[1.33*np.sin(np.pi/4)],
ill_types=2*['sheet'], det_types=2*['lens'],
colors=['(1,0,0)','(0,0,1)'], n_frames=4,
n_pts=n_pts, max_photons=500, n_samp=1.33)
experiments = [m1, m2, m3, m4]
# Generate axes
size = (inch_fig*n_cols, inch_fig*n_rows)
fig, axs = plt.subplots(n_rows, n_cols, figsize=size)
plt.subplots_adjust(wspace=0.2, hspace=0)
if len(col_labels) == 1:
axs = np.expand_dims(axs, 1)
caxs = util.generate_caxs(axs)
# Compute and plot on axes
for i, exp in enumerate(experiments):
exp.calc_estimation_stats()
scene_string = exp.scene_string()
util.draw_scene(scene_string, my_ax=axs[0,i], dpi=dpi)
util.plot_sphere(directions=exp.directions, data=exp.sa_uncert,
color_norm='log', linthresh=1e-4,
color_min=8e-4, color_max=1e0,
my_ax=axs[1,i], my_cax=caxs[1,i])
caxs[0,i].axis('off')
data = exp.sa_uncert
maximum = np.max(data)
med = np.median(data)
mad = np.median(np.abs(data - med))
print(str(col_labels[i]) + ' {:.2e}'.format(maximum) + ' {:.2e}'.format(med) + ' {:.2e}'.format(mad))
# Label axes and save
util.label_rows_and_cols(axs, row_labels, col_labels, row_pos=(-0.2, 0.5))
print('Saving final figure.')
fig.savefig('compare-microscopes.pdf', dpi=dpi)
print('Total time: '+str(np.round(time.time() - start, 2)))
os.system('say "done"')
|
{"hexsha": "97e4c7b867663f394f92d2b8a7068b993328a0d4", "size": 3453, "ext": "py", "lang": "Python", "max_stars_repo_path": "paper/figures/compare-microscopes.py", "max_stars_repo_name": "talonchandler/dipsim", "max_stars_repo_head_hexsha": "04904871924276fd1662ca15b7224166d271c0d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "paper/figures/compare-microscopes.py", "max_issues_repo_name": "talonchandler/dipsim", "max_issues_repo_head_hexsha": "04904871924276fd1662ca15b7224166d271c0d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "paper/figures/compare-microscopes.py", "max_forks_repo_name": "talonchandler/dipsim", "max_forks_repo_head_hexsha": "04904871924276fd1662ca15b7224166d271c0d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6296296296, "max_line_length": 262, "alphanum_fraction": 0.5508253692, "include": true, "reason": "import numpy", "num_tokens": 1029}
|
import numpy as np
import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
#%matplotlib nbagg
workdir = "calibration-pics/"
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)
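# CharucoBoard_create(squaresX, squaresY, squareLength, markerLength, dictionary):
# a 7x5 board whose markers are 0.8x the square size (units are arbitrary here).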
imboard = board.draw((6000, 6000))
cv2.imwrite(workdir + "chessboard.jpg", imboard)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.imshow(imboard, cmap = mpl.cm.gray, interpolation = "nearest")
ax.axis("off")
plt.show()
|
{"hexsha": "405f7b5897df6e7117149df14e3802c79745a5da", "size": 564, "ext": "py", "lang": "Python", "max_stars_repo_path": "camera-calibration/charuco-maker.py", "max_stars_repo_name": "SamyBarras/ocoda", "max_stars_repo_head_hexsha": "fc06fc5604500a0c31cb4ab8ddecb88e3fd282e5", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "camera-calibration/charuco-maker.py", "max_issues_repo_name": "SamyBarras/ocoda", "max_issues_repo_head_hexsha": "fc06fc5604500a0c31cb4ab8ddecb88e3fd282e5", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "camera-calibration/charuco-maker.py", "max_forks_repo_name": "SamyBarras/ocoda", "max_forks_repo_head_hexsha": "fc06fc5604500a0c31cb4ab8ddecb88e3fd282e5", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6842105263, "max_line_length": 66, "alphanum_fraction": 0.7606382979, "include": true, "reason": "import numpy", "num_tokens": 177}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import time, os, codecs, json
import numpy as np
from utils.tools import DatasetGenerator, ResultWriter, create_masks, generate_masks
from utils.CustomSchedule import CustomSchedule
from utils.EarlystopHelper import EarlystopHelper
from utils.Metrics import MAE, MAPE
from models import Stream_T, STSAN
import tensorflow as tf
from data_parameters import data_parameters
class TrainModel:
def __init__(self, model_index, args):
""" use mirrored strategy for distributed training """
self.strategy = tf.distribute.MirroredStrategy()
strategy = self.strategy
print('Number of GPU devices: {}'.format(strategy.num_replicas_in_sync))
param = data_parameters[args.dataset]
self.param = param
self.model_index = model_index
if args.test_model:
args.n_layer = 1
args.d_model = 8
args.dff = 32
args.n_head = 1
args.conv_layer = 1
args.conv_filter = 8
args.n_w = 0
args.n_d = 1
args.n_wd_times = 1
args.n_p = 0
args.n_before = 0
args.l_half = 3
self.args = args
self.args.l_hist = (args.n_w + args.n_d) * args.n_wd_times + args.n_p
self.GLOBAL_BATCH_SIZE = args.BATCH_SIZE * strategy.num_replicas_in_sync
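        # Each replica consumes args.BATCH_SIZE samples per step; the losses
        # below are averaged over this global batch via tf.nn.compute_average_loss.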
self.dataset_generator = DatasetGenerator(args.dataset,
self.GLOBAL_BATCH_SIZE,
args.n_w,
args.n_d,
args.n_wd_times,
args.n_p,
args.n_before,
args.l_half,
args.pre_shuffle,
args.test_model)
self.es_patiences = [5, args.es_patience]
self.es_threshold = args.es_threshold
self.data_max = param['data_max'][:param['pred_type']]
def pretrain(self, train_dataset, val_dataset):
strategy = self.strategy
args = self.args
param = self.param
test_model = args.test_model
test_threshold_t = 2 / param['data_max'][2]
result_writer = ResultWriter("results/{}.txt".format(self.model_index))
test_dataset = None
def tf_summary_scalar(summary_writer, name, value, step):
with summary_writer.as_default():
tf.summary.scalar(name, value, step=step)
def print_verbose_t(epoch, final_test):
if final_test:
template_rmse = "Transition RMSE: {:.2f}({:.6f})\n".format \
(rmse_test_t.result() * param['data_max'][2], rmse_test_t.result())
template = "Final:\n" + template_rmse
result_writer.write(template)
else:
template = "Epoch {} Transition RMSE: {:.6f}\n".format(epoch + 1, rmse_test_t.result())
result_writer.write('Validation Result (Min-Max Norm, filtering out trivial grids):\n' + template)
loss_object = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
def loss_function(real, pred):
loss_ = loss_object(real, pred)
return tf.nn.compute_average_loss(loss_, global_batch_size=self.GLOBAL_BATCH_SIZE)
rmse_train_t = tf.keras.metrics.RootMeanSquaredError(dtype=tf.float32)
rmse_test_t = tf.keras.metrics.RootMeanSquaredError(dtype=tf.float32)
learning_rate = CustomSchedule(args.d_model, args.warmup_steps)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
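        # CustomSchedule presumably implements the Transformer warmup/inverse-sqrt
        # learning-rate schedule; the Adam betas and epsilon match that setup.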
stream_t = Stream_T(
args.n_layer,
args.d_model,
args.n_head,
args.dff,
args.conv_layer,
args.conv_filter,
args.l_hist,
args.l_half,
args.r_d)
def train_step(enc_ft, enc_ex, dec_ft, dec_ex, y_t):
enc_inp = enc_ft[..., 2:]
dec_inp = dec_ft[..., 2:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(enc_inp, dec_inp)
with tf.GradientTape() as tape:
pred_t, _ = stream_t(enc_inp, enc_ex, dec_inp, dec_ex, True,
enc_padding_mask, combined_mask, dec_padding_mask)
loss_t = loss_function(y_t, pred_t)
gradients = tape.gradient(loss_t, stream_t.trainable_variables)
optimizer.apply_gradients(zip(gradients, stream_t.trainable_variables))
rmse_train_t(y_t, pred_t)
return loss_t
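        # Run train_step on every replica and sum the per-replica losses (each
        # is already scaled by the global batch size in loss_function).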
@tf.function
def distributed_train_step(enc_ft, enc_ex, dec_ft, dec_ex, y_t):
per_replica_losses = strategy.run(train_step, args=(enc_ft, enc_ex, dec_ft, dec_ex, y_t,))
return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
def test_step(enc_ft, enc_ex, dec_ft, dec_ex, y_t):
enc_inp = enc_ft[..., 2:]
dec_inp = dec_ft[..., 2:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(enc_inp, dec_inp)
pred_t, _ = stream_t(enc_inp, enc_ex, dec_inp, dec_ex, False,
enc_padding_mask, combined_mask, dec_padding_mask)
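            # Filter out trivial grids: only targets above the normalized
            # threshold contribute to the validation RMSE.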
mask = tf.where(tf.math.greater(y_t, test_threshold_t))
masked_real = tf.gather_nd(y_t, mask)
masked_pred = tf.gather_nd(pred_t, mask)
rmse_test_t(masked_real, masked_pred)
@tf.function
def distributed_test_step(enc_ft, enc_ex, dec_ft, dec_ex, y_t):
return strategy.run(test_step, args=(enc_ft, enc_ex, dec_ft, dec_ex, y_t,))
def evaluate(eval_dataset, epoch, verbose, final_test=False):
rmse_test_t.reset_states()
for (batch, (inp, tar)) in enumerate(eval_dataset):
enc_ft = inp["enc_inp_ft"]
enc_ex = inp["enc_inp_ex"]
dec_ft = inp["dec_inp_ft"]
dec_ex = inp["dec_inp_ex"]
y_t = tar["y_t"]
distributed_test_step(
enc_ft, enc_ex, dec_ft, dec_ex, y_t)
if verbose:
print_verbose_t(epoch, final_test)
""" Start training... """
built = False
es_flag_t = False
check_flag_t = False
es_helper_t = EarlystopHelper('t', self.es_patiences, self.es_threshold)
summary_writer = tf.summary.create_file_writer(
os.environ['HOME'] + '/tensorboard/stsan/{}'.format(self.model_index))
step_cnt = 0
last_epoch = 0
checkpoint_path = "./checkpoints/stream_t/{}".format(self.model_index)
ckpt = tf.train.Checkpoint(Stream_T=stream_t, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path,
max_to_keep=(args.es_patience + 1))
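        # Resume from the latest checkpoint if a previous run left a record; the
        # JSON file carries bookkeeping that tf.train.Checkpoint does not track.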
if os.path.isfile(checkpoint_path + '/ckpt_record.json'):
with codecs.open(checkpoint_path + '/ckpt_record.json', encoding='utf-8') as json_file:
ckpt_record = json.load(json_file)
last_epoch = ckpt_record['epoch']
es_flag_t = ckpt_record['es_flag_t']
check_flag_t = ckpt_record['check_flag_t']
es_helper_t.load_ckpt(checkpoint_path)
step_cnt = ckpt_record['step_cnt']
ckpt.restore(ckpt_manager.checkpoints[-1])
result_writer.write("Check point restored at epoch {}".format(last_epoch))
result_writer.write("Start training Stream-T...\n")
for epoch in range(last_epoch, args.MAX_EPOCH + 1):
if es_flag_t or epoch == args.MAX_EPOCH:
print("Stream-T: Early stoping...")
if es_flag_t:
ckpt.restore(ckpt_manager.checkpoints[0])
else:
ckpt.restore(ckpt_manager.checkpoints[es_helper_t.get_bestepoch() - epoch - 1])
print('Checkpoint restored!! At epoch {}\n'.format(es_helper_t.get_bestepoch()))
break
start = time.time()
rmse_train_t.reset_states()
for (batch, (inp, tar)) in enumerate(train_dataset):
enc_ft = inp["enc_inp_ft"]
enc_ex = inp["enc_inp_ex"]
dec_ft = inp["dec_inp_ft"]
dec_ex = inp["dec_inp_ex"]
y_t = tar["y_t"]
total_loss = distributed_train_step(enc_ft, enc_ex, dec_ft, dec_ex, y_t)
if not built and args.model_summary:
stream_t.summary(print_fn=result_writer.write)
built = True
step_cnt += 1
tf_summary_scalar(summary_writer, "total_loss_t", total_loss, step_cnt)
if (batch + 1) % 100 == 0 and args.verbose_train:
template = 'Epoch {} Batch {} Transition RMSE: {:.6f}'.format \
(epoch + 1, batch + 1, rmse_train_t.result())
print(template)
if args.verbose_train:
template = 'Epoch {} Transition RMSE: {:.6f}\n'.format(epoch + 1, rmse_train_t.result())
result_writer.write(template)
tf_summary_scalar(summary_writer, "rmse_train_transition", rmse_train_t.result(), epoch + 1)
eval_rmse = float(rmse_train_t.result().numpy())
if test_model or (not check_flag_t and es_helper_t.refresh_status(eval_rmse)):
check_flag_t = True
if check_flag_t:
evaluate(val_dataset, epoch, 1, False)
tf_summary_scalar(summary_writer, "rmse_test_transition", rmse_test_t.result(), epoch + 1)
es_rmse = float(rmse_test_t.result().numpy())
es_flag_t = es_helper_t.check(es_rmse, epoch)
tf_summary_scalar(summary_writer, "best_epoch_t", es_helper_t.get_bestepoch(), epoch + 1)
if args.always_test and (epoch + 1) % args.always_test == 0:
if not test_dataset:
test_dataset = self.dataset_generator.build_dataset(
'test', args.load_saved_data, strategy, args.no_save)
result_writer.write("Always Test:")
evaluate(test_dataset, epoch, 1, False)
ckpt_save_path = ckpt_manager.save()
ckpt_record = {'built': built, 'epoch': epoch + 1, 'best_epoch': es_helper_t.get_bestepoch(),
'check_flag_t': check_flag_t, 'es_flag_t': es_flag_t, 'step_cnt': step_cnt}
ckpt_record = json.dumps(ckpt_record, indent=4)
with codecs.open(checkpoint_path + '/ckpt_record.json', 'w', 'utf-8') as outfile:
outfile.write(ckpt_record)
es_helper_t.save_ckpt(checkpoint_path)
print('Save checkpoint for epoch {} at {}\n'.format(epoch + 1, ckpt_save_path))
tf_summary_scalar(summary_writer, "epoch_time_t", time.time() - start, epoch + 1)
print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
if test_model:
es_flag_t = True
result_writer.write("Start testing Stream-T (filtering out trivial grids):")
test_dataset = self.dataset_generator.build_dataset(
'test', args.load_saved_data, strategy, args.no_save) if not test_dataset else test_dataset
evaluate(test_dataset, epoch, 1, True)
tf_summary_scalar(summary_writer, "rmse_final_transition", rmse_test_t.result(), 1)
return stream_t, test_dataset
def train_stsan(self, stream_t, train_dataset, val_dataset, test_dataset):
strategy = self.strategy
args = self.args
param = self.param
test_model = args.test_model
test_threshold_f = [param['test_threshold'][i] / self.data_max[i]
for i in range(param['pred_type'])]
pred_type = param['pred_type']
data_name = param['data_name']
weights = args.weights
is_weights = type(weights) is np.ndarray
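        # args.weights is either unset or a per-output np.ndarray used to
        # rebalance both the training loss and the early-stopping metric.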
result_writer = ResultWriter("results/{}.txt".format(self.model_index))
def tf_summary_scalar(summary_writer, name, value, step):
with summary_writer.as_default():
tf.summary.scalar(name, value, step=step)
def print_verbose_f(epoch, final_test):
if final_test:
template_rmse = "RMSE:\n"
for i in range(pred_type):
template_rmse += '{}: {:.2f}({:.6f})\n'.format(
data_name[i],
rmse_test_f[i].result() * self.data_max[i],
rmse_test_f[i].result()
)
template_mae = "MAE:\n"
for i in range(pred_type):
template_mae += '{}: {:.2f}({:.6f})\n'.format(
data_name[i],
mae_test[i].result() * self.data_max[i],
mae_test[i].result()
)
template_mape = "MAPE:\n"
for i in range(pred_type):
template_mape += '{}: {:.2f}\n'.format(data_name[i], mape_test[i].result())
template = "Final:\n" + template_rmse + template_mae + template_mape
result_writer.write(template)
else:
template = "Epoch {} RMSE:\n".format(epoch + 1)
for i in range(pred_type):
template += "{}: {:.6f}\n".format(data_name[i], rmse_test_f[i].result())
result_writer.write('Validation Result (Min-Max Norm, filtering out trivial grids):\n' + template)
loss_object = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
def loss_function(real, pred):
loss_ = loss_object(real, pred)
return tf.nn.compute_average_loss(loss_, global_batch_size=self.GLOBAL_BATCH_SIZE)
rmse_train_f = [tf.keras.metrics.RootMeanSquaredError(dtype=tf.float32) for _ in range(pred_type)]
rmse_test_f = [tf.keras.metrics.RootMeanSquaredError(dtype=tf.float32) for _ in range(pred_type)]
mae_test = [MAE() for _ in range(pred_type)]
mape_test = [MAPE() for _ in range(pred_type)]
learning_rate = CustomSchedule(args.d_model, args.warmup_steps)
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
stsan = STSAN(
stream_t,
args.n_layer,
args.d_model,
args.n_head,
args.dff,
args.conv_layer,
args.conv_filter,
args.l_hist,
args.l_half,
args.r_d)
def train_step(enc_ft, enc_ex, dec_ft, dec_ex, y):
enc_padding_mask, combined_mask, dec_padding_mask = generate_masks(enc_ft, dec_ft)
with tf.GradientTape() as tape:
pred_f, _ = stsan(enc_ft, enc_ex, dec_ft, dec_ex, True,
enc_padding_mask, combined_mask, dec_padding_mask)
loss_f = loss_function(y * weights, pred_f * weights) if is_weights else loss_function(y, pred_f)
gradients = tape.gradient(loss_f, stsan.trainable_variables)
optimizer.apply_gradients(zip(gradients, stsan.trainable_variables))
for i in range(pred_type):
rmse_train_f[i](y[..., i], pred_f[..., i])
return loss_f
@tf.function
def distributed_train_step(enc_ft, enc_ex, dec_ft, dec_ex, y):
per_replica_losses = strategy.run(train_step, args=(enc_ft, enc_ex, dec_ft, dec_ex, y,))
return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)
def test_step(enc_ft, enc_ex, dec_ft, dec_ex, y, final_test=False):
enc_padding_mask, combined_mask, dec_padding_mask = generate_masks(enc_ft, dec_ft)
pred_f, _ = stsan(enc_ft, enc_ex, dec_ft, dec_ex, False,
enc_padding_mask, combined_mask, dec_padding_mask)
for i in range(pred_type):
real = y[..., i] * (weights[i] if is_weights else 1)
mask = tf.where(tf.math.greater(real, test_threshold_f[i]))
masked_real = tf.gather_nd(real, mask)
masked_pred = tf.gather_nd(pred_f[..., i], mask)
rmse_test_f[i](masked_real, masked_pred)
if final_test:
mae_test[i](masked_real, masked_pred)
mape_test[i](masked_real, masked_pred)
@tf.function
def distributed_test_step(enc_ft, enc_ex, dec_ft, dec_ex, y, final_test):
return strategy.run(test_step, args=(enc_ft, enc_ex, dec_ft, dec_ex, y, final_test,))
def evaluate(eval_dataset, epoch, verbose, final_test):
for i in range(pred_type):
rmse_test_f[i].reset_states()
for (batch, (inp, tar)) in enumerate(eval_dataset):
enc_ft = inp["enc_inp_ft"]
enc_ex = inp["enc_inp_ex"]
dec_ft = inp["dec_inp_ft"]
dec_ex = inp["dec_inp_ex"]
y = tar["y"]
distributed_test_step(
enc_ft, enc_ex, dec_ft, dec_ex, y, final_test)
if verbose:
print_verbose_f(epoch, final_test)
""" Start training... """
built = False
es_flag_f = False
check_flag_f = False
es_helper_f = EarlystopHelper('f', self.es_patiences, self.es_threshold)
summary_writer = tf.summary.create_file_writer(
os.environ['HOME'] + '/tensorboard/stsan/{}'.format(self.model_index))
step_cnt = 0
last_epoch = 0
checkpoint_path = "./checkpoints/stsan/{}".format(self.model_index)
ckpt = tf.train.Checkpoint(STSAN=stsan, optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path,
max_to_keep=(args.es_patience + 1))
if os.path.isfile(checkpoint_path + '/ckpt_record.json'):
with codecs.open(checkpoint_path + '/ckpt_record.json', encoding='utf-8') as json_file:
ckpt_record = json.load(json_file)
built = ckpt_record['built']
last_epoch = ckpt_record['epoch']
es_flag_f = ckpt_record['es_flag_f']
check_flag_f = ckpt_record['check_flag_f']
es_helper_f.load_ckpt(checkpoint_path)
step_cnt = ckpt_record['step_cnt']
ckpt.restore(ckpt_manager.checkpoints[-1])
result_writer.write("Check point restored at epoch {}".format(last_epoch))
result_writer.write("Start training STSAN...\n")
for epoch in range(last_epoch, args.MAX_EPOCH + 1):
if es_flag_f or epoch == args.MAX_EPOCH:
print("STSAN: Early stoping...")
if es_flag_f:
ckpt.restore(ckpt_manager.checkpoints[0])
else:
ckpt.restore(ckpt_manager.checkpoints[es_helper_f.get_bestepoch() - epoch - 1])
print('Checkpoint restored!! At epoch {}\n'.format(es_helper_f.get_bestepoch()))
break
start = time.time()
for i in range(pred_type):
rmse_train_f[i].reset_states()
for (batch, (inp, tar)) in enumerate(train_dataset):
enc_ft = inp["enc_inp_ft"]
enc_ex = inp["enc_inp_ex"]
dec_ft = inp["dec_inp_ft"]
dec_ex = inp["dec_inp_ex"]
y = tar["y"]
total_loss = distributed_train_step(enc_ft, enc_ex, dec_ft, dec_ex, y)
if not built and args.model_summary:
stsan.summary(print_fn=result_writer.write)
built = True
step_cnt += 1
tf_summary_scalar(summary_writer, "total_loss_f", total_loss, step_cnt)
if (batch + 1) % 100 == 0 and args.verbose_train:
template = 'Epoch {} Batch {} RMSE:'.format(epoch + 1, batch + 1)
for i in range(pred_type):
template += ' {} {:.6f}'.format(data_name[i], rmse_train_f[i].result())
print(template)
if args.verbose_train:
template = ''
for i in range(pred_type):
template += ' {} {:.6f}'.format(data_name[i], rmse_train_f[i].result())
tf_summary_scalar(
summary_writer, " rmse_train_{}".format(data_name[i]), rmse_train_f[i].result(), epoch + 1)
template = 'Epoch {} RMSE: {}\n'.format(epoch + 1, template)
result_writer.write(template)
eval_rmse = 0.0
for i in range(pred_type):
eval_rmse += float(rmse_train_f[i].result().numpy() * (weights[i] if is_weights else 1))
if test_model or (not check_flag_f and es_helper_f.refresh_status(eval_rmse)):
check_flag_f = True
if check_flag_f:
evaluate(val_dataset, epoch, 1, False)
es_rmse = [0.0 for _ in range(pred_type)]
for i in range(pred_type):
if is_weights:
es_rmse[i] += float(rmse_test_f[i].result().numpy() * weights[i])
else:
es_rmse[i] += float(rmse_test_f[i].result().numpy())
tf_summary_scalar(summary_writer, "rmse_test_{}".format(data_name[i]),
rmse_test_f[i].result(), epoch + 1)
es_flag_f = es_helper_f.check(es_rmse[0] + es_rmse[1], epoch)
tf_summary_scalar(summary_writer, "best_epoch_f", es_helper_f.get_bestepoch(), epoch + 1)
if args.always_test and (epoch + 1) % args.always_test == 0:
if not test_dataset:
test_dataset = self.dataset_generator.build_dataset(
'test', args.load_saved_data, strategy, args.no_save)
result_writer.write("Always Test:")
evaluate(test_dataset, epoch, 1, False)
ckpt_save_path = ckpt_manager.save()
ckpt_record = {'built': built, 'epoch': epoch + 1, 'best_epoch': es_helper_f.get_bestepoch(),
'check_flag_f': check_flag_f, 'es_flag_f': es_flag_f, 'step_cnt': step_cnt}
ckpt_record = json.dumps(ckpt_record, indent=4)
with codecs.open(checkpoint_path + '/ckpt_record.json', 'w', 'utf-8') as outfile:
outfile.write(ckpt_record)
es_helper_f.save_ckpt(checkpoint_path)
print('Save checkpoint for epoch {} at {}\n'.format(epoch + 1, ckpt_save_path))
tf_summary_scalar(summary_writer, "epoch_time_f", time.time() - start, epoch + 1)
print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start))
if test_model:
es_flag_f = True
result_writer.write("Start testing (filtering out trivial grids):")
test_dataset = self.dataset_generator.build_dataset(
'test', args.load_saved_data, strategy, args.no_save) if not test_dataset else test_dataset
evaluate(test_dataset, epoch, 1, True)
for i in range(pred_type):
tf_summary_scalar(summary_writer, "rmse_final_{}".format(data_name[i]), rmse_test_f[i].result(), 1)
return stsan
def train(self):
strategy = self.strategy
args = self.args
train_dataset = self.dataset_generator.build_dataset('train', args.load_saved_data, strategy, args.no_save)
val_dataset = self.dataset_generator.build_dataset('val', args.load_saved_data, strategy, args.no_save)
with self.strategy.scope():
stream_t, test_dataset = self.pretrain(train_dataset, val_dataset)
_ = self.train_stsan(stream_t, train_dataset, val_dataset, test_dataset)
|
{"hexsha": "bb445ee77c3516f9c7baf342bbddecb80817a5d4", "size": 24505, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "starkhxl/AMEX", "max_stars_repo_head_hexsha": "7c186ae4f1e7421eda5b37d7ae1d0f3bcb20e25c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-06-27T06:43:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-01T11:19:33.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "starkhxl/AMEX", "max_issues_repo_head_hexsha": "7c186ae4f1e7421eda5b37d7ae1d0f3bcb20e25c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-06-16T06:44:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-10T04:01:07.000Z", "max_forks_repo_path": "train.py", "max_forks_repo_name": "starkhxl/AMEX", "max_forks_repo_head_hexsha": "7c186ae4f1e7421eda5b37d7ae1d0f3bcb20e25c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-09T06:39:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-09T06:39:15.000Z", "avg_line_length": 43.8372093023, "max_line_length": 115, "alphanum_fraction": 0.575474393, "include": true, "reason": "import numpy", "num_tokens": 5386}
|
// Copyright (c) 2014 Robert Ramey
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <iostream>
#include <cassert>
#include <typeinfo>
#include <boost/core/demangle.hpp>
#include "../include/safe_compare.hpp"
template<class T1, class T2>
void print_argument_types(
T1 v1,
T2 v2
){
const std::type_info & ti1 = typeid(v1);
const std::type_info & ti2 = typeid(v2);
std::cout
<< boost::core::demangle(ti1.name()) << ','
<< boost::core::demangle(ti2.name());
}
using namespace boost::numeric;
template<class T1, class T2>
bool test_safe_compare_impl(
T1 v1,
T2 v2,
char expected_result
){
switch(expected_result){
case '=': {
if(! safe_compare::equal(v1, v2))
return false;
if(safe_compare::less_than(v1, v2))
return false;
if(safe_compare::greater_than(v1, v2))
return false;
break;
}
case '<': {
if(! safe_compare::less_than(v1, v2))
return false;
if(safe_compare::greater_than(v1, v2))
return false;
if(safe_compare::equal(v1, v2))
return false;
break;
}
case '>':{
if(! safe_compare::greater_than(v1, v2))
return false;
if(safe_compare::less_than(v1, v2))
return false;
if(safe_compare::equal(v1, v2))
return false;
break;
}
}
return true;
}
template<class T1, class T2>
bool test_safe_compare(
T1 v1,
T2 v2,
char expected_result
){
print_argument_types(v1, v2);
const bool result = test_safe_compare_impl(v1, v2, expected_result);
if(! result)
std::cout << " failed";
std::cout << '\n';
return result;
}
#include "test.hpp"
#include "test_values.hpp"
const char *test_compare_result[VALUE_ARRAY_SIZE] = {
// 0 0 0 0
// 012345670123456701234567012345670
// 012345678901234567890123456789012
/* 0*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/* 1*/ ">=>>><>>><>>><>>>=<<><<<><<<><<<>",
/* 2*/ "<<=<<<><<<><<<><<<<<<<<<<<<<<<<<<",
/* 3*/ "<<>=<<>=<<>=<<>=<<<<<<<<<<<<<<<<<",
/* 4*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/* 5*/ ">>>>>=>>><>>><>>>>>>>=<<><<<><<<>",
/* 6*/ "<<<<<<=<<<><<<><<<<<<<<<<<<<<<<<<",
/* 7*/ "<<>=<<>=<<>=<<>=<<<<<<<<<<<<<<<<<",
/* 8*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/* 9*/ ">>>>>>>>>=>>><>>>>>>>>>>>=<<><<<>",
/*10*/ "<<<<<<<<<<=<<<><<<<<<<<<<<<<<<<<<",
/*11*/ "<<>=<<>=<<>=<<>=<<<<<<<<<<<<<<<<<",
/*12*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/*13*/ ">>>>>>>>>>>>>=>>>>>>>>>>>>>>>=<<>",
/*14*/ "<<<<<<<<<<<<<<=<<<<<<<<<<<<<<<<<<",
/*15*/ "<<>=<<>=<<>=<<>=<<<<<<<<<<<<<<<<",
// 0 0 0 0
// 012345670123456701234567012345670
// 012345678901234567890123456789012
/*16*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/*17*/ ">=>>><>>><>>><>>>=<<><<<><<<><<<>",
/*18*/ ">>>>><>>><>>><>>>>=<><<<><<<><<<>",
/*19*/ ">>>>><>>><>>><>>>>>=><<<><<<><<<>",
/*20*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/*21*/ ">>>>>=>>><>>><>>>>>>>=<<><<<><<<>",
/*22*/ ">>>>>>>>><>>><>>>>>>>>=<><<<><<<>",
/*23*/ ">>>>>>>>><>>><>>>>>>>>>=><<<><<<>",
/*24*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/*25*/ ">>>>>>>>>=>>><>>>>>>>>>>>=<<><<<>",
/*26*/ ">>>>>>>>>>>>><>>>>>>>>>>>>=<><<<>",
/*27*/ ">>>>>>>>>>>>><>>>>>>>>>>>>>=><<<>",
/*28*/ "=<>>=<>>=<>>=<>>=<<<=<<<=<<<=<<<>",
/*29*/ ">>>>>>>>>>>>>=>>>>>>>>>>>>>>>=<<>",
/*30*/ ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>=<>",
/*31*/ ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>=>",
/*32*/ "<<>><<>><<>><<>><<<<<<<<<<<<<<<<="
};
#define TEST_IMPL(v1, v2, result) \
rval &= test_safe_compare( \
v1, \
v2, \
result \
);
/**/
void break_check(unsigned int i, unsigned int j){
std::cout << i << ',' << j << ',';
}
#define TESTX(value_index1, value_index2) \
break_check(value_index1, value_index2); \
TEST_IMPL( \
BOOST_PP_ARRAY_ELEM(value_index1, VALUES), \
BOOST_PP_ARRAY_ELEM(value_index2, VALUES), \
test_compare_result[value_index1][value_index2] \
)
/**/
int main(int, char *[]){
bool rval = true;
TEST_EACH_VALUE_PAIR
std::cout << (rval ? "success!" : "failure") << std::endl;
return ! rval ;
}
|
{"hexsha": "d132c2df942a1a4360e47736352fd99b9bff7bc8", "size": 4431, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_safe_compare.cpp", "max_stars_repo_name": "janisozaur/safe_numerics", "max_stars_repo_head_hexsha": "c494e9d6bddc47292b1bb1552469196b01fcdc55", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_safe_compare.cpp", "max_issues_repo_name": "janisozaur/safe_numerics", "max_issues_repo_head_hexsha": "c494e9d6bddc47292b1bb1552469196b01fcdc55", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_safe_compare.cpp", "max_forks_repo_name": "janisozaur/safe_numerics", "max_forks_repo_head_hexsha": "c494e9d6bddc47292b1bb1552469196b01fcdc55", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4038461538, "max_line_length": 72, "alphanum_fraction": 0.3972015346, "num_tokens": 1369}
|
from ..qt_compat import QtGui, QtCore, QtWidgets  # QtWidgets assumed to be re-exported by qt_compat
import numpy as np
import logging as log
import os
import matplotlib
from ..plugins import Plugin
from ..core import DataModel, LayerManager, LabelManager, Launcher
from .mpl_widgets import PerspectiveCanvas, ConfidenceCanvas  # ConfidenceCanvas assumed to live alongside PerspectiveCanvas
from .base import SComboBox
class ConfidenceViewer(Plugin):
name = 'Confidence Viewer'
def __init__(self, ptype=Plugin.Widget):
super(ConfidenceViewer, self).__init__(ptype=ptype)
self.DM = DataModel.instance()
self.LBLM = LabelManager.instance()
self.DM.confidence_changed.connect(self.replot)
vbox = QtWidgets.QVBoxLayout()
self.layout.addLayout(vbox, 0, 0)
self.orient = 0
self.canvases = (None,)
self.idx = (self.DM.data.shape[self.orient]) // 2
topbox = QtWidgets.QHBoxLayout()
vbox.addLayout(topbox, 0)
# Perspective
self.combo = SComboBox()
self.combo.addItem("Axial")
self.combo.addItem("Sagittal")
self.combo.addItem("Coronal")
self.combo.setCurrentIndex(0)
self.combo.currentIndexChanged.connect(self.on_combo)
topbox.addWidget(self.combo)
# Slider
        self.slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.slider.setMinimum(0)
        self.slider.setMaximum(self.DM.data.shape[self.orient]-1)
self.slider.setValue(self.idx)
self.slider.valueChanged.connect(self.on_value)
self.slider.setSingleStep(1)
topbox.addWidget(self.slider)
# Slider Text
self.text_idx = QtWidgets.QLabel(str(self.idx))
topbox.addWidget(self.text_idx)
self.container = QtWidgets.QWidget()
        self.container.setLayout(QtWidgets.QGridLayout())
vbox.addWidget(self.container, 1)
def on_value(self, idx):
self.idx = idx
self.text_idx.setText(str(self.idx))
def clear(self):
for widget in self.canvases:
if widget is not None:
widget.setParent(None)
widget.deleteLater()
self.canvases = (None,)
def on_combo(self, orient):
self.orient = orient
self.idx = (self.DM.data.shape[self.orient]) // 2
for widget in self.canvases:
if widget is not None:
widget.idx = self.idx
widget.orient = orient
widget.replot()
def replot(self):
self.clear()
total = self.LBLM.len()
spunique = np.unique(self.DM.gtlabels[:])
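        # One row of canvases when there are fewer than four labels; otherwise
        # split them over two rows (cols = ceil(total/2)).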
cols = total if total < 4 else total//2 if total%2==0 else total//2+1
rows = total//cols if total%cols==0 else total//cols+1
k = 0
labels = list(self.LBLM.labels())
self.canvases = []
for i in range(rows):
for j in range(cols):
name = None if i*cols+j >= len(labels) else labels[i*cols+j].name
if k in spunique:
widget = ConfidenceCanvas(k, name=name)
k += 1
else:
widget = ConfidenceCanvas(name=name)
self.canvases += [widget]
self.slider.valueChanged.connect(widget.update_volume)
self.container.layout().addWidget(widget, i, j)
|
{"hexsha": "06a9c5860e78ec4c47e725623b03bd56c976975a", "size": 3221, "ext": "py", "lang": "Python", "max_stars_repo_path": "survos/widgets/conficence_viewer.py", "max_stars_repo_name": "paskino/SuRVoS", "max_stars_repo_head_hexsha": "e01e784442e2e9f724826cdb70f3a50c034c6455", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2016-09-30T08:04:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T07:24:18.000Z", "max_issues_repo_path": "survos/widgets/conficence_viewer.py", "max_issues_repo_name": "paskino/SuRVoS", "max_issues_repo_head_hexsha": "e01e784442e2e9f724826cdb70f3a50c034c6455", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 81, "max_issues_repo_issues_event_min_datetime": "2016-11-21T15:32:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-20T00:22:27.000Z", "max_forks_repo_path": "survos/widgets/conficence_viewer.py", "max_forks_repo_name": "paskino/SuRVoS", "max_forks_repo_head_hexsha": "e01e784442e2e9f724826cdb70f3a50c034c6455", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-11-22T10:19:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-04T06:15:48.000Z", "avg_line_length": 32.5353535354, "max_line_length": 81, "alphanum_fraction": 0.5985718721, "include": true, "reason": "import numpy", "num_tokens": 727}
|
#include <istat/test.h>
#include <istat/istattime.h>
#include <istat/Mmap.h>
#include "../daemon/StatCounterFactory.h"
#include "../daemon/StatStore.h"
#include <boost/filesystem.hpp>
using namespace istat;
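// "10s:1d" presumably encodes 10-second buckets retained for one day.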
RetentionPolicy rp("10s:1d");
RetentionPolicy xrp("");
class FakeProtectedDiskMmap : public Mmap
{
public:
FakeProtectedDiskMmap(int64_t freeSpace)
: freeSpace_(freeSpace)
{
}
virtual int open(char const *name, int flags)
{
return -1;
}
virtual int close(int fd)
{
return -1;
}
virtual ssize_t read(int fd, void *ptr, ssize_t amt)
{
return -1;
}
virtual ssize_t write(int fd, void *ptr, ssize_t amt)
{
return -1;
}
virtual ptrdiff_t seek(int fd, ptrdiff_t offset, int whence)
{
return -1;
}
virtual ssize_t tell(int fd)
{
return -1;
}
virtual int truncate(int fd, ssize_t size)
{
return -1;
}
virtual void *map(int fd, int64_t offset, size_t size, bool writable)
{
return 0;
}
virtual bool unmap(void const *ptr, size_t size)
{
return false;
}
virtual bool flush(void const *ptr, size_t size, bool immediate)
{
return false;
}
virtual int64_t availableSpace(char const *path)
{
return freeSpace_;
}
virtual void dispose()
{
}
virtual void counters(int64_t *oMaps, int64_t *oUnmaps, int64_t *oOpens, int64_t *oCloses)
{
}
private:
int64_t freeSpace_;
};
void run_tests(void)
{
Mmap *mm;
mm = NewMmap();
{
boost::asio::io_service service;
std::string storepath("/tmp/test/statstore");
boost::filesystem::remove_all(storepath);
boost::filesystem::create_directories(storepath);
boost::shared_ptr<IStatCounterFactory> statCounterFactory(new StatCounterFactory(storepath, mm, rp));
StatStore store(storepath, getuid(), service, statCounterFactory, mm);
store.record("taco", 42.42);
std::list<std::pair<std::string, CounterResponse> > oList;
store.listMatchingCounters("bbq", oList);
assert_equal(0, oList.size());
store.listMatchingCounters("taco is delicious!", oList);
assert_equal(0, oList.size());
store.listMatchingCounters("taco", oList);
assert_equal(1, oList.size());
store.record("taco.bell", 42.42);
store.record("*taco.cheese", 42.42);
std::list<std::pair<std::string, CounterResponse> > oList2;
store.listMatchingCounters("taco*", oList2);
assert_equal(3, oList2.size());
std::list<std::pair<std::string, CounterResponse> >::iterator ptr = oList2.begin();
assert_equal("taco.bell", (*ptr).first);
assert_equal(true, (*ptr).second.isLeaf);
assert_equal(CounterResponse::DisplayTypeGauge, (*ptr).second.counterType);
std::advance(ptr, 1);
assert_equal("taco.cheese", (*ptr).first);
assert_equal(true, (*ptr).second.isLeaf);
assert_equal(CounterResponse::DisplayTypeEvent, (*ptr).second.counterType);
std::advance(ptr, 1);
assert_equal("taco", (*ptr).first);
assert_equal(false, (*ptr).second.isLeaf);
assert_equal(CounterResponse::DisplayTypeAggregate, (*ptr).second.counterType);
}
mm->dispose();
// Ensure full disk does not have available space.
mm = new FakeProtectedDiskMmap(0);
{
boost::asio::io_service service;
std::string storepath("/tmp/test/statstore");
boost::filesystem::remove_all(storepath);
boost::filesystem::create_directories(storepath);
boost::shared_ptr<IStatCounterFactory> statCounterFactory(new StatCounterFactory(storepath, mm, rp));
StatStore store(storepath, getuid(), service, statCounterFactory, mm);
assert_equal(store.hasAvailableSpace(), false);
}
mm->dispose();
// Ensure disk with 1GB free still has available space.
mm = new FakeProtectedDiskMmap(1024L * 1024L * 1024L);
{
boost::asio::io_service service;
std::string storepath("/tmp/test/statstore");
boost::filesystem::remove_all(storepath);
boost::filesystem::create_directories(storepath);
boost::shared_ptr<IStatCounterFactory> statCounterFactory(new StatCounterFactory(storepath, mm, rp));
StatStore store(storepath, getuid(), service, statCounterFactory, mm);
assert_equal(store.hasAvailableSpace(), true);
}
mm->dispose();
}
void func()
{
run_tests();
}
int main(int argc, char const *argv[])
{
return istat::test(func, argc, argv);
}
|
{"hexsha": "40cca1d0db1469e05bc0b886358173c7d21a7f79", "size": 4663, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/test_StatStore.cpp", "max_stars_repo_name": "yjpark/istatd", "max_stars_repo_head_hexsha": "859a67c4c633a9e96f3f0b990a94afa54aa20224", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_StatStore.cpp", "max_issues_repo_name": "yjpark/istatd", "max_issues_repo_head_hexsha": "859a67c4c633a9e96f3f0b990a94afa54aa20224", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_StatStore.cpp", "max_forks_repo_name": "yjpark/istatd", "max_forks_repo_head_hexsha": "859a67c4c633a9e96f3f0b990a94afa54aa20224", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.755952381, "max_line_length": 109, "alphanum_fraction": 0.6358567446, "num_tokens": 1170}
|
import numpy as np
import pytest
from ntab import Table
#-------------------------------------------------------------------------------
def test_empty():
tab = Table()
assert tab.num_cols == 0
assert tab.num_rows == 0
tab.arrs["x"] = np.arange(10)
assert tab.num_cols == 1
assert tab.num_rows == 10
tab.arrs["y"] = (np.arange(10) + 1)**2
assert tab.num_cols == 2
assert tab.num_rows == 10
with pytest.raises(ValueError):
tab.arrs["z"] = np.arange(12)
def test_remove_last():
tab = Table(x=[1, 3, 5, 7, 9])
assert tab.num_cols == 1
assert tab.num_rows == 5
assert tuple(tab.names) == ("x", )
del tab.arrs["x"]
assert tab.num_cols == 0
assert tab.num_rows == 0
assert tuple(tab.names) == ()
|
{"hexsha": "4d55e25b53583a301d2f29003d03ac4f586e91b7", "size": 786, "ext": "py", "lang": "Python", "max_stars_repo_path": "ntab/test/test_arrs.py", "max_stars_repo_name": "alexhsamuel/ntab", "max_stars_repo_head_hexsha": "9039d0e10d0f1a86fb16a33c05c79dfb931b28ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ntab/test/test_arrs.py", "max_issues_repo_name": "alexhsamuel/ntab", "max_issues_repo_head_hexsha": "9039d0e10d0f1a86fb16a33c05c79dfb931b28ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2017-05-10T21:46:14.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-01T10:37:17.000Z", "max_forks_repo_path": "ntab/test/test_arrs.py", "max_forks_repo_name": "alexhsamuel/ntab", "max_forks_repo_head_hexsha": "9039d0e10d0f1a86fb16a33c05c79dfb931b28ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8333333333, "max_line_length": 80, "alphanum_fraction": 0.5267175573, "include": true, "reason": "import numpy", "num_tokens": 221}
|
/-
Copyright (c) 2022 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
-/
import morphisms.finite
import morphisms.finite_type
import for_mathlib.integral
import morphisms.universally_closed
import ring_theory.ring_hom.integral
import for_mathlib.algebra_is_pushout
/-!
# Integral morphisms
A morphism of schemes is integral if it is affine and the component of the sheaf map on affine opens
is integral.
We show that this property is local, and is stable under composition and base change.
-/
noncomputable theory
open category_theory category_theory.limits opposite topological_space
universe u
namespace algebraic_geometry
variables {X Y : Scheme.{u}} (f : X ⟶ Y)
/--
A morphism is `integral` if it is affine and the induced map on sections over every affine open is integral.
-/
@[mk_iff]
class integral (f : X ⟶ Y) extends affine f : Prop :=
(is_integral_of_affine [] :
∀ U : opens Y.carrier, is_affine_open U → (f.1.c.app (op U)).is_integral)
def integral.affine_property : affine_target_morphism_property :=
affine_and (λ R S _ _ f, by exactI ring_hom.is_integral f)
lemma integral_eq_affine_property :
@integral = target_affine_locally integral.affine_property :=
by { ext, rw [integral_iff, integral.affine_property,
affine_and_target_affine_locally_iff ring_hom.is_integral_respects_iso] }
lemma integral.affine_property_is_local :
integral.affine_property.is_local :=
is_local_affine_and _ ring_hom.is_integral_respects_iso ring_hom.localization_is_integral
ring_hom.is_integral_of_localization_span
lemma integral_is_local_at_target :
property_is_local_at_target @integral :=
integral_eq_affine_property.symm ▸ integral.affine_property_is_local.target_affine_locally_is_local
lemma integral_respects_iso : morphism_property.respects_iso @integral :=
integral_is_local_at_target.respects_iso
lemma integral_stable_under_composition : morphism_property.stable_under_composition @integral :=
by { rw integral_eq_affine_property, exact affine_and_stable_under_composition _
ring_hom.is_integral_stable_under_composition }
lemma integral_stable_under_base_change : morphism_property.stable_under_base_change @integral :=
by { rw integral_eq_affine_property, exact affine_and_stable_under_base_change _
ring_hom.is_integral_respects_iso ring_hom.localization_is_integral
ring_hom.is_integral_of_localization_span
ring_hom.is_integral_stable_under_base_change }
lemma integral_le_affine :
@integral ≤ @affine :=
by { rw integral_eq_affine_property, exact target_affine_locally_affine_and_le_affine _ }
lemma integral_Spec_iff {R S : CommRing} (f : R ⟶ S) :
integral (Scheme.Spec.map f.op) ↔ ring_hom.is_integral f :=
begin
rw [integral_eq_affine_property,
integral.affine_property_is_local.affine_target_iff,
integral.affine_property, affine_and_Spec_iff ring_hom.is_integral_respects_iso]
end
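-- A morphism is finite iff it is integral and locally of finite type; this is
-- the scheme-level counterpart of `ring_hom.finite.of_is_integral_of_finite_type`.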
lemma finite_eq_integral_inf_locally_of_finite_type :
@finite = @integral ⊓ @locally_of_finite_type :=
begin
apply property_ext_of_le_affine finite_le_affine
(inf_le_left.trans integral_le_affine) finite_is_local_at_target
(integral_is_local_at_target.inf locally_of_finite_type_is_local_at_target),
intros R S f,
simp_rw [pi.inf_apply, finite_Spec_iff, integral_Spec_iff, locally_of_finite_type_Spec_iff],
exact ⟨λ h, ⟨h.to_is_integral, h.to_finite_type⟩,
λ h, ring_hom.finite.of_is_integral_of_finite_type h.1 h.2⟩
end
instance finite.to_integral [hf : finite f] : integral f :=
by { rw finite_eq_integral_inf_locally_of_finite_type at hf, exact hf.1 }
instance finite.to_locally_of_finite_type [hf : finite f] : locally_of_finite_type f :=
by { rw finite_eq_integral_inf_locally_of_finite_type at hf, exact hf.2 }
-- lemma integral.affine_open_cover_tfae {X Y : Scheme.{u}} (f : X ⟶ Y) :
-- tfae [integral f,
-- ∃ (𝒰 : Scheme.open_cover.{u} Y) [∀ i, is_affine (𝒰.obj i)],
-- ∀ (i : 𝒰.J), is_affine (pullback f (𝒰.map i)) ∧
-- ring_hom.integral (Scheme.Γ.map (pullback.snd : pullback f (𝒰.map i) ⟶ _).op),
-- ∀ (𝒰 : Scheme.open_cover.{u} Y) [∀ i, is_affine (𝒰.obj i)] (i : 𝒰.J),
-- is_affine (pullback f (𝒰.map i)) ∧
-- ring_hom.integral (Scheme.Γ.map (pullback.snd : pullback f (𝒰.map i) ⟶ _).op),
-- ∀ {U : Scheme} (g : U ⟶ Y) [is_affine U] [is_open_immersion g],
-- is_affine (pullback f g) ∧
-- ring_hom.integral (Scheme.Γ.map (pullback.snd : pullback f g ⟶ _).op)] :=
-- integral_eq_affine_property.symm ▸
-- integral.affine_property_is_local.affine_open_cover_tfae f
-- lemma integral.open_cover_tfae {X Y : Scheme.{u}} (f : X ⟶ Y) :
-- tfae [integral f,
-- ∃ (𝒰 : Scheme.open_cover.{u} Y), ∀ (i : 𝒰.J),
-- integral (pullback.snd : (𝒰.pullback_cover f).obj i ⟶ 𝒰.obj i),
-- ∀ (𝒰 : Scheme.open_cover.{u} Y) (i : 𝒰.J),
-- integral (pullback.snd : (𝒰.pullback_cover f).obj i ⟶ 𝒰.obj i),
-- ∀ (U : opens Y.carrier), integral (f ∣_ U),
-- ∀ {U : Scheme} (g : U ⟶ Y) [is_open_immersion g],
-- integral (pullback.snd : pullback f g ⟶ _)] :=
-- affine_eq_affine_property.symm ▸
-- affine_affine_property_is_local.open_cover_tfae f
lemma integral_over_affine_iff [is_affine Y] :
integral f ↔ is_affine X ∧ ring_hom.is_integral (Scheme.Γ.map f.op) :=
integral_eq_affine_property.symm ▸
integral.affine_property_is_local.affine_target_iff f
lemma integral.affine_open_cover_iff {X Y : Scheme.{u}} (𝒰 : Scheme.open_cover.{u} Y)
[∀ i, is_affine (𝒰.obj i)] (f : X ⟶ Y) :
integral f ↔ ∀ i, is_affine (pullback f (𝒰.map i)) ∧
ring_hom.is_integral (Scheme.Γ.map (pullback.snd : pullback f (𝒰.map i) ⟶ _).op) :=
integral_eq_affine_property.symm ▸
integral.affine_property_is_local.affine_open_cover_iff f 𝒰
lemma integral.open_cover_iff {X Y : Scheme.{u}} (𝒰 : Scheme.open_cover.{u} Y)
[∀ i, is_affine (𝒰.obj i)] (f : X ⟶ Y) :
integral f ↔ ∀ i, integral (pullback.snd : pullback f (𝒰.map i) ⟶ _) :=
integral_eq_affine_property.symm ▸
integral.affine_property_is_local.target_affine_locally_is_local.open_cover_iff f 𝒰
instance {X Y S : Scheme} (f : X ⟶ S) (g : Y ⟶ S) [integral g] :
integral (pullback.fst : pullback f g ⟶ X) :=
integral_stable_under_base_change (is_pullback.of_has_pullback f g).flip infer_instance
instance {X Y S : Scheme} (f : X ⟶ S) (g : Y ⟶ S) [integral f] :
integral (pullback.snd : pullback f g ⟶ Y) :=
integral_stable_under_base_change (is_pullback.of_has_pullback f g) infer_instance
lemma topologically_is_closed_map_respects_iso :
(morphism_property.topologically @is_closed_map).respects_iso :=
begin
apply morphism_property.stable_under_composition.respects_iso,
{ intros X Y Z f g hf hg, exact hg.comp hf },
{ intros X Y e, exact (Top.homeo_of_iso $ Scheme.forget_to_Top.map_iso e).is_closed_map },
end
lemma is_closed_map_of_is_integral_of_is_affine [integral f] [is_affine Y] :
is_closed_map f.1.base :=
begin
haveI := is_affine_of_affine f,
apply (topologically_is_closed_map_respects_iso.arrow_mk_iso_iff
(Spec_Γ_arrow_iso_of_is_affine f)).mpr,
apply prime_spectrum.is_closed_map_of_is_integral,
exact (integral.is_integral_of_affine f _ (top_is_affine_open _) : _),
end
@[priority 100]
instance integral.to_universally_closed [hf : integral f] : universally_closed f :=
begin
constructor,
rintros X' Y' i₁ i₂ f' H,
replace hf := integral_stable_under_base_change H.flip hf,
clear_dependent X Y,
apply (is_closed_map_iff_is_closed_map_of_supr_eq_top Y'.affine_cover.supr_opens_range).mpr,
introI i,
rw [← morphism_restrict_val_base],
haveI := integral_is_local_at_target.2 f' (Y'.affine_cover.map i).opens_range infer_instance,
haveI : is_affine _ := range_is_affine_open_of_open_immersion (Y'.affine_cover.map i),
apply is_closed_map_of_is_integral_of_is_affine
end
open_locale polynomial
local attribute [instance] polynomial.polynomial_algebra_of_algebra
open_locale big_operators
lemma polynomial.reflect_map {R S : Type*} [comm_ring R] [comm_ring S] (p : R[X]) (f : R →+* S) (n : ℕ) :
(p.map f).reflect n = (p.reflect n).map f :=
begin
ext i, simp,
end
lemma _root_.ring_hom.is_integral_elem_of_is_nilpotent {R S : Type*} [comm_ring R] [comm_ring S]
(f : R →+* S) {x : S}
(hx : is_nilpotent x) : f.is_integral_elem x :=
begin
cases hx with n hx,
refine ⟨polynomial.monomial n (1 : R), polynomial.leading_coeff_monomial _ _, _⟩,
rw [polynomial.eval₂_monomial, hx, mul_zero]
end
lemma integral_eq_affine_inf_universally_closed :
@integral = @affine ⊓ @universally_closed :=
begin
apply le_antisymm,
{ introsI X Y f hf, exact ⟨infer_instance, infer_instance⟩ },
{ apply property_le_of_le_affine inf_le_left
(affine_is_local_at_target.inf universally_closed_is_local_at_target)
integral_is_local_at_target,
simp_rw [pi.inf_apply, integral_Spec_iff],
rintros R S f ⟨-, h₂⟩ a,
by_cases ha : is_nilpotent a, { exact ring_hom.is_integral_elem_of_is_nilpotent _ ha },
let p : S[X] := polynomial.monomial 1 a - polynomial.C 1,
letI := f.to_algebra,
haveI : universally_closed (Scheme.Spec.map (CommRing.of_hom (algebra_map R S)).op),
{ convert h₂; exact CommRing.of_eq _ },
have := universally_closed.out _ _ _
((algebra.is_pushout.to_is_pushout R S R[X] S[X]).op.map Scheme.Spec) _
(prime_spectrum.is_closed_zero_locus $ {p}),
change is_closed (prime_spectrum.comap (algebra_map R[X] S[X]) ''
prime_spectrum.zero_locus {p}) at this,
rw [← prime_spectrum.zero_locus_span, ← closure_eq_iff_is_closed,
prime_spectrum.closure_image_comap_zero_locus, prime_spectrum.zero_locus_span] at this,
have : (1 : R[X]) ∈ ideal.span {polynomial.X} ⊔ (ideal.span {p}).comap (algebra_map R[X] S[X]),
{ rw [← ideal.eq_top_iff_one, sup_comm, ← prime_spectrum.zero_locus_empty_iff_eq_top,
prime_spectrum.zero_locus_sup, this, prime_spectrum.zero_locus_span,
set.eq_empty_iff_forall_not_mem],
rintros _ ⟨⟨x, hx : _ ⊆ _, rfl⟩, hx' : _ ⊆ _⟩,
apply x.2.1,
replace hx' : polynomial.X ∈ x.as_ideal,
{ rw set.singleton_subset_iff at hx', change _ ∈ x.as_ideal at hx',
rwa [polynomial.polynomial_algebra_of_algebra_algebra_map_apply, polynomial.map_X] at hx' },
rw set.singleton_subset_iff at hx,
have : _ - (_ - _) ∈ _ := sub_mem (x.as_ideal.mul_mem_left (polynomial.C a) hx') hx,
rwa [polynomial.monomial_eq_C_mul_X, pow_one, sub_sub_cancel,
map_one, ← ideal.eq_top_iff_one] at this },
rw ideal.mem_span_singleton_sup at this,
obtain ⟨a, b, hb, e⟩ := this,
have h : b.coeff 0 = 1,
{ apply_fun (λ p, polynomial.coeff p 0) at e,
rwa [polynomial.coeff_add, polynomial.coeff_mul_X_zero, polynomial.coeff_one_zero,
zero_add] at e },
rw [ideal.mem_comap, polynomial.polynomial_algebra_of_algebra_algebra_map_apply,
ideal.mem_span_singleton] at hb,
obtain ⟨q, hq : b.map f = _⟩ := hb,
refine ⟨b.reverse * polynomial.X ^ (1 + q.nat_degree), _, _⟩,
{ casesI subsingleton_or_nontrivial R with hR, { exact subsingleton.elim _ _ },
rw [polynomial.monic, polynomial.leading_coeff_mul_X_pow, polynomial.reverse_leading_coeff,
← h, polynomial.trailing_coeff],
congr' 1,
exact le_zero_iff.mp (polynomial.nat_trailing_degree_le_of_ne_zero $ h.symm ▸ one_ne_zero) },
{ rw [polynomial.eval₂_eq_eval_map, polynomial.reverse, polynomial.map_mul,
← polynomial.reflect_map, polynomial.map_pow, polynomial.map_X,
← polynomial.rev_at_zero (1 + q.nat_degree), ← polynomial.reflect_monomial,
← polynomial.reflect_mul, pow_zero, mul_one, hq, ← add_assoc, polynomial.reflect_mul,
polynomial.eval_mul, polynomial.reflect_sub, polynomial.reflect_C,
polynomial.monomial_eq_C_mul_X, polynomial.reflect_C_mul_X_pow, polynomial.eval_sub,
polynomial.eval_C_mul, polynomial.eval_C_mul, polynomial.eval_pow, polynomial.eval_X,
polynomial.eval_pow, polynomial.eval_X, polynomial.rev_at_le, add_tsub_cancel_right,
← pow_succ, one_mul, sub_self, zero_mul],
{ exact le_add_self },
{ refine (polynomial.nat_degree_add_le _ _).trans (max_le _ _),
{ exact (polynomial.nat_degree_monomial_le _).trans le_add_self },
{ rw [← polynomial.C_neg, polynomial.nat_degree_C], exact zero_le _ } },
{ exact le_refl _ },
{ exact polynomial.nat_degree_map_le _ _ },
{ rw [pow_zero, polynomial.nat_degree_one], exact zero_le _ } } }
end
@[priority 100]
instance universally_closed.to_integral {X Y : Scheme} (f : X ⟶ Y) [H : integral f] :
universally_closed f :=
by { rw integral_eq_affine_inf_universally_closed at H, exact H.2 }
instance integral_comp {X Y Z : Scheme} (f : X ⟶ Y) (g : Y ⟶ Z)
[integral f] [integral g] : integral (f ≫ g) :=
integral_stable_under_composition _ _ infer_instance infer_instance
end algebraic_geometry
|
{"author": "erdOne", "repo": "lean-AG-morphisms", "sha": "bfb65e7d5c17f333abd7b1806717f12cd29427fd", "save_path": "github-repos/lean/erdOne-lean-AG-morphisms", "path": "github-repos/lean/erdOne-lean-AG-morphisms/lean-AG-morphisms-bfb65e7d5c17f333abd7b1806717f12cd29427fd/src/morphisms/integral.lean"}
|
#!/usr/bin/env python
"""
Performs "standard" analysis on a SMLM movie given parameters.
Hazen 1/18
"""
import numpy
import os
import storm_analysis.sa_library.sa_h5py as saH5Py
import storm_analysis.sa_utilities.fitz_c as fitzC
import storm_analysis.sa_utilities.hdf5_to_bin as hdf5ToBin
import storm_analysis.sa_utilities.hdf5_to_txt as hdf5ToTxt
import storm_analysis.sa_utilities.tracker as tracker
import storm_analysis.sa_utilities.xyz_drift_correction as xyzDriftCorrection
def convert(h5_name, parameters):
"""
Performs requested file format conversions (if any).
"""
if (parameters.getAttr("convert_to", "") != ""):
print()
print("File format conversions.")
if (".bin" in parameters.getAttr("convert_to")):
print(" Converting to Insight3 format.")
hdf5ToBin.hdf5ToBin(h5_name,
h5_name[:-5] + ".bin")
if (".txt" in parameters.getAttr("convert_to")):
print(" Converting to text.")
hdf5ToTxt.hdf5ToTxt(h5_name,
h5_name[:-5] + ".txt")
def driftCorrection(h5_name, parameters):
"""
Performs drift correction.
"""
drift_name = h5_name[:-5] + "_drift.txt"
# Check if we have been asked not to do z drift correction.
# The default is to do the correction.
z_correct = True
if (parameters.getAttr("z_correction", 1) == 0):
z_correct = False
# Get z range from the parameters file. Note these are
# in microns.
#
[min_z, max_z] = parameters.getZRange()
xyzDriftCorrection.xyzDriftCorrection(h5_name,
drift_name,
parameters.getAttr("frame_step"),
parameters.getAttr("d_scale"),
min_z,
max_z,
z_correct)
def peakFinding(find_peaks, movie_reader, data_writer, parameters):
"""
Does the peak finding.
"""
curf = data_writer.getStartFrame()
movie_reader.setup(curf)
# Analyze the movie.
#
# Catch keyboard interrupts & "gracefully" exit.
#
try:
while(movie_reader.nextFrame()):
# Find the localizations.
peaks = find_peaks.analyzeImage(movie_reader)
# Save results
data_writer.addPeaks(peaks, movie_reader)
print("Frame:",
movie_reader.getCurrentFrameNumber(),
data_writer.getNumberAdded(),
data_writer.getTotalPeaks())
print("")
movie_reader.close()
data_writer.close()
find_peaks.cleanUp()
return True
except KeyboardInterrupt:
print("Analysis stopped.")
movie_reader.close()
data_writer.close()
find_peaks.cleanUp()
return False
def standardAnalysis(find_peaks, movie_reader, data_writer, parameters):
"""
Perform standard analysis.
movie_reader - sa_utilities.analysis_io.MovieReader object.
data_writer - sa_utilities.analysis_io.DataWriter object.
"""
# Peak finding
#
print()
print("Peak finding")
if peakFinding(find_peaks, movie_reader, data_writer, parameters):
# Do drift correction, tracking, and zfit (for '3d' model).
#
trackDriftCorrect(data_writer.getFilename(),
parameters)
# Perform requested file format conversions.
#
convert(data_writer.getFilename(), parameters)
print()
print("Analysis complete")
def trackDriftCorrect(h5_name, parameters):
"""
Does tracking and drift correction, as well as '3d' z
fitting (if requested).
"""
# Z fitting, '3d' model, localizations.
#
if parameters.hasAttr("do_zfit") and (parameters.getAttr("do_zfit", 0) != 0):
if (parameters.getAttr("model", "") == "3d"):
print()
print("'3d' localization z fitting.")
zFitting(h5_name, parameters, False)
# Drift correction.
#
if (parameters.getAttr("drift_correction", 0) != 0):
print()
print("Drift Correction.")
driftCorrection(h5_name, parameters)
# Tracking and averaging.
#
# This also adds the category field to the localizations.
#
print()
print("Tracking.")
tracker.tracker(h5_name,
descriptor = parameters.getAttr("descriptor"),
max_gap = parameters.getAttr("max_gap", 0),
radius = parameters.getAttr("radius"))
# Z fitting, '3d' model, tracks.
#
if parameters.hasAttr("do_zfit") and (parameters.getAttr("do_zfit", 0) != 0):
if (parameters.getAttr("model", "") == "3d"):
print()
print("'3d' tracks z fitting.")
zFitting(h5_name, parameters, True)
# Mark out of z range localizations and tracks as category 9.
#
print()
print("Checking z values.")
zCheck(h5_name, parameters)
def zCheck(h5_name, parameters):
"""
Mark all locations outside of the specified z range as category 9.
"""
[min_z, max_z] = parameters.getZRange()
with saH5Py.SAH5Py(h5_name) as h5:
# Localizations.
if h5.hasLocalizationsField("z"):
for fnum, locs in h5.localizationsIterator(fields = ["category", "z"]):
if((fnum%2000)==0):
print(" frame", fnum)
cat = locs["category"]
z_mask = (locs["z"] < min_z) | (locs["z"] > max_z)
cat[z_mask] = 9
h5.addLocalizationData(cat, fnum, "category")
# Tracks.
if h5.hasTracks():
for index, locs in enumerate(h5.tracksIterator(fields = ["category", "z"])):
if((index%5)==0):
print(" track group", index)
cat = locs["category"]
z_mask = (locs["z"] < min_z) | (locs["z"] > max_z)
cat[z_mask] = 9
h5.addTrackData(cat, index, "category")
def zFitting(h5_name, parameters, fit_tracks):
"""
Does z fitting for the '3d' model.
"""
[wx_params, wy_params] = parameters.getWidthParams()
[min_z, max_z] = parameters.getZRange()
z_step = parameters.getAttr("z_step", 1.0e-3)
if fit_tracks:
fitzC.fitzTracks(h5_name,
parameters.getAttr("cutoff"),
wx_params,
wy_params,
min_z,
max_z,
z_step)
else:
fitzC.fitzRaw(h5_name,
parameters.getAttr("cutoff"),
wx_params,
wy_params,
min_z,
max_z,
z_step)
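# Minimal usage sketch (hypothetical, for illustration only).
# The 'find_peaks', 'movie_reader' and 'data_writer' objects
# must be created elsewhere, e.g. by an analysis entry point
# that wires up sa_utilities.analysis_io reader/writer objects
# for a particular movie and fitter:
#
#   standardAnalysis(find_peaks, movie_reader,
#                    data_writer, parameters)
#
# This runs peak finding, tracking/drift correction and any
# requested file format conversions in one call.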
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
|
{"hexsha": "b70b54b0321664b9b555031d7a838172e1555687", "size": 8118, "ext": "py", "lang": "Python", "max_stars_repo_path": "storm_analysis/sa_utilities/std_analysis.py", "max_stars_repo_name": "oxfordni/storm-analysis", "max_stars_repo_head_hexsha": "835a5c17497c563c3632db561ae7e7c9144a8dd1", "max_stars_repo_licenses": ["CNRI-Python"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "storm_analysis/sa_utilities/std_analysis.py", "max_issues_repo_name": "oxfordni/storm-analysis", "max_issues_repo_head_hexsha": "835a5c17497c563c3632db561ae7e7c9144a8dd1", "max_issues_repo_licenses": ["CNRI-Python"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "storm_analysis/sa_utilities/std_analysis.py", "max_forks_repo_name": "oxfordni/storm-analysis", "max_forks_repo_head_hexsha": "835a5c17497c563c3632db561ae7e7c9144a8dd1", "max_forks_repo_licenses": ["CNRI-Python"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0, "max_line_length": 88, "alphanum_fraction": 0.5869672333, "include": true, "reason": "import numpy", "num_tokens": 1800}
|
import matplotlib.pyplot as plt
import numpy as np
from skimage import data
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
from skimage.morphology import disk
# Build a noisy test image containing a low-contrast square:
# the inner region has slightly higher noise amplitude.
noise_mask = np.full((128, 128), 28, dtype=np.uint8)
noise_mask[32:-32, 32:-32] = 30
noise = (noise_mask * np.random.random(noise_mask.shape) - 0.5 *
noise_mask).astype(np.uint8)
img = noise + 128
entr_img = entropy(img, disk(10))
fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(10, 4))
img0 = ax0.imshow(noise_mask, cmap='gray')
ax0.set_title("Object")
ax1.imshow(img, cmap='gray')
ax1.set_title("Noisy image")
ax2.imshow(entr_img, cmap='viridis')
ax2.set_title("Local entropy")
fig.tight_layout()
# Second example: local entropy of a checkerboard image.
image = img_as_ubyte(data.checkerboard())
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 4),
sharex=True, sharey=True)
img0 = ax0.imshow(image, cmap=plt.cm.gray)
ax0.set_title("Image")
ax0.axis("off")
fig.colorbar(img0, ax=ax0)
img1 = ax1.imshow(entropy(image, disk(5)), cmap='gray')
ax1.set_title("Entropy")
ax1.axis("off")
fig.colorbar(img1, ax=ax1)
fig.tight_layout()
plt.show()
|
{"hexsha": "e2eb8ccfeff87492c866c1d13c01a7b7a934651b", "size": 1190, "ext": "py", "lang": "Python", "max_stars_repo_path": "Groups/Group_ID_37/Entropy feature/Entropy.py", "max_stars_repo_name": "aryapushpa/DataScience", "max_stars_repo_head_hexsha": "89ba01c18d3ed36942ffdf3e1f3c68fd08b05324", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-12-13T07:53:22.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-20T18:49:27.000Z", "max_issues_repo_path": "Groups/Group_ID_37/Entropy feature/Entropy.py", "max_issues_repo_name": "Gulnaz-Tabassum/DataScience", "max_issues_repo_head_hexsha": "1fd771f873a9bc0800458fd7c05e228bb6c4e8a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Groups/Group_ID_37/Entropy feature/Entropy.py", "max_forks_repo_name": "Gulnaz-Tabassum/DataScience", "max_forks_repo_head_hexsha": "1fd771f873a9bc0800458fd7c05e228bb6c4e8a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2020-12-12T11:23:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-04T13:09:38.000Z", "avg_line_length": 25.8695652174, "max_line_length": 71, "alphanum_fraction": 0.6731092437, "include": true, "reason": "import numpy", "num_tokens": 341}
|
using DynamicHMCModels
ProjDir = @__DIR__
cd(ProjDir)
df = DataFrame(CSV.read(joinpath("..", "..", "data", "chimpanzees.csv"), delim=';'))
df[!, :pulled_left] = convert(Array{Int64}, df[:, :pulled_left])
df[!, :prosoc_left] = convert(Array{Int64}, df[:, :prosoc_left])
first(df, 5)
Base.@kwdef mutable struct Chimpanzees{Ty <: AbstractVector,
Tx <: AbstractMatrix}
"Observations."
y::Ty
"Covariates"
x::Tx
"Number of observations"
N::Int
end
# Write a function to return a properly dimensioned transformation.
function make_transformation(model::Chimpanzees)
as( (β = as(Array, size(model.x, 2)), ) )
end
# Instantiate the model with data and inits.
N = size(df, 1)
x = hcat(ones(Int64, N), df[:, :prosoc_left]);
y = df[:, :pulled_left]
model = Chimpanzees(;y=y, x=x, N=N);
# Make the model callable with a single argument.
function (model::Chimpanzees)(θ)
@unpack y, x, N = model # extract the data
@unpack β = θ # works on the named tuple too
ll = 0.0
ll += sum(logpdf.(Normal(0, 10), β)) # a & bp
ll += sum([loglikelihood(Binomial(1, logistic(dot(x[i, :], β))), [y[i]]) for i in 1:N])
ll
end
println()
θ = (β = [1.0, 2.0],)
model(θ)
println()
# Wrap the problem with a transformation, then use ForwardDiff for the gradient.
P = TransformedLogDensity(make_transformation(model), model)
∇P = ADgradient(:ForwardDiff, P)
results = mcmc_with_warmup(Random.GLOBAL_RNG, ∇P, 1000)
posterior = P.transformation.(results.chain)
DynamicHMC.Diagnostics.EBFMI(results.tree_statistics)
DynamicHMC.Diagnostics.summarize_tree_statistics(results.tree_statistics)
a3d = Array{Float64, 3}(undef, 1000, 2, 1);
for j in 1:1
for i in 1:1000
a3d[i, 1:2, j] = values(posterior[i].β)
end
end
# Create MCMCChains object
cnames = ["bp", "bpC"]
sections = Dict(
:parameters => ["bp", "bpC"]
)
chns = create_mcmcchains(a3d, cnames, sections, start=1)
stan_result = "
Iterations = 1:1000
Thinning interval = 1
Chains = 1,2,3,4
Samples per chain = 1000
Empirical Posterior Estimates:
Mean SD Naive SE MCSE ESS
a 0.05103234 0.12579086 0.0019889282 0.0035186307 1000
bp 0.55711212 0.18074275 0.0028577937 0.0040160451 1000
Quantiles:
2.5% 25.0% 50.0% 75.0% 97.5%
a -0.19755400 -0.029431425 0.05024655 0.12978825 0.30087758
bp 0.20803447 0.433720250 0.55340400 0.67960975 0.91466915
";
# Summarize results
describe(chns)
# End of `10/m10.2d.jl`
|
{"hexsha": "fad58f438b5ec5375b18a90b593756cc424a834e", "size": 2463, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/10/m10.2d.jl", "max_stars_repo_name": "samusz/DynamicHMCModels.jl", "max_stars_repo_head_hexsha": "33fa8d2f84d0862f3b45c36aa349891b8b8dc5ea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/10/m10.2d.jl", "max_issues_repo_name": "samusz/DynamicHMCModels.jl", "max_issues_repo_head_hexsha": "33fa8d2f84d0862f3b45c36aa349891b8b8dc5ea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/10/m10.2d.jl", "max_forks_repo_name": "samusz/DynamicHMCModels.jl", "max_forks_repo_head_hexsha": "33fa8d2f84d0862f3b45c36aa349891b8b8dc5ea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1326530612, "max_line_length": 91, "alphanum_fraction": 0.6686967113, "num_tokens": 866}
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_allclose
import pytest
import mne
from mne.datasets import testing
from mne.label import read_label
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.utils import run_tests_if_main
from mne.dipole import Dipole
from mne.source_estimate import VolSourceEstimate
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
def _check_stcs(stc1, stc2):
"""Check STC correctness."""
assert_allclose(stc1.times, stc2.times)
assert_allclose(stc1.data, stc2.data)
assert_allclose(stc1.vertices[0], stc2.vertices[0])
assert_allclose(stc1.vertices[1], stc2.vertices[1])
assert_allclose(stc1.tmin, stc2.tmin)
assert_allclose(stc1.tstep, stc2.tstep)
@pytest.mark.timeout(120) # ~30 sec on AppVeyor and Travis Linux
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_inverse_standard():
"""Test (TF-)MxNE inverse computation."""
# Read noise covariance matrix
cov = read_cov(fname_cov)
# Handling average file
loose = 0.0
depth = 0.9
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
label = read_label(fname_label)
assert label.hemi == 'rh'
forward = read_forward_solution(fname_fwd)
forward = convert_forward_solution(forward, surf_ori=True)
# Reduce source space to make test computation faster
inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
loose=loose, depth=depth,
fixed=True, use_cps=True)
stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
method='dSPM')
stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
weights_min = 0.5
# MxNE tests
alpha = 70 # spatial regularization parameter
stc_prox = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='prox')
with pytest.warns(None): # CD
stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='cd')
stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='bcd')
assert_array_almost_equal(stc_prox.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
assert_allclose(stc_prox.data, stc_cd.data, rtol=1e-3, atol=0.0)
assert_allclose(stc_prox.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert stc_prox.vertices[1][0] in label.vertices
assert stc_cd.vertices[1][0] in label.vertices
assert stc_bcd.vertices[1][0] in label.vertices
with pytest.warns(None): # CD
dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True)
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert isinstance(dips[0], Dipole)
assert stc_dip.subject == "sample"
_check_stcs(stc_cd, stc_dip)
with pytest.warns(None): # CD
stc, _ = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
weights=stc_dspm, # gh-6382
active_set_size=10, return_residual=True,
solver='cd')
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
# irMxNE tests
with pytest.warns(None): # CD
stc = mixed_norm(evoked_l21, forward, cov, alpha,
n_mxne_iter=5, loose=loose, depth=depth,
maxit=300, tol=1e-8, active_set_size=10,
solver='cd')
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert stc.vertices == [[63152], [79017]]
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, forward, cov,
loose=loose, depth=depth, maxit=100, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, return_residual=True,
alpha=alpha, l1_ratio=l1_ratio)
assert_array_almost_equal(stc.times, evoked.times, 5)
assert stc.vertices[1][0] in label.vertices
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=101, l1_ratio=0.03)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
"""Test (TF-)MxNE with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
cov = read_cov(fname_cov)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
sphere=(0.0, 0.0, 0.0, 80.0),
bem=None, mindist=5.0,
exclude=2.0)
fwd = mne.make_forward_solution(info, trans=None, src=src,
bem=sphere, eeg=False, meg=True)
alpha = 80.
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.0, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.2, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
# irMxNE tests
stc = mixed_norm(evoked_l21, fwd, cov, alpha,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
# Compare orientation obtained using fit_dipole and gamma_map
# for a simulated evoked containing a single dipole
stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
vertices=stc.vertices[:1],
tmin=stc.tmin,
tstep=stc.tstep)
evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
use_cps=True)
dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, return_as_dipoles=True)
amp_max = [np.max(d.amplitude) for d in dip_mxne]
dip_mxne = dip_mxne[np.argmax(amp_max)]
assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices]
dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
assert dist < 4. # within 4 mm
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
tstep=16, wsize=32, window=0.1, alpha=alpha,
l1_ratio=l1_ratio, return_residual=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked.times, 5)
run_tests_if_main()
|
{"hexsha": "8a097b3f3b1299636198d8f348ab36f6291ca7cf", "size": 9506, "ext": "py", "lang": "Python", "max_stars_repo_path": "mne/inverse_sparse/tests/test_mxne_inverse.py", "max_stars_repo_name": "vferat/mne-python", "max_stars_repo_head_hexsha": "54e07b3257ee44ae28c5253f47ef73909ef23bfd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mne/inverse_sparse/tests/test_mxne_inverse.py", "max_issues_repo_name": "vferat/mne-python", "max_issues_repo_head_hexsha": "54e07b3257ee44ae28c5253f47ef73909ef23bfd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2017-09-12T11:08:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-04T11:11:29.000Z", "max_forks_repo_path": "mne/inverse_sparse/tests/test_mxne_inverse.py", "max_forks_repo_name": "vferat/mne-python", "max_forks_repo_head_hexsha": "54e07b3257ee44ae28c5253f47ef73909ef23bfd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-01-28T13:48:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-10T16:02:11.000Z", "avg_line_length": 43.2090909091, "max_line_length": 79, "alphanum_fraction": 0.6289711761, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2663}
|
abstract type AbstractThunk <: AbstractTangent end
struct MutateThunkException <: Exception end
function Base.showerror(io::IO, e::MutateThunkException)
print(io, "Tried to mutate a thunk, this is not supported. `unthunk` it first.")
return nothing
end
Base.Broadcast.broadcastable(x::AbstractThunk) = broadcastable(unthunk(x))
@inline function Base.iterate(x::AbstractThunk)
val = unthunk(x)
element, state = iterate(val)
return element, (val, state)
end
@inline function Base.iterate(::AbstractThunk, (underlying_object, state))
next = iterate(underlying_object, state)
next === nothing && return nothing
element, new_state = next
return element, (underlying_object, new_state)
end
Base.:(==)(a::AbstractThunk, b::AbstractThunk) = unthunk(a) == unthunk(b)
Base.:(==)(a::AbstractThunk, b) = unthunk(a) == b
Base.:(==)(a, b::AbstractThunk) = a == unthunk(b)
Base.:(-)(a::AbstractThunk) = -unthunk(a)
Base.:(-)(a::AbstractThunk, b) = unthunk(a) - b
Base.:(-)(a, b::AbstractThunk) = a - unthunk(b)
Base.:(/)(a::AbstractThunk, b) = unthunk(a) / b
Base.:(/)(a, b::AbstractThunk) = a / unthunk(b)
Base.real(a::AbstractThunk) = real(unthunk(a))
Base.imag(a::AbstractThunk) = imag(unthunk(a))
Base.Complex(a::AbstractThunk) = Complex(unthunk(a))
Base.Complex(a::AbstractThunk, b::AbstractThunk) = Complex(unthunk(a), unthunk(b))
Base.mapreduce(f, op, a::AbstractThunk; kws...) = mapreduce(f, op, unthunk(a); kws...)
function Base.mapreduce(f, op, itr, a::AbstractThunk; kws...)
return mapreduce(f, op, itr, unthunk(a); kws...)
end
Base.sum(a::AbstractThunk; kws...) = sum(unthunk(a); kws...)
Base.sum!(r, A::AbstractThunk; kws...) = sum!(r, unthunk(A); kws...)
Base.fill(a::AbstractThunk, b::Integer) = fill(unthunk(a), b)
Base.vec(a::AbstractThunk) = vec(unthunk(a))
Base.reshape(a::AbstractThunk, args...) = reshape(unthunk(a), args...)
Base.getindex(a::AbstractThunk, args...) = getindex(unthunk(a), args...)
Base.setindex!(a::AbstractThunk, value, key...) = throw(MutateThunkException())
Base.selectdim(a::AbstractThunk, args...) = selectdim(unthunk(a), args...)
LinearAlgebra.Array(a::AbstractThunk) = Array(unthunk(a))
LinearAlgebra.Matrix(a::AbstractThunk) = Matrix(unthunk(a))
LinearAlgebra.Diagonal(a::AbstractThunk) = Diagonal(unthunk(a))
LinearAlgebra.LowerTriangular(a::AbstractThunk) = LowerTriangular(unthunk(a))
LinearAlgebra.UpperTriangular(a::AbstractThunk) = UpperTriangular(unthunk(a))
LinearAlgebra.Symmetric(a::AbstractThunk, uplo=:U) = Symmetric(unthunk(a), uplo)
LinearAlgebra.Hermitian(a::AbstractThunk, uplo=:U) = Hermitian(unthunk(a), uplo)
function LinearAlgebra.diagm(
kv::Pair{<:Integer,<:AbstractThunk}, kvs::Pair{<:Integer,<:AbstractThunk}...
)
return diagm((k => unthunk(v) for (k, v) in (kv, kvs...))...)
end
function LinearAlgebra.diagm(
m, n, kv::Pair{<:Integer,<:AbstractThunk}, kvs::Pair{<:Integer,<:AbstractThunk}...
)
return diagm(m, n, (k => unthunk(v) for (k, v) in (kv, kvs...))...)
end
LinearAlgebra.tril(a::AbstractThunk) = tril(unthunk(a))
LinearAlgebra.tril(a::AbstractThunk, k) = tril(unthunk(a), k)
LinearAlgebra.triu(a::AbstractThunk) = triu(unthunk(a))
LinearAlgebra.triu(a::AbstractThunk, k) = triu(unthunk(a), k)
LinearAlgebra.tr(a::AbstractThunk) = tr(unthunk(a))
LinearAlgebra.cross(a::AbstractThunk, b) = cross(unthunk(a), b)
LinearAlgebra.cross(a, b::AbstractThunk) = cross(a, unthunk(b))
LinearAlgebra.cross(a::AbstractThunk, b::AbstractThunk) = cross(unthunk(a), unthunk(b))
LinearAlgebra.dot(a::AbstractThunk, b) = dot(unthunk(a), b)
LinearAlgebra.dot(a, b::AbstractThunk) = dot(a, unthunk(b))
LinearAlgebra.dot(a::AbstractThunk, b::AbstractThunk) = dot(unthunk(a), unthunk(b))
LinearAlgebra.ldiv!(a, b::AbstractThunk) = throw(MutateThunkException())
LinearAlgebra.rdiv!(a::AbstractThunk, b) = throw(MutateThunkException())
LinearAlgebra.mul!(A, B::AbstractThunk, C) = mul!(A, unthunk(B), C)
LinearAlgebra.mul!(C::AbstractThunk, A, B, α, β) = throw(MutateThunkException())
function LinearAlgebra.mul!(C::AbstractThunk, A::AbstractThunk, B, α, β)
return throw(MutateThunkException())
end
function LinearAlgebra.mul!(C::AbstractThunk, A, B::AbstractThunk, α, β)
return throw(MutateThunkException())
end
function LinearAlgebra.mul!(C::AbstractThunk, A::AbstractThunk, B::AbstractThunk, α, β)
return throw(MutateThunkException())
end
LinearAlgebra.mul!(C, A::AbstractThunk, B, α, β) = mul!(C, unthunk(A), B, α, β)
LinearAlgebra.mul!(C, A, B::AbstractThunk, α, β) = mul!(C, A, unthunk(B), α, β)
function LinearAlgebra.mul!(C, A::AbstractThunk, B::AbstractThunk, α, β)
return mul!(C, unthunk(A), unthunk(B), α, β)
end
function LinearAlgebra.BLAS.ger!(alpha, x::AbstractThunk, y, A)
return LinearAlgebra.BLAS.ger!(alpha, unthunk(x), y, A)
end
function LinearAlgebra.BLAS.ger!(alpha, x, y::AbstractThunk, A)
return LinearAlgebra.BLAS.ger!(alpha, x, unthunk(y), A)
end
function LinearAlgebra.BLAS.gemv!(tA, alpha, A, x::AbstractThunk, beta, y)
return LinearAlgebra.BLAS.gemv!(tA, alpha, A, unthunk(x), beta, y)
end
function LinearAlgebra.BLAS.gemv(tA, alpha, A, x::AbstractThunk)
return LinearAlgebra.BLAS.gemv(tA, alpha, A, unthunk(x))
end
function LinearAlgebra.BLAS.scal!(n, a::AbstractThunk, X, incx)
return LinearAlgebra.BLAS.scal!(n, unthunk(a), X, incx)
end
function LinearAlgebra.LAPACK.trsyl!(transa, transb, A, B, C::AbstractThunk, isgn=1)
return throw(MutateThunkException())
end
"""
@thunk expr
Define a [`Thunk`](@ref) wrapping the `expr`, to lazily defer its evaluation.
"""
macro thunk(body)
# Basically `:(Thunk(() -> $(esc(body))))` but use the location where it is defined.
# so we get useful stack traces if it errors.
func = Expr(:->, Expr(:tuple), Expr(:block, __source__, body))
return :(Thunk($(esc(func))))
end
"""
unthunk(x)
On `AbstractThunk`s this removes 1 layer of thunking.
On any other type, it is the identity operation.
"""
@inline unthunk(x) = x
Base.conj(x::AbstractThunk) = @thunk(conj(unthunk(x)))
Base.adjoint(x::AbstractThunk) = @thunk(adjoint(unthunk(x)))
Base.transpose(x::AbstractThunk) = @thunk(transpose(unthunk(x)))
#####
##### `Thunk`
#####
"""
Thunk(()->v)
A thunk is a deferred computation.
It wraps a zero-argument closure that, when invoked, returns a differential.
`@thunk(v)` is a macro that expands into `Thunk(()->v)`.
To evaluate the wrapped closure, call [`unthunk`](@ref) which is a no-op when the
argument is not a `Thunk`.
```jldoctest
julia> t = @thunk(3)
Thunk(var"#4#5"())
julia> unthunk(t)
3
```
### When to `@thunk`?
When writing `rrule`s (and to a lesser extent `frule`s), it is important to `@thunk`
appropriately.
Propagation rules that return multiple derivatives may not have all derivatives used.
By `@thunk`ing the work required for each derivative, they then compute only what is needed.
#### How do thunks prevent work?
If we have `res = pullback(...) = @thunk(f(x)), @thunk(g(x))`
then if we did `dx + res[1]` then only `f(x)` would be evaluated, not `g(x)`.
Also if we did `ZeroTangent() * res[1]` then the result would be `ZeroTangent()` and `f(x)` would not be evaluated.
#### So why not thunk everything?
`@thunk` creates a closure over the expression, which (effectively) creates a `struct`
with a field for each variable used in the expression, and whose call is overloaded.
Do not use `@thunk` if this would be as much or more work than actually evaluating the expression itself.
This is commonly the case for scalar operators.
For more details see the manual section [on using thunks effectively](http://www.juliadiff.org/ChainRulesCore.jl/dev/writing_good_rules.html#Use-Thunks-appropriately-1)
"""
struct Thunk{F} <: AbstractThunk
f::F
end
@inline unthunk(x::Thunk) = x.f()
Base.show(io::IO, x::Thunk) = print(io, "Thunk($(repr(x.f)))")
Base.convert(::Type{<:Thunk}, a::AbstractZero) = @thunk(a)
"""
InplaceableThunk(add!::Function, val::Thunk)
A wrapper for a `Thunk`, that allows it to define an inplace `add!` function.
`add!` should be defined such that: `ithunk.add!(Δ) = Δ .+= ithunk.val`
but it should do this more efficiently than simply doing it directly.
(Otherwise one can just use a normal `Thunk`).
Most operations on an `InplaceableThunk` treat it just like a normal `Thunk`,
and destroy its ability to be applied in place.
"""
struct InplaceableThunk{T<:Thunk,F} <: AbstractThunk
add!::F
val::T
end
unthunk(x::InplaceableThunk) = unthunk(x.val)
function Base.show(io::IO, x::InplaceableThunk)
return print(io, "InplaceableThunk($(repr(x.val)), $(repr(x.add!)))")
end
|
{"hexsha": "16384d69e246dff93b133098762b81ae19c6f0a4", "size": 8568, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/tangent_types/thunks.jl", "max_stars_repo_name": "st--/ChainRulesCore.jl", "max_stars_repo_head_hexsha": "834901efde5698b7ae0ace1bd2a0ea7953e7fd1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tangent_types/thunks.jl", "max_issues_repo_name": "st--/ChainRulesCore.jl", "max_issues_repo_head_hexsha": "834901efde5698b7ae0ace1bd2a0ea7953e7fd1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tangent_types/thunks.jl", "max_forks_repo_name": "st--/ChainRulesCore.jl", "max_forks_repo_head_hexsha": "834901efde5698b7ae0ace1bd2a0ea7953e7fd1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4215246637, "max_line_length": 168, "alphanum_fraction": 0.7040149393, "num_tokens": 2608}
|
\documentclass[11pt]{article}
\usepackage{acl2014}
\usepackage{times}
\usepackage{url}
\usepackage{latexsym}
\usepackage{graphicx}
\usepackage{adjustbox}
\usepackage{array}
\usepackage{booktabs}
\usepackage{multirow}
\usepackage{multicol}% http://ctan.org/pkg/multicols
\usepackage{tabularx, booktabs}
\usepackage{framed}
\usepackage{setspace}
% Change this if needed to set titlebox size.
%\setlength\titlebox{5cm}
\title{LING 573: Final Project Report}
\author{Clara Gordon \\
University of Washington \\
Seattle, WA \\
{\tt cgordon1@uw.edu} \\\And
Claire Jaja \\
University of Washington \\
Seattle, WA \\
{\tt cjaja@uw.edu} \\\And
Andrea Kahn \\
University of Washington \\
Seattle, WA \\
{\tt amkahn@uw.edu} \\}
\date{}
\begin{document}
\maketitle
\begin{abstract}
In this paper, we describe a question answering system for handling factoid questions. The system, implemented in Python, follows a typical pipeline, including query processing, information retrieval, and answer candidate extraction and ranking modules. Using the AQUAINT corpus of English News Text as a document collection, it produces answers for questions from the QA track of the Text Retrieval Conference (TREC).
\end{abstract}
\section{Introduction}
Question answering (QA) has long been a prominent problem in the field of natural language processing. In contrast to information retrieval (IR) systems, which return relevant documents based on search terms, a question answering system takes a natural-language question as input and outputs a natural-language answer. IR is typically a component of the system, but the addition of question and answer processing prevents users from having to sift through long documents to find the information they are seeking.
We implemented a question answering system, the Question Answering Integrated Linguistic System (QuAILS) (see logo in Figure 1), to handle factoid questions from the QA track of the Text Retrieval Conference (TREC), using the AQUAINT Corpus of English News Text as a document collection. Our system achieves lenient MRR scores in the upper .30s, meaning that, on average, the correct answer appears third in the list of answers returned.
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{QuAILS.jpg}
\caption{QuAILS logo.}
\end{figure}
\section{System Overview}
Our system is coded in Python. Third-party modules that we use include Indri/Lemur (for IR), pymur (a Python wrapper for Indri/Lemur), Beautiful Soup (for XML parsing), and NLTK (for tokenization, part of speech tagging, named entity chunking, and thesaurus-based query expansion). We chose Indri/Lemur for IR because of its specific handling of TREC-formatted question files. We use a stopword list taken from the Indri/Lemur documentation.
We use Indri's IndriBuildIndex code to build an index. IndriBuildIndex takes a parameter file as an argument, which gives the path to the document collection, the path to the output index, and other parameters. Indexing of the AQUAINT corpus takes approximately 15 minutes. We created several different versions of the index, using both Porter and Krovetz stemmers, both including and excluding a list of stopwords. Having found previous best results with the Porter-stemmed index including a stopword list, we conducted our final set of tests using that particular index and opted to construct the index for the AQUAINT-2 corpus with those parameters as well.
The core of our system is a three-part pipeline, consisting of modules for question processing, IR, and answer processing, respectively. The system architecture is shown in Figure 2.
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{system_architecture.jpg}
\caption{System architecture.}
\end{figure}
\section{Approach}
The question answering system is called by a wrapper script, question\_answering.py, which takes a mandatory run-tag argument and 20 optional system parameters (see Table 1). It uses the third-party module Beautiful Soup to parse the XML in the TREC document and generate a list of questions. Python's multiprocessing module is used to parallelize the questions in the question file, so that multiple questions may be passed through the pipeline (described in detail below) at once. For each answer in the group returned for a question, the wrapper script prints the question ID, the run tag, the document ID associated with the answer, and the answer to the output file.
A separate script, cache\_web\_results.py, submits each unprocessed factoid query to Ask.com for later use in query expansion and information retrieval. We omit any query processing in this portion of the system because we assume Ask.com has its own query processing algorithms which can better handle the raw query. The script uses BeautifulSoup to scrape the text snippets associated with each result, and stores them in a text document in the cached\_web\_results directory. After running several experiments, we have found that four pages of results is optimal.
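As a minimal sketch of this caching step (the URL format and the tags scraped below are illustrative placeholders, not the actual Ask.com markup, and error handling is omitted):
\begin{verbatim}
from urllib.parse import quote
from urllib.request import urlopen
from bs4 import BeautifulSoup

def cache_snippets(query, pages=4):
    # Fetch each result page and keep visible
    # paragraph text as candidate snippets.
    snippets = []
    for page in range(1, pages + 1):
        url = ("http://www.ask.com/web?q=%s&page=%d"
               % (quote(query), page))
        soup = BeautifulSoup(urlopen(url).read(),
                             "html.parser")
        snippets += [t.get_text()
                     for t in soup.find_all("p")]
    return snippets
\end{verbatim}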
\begin{table*}[!ht]
\centering
\caption{Parameters (Note: Settings displayed are those used for the final evaluation run.)}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{.5in} >{\centering}m{2in} >{\centering}m{2in} >{\centering\arraybackslash}m{1in} }
\toprule
\textbf{Category} & \textbf{Parameter} & \textbf{Description} & \textbf{Setting} \\
\midrule
General & q\_file & filepath of TREC question file & /dropbox/13-14/573/Data/Questions/evaltest/ QA2007\_testset.xml \\
& stoplist & filepath of list of stopwords & src/stoplist.dft \\
& web\_cache & filepath of cached Ask.com web results & src/cached\_web\_results/ QA2007\_testset. 4pg.web\_cache \\
& index & filepath of Indri index & src/indexes/AQUAINT-2/porter.stoplist \\
QP & stopword\_filter\_target & whether to perform target stopword filtering & False \\
& target\_upweighting & upweighting factor for target terms & 1 \\
& ne\_upweighting & upweighting factor for NEs & 1 \\
& num\_web\_exp\_terms & number of web-redundancy terms added to query & 5 \\
& weight\_web\_exp\_terms & weight of web-redundancy terms & 0.5 \\
& num\_lin\_exp\_terms & number of Lin-thesaurus terms added to query & 0 \\
& weight\_lin\_exp\_query & weight given to Lin-thesaurus-expanded query & 0 \\
IR & indri\_passages & number of passages returned by Indri & 20 \\
& passage\_length & character length of passage returned by indri & 75 \\
& indri\_window\_size & size of window in Indri search & 30 \\
& snippet\_weight & weight assigned to web snippets & 0.9 \\
AP & num\_docs & number of AQUAINT docs answer must occur in & 1 \\
& num\_passages & number of passages answer must occur in & 10 \\
& snippet\_passage\_count & how many passages a web snippet counts as & 10 \\
& passages\_per\_doc\_id & number of passages to return per document ID & 1 \\
& passages\_per\_answer\_candidate & number of passages to return per answer candidate & 1 \\
\bottomrule
\end{tabular}
\end{table*}
Classes that are used by multiple modules in the pipeline are defined in the module general\_classes.py. These include:
\begin{itemize}
\item Question class: A Question object stores as attributes the TREC question ID, the question type, the TREC natural-language question stored as a string, and the ``target" (the context given for a set of questions in TREC 2004-2006; defaults to None).
\item SearchQuery class: A SearchQuery object stores as attributes a dictionary of search terms, each of which can be one or more words, mapped to weights indicating how important those terms are perceived as being, and an overall weight for the query, which will be used to calculate the probability of the corresponding AnswerCandidate.
\item AnswerTemplate class: An AnswerTemplate object stores as attributes the question ID, a set of basic search query terms from the original question, and a dictionary for the weights of each answer type, where the weights will be used to reweight AnswerCandidate objects during answer processing.
\item Passage class: A Passage object stores as attributes the text of a snippet, its weight, and the document ID.
\end{itemize}
\subsection{System architecture}
Our pipeline for processing a single question consists of three components found in three separate modules, described below. The pipeline takes a Question object as input and outputs a list of AnswerCandidate objects.
\subsubsection{Query processing}
The query processing module is responsible for creating a set of weighted search queries (which are passed to the information retrieval module to be used for passage retrieval) and instantiating an answer template (which is passed to the answer processing module to be used during answer ranking).
A QueryProcessor object is initialized with a Question object, a stopword list, and a set of cached web snippets. It then generates a vocabulary, or a list of words (i.e., whitespace-tokenized strings) occurring in the question/target and their counts. We use NLTK's named-entity chunker to remove named entities from the question before tokenizing to prevent the named entities themselves from being tokenized. (Performing named entity chunking on the target caused our accuracy to drop significantly, so we chose to forgo named entity chunking of the target and did not consider target tokens when performing Lin thesaurus-based query expansion.) The named entities are then added back into the set of query terms when the queries are generated. We experimented with upweighting the named entities, but our final set of parameter values did not include named entity upweighting (i.e., the ne\_upweighting parameter was set to its default value of 1). Some implementation decisions made during this step drastically affected our scores; in addition to performing or not performing named entity chunking on the target, the choice as to whether or not to filter stopwords from the target also had a significant effect on our score. (We made stopword\_filter\_target a boolean parameter with the default value of False, and found this setting to be optimal.)
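As a minimal sketch of this named-entity step (assuming NLTK's default tokenizer, tagger, and chunker; the function name is ours):
\begin{verbatim}
import nltk

def named_entities(question):
    # Chunk the POS-tagged question and collect
    # the words under each named-entity subtree.
    tree = nltk.ne_chunk(
        nltk.pos_tag(nltk.word_tokenize(question)))
    return [" ".join(w for w, t in st.leaves())
            for st in tree.subtrees()
            if st.label() != "S"]
\end{verbatim}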
After instantiating a QueryProcessor object, the wrapper script then calls a method in the query-processing module that instantiates an AnswerTemplate object that will subsequently be passed to the answer processing module. The answer template contains the query processor's vocabulary so that the answer processing module can disregard answer candidates that contain query terms. In addition, this method employs regular-expression matching on the original natural-language question to identify questions as requiring an answer that is the name of a person (i.e., proper noun), the name of an organization (i.e., proper noun), the name of an object (i.e., common noun), the name of a location (i.e., proper noun), a time expression, a number expression, or some expression that does not fall into one of these categories. Some regular expressions correspond to one of these answer types; others correspond to multiple answer types. A regular expression match causes the corresponding answer types to be given a higher weight in the AnswerTemplate dictionary of answer type weights. In the final version of the system, we set the weights of answer types matched by a regular expression to 0.9, the weights of all other answer types to 0.1, and the weight of ``other" to 0.5 if no regular expression matches. (These are weights, not probabilities, so the choice of scale is somewhat arbitrary; what matters is their magnitude relative to one another.)
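For illustration, a simplified version of this answer-typing step might look as follows (the patterns shown are stand-ins for our larger rule set, not a verbatim copy of it):
\begin{verbatim}
import re

TYPE_PATTERNS = {
    "person": r"^[Ww]ho\b",
    "location": r"^[Ww]here\b",
    "time": r"^[Ww]hen\b",
    "number": r"[Hh]ow (?:many|much)\b",
}
ANSWER_TYPES = ["person", "organization", "object",
                "location", "time", "number", "other"]

def answer_type_weights(question):
    weights = {t: 0.1 for t in ANSWER_TYPES}
    matched = False
    for ans_type, pat in TYPE_PATTERNS.items():
        if re.search(pat, question):
            weights[ans_type] = 0.9
            matched = True
    if not matched:
        weights["other"] = 0.5
    return weights
\end{verbatim}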
The query processor is then used to generate a list of weighted search queries, each of which is in turn a set of weighted search terms. In the final version of the system, the query processor generates three queries: an initial search query that contains the set of words occurring in the question and the question target, with the word counts as weights and the query weight set to 1; a web-expanded version of the initial query, with the weights of the expanded terms taken as a parameter and the query weight set to 1; and a Lin thesaurus-expanded version of the initial query, with the weights of the expanded terms set by the Lin expansion function (see below) and the query weight taken as a parameter.
We implemented two types of query expansion: web-based and Lin thesaurus-based. Web-based query expansion adds the \emph{n} most frequently appearing unigrams in the web snippets associated with a question (not including stopwords or words already appearing in the query) to the original set of weighted query terms, assigning these terms some default weight. An n value and a weight for the terms are passed to the query processing module as parameters, with default values 5 and 0.5, respectively; we found these parameters to be optimal given the combinations we tested (though with more time, we would have chosen to test these parameters more extensively; see Discussion).
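As a minimal sketch of web-based expansion (assuming whitespace-tokenized snippets; \texttt{query\_terms} maps terms to weights):
\begin{verbatim}
from collections import Counter

def expand_with_web(query_terms, snippets,
                    stopwords, n=5, weight=0.5):
    # Count snippet unigrams that are neither
    # stopwords nor existing query terms.
    counts = Counter(
        w for s in snippets
        for w in s.lower().split()
        if w not in stopwords
        and w not in query_terms)
    expanded = dict(query_terms)
    for term, _ in counts.most_common(n):
        expanded[term] = weight
    return expanded
\end{verbatim}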
We also implemented query expansion using NLTK's thesaurus corpus from \newcite{lin1998automatic} and its associated scored\_synonyms method, which returns similar terms grouped by part of speech (noun, verb, or adjective). Our expand\_query method returns a SearchQuery object containing all of the original query terms and their corresponding weights, as well as the top \emph{n} synonyms for certain query terms, their weights being the product of the weight of the original term of which they are synonyms and the weight returned by scored\_synonyms. To determine which query terms to expand, we performed part of speech-tagging on the original question text, followed by named entity-chunking (both using NLTK), and only expanded non-named entity terms that did not appear in the stopword list; in addition, we only expanded terms that were tagged as nouns or verbs and only returned related thesaurus terms classified by the thesaurus as being nouns or verbs, respectively. An \emph{n} value and a weight for this expanded query are passed as parameters, with default values of 0 and 0. We tested several non-default values of these parameters and found that changing these values negatively affected our MRR, so we did not make use of Lin query expansion in our final run.
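As an illustrative sketch of the thesaurus lookup (assuming NLTK's \texttt{lin\_thesaurus} reader, whose \texttt{simN.lsp} file holds the noun section; the weighting mirrors the description above):
\begin{verbatim}
from nltk.corpus import lin_thesaurus as thes

def lin_expand(term, term_weight, n):
    # Top-n scored noun synonyms, weighted by the
    # original term's weight times the Lin score.
    scored = thes.scored_synonyms(
        term, fileid="simN.lsp")
    top = sorted(scored,
                 key=lambda ws: -ws[1])[:n]
    return {w: term_weight * s for w, s in top}
\end{verbatim}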
\subsubsection{Information retrieval}
The information retrieval module uses the Indri/Lemur IR system to retrieve \emph{n} passages for each set of query terms passed to it by the query processing module, where \emph{n} is a parameter. Empirical tests show that returning 20 Indri passages provides the best results. We use Base64 conversion for our queries in order to avoid encoding errors with punctuation. Although we use Indri directly to index the document collection, this portion of the system uses the pymur wrapper to run the query and retrieve passage results. We use the given offset indices to retrieve the passage from the original text. Since these indices refer to the stemmed text, however, the passages may be a slightly different window from that selected by Indri. The pymur commands provide the document ID number and document weight. Together with the reconstructed passage text, these are used to construct a Passage object for each passage.
We make use of certain Indri Query Language functionalities in addition to Base64 conversion. Because some of the ``tokens'' contained in the Query objects are in fact space-delimited named entity phrases, the syntax of the Indri query treats each token as an ordered window that must be matched completely. Each of these windows is weighted according to the weight specified in the Query object. The length of the returned passage and the search window are also specified by parameters. We found the best results using passages of a length of 75 words and a search window of length 30.
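For illustration only (a hypothetical query, not one emitted verbatim by our system), such a query might be rendered roughly as:
\begin{verbatim}
#combine[passage75:30](
  #weight( 2.0 #1(san francisco)
           1.0 earthquake
           0.5 magnitude ) )
\end{verbatim}
where \texttt{\#1(...)} is an exact ordered window and the bracketed extent restricts scoring to 75-word passages advanced in 30-word steps.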
We implement web boosting by using the cached web results stored before runtime to augment the returned passages. These cached results are retrieved and stored as Passage objects, similar to the AQUAINT passages. The weights of these passages are also parameterized in our script. Initial experiments showed that these web snippets are very likely to contain the answer, and we weight them similarly to a very high scoring AQUAINT document in our IR framework: log(0.9). The document ID for these Passage objects is set to None to indicate that these are web snippets, not passages coming from AQUAINT documents. Together, the AQUAINT and web-based Passage objects are passed on to the answer extraction module.
\subsubsection{Answer candidate extraction and ranking}
The answer processing module is used to extract and rank answers. An object of this class is initialized with a list of Passage objects, an AnswerTemplate object, and an optional stopword list. This object can then generate and rank answers. This is done in a series of steps, inspired by \newcite{lin2007exploration}.
First, possible answers are extracted from the Passages by generating all unigrams, bigrams, trigrams, and 4-grams from the text of each passage; the score of each of these possible answers is the sum of the retrieval scores of the passages it is found in. If an n-gram appears multiple times in a passage, the n-gram's score is updated each time the n-gram appears, so a possible answer that appears frequently in a passage is scored higher than one that appears just once in the passage. At the end, a list of AnswerCandidate objects is generated, each of which contains the question ID, a possible answer, its score, the document collection documents (and specific passages within those documents) it is found in, and the total number of passages it is found in. Since we believe that the web snippets are more likely to contain the correct answer than the documents from the document collection, we allow each web snippet to count as \emph{n} passages, where \emph{n} is a parameter passed in to the wrapper script, with a default value of 10. We found best results with this default value.
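As a minimal sketch of the extraction step (where \texttt{p.text} and \texttt{p.weight} stand in for the text and retrieval score of our Passage objects):
\begin{verbatim}
from collections import defaultdict

def extract_candidates(passages, max_n=4):
    # Each occurrence of an n-gram adds the
    # containing passage's score to its total.
    scores = defaultdict(float)
    for p in passages:
        tokens = p.text.split()
        for n in range(1, max_n + 1):
            for i in range(len(tokens) - n + 1):
                ngram = " ".join(tokens[i:i + n])
                scores[ngram] += p.weight
    return scores
\end{verbatim}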
After this, the answer candidates go through a filtering step. At this step, any answers that start or end with a stopword or standalone punctuation token, or contain any words from the original query (retrieved from the AnswerTemplate) are discarded. Additionally, any answers that did not appear in at least \emph{m} document collection documents are discarded, and any answers that did not appear in at least \emph{p} total passages are discarded, where \emph{m} and \emph{p} are parameters passed in to the wrapper script, with default values of \emph{m} = 1 and \emph{p} = 10. We found best results with these default values.
Then, a combining step updates the score of each answer to be the current score plus the sum of the scores of the unigram answers contained within it. This prevents unigrams from being the highest ranked answers and instead favors longer answers.
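Writing $u \in a$ for the unigram candidates contained in a longer candidate $a$, this combining step performs
\[
\mathrm{score}(a) \leftarrow \mathrm{score}(a) + \sum_{u \in a} \mathrm{score}(u).
\]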
Next, the answers are reweighted. Regular expressions are used to guess the type of each answer. Three different categories are captured; 1) person, organization, or location (identified by an answer beginning with a capital letter), 2) time expression (identified by an answer containing month words or a pattern resembling a date), and 3) numbers (identified by digits as well as number words). Then, the weights from the AnswerTemplate are applied accordingly. Since person, organization, and location are not distinguished in the answers, the highest weight among these three in the AnswerTemplate is used for any answer identified as being in that category.
Then, the answer candidates are ranked by score. AQUAINT passages are chosen based on these n-gram answers, given a few different criteria. Up to \emph{q} passages are returned per answer candidate, with up to \emph{r} passages allowed per document ID for the question. These parameters are set by default to \emph{q} = 1 and \emph{r} = 1, and these default values gave the best results in our tests. Starting at the highest-ranked answer candidate, the document ID which contributed the most passages containing that answer candidate is returned, along with the passage from that document ID with the highest Indri retrieval score, provided that the maximum number of passages per document ID (parameter \emph{r}) for that document ID has not already been met. The passage is truncated to 250 characters, centered on the occurrence of the n-gram answer candidate. This continues (moving on to the document ID which contributed the next most passages) until the maximum number of passages per answer candidate (parameter \emph{q}) has been reached for that answer candidate, and then the process continues with the next highest ranked answer candidate.
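A compressed sketch of this selection loop (illustrative; \texttt{doc\_ids\_by\_count} and \texttt{best\_passage} are hypothetical helpers standing in for lookups on our AnswerCandidate objects):
\begin{verbatim}
from collections import Counter

def select_passages(ranked, q=1, r=1):
    per_doc, out = Counter(), []
    for cand in ranked:
        taken = 0
        # Doc IDs ordered by how many passages
        # they contributed for this candidate.
        for doc_id in cand.doc_ids_by_count():
            if taken == q:
                break
            if per_doc[doc_id] < r:
                out.append(
                    cand.best_passage(doc_id))
                per_doc[doc_id] += 1
                taken += 1
    return out
\end{verbatim}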
\section{Results}
We evaluated our results using the mean reciprocal rank (MRR) measure with strict and lenient evaluation. The results, based on automatic pattern scoring, are shown in the tables below. All scores are rounded to four significant digits.
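For reference, with $\mathrm{rank}_i$ the position of the first correct answer returned for question $i$ (taking $1/\mathrm{rank}_i = 0$ when no correct answer appears), the mean reciprocal rank over a question set $Q$ is
\[
\mathrm{MRR} = \frac{1}{|Q|} \sum_{i=1}^{|Q|} \frac{1}{\mathrm{rank}_i}.
\]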
\begin{table}[ht]
\centering
\caption{Passage retrieval experiments on dev-test data.}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{.5in} >{\centering}m{1in} >{\centering\arraybackslash}m{1in} }
\toprule
\textbf{Passage Count} & \textbf{Strict Score} & \textbf{Lenient Score} \\
\midrule
20 & 0.2072 & 0.3580 \\
40 & 0.1922 & 0.3514 \\
60 & 0.1837 & 0.3380 \\
80 & 0.1667 & 0.3282 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht]
\centering
\caption{Web cache experiments on dev-test data.}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{.5in} >{\centering}m{1in} >{\centering\arraybackslash}m{1in} }
\toprule
\textbf{Webpage Count} & \textbf{Strict Score} & \textbf{Lenient Score} \\
\midrule
2 & 0.1871 & 0.3480 \\
3 & 0.1922 & 0.3514 \\
4 & 0.1913 & 0.3524 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht]
\centering
\caption{Indri search experiments on dev-test data.}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{.5in} >{\centering\bfseries}m{.5in} >{\centering}m{.75in} >{\centering\arraybackslash}m{.75in} }
\toprule
\textbf{Passage Length} & \textbf{Window Length} & \textbf{Strict Score} & \textbf{Lenient Score} \\
\midrule
75 & 30 & 0.1979 & 0.3680 \\
100 & 50 & 0.1922 & 0.3514 \\
150 & 75 & 0.1835 & 0.3394 \\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[ht]
\centering
\caption{Final results}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{.75in} >{\centering}m{.75in} >{\centering\arraybackslash}m{.75in} }
\toprule
\textbf{Deliverable} & \textbf{Strict Score} & \textbf{Lenient Score} \\
\midrule
D2 & 0.0051 & 0.0289 \\
D3 & 0.1451 & 0.2639 \\
D4 (dev) & 0.2160 & 0.3773 \\
D4 (eval) & 0.1766 & 0.3584 \\
\bottomrule
\end{tabular}
\end{table}
\section{Discussion}
One of the salient takeaways of this project was that shallow and redundancy-based techniques go a long way. We achieved lenient MRR scores in the high .30s without doing any sort of deep processing (e.g., parsing, query reformulation, minimal recursion semantics), with especially large gains from web-based query expansion and the use of web snippets in generating and ranking answer candidates. With more time, we believe we could have attained even higher MRR scores through augmenting our system to employ some of the aforementioned deep processing techniques.
In terms of implementation, the choice to parallelize the processing of each question and the choice to expose settings as parameters to our wrapper script were immensely helpful. Our final system ran in approximately five minutes with a single job on our shared computing cluster. We were able to run 55 experiments on the development data and chose the set of parameter values that gave the highest MRR scores as the parameter settings for our final evaluation test run.
A limiting factor in determining optimal parameter settings was computing power. We believe that we would have been able to achieve higher scores had we been able to run more experiments. We chose combinations of parameters to test largely by isolating one parameter and varying it across several experiments while keeping other parameters consistent. Unfortunately, this doesn't capture how the parameters interact with one another, and we have reason to believe that many of them do (e.g., number of pages of web-cached results and number of web-based expansion terms). However, even assuming an average of three desired test settings for each parameter, testing every combination of values for 20 parameters would mean running $3^{20}$ (approximately 3.5 billion) experiments. Needless to say, this was not realistic given our computing cluster and the project timeline.
The results of the experiments we ran showed the biggest improvements from tweaks to parameters within information retrieval and to those governing web results. In particular, returning fewer document collection passages within Indri gave us better results (see Table 2). Additionally, we experimented with the passage and window length for our Indri queries, with results showing that a passage of 75 words with a 30 word window gave the best results (see Table 4). In terms of web results, we tested varying the number of pages of web search results for our cached web snippets; the best results were with three pages (for strict results) and four pages (for lenient results) (see Table 3). For brevity, details of our other experiments are not included. The results with the final best settings for the parameters (see Table 1 for a full list of these settings) are shown in the last two rows of Table 5. These results show a significant improvement over our results for previous deliverables (shown in the first two rows of the table) and illustrate a consistent improvement of our overall system throughout the past ten weeks.
Error analysis (see Table 6) on our final results shows that we return the correct answer in our 20 responses 58.6\% of the time for the development set and 57.2\% of the time for the evaluation set. About half of these correct answers are returned as the first answer. A much larger number, however, are returned within the top 3. If we were able to improve our ranking so that whenever the correct answer is in the top three it is returned first, we would have had lenient scores over .451 for the development set and .428 for the evaluation set. This does not include correct answers that we returned further down in the list. Thus, it seems that reranking would be a fruitful endeavor for improving the MRR scores.
At the same time, this means that nearly half of the questions do not have a correct answer anywhere in the list of answers returned. Improvements to answer generation and passage retrieval may thus be another promising avenue for improving the scores.
\begin{table}[ht]
\centering
\caption{Error analysis.}
\renewcommand{\arraystretch}{1.5}% Spread rows out...
\begin{tabular}{>{\centering\bfseries}m{1.2in} >{\centering}m{.5in} >{\centering\arraybackslash}m{.5in} }
\toprule
& \textbf{2006 (dev)} & \textbf{2007 (eval)} \\
\midrule
\# of questions w/ answer patterns & 386 & 290 \\
\# of questions where QuAILS returned correct answer & 226 & 166 \\
\# of questions where QuAILS returned correct answer in top 10 & 224 & 161 \\
\# of questions where QuAILS returned correct answer in top 3 & 174 & 124 \\
\# of questions where QuAILS returned correct answer as first answer & 107 & 73 \\
\bottomrule
\end{tabular}
\end{table}
\section{Conclusion}
We implemented a question answering system to handle factoid questions from the TREC QA shared task using the AQUAINT Corpus as a document collection. We experimented with question classification, query expansion using web cached snippets, and Lin synonym-based query expansion. Within information retrieval, we tested using different Indri parameters, as well as including web cached snippets as passages. For answer processing, we implemented ARANEA-style n-gram generation, filtering, and ranking. Through these various experiments, we were able to achieve lenient MRR results on both the development and test sets in the upper .30s and strict MRR results around .20.
\nocite{*}
\bibliographystyle{acl}
\bibliography{references}
\end{document}
|
{"hexsha": "19aad98c01ff03c6c72922bacd7b2d3a9f90641d", "size": 31743, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/D4/D4.tex", "max_stars_repo_name": "amkahn/question-answering", "max_stars_repo_head_hexsha": "bbdd82561a025569efd904b94153a1e6f233db6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/D4/D4.tex", "max_issues_repo_name": "amkahn/question-answering", "max_issues_repo_head_hexsha": "bbdd82561a025569efd904b94153a1e6f233db6f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/D4/D4.tex", "max_forks_repo_name": "amkahn/question-answering", "max_forks_repo_head_hexsha": "bbdd82561a025569efd904b94153a1e6f233db6f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 99.5078369906, "max_line_length": 1451, "alphanum_fraction": 0.7665312037, "num_tokens": 7668}
|
# This code is based on : https://www.tensorflow.org/guide/keras/functional#all_models_are_callable_just_like_layers
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import cv2
"""
In the code below, get_model function returns a convolution model.
model1 , model2 , model3 creates 3 instances of the same model architecture.
Using the layers.average function the ensemble_model ensemble the set of models into a single model that averages their predictions.
"""
inputs = keras.Input(shape=(32, 32, 3) , name="img")
# Define the First Hidden Layers:
x = layers.Dense(255, activation="relu" , name="Add_Dense1")(inputs)
# Creating First Data Path way:
x1 = layers.Dense(32, activation="relu" , name="Add_Dense2")(x)
x1 = layers.Dense(16, activation="relu" , name="Add_Dense3")(x1)
# Creating Second Data Path way:
x2 = layers.Dense(128, activation="relu" , name="Add_Dense4")(x)
x2 = layers.Dense(64, activation="relu" , name="Add_Dense5")(x2)
x2 = layers.Dense(16, activation="relu" , name="Add_Dense6")(x2)
# Merging the Data Path ways:
added = layers.Add()([x1, x2])
# Adding the Last Layer:
outputs = layers.Dense(10)(added)
# Creating the Model:
model = keras.Model(inputs=inputs, outputs=outputs, name="model_name")
# Getting the Model summary:
model.summary()
# Display the Model in a Block Diagram
keras.utils.plot_model(model, "add_and_skip.png") # Include 'show_shapes=True' as a parameter to display the shapes of the respective layers.
cv2.imshow("add_and_skip", cv2.imread("add_and_skip.png")) # cv2.imshow expects a window name and an image array, not a filename.
cv2.waitKey(0)
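# A minimal usage sketch (illustrative only): compile the model and push a
# batch of random inputs through it to confirm that the merged pathways
# produce the expected (batch, 32, 32, 10) output.
model.compile(optimizer="adam",
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True))
dummy = np.random.rand(4, 32, 32, 3).astype("float32")
print(model.predict(dummy).shape)  # expected: (4, 32, 32, 10)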
|
{"hexsha": "72680eac89cd0c18ad4cc8a0e24224d255fa7e56", "size": 1548, "ext": "py", "lang": "Python", "max_stars_repo_path": "TensorflowFunctionalAPI/LayersAddExample.py", "max_stars_repo_name": "Atharva-Gundawar/Deep-Learning-Snippets", "max_stars_repo_head_hexsha": "5e351481e93e664d63cc0a3570add5b072cd621d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TensorflowFunctionalAPI/LayersAddExample.py", "max_issues_repo_name": "Atharva-Gundawar/Deep-Learning-Snippets", "max_issues_repo_head_hexsha": "5e351481e93e664d63cc0a3570add5b072cd621d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TensorflowFunctionalAPI/LayersAddExample.py", "max_forks_repo_name": "Atharva-Gundawar/Deep-Learning-Snippets", "max_forks_repo_head_hexsha": "5e351481e93e664d63cc0a3570add5b072cd621d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0, "max_line_length": 140, "alphanum_fraction": 0.75, "include": true, "reason": "import numpy", "num_tokens": 401}
|
import os, sys
import gc
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, "utils"))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils.kcnet_utils import Netpara, debugPrint, Fc
import sph3d_ops_utils
import sph3gcn_util as s3g_util
global_step = 0
def gather_nd(input_tensor, indices):
"""
input_tensor: (b,n,c), float32
indices: (b,m), int
"""
batch_size = input_tensor.size(0)
# indices=indices.long()
return torch.stack([torch.index_select(input_tensor[k],0,indices[k]) for k in range(batch_size)]) # keep dim as xyz
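# Example (illustrative shapes): for input_tensor of shape (2, 5, 3) and
# indices of shape (2, 4), gather_nd returns a (2, 4, 3) tensor whose rows are
# re-gathered per batch element according to the corresponding index row.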
def normalize_xyz(points):
    # Center each cloud at its centroid, then divide by the largest per-point
    # L2 norm so that every point lies within the unit sphere.
    points -= points.mean(dim=1, keepdim=True)
    scale = torch.pow(points, 2).sum(dim=-1, keepdim=True).max(dim=1, keepdim=True)[0]
    scale = torch.sqrt(scale)
    points /= scale
    return points
# def _separable_conv3d_block(net, list_channels, bin_size, nn_index, nn_count, filt_idx,
# name, depth_multiplier=None, weight_decay=None, reuse=None,
# with_bn=True, with_bias=True, is_training=None):
# for l, num_out_channels in enumerate(list_channels):
# scope = name + '_' + str(l+1) # number from 1, not 0
# net = s3g_util.separable_conv3d(net, num_out_channels, bin_size,
# depth_multiplier[l], scope, nn_index,
# nn_count, filt_idx, weight_decay=weight_decay,
# with_bn=with_bn, with_bias=with_bias,
# reuse=reuse, is_training=is_training)
# return net
class SPH3D(nn.Module):
def __init__(self, input_channels, class_nums=1, config=None, device_id=0, initial_weights=True):
super(SPH3D, self).__init__()
self.input_channels = input_channels
self.class_nums = class_nums
self.device_id = device_id
self.config = config
self.global_radius = 100.0 # global_radius(>=2.0) should connect all points to each point in the cloud
# self.FC1 = Fc(input_channels,[64,128,384],input_dim=4, bn='BN', activation_fn='relu')
self.FC1= Fc(input_channels,[self.config.mlp],input_dim=3, bn='BN', activation_fn='relu') # out_c=32, b,n,in_c-->b,n,out_c
self.SeparableConv3d_block = nn.ModuleList()
c_in=32
# if self.config.use_raw:
# c_in+=3
for l in range(len(self.config.radius)):# [0.1, 0.2, 0.4]
if self.config.use_raw:
c_in+=3
prefix_name = 'conv'+str(l+1)
for k, c_out in enumerate(self.config.channels[l]): #[64,64]
scope = prefix_name + '_' + str(k+1) # number from 1, not 0
self.SeparableConv3d_block.append(s3g_util.SeparableConv3d(c_in,c_out,
self.config.binSize, self.config.multiplier[l][k], scope)
)
c_in = c_out
self.SeparableConv3d=s3g_util.SeparableConv3d(c_in, self.config.global_channels, 17, config.global_multiplier, #global_c=512,global_multiplier=2
'global_conv')
self.classify = nn.Sequential(
# nn.Linear(2048, 512,bias=False),
nn.Linear(64+128+128+512, 512, bias=False),
nn.BatchNorm1d(512),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Linear(512, 256,bias=False),
nn.BatchNorm1d(256),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Linear(256, class_nums)
)
self.criterion = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.parameters(), weight_decay=1e-5)
self.schedule = optim.lr_scheduler.StepLR(self.optimizer, 10, 0.6)
# if initial_weights:
# self.initialize_weights()
self.cuda(device_id)
def forward(self, point_cloud): #B,C,N
point_cloud=point_cloud.transpose(1,2).contiguous()
batch_size,num_point,c=point_cloud.size()
if self.config.normalize:
point_cloud = normalize_xyz(point_cloud)
xyz=point_cloud
query = xyz.mean(dim=1, keepdim=True) # the global viewing point, b,1,c
net = self.FC1(xyz) # (b,n,32)
global_feat=[]
index=0
for l in range(len(self.config.radius)):
if self.config.use_raw:
net=torch.cat([net,xyz],2) #---->(b,n,32+3)
# the neighbor information is the same within xyz_pose_1 and xyz_pose_2.
# Therefore, we compute it with xyz_pose_1, and apply it to xyz_pose_2 as well
intra_idx, intra_cnt, intra_dst, indices = s3g_util.build_graph(xyz, self.config.radius[l], self.config.nn_uplimit[l],
self.config.num_sample[l], sample_method=self.config.sample)
filt_idx = sph3d_ops_utils.spherical_kernel(xyz, xyz, intra_idx, intra_cnt, intra_dst,
self.config.radius[l], self.config.kernel)
for _ in self.config.channels[l]: #[64,64]
net = self.SeparableConv3d_block[index](net, intra_idx, intra_cnt,filt_idx)
index+=1
if self.config.num_sample[l]>1:
indices=indices.long()
# ==================================gather_nd====================================
xyz = gather_nd(xyz, indices)
intra_idx = gather_nd(intra_idx, indices)
intra_cnt = gather_nd(intra_cnt, indices)
intra_dst = gather_nd(intra_dst, indices)
# =====================================END=======================================
net = s3g_util.pool3d(net, intra_idx, intra_cnt,
method=self.config.pool_method, # max
scope='pool'+str(l+1)) # (b,m,c_out)
global_maxpool = net.max(dim=1, keepdim=True)[0] # (b,1,c_out)
global_feat.append(global_maxpool)
# =============================global feature extraction in the final layer==================
nn_idx, nn_cnt, nn_dst = s3g_util.build_global_graph(xyz, query, self.global_radius)
filt_idx = sph3d_ops_utils.spherical_kernel(xyz, query, nn_idx, nn_cnt, nn_dst,
self.global_radius, [8,2,1])
net = self.SeparableConv3d(net,nn_idx, nn_cnt, filt_idx) # (b,1,c_out)
global_feat.append(net)
y = torch.cat(global_feat,dim=2).view(batch_size,-1)
y= self.classify(y)
return y
def loss(self, outputs, targets):
return self.criterion(outputs, targets)
def fit(self, dataloader, epoch, writer):
global global_step
self.train()
batch_loss = 0.
epoch_loss = 0.
batch_nums = 0
if self.schedule is not None:
self.schedule.step()
print('----------epoch %d start train----------' % epoch)
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
self.optimizer.zero_grad()
outputs = self(inputs)
losses = self.loss(outputs, targets)
losses.backward()
self.optimizer.step()
batch_loss += losses.item()
epoch_loss += losses.item()
batch_nums += 1
if (batch_idx + 1) % 8 == 0: #batch_size=16 16*8=128 samples
print('[%d, %5d] loss %.3f' % (epoch, batch_idx, batch_loss / 8))
global_step += 1
# print('global_step={}'.format(global_step))
if writer is not None:
writer.add_scalar('scalar/batch_loss_every8',batch_loss / 8, global_step)
batch_loss = 0.
gc.collect()
torch.cuda.empty_cache()
print('-----------epoch %d end train-----------' % epoch)
print('epoch %d loss %.3f' % (epoch, epoch_loss / batch_nums))
return epoch_loss / batch_nums
def score(self, dataloader):
self.eval()
correct = 0.
total = 0
with torch.no_grad():
# for batch_idx, (inputs, targets) in enumerate(dataloader):
for batch_idx, (inputs, _, targets) in enumerate(dataloader): #zhuyijie
inputs = inputs.cuda(self.device_id)
targets = targets.cuda(self.device_id)
outputs = self(inputs)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
gc.collect()
torch.cuda.empty_cache()
print('Accuracy of the network on the test images: %.2f %%' % (100.0 * correct / total))
return correct / total
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv1d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm1d):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.zero_()
|
{"hexsha": "c63ccc41a6ee5b372723de11d2a282538952d4f8", "size": 9995, "ext": "py", "lang": "Python", "max_stars_repo_path": "other_models/sph3d/SPH3D_modelnet.py", "max_stars_repo_name": "ZJUCAGD/GTS-CNN", "max_stars_repo_head_hexsha": "a329f314b795f0dea0f46db623ac955a47619e7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "other_models/sph3d/SPH3D_modelnet.py", "max_issues_repo_name": "ZJUCAGD/GTS-CNN", "max_issues_repo_head_hexsha": "a329f314b795f0dea0f46db623ac955a47619e7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "other_models/sph3d/SPH3D_modelnet.py", "max_forks_repo_name": "ZJUCAGD/GTS-CNN", "max_forks_repo_head_hexsha": "a329f314b795f0dea0f46db623ac955a47619e7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.2683982684, "max_line_length": 152, "alphanum_fraction": 0.539969985, "include": true, "reason": "import numpy", "num_tokens": 2258}
|
module DoubleBLAS
# support for (fairly) efficient Linear Algebra with DoubleFloats
#FIXME: change to explicit lists because namespace pollution is epidemic
using DoubleFloats
using LinearAlgebra
using SIMD
using UnsafeArrays
using Base.Threads
# steal some internals
using LinearAlgebra: lapack_size, BlasInt, checknonsingular, MulAddMul
using LinearAlgebra: _modify!
if VERSION < v"1.2"
using LinearAlgebra: has_offset_axes
require_one_based_indexing(x...) =
has_offset_axes(x...) && throw(ArgumentError("not implemented "
* "for offset axes"))
else
using Base: require_one_based_indexing
end
# stuff to extend
import LinearAlgebra: rmul!, lmul!, ldiv!
# see subordinate files for some others
export refinedldiv
# most public functions are defined in LinearAlgebra
################################################################
# SIMD config
# width of SIMD structures to use
const Npref = 8
# Using Npref = 8 on a system w/ 256b SIMD registers imposes a 10% penalty
# for most modest-size Double64 problems.
# There is no obvious penalty for large problems, and this
# presumably gives better performance on 512b systems.
# Kudos to the LLVM people.
################################################################
# Multi-threading internals
# TODO: implement set_num_threads
const mt_thresholds = Dict{Symbol,Any}()
"""
set_mt_threshold(n::Real, problem::Symbol)
Set the size threshold for multi-threading in the DoubleBLAS package to `n`,
for matrix operations of class `problem`.
"""
function set_mt_threshold(n::Real, problem::Symbol)
if problem ∈ keys(mt_thresholds)
destref = mt_thresholds[problem]
destref[] = Float64(n)
else
throw(ArgumentError("unrecognized problem key $problem: valid keys are $(keys(mt_thresholds))"))
end
nothing
end
"""
    get_mt_threshold(problem::Symbol)

Return the current size threshold for multi-threading in the DoubleBLAS
package for matrix operations of class `problem`.
"""
function get_mt_threshold(problem::Symbol)
    if problem ∉ keys(mt_thresholds)
        throw(ArgumentError("unrecognized problem key $problem: valid keys are $(keys(mt_thresholds))"))
    end
    srcref = mt_thresholds[problem]
    srcref[]
end
################################################################
# SIMD internals
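# The @generated loaders below (vgethi/vgetlo and their re/im variants for
# complex data) gather the hi or lo components of N consecutive DoubleFloat
# entries starting after index i0 into a SIMD Vec{N,T}; the gather loop is
# unrolled at compile time.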
@generated function vgethi(xv::StridedVecOrMat{DoubleFloat{T}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(HI(xv[i0+$i])) for i in 1:N]...)))
end
end
@generated function vgetlo(xv::StridedVecOrMat{DoubleFloat{T}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(LO(xv[i0+$i])) for i in 1:N]...)))
end
end
@inline function vputhilo!(xv::StridedVecOrMat{DoubleFloat{T}},i0,
zhi::Vec{N,T}, zlo::Vec{N,T}) where {N,T}
@inbounds for i=1:N
xv[i0+i] = DoubleFloat{T}((zhi[i],zlo[i]))
end
end
@generated function vgethire(xv::StridedVecOrMat{Complex{DoubleFloat{T}}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(HI(xv[i0+$i].re)) for i in 1:N]...)))
end
end
@generated function vgetlore(xv::StridedVecOrMat{Complex{DoubleFloat{T}}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(LO(xv[i0+$i].re)) for i in 1:N]...)))
end
end
@generated function vgethiim(xv::StridedVecOrMat{Complex{DoubleFloat{T}}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(HI(xv[i0+$i].im)) for i in 1:N]...)))
end
end
@generated function vgetloim(xv::StridedVecOrMat{Complex{DoubleFloat{T}}},i0,::Type{Vec{N,T}}) where {N,T}
quote
$(Expr(:meta, :inline))
$(Expr(:meta, :inbounds))
Vec{N,T}(tuple($([:(LO(xv[i0+$i].im)) for i in 1:N]...)))
end
end
@inline function vputhilo!(xv::StridedVecOrMat{Complex{DoubleFloat{T}}},i0,
zrhi::Vec{N,T}, zrlo::Vec{N,T},
zihi::Vec{N,T}, zilo::Vec{N,T}) where {N,T}
@inbounds for i=1:N
xv[i0+i] = complex(DoubleFloat{T}((zrhi[i],zrlo[i])),
DoubleFloat{T}((zihi[i],zilo[i])))
end
end
include("ops.jl")
include("dots.jl")
include("axpy.jl")
include("level2.jl")
include("givens.jl")
include("gemm.jl")
include("triangular.jl")
include("lu.jl")
include("chol.jl")
include("refine.jl")
end # module
|
{"hexsha": "aba8f42d1fd5e7a6ee3c7f75c11c2946a9be6cfd", "size": 4613, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/DoubleBLAS.jl", "max_stars_repo_name": "RalphAS/DoubleBLAS", "max_stars_repo_head_hexsha": "db60c787fcb4e3d1153b8358a44d6d6e3b56f7c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-04-17T03:34:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T15:23:54.000Z", "max_issues_repo_path": "src/DoubleBLAS.jl", "max_issues_repo_name": "RalphAS/DoubleBLAS", "max_issues_repo_head_hexsha": "db60c787fcb4e3d1153b8358a44d6d6e3b56f7c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-07T08:29:19.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-07T12:15:15.000Z", "max_forks_repo_path": "src/DoubleBLAS.jl", "max_forks_repo_name": "RalphAS/DoubleBLAS", "max_forks_repo_head_hexsha": "db60c787fcb4e3d1153b8358a44d6d6e3b56f7c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-05-07T08:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-07T08:30:48.000Z", "avg_line_length": 28.475308642, "max_line_length": 106, "alphanum_fraction": 0.6093648385, "num_tokens": 1339}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: dataflow.py
# Author: Qian Ge <geqian1001@gmail.com>
import os
import scipy.misc
import numpy as np
from datetime import datetime
_RNG_SEED = None
def get_rng(obj=None):
"""
This function is copied from `tensorpack
<https://github.com/ppwwyyxx/tensorpack/blob/master/tensorpack/utils/utils.py>`__.
Get a good RNG seeded with time, pid and the object.
Args:
obj: some object to use to generate random seed.
Returns:
np.random.RandomState: the RNG.
"""
seed = (id(obj) + os.getpid() +
int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
if _RNG_SEED is not None:
seed = _RNG_SEED
return np.random.RandomState(seed)
def get_file_list(file_dir, file_ext, sub_name=None):
    """Recursively collect the files under file_dir whose names end with
    file_ext, optionally keeping only names that contain sub_name."""
    if sub_name is None:
        return np.array([os.path.join(root, name)
            for root, dirs, files in os.walk(file_dir)
            for name in sorted(files) if name.endswith(file_ext)])
    else:
        return np.array([os.path.join(root, name)
            for root, dirs, files in os.walk(file_dir)
            for name in sorted(files) if name.endswith(file_ext) and sub_name in name])
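# Illustrative usage (paths are placeholders):
#   get_file_list('./data', '.png')                  -> all .png files under ./data
#   get_file_list('./data', '.png', sub_name='mask') -> only names containing 'mask'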
|
{"hexsha": "3f26943ac99b65c862d313e2a979320637bedc27", "size": 1232, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils/dataflow.py", "max_stars_repo_name": "conan7882/variational-autoencoder", "max_stars_repo_head_hexsha": "4960f252784a7dd2fbe203d7dad65938b57ee9c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 131, "max_stars_repo_stars_event_min_datetime": "2018-08-28T02:38:42.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-06T07:19:24.000Z", "max_issues_repo_path": "src/utils/dataflow.py", "max_issues_repo_name": "conan7882/variational-autoencoder", "max_issues_repo_head_hexsha": "4960f252784a7dd2fbe203d7dad65938b57ee9c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-02-19T09:22:47.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-04T01:44:55.000Z", "max_forks_repo_path": "src/utils/dataflow.py", "max_forks_repo_name": "conan7882/variational-autoencoder", "max_forks_repo_head_hexsha": "4960f252784a7dd2fbe203d7dad65938b57ee9c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-01-22T23:43:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T00:28:55.000Z", "avg_line_length": 29.3333333333, "max_line_length": 87, "alphanum_fraction": 0.6436688312, "include": true, "reason": "import numpy,import scipy", "num_tokens": 321}
|
"""
"""
import abc
import logging
from typing import List,Tuple
import shapely.geometry as sg
from shapely import ops
from shapely.geometry import Point, LineString
class CadImporter(abc.ABC):
"""
Base abstract class. All cad importers should subclass this class.
Imports CAD geometry into self.geometry
"""
def __init__(self,filename : str):
self.filename = filename
self.units : str = None # unitless
self.geometry : List[sg.base.BaseGeometry] = []
self.polygons : List[sg.Polygon] = []
self._already_zipped = False
@abc.abstractmethod
def process(self, **kwargs):
"""
Converts CAD file formats geometry to our geometry.
"""
pass
    def zip(self, zip_length: float = 0.000001):
        """
        Zip tries to reconcile not-quite-matching LineString start and end points.
        Points less than zip_length apart will be equated.
        """
        zipped = 0  # number of point pairs equated
        for i in range(len(self.geometry)):
            ls1 = self.geometry[i]
            fp_1 = Point(ls1.coords[0])   # start point
            lp_1 = Point(ls1.coords[-1])  # end point
            for j in range(i + 1, len(self.geometry)):
                ls2 = self.geometry[j]
                fp_2 = Point(ls2.coords[0])
                lp_2 = Point(ls2.coords[-1])
                if fp_1.distance(fp_2) < zip_length and fp_1.distance(fp_2) != 0:
                    self.geometry[j] = LineString([ls1.coords[0]] + ls2.coords[1:])
                    zipped += 1
                if fp_1.distance(lp_2) < zip_length and fp_1.distance(lp_2) != 0:
                    self.geometry[j] = LineString(ls2.coords[:-1] + [ls1.coords[0]])
                    zipped += 1
                if lp_1.distance(fp_2) < zip_length and lp_1.distance(fp_2) != 0:
                    self.geometry[j] = LineString([ls1.coords[-1]] + ls2.coords[1:])
                    zipped += 1
                if lp_1.distance(lp_2) < zip_length and lp_1.distance(lp_2) != 0:
                    self.geometry[j] = LineString(ls2.coords[:-1] + [ls1.coords[-1]])
                    zipped += 1
        self._already_zipped = True
        logging.info(f"Zipped {zipped} points")
def polygonize(self, simplify = True, force_zip = False, zip_length = 0.000001, retry_with_zip = True):
if not self.geometry:
raise CadImporterError('Cannot run polygonize() since no geometry yet. Have you run process()?')
if not force_zip:
result, dangles, cuts, invalids = ops.polygonize_full(self.geometry)
self.polygons = list(result.geoms)
if force_zip or (not self.polygons and not self._already_zipped and retry_with_zip):
self.zip(zip_length)
result, dangles, cuts, invalids = ops.polygonize_full(self.geometry)
self.polygons = list(result.geoms)
if self.polygons:
if simplify:
for i,p in enumerate(self.polygons):
self.polygons[i] = self.polygons[i].simplify(0)
else:
            logging.error("Unable to form any closed polygons.")
return result, dangles, cuts, invalids
    def cleanup(self, simplify = True, zip_length = 0.000001, retry_with_zip = True) -> str:
        logging.info("cleanup is deprecated BTW. Use the polygonize function instead")
        self.polygonize(simplify, zip_length=zip_length, retry_with_zip=retry_with_zip)
        return 'done'
    def bounds(self) -> Tuple[float]:
        """
        Returns, as (xmin,ymin,xmax,ymax) tuple, the bounding box which envelopes
        this importer's geometry
        """
        xmin = ymin = float('inf')
        xmax = ymax = float('-inf')
        for g in self.geometry:
            b = g.bounds  # shapely bounds: (minx, miny, maxx, maxy)
            xmin, ymin = min(xmin, b[0]), min(ymin, b[1])
            xmax, ymax = max(xmax, b[2]), max(ymax, b[3])
        return (xmin, ymin, xmax, ymax)
class CadImporterError(Exception):
pass
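# Illustrative usage (assumes a concrete CadImporter subclass, e.g. a
# hypothetical DxfImporter that implements process()):
#   importer = DxfImporter('part.dxf')
#   importer.process()
#   importer.polygonize()
#   print(len(importer.polygons), 'closed polygons recovered')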
|
{"hexsha": "7990d00b5b96a5fa0f511683343fb6e3a03d6a2b", "size": 3764, "ext": "py", "lang": "Python", "max_stars_repo_path": "cad_to_shapely/cadimporter.py", "max_stars_repo_name": "aegis1980/cad2shapely", "max_stars_repo_head_hexsha": "729e5ad1d987de09e3818ef15a691b0e0b2407de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cad_to_shapely/cadimporter.py", "max_issues_repo_name": "aegis1980/cad2shapely", "max_issues_repo_head_hexsha": "729e5ad1d987de09e3818ef15a691b0e0b2407de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cad_to_shapely/cadimporter.py", "max_forks_repo_name": "aegis1980/cad2shapely", "max_forks_repo_head_hexsha": "729e5ad1d987de09e3818ef15a691b0e0b2407de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2181818182, "max_line_length": 108, "alphanum_fraction": 0.582890542, "include": true, "reason": "from numpy", "num_tokens": 902}
|
from __future__ import print_function, division
"""...
"""
import petsc4py.PETSc as petsc
from six.moves import range
import config as cfg
from mrpy.mr_utils import mesh
import numpy as np
import math
import importlib
def matrix_add(tree, matrix, row, value, level, index_x=0, index_y=0, index_z=0, add_to_col=0):
"""...
"""
index = mesh.z_curve_index(tree.dimension, level, index_x, index_y, index_z)
if index in tree.tree_nodes and tree.nisleaf[index]:
col = tree.nindex_tree_leaves[index]
col += add_to_col
matrix.setValue(row, col, value, True)
#matrix[row, col] = matrix[row, col] + value
#elif index in tree.tree_nodes and not tree.nisleaf[index]:
# #the children of the nodes are leaves
# children_number = len(tree.nchildren[index])
# for child_index in tree.nchildren[index]:
# i = tree.nindex_x[child_index]
# j = tree.nindex_y[child_index]
# k = tree.nindex_z[child_index]
# matrix_add(tree, matrix, row, value*(1./children_number), level+1, i, j, k, add_to_col)
elif index not in tree.tree_nodes:
#the parent of the node is a leaf; we need to report the value of the node to its parent
i = int(math.floor(index_x/2)) # parent index_x
j = int(math.floor(index_y/2)) # parent index_y
k = int(math.floor(index_z/2)) # parent index_z
matrix_add(tree, matrix, row, value*1., level-1, i, j, k, add_to_col=add_to_col)
|
{"hexsha": "14c4b422d94e382780d6282a63dcc13169ea9d65", "size": 1495, "ext": "py", "lang": "Python", "max_stars_repo_path": "mrpy/spatial_operators/haar/2nd_order_ctr_finite_diff/matrix_aux.py", "max_stars_repo_name": "marc-nguessan/mrpy", "max_stars_repo_head_hexsha": "6fb0bce485234a45bb863f71bc2bdf0a22014de3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-06T10:48:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-09T20:07:08.000Z", "max_issues_repo_path": "mrpy/spatial_operators/haar/2nd_order_ctr_finite_diff/matrix_aux.py", "max_issues_repo_name": "marc-nguessan/mrpy", "max_issues_repo_head_hexsha": "6fb0bce485234a45bb863f71bc2bdf0a22014de3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-09T20:08:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-09T20:11:20.000Z", "max_forks_repo_path": "mrpy/spatial_operators/haar/2nd_order_ctr_finite_diff/matrix_aux.py", "max_forks_repo_name": "marc-nguessan/mrpy", "max_forks_repo_head_hexsha": "6fb0bce485234a45bb863f71bc2bdf0a22014de3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2222222222, "max_line_length": 100, "alphanum_fraction": 0.6595317726, "include": true, "reason": "import numpy", "num_tokens": 407}
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer, Dense, Embedding, TimeDistributed, Dropout
# from tensorflow_core.python.keras.layers import Layer, Dense, Embedding, TimeDistributed, Dropout
from tensorflow.python.keras.layers.dense_attention import BaseDenseAttention
def skill_outputs(inputs, skill_layer_size=None, n_skills=None, dropout=.2):
if skill_layer_size is None:
return TimeDistributed(Dense(n_skills,
activation='sigmoid'))(inputs)
else:
skill_layer = TimeDistributed(Dense(skill_layer_size,
activation='tanh', name='skill_summary'))
skills = Dropout(dropout)(skill_layer(inputs))
return TimeDistributed(Dense(1, activation='sigmoid'))(skills)
def embeddings(inputs, dim, onehot=True, layer_size=None):
if onehot:
return OneHotEmbedding(dim)(inputs)
else:
assert layer_size is not None
return CustomMaskEmbedding(dim, layer_size,
mask_value=-1.)(inputs)
class TimeBiasedAttention(BaseDenseAttention):
def __init__(self, use_scale=False, **kwargs):
super(TimeBiasedAttention, self).__init__(**kwargs)
self.use_scale = use_scale
def build(self, input_shape):
"""Creates scale variable if use_scale==True."""
if self.use_scale:
self.scale = self.add_weight(
name='scale',
shape=(),
initializer=tf.ones_initializer(),
dtype=self.dtype,
trainable=True)
else:
self.scale = None
super(TimeBiasedAttention, self).build(input_shape)
    def _calculate_scores(self, query, key):
        """Calculates scaled dot-product attention scores (the query-key dot
        product divided by the square root of the key dimension).
        Args:
            query: Query tensor of shape `[batch_size, Tq, dim]`.
            key: Key tensor of shape `[batch_size, Tv, dim]`.
        Returns:
            Tensor of shape `[batch_size, Tq, Tv]`.
        """
        # print(f'key shape {key.shape}, query shape {query.shape}')  # debugging aid
scores = tf.divide(tf.matmul(query, key, transpose_b=True),
tf.sqrt(tf.cast(key.shape[-1], tf.float32)))
if self.scale is not None:
scores *= self.scale
return scores
def get_config(self):
config = {'use_scale': self.use_scale}
base_config = super(TimeBiasedAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CustomMaskEmbedding(Embedding):
def __init__(self, input_dim, output_dim, mask_value, **kwargs):
super().__init__(input_dim + 1, output_dim, embeddings_initializer='glorot_uniform', **kwargs)
self.mask_value = mask_value
def call(self, inputs):
return super().call(inputs + 1)
def compute_mask(self, inputs, mask=None):
return tf.not_equal(inputs, self.mask_value)
class OneHotEmbedding(Layer):
def __init__(self, output_dim, mask_value=-1., **kwargs):
super(OneHotEmbedding, self).__init__(trainable=False, **kwargs)
self.output_dim = output_dim
self.mask_value = mask_value
def call(self, inputs, **kwargs):
return tf.one_hot(tf.cast(inputs, 'int32'), self.output_dim, axis=-1)
def compute_mask(self, inputs, mask=None):
return tf.not_equal(inputs, self.mask_value)
def dot_product(t1, t2):
"""
return: dot product over last dimension
"""
return tf.reduce_sum(tf.multiply(t1, t2), axis=-1, keepdims=True)
def positional_encoding(
num_units,
batch_size,
max_attempts,
zero_pad=True,
scale=True,
scope="positional_encoding",
reuse=None):
    '''Sinusoidal Positional_Encoding.
    Args:
      num_units: Output dimensionality
      batch_size: Batch size N of the returned tensor
      max_attempts: Sequence length T of the returned tensor
      zero_pad: Boolean. If True, all the values of the first row (id = 0) should be constant zero
      scale: Boolean. If True, the output will be multiplied by sqrt num_units(check details from paper)
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A 'Tensor' of shape (N, T, num_units) containing the positional encodings.
    '''
N = batch_size
T = max_attempts
position_ind = tf.range(T)
position_enc = np.array([
[pos / np.power(10000, 2. * i / num_units) for i in range(num_units)]
for pos in range(T)], dtype=np.float32)
# Second part, apply the cosine to even columns and sin to odds.
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1
# Convert to a tensor
lookup_table = tf.convert_to_tensor(position_enc)
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
outputs = tf.tile(tf.expand_dims(lookup_table, 0), [N, 1, 1])
if scale:
outputs = outputs * num_units ** 0.5
return outputs
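# Example (illustrative): positional_encoding(num_units=64, batch_size=32,
# max_attempts=100) returns a (32, 100, 64) tensor of sinusoidal encodings,
# tiled across the batch dimension.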
class Positions(Layer):
def __init__(self, **kwargs):
super(Positions, self).__init__(**kwargs)
def call(self, inputs, mask=None, **kwargs):
# Range starting from one so that 0 won't become -1 in next operation
positions = tf.map_fn(lambda x: tf.range(1, tf.shape(x)[0] + 1), inputs, dtype=tf.int32)
# Add padded -1s to positions, natural numbers are decreased by one and others will become -1
positions = positions * tf.cast(tf.not_equal(inputs, -1), dtype=tf.int32) - 1
return tf.cast(positions, tf.float32)
def compute_output_shape(self, input_shape):
return input_shape
|
{"hexsha": "f499d3433fb82161a00ea610a03ad4aebb15de88", "size": 6043, "ext": "py", "lang": "Python", "max_stars_repo_path": "layers/common.py", "max_stars_repo_name": "dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation", "max_stars_repo_head_hexsha": "d1540513056190ab0fbf547d22257dda2dfcd323", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "layers/common.py", "max_issues_repo_name": "dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation", "max_issues_repo_head_hexsha": "d1540513056190ab0fbf547d22257dda2dfcd323", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "layers/common.py", "max_forks_repo_name": "dlkt-review-and-empirical-evaluation/dlkt-review-and-empirical-evaluation", "max_forks_repo_head_hexsha": "d1540513056190ab0fbf547d22257dda2dfcd323", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.76875, "max_line_length": 105, "alphanum_fraction": 0.6212146285, "include": true, "reason": "import numpy", "num_tokens": 1376}
|
\section{Interviews}\label{InterviewAnalysis}
Five persons from different households participated in interviews, and the answers can be seen in \cref{Interview}. In this section, similarities and differences are examined.
\subsection{General}
This subsection contains some of the general questions.
\textbf{Sex:}
Two participants were male and three were female, so both sexes are represented.
\textbf{Age:}
The two males are the youngest, at 25 and 28 years; the females are 53, 55 and 75. This gives a broadly spread range, representing a wide target group.
\textbf{Household:}
Most households are similar in size, with three households of two, one household of three, and the last household of 11 persons. This gives a generally good idea of problems that can be found in smaller households.
\subsection{Food Waste}
The participants' opinion on food being thrown out is the same as for the participants in the FDB report, which is that edible food being thrown out is food waste. Another aspect was that
all participants said that they used leftovers. Some participants saw food fed to animals as not being food waste, and lastly a reason given for food waste is that the bundles in which some food is sold are too big, so the household does not have the time to use it all before it goes bad. The participants were asked if they found food waste to be a problem in their household, and only one saw it as an actual problem; it should be noted that two participants fed animals with food that was about to expire. Furthermore, if food was thrown out it would mostly be vegetables, and sometimes bread. The participants were asked what they focused on when deciding if food should be thrown out, and all answered that they did not take the best-before date into account, but instead looked at the freshness of the food.
\subsection{Importance}
The participants were asked what was most important about their diet. The priorities varied a lot among the participants, but some of the aspects they all had in focus were quality of the food and price. Other aspects were: organic, exciting, healthy and varied meals.
\subsection{Planning}
The participants have in common that they do not plan more than a few days ahead. But they plan their shopping in different ways:
\begin{itemize}
\item Shops for dinner almost every day.
\item Make plans from meat that can be found in the freezer.
\item Drive far for proper meat.
\item Shop a few days before the planned meal.
\item Plan and shop when arriving at the store.
\end{itemize}
\subsubsection{Shopping}
It was difficult for some of the participants to say for how long they shop, but two of them shop in 5-10 minutes. Another participant elaborated that she often goes to multiple shops, and therefore the shopping often takes about an hour in total. Furthermore, the participants could not all say when, in the day, they shop, but some said that it would mostly be after work. The participants shop between 3-5 times a week. Only one of them prefers to plan the shopping in advance, by looking through magazines to find the best offers. One elaborates that if he knows what he wants before he goes shopping, it will take a lot less time.
\subsubsection{Shopping situation}
The participants are affected a little differently by impulse shopping, but in general they like to buy things that are on sale. Some like to buy food that is on sale for being close to expiration.
|
{"hexsha": "2ec92fd37461cee10523bc12a890dccd0327185d", "size": 3438, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Analysis/InterviewAnalysis.tex", "max_stars_repo_name": "amatt13/FoodPlanner-Report", "max_stars_repo_head_hexsha": "387a7c769cdda4913b81838bc8feffc9fbcafcc8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Analysis/InterviewAnalysis.tex", "max_issues_repo_name": "amatt13/FoodPlanner-Report", "max_issues_repo_head_hexsha": "387a7c769cdda4913b81838bc8feffc9fbcafcc8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Analysis/InterviewAnalysis.tex", "max_forks_repo_name": "amatt13/FoodPlanner-Report", "max_forks_repo_head_hexsha": "387a7c769cdda4913b81838bc8feffc9fbcafcc8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 92.9189189189, "max_line_length": 808, "alphanum_fraction": 0.7946480512, "num_tokens": 741}
|
[STATEMENT]
lemma bounded_bilinear_matrix_matrix_mult[bounded_bilinear]:
"bounded_bilinear ((**)::
('a::{euclidean_space, real_normed_algebra_1}^'n^'m) \<Rightarrow>
('a::{euclidean_space, real_normed_algebra_1}^'p^'n) \<Rightarrow>
('a::{euclidean_space, real_normed_algebra_1}^'p^'m))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bounded_bilinear (**)
[PROOF STEP]
unfolding bilinear_conv_bounded_bilinear[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bilinear (**)
[PROOF STEP]
unfolding bilinear_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>x. linear ((**) x)) \<and> (\<forall>y. linear (\<lambda>x. x ** y))
[PROOF STEP]
apply safe
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>x. linear ((**) x)
2. \<And>y. linear (\<lambda>x. x ** y)
[PROOF STEP]
by unfold_locales (auto simp: matrix_add_ldistrib matrix_add_rdistrib matrix_scaleR_right matrix_scaleR_left)
|
{"llama_tokens": 389, "file": "Affine_Arithmetic_Floatarith_Expression", "length": 4}
|
\section{UML Activities and Surface Languages}
\label{sec:grammars-and-metamodels:Preliminaries}
The surface language we present is a textual alternative for the activity diagrams of the \UML.
In this section, we give a brief description of Activities and explain what a surface language is.
We use the naming convention used by the \OMG in the definition of the \UML~\cite{UMLsuper} when discussing concepts of the \UML.
This means that we use medial capitals for the names of these concepts.
\subsection{UML Activities}
\label{sub:grammars-and-metamodels:UML-Activities}
\begin{figure}
\centering
\includegraphics[scale=0.5]{grammars-and-metamodels/figs/diagram-comparison}
\caption{Two representations of the same behavior}
\label{fig:grammars-and-metamodels:UML-diagram-comparison}
\end{figure}
\Activities are one of the concepts offered by the \UML to specify behavior.
Some aspects of an \Activity can be visualized in an activity diagram.
The leftmost part of Figure~\ref{fig:grammars-and-metamodels:UML-diagram-comparison} shows an example of such a diagram.
An \Activity is a directed graph, whose nodes and edges are called \ActivityNodes and \ActivityEdges.
There are a number of different \ActivityNodes, such as \ControlNodes (depicted by diamonds) and \Actions (depicted by rounded rectangles), and two types of \ActivityEdges, namely \ControlFlows and \ObjectFlows.
The informal description of the semantics of Activities states that the order in which \Actions are executed is based on the flow of tokens.
There are two kinds of tokens: control tokens and object tokens.
\ControlFlows, which are depicted by arrows connecting \ActivityNodes, show how control tokens flow from one \ActivityNode to the other.
\ObjectFlows, which are depicted by arrows connecting \OutputPins and \InputPins, show how object tokens flow from one \Action producing an object to another \Action that uses this object.
The \ObjectFlows in Figure~\ref{fig:grammars-and-metamodels:UML-diagram-comparison} are depicted by the arrows connecting the small rectangles on the borders of the \Actions.
These small rectangles are the \InputPins and \OutputPins of those \Actions.
\subsection{Surface Languages}
\label{sub:grammars-and-metamodels:Prelim-SL}
Every model conforms to a metamodel, which defines the elements that play a role in the model.
If a model conforms to a certain metamodel, each element of the model is an instance of an element in that metamodel.
The \UML defines a number of diagrams, which can be used to depict certain parts of a model.
There are diagrams that depict the structure of a model, diagrams that depict the behavior of parts of the model, etc.
These diagrams offer a graphical representation for instances of elements in the metamodel.
In the context of the \UML, the term \emph{surface language} is used to refer to a concrete syntax that offers an alternative notation for these diagrams.
In our case, instead of a graphical representation, a textual representation is given for instances of elements of the metamodel.
Other names for surface languages related to \Activities and \Actions are \emph{surface action languages} and \emph{action languages}.
|
{"hexsha": "9c13be665a73314d695b4179abef6cf59932b552", "size": 3192, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "grammars-and-metamodels/preliminaries.tex", "max_stars_repo_name": "ljpengelen/latex-phd-thesis", "max_stars_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-18T21:53:57.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-18T21:53:57.000Z", "max_issues_repo_path": "grammars-and-metamodels/preliminaries.tex", "max_issues_repo_name": "ljpengelen/latex-phd-thesis", "max_issues_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "grammars-and-metamodels/preliminaries.tex", "max_forks_repo_name": "ljpengelen/latex-phd-thesis", "max_forks_repo_head_hexsha": "8cabcf160a6f06e12b5ced92bb5cec06983e5bb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 67.914893617, "max_line_length": 211, "alphanum_fraction": 0.8048245614, "num_tokens": 742}
|
"""
This class contains the code to encode/decode data using BB-ANS with a VAE
"""
from ans import ANSCoder
import numpy as np
import distributions
def BBANS_append(posterior_pop, likelihood_append, prior_append):
"""
Given functions to pop a posterior, append a likelihood and append the prior,
return a function to append some data.
"""
def append(ans, data):
latent = posterior_pop(data)(ans)
likelihood_append(latent)(ans, data)
prior_append(ans, latent)
return append
def BBANS_pop(prior_pop, likelihood_pop, posterior_append):
"""
Given functions to pop a prior and likelihood and append
the posterior, return a function to pop data
"""
def pop(ans):
latent = prior_pop(ans)
data = likelihood_pop(latent)(ans)
posterior_append(data)(ans, latent)
return data
return pop
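# Illustrative round trip: append() first pops latent bits from the ANS stack
# via the posterior (getting the bits "back"), then encodes the data under the
# likelihood and the latent under the prior; pop() inverts these three steps
# in reverse order, recovering the data and restoring the stack state.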
def VAE_append(latent_shape, generative_model, recognition_model,
obs_append, prior_precision, latent_precision):
"""
This append takes functions from a variational autoencoder and produces
an append and pop function for BBANS.
Follows the same layout as vae_append in the author's code
"""
def posterior_pop(data):
"""
Pop the posterior
"""
posterior_mean, posterior_stdd = recognition_model(data)
posterior_mean = np.ravel(posterior_mean)
posterior_stdd = np.ravel(posterior_stdd)
# we now have an array of mean and standard deviation values
# from the posterior distribution
cdfs = [distributions.gaussian_latent_cdf(mean, stdd, prior_precision, latent_precision)
for mean, stdd in zip(posterior_mean, posterior_stdd)]
ppfs = [distributions.gaussian_latent_ppf(mean, stdd, prior_precision, latent_precision)
for mean, stdd in zip(posterior_mean, posterior_stdd)]
return distributions.distr_pop(latent_precision, ppfs, cdfs)
def likelihood_append(latent_indices):
"""
Append the likelihood
"""
y = distributions.standard_gaussian_centers(prior_precision)[latent_indices]
obs_parameters = generative_model(np.reshape(y, latent_shape))
return obs_append(obs_parameters)
prior_append = distributions.uniforms_append(prior_precision)
return BBANS_append(posterior_pop, likelihood_append, prior_append)
def VAE_pop(latent_shape, generative_model, recognition_model,
obs_pop, prior_precision, latent_precision):
"""
Pop a symbol using VAE BBANS
"""
prior_pop = distributions.uniforms_pop(prior_precision, np.prod(latent_shape))
def likelihood_pop(latent_indices):
y = distributions.standard_gaussian_centers(prior_precision)[latent_indices]
obs_params = generative_model(np.reshape(y, latent_shape))
return obs_pop(obs_params)
def posterior_append(data):
posterior_mean, posterior_stdd = recognition_model(np.atleast_2d(data))
posterior_mean = np.ravel(posterior_mean)
posterior_stdd = np.ravel(posterior_stdd)
cdfs = [distributions.gaussian_latent_cdf(mean, stdd, prior_precision, latent_precision)
for mean, stdd in zip(posterior_mean, posterior_stdd)]
return distributions.distr_append(latent_precision, cdfs)
return BBANS_pop(prior_pop, likelihood_pop, posterior_append)
|
{"hexsha": "13d8de72ae16461c7fda69a9d809b2d6a0a2ecea", "size": 3410, "ext": "py", "lang": "Python", "max_stars_repo_path": "BitsBack/bbans.py", "max_stars_repo_name": "deepaks4077/bits-back", "max_stars_repo_head_hexsha": "0d4355302eb4c5a18a229fa15a0a1caf8fe529d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "BitsBack/bbans.py", "max_issues_repo_name": "deepaks4077/bits-back", "max_issues_repo_head_hexsha": "0d4355302eb4c5a18a229fa15a0a1caf8fe529d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "BitsBack/bbans.py", "max_forks_repo_name": "deepaks4077/bits-back", "max_forks_repo_head_hexsha": "0d4355302eb4c5a18a229fa15a0a1caf8fe529d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0652173913, "max_line_length": 97, "alphanum_fraction": 0.7082111437, "include": true, "reason": "import numpy", "num_tokens": 737}
|
#redirect Users/Cox
|
{"hexsha": "e0cf4638ebca84b0e85e6d234ea7886136399252", "size": 20, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/KellyCox.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/KellyCox.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/KellyCox.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.0, "max_line_length": 19, "alphanum_fraction": 0.8, "num_tokens": 6}
|
# Copyright (C) 2019 SAMSUNG SDS <Team.SAIDA@gmail.com>
#
# This code is distribued under the terms and conditions from the MIT License (MIT).
#
# Authors : Uk Jo, Iljoo Yoon, Hyunjae Lee, Daehun Jun
from __future__ import division
import numpy as np
import copy
from math import sqrt
class RandomProcess(object):
def reset_states(self):
pass
class AnnealedGaussianProcess(RandomProcess):
def __init__(self, mu, sigma, sigma_min, n_steps_annealing):
self.mu = mu
self.sigma = sigma
self.n_steps = 0
if sigma_min is not None:
self.m = -float(sigma - sigma_min) / float(n_steps_annealing)
self.c = sigma
self.sigma_min = sigma_min
else:
self.m = 0.
self.c = sigma
self.sigma_min = sigma
@property
def current_sigma(self):
sigma = max(self.sigma_min, self.m * float(self.n_steps) + self.c)
return sigma
class GaussianWhiteNoiseProcess(AnnealedGaussianProcess):
def __init__(self, mu=0., sigma=1., sigma_min=None, n_steps_annealing=1000, size=1):
super(GaussianWhiteNoiseProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing)
self.size = size
def sample(self):
sample = np.random.normal(self.mu, self.current_sigma, self.size)
self.n_steps += 1
return sample
# Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab \
# https://www.quora.com/Why-do-we-use-the-Ornstein-Uhlenbeck-Process-in-the-exploration-of-DDPG
class OrnsteinUhlenbeckProcess(AnnealedGaussianProcess):
def __init__(self, theta, mu=0., sigma=1., dt=1e-2, size=1, sigma_min=None, n_steps_annealing=1000):
super(OrnsteinUhlenbeckProcess, self).__init__(mu=mu, sigma=sigma, sigma_min=sigma_min, n_steps_annealing=n_steps_annealing)
self.theta = theta
self.mu = mu
self.dt = dt
self.size = size
self.reset_states()
def sample(self):
x = self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.current_sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
self.x_prev = x
self.n_steps += 1
return x
def reset_states(self):
self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size)
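# Example (sketch): temporally correlated exploration noise for a DDPG-style agent.
# `actor`, `state` and `action_dim` are assumed to come from the surrounding code.
# ou = OrnsteinUhlenbeckProcess(theta=0.15, mu=0.0, sigma=0.2, size=action_dim)
# action = np.clip(actor(state) + ou.sample(), -1.0, 1.0)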
class SimpleOUNoise:
"""Ornstein-Uhlenbeck process."""
def __init__(self, size=1, mu=0, theta=0.05, sigma=0.25):
"""Initialize parameters and noise process."""
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.reset_states()
def reset_states(self):
"""Reset the internal state (= noise) to mean (mu)."""
self.state = copy.copy(self.mu)
def sample(self):
"""Update internal state and return it as a noise sample."""
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
"""
inspired by paper "PARAMETER SPACE NOISE FOR EXPLORATION" https://arxiv.org/pdf/1706.01905.pdf
https://github.com/l5shi/Multi-DDPG-with-parameter-noise/blob/master/Multi_DDPG_with_parameter_noise.ipynb
"""
class AdaptiveParamNoiseSpec(object):
def __init__(self, initial_stddev=0.1, desired_action_stddev=0.2, adaptation_coefficient=1.01):
"""
Note that initial_stddev and current_stddev refer to std of parameter noise,
but desired_action_stddev refers to (as name notes) desired std in action space
"""
self.initial_stddev = initial_stddev
self.desired_action_stddev = desired_action_stddev
self.adaptation_coefficient = adaptation_coefficient
self.current_stddev = initial_stddev
def adapt(self, distance):
if distance > self.desired_action_stddev:
# Decrease stddev.
self.current_stddev /= self.adaptation_coefficient
else:
# Increase stddev.
self.current_stddev *= self.adaptation_coefficient
def get_stats(self):
stats = {
'param_noise_stddev': self.current_stddev,
}
return stats
def __repr__(self):
fmt = 'AdaptiveParamNoiseSpec(initial_stddev={}, desired_action_stddev={}, adaptation_coefficient={})'
return fmt.format(self.initial_stddev, self.desired_action_stddev, self.adaptation_coefficient)
def ddpg_distance_metric(actions1, actions2):
"""
Compute "distance" between actions taken by two policies at the same states
Expects numpy arrays
"""
diff = actions1-actions2
mean_diff = np.mean(np.square(diff), axis=0)
dist = sqrt(np.mean(mean_diff))
return dist
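# Example adaptation loop (sketch): keep the action-space effect of parameter
# noise close to desired_action_stddev. `actions_plain` and `actions_perturbed`
# are assumed batches of actions from the unperturbed and perturbed policies
# evaluated on the same states.
# spec = AdaptiveParamNoiseSpec(initial_stddev=0.1, desired_action_stddev=0.2)
# d = ddpg_distance_metric(actions_plain, actions_perturbed)
# spec.adapt(d)   # divides the stddev if d is too large, multiplies it otherwise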
# class OrnsteinUhlenbeckProcessWithAnnealing(AnnealedGaussianProcess):
# """
# Ornstein-Uhlenbeck Noise (original code by @slowbull)
# """
# def __init__(self, theta=0.15, mu=0, sigma=1, x0=0, dt=1e-2, sigma_min=None, n_steps_annealing=100, size=1):
# self.theta = theta
# self.sigma = sigma
# self.sigma_min = sigma_min
# self.n_steps_annealing = n_steps_annealing
# self.sigma_step = - self.sigma / float(self.n_steps_annealing)
# self.x0 = x0
# self.mu = mu
# self.dt = dt
# self.size = size
#
# def sample(self, step):
#         sigma = max(self.sigma_min, self.sigma_step * step + self.sigma)
# x = self.x0 + self.theta * (self.mu - self.x0) * self.dt + sigma * np.sqrt(self.dt) * np.random.normal(size=self.size)
# self.x0 = x
# return x
#
# def reset_states(self):
# self.x_prev = np.random.normal(self.mu,self.current_sigma,self.size)
|
{"hexsha": "6271745fea275cd27c94a0fe28f421211845f5b2", "size": 5731, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/core/common/random.py", "max_stars_repo_name": "BupyeongHealer/SAMSUNG_SAIDALAB_RLCustom", "max_stars_repo_head_hexsha": "b6b1e8a473e134b548db5a1c235cf71ef83e36e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2019-05-29T04:14:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T04:48:48.000Z", "max_issues_repo_path": "python/core/common/random.py", "max_issues_repo_name": "BupyeongHealer/SAMSUNG_SAIDALAB_RLCustom", "max_issues_repo_head_hexsha": "b6b1e8a473e134b548db5a1c235cf71ef83e36e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2019-10-24T12:18:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:15:38.000Z", "max_forks_repo_path": "python/core/common/random.py", "max_forks_repo_name": "BupyeongHealer/SAMSUNG_SAIDALAB_RLCustom", "max_forks_repo_head_hexsha": "b6b1e8a473e134b548db5a1c235cf71ef83e36e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-05-29T03:18:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-06T03:49:52.000Z", "avg_line_length": 35.5962732919, "max_line_length": 147, "alphanum_fraction": 0.658000349, "include": true, "reason": "import numpy", "num_tokens": 1474}
|
#!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the following classes:
* :class:`~aeneas.mfcc.MFCC`, computing Mel-frequency cepstral coefficients (MFCCs).
This file is a modified version of the ``mfcc.py`` file
by David Huggins-Daines from the CMU Sphinx-III project.
You can find the original file in the ``thirdparty/`` directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy
from aeneas.logger import Loggable
from aeneas.runtimeconfiguration import RuntimeConfiguration
class MFCC(Loggable):
"""
A class for computing Mel-frequency cepstral coefficients (MFCCs).
:param rconf: a runtime configuration
:type rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
:param logger: the logger object
:type logger: :class:`~aeneas.logger.Logger`
"""
CUTOFF = 0.00001
""" Cut-off threshold """
MEL_10 = 2595.0
""" Base Mel frequency """
TAG = u"MFCC"
def __init__(self, rconf=None, logger=None):
super(MFCC, self).__init__(rconf=rconf, logger=logger)
# store parameters in local attributes
self.filter_bank_size = self.rconf[RuntimeConfiguration.MFCC_FILTERS]
self.mfcc_size = self.rconf[RuntimeConfiguration.MFCC_SIZE]
self.fft_order = self.rconf[RuntimeConfiguration.MFCC_FFT_ORDER]
self.lower_frequency = self.rconf[RuntimeConfiguration.MFCC_LOWER_FREQUENCY]
self.upper_frequency = self.rconf[RuntimeConfiguration.MFCC_UPPER_FREQUENCY]
self.emphasis_factor = self.rconf[RuntimeConfiguration.MFCC_EMPHASIS_FACTOR]
self.window_length = self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH]
self.window_shift = self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT]
# initialize DCT matrix
self._create_dct_matrix()
# initialized later by compute_from_data()
self.data = None
self.sample_rate = None
self.filters = None
self.hamming_window = None
@classmethod
def _hz2mel(cls, frequency):
"""
Convert the given frequency in Hz to the Mel scale.
:param float frequency: the Hz frequency to convert
:rtype: float
"""
return cls.MEL_10 * math.log10(1.0 + (frequency / 700.0))
@classmethod
def _mel2hz(cls, mel):
"""
Convert the given Mel value to Hz frequency.
:param float mel: the Mel value to convert
:rtype: float
"""
return 700.0 * (10 ** (mel / cls.MEL_10) - 1)
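    # Quick sanity check (sketch): the two conversions are inverses up to
    # floating-point error, e.g. MFCC._mel2hz(MFCC._hz2mel(440.0)) ~= 440.0.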
def _create_dct_matrix(self):
"""
Create the not-quite-DCT matrix as used by Sphinx,
        and store it in ``self.s2dct``.
"""
self.s2dct = numpy.zeros((self.mfcc_size, self.filter_bank_size))
for i in range(0, self.mfcc_size):
freq = numpy.pi * float(i) / self.filter_bank_size
self.s2dct[i] = numpy.cos(freq * numpy.arange(0.5, 0.5 + self.filter_bank_size, 1.0, 'float64'))
self.s2dct[:, 0] *= 0.5
self.s2dct = self.s2dct.transpose()
def _create_mel_filter_bank(self):
"""
Create the Mel filter bank,
and store it in ``self.filters``.
Note that it is a function of the audio sample rate,
so it cannot be created in the class initializer,
but only later in :func:`aeneas.mfcc.MFCC.compute_from_data`.
"""
self.filters = numpy.zeros((1 + (self.fft_order // 2), self.filter_bank_size), 'd')
dfreq = float(self.sample_rate) / self.fft_order
nyquist_frequency = self.sample_rate / 2
if self.upper_frequency > nyquist_frequency:
self.log_exc(u"Upper frequency %f exceeds Nyquist frequency %f" % (self.upper_frequency, nyquist_frequency), None, True, ValueError)
melmax = MFCC._hz2mel(self.upper_frequency)
melmin = MFCC._hz2mel(self.lower_frequency)
dmelbw = (melmax - melmin) / (self.filter_bank_size + 1)
filt_edge = MFCC._mel2hz(melmin + dmelbw * numpy.arange(self.filter_bank_size + 2, dtype='d'))
# TODO can this code be written more numpy-style?
# (the performance loss is negligible, it is just ugly to see)
for whichfilt in range(0, self.filter_bank_size):
# int() casts to native int instead of working with numpy.float64
leftfr = int(round(filt_edge[whichfilt] / dfreq))
centerfr = int(round(filt_edge[whichfilt + 1] / dfreq))
rightfr = int(round(filt_edge[whichfilt + 2] / dfreq))
fwidth = (rightfr - leftfr) * dfreq
height = 2.0 / fwidth
if centerfr != leftfr:
leftslope = height / (centerfr - leftfr)
else:
leftslope = 0
freq = leftfr + 1
while freq < centerfr:
self.filters[freq, whichfilt] = (freq - leftfr) * leftslope
freq = freq + 1
# the next if should always be true!
if freq == centerfr:
self.filters[freq, whichfilt] = height
freq = freq + 1
if centerfr != rightfr:
rightslope = height / (centerfr - rightfr)
while freq < rightfr:
self.filters[freq, whichfilt] = (freq - rightfr) * rightslope
freq = freq + 1
def _pre_emphasis(self):
"""
Pre-emphasize the entire signal at once by self.emphasis_factor,
overwriting ``self.data``.
"""
self.data = numpy.append(self.data[0], self.data[1:] - self.emphasis_factor * self.data[:-1])
def compute_from_data(self, data, sample_rate):
"""
Compute MFCCs for the given audio data.
The audio data must be a 1D :class:`numpy.ndarray`,
that is, it must represent a monoaural (single channel)
array of ``float64`` values in ``[-1.0, 1.0]``.
:param data: the audio data
:type data: :class:`numpy.ndarray` (1D)
:param int sample_rate: the sample rate of the audio data, in samples/s (Hz)
:raises: ValueError: if the data is not a 1D :class:`numpy.ndarray` (i.e., not mono),
or if the data is empty
:raises: ValueError: if the upper frequency defined in the ``rconf`` is
        larger than the Nyquist frequency (i.e., half of ``sample_rate``)
"""
def _process_frame(self, frame):
"""
Process each frame, returning the log(power()) of it.
"""
# apply Hamming window
frame *= self.hamming_window
# compute RFFT
fft = numpy.fft.rfft(frame, self.fft_order)
# equivalent to power = fft.real * fft.real + fft.imag * fft.imag
power = numpy.square(numpy.absolute(fft))
#
# return the log(power()) of the transformed vector
# v1
# COMMENTED logspec = numpy.log(numpy.dot(power, self.filters).clip(self.CUTOFF, numpy.inf))
# COMMENTED return numpy.dot(logspec, self.s2dct) / self.filter_bank_size
# v2
return numpy.log(numpy.dot(power, self.filters).clip(self.CUTOFF, numpy.inf))
if len(data.shape) != 1:
self.log_exc(u"The audio data must be a 1D numpy array (mono).", None, True, ValueError)
if len(data) < 1:
self.log_exc(u"The audio data must not be empty.", None, True, ValueError)
self.data = data
self.sample_rate = sample_rate
# number of samples in the audio
data_length = len(self.data)
# frame length in number of samples
frame_length = int(self.window_length * self.sample_rate)
# frame length must be at least equal to the FFT order
frame_length_padded = max(frame_length, self.fft_order)
# frame shift in number of samples
frame_shift = int(self.window_shift * self.sample_rate)
# number of MFCC vectors (one for each frame)
# this number includes the last shift,
# where the data will be padded with zeros
# if the remaining samples are less than frame_length_padded
number_of_frames = int((1.0 * data_length) / frame_shift)
# create Hamming window
self.hamming_window = numpy.hamming(frame_length_padded)
# build Mel filter bank
self._create_mel_filter_bank()
# pre-emphasize the entire audio data
self._pre_emphasis()
# allocate the MFCCs matrix
# v1
# COMMENTED mfcc = numpy.zeros((number_of_frames, self.mfcc_size), 'float64')
# v2
mfcc = numpy.zeros((number_of_frames, self.filter_bank_size), 'float64')
# compute MFCCs one frame at a time
for frame_index in range(number_of_frames):
# COMMENTED print("Computing frame %d / %d" % (frame_index, number_of_frames))
# get the start and end indices for this frame,
# do not overrun the data length
frame_start = frame_index * frame_shift
frame_end = min(frame_start + frame_length_padded, data_length)
# frame is zero-padded if the remaining samples
# are less than its length
frame = numpy.zeros(frame_length_padded)
frame[0:(frame_end - frame_start)] = self.data[frame_start:frame_end]
# process the frame
mfcc[frame_index] = _process_frame(self, frame)
# v1
# COMMENTED return mfcc
# v2
# return the dot product with the DCT matrix
return numpy.dot(mfcc, self.s2dct) / self.filter_bank_size
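# Usage sketch (not part of the original module; assumes Loggable falls back to
# a default RuntimeConfiguration when rconf is None): MFCCs of one second of a
# 440 Hz tone sampled at 16 kHz.
if __name__ == "__main__":
    sample_rate = 16000
    tone = numpy.sin(2 * numpy.pi * 440.0 * numpy.arange(sample_rate) / sample_rate)
    coeffs = MFCC().compute_from_data(tone, sample_rate)
    print(coeffs.shape)  # (number_of_frames, mfcc_size)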
|
{"hexsha": "0828564ddbf57e84e7d376685308b00e00ff8991", "size": 10691, "ext": "py", "lang": "Python", "max_stars_repo_path": "Aeneas/aeneas/aeneas/mfcc.py", "max_stars_repo_name": "yalhaizaey/Dreich", "max_stars_repo_head_hexsha": "9528856c3879d4c9d3ced453f223785a71188808", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2019-05-09T19:03:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-06T20:47:37.000Z", "max_issues_repo_path": "Experiments/Aeneas/aeneas/aeneas/mfcc.py", "max_issues_repo_name": "jonathanmcchesney/DeFog", "max_issues_repo_head_hexsha": "bc314d41471d00b9d605bb4519f31a465e0a6b75", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Experiments/Aeneas/aeneas/aeneas/mfcc.py", "max_forks_repo_name": "jonathanmcchesney/DeFog", "max_forks_repo_head_hexsha": "bc314d41471d00b9d605bb4519f31a465e0a6b75", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-08-19T19:00:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T04:46:07.000Z", "avg_line_length": 40.1917293233, "max_line_length": 144, "alphanum_fraction": 0.635113647, "include": true, "reason": "import numpy", "num_tokens": 2600}
|
import os
import re
import time
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
from nba_api.stats.endpoints import leaguestandings
from src.team_colors import team_colors
table_cols = ['Rk', 'Team', 'Record', 'PCT', 'GB', 'Home', 'Away', 'Div',
'PPG', 'Opp PPG', 'Diff', 'Strk', 'Last 10']
def conf_table_cols(conference):
if conference == 'League':
conference = 'Conference'
cols = table_cols[:]
cols.insert(8, f'{conference}')
return cols
def conf_table_data(season, conference):
#! add in playoff string reading for previous years after this works for current year
url = f'https://www.espn.com/nba/standings/_/season/{int(season) + 1}'
if conference == 'League':
url += '/group/league'
dfs = pd.read_html(url)
time.sleep(1)
flatten = lambda t: [item for sublist in t for item in sublist]
start_cols = ['Rank', 'Team', 'Record', 'PCT', 'GB', 'HOME', 'AWAY', 'DIV', 'CONF', 'PPG', 'OPP PPG',
'DIFF', 'STRK', 'L10']
if conference == 'West':
val = 3
else:
val = 1
conf = dfs[val]
teams = pd.DataFrame([dfs[val - 1].columns.values.tolist()[0]] + flatten(dfs[val - 1].values.tolist()))
def playoff_str(x):
if str(x)[5].isdigit() and str(x)[6].islower():
return str(x)[6:8]
elif str(x)[5].islower():
return str(x)[5:7]
else:
return ''
playoff_str_vals = teams.apply(playoff_str, axis=1)
teams = pd.DataFrame([item.split(' ')[-1] for sublist in teams.values for item in sublist])
teams = teams.replace({0:{i.split(' ')[-1]: i for i in list(team_colors.keys())}})
teams['t'] = playoff_str_vals
teams = teams.apply(lambda row: row[0] + ' -' + row['t'] if row['t'] != '' else row[0], axis=1)
conf['Team'] = teams.apply(lambda x: x[:-1] if x.endswith(' ') else x)
conf['PCT'] = round(conf['PCT'] * 100, 2).astype(str) + '%'
conf['Record'] = conf['W'].astype(str) + '-' + conf['L'].astype(str)
conf['Rank'] = range(1, len(conf) + 1)
for j in ['PPG', 'OPP PPG', 'DIFF']:
conf[j] = round(conf[j], 1)
conf[j] = conf[j].astype(str)
conf = conf.reindex(columns=start_cols).copy()
conf.columns = conf_table_cols(conference)
return conf.copy()
#%%
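# Usage sketch (requires network access to espn.com; `season` is assumed to be
# the starting year of the season as a string, e.g. '2022' for 2022-23):
# west = conf_table_data('2022', 'West')
# west[['Rk', 'Team', 'Record', 'PCT']].head()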
|
{"hexsha": "7224a9fad0429125dc435d9ee49a82aa170806c7", "size": 2369, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nba_data.py", "max_stars_repo_name": "Reece323/NBA-Dash", "max_stars_repo_head_hexsha": "1700ec0143a82a668264e5aad98878238211d751", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/nba_data.py", "max_issues_repo_name": "Reece323/NBA-Dash", "max_issues_repo_head_hexsha": "1700ec0143a82a668264e5aad98878238211d751", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nba_data.py", "max_forks_repo_name": "Reece323/NBA-Dash", "max_forks_repo_head_hexsha": "1700ec0143a82a668264e5aad98878238211d751", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9873417722, "max_line_length": 107, "alphanum_fraction": 0.5821021528, "include": true, "reason": "import numpy", "num_tokens": 685}
|
\section{Main}
\begin{frame}{Main}
\end{frame}
|
{"hexsha": "a87afb537b51329d1f327ebaa1c9563e07352083", "size": 48, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/main.tex", "max_stars_repo_name": "bencwbrown/beamer-template", "max_stars_repo_head_hexsha": "a709ea73793ca4ff5d5bd38cdc68ffe3253c6127", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/main.tex", "max_issues_repo_name": "bencwbrown/beamer-template", "max_issues_repo_head_hexsha": "a709ea73793ca4ff5d5bd38cdc68ffe3253c6127", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/main.tex", "max_forks_repo_name": "bencwbrown/beamer-template", "max_forks_repo_head_hexsha": "a709ea73793ca4ff5d5bd38cdc68ffe3253c6127", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 9.6, "max_line_length": 19, "alphanum_fraction": 0.6875, "num_tokens": 17}
|
# -*- coding: utf-8 -*-
import copy
from typing import Union
import numpy as np
def temperature(
t: np.array,
fire_load_density_MJm2: float,
fire_hrr_density_MWm2: float,
room_length_m: float,
room_width_m: float,
fire_spread_rate_ms: float,
beam_location_height_m: float,
beam_location_length_m: Union[float, list, np.ndarray],
fire_nft_limit_c: float,
*_,
**__,
):
"""
This function calculates and returns a temperature array representing travelling temperature. This function is NOT in SI.
:param t: in s, is the time array
:param fire_load_density_MJm2: in MJ/m2, is the fuel density on the floor
:param fire_hrr_density_MWm2: in MW/m2, is the heat release rate density
:param room_length_m: in m, is the room length
:param room_width_m: in m, is the room width
:param fire_spread_rate_ms: in m/s, is fire spread speed
    :param beam_location_height_m: in m, is the beam height above the floor
    :param beam_location_length_m: in m, is the beam lateral distance to fire origin
:param fire_nft_limit_c: in deg.C, is the maximum near field temperature
    :param opening_fraction: in -, is the ventilation opening proportion between 0 and 1 (accepted but currently unused)
    :param opening_width_m: in m, is ventilation opening width (accepted but currently unused)
    :param opening_height_m: in m, is ventilation opening height (accepted but currently unused)
:return T_g: in deg.C, is calculated gas temperature
"""
# re-assign variable names for equation readability
q_fd = fire_load_density_MJm2
HRRPUA = fire_hrr_density_MWm2
s = fire_spread_rate_ms
h_s = beam_location_height_m
l_s = beam_location_length_m
l = room_length_m
w = room_width_m
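    # ensure l is the longer side of the room (in-place swap without a temporary)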
if l < w:
l += w
w = l - w
l -= w
# work out ventilation conditions
# a_v = opening_height_m * opening_width_m * opening_fraction
# Qv = 1.75 * a_v * np.sqrt(opening_height_m)
    # work out burning time etc.
t_burn = max([q_fd / HRRPUA, 900.0])
t_decay = max([t_burn, l / s])
t_lim = min([t_burn, l / s])
# reduce resolution to fit time step for t_burn, t_decay, t_lim
time_interval_s = t[1] - t[0]
t_decay_ = round(t_decay / time_interval_s, 0) * time_interval_s
t_lim_ = round(t_lim / time_interval_s, 0) * time_interval_s
if t_decay_ == t_lim_:
t_lim_ -= time_interval_s
    # work out the heat release rate ARRAY (corrected with time)
Q_growth = (HRRPUA * w * s * t) * (t < t_lim_)
Q_peak = (
min([HRRPUA * w * s * t_burn, HRRPUA * w * l]) * (t >= t_lim_) * (t <= t_decay_)
)
Q_decay = (max(Q_peak) - (t - t_decay_) * w * s * HRRPUA) * (t > t_decay_)
Q_decay[Q_decay < 0] = 0
Q = (Q_growth + Q_peak + Q_decay) * 1000.0
    # work out the distance from the fire median to the structural element r
l_fire_front = s * t
l_fire_front[l_fire_front < 0] = 0
l_fire_front[l_fire_front > l] = l
l_fire_end = s * (t - t_lim)
l_fire_end[l_fire_end < 0] = 0.0
l_fire_end[l_fire_end > l] = l
l_fire_median = (l_fire_front + l_fire_end) / 2.0
    # work out the far field temperature of gas T_g
if isinstance(l_s, float) or isinstance(l_s, int):
r = np.absolute(l_s - l_fire_median)
T_g = np.where((r / h_s) > 0.8, (5.38 * np.power(Q / r, 2 / 3) / h_s) + 20.0, 0)
T_g = np.where(
(r / h_s) <= 0.8,
(16.9 * np.power(Q, 2 / 3) / np.power(h_s, 5 / 3)) + 20.0,
T_g,
)
T_g[T_g >= fire_nft_limit_c] = fire_nft_limit_c
return T_g
elif isinstance(l_s, np.ndarray) or isinstance(l_s, list):
l_s_list = copy.copy(l_s)
T_g_list = list()
for l_s in l_s_list:
r = np.absolute(l_s - l_fire_median)
T_g = np.where(
(r / h_s) > 0.8, (5.38 * np.power(Q / r, 2 / 3) / h_s) + 20.0, 0
)
T_g = np.where(
(r / h_s) <= 0.8,
(16.9 * np.power(Q, 2 / 3) / np.power(h_s, 5 / 3)) + 20.0,
T_g,
)
T_g[T_g >= fire_nft_limit_c] = fire_nft_limit_c
T_g_list.append(T_g)
return T_g_list
else:
raise TypeError('Unknown type of parameter "l_s": {}'.format(type(l_s)))
def temperature_si(
t: np.ndarray,
T_0: float,
q_fd: float,
hrrpua: float,
l: float,
w: float,
s: float,
e_h: float,
e_l: float,
T_max: float = 1323.15,
):
"""
This is an SI and improved version of the original `temperature` method.
:param t: ndarray, [s] An array representing time incorporating 'temperature'.
:param T_0: float, [K] ,Initial temperature.
:param q_fd: float, [J/m2], Fire load density.
:param hrrpua: float, [W/m2], Heat release rate density.
:param l: float, [m], Compartment length.
:param w: float, [m], Compartment width.
:param s: float, [m/s], Fire spread speed.
:param e_h: float, [m], Vertical distance between element to fuel bed.
:param e_l: float, [m], Horizontal distance between element to fire front.
    :param T_max: float, [K], Maximum (near-field) temperature cap.
    :return temperature: [K] An array representing temperature incorporating 'time'.
"""
# UNIT CONVERSION TO FIT EQUATIONS
T_0 -= 273.15
q_fd /= 1e6
hrrpua /= 1e6
T_max -= 273.15
    # work out the time step
time_step = t[1] - t[0]
    # work out burning time etc.
t_burn = max([q_fd / hrrpua, 900.0])
t_decay = max([t_burn, l / s])
t_lim = min([t_burn, l / s])
# reduce resolution to fit time step for t_burn, t_decay, t_lim
t_decay_ = round(t_decay / time_step, 0) * time_step
t_lim_ = round(t_lim / time_step, 0) * time_step
if t_decay_ == t_lim_:
t_lim_ -= time_step
    # work out the heat release rate ARRAY (corrected with time)
Q_growth = (hrrpua * w * s * t) * (t < t_lim_)
Q_peak = min([hrrpua * w * s * t_burn, hrrpua * w * l]) * (t >= t_lim_) * (t <= t_decay_)
Q_decay = (max(Q_peak) - (t - t_decay_) * w * s * hrrpua) * (t > t_decay_)
Q_decay[Q_decay < 0] = 0
Q = (Q_growth + Q_peak + Q_decay) * 1000.0
    # work out the distance from the fire median to the structural element r
l_fire_front = s * t
l_fire_front[l_fire_front < 0] = 0.0
l_fire_front[l_fire_front > l] = l
l_fire_end = s * (t - t_lim)
l_fire_end[l_fire_end < 0] = 0.0
l_fire_end[l_fire_end > l] = l
l_fire_median = (l_fire_front + l_fire_end) / 2.0
r = np.absolute(e_l - l_fire_median)
    r[r == 0] = 0.001  # avoid division by zero when the element lies exactly on the fire median
    # work out the far field temperature of gas T_g
T_g1 = (5.38 * np.power(Q / r, 2 / 3) / e_h) * ((r / e_h) > 0.18)
T_g2 = (16.9 * np.power(Q, 2 / 3) / np.power(e_h, 5 / 3)) * ((r / e_h) <= 0.18)
T_g = T_g1 + T_g2 + T_0
T_g[T_g >= T_max] = T_max
# UNIT CONVERSION TO FIT OUTPUT (SI)
T_g = T_g + 273.15 # C -> K
    Q *= 1e6  # MJ -> J
return T_g
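# Minimal call sketch (assumed values, mirroring the tests below): gas
# temperature at a beam 3 m above the floor, mid-length of a 40 m x 16 m room.
# T = temperature_si(t=np.arange(0, 3600, 30), T_0=293.15, q_fd=600e6,
#                    hrrpua=0.25e6, l=40.0, w=16.0, s=0.012, e_h=3.0, e_l=20.0)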
def _test_fire():
time = np.arange(0, 210 * 60, 30)
list_l = [25, 50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel(u"Temperature [$℃$]")
for length in list_l:
temperature_ = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=length / 2,
fire_nft_limit_c=1050,
)
ax.plot(time / 60, temperature_, label="Room length {:4.0f} m".format(length))
ax.legend(loc=4).set_visible(True)
ax.set_xlim((-10, 190))
ax.grid(color="k", linestyle="--")
plt.tight_layout()
plt.show()
def _test_fire_backup():
import numpy as np
time = np.arange(0, 22080, 30)
list_l = [50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
for l in list_l:
temperature_0 = temperature_si(
t=time,
T_0=293.15,
q_fd=900e6,
hrrpua=0.15e6,
l=l,
w=17.4,
s=0.012,
e_h=3.5,
e_l=l / 2,
)
temperature_1 = temperature(
t=time,
fire_load_density_MJm2=900,
fire_hrr_density_MWm2=0.15,
room_length_m=l,
room_width_m=17.4,
fire_spread_rate_ms=0.012,
beam_location_height_m=3.5,
beam_location_length_m=l / 2,
fire_nft_limit_c=1323.15 - 273.15
)
ax.plot(time / 60, temperature_0 - 273.15)
ax.plot(time / 60, temperature_1, ls=':', c='r')
assert np.allclose(temperature_0 - 273.15, temperature_1)
ax.legend().set_visible(True)
ax.set_xlim((0, 180))
ax.grid(color="grey", linestyle="--", linewidth=0.5)
plt.tight_layout()
plt.show()
def _test_fire_multiple_beam_location():
time = np.arange(0, 210 * 60, 30)
length = 100
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
temperature_list = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=np.linspace(0, length, 12)[1:-1],
fire_nft_limit_c=1050,
)
for temperature_ in temperature_list:
ax.plot(time / 60, temperature_, label="Room length {:4.0f} m".format(length))
ax.legend(loc=4).set_visible(True)
ax.set_xlim((-10, 190))
ax.grid(color="k", linestyle="--")
plt.tight_layout()
# plt.show()
def example():
time = np.arange(0, 210 * 60, 30)
list_l = [25, 50, 100, 150]
import matplotlib.pyplot as plt
plt.style.use("seaborn-paper")
fig, ax = plt.subplots(figsize=(3.94, 2.76))
ax.set_xlabel("Time [minute]")
ax.set_ylabel("Temperature [$℃$]")
for length in list_l:
temperature_ = temperature(
t=time,
fire_load_density_MJm2=600,
fire_hrr_density_MWm2=0.25,
room_length_m=length,
room_width_m=16,
fire_spread_rate_ms=0.012,
beam_location_height_m=3,
beam_location_length_m=length / 2,
fire_nft_limit_c=1050,
)
ax.plot(time / 60, temperature_, label="Room length {:4.0f} m".format(length))
ax.legend(loc=4).set_visible(True)
ax.set_xlim((0, 180))
ax.set_ylim((0, 1400))
ax.grid(color="k", linestyle="--")
plt.tight_layout()
plt.show()
if __name__ == "__main__":
_test_fire()
_test_fire_backup()
_test_fire_multiple_beam_location()
example()
|
{"hexsha": "68a477822791bb1bdd82adb8255db21be7e27309", "size": 11140, "ext": "py", "lang": "Python", "max_stars_repo_path": "fsetools/lib/fse_travelling_fire.py", "max_stars_repo_name": "fsepy/fsetools", "max_stars_repo_head_hexsha": "6b6c647912551680109a84d8640b9cfbe7970970", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-25T21:47:56.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-25T21:47:56.000Z", "max_issues_repo_path": "fsetools/lib/fse_travelling_fire.py", "max_issues_repo_name": "fsepy/fsetools", "max_issues_repo_head_hexsha": "6b6c647912551680109a84d8640b9cfbe7970970", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-02-24T10:10:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T11:18:08.000Z", "max_forks_repo_path": "fsetools/lib/fse_travelling_fire.py", "max_forks_repo_name": "fsepy/fsetools", "max_forks_repo_head_hexsha": "6b6c647912551680109a84d8640b9cfbe7970970", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9197707736, "max_line_length": 125, "alphanum_fraction": 0.5963195691, "include": true, "reason": "import numpy", "num_tokens": 3435}
|
// Copyright 2011-2014 Renato Tegon Forti
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
// For more information, see http://www.boost.org
#define BOOST_APPLICATION_FEATURE_NS_SELECT_BOOST
#include <iostream>
#include <boost/application.hpp>
#define BOOST_TEST_MODULE ArgAspect
#include <boost/test/unit_test.hpp>
using namespace boost;
BOOST_AUTO_TEST_CASE(arg_aspect)
{
auto& argc = boost::unit_test::framework::master_test_suite().argc;
auto& argv = boost::unit_test::framework::master_test_suite().argv;
application::args myargs(argc, argv);
BOOST_CHECK(myargs.argc());
const std::vector< std::string > &argvec = myargs.arg_vector();
BOOST_CHECK(argvec.size());
}
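// Usage sketch outside the test harness: the args aspect wraps argc/argv and
// exposes them as a std::vector<std::string>.
// application::args myargs(argc, argv);
// for (const std::string& a : myargs.arg_vector())
//     std::cout << a << '\n';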
|
{"hexsha": "2894985350155ed6c62fd33dd4b2307607378355", "size": 810, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/args_aspect_test.cpp", "max_stars_repo_name": "museghost/Boost.Application", "max_stars_repo_head_hexsha": "e3d16df35023ee90aea51631e4ffbb688341d61b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 89.0, "max_stars_repo_stars_event_min_datetime": "2015-01-09T02:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-28T07:03:11.000Z", "max_issues_repo_path": "test/args_aspect_test.cpp", "max_issues_repo_name": "retf/Boost.Application", "max_issues_repo_head_hexsha": "4d0cf24587694a67a58bb20614d035e059f103c8", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 28.0, "max_issues_repo_issues_event_min_datetime": "2015-01-15T11:30:16.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-16T12:05:39.000Z", "max_forks_repo_path": "test/args_aspect_test.cpp", "max_forks_repo_name": "museghost/Boost.Application", "max_forks_repo_head_hexsha": "e3d16df35023ee90aea51631e4ffbb688341d61b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 38.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T11:23:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-11T10:57:47.000Z", "avg_line_length": 24.5454545455, "max_line_length": 70, "alphanum_fraction": 0.7456790123, "num_tokens": 198}
|
import LoggingSetup
import logging
import itertools
import numpy as np
import ast
import math
import random
from functools import partial
from io import BytesIO
import pathlib
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
from matplotlib.patches import Circle, PathPatch, Arrow, FancyArrow
from matplotlib.text import Annotation
from matplotlib.path import Path
from matplotlib.backends.backend_agg import FigureCanvasAgg
import PIL.Image
from ipywidgets import interact, Button, IntText, GridBox, Layout, VBox, HBox, Box, HTML, Output, Label, FloatSlider, IntText, Image, Checkbox
from IPython.display import display
from World import World, WorldFactory
class VisualWorld:
"""An interactive representation of an informative path planning world, with a control system that is relying on ipywidgets to control the robots
and the evolution of time."""
def __init__(self, world):
self.world = world
self.config = {"IMAGE_WIDTH": 900, "IMAGE_HEIGHT": 900, "ROBOT_SIZE": 0.2,
"SHOW_ROBOT_PATH" : 10, "SHOW_ROBOT_HEADING": True,
"ROBOT_COLOR": ["white", "yellow", "red", "blue"],
"PATH": "screenshots"}
self.path_screenshots = pathlib.Path("screenshots")
self.path_screenshots.mkdir(exist_ok = True)
self.create_visualization()
self.update_visualization()
@staticmethod
def image_to_byte(X):
"""Takes an image as an array X, and returns a png bytevalue"""
# a = np.uint8(X)
f = BytesIO()
img = PIL.Image.fromarray(X)
img.save(f, "png")
bytevalue = f.getvalue()
return bytevalue
def create_robot_control_panel(self, robot):
"""Creates a panel that allows for the control and status visualization panel for a given robot."""
# Label - the name of the robot
label = Label(value=robot.name)
# button box for the manual entering of a robot action
btn_north = Button(description='North', layout=Layout(width='auto', grid_area='btn_north'))
btn_west = Button(description='West', layout=Layout(width='auto', grid_area='btn_west'))
btn_east = Button(description='East', layout=Layout(width='auto', grid_area='btn_east'))
btn_south = Button(description='South', layout=Layout(width='auto', grid_area='btn_south'))
for button in [btn_north, btn_west, btn_east, btn_south]:
button.on_click(partial(self.on_button_clicked_robot, robot))
arrows = GridBox(children=[btn_north, btn_west, btn_east, btn_south], layout=Layout(
width='100%',
grid_area=robot.name,
grid_template_rows='auto',
grid_template_columns='auto',
grid_template_areas='''
". btn_north btn_north ."
"btn_west btn_west btn_east btn_east"
". btn_south btn_south ."
'''
))
status = HTML(value=robot.toHTML())
self.dict_robot_status[robot.name] = status
panel = VBox([label, arrows, status], layout=Layout(border="solid 2px"))
return panel
def create_visualization(self):
"""Creates a panel that allows for the control and status visualization for all robots and the
advance of the status of the world."""
# The environment and the estimate panels
self.panel_environment = self.create_image_widget(self.config["IMAGE_WIDTH"], self.config["IMAGE_HEIGHT"])
self.panel_estimate = self.create_image_widget(self.config["IMAGE_WIDTH"], self.config["IMAGE_HEIGHT"])
self.panel_top = HBox([self.panel_environment, self.panel_estimate])
# The robot control panels
robot_panels = []
grid_template_areas=""
self.dict_robot_status = {} # mapping name to status HTML widget
for robot in self.world.robots:
robot_panels.append(self.create_robot_control_panel(robot))
robot_control_panels = HBox(robot_panels)
# apply policy button
btn_apply_policy = Button(description = "Apply policies", layout = Layout(width='20%'))
btn_apply_policy.on_click(partial(self.on_button_clicked_apply_policy))
# apply actions button
btn_apply_actions = Button(description = "Apply actions", layout = Layout(width='20%'))
btn_apply_actions.on_click(partial(self.on_button_clicked_apply_actions))
btn_proceed_all = Button(description = "Policy + Action", layout = Layout(width='20%'))
btn_proceed_all.on_click(partial(self.on_button_clicked_proceed_all))
self.widget_timestep = FloatSlider(value = 1.0, min = 0.1, max = 3.0, step = 0.1)
self.html_world_time = HTML(value="<b>T</b>=0")
world_panel = HBox([btn_apply_policy, btn_apply_actions, btn_proceed_all,
self.widget_timestep, self.html_world_time])
# world panel 2
btn_screenshot = Button(description = "Screenshot", layout = Layout(width='20%'))
btn_screenshot.on_click(partial(self.on_button_screenshot))
btn_multistep = Button(description = "Proceed multiple steps", layout = Layout(width='20%'))
btn_multistep.on_click(partial(self.on_button_proceed_multiple_steps))
self.inttext_multiple_steps = IntText(10, description = "How many steps?")
self.checkbox_update_each_step = Checkbox(value=True, description="Update each step?")
world_panel_2 = HBox([btn_screenshot, btn_multistep, self.inttext_multiple_steps,
self.checkbox_update_each_step])
#btn_proceed.on_click(partial(self.on_button_clicked_proceed))
full_control_panel = VBox([self.panel_top, robot_control_panels, world_panel, world_panel_2])
display(full_control_panel)
def on_button_proceed_multiple_steps(self, b):
print(f"TODO: proceed multiple steps {self.inttext_multiple_steps.value}")
for i in range(self.inttext_multiple_steps.value):
self.world.enact_policy(self.widget_timestep.value)
self.world.proceed(self.widget_timestep.value)
            # optionally refresh the display after each step
if self.checkbox_update_each_step.value:
self.update_visualization()
def on_button_screenshot(self, b):
fig = self.update_image(self.panel_environment, self.update_environment)
file_env = pathlib.Path(self.path_screenshots, f'env-{self.world.time}.pdf')
fig.savefig(file_env, transparent=False, dpi=80, bbox_inches="tight")
fig = self.update_image(self.panel_estimate, self.update_estimate)
file_estimate = pathlib.Path(self.path_screenshots, f'estimate-{self.world.time}.pdf')
fig.savefig(file_estimate, transparent=False, dpi=80, bbox_inches="tight")
print("Screenshot done")
def on_button_clicked_robot(self, robot, b):
print(f"on_button_clicked_robot {robot.name} button {b}")
robot.add_action(b.description)
self.update_visualization()
def on_button_clicked_proceed_all(self, b):
"""Action for the button Policy+Action"""
self.world.enact_policy(self.widget_timestep.value)
self.world.proceed(self.widget_timestep.value)
self.update_visualization()
def on_button_clicked_apply_policy(self, b):
"""Action for the button Apply policy"""
self.world.enact_policy(self.widget_timestep.value)
self.update_visualization()
def on_button_clicked_apply_actions(self, b):
"""Action for the button Apply actions"""
self.world.proceed(self.widget_timestep.value)
self.update_visualization()
def update_visualization(self):
# update the status in the robot panels
self.html_world_time.value = f"<b>T</b>={self.world.time}"
for r in self.world.robots:
status = self.dict_robot_status[r.name]
status.value = r.toHTML()
# update the images
self.update_image(self.panel_environment, self.update_environment)
self.update_image(self.panel_estimate, self.update_estimate)
def update_image(self, im, update_function):
"""Update the estimate"""
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(111)
update_function(self, ax)
self.update_image_from_figure(fig, im)
return fig
@staticmethod
def update_estimate(self, ax):
"""An update function, to be called from update_image"""
ax.imshow(self.world.im.value)
@staticmethod
def update_environment(self, ax):
"""An update function, to be called from update_image"""
ax.imshow(self.world.env.value)
for i, robot in enumerate(self.world.robots):
robot_color = self.config["ROBOT_COLOR"][i % len(self.config["ROBOT_COLOR"])]
robot_size = self.config["ROBOT_SIZE"]
patch = Circle((robot.x, robot.y), robot_size, color=robot_color, linewidth=2, fill=False)
ax.add_patch(patch)
patch = Annotation(robot.name, (robot.x + robot_size,robot.y - robot_size))
ax.add_artist(patch)
if self.config["SHOW_ROBOT_PATH"] > 0:
vertices = [[val[0], val[1]] for val in robot.location_history]
            # cut the length of the path
length = 0
current = [robot.x, robot.y]
for i, vertex in enumerate(vertices):
length += abs(current[0] - vertex[0]) + abs(current[1] - vertex[1])
current = vertex
if (length > self.config["SHOW_ROBOT_PATH"]):
vertices = vertices[0:i]
break
if len(vertices) > 0:
codes = Path.LINETO * np.ones(len(vertices), dtype=Path.code_type)
codes[0] = Path.MOVETO
path = Path(vertices, codes)
patch = PathPatch(path, facecolor=None, edgecolor=robot_color, fill=False, alpha=0.3)
ax.add_patch(patch)
if self.config["SHOW_ROBOT_HEADING"]:
x = robot.x
y = robot.y
dx = robot.vel_x
dy = robot.vel_y
patch = FancyArrow(x,y,dx,dy, width=0.1, head_width=0.3, overhang=0.8, color=robot_color)
ax.add_patch(patch)
def create_env_robot_visualization(self):
"""Two figures, side by side"""
self.panel_environment = self.create_image_widget()
self.panel_estimate = self.create_image_widget()
self.panel_top = HBox([self.panel_environment, self.panel_estimate])
def create_image_widget(self, width = 400, height=400):
"""Creates an empty image widget from a canvas"""
fig = Figure()
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(111)
ax.imshow(np.ones([4,4]))
fig.canvas.draw()
buf = fig.canvas.buffer_rgba()
emptyim = np.asarray(buf)
value = self.image_to_byte(emptyim)
im = Image(value=value, format="png", width = width, height=height)
return im
@staticmethod
def update_image_from_figure(fig, im):
"""Updates an image from a figure"""
fig.canvas.draw()
buf = fig.canvas.buffer_rgba()
imagecontent = np.asarray(buf)
value = VisualWorld.image_to_byte(imagecontent)
im.value = value
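# Usage sketch (in a Jupyter notebook): wrap an existing World in the panel.
# How the World is constructed is not shown in this file -- `make_world()` below
# stands in for whatever constructor/factory (e.g. WorldFactory) your setup provides.
# world = make_world()      # hypothetical
# vw = VisualWorld(world)   # builds and displays the interactive control panel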
|
{"hexsha": "60942088093b0d0979184d6958552d26f466e16f", "size": 11756, "ext": "py", "lang": "Python", "max_stars_repo_path": "VisualWorld.py", "max_stars_repo_name": "lboloni/MREM", "max_stars_repo_head_hexsha": "f0d6354d1d3b625e71597a42e03f4e5d859a2ef2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "VisualWorld.py", "max_issues_repo_name": "lboloni/MREM", "max_issues_repo_head_hexsha": "f0d6354d1d3b625e71597a42e03f4e5d859a2ef2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "VisualWorld.py", "max_forks_repo_name": "lboloni/MREM", "max_forks_repo_head_hexsha": "f0d6354d1d3b625e71597a42e03f4e5d859a2ef2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.024, "max_line_length": 149, "alphanum_fraction": 0.6355052739, "include": true, "reason": "import numpy", "num_tokens": 2520}
|
MODULE CONVERTWGS84
! ** CONVERT GEOGRAPHIC COORDINATES TO UTM AND REVERSE
! ** AUTHOR: DH CHUNG
! ** START : 2008
! ** UPDATE: 2016-06-16
USE GLOBAL,ONLY:RKD,PI,HEMI,UTMZ
IMPLICIT NONE
! ** HEMI = HEMISPHERE (1:NORTH / 2:SOUTH)
! ** UTMZ = ZONE NUMBER (1:60)
! ** GEOGRAPHY SYSTEM:
! ** 1.WGS84/NAD83 /2.GRS80 /3.WGS72
CHARACTER(20):: GEOSYS='1.WGS84/NAD83'
REAL(RKD):: R_MJR !SEMI MAJOR AXIS
REAL(RKD):: R_MNR !SEMI MINOR AXIS
REAL(RKD):: SCLF !SCALE ALONG CENTRAL MERIDIAN
REAL(RKD):: FE !X OFFSET IN METER
REAL(RKD):: FN !Y OFFSET IN METER
REAL(RKD):: LAM0 !CENTRAL MERIDIAN OF ZONE [DEGREE]
REAL(RKD):: FLA,E2,EP2
CONTAINS
SUBROUTINE UTMPARS
! ** UTM PARAMETERS
IF (HEMI==1) THEN
FN=0
ELSEIF(HEMI==2) THEN
FN=1.D7
ENDIF
FE=500000._RKD
SCLF=0.9996_RKD
IF (GEOSYS(1:1)=='1') THEN
R_MJR = 6378137._RKD
FLA =1._RKD/298.257223563_RKD
ELSEIF(GEOSYS(1:1)=='2') THEN
R_MJR = 6378137._RKD
FLA =1._RKD/298.257222101_RKD
ELSEIF(GEOSYS(1:1)=='3') THEN
R_MJR = 6378135._RKD
FLA =1._RKD/298.26_RKD
ENDIF
E2=2*FLA-FLA**2
EP2=E2/(1-E2)
R_MNR=R_MJR*SQRT(1-E2)
LAM0 = 6*(UTMZ-30)-3 !CENTRAL MERIDIAN IN DEGREE
END SUBROUTINE
SUBROUTINE UTM_WGS84(LON,LAT,XUTM,YUTM)
! *********************************************************
! ** INPUT:
! LAT = LATITUDE OF POINTS [DECIMAL DEGREE]
! LON = LONGITUDE OF POINTS [DECIMAL DEGREE]
! ** OUTPUT:
! XUTM = ABSCISSA OF POINTS: XUTM [M]
! YUTM = ORDINATE OF POINTS: YUTM [M]
REAL(RKD) ,INTENT(IN )::LON(:),LAT(:)
REAL(RKD) ,INTENT(OUT)::XUTM(:),YUTM(:)
REAL(RKD):: F2,AP,BP,CP,DP,EP,L0
REAL(RKD),DIMENSION(SIZE(LON))::A2,A4,A6,B1,B3,B5,DLAM,RN,LEN1,TAPH,ETA2,LAM,PHI
L0 =PI*LAM0/180
LAM = PI*LON/180
PHI = PI*LAT/180
DLAM= LAM-L0
RN=R_MJR/SQRT(1-E2*SIN(PHI)**2)
F2=(R_MJR-R_MNR)/(R_MJR+R_MNR)
AP=R_MJR*(1-F2+(5._RKD/4)*(F2**2-F2**3)+(81._RKD/64)*(F2**4-F2**5))
BP=(3._RKD/2)*R_MJR*F2*(1-F2+(7._RKD/8)*(F2**2-F2**3)+(55._RKD/64)*(F2**4-F2**5))
CP=(15._RKD/16)*R_MJR*F2**2*(1-F2+(3._RKD/4)*(F2**2-F2**3))
DP=(35._RKD/48)*R_MJR*F2**3*(1-F2+(11._RKD/16)*(F2**2-F2**3))
    EP=(315._RKD/512)*(R_MJR*F2**4)*(1-F2)
LEN1=AP*PHI-BP*SIN(2*PHI)+CP*SIN(4*PHI)-DP*SIN(6*PHI)+EP*SIN(8*PHI)
ETA2=EP2*COS(PHI)**2
TAPH = TAN(PHI)**2
A2=RN/2*SIN(PHI)*COS(PHI)
A4=RN/24*SIN(PHI)*COS(PHI)**3*(5-TAPH+9*ETA2+4*ETA2**2)
A6=RN/720*SIN(PHI)*COS(PHI)**5*(61-58*TAPH+TAPH**2+270*ETA2-330*ETA2*TAPH)
B1=RN*COS(PHI)
B3=RN/6*COS(PHI)**3*(1-TAPH+ETA2)
B5=RN/120*COS(PHI)**5*(5-18*TAPH+TAPH**2+14*ETA2-58*ETA2*TAPH+13*ETA2**2-64*ETA2**2*TAPH)
YUTM=SCLF*(LEN1+A2*DLAM**2+A4*DLAM**4+A6*DLAM**6)+FN
XUTM=SCLF*(B1*DLAM+B3*DLAM**3+B5*DLAM**5)+FE
END SUBROUTINE
SUBROUTINE UTMR_WGS84(XUTM,YUTM,LON,LAT)
! ** INPUT:
! XUTM [M]
! YUTM [M]
! ** OUTPUT:
! LON = LONGITUDE (DECIMAL DEGREE)
! LAT = LATITUDE (DECIMAL DEGREE)
REAL(RKD), INTENT(IN )::XUTM(:),YUTM(:)
REAL(RKD), INTENT(OUT)::LON(:),LAT(:)
REAL(RKD)::E1,J1,J2,J3,J4,L0
REAL(RKD),DIMENSION(SIZE(XUTM))::RM,MU,BX,ETAX2,VX2,NX,TX,A2,A4,A6,B1,B3,B5,Y
L0 =PI*LAM0/180
RM = YUTM/SCLF
MU = RM/(R_MJR*(1-E2/4-3*E2**2/64 - 5*E2**3/256))
E1 = (1-SQRT(1-E2))/(1+SQRT(1-E2))
J1 = 3*E1/2 - 27*E1**3/32
J2 = 21*E1**2/16 - 55*E1**4/32
J3 = 151*E1**3/96
J4 = 1097*E1**4/512
BX = MU+J1*SIN(2*MU)+J2*SIN(4*MU)+J3*SIN(6*MU)+J4*SIN(8*MU)
ETAX2=EP2*COS(BX)**2
VX2=1+ETAX2
NX =R_MJR/SQRT(1-E2*SIN(BX)**2)
TX =TAN(BX)
A2 =-VX2*TX/(2*NX**2)
A4 =-A2/(12*NX**2)*(5+3*TX**2+ETAX2-9*ETAX2*TX**2-4*ETAX2**2)
A6 =-A2/(360*NX**4)*(61-90*TX**2+45*TX**4-46*ETAX2)
B1 = 1/(NX*COS(BX))
B3 =-B1/(6*NX**2)*(1+2*TX**2+ETAX2)
B5 =-B1/(120*NX**4)*(5+28*TX**2+24*TX**4+6*ETAX2+8*ETAX2*TX**2)
Y =(XUTM-FE)/SCLF
LAT=BX+A2*Y**2+A4*Y**4+A6*Y**6
LON=B1*Y+B3*Y**3+B5*Y**5 +L0
LAT=LAT*180/PI ! TO DEGREE
LON=LON*180/PI ! TO DEGREE
END SUBROUTINE
END MODULE
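! Usage sketch (commented out; assumes GLOBAL supplies RKD/PI and that HEMI and
! UTMZ are writable there): convert one lon/lat pair to UTM and back.
!PROGRAM UTM_DEMO
!  USE CONVERTWGS84
!  IMPLICIT NONE
!  REAL(RKD):: LON(1),LAT(1),X(1),Y(1)
!  HEMI=1; UTMZ=48
!  CALL UTMPARS
!  LON(1)=105.85_RKD; LAT(1)=21.03_RKD
!  CALL UTM_WGS84(LON,LAT,X,Y)
!  CALL UTMR_WGS84(X,Y,LON,LAT)
!END PROGRAM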
|
{"hexsha": "58bb3566b57af07ed2f75cd5a7b1ef71361c9b02", "size": 3982, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "EFDC/convertwgs84.f90", "max_stars_repo_name": "dsi-llc/EFDCPlus", "max_stars_repo_head_hexsha": "27ece1cd0bb9e02a46d1ad20f343bc5d109acfb3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2019-09-11T23:39:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T08:14:29.000Z", "max_issues_repo_path": "EFDC/convertwgs84.f90", "max_issues_repo_name": "dsi-llc/EFDCPlus8.5", "max_issues_repo_head_hexsha": "27ece1cd0bb9e02a46d1ad20f343bc5d109acfb3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-08-06T01:59:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-25T01:46:32.000Z", "max_forks_repo_path": "EFDC/convertwgs84.f90", "max_forks_repo_name": "dsi-llc/EFDCPlus", "max_forks_repo_head_hexsha": "27ece1cd0bb9e02a46d1ad20f343bc5d109acfb3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 22, "max_forks_repo_forks_event_min_datetime": "2020-03-06T06:34:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-02T04:15:55.000Z", "avg_line_length": 28.0422535211, "max_line_length": 91, "alphanum_fraction": 0.5861376193, "num_tokens": 2044}
|
'''
Created on April 16th, 2015
@author: bennettd
'''
import numpy
import pylab
import gpib_instrument
class AgilentE4407B(gpib_instrument.Gpib_Instrument):
'''
Agililent E4407B control class
'''
def __init__(self, pad, board_number = 0, name = '', sad = 0, timeout = 13, send_eoi = 1, eos_mode = 0):
'''Constructor - The PAD (Primary GPIB Address) is the only required parameter '''
super(AgilentE4407B, self).__init__(board_number, name, pad, sad, timeout, send_eoi, eos_mode)
# GPIB identity string of the instrument
self.id_string = "Hewlett-Packard, E4407B, SG44210888, A.14.01"
self.manufacturer = 'Agilent'
self.model_number = 'E4407B'
self.description = 'Spectrum Analyzer'
def identifyInstrument(self):
'''Get the identiy string from instruement'''
inst_idn = self.ask('*IDN?')
        print(inst_idn)
def setCenterFrequency(self, frequency = 100):
'''
Set the center frequency in Hz
'''
frequencystring = str(frequency)
commandstring = 'SENS:FREQ:CENT ' + frequencystring
self.write(commandstring)
def setSpanFrequency(self, frequency = 100):
'''
Set the span frequency in Hz
'''
frequencystring = str(frequency)
commandstring = 'SENS:FREQ:SPAN ' + frequencystring
self.write(commandstring)
def getStartFrequency(self):
'''
Get the start frequency in Hz
'''
commandstring = 'SENS:FREQ:STAR?'
result = self.ask(commandstring)
value = float(result)
return value
def getStopFrequency(self):
'''
Get the stop frequency in Hz
'''
commandstring = 'SENS:FREQ:STOP?'
result = self.ask(commandstring)
value = float(result)
return value
def getTrace(self, trace=1):
'''
        data_array = getTrace(trace)
        trace = 1, 2 or 3 (trace number on the analyzer)
        Read the amplitude data of the specified trace and return it
        as a numpy array of floats.
'''
if trace > 3 or trace < 1:
print('Not a valid trace!')
trace = 1
commandstring = 'TRAC:DATA? TRACE' + str(int(trace))
self.write(commandstring)
result = self.read(20480)
str_array = numpy.array(result.split(','))
        data_array = str_array.astype(float)
return data_array
def getSpectrum(self, trace=1):
psd = self.getTrace(trace)
start_freq = self.getStartFrequency()
stop_freq = self.getStopFrequency()
pnts = len(psd)
freqs = numpy.linspace(start_freq, stop_freq, pnts)
data_array = numpy.vstack((freqs,psd))
return data_array
def plotSpectrum(self, trace=1, file_name=None):
data_array = self.getSpectrum(trace)
if file_name is not None:
numpy.savetxt(file_name, data_array.transpose())
pylab.plot(data_array[0],data_array[1])
pylab.xlabel('Frequency (Hz)')
pylab.ylabel('Power (dB)')
pylab.show()
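# Usage sketch (requires a GPIB adapter; PAD 18 is an assumed address):
# sa = AgilentE4407B(pad=18)
# sa.setCenterFrequency(1e9)   # 1 GHz centre
# sa.setSpanFrequency(1e6)     # 1 MHz span
# sa.plotSpectrum(trace=1, file_name='spectrum.txt')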
|
{"hexsha": "ebb513a4017d8621ba248e12530fab2ed86e8194", "size": 3266, "ext": "py", "lang": "Python", "max_stars_repo_path": "waferscreen/inst_control/inactive/agilent_e4407B.py", "max_stars_repo_name": "chw3k5/WaferScreen", "max_stars_repo_head_hexsha": "c0ca7fe939fe7cd0b722b7d6129b148c03a7505c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-30T19:06:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-30T19:06:07.000Z", "max_issues_repo_path": "waferscreen/inst_control/inactive/agilent_e4407B.py", "max_issues_repo_name": "chw3k5/WaferScreen", "max_issues_repo_head_hexsha": "c0ca7fe939fe7cd0b722b7d6129b148c03a7505c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-04-22T20:47:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-30T19:06:01.000Z", "max_forks_repo_path": "waferscreen/inst_control/inactive/agilent_e4407B.py", "max_forks_repo_name": "chw3k5/WaferScreen", "max_forks_repo_head_hexsha": "c0ca7fe939fe7cd0b722b7d6129b148c03a7505c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.649122807, "max_line_length": 108, "alphanum_fraction": 0.5845070423, "include": true, "reason": "import numpy", "num_tokens": 792}
|
using Test
using TestSetExtensions
using LinearAlgebra
using Qaintessent
##==----------------------------------------------------------------------------------------------------------------------
# adapted from https://github.com/FluxML/Zygote.jl/blob/master/test/gradcheck.jl
function ngradient(f, xs::AbstractArray...)
grads = zero.(xs)
for (x, Δ) in zip(xs, grads), i in 1:length(x)
δ = sqrt(eps())
tmp = x[i]
x[i] = tmp - δ/2
y1 = f(xs...)
x[i] = tmp + δ/2
y2 = f(xs...)
x[i] = tmp
Δ[i] = (y2-y1)/δ
if eltype(x) <: Complex
# derivative with respect to imaginary part
x[i] = tmp - im*δ/2
y1 = f(xs...)
x[i] = tmp + im*δ/2
y2 = f(xs...)
x[i] = tmp
Δ[i] += im*(y2-y1)/δ
end
end
return grads
end
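# Quick check (sketch): the central difference of sin at x = 0.3 matches
# cos(0.3) to roughly sqrt(eps) accuracy:
#   ngradient(x -> sin(x[]), [0.3])[1][] ≈ cos(0.3)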
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "gate gradients" begin
@testset "single qubit gates" begin
# fictitious gradients of cost function with respect to quantum gate
Δ = randn(ComplexF64, 2, 2)
for g in [RxGate, RyGate, RzGate]
f = (θ) -> 2*real(sum(Δ .* Qaintessent.sparse_matrix(g(θ[]))))
θ = 2π*rand()
ngrad = ngradient(f, [θ])
dg = Qaintessent.backward(g(θ), conj(Δ))
@test isapprox(dg.θ, ngrad[1], rtol=1e-5, atol=1e-5)
end
for g in [XGate, YGate, ZGate, HadamardGate, SGate, SdagGate, TGate, TdagGate]
dg = Qaintessent.backward(g(), conj(Δ))
@test dg ≈ g()
end
begin
f = (ϕ) -> 2*real(sum(Δ .* Qaintessent.sparse_matrix(PhaseShiftGate(ϕ[]))))
ϕ = 2π*rand()
ngrad = ngradient(f, [ϕ])
dg = Qaintessent.backward(PhaseShiftGate(ϕ), conj(Δ))
@test isapprox(dg.ϕ, ngrad[1], rtol=1e-5, atol=1e-5)
end
begin
f = (nθ) -> 2*real(sum(Δ .* Qaintessent.sparse_matrix(RotationGate(nθ))))
nθ = randn(3)
ngrad = ngradient(f, nθ)
dg = Qaintessent.backward(RotationGate(nθ), conj(Δ))
@test isapprox(dg.nθ, ngrad[1], rtol=1e-6)
end
begin
f = (nθ) -> 2*real(sum(Δ .* Qaintessent.sparse_matrix(RotationGate(nθ))))
# special case: zero vector
nθ = zeros(3)
ngrad = ngradient(f, nθ)
dg = Qaintessent.backward(RotationGate(nθ), conj(Δ))
@test isapprox(dg.nθ, ngrad[1], rtol=1e-6)
end
end
@testset "two qubit gates" begin
# fictitious gradients of cost function with respect to quantum gate
Δ = randn(ComplexF64, 4, 4)
@testset "entanglement gates" begin
for g in [EntanglementXXGate, EntanglementYYGate, EntanglementZZGate]
f(θ) = 2*real(sum(Δ .* Qaintessent.sparse_matrix(g(θ[]))))
θ = 2π*rand()
ngrad = ngradient(f, [θ])
dg = Qaintessent.backward(g(θ), conj(Δ))
@test isapprox(dg.θ, ngrad[1], rtol=1e-5, atol=1e-5)
end
end
@testset "two qubit gates" begin
for g in [SwapGate()]
dg = Qaintessent.backward(g, conj(Δ))
@test dg ≈ g
end
end
end
@testset "controlled gates" begin
# fictitious gradients of cost function with respect to quantum gate
Δ = randn(ComplexF64, 8, 8)
for g in [RxGate, RyGate, RzGate]
f(θ) = 2*real(sum(Δ .* Qaintessent.sparse_matrix(ControlledGate{g}(g(θ[]), 2))))
θ = 2π*rand()
ngrad = ngradient(f, [θ])
dg = Qaintessent.backward(ControlledGate{g}(g(θ), 2), conj(Δ))
@test isapprox(dg.U.θ, ngrad[1], rtol=1e-5, atol=1e-5)
end
for g in [EntanglementXXGate, EntanglementYYGate, EntanglementZZGate]
f(θ) = 2*real(sum(Δ .* Qaintessent.sparse_matrix(ControlledGate{g}(g(θ[]), 1))))
θ = 2π*rand()
ngrad = ngradient(f, [θ])
dg = Qaintessent.backward(ControlledGate{g}(g(θ), 2), conj(Δ))
@test isapprox(dg.U.θ, ngrad[1], rtol=1e-5, atol=1e-5)
end
for g in [XGate, YGate, ZGate, HadamardGate, SGate, SdagGate, TGate, TdagGate]
dg = Qaintessent.backward(ControlledGate{g}(g(), 2), conj(Δ))
@test dg ≈ ControlledGate{g}(g(), 2)
end
end
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gradients" begin
# construct parametrized circuit
N = 4
    A = rand(ComplexF64, 2, 2)
U, R = qr(A)
U = Array(U)
i = rand(1:N)
cgc(θ, ϕ, χ, κ, ωn) = CircuitGate[
circuit_gate(3, HadamardGate()),
circuit_gate(2, RzGate(θ), (1, 4)),
circuit_gate(2, TGate(), 4),
circuit_gate(2, 3, SwapGate()),
circuit_gate(2, SdagGate(), 1),
circuit_gate(3, PhaseShiftGate(ϕ)),
circuit_gate(1, RyGate(χ)),
circuit_gate(4, HadamardGate(), 2),
circuit_gate(3, X, 1),
circuit_gate(2, Z, 4),
circuit_gate(4, Y, 1),
circuit_gate(3, 1, EntanglementYYGate(κ)),
circuit_gate(3, RotationGate(ωn)),
circuit_gate(4, SGate()),
circuit_gate(2, TdagGate(), 3),
circuit_gate(i, MatrixGate(U)),
circuit_gate(2, Z),
circuit_gate(3, X),
circuit_gate(2, Y, 4),
circuit_gate(1, SGate()),
circuit_gate(2, 3, SwapGate(), 4),
circuit_gate(3, SdagGate()),
circuit_gate(2, TGate()),
]
# measurement operators
meas(M) = MeasurementOperator.([Matrix{Float64}(I, 2^N, 2^N), Hermitian(M)], (Tuple(1:N),))
# parameter values
θ = 1.5π
ϕ = 0.3
χ = √2
κ = exp(-0.4)
n = randn(Float64, 3)
n /= norm(n)
ωn = 0.2π * n
M = randn(ComplexF64, 2^N, 2^N)
M = 0.5*(M + adjoint(M))
c = Circuit{N}(cgc(θ, ϕ, χ, κ, ωn), meas(M))
# input quantum state
ψ = randn(ComplexF64, 2^N)
# fictitious gradients of cost function with respect to circuit output
Δ = [0.3, -1.2]
dc = Qaintessent.gradients(c, ψ, Δ)[1]
f(rθ, rϕ, rχ, rκ, ωn, M) = dot(Δ, apply(ψ, Circuit{N}(cgc(rθ[], rϕ[], rχ[], rκ[], ωn), meas(M))))
# numeric gradients
ngrad = ngradient(f, [θ], [ϕ], [χ], [κ], ωn, M)
# symmetrize gradient with respect to measurement operator
ngrad[end][:] = 0.5*(ngrad[end] + adjoint(ngrad[end]))
@test all(isapprox.(ngrad,
(dc.moments[1][2].gate.U.θ,
dc.moments[4][2].gate.ϕ,
dc.moments[5][1].gate.θ,
dc.moments[8][1].gate.θ,
dc.moments[9][1].gate.nθ,
sparse_matrix(dc.meas[2])), rtol=1e-5, atol=1e-5))
end
##==----------------------------------------------------------------------------------------------------------------------
@testset ExtendedTestSet "circuit gradients with moments" begin
# construct parametrized circuit
N = 4
cgc(θ, ϕ, χ, ωn) = Moment[
Moment([
circuit_gate(3, HadamardGate()),
circuit_gate(2, RzGate(θ), (1, 4)),
]),
Moment(circuit_gate(2, 3, SwapGate())),
Moment(circuit_gate(3, PhaseShiftGate(ϕ))),
Moment([
circuit_gate(3, RotationGate(ωn)),
circuit_gate(1, RyGate(χ)),
]),
]
# measurement operators
meas(M) = MeasurementOperator.([Matrix{Float64}(I, 2^N, 2^N), Hermitian(M)], (Tuple(1:N),))
# parameter values
θ = 1.5π
ϕ = 0.3
χ = √2
n = randn(Float64, 3)
n /= norm(n)
ωn = 0.2π * n
M = randn(ComplexF64, 2^N, 2^N)
M = 0.5*(M + adjoint(M))
c = Circuit{N}(cgc(θ, ϕ, χ, ωn), meas(M))
# input quantum state
ψ = randn(ComplexF64, 2^N)
# fictitious gradients of cost function with respect to circuit output
Δ = [0.3, -1.2]
dc = Qaintessent.gradients(c, ψ, Δ)[1]
f(rθ, rϕ, rχ, ωn, M) = dot(Δ, apply(ψ, Circuit{N}(cgc(rθ[], rϕ[], rχ[], ωn), meas(M))))
# numeric gradients
ngrad = ngradient(f, [θ], [ϕ], [χ], ωn, M)
# symmetrize gradient with respect to measurement operator
ngrad[end][:] = 0.5*(ngrad[end] + adjoint(ngrad[end]))
@test all(isapprox.(ngrad,
(dc.moments[1][2].gate.U.θ,
dc.moments[3][1].gate.ϕ,
dc.moments[4][2].gate.θ,
dc.moments[4][1].gate.nθ,
sparse_matrix(dc.meas[2])), rtol=1e-5, atol=1e-5))
end
|
{"hexsha": "184b00c42b66579605bafa3f80ea86363f88536d", "size": 8624, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_gradients.jl", "max_stars_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_stars_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-05-25T11:43:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T11:34:12.000Z", "max_issues_repo_path": "test/test_gradients.jl", "max_issues_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_issues_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2020-04-09T17:15:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T12:46:52.000Z", "max_forks_repo_path": "test/test_gradients.jl", "max_forks_repo_name": "oguzcankirmemis/Qaintessent.jl", "max_forks_repo_head_hexsha": "6261dc5d8a9a7ea7d406ea39cac950747583f414", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-12-16T13:25:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T15:49:00.000Z", "avg_line_length": 31.9407407407, "max_line_length": 122, "alphanum_fraction": 0.5074211503, "num_tokens": 2854}
|
"""
Test script for weight_set.py.
"""
import unittest
import numpy as np
np.random.seed(1234)
from copy import deepcopy
from models.tools.weight_set import WeightSet
initializations = ['random', 'glorot_normal',
'glorot_uniform',
'he_normal', 'he_uniform',
'lecun_normal', 'lecun_uniform']
class TestWeightSet(unittest.TestCase):
"""
Test class for the WeightSet class.
"""
    def setUp(self):
        """
        Setup WeightSet for every test
        """
        self.reset()
def reset(self):
"""
Initialize arbitrary WeightSet
"""
# Initialize random layers and sizes
model_depth = np.random.randint(3, 10)
self.layer_sizes = [np.random.randint(2, 50) for _ in range(model_depth)]
# Output size must be smaller than last hidden layer
self.output_size = np.random.randint(1, self.layer_sizes[-1])
self.weightset = WeightSet(layer_sizes=self.layer_sizes + [self.output_size])
# Save input size for testing
self.input_size = self.layer_sizes[0]
def test_clone(self):
"""
Test cloning (deepcopy)
"""
clone = self.weightset.clone()
# Test if they are not the same instance
self.assertNotEqual(self.weightset, clone)
# Test if arguments are the same
self.assertEqual(self.weightset.layer_sizes, clone.layer_sizes)
self.assertEqual(self.weightset.activation, clone.activation)
self.assertEqual(self.weightset.initialization_name, clone.initialization_name)
# Test if deepcopy argument change does not change original
clone.activation = 'foo'
clone.layer_sizes = [-999, -999]
clone.initialization_name = 'foo'
self.assertNotEqual(self.weightset.activation, clone.activation)
        self.assertNotEqual(self.weightset.layer_sizes, clone.layer_sizes)
self.assertNotEqual(self.weightset.initialization_name, clone.initialization_name)
def test_randn_init(self):
"""
Test random initialization of weights
"""
for _ in range(1000):
self.reset()
self.assertIsInstance(self.weightset.weights, np.ndarray)
def test_feedforward(self):
"""
Test the feedforward method
"""
for _ in range(100):
# Get random set of weights
self.reset()
# The feedforward pass must work on all activation functions
for act_func in self.weightset.activation_dir.values():
self.weightset.activation = act_func
# Make feedforward pass
model_input = np.random.uniform(-999, 999, self.input_size)
output = self.weightset.feedforward(model_input)
                # Output must be a NumPy integer (presumably an argmax class index)
self.assertIsInstance(output, np.int64)
def test_mutate(self):
"""
Test if mutate changes the weights in-place
"""
for _ in range(1000):
mutation_probability = np.random.uniform(0.00001, 1)
weights_before = deepcopy(self.weightset.weights)
self.weightset.mutate(mutation_probability)
weights_after = self.weightset.weights
# Weights cannot be the same after mutating if mutation_probability > 0
self.assertNotEqual(weights_before, weights_after)
def test_crossbreed(self):
"""
Test if crossbreeding works properly and results in a entirely new WeightSet
"""
# Get two arbitrary arrays
for _ in range(1000):
# Get arbitrary array for crossbreeding
ws2 = WeightSet(layer_sizes=self.layer_sizes)
crossbreeded_ws = WeightSet.crossbreed(self.weightset, ws2)
# Crossbreeded WeightSet cannot be the same as the input instances
self.assertNotEqual(crossbreeded_ws, self.weightset)
self.assertNotEqual(crossbreeded_ws, ws2)
# Crossbreeded weights cannot be the same as either of the initial weights
self.assertNotEqual(crossbreeded_ws.weights, self.weightset.weights)
self.assertNotEqual(crossbreeded_ws.weights, ws2.weights)
# Crossbreeded weightset must be a WeightSet instance
self.assertIsInstance(crossbreeded_ws, WeightSet)
self.assertIsInstance(crossbreeded_ws.weights, np.ndarray)
# Activation function must be the same as the first weightset
self.assertEqual(self.weightset.activation_name, crossbreeded_ws.activation_name)
def test_get_init_std(self):
"""
Test if all initialization schemes are working properly
"""
# Create arbitrary values for rows and columns
for _ in range(1000):
rows = np.random.randint(1, 1000)
columns = np.random.randint(1, 1000)
for init in initializations:
self.weightset.initialization_name = init
std = self.weightset.get_init_std(rows, columns)
self.assertIsInstance(std, np.float64)
self.assertGreater(std, 0)
def test_activations(self):
"""
Test all activations that are included in the activation directory
"""
for _ in range(100):
            # Initialize random weights and grab a weight and a bias array
            # from the first layer (layout assumed to be [weights, biases])
            self.weightset.weights = self.weightset.randn_init()
            weight_array = self.weightset.weights[0][0]
            bias_array = self.weightset.weights[0][1]
for act_func in self.weightset.activation_dir.values():
result1 = act_func(weight_array)
result2 = act_func(bias_array)
self.assertIsInstance(result1, np.ndarray)
self.assertIsInstance(result2, np.ndarray)
|
{"hexsha": "20be302b6c50ef60e491b6cbbcebcf05bd894a87", "size": 6377, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/models/tools/test_weight_set.py", "max_stars_repo_name": "TeamSerpentine/retro-baselines", "max_stars_repo_head_hexsha": "9b2c725604496aca9c382a53f456d31fdbcaa5b1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-09T08:41:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-22T02:29:22.000Z", "max_issues_repo_path": "test/models/tools/test_weight_set.py", "max_issues_repo_name": "TeamSerpentine/retro-baselines", "max_issues_repo_head_hexsha": "9b2c725604496aca9c382a53f456d31fdbcaa5b1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/models/tools/test_weight_set.py", "max_forks_repo_name": "TeamSerpentine/retro-baselines", "max_forks_repo_head_hexsha": "9b2c725604496aca9c382a53f456d31fdbcaa5b1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.6086956522, "max_line_length": 93, "alphanum_fraction": 0.6321154148, "include": true, "reason": "import numpy", "num_tokens": 1279}
|
"""Problem 4.
Author: Lucas David -- <ld492@drexel.edu>
"""
import multiprocessing
from mpl_toolkits.mplot3d import Axes3D
from scipy.io import loadmat
from sklearn.cluster import KMeans
from sklearn.model_selection import GridSearchCV, train_test_split
from algorithms import RBFRegressor
Axes3D  # noqa: referenced only for its side effect of registering the 3D projection
N_JOBS = multiprocessing.cpu_count() - 2
N_CLUSTERS = 50
N_FEATURES = 2
def train(clf, params, X, y):
grid = GridSearchCV(clf, params, n_jobs=N_JOBS)
grid.fit(X, y)
print('grid parameters: %s' % params)
print('best parameters: %s' % grid.best_params_)
print('best estimator\'s score in validation fold: %.2f' % grid.best_score_)
evaluate(grid.best_estimator_, X, y)
return grid
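# Example usage of train() with a hypothetical hyperparameter grid; the
# actual tunable parameters of RBFRegressor may differ:
#
#   grid = train(RBFRegressor(n_features=N_FEATURES, centers=centers),
#                {'regularization': [1e-3, 1e-2, 1e-1]}, X, y)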
def evaluate(machine, X, y):
print('score: %.2f' % machine.score(X, y))
def a(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=.8,
random_state=0)
clusterizer = KMeans(n_clusters=N_CLUSTERS, n_jobs=N_JOBS,
random_state=0)
clusterizer.fit(X_train, y_train)
print('centers: ', clusterizer.cluster_centers_)
machine = RBFRegressor(n_features=N_FEATURES,
centers=clusterizer.cluster_centers_,
random_state=1)
machine.fit(X_train, y_train)
print('training', end=' ')
evaluate(machine, X_train, y_train)
print('validation', end=' ')
evaluate(machine, X_test, y_test)
return machine
def main():
print(__doc__)
data = loadmat('./data/dados_map.mat')
X = data['dados_rbf'][:, :2]
y = data['dados_rbf'][:, 2].flatten()
print('shapes: ', X.shape, y.shape)
a(X, y)
if __name__ == '__main__':
main()
|
{"hexsha": "5b1aa4df02e6906efddc3d0ed18187fcbb246ab0", "size": 1755, "ext": "py", "lang": "Python", "max_stars_repo_path": "tasks/assignment-1/p4.py", "max_stars_repo_name": "Comp-UFSCar/neural-networks-2", "max_stars_repo_head_hexsha": "e5e105c91bcd1d63b200f36b9e02dbcde54ae756", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tasks/assignment-1/p4.py", "max_issues_repo_name": "Comp-UFSCar/neural-networks-2", "max_issues_repo_head_hexsha": "e5e105c91bcd1d63b200f36b9e02dbcde54ae756", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tasks/assignment-1/p4.py", "max_forks_repo_name": "Comp-UFSCar/neural-networks-2", "max_forks_repo_head_hexsha": "e5e105c91bcd1d63b200f36b9e02dbcde54ae756", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0921052632, "max_line_length": 80, "alphanum_fraction": 0.6358974359, "include": true, "reason": "from scipy", "num_tokens": 457}
|
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <ctime>
#include <queue>
#include <string>
#include <boost/random.hpp>
#include <boost/generator_iterator.hpp>
#include <glog/logging.h>
using boost::variate_generator;
using boost::mt19937;
using boost::exponential_distribution;
#define ONLY_EVENT_TYPE 0
#define NUMBER_EVENT_TYPES 2 //NEED TO MAKE SURE THIS IS 1 MORE THAN THE LAST DEFINED EVENT
const std::string event_names[NUMBER_EVENT_TYPES] = {
"ARRIVE",
"DEPART"
};
/**
* A class for an event, which holds an int for the event type, a double for simulation time and possibly other data.
* We may want to subclass this with our own events
*/
class Event {
public:
const double time;
const double server_start_time;
const int type;
Event(double time, int type) : time(time), server_start_time(0), type(type) {
//cout << "created an event with simulation time: " << this->time << endl;
//cout << "created an event with event type: " << this->type << endl;
}
Event(double time, double server_start_time, int type) : time(time), server_start_time(server_start_time), type(type) {
//cout << "created an event with simulation time: " << this->time << endl;
//cout << "created an event with event type: " << this->type << endl;
}
bool operator<( const Event& e ) const {
return time < e.time;
}
bool operator>( const Event& e ) const {
return time > e.time;
}
bool less( const Event& e) const {
return time < e.time;
}
friend std::ostream& operator<< (std::ostream& out, Event& event);
};
class CompareEvent {
public:
bool operator()(Event* &e1, Event* &e2) {
return e1->time > e2->time;
}
};
// Print event
std::ostream& operator<< ( std::ostream& out, Event& event) {
out << "[sim_time: " << std::setw(10) << std::setprecision(3) << std::fixed << event.time << ", type: " << std::setw(4) << event.type;
if (event.type < NUMBER_EVENT_TYPES) {
out << " - " << std::left << std::setw(50) << event_names[event.type];
} else {
out << std::left << std::setw(50) << " - UNKNOWN";
}
out << std::right << "]";
return out;
}
int main(int argc, char **argv) {
// Initialize Google Logging
google::InitGoogleLogging(argv[0]);
// Log to Stderr
FLAGS_logtostderr = 1;
    if (argc < 2) {
        LOG(ERROR) << "Usage: " << argv[0] << " <end_simulation_time_s>";
        return 1;
    }
    int seed = time(0);
    double distribution_mean = 3.0;
    double simulation_time_s = 0;
    double end_simulation_time_s = atof(argv[1]);
double previous_time_s;
bool server_idle = true;
double server_work_time = 0;
double total_queue_time = 0;
double total_departures = 0;
double sum_of_time_queue_length = 0;
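    // boost::exponential_distribution takes the rate parameter lambda, so
    // 1/distribution_mean yields interarrival and service times whose mean
    // is distribution_mean.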
variate_generator< mt19937, exponential_distribution<> > rand_generator(mt19937(seed), exponential_distribution<>(1/distribution_mean));
std::priority_queue<Event*, std::vector<Event*>, CompareEvent> heap;
std::queue<double> queue;
// Put initial event in the heap
heap.push(new Event(simulation_time_s + rand_generator(), 0));
while (simulation_time_s < end_simulation_time_s) {
        if (heap.empty()) {
            LOG(ERROR) << "Simulation not complete and there are no events in the min heap.";
            LOG(ERROR) << "\tsimulation time: " << simulation_time_s;
            exit(0);
        }
        Event *current_event = heap.top();
        heap.pop();
previous_time_s = simulation_time_s;
simulation_time_s = current_event->time;
// Calculate average time stuff
double time_since_last = simulation_time_s - previous_time_s;
sum_of_time_queue_length += queue.size() * time_since_last;
switch (current_event->type) {
case 0: // ARRIVE
// If the server is busy add new arrival to waiting queue
// otherwise set the server as busy and add new departure time.
if (!server_idle) {
queue.push(simulation_time_s);
} else {
server_idle = false;
heap.push(new Event(simulation_time_s + rand_generator(), simulation_time_s, 1));
}
// Add next arrival event
heap.push(new Event(simulation_time_s + rand_generator(), 0));
break;
case 1: // DEPART
// Add server busy time.
server_work_time += simulation_time_s - current_event->server_start_time;
// If the queue is empty set the server to idle otherwise
// get the next person from the queue and set their departure
// time.
if (queue.empty()) {
server_idle = true;
} else {
// Add current simulation time minus time stored in the
// queue to the total queue time.
total_queue_time += simulation_time_s - queue.front();
queue.pop();
// Add a new depature to the heap.
heap.push(new Event(simulation_time_s + rand_generator(), simulation_time_s, 1));
}
total_departures++;
break;
default:
LOG(ERROR) << "Simulation had an event with an unknown type: " << current_event->type;
LOG(ERROR) << "\tsimulation time: " << simulation_time_s;
exit(0);
}
VLOG(2) << *current_event << ", h: " << heap.size() << ", q: " << queue.size();
delete current_event; // Event's are created with new, so we need to delete them when we're done with them
}
LOG(INFO) << "The simulation ended at time: " << end_simulation_time_s;
VLOG(1) << "The server was busy for time: " << server_work_time;
LOG(INFO) << "The server utilization was: " << server_work_time / end_simulation_time_s;
LOG(INFO) << "The average length of the queue was: " << sum_of_time_queue_length / end_simulation_time_s;
VLOG(1) << "Total time spent in the queue: " << total_queue_time;
VLOG(1) << "Total departures: " << total_departures;
LOG(INFO) << "The average time spent in queue: " << total_queue_time / total_departures;
}
|
{"hexsha": "8bae782e41899be60de07e4c5dfd58f34a8613df", "size": 6360, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/hw1.cpp", "max_stars_repo_name": "Kazz47/cs445", "max_stars_repo_head_hexsha": "eb991eda50e395a2f10ea943eabe7f74b30f38f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/hw1.cpp", "max_issues_repo_name": "Kazz47/cs445", "max_issues_repo_head_hexsha": "eb991eda50e395a2f10ea943eabe7f74b30f38f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/hw1.cpp", "max_forks_repo_name": "Kazz47/cs445", "max_forks_repo_head_hexsha": "eb991eda50e395a2f10ea943eabe7f74b30f38f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7630057803, "max_line_length": 140, "alphanum_fraction": 0.5893081761, "num_tokens": 1454}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ade:
# Asynchronous Differential Evolution.
#
# Copyright (C) 2018-19 by Edwin A. Suominen,
# http://edsuom.com/ade
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
A L{Population} class and helpers.
What you'll need to be concerned with is mostly constructing an
instance, setting it up, and passing it to
L{de.DifferentialEvolution}. The constructor requires an evaluation
function, parameter names, and parameter bounds. You'll need to wait
for the C{Deferred} that L{Population.setup} returns before
proceeding.
"""
import random, pickle, os.path, bz2
from copy import copy
from textwrap import TextWrapper
import numpy as np
from scipy import stats
from pyDOE import lhs
from twisted.internet import defer, task
from asynqueue import DeferredTracker
import abort
from individual import Individual
from report import Reporter
from history import History
from util import *
class ParameterManager(object):
"""
I manage the digital DNA parameters of the evolving species.
I can pretty-print values with their parameter names, check if
values pass constraints, limit values to their bounds, scale
unity-range values to their appropriate ranges, and let you
iterate over sorted parameter names.
@ivar mins: Lower bound of each parameter.
    @ivar maxs: Upper bound of each parameter.
@keyword constraints: A single callable object (function, method,
class instance with I{__call__} method), or a sequence of such
objects, that enforce(s) any constraints on your parameter
values. See L{passesConstraints}. Instead of a sequence, you
can use an instance of L{constraints.Constraints}.
"""
maxLineLength = 120
dashes = "-"*maxLineLength
fill = TextWrapper(width=maxLineLength, break_on_hyphens=False).fill
def __init__(self, names, bounds, constraints=[]):
if len(bounds) != len(names):
raise ValueError(
"Define one parameter name for each lower/upper bound")
self.names = names
self.sortedNameIndices = [
names.index(name) for name in sorted(names)]
self.constraints = [constraints] \
if notseq(constraints) else constraints
self.setup(bounds)
def setup(self, bounds):
"""
Call to set (or reset) the bounds of my parameters.
"""
self.mins = np.array([x[0] for x in bounds])
self.maxs = np.array([x[1] for x in bounds])
self.scales = self.maxs - self.mins
self.mids = self.mins + 0.5 * self.scales
def __getstate__(self):
"""
For pickling.
"""
state = {}
names = {
'names', 'sortedNameIndices',
'mins', 'maxs', 'scales', 'mids', 'constraints',
}
for name in names:
if hasattr(self, name):
state[name] = getattr(self, name)
return state
def __setstate__(self, state):
"""
For unpickling.
"""
for name in state:
setattr(self, name, state[name])
def stringValue(self, k, value, forColumn=False):
"""
For the parameter with position I{k}, returns the float
I{value} formatted as a string. Adds a '*' if value within 5%
of the lower bound, or '**' if within 5% of the upper bound.
"""
uValue = (value - self.mins[k]) / self.scales[k]
suffix = "*" if uValue < 0.05 else "**" if uValue > 0.95 else ""
proto = "{:>10.5g}{:2s}" if forColumn else "{:g}{}"
return sub(proto, float(value), suffix)
def prettyValues(self, values, *args):
"""
Returns an easily readable string representation of the supplied
I{values} with their parameter names, sorted.
Adds a '*' if < 5% of the way from lower to upper bound, or
        '**' if > 95% of the way.
You can provide as an additional argument a prelude string, or
a string proto with additional args, and the string will
precede the values.
"""
lineParts = []
if args:
lineParts.append(args[0].format(*args[1:]))
for k, name, value in self.sortedNamerator(values):
part = sub("{}={}", name, self.stringValue(k, value))
lineParts.append(part)
text = " ".join(lineParts)
return self.fill(text)
def sortedNamerator(self, values=None, namesOnly=False):
"""
Generates tuples of sorted names, or just the sorted names if
I{namesOnly} is set C{True}.
Each tuple contains (1) the index in a I{values} list of
parameters where each named parameter appears, and (2) the
name itself. If such a list of I{values} is supplied, each
tuple also includes (3) the value for that name.
"""
if namesOnly:
for k in self.sortedNameIndices:
yield self.names[k]
elif values is None:
for k in self.sortedNameIndices:
yield k, self.names[k]
else:
for k in self.sortedNameIndices:
yield k, self.names[k], values[k]
def fromUnity(self, values):
"""
Translates normalized into actual values.
Converts the supplied normalized I{values} from the
standardized range of 0-1 into my range of actual parameter
values within the ranges specified in the bounds supplied to
my constructor.
"""
scaled = self.scales * values
return self.mins + scaled
    def toUnity(self, values):
"""
Translates actual into normalized values.
Converts the supplied actual parameter I{values} into the
standardized range of 0-1 within the ranges specified in the
bounds supplied to my constructor.
"""
return (values - self.mins) / self.scales
def setConstraints(self, *args):
"""
Sets my I{constraints} list to the callable function(s),
method(s), or object(s) supplied as one or more arguments.
        What you supply as arguments will replace any constraint
        checking already in place, so make sure everything you want
        is included.
All constraints, and only those constraints, defined by this
call will need to be be satisfied with each parameter
combination. To clear any existing constraints, call with no
args.
Called by L{Population.setConstraints}.
"""
for f in args:
if not callable(f):
raise ValueError("Supplied object '{}' is not callable!", f)
self.constraints = list(args)
def passesConstraints(self, values):
"""
Checks if I{values} pass all my constraints.
Call with a 1-D array of parameter I{values} to check them against
all of the constraints. Each callable in my I{constraints}
list must return C{True} if it found the parameters (supplied
to each callable as a dict) to be acceptable. The result will
be C{True} if and only if all constraints were satisfied. (Or
if you constructed me with an empty list.)
"""
if not self.constraints: return True
params = {}
for name, value in zip(self.names, values):
params[name] = value
for func in self.constraints:
if not func(params):
# This constraint was violated, bail out
return False
return True
def limit(self, values):
"""
Limits the supplied I{values} to my boundaries using the
simple and well-accepted "reflection" method.
According to a study by Kreischer, Magalhaes, et
al. ("Evaluation of Bound Constraints Handling Methods in
Differential Evolution using the CEC2017 Benchmark"), this is
        second in performance only to resampling for a new DE
mutant. (They also propose a "scaled mutant" method that is
more complicated, but according to their Tables 1, 2, doesn't
appear significantly better.)
"""
values = np.where(values < self.mins, 2*self.mins - values, values)
values = np.where(values > self.maxs, 2*self.maxs - values, values)
return np.clip(values, self.mins, self.maxs)
class ProbabilitySampler(object):
"""
Call an instance of me with a sequence of indices, sorted in
ascending order of the SSE of the individual they point to, and a
float version of I{randomBase} to get a best-biased index sample.
"""
N_chunk = 100
def __init__(self):
self.rc = None
self.RV = None
def trapz(self, rc):
"""
Returns a random variate from a half-trapezoid distribution with
the start of the triangular portion I{rc} specified between
0.0 and 1.0.
"""
pr_tri = 0.0 if rc >= 1.0 else (1.0 - rc) / (1.0 + rc)
if random.random() < pr_tri:
# Sample from triangular portion
return rc + random.triangular(0, 1.0-rc, 0)
# Sample from uniform (rectangular) portion
return random.uniform(0, rc)
def __call__(self, K, rb):
if rb > 0.5:
rc = 2*(rb - 0.5)
rv = self.trapz(rc)
else:
rc = 2*rb
rv = random.triangular(0, rc, 0)
return K[int(rv*len(K))]
class Population(object):
"""
I contain a population of parameter-combination L{Individual}
objects.
Construct me with a callable evaluation I{func}, a sequence of
parameter I{names}, and a sequence of I{bounds} containing
2-tuples that each define the lower and upper limits of the
values:
- I{func}: A callable to which an L{Individual} can send its
parameter values and from which it receives a sum-of-squared
error float value as a result.
- I{names}: A sequence of parameter names.
- I{bounds}: A list of 2-tuples, one for each parameter
name. The first element of each tuple is the lower bound of
a parameter in the second the upper bound.
The callable I{func} must accept a single 1-D Numpy array as its
sole argument and return the sum of squared errors (SSE) as a
single float value. To shut down I{ade}, it can return a negative
SSE value. If I{ade} is shutting down, it will use I{None} as the
argument, and the callable should act accordingly.
My I{targetFraction} attribute determines how much success
challengers must have to maintain the status quo in adaptive
mode. Consider the default of 2.5%: In a population of 100, that
is reached with a score of 2.5, which can be achieved, for
example, with
- ten challengers winning with a rounded improvement ratio
of 1; or
- one challenger winning with an I{rir} of 2 and five with an
I{rir} of 1; or
- just one challenger winning with an I{rir} of 3.
- Or, if you're somehow positioned at a subtle transition in
the fitness landscape along just the right multi-dimensional
angle, fully half of the challengers winning with an I{rir}
of 0. (Unlikely!)
@keyword constraints: A single callable object (function, method,
class instance with I{__call__} method), or a sequence of such
objects, that enforce(s) any constraints on your parameter
values. See L{ParameterManager.passesConstraints}. Instead of
a sequence, you can use an instance of
L{constraints.Constraints}.
@keyword popsize: The number of individuals per parameter in the
population, if not the default.
@keyword debug: Set C{True} to override my default I{debug}
setting and ensure that I show individuals getting replaced.
@keyword complaintCallback: A callable that my L{Reporter} calls
with an individual and the non-None result of a complaining
reporter callback. See L{Reporter.runCallbacks}.
@keyword targetFraction: Set this to a (small) float to override
my default target for the total score of improvements in each
iteration.
@cvar N_maxParallel: The maximum number of parallel evaluations
during population L{setup}. Uses an instance of
C{asynqueue.util.DeferredTracker} for concurrency limiting.
@ivar popsize: The number of individuals per parameter. The
population size will scale with the number of parameters, up
until I{Np_max} is reached. Default is 10 individuals per
parameter.
@ivar Np_min: Minimum population size, i.e., my total number of
individuals. Default is 20.
@ivar Np_max: Maximum population size. Default is 500, which is
really pretty big.
@ivar Nd: The number of parameters for each individual.
@ivar targetFraction: The desired total score of improvements in
each iteration in order for I{ade}'s adaptive algorithm to not
change the current differential weight. See L{replacement} and
L{FManager} for details. The default is 2%. (Previously, it
was 2.5% but that seemed too strict for the application the
author is mostly using ADE for.)
@ivar debug: Set C{True} to show individuals getting
replaced. (Results in a very messy log or console display.)
@ivar running: Indicates my run status: C{None} after
instantiation but before L{setup}, C{True} after setup, and
C{False} if I{ade} is aborting.
@see: U{asynqueue.util.DeferredTracker<http://edsuom.com/AsynQueue/asynqueue.util.DeferredTracker.html>}, used to limit concurrency during population L{setup}.
"""
maxTries = 2000
popsize = 10
Np_min = 20
Np_max = 500
N_maxParallel = 12
targetFraction = 0.02
debug = False
failedConstraintChar = " "
# Property placeholders
    _KS = None; _iSorted = None
def __init__(
self, func, names, bounds,
constraints=[], popsize=None,
debug=False, complaintCallback=None, targetFraction=None):
"""
C{Population(func, names, bounds, constraints=[], popsize=None,
debug=False, complaintCallback=None)}
"""
if not callable(func):
raise ValueError(sub("Object '{}' is not callable", func))
self.func = func
self.Nd = len(bounds)
if debug: self.debug = True
if targetFraction:
self.targetFraction = targetFraction
msg("WARNING: Non-default target improvement score of {:f}",
targetFraction)
self.history = History(names)
self.pm = ParameterManager(names, bounds, constraints)
self.reporter = Reporter(self, complaintCallback)
self.clear()
if popsize: self.popsize = popsize
self.Np = max([
self.Np_min, min([self.popsize * self.Nd, self.Np_max])])
self.statusQuoScore = self.targetFraction * self.Np
abort.callOnAbort(self.abort)
@classmethod
def load(cls, filePath, **kw):
"""
Returns a new instance of me with values initialized from the
original version that was pickled and written with BZ2
compression to I{filePath}.
The pickled version will not have a reference to the
evaluation I{func} that was supplied to the original version
in its constructor, nor to any I{complaintCallback}. If you
want to do further evaluations, you can supply a reference to
those functions (or even a different one, though that would be
weird) with the I{func} and I{complaintCallback} keywords.
B{Note}: For some mysterious reason, the DE algorithm seems to
run significantly slower when resuming with a population that
has been loaded using this method than with one initialized
from scratch.
@keyword func: Evaluation function, specify if you want to
resume evaluations. All individuals in the loaded
population should have their SSEs re-evaluated if anything
at all has changed about that function.
@keyword complaintCallback: Callback function for complaining
about new-best reports during resumed evaluations.
@keyword bounds: A list of bounds to update my restored
I{ParameterManager} object with. Specify if you refined
the parameter bounds since the last run and want to resume
evaluations with the refined bounds. Each I{Individual} in
the new instance will have its values limited to the new
bounds with a call to L{Population.limit}.
@see: L{save} for the way to create compressed pickles of an
instance of me.
"""
filePath = os.path.expanduser(filePath)
with bz2.BZ2File(filePath, 'r') as fh:
p = pickle.load(fh)
p.func = kw.get('func', None)
bounds = kw.get('bounds', None)
if bounds:
p.pm.setup(bounds)
for i in p:
p.limit(i)
p.reporter = Reporter(p, kw.get('complaintCallback', None))
return p
def __getstate__(self):
"""
For pickling. Note that neither the user-supplied evaluation
function nor any complaint callback function is included.
"""
state = {}
names = {
# Bools
'debug', 'running',
# Scalars
'Nd', 'Np', 'popsize', 'targetFraction', 'statusQuoScore',
# Other
'iList', 'kr', 'pm', 'history',
}
for name in names:
if hasattr(self, name):
state[name] = getattr(self, name)
return state
def __setstate__(self, state):
"""
For unpickling.
"""
self.clear()
for name in state:
setattr(self, name, state[name])
if self.running is False: self.running = True
for i in self.iList:
i.p = self
self.dLocks.append(defer.DeferredLock())
def save(self, filePath):
"""
Writes a BZ2-compressed pickled version of me to the specified
I{filePath}.
Note that the user-supplied evaluation function will not be
included in the pickled version. However, you can supply it as
a keyword to L{load}.
"""
filePath = os.path.expanduser(filePath)
with bz2.BZ2File(filePath, 'w') as fh:
pickle.dump(self, fh)
def __getitem__(self, k):
"""
Sequence-like access to my individuals.
"""
return self.iList[k]
def __setitem__(self, k, i):
"""
Use only this method (item setting) to replace individuals in my
I{iList}.
The only other place my I{iList} is ever manipulated directly
is the C{addIndividual} function of L{setup}.
"""
if not isinstance(i, Individual):
raise TypeError("You can only set me with Individuals")
# The history object uses a DeferredLock to ensure that it
# updates its internals properly, so no need to keep track of
# the deferreds that get returned from the notInPop and add
# method calls.
if len(self.iList) > k:
iPrev = self.iList[k]
self.history.notInPop(iPrev)
self.history.add(i)
# Here is the only place iList should ever be set directly
self.iList[k] = i
# Invalidate sorting
del self.KS
def __len__(self):
"""
Sequence-like container of individuals: length.
My length will be equal to my I{Np} attribute unless setup
has not been completed.
"""
return len(self.iList)
def __iter__(self):
"""
Sequence-like container of individuals: iteration.
"""
for i in self.iList:
yield i
def __contains__(self, i):
"""
Sequence-like container of individuals: "in".
"""
return i in self.iList
def __nonzero__(self):
"""
Sequence-like container of individuals: I am C{True} if I have
any.
"""
return bool(self.iList)
@property
def KS(self):
"""
Property: A list of indices to I{iList}, sorted by increasing
(worsening) SSE of the individuals there. The best individual
will have the first index in I{KS}.
"""
if self._KS is None and self.iList:
self._KS = np.argsort([float(i.SSE) for i in self.iList])
return self._KS
@KS.deleter
def KS(self):
"""
Property: "Deleting" my SSE-sorted list of indices forces
regeneration of it the next time the I{KS} property is
accessed. It also "deletes" I{iSorted}.
"""
self._KS = None
del self.iSorted
@property
def iSorted(self):
"""
Property: A list of my individuals, sorted by increasing
(worsening) SSE.
"""
if self._KS is None or self._iSorted is None:
if self.iList:
self._iSorted = [self.iList[k] for k in self.KS]
return self._iSorted
@iSorted.deleter
def iSorted(self):
"""
Property: "Deleting" my sorted list of individuals forces
regeneration of the sorted list that will be returned next
time the I{iSorted} property is accessed.
"""
self._iSorted = None
@property
def kBest(self):
"""
Property: The index to I{iList} of the best individual. C{None} if
I have no individuals yet.
"""
if self.KS is not None:
return self.KS[0]
def __repr__(self):
"""
An informative string representation with a text table of my best
individuals.
"""
def addRow():
lineParts = ["{:>11s}".format(columns[0]), '|']
for x in columns[1:]:
lineParts.append(x)
lines.append(" ".join(lineParts))
if not self: return "Population: (empty)"
        N_top = (self.pm.maxLineLength-3) // 15
iTops = self.iSorted[:N_top]
if len(iTops) < N_top: N_top = len(iTops)
SSEs = [float(i.SSE) for i in self]
lines = [sub(
"Population: {:d} individuals with SSE {:.5g} to "+\
"{:.5g}, avg eval time {:.3g} sec. Top {:d}:",
self.Np, min(SSEs), max(SSEs), np.mean(self.evalTimes()), N_top)]
lines.append("")
columns = ["SSE"] + [sub("{:>10.5g} ", float(i.SSE)) for i in iTops]
addRow()
lines.append(self.pm.dashes)
X = np.empty([self.Nd, N_top])
for kc, i in enumerate(iTops):
X[:,kc] = i.values
for kr, name in self.pm.sortedNamerator():
columns = [name] + [
self.pm.stringValue(kr, X[kr,kc], forColumn=True)
for kc in range(N_top)]
addRow()
lines.append(self.pm.dashes)
lines.append(sub("Best individual:\n{}\n", repr(self.best())))
return "\n".join(lines)
def evalFunc(self, values, xSSE=None):
"""
A wrapper for the user-supplied evaluation function.
"""
if self.running is False:
values = None
if xSSE is None:
return defer.maybeDeferred(self.func, values)
return defer.maybeDeferred(self.func, values, xSSE=xSSE)
def clear(self):
"""
Wipes out any existing population and sets up everything for a
brand new one.
"""
self.counter = 0
self.iList = []
self.dLocks = []
if hasattr(self, 'history'): self.history.clear()
self.running = None
self.replacementScore = None
del self.KS
# This is only here because clear is called by both __init__
# and __setstate__
self.ps = ProbabilitySampler()
def limit(self, i):
"""
Limits the individual's parameter values to the bounds in the way
that my L{ParameterManager} is configured to do, modifying the
individual in place.
B{Note}: The individual's population status is not considered
or affected. If it's a population member, you will want to
re-evaluate it and invalidate my sort with a C{del self.KS} or
C{del self.iSorted} if its SSE has changed.
"""
values = self.pm.limit(i.values)
i.update(values)
def spawn(self, values, fromUnity=False):
"""
Spawns a new L{Individual} with the supplied I{values}. If
I{fromUnity} is set C{True}, the values are converted from 0-1
range into their proper ranges.
"""
if fromUnity:
values = self.pm.fromUnity(values)
return Individual(self, values)
def abort(self, ignoreReporter=False):
"""
Aborts my operations ASAP. Repeated calls will release any
locks that got acquired since the last call.
L{Reporter.abort} calls this with I{ignoreReporter} set
C{True} to avoid infinite recursion.
"""
self.running = False
if not ignoreReporter:
msg("Shutting down reporter")
self.reporter.abort()
# This next little line may run a bunch of stuff that was
# waiting for locks
msg("Releasing locks")
self.release()
msg("Population object stopped")
def initialize(self):
"""
Invalidates the last sort of my individuals, sets my I{running}
flag to C{True}, and prints/logs a representation of my populated
instance.
"""
del self.KS
self.running = True
msg(0, repr(self))
def setup(self, uniform=False, blank=False):
"""
Sets up my initial population using a Latin hypercube to
initialize pseudorandom parameter values with minimal clustering.
        Unless I{uniform} is set, that is. Then each parameter value
is just uniformly random without regard to the others.
With parameter constraints, the Latin hypercube doesn't work
that well. The initial values matrix must be refreshed,
perhaps many times. But it may still be better than uniform
initial population sampling.
Sets my I{running} flag C{True} and returns a C{Deferred} that
fires when the population has been set up, with C{True} if
it's ready to go and setup didn't get aborted.
@keyword uniform: Use uniform random variates instead of a
Latin hypercube (LHS). Using LHS (the default) is usually
better because initializes pseudorandom parameter values
with minimal clustering.
@keyword blank: Set C{True} to give the initial individuals an
infinite placeholder SSE instead of being evaluated.
"""
def running():
return self.running is not False
def refreshIV():
kIV[0] = 0
IV = np.random.uniform(
size=(self.Np, self.Nd)) if uniform else lhs(
self.Nd, samples=self.Np, criterion='m')
kIV[1] = self.pm.fromUnity(IV)
def getNextIV():
k, IV = kIV
if k+1 == IV.shape[0]:
refreshIV()
k, IV = kIV
kIV[0] += 1
return IV[k,:]
def getIndividual():
for k in range(self.maxTries):
values = getNextIV()
if self.pm.passesConstraints(values):
break
self.showFailedConstraint()
else:
msg(0, "Couldn't generate a conforming Individual, aborting!")
self.abort()
return Individual(self, self.pm.limit(values))
def addIndividual(i):
"""
This is the only place other than L{__setitem__} where my I{iList}
is manipulated.
"""
self.iList.append(i)
self.dLocks.append(defer.DeferredLock())
self.history.add(i)
def needMore():
return len(self.iList) < self.Np
def evaluated(i, d):
if not i:
msg(0, "Bogus initial evaluation of {}, aborting", i)
self.abort()
return
self.reporter(i)
isFinite = not np.isinf(float(i.SSE))
if isFinite and needMore(): addIndividual(i)
@defer.inlineCallbacks
def populate():
k = 0
while running() and needMore():
i = getIndividual()
if blank:
i.SSE = np.inf
addIndividual(i)
continue
k += 1
d = i.evaluate()
d.addCallback(evaluated, d)
d.addErrback(oops)
if k < self.Np:
dt.put(d)
yield dt.deferUntilFewer(self.N_maxParallel)
else: yield d
yield dt.deferToAll()
def done(null):
if running():
self.initialize()
return True
if not running():
return defer.succeed(None)
if self: self.clear()
dt = DeferredTracker(interval=0.05)
kIV = [None]*2; refreshIV()
msg(0, "Initializing {:d} population members having {:d} parameters",
self.Np, self.Nd, '-')
return populate().addCallback(done)
def addCallback(self, func, *args, **kw):
"""
Adds callable I{func} to my reporter's list of functions to call
each time there is a significantly better L{Individual}.
@see: L{Reporter.addCallback}.
"""
self.reporter.addCallback(func, *args, **kw)
def setConstraints(self, *args):
"""
Sets the constraint checkers maintained by my L{ParameterManager}
instance I{pm} to the callable function(s), method(s), or
object(s) supplied as one or more args.
What you supply as arguments will replace any constraint
        checking already in place, so make sure everything you
want is included.
All constraints, and only those constraints, defined by this
call will need to be be satisfied with each parameter
combination. To clear any existing constraints, call with no
args.
@see: L{ParameterManager.setConstraints}.
"""
self.pm.setConstraints(*args)
def _keepStatusQuo(self, score):
"""
Returns C{True} with a probability that increases as I{score}
        approaches my I{statusQuoScore}.
"""
x = score / self.statusQuoScore
if x > 1:
            # Greater than the status quo threshold: always maintain
return True
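        # Smooth sine ramp: the probability rises from 0 at x=0 to 1 at x=1,
        # reaching 50% when the score is half the status quo threshold.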
prob = 0.5 + 0.5*np.sin(np.pi*(x-0.5))
return np.random.random_sample() < prob
def replacement(self, rir=None, sqs=None):
"""
Records the replacement of an L{Individual} in this generation or
iteration.
Call with an integer B{r}ounded B{i}mprovement B{r}atio in a
loser's SSE vs. the successful challenger's SSE, unless you
are calling to inquire about whether the status quo I{F}
        value(s) should be maintained or to set my I{statusQuoScore}
with the I{sqs} keyword.
Three types of calls
====================
The rounded improvement ratio I{rir} indicates how much
better the challenger is than the individual it
replaced. I use that ratio to adjust a running score for
the current iteration to inform the status quo inquiry
that will occur when the iteration is done, unless I'm not
running in adaptive mode.
You can set my target I{statusQuoScore} by setting I{sqs}
to a (small) float value. That will replace my default
value for future evaluation of replacement individuals.
Finally, a status quo inquiry is a call with no keywords
set. I will determine if the replacements that occurred
in the previous generation/iteration were enough to
warrant maintaining the status quo, and then reset the
record. You will receive a result of C{True} if the status
            quo should be maintained.
The status quo should be maintained if several small
improvements are made, or fewer larger ones, with the
required number and/or size increasing for a larger
population. For small populations where even a single
improvement would be significant, the probability of
status quo maintenance increases with smaller population
and will sometimes happen even with no improvements for a
given generation or iteration.
Improvement Ratios
==================
An I{rir} of 1 indicates that the successful challenger
was better (i.e., lower) and not considered equivalent to
that of the individual it replaced, and that its SSE was
no better than 1.5x as good (2/3 as high) as the replaced
individual's SSE. An I{rir} of 2 indicates that the
challenger had an SSE between 1.5x and 2.5x better than
(2/5 to 2/3 as high as) the individual it replaced.
I give very little weight to an I{rir} of zero, which
indicates that the challenger was better but still has an
equivalent SSE, i.e., is no more than 2% better with the
default value of I{Reporter.minDiff}. See
L{Reporter.isEquivSSE}.
I give five times much weight to an I{rir} of 1, though
it's still pretty small. The improvement is modest and
could be as little as 2% (assuming
C{Reporter.minDiff}=0.02, the default). An I{rir} of 2
gets three times as much weight as that.
An I{rir} of 3 also gets disproportionately more weight,
five times as much as I{rir}=1. Beyond that, though, the
weight scales in a nearly linear fashion. For example, an
I{rir} of 9 adds just a little more than three times to
the score (3.4x) as I{rir}=3 does.
Here's a practical example, with a population of 100
individuals: If you see 10 "1" characters on the screen
for one iteration with other 90 being "X," your ratio
score for that iteration will be 5.0. But if you see just
one non-X individual with a "8" character, the score will
be 7.5. That one amazing success story counts more in a
sea of failures than a bunch of marginal improvements,
which is kind of how evolution works in real life. (See
the literature around "hopeful monsters.")
@keyword rir: A rounded improvement ratio obtained from a call
to L{Reporter.msgRatio}, where the numerator is the SSE of
the individual that was replaced and the denominator is
the SSE of its successful challenger.
@see: L{report}, which calls this.
"""
if sqs:
self.statusQuoScore = sqs
return
if rir is None:
# Inquiry call, initialize score to zero
score = self.replacementScore
self.replacementScore = 0
if score is None:
# This is the first time ever called, so of course
# status quo should be maintained
return True
return self._keepStatusQuo(score)
# An adjustment call
if self.replacementScore is not None:
# 0 has a tiny weight, just 0.1
# 1 has only 0.5 weight
# 2 has 1.5, or 3x as much as 1
# 3 has 2.5, or 5x as much as 1
addition = 0.1 if rir == 0 else rir - 0.5
self.replacementScore += addition
def report(self, iNew=None, iOld=None, noProgress=False, force=False):
"""
Provides a message via the log messenger about the supplied
L{Individual}, optionally with a comparison to another one.
If no second individual is supplied, the comparison will be
with the best individual thus far reported on.
If no individual at all is supplied, reports on my best one,
forcing callbacks to run even if the best individual's SSE is
equivalent to the last-reported one's.
Gets the ratio from a call to my L{Reporter} instance, and
does a call to L{replacement} with it if the new individual is
better. Returns (for unit testing convenience) the ratio.
@keyword noProgress: Set C{True} to suppress printing/logging
a progress character.
@keyword force: Set C{True} to force callbacks to run even if
the reported SSE is considered equivalent to the previous
best one.
@see: L{Reporter}.
"""
if self.running is False: return
if iNew is None and iOld is None:
iNew = self.best()
noProgress = True
force = True
ratio = self.reporter(iNew, iOld, noProgress, force)
if ratio is not None: self.replacement(ratio)
return ratio
def waitForReports(self):
"""
Returns a C{Deferred} that fires when all reporter callbacks have
finished. (And also L{History} updates.)
"""
if not self.running:
return defer.succeed(None)
return defer.DeferredList([
self.history.shutdown(), self.reporter.waitForCallbacks()])
def showFailedConstraint(self):
"""
Outputs a progress character to indicate a failed constraint.
"""
self.reporter.progressChar(self.failedConstraintChar)
def push(self, i):
"""
Pushes the supplied L{Individual} I{i} onto my population and
kicks out the worst individual there to make room.
"""
kWorst = self.KS[-1]
self[kWorst] = i
del self.KS
def sample(self, N, *exclude, **kw):
"""
Returns a sample of I{N} indices from my population that are
unique from each other and from any excluded indices supplied
as additional arguments.
The I{randomBase} keyword lets you use a significant
improvement offered by ADE: Non-uniform probability of base
individual selection. Implementation is done by an instance of
L{ProbabilitySampler}.
The traditional DE/best/1/bin and DE/rand/1/bin are really
opposite extremes of what can be a continuous range of base
individual selection regimes. By specifying a float value for
I{randomBase} between 0.0 and 1.0, you can select a regime
anywhere in that range.
The higher the value, the more uniform the probability
distribution is. Setting it to near 0.0 makes it much more
likely that the index of the best individual or one nearly as
good will be chosen. Setting it to near 1.0 makes the worst
individual nearly as likely to be chosen as the best.
A I{randomBase} value of 0.5 is a compromise between
DE/best/1/bin and DE/rand/1/bin. With that setting, the
probability of an individual having its index selected will
gradually drop as it gets worse in the SSE rankings. As
I{randomBase} goes above 0.5, the probability will take longer
to start dropping, until at 1.0 it doesn't drop at all. As
I{randomBase} goes below 0.5, the probability will start
dropping sooner, until at 0.0 it drops to zero for anything
but the best individual.
@keyword randomBase: Sample probability uniformity value
between 0.0 (only the best individual is ever selected)
and 1.0 (uniform probability). Setting it I{False} is
equivalent to 0.0, and setting it I{True} (the default) is
equivalent to 1.0.
"""
K = [k for k in self.KS if k not in exclude]
rb = kw.get('randomBase', True)
if not rb:
if N > 1:
raise ValueError("Can't have > 1 unique best samples!")
result = [K[0]]
elif rb in (True, 1.0):
# Sampling without replacement, so all items of result
# will be unique
result = random.sample(K, N)
elif rb > 1.0:
raise ValueError(
"randomBase must be False, True, or between 0.0 and 1.0")
else:
result = []
while len(result) < N:
k = self.ps(K, rb)
if k in result: continue
result.append(k)
return result[0] if N == 1 else result
def individuals(self, *indices):
"""
Immediately returns a list of the individuals at the specified
integer index or indices.
"""
result = []
for k in indices:
if k >= len(self.iList): return
result.append(self.iList[k])
return result[0] if len(result) == 1 else result
def lock(self, *indices):
"""
Obtains the locks for individuals at the specified indices,
submits a request to acquire them, and returns a C{Deferred}
that fires when all of them have been acquired.
Release the locks (as soon as possible) by calling L{release}
with the indices that are locked.
If I'm shutting down, the returned C{Deferred} fires
immediately.
"""
if self.running is False:
return defer.succeed(None)
dList = []
for k in indices:
if indices.count(k) > 1:
raise ValueError(
"Requesting the same lock twice will result in deadlock!")
if k >= len(self.dLocks):
# Invalid index, we must be shutting down
self.release()
return defer.succeed(None)
dList.append(self.dLocks[k].acquire())
return defer.DeferredList(dList).addErrback(oops)
def release(self, *indices):
"""
Releases any active lock for individuals at the specified index or
indices.
If no indices are supplied, releases all active locks. (This
is for aborting only.)
"""
def tryRelease(dLock):
if dLock.locked:
dLock.release()
if indices:
for k in indices:
tryRelease(self.dLocks[k])
return
for dLock in self.dLocks:
tryRelease(dLock)
def best(self):
"""
Returns my best individual, or C{None} if I have no individuals yet.
"""
if self.iList:
return self.iList[self.kBest]
def evalTimes(self):
"""
Returns a list of the most recent elapsed evaluation times for
each of my individuals that have done evaluations.
"""
dtList = []
for i in self:
if i.dt is None: continue
dtList.append(i.dt)
return dtList
|
{"hexsha": "005e2eca5dbd9f7f21bb28f291e3422e8ee47b6c", "size": 44535, "ext": "py", "lang": "Python", "max_stars_repo_path": "ade/population.py", "max_stars_repo_name": "vishalbelsare/ade", "max_stars_repo_head_hexsha": "c2d16fe5544a130d509cb1e430b170a2b77e520b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ade/population.py", "max_issues_repo_name": "vishalbelsare/ade", "max_issues_repo_head_hexsha": "c2d16fe5544a130d509cb1e430b170a2b77e520b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ade/population.py", "max_forks_repo_name": "vishalbelsare/ade", "max_forks_repo_head_hexsha": "c2d16fe5544a130d509cb1e430b170a2b77e520b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4558452481, "max_line_length": 163, "alphanum_fraction": 0.601077804, "include": true, "reason": "import numpy,from scipy", "num_tokens": 10047}
|
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import time
from torch.utils.data import Dataset, DataLoader
import torchaudio
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torchvision.models as models
device = torch.device('cuda')
def train_transform(sample,padding=200):
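    # Augmentation: left-pad the waveform with up to `padding` zero samples
    # so the spectrogram frames shift randomly in time.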
pad_num = np.random.randint(padding)
sample = torch.cat((torch.zeros((1,pad_num)),sample),-1)
specgram = torchaudio.transforms.Spectrogram()(sample)
specgram = (specgram+1e-10).log2()[0,:,:]
    specgram = transforms.Normalize((0.5,), (0.5,))(specgram.unsqueeze(0)).squeeze(0)
specgram = torch.stack((specgram,specgram,specgram),dim=0)
return specgram
def test_transform(sample):
specgram = torchaudio.transforms.Spectrogram()(sample)
specgram = (specgram+1e-10).log2()[0,:,:]
    specgram = transforms.Normalize((0.5,), (0.5,))(specgram.unsqueeze(0)).squeeze(0)
specgram = torch.stack((specgram,specgram,specgram),dim=0)
return specgram
class my_dataset(Dataset):
def __init__(self, df_path, train = False):
self.df = pd.read_csv(df_path)
self.train = train
def __len__(self):
return len(self.df)
def __getitem__(self,idx):
sample_1_name = self.df.iloc[idx]['sample 1']
sample_1_path = '/scratch/cz2064/myjupyter/Time_Series/Data/data_VoxCeleb/wav/'+sample_1_name
sample_1,_ = torchaudio.load(sample_1_path)
sample_2_name = self.df.iloc[idx]['sample 2']
sample_2_path = '/scratch/cz2064/myjupyter/Time_Series/Data/data_VoxCeleb/wav/'+sample_2_name
sample_2,_ = torchaudio.load(sample_2_path)
if self.train:
sample_1_tensor = train_transform(sample_1)
sample_2_tensor = train_transform(sample_2)
else:
sample_1_tensor = test_transform(sample_1)
sample_2_tensor = test_transform(sample_2)
label = self.df.loc[idx,'True or False']
sample = (sample_1_tensor, sample_2_tensor, label)
return sample
def pad_collate(batch):
    # Zero-pad every spectrogram in the batch along the time axis so that all
    # samples share the longest time length found in the batch.
    (xx1, xx2, yy) = zip(*batch)
    x1_lens = [len(x[0][0]) for x in xx1]  # time length of each first sample
    x2_lens = [len(x[0][0]) for x in xx2]  # time length of each second sample
    x1_max_len = np.max(x1_lens)
    x2_max_len = np.max(x2_lens)
    xx1_new = torch.zeros([len(xx1),xx1[0].size(0),xx1[0].size(1),x1_max_len],dtype=torch.float)
    for i in range(len(xx1)):
        xx1_new[i,:,:,:len(xx1[i][0][0])] = xx1[i]
    xx2_new = torch.zeros([len(xx2),xx2[0].size(0),xx2[0].size(1),x2_max_len],dtype=torch.float)
    for i in range(len(xx2)):
        xx2_new[i,:,:,:len(xx2[i][0][0])] = xx2[i]
    yy_new = torch.tensor(yy, dtype=torch.long)
    sample = {'x1':xx1_new,'x2':xx2_new,'y':yy_new}
    return sample
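# Shape sketch (hypothetical batch): with batch_size=2 and per-sample
# spectrograms of shape (3, F, T_i), pad_collate returns batch['x1'] of shape
# (2, 3, F, max_i T_i) zero-padded on the right, batch['x2'] analogously,
# and batch['y'] of shape (2,).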
train_df_path = '/scratch/cz2064/myjupyter/Time_Series/notebook/train.csv'
val_df_path = '/scratch/cz2064/myjupyter/Time_Series/notebook/val.csv'
test_df_path = '/scratch/cz2064/myjupyter/Time_Series/notebook/test.csv'
BATCH_SIZE = 8
train_sampler = torch.utils.data.sampler.RandomSampler(my_dataset(train_df_path,train = True)\
,num_samples=50000,replacement=True)
train_loader = DataLoader(my_dataset(train_df_path,train = True), batch_size=BATCH_SIZE, \
sampler = train_sampler,num_workers=16,collate_fn = pad_collate)
val_loader = DataLoader(my_dataset(val_df_path), batch_size=BATCH_SIZE, shuffle=True,\
num_workers=16,collate_fn = pad_collate)
test_loader = DataLoader(my_dataset(test_df_path), batch_size=1, shuffle=True)
# resnet18 with the final fully-connected layer removed; the remaining
# average-pool layer (index 8) is then replaced with a fixed 7x7 average pool
resnet18 = models.resnet18(pretrained=True)
CNN_model = nn.Sequential(*(list(resnet18.children())[:-1]))
CNN_model[8] = nn.AvgPool2d(7,stride=1)
class LSTM(nn.Module):
def __init__(self, in_dim=512, hidden_dim=128):
super(LSTM, self).__init__()
self.lstm_1 = nn.LSTM(in_dim, hidden_dim, batch_first=True)
self.lstm_2 = nn.LSTM(hidden_dim, hidden_dim, batch_first=True)
def forward(self, x):
x, _ = self.lstm_1(x)
x, _ = self.lstm_2(x)
x = x[:, -1, :]
return x
RNN_model = LSTM()
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.cnn = CNN_model
self.rnn = RNN_model
self.fc1 = nn.Linear(128*3,1024)
self.activation_fc1 = nn.ReLU()
self.dropout1 = nn.Dropout(p=0.2)
self.fc2 = nn.Linear(1024,128)
self.activation_fc2 = nn.ReLU()
self.fc3 = nn.Linear(128,2)
def forward(self, x1, x2):
x1 = self.cnn(x1)
x2 = self.cnn(x2)
x1 = x1.squeeze(2)
x1 = torch.transpose(x1,1,2)
x2 = x2.squeeze(2)
x2 = torch.transpose(x2,1,2)
x1 = self.rnn(x1)
x2 = self.rnn(x2)
x_add = x1+x2
x_minus = x1-x2
x_multiply = x1*x2
x = torch.cat((x_add, x_minus, x_multiply),-1)
x = self.fc1(x)
x = self.activation_fc1(x)
x = self.dropout1(x)
x = self.fc2(x)
x = self.activation_fc2(x)
x = self.fc3(x)
return x
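# Shape sketch (assuming the default torchaudio Spectrogram size): each input
# of shape (B, 3, F, T) passes through the ResNet trunk to (B, 512, 1, T'),
# is squeezed/transposed to (B, T', 512) for the two-layer LSTM, whose last
# hidden state gives (B, 128) per branch; the concatenated sum, difference
# and product features form (B, 384) before fc1.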
def train(model, train_loader=train_loader, val_loader=val_loader, learning_rate=1e-4, num_epoch=100):
start_time = time.time()
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr=learning_rate)
train_loss_return = []
train_acc_return = []
val_loss_return = []
val_acc_return = []
best_acc = 0
for epoch in range(num_epoch):
# Training steps
correct = 0
total = 0
predictions = []
truths = []
model.train()
train_loss_list = []
for i, (sample) in enumerate(train_loader):
sample_1 = sample['x1'].to(device)
sample_2 = sample['x2'].to(device)
labels = sample['y'].to(device)
outputs = model(sample_1,sample_2)
pred = outputs.data.max(-1)[1]
predictions += list(pred.cpu().numpy())
truths += list(labels.cpu().numpy())
total += labels.size(0)
correct += (pred == labels).sum()
model.zero_grad()
loss = loss_fn(outputs, labels)
train_loss_list.append(loss.item())
loss.backward()
optimizer.step()
# report performance
acc = (100 * correct / total)
train_acc_return.append(acc)
train_loss_every_epoch = np.average(train_loss_list)
train_loss_return.append(train_loss_every_epoch)
print('----------Epoch{:2d}/{:2d}----------'.format(epoch+1,num_epoch))
print('Train set | Loss: {:6.4f} | Accuracy: {:4.2f}% '.format(train_loss_every_epoch, acc))
        # Evaluate after every epoch
correct = 0
total = 0
model.eval()
predictions = []
truths = []
val_loss_list = []
with torch.no_grad():
for i, (sample) in enumerate(val_loader):
sample_1 = sample['x1'].to(device)
sample_2 = sample['x2'].to(device)
labels = sample['y'].to(device)
outputs = model(sample_1,sample_2)
loss = loss_fn(outputs, labels)
val_loss_list.append(loss.item())
pred = outputs.data.max(-1)[1]
predictions += list(pred.cpu().numpy())
truths += list(labels.cpu().numpy())
total += labels.size(0)
correct += (pred == labels).sum()
# report performance
acc = (100 * correct / total)
val_acc_return.append(acc)
val_loss_every_epoch = np.average(val_loss_list)
val_loss_return.append(val_loss_every_epoch)
if acc > best_acc:
best_acc = acc
            best_model_wts = copy.deepcopy(model.state_dict())
save_model(model,train_loss_return,train_acc_return,val_loss_return,val_acc_return,best_model_wts)
elapse = time.strftime('%H:%M:%S', time.gmtime(int((time.time() - start_time))))
        print('Val set | Loss: {:6.4f} | Accuracy: {:4.2f}% | time elapse: {:>9}'\
.format(val_loss_every_epoch, acc,elapse))
return model,train_loss_return,train_acc_return,val_loss_return,val_acc_return,best_model_wts
def save_model(model,train_loss_return,train_acc_return,val_loss_return,val_acc_return,best_model_wts):
state = {'best_model_wts':best_model_wts, 'model':model, \
'train_loss':train_loss_return, 'train_acc':train_acc_return,\
'val_loss':val_loss_return, 'val_acc':val_acc_return}
torch.save(state, 'checkpoint_CNN_LSTM.pt')
return None
model = MyModel().to(device)
train(model)
|
{"hexsha": "406a8623f0f3cfdc374380da8a9f4ca91dbb636f", "size": 9033, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_files/CNN_LSTM_Spectrogram_2/CNN_LSTM_Spectrogram_2.py", "max_stars_repo_name": "ChaojieZhang-cz/TS-Project-VoxCeleb", "max_stars_repo_head_hexsha": "c11941f0019d74e064726469cfa6ec9e5772c56d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python_files/CNN_LSTM_Spectrogram_2/CNN_LSTM_Spectrogram_2.py", "max_issues_repo_name": "ChaojieZhang-cz/TS-Project-VoxCeleb", "max_issues_repo_head_hexsha": "c11941f0019d74e064726469cfa6ec9e5772c56d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_files/CNN_LSTM_Spectrogram_2/CNN_LSTM_Spectrogram_2.py", "max_forks_repo_name": "ChaojieZhang-cz/TS-Project-VoxCeleb", "max_forks_repo_head_hexsha": "c11941f0019d74e064726469cfa6ec9e5772c56d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4233870968, "max_line_length": 110, "alphanum_fraction": 0.6108712499, "include": true, "reason": "import numpy", "num_tokens": 2344}
|
%----------------------------------------------------------------
%---------------------BASIC SETUP-------------------------------
%----------------------------------------------------------------
\documentclass[9pt]{article}
\usepackage[
top=1.4cm,
bottom=2.4cm,
left=1.5cm,
right=1.5cm,
headsep=10pt,
letterpaper
]{geometry}
\usepackage{tikz}
\usetikzlibrary{calc}
\usepackage{fancyhdr} % fancy headings
\usepackage{tabularx}
\usepackage[hyphens,spaces]{url}
\usepackage[hidelinks]{hyperref} % hyperlinks in the document
\usepackage{tcolorbox}
%----------------------------------------------------------------
%-------------HEADINGS APPEARANCE WHEN INVOKED-------------------
%----------------------------------------------------------------
\usepackage{multicol}
\setlength{\columnsep}{1cm} %Column Separation
\usepackage[toctitles]{titlesec} % section titles
\usepackage{amsmath,amsfonts,amssymb,amsthm}
\setcounter{secnumdepth}{0}
\makeatletter
\renewcommand{\@seccntformat}[1]
{\llap{\textcolor{color-theme}{\csname the#1\endcsname}\hspace{1em}}}
\renewcommand{\section}{%
\@startsection{section}%
{1}%
{0pt}%
{-2ex \@plus -1ex \@minus -.2ex}%
{1ex \@plus.2ex }%
{\noindent\normalfont\large\sffamily\bfseries\color{color-theme}}%
}
\renewcommand{\subsection}{%
\@startsection{subsection}%
{2}%
{\z@}%
{-2ex \@plus -0.1ex \@minus -.4ex}%
{0.6ex \@plus.2ex }%
{\fontsize{9}{9}\sffamily\bfseries}%
}
\renewcommand{\subsubsection}{%
\@startsection{subsubsection}%
{3}%
{\z@}%
{-2ex \@plus -0.1ex \@minus -.2ex}%
{.2ex \@plus.2ex }%
{\normalfont\small\sffamily\bfseries}%
}
\renewcommand\paragraph{%
\@startsection{paragraph}%
{4}%
{\z@}%
{-2ex \@plus-.2ex \@minus .2ex}%
{.1ex}%
{\normalfont\small\sffamily\bfseries}%
}
\makeatother
%----------------------------------------------------------------
%---------------------------FONTS--------------------------------
%----------------------------------------------------------------
\usepackage{xcolor} % use of \color{}
\definecolor{rmblack}{HTML}{0E0E0E}
\definecolor{dimgray}{HTML}{606060}
\usepackage[
usefilenames,
% RMstyle={Text,Semibold},
% SSstyle={Text,Semibold},
TTstyle={Text,Semibold},
DefaultFeatures={Ligatures=Common}
]{plex-otf} % Cool font by IBM
%\renewcommand*\familydefault{\ttdefault}
\setsansfont{TeX Gyre Adventor}
%\setmainfont{QTEurotype}
%\setmainfont{TeX Gyre Termes}
\setmainfont[Color=rmblack]{TeX Gyre Adventor}
%----------------------------------------------------------------
%---------------------ADDITIONAL FEATURES------------------------
%----------------------------------------------------------------
\def\arraystretch{2}
\usepackage{enumitem}
\setlist[itemize]{leftmargin=*}
\pagestyle{fancy}
\fancyhf{} % sets both header and footer to nothing
\renewcommand{\headrulewidth}{0pt}
\lfoot{\fontsize{8}{8}\addfontfeature{Color=color-theme}\today}
% Center Footer
\cfoot{\fontsize{8}{8}\addfontfeature{Color=color-theme}Résumé}
% Right Footer
\rfoot{\fontsize{8}{8}\addfontfeature{Color=color-theme}Page \thepage}
\def\labelitemi{\fontsize{4}{4}$\color{color-theme}\blacksquare$}
\definecolor{dim-black}{HTML}{000000}
\newcommand{\reducespace}{\vspace*{-8pt}}
\newcommand{\interval}[1]{{\addfontfeature{Color=black}\textit{(#1)}}}
\newcommand{\projectsource}[1]{%
{%
\addfontfeature{Color=black}Link:\\\url{#1}%
}%
}
\newcommand{\link}[1]{\href{#1}{#1}}
\newcommand{\Mail}[1]{
\scriptsize\sffamily\textbf{Email:} \href{mailto:#1}{#1}\hspace*{12pt}%
}
\newcommand{\GitHub}[1]{
\textbf{GitHub:} \href{https://github.com/#1}{#1}\hspace*{12pt}%
}
\newcommand{\LinkedIn}[2]{
\textbf{Linked In:} \href{#1}{#2}%
}
\newcommand{\separator}{%
\vspace*{8pt}%
\par\noindent{\color{color-theme}\rule{\textwidth}{3pt}}%
\vspace*{-4pt}%
}
\newenvironment{cvlist}
{\begin{itemize}\setlength\itemsep{-0.2em}}
{\end{itemize}}
\setlength{\parindent}{0em}
\setlength{\parskip}{0em}
\newcommand{\originaltitle}[1]{%
~{\fontsize{6}{7}\sffamily\color{dimgray}{Original Title: #1}}%
}
\newcommand{\cvitem}[2]{
\def\originaltitleparam{#2}
\item #1
\ifx\originaltitleparam\empty% if #2 is empty
\\[-4pt]
\else
\\~\originaltitle{#2}%
\fi
}
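% Usage sketch (hypothetical entries) for the commands defined above:
% \begin{cvlist}
% \cvitem{Developed a typesetting pipeline}{Satzsystem-Pipeline}
% \cvitem{Maintained build tooling}{}
% \end{cvlist}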
|
{"hexsha": "23e2362aec484b45cbb73258f070886fd1db91be", "size": 4240, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/structure.tex", "max_stars_repo_name": "sesjehen-vestha-kxall/identity-cv", "max_stars_repo_head_hexsha": "b741e4759a23a7b9c43aa334855862c7eea7e6e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/structure.tex", "max_issues_repo_name": "sesjehen-vestha-kxall/identity-cv", "max_issues_repo_head_hexsha": "b741e4759a23a7b9c43aa334855862c7eea7e6e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/structure.tex", "max_forks_repo_name": "sesjehen-vestha-kxall/identity-cv", "max_forks_repo_head_hexsha": "b741e4759a23a7b9c43aa334855862c7eea7e6e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.367816092, "max_line_length": 89, "alphanum_fraction": 0.5820754717, "num_tokens": 1348}
|
"""
File: examples/model/output_interpolated_model.py
Author: Keith Tauscher
Date: 30 Jul 2019
Description: Shows a usage of the OutputInterpolatedModel class.
"""
import os
import numpy as np
import matplotlib.pyplot as pl
from pylinex import FixedModel, OutputInterpolatedModel,\
load_model_from_hdf5_file
file_name = 'TESTINGOUTPUTINTERPOLATEDMODELDELETETHIS.hdf5'
(amplitude, phase) = (5.1468, 0.29)
half_small_num_channels = 50
half_large_num_channels = 1000
small_num_channels = 1 + (2 * half_small_num_channels)
large_num_channels = 1 + (2 * half_large_num_channels)
fixed_slice = np.array([index for index in range(small_num_channels) if\
(((index < 20) or (index > 30)) and ((index < 70) or (index > 80)) and\
((index < 45) or (index > 55)))])
fixed_xs = np.linspace(-1, 1, small_num_channels)
interpolation_xs = np.linspace(-1, 1, large_num_channels)
fixed_ys = amplitude * np.sin((fixed_xs * np.pi) + phase)
fixed_xs = fixed_xs[fixed_slice]
fixed_ys = fixed_ys[fixed_slice]
fixed_model = FixedModel(fixed_ys)
interpolated_model =\
OutputInterpolatedModel(fixed_model, fixed_xs, interpolation_xs, order=1)
interpolated_by_model = interpolated_model(np.array([]))
interpolated_by_numpy = np.interp(interpolation_xs, fixed_xs, fixed_ys)
try:
interpolated_model.save(file_name)
assert(interpolated_model == load_model_from_hdf5_file(file_name))
except:
if os.path.exists(file_name):
os.remove(file_name)
raise
else:
os.remove(file_name)
assert(np.allclose(\
interpolated_by_model, interpolated_by_numpy, atol=1e-12, rtol=0))
pl.plot(interpolation_xs, interpolated_by_model, color='C0',\
    label='after interpolation')
pl.scatter(fixed_xs, fixed_ys, color='k', label='before interpolation')
pl.xlim((-1, 1))
pl.ylim((-1.1 * amplitude, 1.1 * amplitude))
pl.legend()
pl.show()
|
{"hexsha": "9488c3e5f71e01d0f605ee5641ecb45021382c9f", "size": 1842, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/model/output_interpolated_model.py", "max_stars_repo_name": "CU-NESS/pylinex", "max_stars_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/model/output_interpolated_model.py", "max_issues_repo_name": "CU-NESS/pylinex", "max_issues_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/model/output_interpolated_model.py", "max_forks_repo_name": "CU-NESS/pylinex", "max_forks_repo_head_hexsha": "b6f342595b6a154e129eb303782e5268088f34d5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7, "max_line_length": 77, "alphanum_fraction": 0.7540716612, "include": true, "reason": "import numpy", "num_tokens": 515}
|
# Copyright (C) 2008 University of Maryland
# All rights reserved.
# See LICENSE.txt for details.
# Author: Christopher Metting
#Starting Date:6/5/2009
from numpy import size,array,shape,indices, searchsorted, linspace
from numpy import log, log10, abs, min, max, nonzero,isnan
from .zoom_colorbar import *
import sys,copy
from numpy import roll
def feature_plot(unit,size,delta_xyz):
    '''
    A three dimensional plot of the voxelized feature. Does what spy_on does,
    but produces a three dimensional plot rather than z slices.
    feature (type: Lego_Model): contains all of the information required to
    calculate the scattering off the feature
    '''
from enthought.mayavi import mlab
dumbie_unit = roll(unit,1,axis = 0)
dumbie_unit = roll(dumbie_unit,1,axis = 1)
dumbie_unit[:,0,:] = 0.0
dumbie_unit[:,-1,:] = 0.0
dumbie_unit[0,:,:] = 0.0
dumbie_unit[-1,:,:] = 0.0
xyz = indices((unit.shape), 'd')
xyz[0] *= delta_xyz[0]
xyz[1] *= delta_xyz[1]
xyz[2] *= delta_xyz[2]
feature_size = shape(unit)
mlab.figure(0)
s = mlab.contour3d(xyz[0],xyz[1],xyz[2],dumbie_unit,opacity=.07,contours = 20)
mlab.figure(1)
t = mlab.contour3d(xyz[0],xyz[1],xyz[2]*10,dumbie_unit,opacity=.07,contours = 20)
mlab.figure(2)
u = mlab.contour3d(dumbie_unit,opacity=.05,contours = 20)
mlab.show()
return
def intensity_plot(intensity,mins,maxs, header = None, bar = True,
vmin = None, vmax = None):
    '''
    Creates a two dimensional plot of qz vs. qx, colored by the log of the
    scattering intensity.
    This plotter can be used for both resolution corrected and uncorrected
    intensity plots. The lowest non-zero value is added to the intensity to
    eliminate the limitations that exist when taking the log of zero.
    '''
from pylab import imshow,colorbar,show, title, xlabel, ylabel
plotxmin = mins[0]
plotxmax = maxs[0]
plotzmin = mins[-1]
plotzmax = maxs[-1]
    print(min(intensity))
if vmax == None:
vmax = max(log10(intensity))
        print(max(log(intensity)))
if vmin == None:
vmin = max(log10(intensity)) - 15.0
intensity[isnan(intensity)] = 0.0
if size(abs(intensity[nonzero(intensity.real)])) == 0:
lower_lim = 0.0
else:
lower_lim = min(abs(intensity[nonzero(intensity.real)]))
plot_extent = (plotxmin,plotxmax,plotzmin,plotzmax)
graph = imshow(log10(abs(intensity.T+lower_lim)),aspect='auto',
interpolation='nearest',extent=plot_extent,origin='lower',
vmin = vmin, vmax = vmax)
zoom_colorbar(graph)
title(str(header))
xlabel('qx(A^-1)')
ylabel('qz(A^-1)')
return graph
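# Usage sketch (hypothetical arrays): given a (nqx, nqz) intensity array `I`
# and q-space bounds `mins`/`maxs` as used above,
#     g = intensity_plot(I, mins, maxs, header='uncorrected')
#     show()   # from pylab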
def linear_plot(intensity,mins,maxs, header = None, bar = True,
vmin = None, vmax = None):
    '''
    Creates a two dimensional plot of qz vs. qx, colored by the scattering
    intensity on a linear scale.
    This plotter can be used for both resolution corrected and uncorrected
    intensity plots. The lowest non-zero value is added to the intensity to
    eliminate the limitations that exist when taking the log of zero.
    '''
from pylab import imshow,colorbar,show, title, xlabel, ylabel
plotxmin = mins[0]
plotxmax = maxs[0]
plotzmin = mins[-1]
plotzmax = maxs[-1]
lower_lim = min(intensity[nonzero(intensity.real)])
plot_extent = (plotxmin,plotxmax,plotzmin,plotzmax)
graph = imshow((abs(intensity.T+lower_lim)),aspect='auto',
interpolation='nearest',
extent=plot_extent,origin='lower')
colorbar()
title(str(header))
    xlabel('qx(A^-1)')
    ylabel('qz(A^-1)')
return graph
def qz_slice(intensity,mins,maxs,q_slice = 0.0,second_intensity = None):
'''
This takes a qz slice from the uncorrected intensity plot and, if the
resolution corrected plot exists, will also show the qz slice for that data
'''
from pylab import semilogy,plot, legend, title, xlabel, ylabel
    print(shape(intensity))
qz_array = linspace(mins[2],maxs[2],shape(intensity)[1])
z_position = searchsorted(qz_array,q_slice)
    graph = plot(qz_array, log10(intensity[z_position,:].real),
                 label = 'Uncorrected')
    if second_intensity is not None:
        plot(qz_array, log10(second_intensity[z_position,:].real),
             label = 'Corrected')
legend()
title('Qz Slice at '+ str(q_slice))
xlabel('qz(A^-1)')
ylabel('Normalized Intensity')
return graph
def data_compare(intensity_one,intensity_two,mins,maxs):
from pylab import imshow,colorbar,show, title, xlabel, ylabel,subplot
plotxmin = mins[0]
plotxmax = maxs[0]
plotzmin = mins[-1]
plotzmax = maxs[-1]
intensity_one[isnan(intensity_one)] = 0.0
    intensity_two[isnan(intensity_two)] = 0.0
intensity_one += min(intensity_one[(intensity_one)>0])/2
intensity_two += min(intensity_one[(intensity_one)>0])/2
vmin = min(log(intensity_one))
vmax = max(log(intensity_one))
plot_extent = (plotxmin,plotxmax,plotzmin,plotzmax)
subplot(311)
imshow(log(abs(intensity_one.T)),
aspect='auto',interpolation='nearest',
extent=plot_extent,origin='lower',
vmin = vmin, vmax = vmax)
colorbar()
subplot(312)
imshow(log(abs(intensity_two.T)),
aspect='auto',interpolation='nearest',
extent=plot_extent,origin='lower',
vmin = vmin, vmax = vmax)
colorbar()
subplot(313)
imshow((abs(intensity_one - intensity_two)/intensity_one).T,aspect='auto',
interpolation='nearest',extent=plot_extent,origin='lower',
vmin = vmin, vmax = vmax)
colorbar()
def test():
'''
this test was used to fix the plotters
'''
intensity = array([[1,2,3,4,5],[5,4,3,2,1],[1,5,2,4,3],[5,1,4,2,3],[5,1,4,2,3]])
mins = array([-3,-3,-3])
maxs = array([3,3,3])
qz_slice(intensity,mins,maxs)
if __name__=="__main__":test()
|
{"hexsha": "008b018afe7ed003cde34f0b292c96caf3811ea7", "size": 6038, "ext": "py", "lang": "Python", "max_stars_repo_path": "osrefl/viewers/view.py", "max_stars_repo_name": "reflectometry/osrefl", "max_stars_repo_head_hexsha": "ddf55d542f2eab2a29fd6ffc862379820a06d5c7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-05-21T15:16:46.000Z", "max_stars_repo_stars_event_max_datetime": "2015-10-23T17:47:36.000Z", "max_issues_repo_path": "osrefl/viewers/view.py", "max_issues_repo_name": "reflectometry/osrefl", "max_issues_repo_head_hexsha": "ddf55d542f2eab2a29fd6ffc862379820a06d5c7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "osrefl/viewers/view.py", "max_forks_repo_name": "reflectometry/osrefl", "max_forks_repo_head_hexsha": "ddf55d542f2eab2a29fd6ffc862379820a06d5c7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1690821256, "max_line_length": 85, "alphanum_fraction": 0.6477310368, "include": true, "reason": "from numpy", "num_tokens": 1686}
|
from math import ceil, sqrt
import numpy as np
from scipy.special import sph_harm, spherical_jn
def trunc_H3d(k, T):
l = np.arange(ceil(16 + k * T))
I = np.where(
np.abs(np.sqrt((2 * l + 1) / (4 * np.pi)) * spherical_jn(l, k * T)) > 1e-6
)
return I[0][-1]
def incident_field(k, z):
return np.exp(1j * k * z)
def calc_inc_field(k, r, θ, φ, L):
    # complex accumulator with the common broadcast shape of r, θ and φ
    u = np.zeros(np.broadcast(r, θ, φ).shape, dtype=complex)
for l in range(L):
u += (
1j ** l
* sqrt(4 * np.pi * (2 * l + 1))
* spherical_jn(l, k * r)
* sph_harm(0, l, θ, φ)
)
return u
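# calc_inc_field evaluates the spherical-harmonic (Jacobi-Anger type)
# expansion of a plane wave,
#     exp(i k z) = sum_{l>=0} i^l sqrt(4 pi (2l+1)) j_l(k r) Y_l^0(theta, phi),
# truncated after L terms; the check below compares it to incident_field.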
k = 10
L = trunc_H3d(k, 3)
print(f"L = {L}")
r = np.linspace(0.5, 3, num=16)
θ = np.linspace(0, 2 * np.pi, num=16, endpoint=False)
φ = np.linspace(0, np.pi, num=18)[1:-1]
R, Θ, Φ = np.meshgrid(r, θ, φ)
Z = R * np.cos(Φ)
print(np.amax(np.abs(incident_field(k, Z) - calc_inc_field(k, R, Θ, Φ, L))))
|
{"hexsha": "db85bf14143b0e1ca08ce94da119bfa609f1e0dd", "size": 944, "ext": "py", "lang": "Python", "max_stars_repo_path": "dev/dev_uinc_3d.py", "max_stars_repo_name": "zmoitier/accoster", "max_stars_repo_head_hexsha": "648b9edf7e73848eacb60af0885be4d30fdbbafc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dev/dev_uinc_3d.py", "max_issues_repo_name": "zmoitier/accoster", "max_issues_repo_head_hexsha": "648b9edf7e73848eacb60af0885be4d30fdbbafc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-12-04T21:17:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-06T19:54:36.000Z", "max_forks_repo_path": "dev/dev_uinc_3d.py", "max_forks_repo_name": "zmoitier/accoster", "max_forks_repo_head_hexsha": "648b9edf7e73848eacb60af0885be4d30fdbbafc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-18T17:24:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-18T17:24:52.000Z", "avg_line_length": 22.4761904762, "max_line_length": 82, "alphanum_fraction": 0.5286016949, "include": true, "reason": "import numpy,from scipy", "num_tokens": 361}
|
# Copyright (c) OpenMMLab. All rights reserved.
from collections import defaultdict
import numpy as np
def _create_coco_gt_results(dataset):
from mmdet.core import bbox2result
from mmtrack.core import track2result
results = defaultdict(list)
for img_info in dataset.data_infos:
ann = dataset.get_ann_info(img_info)
        scores = np.ones((ann['bboxes'].shape[0], 1), dtype=float)
bboxes = np.concatenate((ann['bboxes'], scores), axis=1)
bbox_results = bbox2result(bboxes, ann['labels'], len(dataset.CLASSES))
        track_results = track2result(bboxes, ann['labels'],
                                     ann['instance_ids'].astype(int),
                                     len(dataset.CLASSES))
results['bbox_results'].append(bbox_results)
results['track_results'].append(track_results)
return results
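# Usage sketch (hypothetical): `dataset` is an mmtrack dataset exposing
# `data_infos`, `get_ann_info` and `CLASSES`; feeding the ground truth back
# through evaluation should yield near-perfect scores:
#     results = _create_coco_gt_results(dataset)
#     dataset.evaluate(results)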
|
{"hexsha": "4c39c14f9e926cc4bcbf2059e085101e859db9d1", "size": 877, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_data/test_datasets/utils.py", "max_stars_repo_name": "dzambrano/mmtracking", "max_stars_repo_head_hexsha": "ec7a2e36fbf99effed4602a4df929f495efe73c5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-25T21:50:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T14:45:09.000Z", "max_issues_repo_path": "tests/test_data/test_datasets/utils.py", "max_issues_repo_name": "RangiLyu/mmtracking", "max_issues_repo_head_hexsha": "a4824091a0b24eb0add16a9233fec3be73ad6e32", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_data/test_datasets/utils.py", "max_forks_repo_name": "RangiLyu/mmtracking", "max_forks_repo_head_hexsha": "a4824091a0b24eb0add16a9233fec3be73ad6e32", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.1304347826, "max_line_length": 79, "alphanum_fraction": 0.6465222349, "include": true, "reason": "import numpy", "num_tokens": 192}
|
# Takuya Ito
# 09/11/2018
# Modified by Michael Cole, June 2020
# Post-processing nuisance regression using Ciric et al. 2017 inspired best-practices
## OVERVIEW
# There are two main parts to this script/set of functions
# 1. "step1_createNuisanceRegressors"
# Generates a variety of nuisance regressors, such as motionSpikes, aCompCor regressors, etc. that are essential to a subset of Ciric-style models, with the addition of some new combinations (e.g., aCompCor + spikeReg + movement parameters)
# This is actually the bulk of the script, and takes quite a while to compute, largely due to the fact that we need to load in 4D time series from the raw fMRI data (in order to compute regressors such as global signal)
# 2. "step2_nuisanceRegression"
# This is the function that actually performs the nuisance regression, using regressors obtained from step1. There are a variety of models to choose from, including:
# The best model from Ciric et al. (2017) (e.g., 36p + spikeReg)
# What I call the "legacy Cole Lab models", which are the traditional 6 motion parameters, gsr, wm and ventricle time series and all their derivatives (e.g., 18p)
# There is also 16pNoGSR, which is the above, but without gsr and its derivative.
# Ultimately, read below for other combinations; what I would consider the best option that does NOT include GSR is the default, called "24pXaCompCorXVolterra" - read below for what it entails...
# IMPORTANT: In general, only functions step1, step2 and the parameters preceding them will need to be edited. There are many helper functions below, but in theory, they should not be edited.
# Currently, this script is defaulted to create the nuisance regressors in your current working directory (in a sub directory), and the glm output in your current working directory
# The default is set to use data from the HCP352 QC'd data set, so will need to be updated accordingly.
# For now, this only includes extensive nuisance regression. Any task regression will need to be performed independently after this.
## EXAMPLE USAGE:
# import nuisanceRegressionPipeline as nrp
# nrp.step1_createNuisanceRegressors(nproc=8)
# nrp.step2_nuisanceRegression(nproc=5, model='24pXaCompCorXVolterra',spikeReg=False,zscore=False)
## DISCLAIMER: This is a first draft, so... keep that in mind.
import numpy as np
import os
import glob
from nipy.modalities.fmri.hemodynamic_models import spm_hrf
import multiprocessing as mp
import h5py
import scipy.stats as stats
from scipy import signal
import nibabel as nib
import scipy
import scipy.sparse.linalg
import time
import warnings
warnings.simplefilter('ignore', np.ComplexWarning)
import regression
## Define GLOBAL variables (variables accessible to all functions)
# Define base data directory
datadir_minpreproc = '/projects/f_mc1689_1/HCP352Data/data/hcppreprocessedmsmall/'
datadir='/projects/f_mc1689_1/TaskFCActflow/data/hcppreprocessedmsmall/'
nuisanceregressors_datadir='/projects/f_mc1689_1/HCP352Data/data/'
# Define number of frames to skip
framesToSkip = 5
# Define all runs you want to preprocess
allRuns = ['rfMRI_REST1_RL', 'rfMRI_REST1_LR','rfMRI_REST2_RL', 'rfMRI_REST2_LR','tfMRI_EMOTION_RL','tfMRI_EMOTION_LR','tfMRI_GAMBLING_RL','tfMRI_GAMBLING_LR','tfMRI_LANGUAGE_RL','tfMRI_LANGUAGE_LR','tfMRI_MOTOR_RL','tfMRI_MOTOR_LR','tfMRI_RELATIONAL_RL','tfMRI_RELATIONAL_LR','tfMRI_SOCIAL_RL','tfMRI_SOCIAL_LR','tfMRI_WM_RL','tfMRI_WM_LR']
# Define the *output* directory for nuisance regressors
#nuis_reg_dir = datadir + 'nuisanceRegressors/'
nuis_reg_dir = nuisanceregressors_datadir + 'hcppreprocessedmsmall/nuisanceRegressors/'
# Create directory if it doesn't exist
if not os.path.exists(nuis_reg_dir): os.makedirs(nuis_reg_dir)
# Define the *output* directory for preprocessed data
outputdir = datadir + 'vertexWise/'
rawdatadir_base = '/projects/f_mc1689_1/HCP352Data/data/minimally_preprocessed/'
#
# Define subjects list
subjNums = ['100206','108020','117930','126325','133928','143224','153934','164636','174437','183034','194443','204521','212823','268749','322224','385450','463040','529953','587664','656253','731140','814548','877269','978578','100408','108222','118124','126426','134021','144832','154229','164939','175338','185139','194645','204622','213017','268850','329844','389357','467351','530635','588565','657659','737960','816653','878877','987074','101006','110007','118225','127933','134324','146331','154532','165638','175742','185341','195445','205119','213421','274542','341834','393247','479762','545345','597869','664757','742549','820745','887373','989987','102311','111009','118831','128632','135528','146432','154936','167036','176441','186141','196144','205725','213522','285345','342129','394956','480141','552241','598568','671855','744553','826454','896879','990366','102513','112516','118932','129028','135629','146533','156031','167440','176845','187850','196346','205826','214423','285446','348545','395756','481042','553344','599671','675661','749058','832651','899885','991267','102614','112920','119126','129129','135932','147636','157336','168745','177645','188145','198350','208226','214726','286347','349244','406432','486759','555651','604537','679568','749361','835657','901442','992774','103111','113316','120212','130013','136227','148133','157437','169545','178748','188549','198451','208327','217429','290136','352738','414229','497865','559457','615744','679770','753150','837560','907656','993675','103414','113619','120414','130114','136833','150726','157942','171330']
# Validation set
#subjNums = ['178950','189450','199453','209228','220721','298455','356948','419239','499566','561444','618952','680452','757764','841349','908860','103818','113922','121618','130619','137229','151829','158035','171633','179346','190031','200008','210112','221319','299154','361234','424939','500222','570243','622236','687163','769064','845458','911849','104416','114217','122317','130720','137532','151930','159744','172029','180230','191235','200614','211316','228434','300618','361941','432332','513130','571144','623844','692964','773257','857263','926862','105014','114419','122822','130821','137633','152427','160123','172938','180432','192035','200917','211417','239944','303119','365343','436239','513736','579665','638049','702133','774663','865363','930449','106521','114823','123521','130922','137936','152831','160729','173334','180533','192136','201111','211619','249947','305830','366042','436845','516742','580650','645450','715041','782561','871762','942658','106824','117021','123925','131823','138332','153025','162026','173536','180735','192439','201414','211821','251833','310621','371843','445543','519950','580751','647858','720337','800941','871964','955465','107018','117122','125222','132017','138837','153227','162329','173637','180937','193239','201818','211922','257542','314225','378857','454140','523032','585862','654350','725751','803240','872562','959574','107422','117324','125424','133827','142828','153631','164030','173940','182739','194140','202719','212015','257845','316633','381543','459453','525541','586460','654754','727553','812746','873968','966975']
def step1_createNuisanceRegressors(nproc=5):
"""
Function to generate subject-wise nuisance parameters in parallel
This function first defines a local function (a function within this function) to generate each subject's nuisance regressors
Then we use the multiprocessing module to generate regressors for multiple subjects at a time
**Note: Parameters in this function may need to be edited for project-specific purposes. Sections in which editing should NOT be done are noted
"""
# Make below function global, so it is accessible to the parallel process (don't change this)
global _createNuisanceRegressorsSubject
def _createNuisanceRegressorsSubject(subj):
## Potentially will need to be edited, according to project
# Directory for all the masks
maskdir = datadir + 'masks/' + subj
# Path and file name for the whole-brain mask
globalmask = maskdir + subj + '_wholebrainmask_func_dil1vox.nii.gz'
# Path and file name for the white matter mask
wmmask = maskdir + subj + '_wmMask_func_eroded.nii.gz'
# Path and file name for the ventricle mask
ventriclesmask = maskdir + subj + '_ventricles_func_eroded.nii.gz'
# This is the path and filename for the output regressors
nuisance_reg_filename = nuis_reg_dir + subj + '_nuisanceRegressors.h5'
# Define the directory containing the raw preprocessed data
rawdatadir = '/projects/f_mc1689_1/HCP352Data/data/minimally_preprocessed/' + subj + '/MNINonLinear/Results/'
# Number of principal components to extract out of WM and ventricle signals
compCorComponents = 5
# Spike regression threshold, using relative root-mean-square displacement (in mm)
spikeReg = .25
####
for run in allRuns:
print('creating nuisance regressors for subject', subj, 'run:', run)
# This is the fMRI 4d file (volumetric) to obtain the noise signals -- done for each run
inputname = rawdatadir + run + '/' + run + '.nii.gz'
#### Obtain movement parameters -- this will differ across preprocessing pipelines (e.g., HCP vs. typical)
# For all 12 movement parameters (6 regressors + derivatives)
movementRegressors = np.loadtxt(rawdatadir + run + '/Movement_Regressors.txt')
# Separate the two parameters out for clarity
# x, y, z + 3 rotational movements
motionParams = movementRegressors[:,:6]
# The derivatives of the above movements (backwards differentiated)
motionParams_deriv = movementRegressors[:,6:] # HCP automatically computes derivative of motion parameters
####
# DO NOT CHANGE THIS SECTION, IT IS NECESSARY FOR THE SCRIPT TO RUN
h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5','a')
try:
h5f.create_dataset(run + '/motionParams',data=motionParams)
h5f.create_dataset(run + '/motionParams_deriv',data=motionParams_deriv)
except:
del h5f[run + '/motionParams'], h5f[run + '/motionParams_deriv']
h5f.create_dataset(run + '/motionParams',data=motionParams)
h5f.create_dataset(run + '/motionParams_deriv',data=motionParams_deriv)
h5f.close()
# END OF DO NOT CHANGE
####
#### Obtain relative root-mean-square displacement -- this will differ across preprocessing pipelines
# A simple alternative is to compute the np.sqrt(x**2 + y**2 + z**2), where x, y, and z are motion displacement parameters
# e.g., x = x[t] - x[t-1]; y = y[t] - y[t-1]; z = z[t] - z[t-1]
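            # Sketch of that alternative (hypothetical names): given a
            # (time x 6) array `mp` whose first three columns are the x/y/z
            # displacements in mm,
            #     dxyz = np.diff(mp[:, :3], axis=0)
            #     rel_rms = np.sqrt((dxyz ** 2).sum(axis=1))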
# For HCP data, just load in the relative RMS
relativeRMS = np.loadtxt(rawdatadir + run + '/Movement_RelativeRMS.txt')
# Calculate motion spike regressors using helper functions defined below
_createMotionSpikeRegressors(relativeRMS, subj, run, spikeReg=spikeReg)
# Extract physiological noise signals using helper functions defined below
_createPhysiologicalNuisanceRegressors(inputname, subj, run, globalmask, wmmask, ventriclesmask, aCompCor=compCorComponents)
# Construct parallel processes to run the local function in parallel (subject-wise parallelization)
# Outputs will be found in "nuis_reg_dir" parameter
pool = mp.Pool(processes=nproc)
pool.map_async(_createNuisanceRegressorsSubject,subjNums).get()
pool.close()
pool.join()
def step2_nuisanceRegression(subj_nums=subjNums, nproc=5, model='24pXaCompCorXVolterra',spikeReg=False,zscore=False, cortex_only=True):
"""
Function to perform nuisance regression on each run separately
This uses parallel processing, but parallelization occurs within each subject
Each subject runs regression on each region/voxel in parallel, thus iterating subjects and runs serially
Input parameters:
subj : subject number as a string
run : task run
outputdir: Directory for GLM output, as an h5 file (each run will be contained within each h5)
model : model choices for linear regression. Models include:
1. 24pXaCompCorXVolterra [default]
Variant from Ciric et al. 2017.
Includes (64 regressors total):
- Movement parameters (6 directions; x, y, z displacement, and 3 rotations) and their derivatives, and their quadratics (24 regressors)
- aCompCor (5 white matter and 5 ventricle components) and their derivatives, and their quadratics (40 regressors)
2. 18p (the lab's legacy default)
Includes (18 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- Global signal and its derivative (2 regressors)
- White matter signal and its derivative (2 regressors)
- Ventricles signal and its derivative (2 regressors)
3. 16pNoGSR (the legacy default, without GSR)
Includes (16 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- White matter signal and its derivative (2 regressors)
- Ventricles signal and its derivative (2 regressors)
4. 12pXaCompCor (Typical motion regression, but using CompCor (noGSR))
Includes (32 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- aCompCor (5 white matter and 5 ventricle components) and their derivatives (no quadratics; 20 regressors)
5. 36p (State-of-the-art, according to Ciric et al. 2017)
Includes (36 regressors total - same as legacy, but with quadratics):
- Movement parameters (6 directions) and their derivatives and quadratics (24 regressors)
- Global signal and its derivative and both quadratics (4 regressors)
- White matter signal and its derivative and both quadratics (4 regressors)
- Ventricles signal and its derivative (4 regressors)
spikeReg : spike regression (Satterthwaite et al. 2013) [True/False]
Note, inclusion of this will add additional set of regressors, which is custom for each subject/run
zscore : Normalize data (across time) prior to fitting regression
nproc = number of processes to use via multiprocessing
"""
# Iterate through each subject
for subj in subj_nums:
# Iterate through each run
for run in allRuns:
print('Running regression on subject', subj, '| run', run)
print('\tModel:', model, 'with spikeReg:', spikeReg, '| zscore:', zscore)
## Load in data to be preprocessed - This needs to be a space x time 2d array
#inputfile = datadir_minpreproc + 'resampled/' + subj + '/' + subj + '_' + run + '_MSMAll_64k.LR.dtseries.nii'
inputfile=rawdatadir_base+'/'+subj+'/MNINonLinear/Results/'+run+'/'+run+'_Atlas_MSMAll.dtseries.nii';
# Load data
data = np.squeeze(nib.load(inputfile).get_data()).T
# If cortex_only then only use the first ~64k (actually 59412) grayordinates (corresponding to cortex)
if cortex_only:
data = data[0:59412,:]
# Run nuisance regression for this subject's run, using a helper function defined below
# Data will be output in 'outputdir', defined above
_nuisanceRegression(subj, run, data, outputdir, model=model,spikeReg=spikeReg,zscore=zscore,nproc=nproc)
#########################################
# Functions that probably don't need to be edited
def _nuisanceRegression(subj, run, inputdata, outputdir, model='24pXaCompCorXVolterra', spikeReg=False, zscore=False, nproc=8):
"""
This function runs nuisance regression on the Glasser Parcels (360) on a single subjects run
Will only regress out noise parameters given the model choice (see below for model options)
Input parameters:
subj : subject number as a string
run : task run
outputdir: Directory for GLM output, as an h5 file (each run will be contained within each h5)
model : model choices for linear regression. Models include:
1. 24pXaCompCorXVolterra [default]
Variant from Ciric et al. 2017.
Includes (64 regressors total):
- Movement parameters (6 directions; x, y, z displacement, and 3 rotations) and their derivatives, and their quadratics (24 regressors)
- aCompCor (5 white matter and 5 ventricle components) and their derivatives, and their quadratics (40 regressors)
2. 18p (the legacy default)
Includes (18 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- Global signal and its derivative (2 regressors)
- White matter signal and its derivative (2 regressors)
- Ventricles signal and its derivative (2 regressors)
3. 16pNoGSR (the legacy default, without GSR)
Includes (16 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- White matter signal and its derivative (2 regressors)
- Ventricles signal and its derivative (2 regressors)
4. 12pXaCompCor (Typical motion regression, but using CompCor (noGSR))
Includes (32 regressors total):
- Movement parameters (6 directions) and their derivatives (12 regressors)
- aCompCor (5 white matter and 5 ventricle components) and their derivatives (no quadratics; 20 regressors)
5. 36p (State-of-the-art, according to Ciric et al. 2017)
Includes (36 regressors total - same as legacy, but with quadratics):
- Movement parameters (6 directions) and their derivatives and quadratics (24 regressors)
- Global signal and its derivative and both quadratics (4 regressors)
- White matter signal and its derivative and both quadratics (4 regressors)
- Ventricles signal and its derivative (4 regressors)
spikeReg : spike regression (Satterthwaite et al. 2013) [True/False]
Note, inclusion of this will add additional set of regressors, which is custom for each subject/run
zscore : Normalize data (across time) prior to fitting regression
nproc = number of processes to use via multiprocessing
"""
data = inputdata
tMask = np.ones((data.shape[1],))
tMask[:framesToSkip] = 0
# Skip frames
data = data[:,framesToSkip:]
# Demean each run
data = signal.detrend(data,axis=1,type='constant')
# Detrend each run
data = signal.detrend(data,axis=1,type='linear')
tMask = np.asarray(tMask,dtype=bool)
nROIs = data.shape[0]
# Load nuisance regressors for this data
h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5','r')
if model=='24pXaCompCorXVolterra':
# Motion parameters + derivatives
motion_parameters = h5f[run]['motionParams'][:].copy()
motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
# WM aCompCor + derivatives
aCompCor_WM = h5f[run]['aCompCor_WM'][:].copy()
aCompCor_WM_deriv = h5f[run]['aCompCor_WM_deriv'][:].copy()
# Ventricles aCompCor + derivatives
aCompCor_ventricles = h5f[run]['aCompCor_ventricles'][:].copy()
aCompCor_ventricles_deriv = h5f[run]['aCompCor_ventricles_deriv'][:].copy()
# Create nuisance regressors design matrix
nuisanceRegressors = np.hstack((motion_parameters, motion_parameters_deriv, aCompCor_WM, aCompCor_WM_deriv, aCompCor_ventricles, aCompCor_ventricles_deriv))
quadraticRegressors = nuisanceRegressors**2
nuisanceRegressors = np.hstack((nuisanceRegressors,quadraticRegressors))
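        # (The squared terms above are the "Volterra" expansion the model
        # name refers to: each regressor and derivative also enters squared.)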
elif model=='18p':
# Motion parameters + derivatives
motion_parameters = h5f[run]['motionParams'][:].copy()
motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
# Global signal + derivatives
global_signal = h5f[run]['global_signal'][:].copy()
global_signal_deriv = h5f[run]['global_signal_deriv'][:].copy()
# white matter signal + derivatives
wm_signal = h5f[run]['wm_signal'][:].copy()
wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
# ventricle signal + derivatives
ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
# Create nuisance regressors design matrix
tmp = np.vstack((global_signal,global_signal_deriv,wm_signal,wm_signal_deriv,ventricle_signal,ventricle_signal_deriv)).T # Need to vstack, since these are 1d arrays
nuisanceRegressors = np.hstack((motion_parameters, motion_parameters_deriv, tmp))
elif model=='16pNoGSR':
# Motion parameters + derivatives
motion_parameters = h5f[run]['motionParams'][:].copy()
motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
# white matter signal + derivatives
wm_signal = h5f[run]['wm_signal'][:].copy()
wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
# ventricle signal + derivatives
ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
# Create nuisance regressors design matrix
tmp = np.vstack((wm_signal,wm_signal_deriv,ventricle_signal,ventricle_signal_deriv)).T # Need to vstack, since these are 1d arrays
nuisanceRegressors = np.hstack((motion_parameters, motion_parameters_deriv, tmp))
elif model=='12pXaCompCor':
# Motion parameters + derivatives
motion_parameters = h5f[run]['motionParams'][:].copy()
motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
# WM aCompCor + derivatives
aCompCor_WM = h5f[run]['aCompCor_WM'][:].copy()
aCompCor_WM_deriv = h5f[run]['aCompCor_WM_deriv'][:].copy()
# Ventricles aCompCor + derivatives
aCompCor_ventricles = h5f[run]['aCompCor_ventricles'][:].copy()
aCompCor_ventricles_deriv = h5f[run]['aCompCor_ventricles_deriv'][:].copy()
# Create nuisance regressors design matrix
nuisanceRegressors = np.hstack((motion_parameters, motion_parameters_deriv, aCompCor_WM, aCompCor_WM_deriv, aCompCor_ventricles, aCompCor_ventricles_deriv))
elif model=='36p':
# Motion parameters + derivatives
motion_parameters = h5f[run]['motionParams'][:].copy()
motion_parameters_deriv = h5f[run]['motionParams_deriv'][:].copy()
# Global signal + derivatives
global_signal = h5f[run]['global_signal'][:].copy()
global_signal_deriv = h5f[run]['global_signal_deriv'][:].copy()
# white matter signal + derivatives
wm_signal = h5f[run]['wm_signal'][:].copy()
wm_signal_deriv = h5f[run]['wm_signal_deriv'][:].copy()
# ventricle signal + derivatives
ventricle_signal = h5f[run]['ventricle_signal'][:].copy()
ventricle_signal_deriv = h5f[run]['ventricle_signal_deriv'][:].copy()
# Create nuisance regressors design matrix
tmp = np.vstack((global_signal,global_signal_deriv,wm_signal,wm_signal_deriv,ventricle_signal,ventricle_signal_deriv)).T # Need to vstack, since these are 1d arrays
nuisanceRegressors = np.hstack((motion_parameters, motion_parameters_deriv, tmp))
quadraticRegressors = nuisanceRegressors**2
nuisanceRegressors = np.hstack((nuisanceRegressors,quadraticRegressors))
if spikeReg:
# Obtain motion spikes
try:
motion_spikes = h5f[run]['motionSpikes'][:].copy()
nuisanceRegressors = np.hstack((nuisanceRegressors,motion_spikes))
except:
print('Spike regression option was chosen... but no motion spikes for subj', subj, '| run', run, '!')
# Update the model name - to keep track of different model types for output naming
model = model + '_spikeReg'
if zscore:
model = model + '_zscore'
h5f.close()
# Skip first 5 frames of nuisanceRegressors, too
nuisanceRegressors = nuisanceRegressors[framesToSkip:,:].copy()
betas, resid = regression.regression(data.T, nuisanceRegressors, constant=True)
    betas = betas.T # transpose to space x regressors
residual_ts = resid.T
if zscore:
residual_ts = stats.zscore(residual_ts,axis=1)
outname1 = run + '/nuisanceReg_resid_' + model
outname2 = run + '/nuisanceReg_betas_' + model
outputfilename = outputdir + subj + '_glmPlusNuisRegOutput_64k_data.h5'
h5f = h5py.File(outputfilename,'a')
try:
h5f.create_dataset(outname1,data=residual_ts)
h5f.create_dataset(outname2,data=betas)
except:
del h5f[outname1], h5f[outname2]
h5f.create_dataset(outname1,data=residual_ts)
h5f.create_dataset(outname2,data=betas)
h5f.close()
def _createMotionSpikeRegressors(relative_rms, subj, run, spikeReg=.25):
"""
relative_rms- time x 1 array (for HCP data, can be obtained from the txt file 'Movement_RelativeRMS.txt'; otherwise see Van Dijk et al. (2011) Neuroimage for approximate calculation
run - Indicate which run this is
spikeReg - generate spike time regressors for motion spikes, using a default threshold of .25mm FD threshold
"""
nTRs = relative_rms.shape[0]
motionSpikes = np.where(relative_rms>spikeReg)[0]
if len(motionSpikes)>0:
spikeRegressorsArray = np.zeros((nTRs,len(motionSpikes)))
for spike in range(len(motionSpikes)):
spike_time = motionSpikes[spike]
spikeRegressorsArray[spike_time,spike] = 1.0
spikeRegressorsArray = np.asarray(spikeRegressorsArray,dtype=bool)
# Create h5py output
h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5','a')
try:
h5f.create_dataset(run + '/motionSpikes',data=spikeRegressorsArray)
except:
del h5f[run + '/motionSpikes']
h5f.create_dataset(run + '/motionSpikes',data=spikeRegressorsArray)
h5f.close()
def _createPhysiologicalNuisanceRegressors(inputname, subj, run, globalmask, wmmask, ventriclesmask, aCompCor=5):
"""
inputname - 4D input time series to obtain nuisance regressors
run - fMRI run
globalmask - whole brain mask to extract global time series
wmmask - white matter mask (functional) to extract white matter time series
ventriclesmask- ventricles mask (functional) to extract ventricle time series
aCompCor - Create PC component time series of white matter and ventricle time series, using first n PCs
"""
# Load raw fMRI data (in volume space)
print('Loading raw fMRI data')
fMRI4d = nib.load(inputname).get_data()
##########################################################
## Nuisance time series (Global signal, WM, and Ventricles)
print('Obtaining standard global, wm, and ventricle signals and their derivatives')
# Global signal
globalMask = nib.load(globalmask).get_data()
globalMask = np.asarray(globalMask,dtype=bool)
globaldata = fMRI4d[globalMask].copy()
globaldata = signal.detrend(globaldata,axis=1,type='constant')
globaldata = signal.detrend(globaldata,axis=1,type='linear')
global_signal1d = np.mean(globaldata,axis=0)
# White matter signal
wmMask = nib.load(wmmask).get_data()
wmMask = np.asarray(wmMask,dtype=bool)
wmdata = fMRI4d[wmMask].copy()
wmdata = signal.detrend(wmdata,axis=1,type='constant')
wmdata = signal.detrend(wmdata,axis=1,type='linear')
wm_signal1d = np.mean(wmdata,axis=0)
# Ventricle signal
ventricleMask = nib.load(ventriclesmask).get_data()
ventricleMask = np.asarray(ventricleMask,dtype=bool)
ventricledata = fMRI4d[ventricleMask].copy()
ventricledata = signal.detrend(ventricledata,axis=1,type='constant')
ventricledata = signal.detrend(ventricledata,axis=1,type='linear')
ventricle_signal1d = np.mean(ventricledata,axis=0)
del fMRI4d
## Create derivative time series (with backward differentiation, consistent with 1d_tool.py -derivative option)
# Global signal derivative
global_signal1d_deriv = np.zeros(global_signal1d.shape)
global_signal1d_deriv[1:] = global_signal1d[1:] - global_signal1d[:-1]
# White matter signal derivative
wm_signal1d_deriv = np.zeros(wm_signal1d.shape)
wm_signal1d_deriv[1:] = wm_signal1d[1:] - wm_signal1d[:-1]
# Ventricle signal derivative
ventricle_signal1d_deriv = np.zeros(ventricle_signal1d.shape)
ventricle_signal1d_deriv[1:] = ventricle_signal1d[1:] - ventricle_signal1d[:-1]
## Write to h5py
# Create h5py output
h5f = h5py.File(nuis_reg_dir + subj + '_nuisanceRegressors.h5','a')
try:
h5f.create_dataset(run + '/global_signal',data=global_signal1d)
h5f.create_dataset(run + '/global_signal_deriv',data=global_signal1d_deriv)
h5f.create_dataset(run + '/wm_signal',data=wm_signal1d)
h5f.create_dataset(run + '/wm_signal_deriv',data=wm_signal1d_deriv)
h5f.create_dataset(run + '/ventricle_signal',data=ventricle_signal1d)
h5f.create_dataset(run + '/ventricle_signal_deriv',data=ventricle_signal1d_deriv)
except:
del h5f[run + '/global_signal'], h5f[run + '/global_signal_deriv'], h5f[run + '/wm_signal'], h5f[run + '/wm_signal_deriv'], h5f[run + '/ventricle_signal'], h5f[run + '/ventricle_signal_deriv']
h5f.create_dataset(run + '/global_signal',data=global_signal1d)
h5f.create_dataset(run + '/global_signal_deriv',data=global_signal1d_deriv)
h5f.create_dataset(run + '/wm_signal',data=wm_signal1d)
h5f.create_dataset(run + '/wm_signal_deriv',data=wm_signal1d_deriv)
h5f.create_dataset(run + '/ventricle_signal',data=ventricle_signal1d)
h5f.create_dataset(run + '/ventricle_signal_deriv',data=ventricle_signal1d_deriv)
##########################################################
## Obtain aCompCor regressors using first 5 components of WM and Ventricles (No GSR!)
ncomponents = 5
nTRs = len(global_signal1d)
print('Obtaining aCompCor regressors and their derivatives')
# WM time series
wmstart = time.time()
    # Obtain correlation matrix, and extract the first 5 PCs of the WM time series
tmpcov = np.corrcoef(wmdata.T)
eigenvalues, topPCs = scipy.sparse.linalg.eigs(tmpcov,k=ncomponents,which='LM')
# Now using the top n PCs
aCompCor_WM = topPCs
# wmend = time.time() - wmstart
# print('WM aCompCor took', wmend, 'seconds')
# Ventricle time series
ventstart = time.time()
    # Obtain correlation matrix, and extract the first 5 PCs of the ventricle time series
tmpcov = np.corrcoef(ventricledata.T)
eigenvalues, topPCs = scipy.sparse.linalg.eigs(tmpcov,k=ncomponents,which='LM')
# Now using the top n PCs
aCompCor_ventricles = topPCs
# ventricletime = time.time() - ventstart
# print('Ventricle aCompCor took', ventricletime, 'seconds')
# White matter signal derivative using backwards differentiation
aCompCor_WM_deriv = np.zeros(aCompCor_WM.shape)
aCompCor_WM_deriv[1:,:] = np.real(aCompCor_WM[1:,:]) - np.real(aCompCor_WM[:-1,:])
# Ventricle signal derivative
aCompCor_ventricles_deriv = np.zeros(aCompCor_ventricles.shape)
aCompCor_ventricles_deriv[1:,:] = np.real(aCompCor_ventricles[1:,:]) - np.real(aCompCor_ventricles[:-1,:])
## Write to h5py
try:
h5f.create_dataset(run + '/aCompCor_WM',data=aCompCor_WM)
h5f.create_dataset(run + '/aCompCor_WM_deriv',data=aCompCor_WM_deriv)
h5f.create_dataset(run + '/aCompCor_ventricles',data=aCompCor_ventricles)
h5f.create_dataset(run + '/aCompCor_ventricles_deriv',data=aCompCor_ventricles_deriv)
except:
del h5f[run + '/aCompCor_WM'], h5f[run + '/aCompCor_WM_deriv'], h5f[run + '/aCompCor_ventricles'], h5f[run + '/aCompCor_ventricles_deriv']
h5f.create_dataset(run + '/aCompCor_WM',data=aCompCor_WM)
h5f.create_dataset(run + '/aCompCor_WM_deriv',data=aCompCor_WM_deriv)
h5f.create_dataset(run + '/aCompCor_ventricles',data=aCompCor_ventricles)
h5f.create_dataset(run + '/aCompCor_ventricles_deriv',data=aCompCor_ventricles_deriv)
h5f.close()
|
{"hexsha": "914f832d7e7c4ab727f9c99d539b62dd0c3bd17c", "size": 33725, "ext": "py", "lang": "Python", "max_stars_repo_path": "glmScripts/vertexwise_postproc/nuisanceRegressionPipeline_VertexWise.py", "max_stars_repo_name": "McGintyLab/TaskFCActflow_release", "max_stars_repo_head_hexsha": "b277eb669cfb8ca48e98a3329ccd9c51f319ab95", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "glmScripts/vertexwise_postproc/nuisanceRegressionPipeline_VertexWise.py", "max_issues_repo_name": "McGintyLab/TaskFCActflow_release", "max_issues_repo_head_hexsha": "b277eb669cfb8ca48e98a3329ccd9c51f319ab95", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glmScripts/vertexwise_postproc/nuisanceRegressionPipeline_VertexWise.py", "max_forks_repo_name": "McGintyLab/TaskFCActflow_release", "max_forks_repo_head_hexsha": "b277eb669cfb8ca48e98a3329ccd9c51f319ab95", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-29T07:57:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T14:29:23.000Z", "avg_line_length": 62.6858736059, "max_line_length": 1597, "alphanum_fraction": 0.6738028169, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 8743}
|
include("common/constructor_validations.jl")
include("common/device_constructor_utils.jl")
include("thermalgeneration_constructor.jl")
include("hydrogeneration_constructor.jl")
include("branch_constructor.jl")
include("renewablegeneration_constructor.jl")
include("load_constructor.jl")
include("storage_constructor.jl")
|
{"hexsha": "286896aea7ddc201d118ba994d6cd79ade3e900e", "size": 321, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/devices/device_constructors/device_constructors.jl", "max_stars_repo_name": "UnofficialJuliaMirror/PowerSimulations.jl-e690365d-45e2-57bb-ac84-44ba829e73c4", "max_stars_repo_head_hexsha": "3f943bf1560e05c3df160fbf2206862d5c77c5aa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/devices/device_constructors/device_constructors.jl", "max_issues_repo_name": "UnofficialJuliaMirror/PowerSimulations.jl-e690365d-45e2-57bb-ac84-44ba829e73c4", "max_issues_repo_head_hexsha": "3f943bf1560e05c3df160fbf2206862d5c77c5aa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/devices/device_constructors/device_constructors.jl", "max_forks_repo_name": "UnofficialJuliaMirror/PowerSimulations.jl-e690365d-45e2-57bb-ac84-44ba829e73c4", "max_forks_repo_head_hexsha": "3f943bf1560e05c3df160fbf2206862d5c77c5aa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6666666667, "max_line_length": 45, "alphanum_fraction": 0.8442367601, "num_tokens": 66}
|
import torch
import numpy as np
import math
import operator
from global_random_seed import RANDOM_SEED
# make everything reproducible
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed(RANDOM_SEED)
torch.cuda.manual_seed_all(RANDOM_SEED)
# Already implemented: output the mean position/variance of the scores
# 1. figure out which axis holds the words and which axis holds the positions
# 2. for each word, softmax over the positions axis (only used for additional analysis)
# 3. per word: w = softmax(attention_scores), r = all positions = "np.arange(len(w))"
# 4. mean = weighted_average = np.average(r, weights=w)
# 5. std_dev = https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy
# 6. output the distribution per sentence, for the following words (and the first 20 sentences):
# - largest/smallest mean
# - largest/smallest std_dev
# (a minimal sketch of steps 3-5 follows directly below)
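# Minimal illustration of steps 3-5 above (a sketch; `attention_scores` stands
# in for one word's raw score vector over all sentence positions):
def _attention_position_stats(attention_scores):
    w = np.exp(attention_scores - np.max(attention_scores))
    w = w / w.sum()                  # softmax weights over positions
    r = np.arange(len(w))            # candidate positions
    mean = np.average(r, weights=w)  # weighted mean attended position
    var = np.average((r - mean) ** 2, weights=w)
    return mean, math.sqrt(var)      # weighted std dev, cf. the link in step 5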
# TODO: finish me
def plot_generator_with_softmax(whole_sentence_graph_dict):
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
for single_word in whole_sentence_graph_dict:
for head in whole_sentence_graph_dict[single_word]:
word = single_word
sns.set(style="whitegrid")
fig = plt.figure(figsize=(8, 6))
            def softmax(x):
                # shift by the max for numerical stability before exponentiating
                e_x = np.exp(x - np.max(x))
                return e_x / e_x.sum(axis=0)
# attn
data = whole_sentence_graph_dict[single_word][head]["attn"]
# attn_pos
data2 = whole_sentence_graph_dict[single_word][head]["attn_pos"]
# combined
data3 = whole_sentence_graph_dict[single_word][head]["combined"]
softmax_flag = True
if softmax_flag:
# plt.bar(range(len(data)), softmax(data), color='b', alpha=0.3, label ='attn') #, hatch="/")
# plt.bar(range(len(data2)), softmax(data2), color='g', alpha = 0.3, label ='attn_rel_pos') #, hatch="o")
# plt.bar(range(len(data3)), softmax(data3), color='r',alpha = 0.3, label ='attn_comb') #, hatch="\\")
plt.plot(range(len(data[:43])), softmax(data[:43]), '-bx', alpha=0.5, label=r'$softmax(y_{inner})$')
plt.plot(range(len(data2[:43])), softmax(data2[:43]), '-go', alpha=0.5, lw=2, ms=4,
label=r'$softmax(y_{rel\_pos})$')
plt.plot(range(len(data3[:43])), softmax(data3[:43]), '-r+', alpha=0.5, lw=2,
label=r'$softmax(y_{inner} + y_{rel\_pos})$')
ax = fig.add_subplot(111)
ax.fill_between(range(len(data[:43])), 0, softmax(data[:43]), color='dodgerblue', alpha=0.4)
ax.fill_between(range(len(data[:43])), 0, softmax(data2[:43]), color='mediumseagreen', alpha=0.4)
ax.fill_between(range(len(data[:43])), 0, softmax(data3[:43]), color='indianred', alpha=0.4)
position = whole_sentence_graph_dict[single_word][head]["position"]
graph_title = "".join(["Head ", head, " (", "Word: ", word, " Position:", position, ")"])
plt.title(graph_title)
plt.xlabel("Word Position in the Sentence")
plt.ylabel("Attention Probability")
# plt.grid()
plt.legend(fontsize=16)
graph_filename = "".join(["utils/plots/head_", head, "_", word, "_", position, "_softmax_lines.png"])
plt.savefig(graph_filename, dpi=350)
plt.close(fig)
# TODO: finish me
def plot_generator_without_softmax():
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
fig = plt.figure(figsize=(8, 6))
# Not showing the data lists here
    def softmax(x):
        # shift by the max for numerical stability before exponentiating
        e_x = np.exp(x - np.max(x))
        return e_x / e_x.sum(axis=0)
# attn
data = [-0.005951299, -0.029044457, -2.4142258, 2.190176, 1.6590111, -3.6578588, -4.684568, -3.6578588, -3.6578588,
6.084622, 13.555143, 13.555143, -4.34307, -1.1962872, -0.47124174, -0.43637118, -8.555032, -6.269103,
-4.5881658, -5.9282355, -4.585698, -5.2294097, -6.944389, -3.5933948, -4.3854713, -2.5584598, -4.093828,
-7.9165735, -4.7697864, -5.935749, -5.9056034, -3.213047, -8.166379, -5.539582, -6.075837, -4.260547,
-2.8777754, -3.6983604, -4.148365, -4.4483614, -2.085313, -3.1456637, -3.706437, 2.0111775, 2.0111775,
2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775,
2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775, 2.0111775,
2.0111775, 2.0111775]
# attn_pos
data2 = [0.77760446, -0.3654992, -0.3654992, -0.3654992, -0.3654992, -0.3654992, -0.3654992, -0.3654992, -0.3654992,
0.5600485, 0.5600485, 0.5600485, 0.5600485, 1.0015192, 1.0015192, -0.8097563, -0.14138925, 0.66161174,
-0.43525112, -0.43525112, 0.29473847, 0.29473847, 0.30402362, 0.4407499, 0.99283254, 1.0506727, 1.0493455,
1.0368179, 1.0545973, 1.1171892, 1.0651786, 1.1270455, 0.8863518, 0.58156127, 0.6494905, 0.7443865,
0.69479305, 0.85680157, 0.63184834, 0.45794958, 0.38933513, 0.1833995, 0.32618314, 0.31002286, 0.23198071,
0.53673124, 0.34084955, 0.2578649, -0.8240579, -1.3246806, -1.287403, -1.3355032, -1.1736788, -1.1917881,
-1.1917881, -1.7546477, -1.5299661, -1.5299661, -1.6786951, -1.5299661, -1.1985171, -0.49476224,
-0.4208252, -0.1696207, -0.33454028]
# combined
data3 = [0.7716532, -0.39454365, -2.779725, 1.8246768, 1.2935119, -4.023358, -5.050067, -4.023358, -4.023358,
6.6446705, 14.115191, 14.115191, -3.7830215, -0.19476795, 0.5302775, -1.2461275, -8.696421, -5.6074915,
-5.023417, -6.363487, -4.29096, -4.9346714, -6.6496506, -3.2986562, -4.2205296, -2.393518, -3.9288864,
-7.7516317, -4.6048446, -5.7708073, -5.7406616, -3.0481052, -8.452318, -5.8255215, -6.361777, -4.546487,
-3.163715, -3.9843, -4.4343047, -4.734301, -2.3712525, -3.4316032, -3.9923766, 1.725238, 1.725238,
1.725238, 1.725238, 1.725238, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384,
0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384, 0.74440384,
0.74440384, 0.74440384]
softmax_flag = False
if softmax_flag:
plt.bar(range(len(data)), softmax(data), color='b', alpha=0.3, label='attn') # , hatch="/")
plt.bar(range(len(data2)), softmax(data2), color='g', alpha=0.3, label='attn_rel_pos') # , hatch="o")
plt.bar(range(len(data3)), softmax(data3), color='r', alpha=0.3, label='attn_comb') # , hatch="\\")
plt.plot(range(len(data)), softmax(data), 'bx', alpha=0.5)
plt.plot(range(len(data2)), softmax(data2), 'go', alpha=0.5, ms=4)
plt.plot(range(len(data3)), softmax(data3), 'r+', alpha=0.5)
else:
plt.bar(range(len(data[:43])), data[:43], color='b', alpha=0.3, label=r'$y_{inner}$') # , hatch="/")
plt.bar(range(len(data2[:43])), data2[:43], color='g', alpha=0.3, label=r'$y_{rel\_pos}$') # , hatch="o")
plt.bar(range(len(data3[:43])), data3[:43], color='r', alpha=0.3,
label=r'$y_{inner} + y_{rel\_pos}$') # , hatch="\\")
plt.plot(range(len(data[:43])), data[:43], 'bx', alpha=0.5)
plt.plot(range(len(data2[:43])), data2[:43], 'go', alpha=0.5, ms=4)
plt.plot(range(len(data3[:43])), data3[:43], 'r+', alpha=0.5)
plt.title("Head 1")
plt.xlabel("Word Position in the Sentence")
plt.ylabel("Attention Weight")
# plt.grid()
plt.legend(fontsize=16)
plt.savefig('head_1_in_32_no_softmax.png', dpi=350)
# plt.close(fig)
def investigate_attention(attn, attn_pos, sentence_words, outer_vocab):
########################################
# options for the investigations BEGIN #
########################################
# choose which head to investigate
list_of_head = [1.0, 2.0, 3.0]
list_of_combinations = ["attn", "attn_pos", "combined"]
# save data for plots
# TODO: generate plots automatically
from collections import defaultdict
whole_sentence_graph_dict = defaultdict(lambda: defaultdict(dict))
of_data = dict()
in_32_data = dict()
in_16_data = dict()
for head in list_of_head:
for what_attention_to_investigate in list_of_combinations:
# options to investigate from:
# attn: basic attention from self-attention paper without any pos encodings
# attn_pos: attention with our relative pos encodings
# combined: both of the above combined
# what_attention_to_investigate = "combined"
if what_attention_to_investigate == "combined":
all_attn = attn + attn_pos.transpose(1, 2)
numpy_sentences = sentence_words.cpu().numpy()
if what_attention_to_investigate == "attn":
numpy_attention = attn.cpu().numpy()
elif what_attention_to_investigate == "attn_pos":
numpy_attention = attn_pos.cpu().numpy()
elif what_attention_to_investigate == "combined":
numpy_attention = all_attn.cpu().numpy()
# generate file to save output to based on params above
investigations_filename = "".join(
[
'saved_models/out/attention_mean_and_std_head_',
str(int(head)),
'_',
str(what_attention_to_investigate),
'.txt'
]
)
######################################
# options for the investigations END #
######################################
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0) # only difference
def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
"""
                # have to subtract the index position from the mean
# TODO: in mean and std
average = np.average(values, weights=weights)
# Fast and numerically precise:
variance = np.average((values - average) ** 2, weights=weights)
return average, math.sqrt(variance)
# following run is used for the investigations: python eval.py --model_dir saved_models/lm4
# --model checkpoint_epoch_60.pt
sentence_to_search = "They cited the case of OBJ-ORGANIZATION OBJ-ORGANIZATION OBJ-ORGANIZATION OBJ-ORGANIZATION subcontractor SUBJ-PERSON SUBJ-PERSON , who was working in Cuba on a tourist visa and possessed satellite communications equipment , who has been held in a maximum security prison since his arrest Dec 3 ."
for sentence_index, each_sentence in enumerate(numpy_sentences):
unmapped_sentence = outer_vocab.unmap(each_sentence)
                # DONE: skip the padded words in both vectors
# get id-to-word sentence representation
# print(unmapped_sentence, len(unmapped_sentence))
# find the position of the first pad
first_pad_position = None
for i, single_word in enumerate(unmapped_sentence):
if single_word == '<PAD>':
first_pad_position = i
break
if first_pad_position:
# get all words before the first pad appears
unmapped_sentence_final = unmapped_sentence[:first_pad_position] # [:first_pad_position]
# iterate over attention scores
# attention heads, 1: +0 2: +50 3: +100. i.e. head 2: numpy_attention[sentence_index+50]...
if head == 1.0:
unpadded_attention = numpy_attention[sentence_index][:first_pad_position]
elif head == 2.0:
if len(numpy_attention) != 27: # skip the last batch of size 27
unpadded_attention = numpy_attention[sentence_index + 50][:first_pad_position]
elif head == 3.0:
if len(numpy_attention) != 27: # skip the last batch of size 27
unpadded_attention = numpy_attention[sentence_index + 100][:first_pad_position]
else:
unmapped_sentence_final = unmapped_sentence
# attention heads, 1: +0 2: +50 3: +100. i.e. head 2: numpy_attention[sentence_index+50]...
if head == 1.0:
unpadded_attention = numpy_attention[sentence_index] # numpy_attention[sentence_index+50]
elif head == 2.0:
if len(numpy_attention) != 27: # skip the last batch of size 27
unpadded_attention = numpy_attention[
sentence_index + 50] # numpy_attention[sentence_index+50]
elif head == 3.0:
# print(numpy_attention)
if len(numpy_attention) != 27: # skip the last batch of size 27
unpadded_attention = numpy_attention[
sentence_index + 100] # numpy_attention[sentence_index+50]
# TODO: get rid of this later
# print(" ".join(unmapped_sentence))
if " ".join(unmapped_sentence_final) == sentence_to_search:
# make sure the slicing was correct on both tensors
assert len(unmapped_sentence_final) == len(unpadded_attention)
# print(len(unpadded_attention), len(unmapped_sentence_final))
print(unmapped_sentence_final, len(unmapped_sentence_final))
# print(unpadded_attention, len(unpadded_attention))
print()
all_means = []
all_stds = []
highest_attention_score_per_word = dict()
for i, each_word in enumerate(unmapped_sentence_final):
# print("WORD:", each_word)
# print("POS. VECTOR:", unpadded_attention[i])
# for each word pos vector, select the highest attention score to other word but itself
# mask out the word itself
if first_pad_position:
current_word_pos_vector = unpadded_attention[i][:first_pad_position]
else:
current_word_pos_vector = unpadded_attention[i]
indices = [i]
mask = np.zeros(current_word_pos_vector.size, dtype=bool)
mask[indices] = True
a = np.ma.array(current_word_pos_vector, mask=mask)
# select highest attention score for given word
# print(len(a))
highest_attention_index, highest_attention_score = max(enumerate(a), key=operator.itemgetter(1))
highest_attention_score_per_word[i] = (highest_attention_index, highest_attention_score)
# print(i, highest_attention_index, highest_attention_score)
# get weighted average
w = softmax(current_word_pos_vector)
# print(w, len(w))
r = np.arange(len(w)) - i
weighted_average_and_std = weighted_avg_and_std(r, w)
# print("WORD:", each_word)
# print("Weighted Average:", weighted_average_and_std[0])
# print("Weighted Standard Deviation:", weighted_average_and_std[1])
# print(i, first_pad_position, unmapped_sentence_final)
if first_pad_position:
if i < first_pad_position:
all_means.append(weighted_average_and_std[0])
all_stds.append(weighted_average_and_std[1])
else:
all_means.append(weighted_average_and_std[0])
all_stds.append(weighted_average_and_std[1])
# highest_attention_score_per_word[i] = max(enumerate(all_means), key=operator.itemgetter(1))
# print(all_means)
smallest_mean_index, smallest_mean = min(enumerate(all_means), key=operator.itemgetter(1))
biggest_mean_index, biggest_mean = max(enumerate(all_means), key=operator.itemgetter(1))
print()
print("without padding")
print("smallest_mean:", smallest_mean)
print("smallest_mean_word:", unmapped_sentence_final[smallest_mean_index], "index:",
smallest_mean_index)
print("^ av_mean:", all_means[smallest_mean_index], "av_std:", all_stds[smallest_mean_index])
print(
"highest attention score:", highest_attention_score_per_word[smallest_mean_index][1], "word:",
unmapped_sentence_final[highest_attention_score_per_word[smallest_mean_index][0]],
"index:", highest_attention_score_per_word[smallest_mean_index][0])
print()
print("biggest_mean:", biggest_mean)
print("biggest_mean_word:", unmapped_sentence_final[biggest_mean_index], "index:",
biggest_mean_index)
print("^ av_mean:", all_means[biggest_mean_index], "av_std:", all_stds[biggest_mean_index])
print(
"highest attention score:", highest_attention_score_per_word[biggest_mean_index][1], "word:",
unmapped_sentence_final[highest_attention_score_per_word[biggest_mean_index][0]],
"index:", highest_attention_score_per_word[biggest_mean_index][0])
print()
smallest_std_index, smallest_std = min(enumerate(all_stds), key=operator.itemgetter(1))
biggest_std_index, biggest_std = max(enumerate(all_stds), key=operator.itemgetter(1))
print("smallest_std:", smallest_std)
print("smallest_std_word:", unmapped_sentence_final[smallest_std_index], "index:",
smallest_std_index)
print("^ av_mean:", all_means[smallest_std_index], "av_std:", all_stds[smallest_std_index])
print(
"highest attention score:", highest_attention_score_per_word[smallest_std_index][1], "word:",
unmapped_sentence_final[highest_attention_score_per_word[smallest_std_index][0]],
"index:", highest_attention_score_per_word[smallest_std_index][0])
print()
print("biggest_std:", biggest_std)
print("biggest_std_word:", unmapped_sentence_final[biggest_std_index], "index:", biggest_std_index)
print("^ av_mean:", all_means[biggest_std_index], "av_std:", all_stds[biggest_std_index])
print(
"highest attention score:", highest_attention_score_per_word[biggest_std_index][1], "word:",
unmapped_sentence_final[highest_attention_score_per_word[biggest_std_index][0]],
"index:", highest_attention_score_per_word[biggest_std_index][0])
print()
with open(investigations_filename, 'a') as outfile:
# print each att score
outfile.write(" ".join(
[" ".join(unmapped_sentence_final), "\n", str(len(unmapped_sentence_final)), "\n"]))
for i, element in enumerate(unmapped_sentence_final):
outfile.write(" ".join(["index:", str(i), " /// word:", str(element), "\n"]))
outfile.write(" ".join(
["Attention vector:", "[", ", ".join([str(x) for x in unpadded_attention[i]]), "]",
"\n",
str(len(unpadded_attention)), "\n",
"av_mean: ", str(all_means[i]), "av_str: ", str(all_stds[i]), "\n"]))
                            outfile.write(
                                " ".join(
                                    # fix: softmax this word's own score vector, not the stale
                                    # `current_word_pos_vector` left over from the earlier loop
                                    ["[", ", ".join([str(x) for x in softmax(
                                        unpadded_attention[i][:first_pad_position]
                                        if first_pad_position else unpadded_attention[i])]), "]", "\n"]))
if what_attention_to_investigate == "attn":
if head == 1.0:
whole_sentence_graph_dict[str(element)]["1"]["position"] = str(i)
whole_sentence_graph_dict[str(element)]["1"]["attn"] = unpadded_attention[i]
elif head == 2.0:
whole_sentence_graph_dict[str(element)]["2"]["position"] = str(i)
whole_sentence_graph_dict[str(element)]["2"]["attn"] = unpadded_attention[i]
elif head == 3.0:
whole_sentence_graph_dict[str(element)]["3"]["position"] = str(i)
whole_sentence_graph_dict[str(element)]["3"]["attn"] = unpadded_attention[i]
elif what_attention_to_investigate == "attn_pos":
if head == 1.0:
whole_sentence_graph_dict[str(element)]["1"]["attn_pos"] = unpadded_attention[i]
elif head == 2.0:
whole_sentence_graph_dict[str(element)]["2"]["attn_pos"] = unpadded_attention[i]
elif head == 3.0:
whole_sentence_graph_dict[str(element)]["3"]["attn_pos"] = unpadded_attention[i]
elif what_attention_to_investigate == "combined":
if head == 1.0:
whole_sentence_graph_dict[str(element)]["1"]["combined"] = unpadded_attention[i]
elif head == 2.0:
whole_sentence_graph_dict[str(element)]["2"]["combined"] = unpadded_attention[i]
elif head == 3.0:
whole_sentence_graph_dict[str(element)]["3"]["combined"] = unpadded_attention[i]
# outfile.write(" ".join([" ".join([str(x) for x in unpadded_attention]), "\n", str(len(unpadded_attention)), "\n"]))
outfile.write(
" ".join(
[" ".join(unmapped_sentence_final), "\n", str(len(unmapped_sentence_final)), "\n"]))
outfile.write(" ".join(["without padding", "\n"]))
outfile.write(" ".join(["smallest_mean:", str(smallest_mean), "\n"]))
outfile.write(" ".join(
["smallest_mean_word:", str(unmapped_sentence_final[smallest_mean_index]), " index:",
str(smallest_mean_index), "\n"]))
outfile.write(" ".join(
["^ av_mean:", str(all_means[smallest_mean_index]), "av_std:",
str(all_stds[smallest_mean_index]),
"\n"]))
outfile.write(" ".join([
"highest attention score:", str(highest_attention_score_per_word[smallest_mean_index][1]),
"word:",
str(unmapped_sentence_final[highest_attention_score_per_word[smallest_mean_index][0]]),
"index:", str(highest_attention_score_per_word[smallest_mean_index][0]), "\n", "\n"]))
outfile.write(" ".join(["biggest_mean:", str(biggest_mean), "\n"]))
outfile.write(" ".join(
["biggest_mean_word:", str(unmapped_sentence_final[biggest_mean_index]), " index:",
str(biggest_mean_index), "\n"]))
outfile.write(" ".join(["^ av_mean:", str(all_means[biggest_mean_index]), "av_std:",
str(all_stds[biggest_mean_index]), "\n"]))
outfile.write(" ".join([
"highest attention score:", str(highest_attention_score_per_word[biggest_mean_index][1]),
"word:",
str(unmapped_sentence_final[highest_attention_score_per_word[biggest_mean_index][0]]),
"index:", str(highest_attention_score_per_word[biggest_mean_index][0]), "\n", "\n"]))
outfile.write(" ".join(["smallest_std:", str(smallest_std), "\n"]))
outfile.write(" ".join(
["smallest_std_word:", str(unmapped_sentence_final[smallest_std_index]), " index:",
str(smallest_std_index), "\n"]))
outfile.write(" ".join(["^ av_mean:", str(all_means[smallest_std_index]), "av_std:",
str(all_stds[smallest_std_index]), "\n"]))
outfile.write(" ".join([
"highest attention score:", str(highest_attention_score_per_word[smallest_std_index][1]),
"word:",
str(unmapped_sentence_final[highest_attention_score_per_word[smallest_std_index][0]]),
"index:", str(highest_attention_score_per_word[smallest_std_index][0]), "\n", "\n"]))
outfile.write(" ".join(["biggest_std:", str(biggest_std), "\n"]))
outfile.write(
" ".join(["biggest_std_word:", str(unmapped_sentence_final[biggest_std_index]), " index:",
str(biggest_std_index), "\n"]))
outfile.write(" ".join(["^ av_mean:", str(all_means[biggest_std_index]), "av_std:",
str(all_stds[biggest_std_index]), "\n"]))
outfile.write(" ".join([
"highest attention score:", str(highest_attention_score_per_word[biggest_std_index][1]),
"word:",
str(unmapped_sentence_final[highest_attention_score_per_word[biggest_std_index][0]]),
"index:", str(highest_attention_score_per_word[biggest_std_index][0]), "\n", "\n"]))
outfile.write("\n")
# automatically generate plots
plot_generator_with_softmax(whole_sentence_graph_dict)
|
{"hexsha": "db7d424f497a2a67c94bc684116c12227c72feb1", "size": 27673, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/attention_investigation.py", "max_stars_repo_name": "ivan-bilan/tac-self-attention", "max_stars_repo_head_hexsha": "8dd583ac960716bbf0c645c23f2c50bd36ca042a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2018-11-19T20:31:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T07:23:56.000Z", "max_issues_repo_path": "utils/attention_investigation.py", "max_issues_repo_name": "ivan-bilan/Relation-Extraction-Transformer", "max_issues_repo_head_hexsha": "8dd583ac960716bbf0c645c23f2c50bd36ca042a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-25T05:34:11.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-14T01:34:31.000Z", "max_forks_repo_path": "utils/attention_investigation.py", "max_forks_repo_name": "ivan-bilan/tac-self-attention", "max_forks_repo_head_hexsha": "8dd583ac960716bbf0c645c23f2c50bd36ca042a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-12-10T04:52:18.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-20T09:07:56.000Z", "avg_line_length": 55.6800804829, "max_line_length": 330, "alphanum_fraction": 0.544971633, "include": true, "reason": "import numpy", "num_tokens": 6435}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 by Inria
Authored by Mostafa Sadeghi (mostafa.sadeghi@inria.fr)
License agreement in LICENSE.txt
"""
import numpy as np
import torch
import torch.nn as nn
#%% The following implements the MCEM algorithm for audio-only VAE
class MCEM_algo:
def __init__(self, X=None, W=None, H=None, Z=None, v=None, decoder=None,
niter_MCEM=100, niter_MH=40, burnin=30, var_MH=0.01):
self.X = X # Mixture STFT, shape (F,N)
self.W = W # NMF dictionary matrix, shape (F, K)
self.H = H # NMF activation matrix, shape (K, N)
self.V = self.W @ self.H # Noise variance, shape (F, N)
self.Z = Z # Last draw of the latent variables, shape (D, N)
        self.decoder = decoder # VAE decoder, PyTorch model
self.niter_MCEM = niter_MCEM # Maximum number of MCEM iterations
self.niter_MH = niter_MH # Number of iterations for the MH algorithm
# of the E-step
self.burnin = burnin # Burn-in period for the MH algorithm of the
# E-step
self.var_MH = var_MH # Variance of the proposal distribution of the MH
# algorithm
self.a = np.ones((1,self.X.shape[1])) # gain parameters, shape (1,N)
# output of the decoder with self.Z as input, shape (F, N)
#removed transpose from line below
        if torch.cuda.is_available():
            self.Z_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(self.Z.T))).T
        else:
            self.Z_mapped_decoder = self.torch2num(self.decoder(self.num2torch(self.Z.T))).T
self.speech_var = (self.Z_mapped_decoder*self.a) # apply gain
def num2torch(self, x):
y = torch.from_numpy(x.astype(np.float32))
return y
def torch2num(self, x):
y = x.detach().numpy()
return y
def num2torch_cuda(self, x):
y = torch.from_numpy(x.astype(np.float32))
return y.cuda()
def torch2num_cuda(self, x):
y = x.cpu().detach().numpy()
return y
def metropolis_hastings(self, niter_MH=None, burnin=None):
        if niter_MH is None:
            niter_MH = self.niter_MH
        if burnin is None:
            burnin = self.burnin
F, N = self.X.shape # 258, 124 - power spec dim
D = self.Z.shape[0] # 32 - latent dim
Z_sampled = np.zeros((D, N, niter_MH - burnin)) # (32, 124, 10)
cpt = 0
for n in np.arange(niter_MH):
# self.Z - (32, 124)
# self.Z_prime - (32, 124)
Z_prime = self.Z + np.sqrt(self.var_MH)*np.random.randn(D,N)
            if torch.cuda.is_available():
                Z_prime_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(Z_prime.T))).T  # shape (F, N), e.g. (513, 124)
            else:
                Z_prime_mapped_decoder = self.torch2num(self.decoder(self.num2torch(Z_prime.T))).T  # shape (F, N), e.g. (513, 124)
# shape (F, N)
speech_var_prime = (Z_prime_mapped_decoder*self.a) # apply gain #(513, 124)
# self.V and self.speech_var should be of same shape
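            # Log-acceptance ratio of the symmetric Gaussian random-walk proposal:
            # with x_fn | z ~ Nc(0, v_fn + s_fn(z)) and prior z_n ~ N(0, I),
            #   log a_n = sum_f [ log(v+s) - log(v+s') + (1/(v+s) - 1/(v+s')) |x|^2 ]
            #             + 0.5 * (||z_n||^2 - ||z'_n||^2),
            # which is exactly what acc_prob accumulates below, one entry per frame.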
acc_prob = ( np.sum( np.log(self.V + self.speech_var)
- np.log(self.V + speech_var_prime)
+ ( 1/(self.V + self.speech_var)
- 1/(self.V + speech_var_prime) )
* np.abs(self.X)**2, axis=0)
+ .5*np.sum( self.Z**2 - Z_prime**2 , axis=0) )
is_acc = np.log(np.random.rand(1,N)) < acc_prob
is_acc = is_acc.reshape((is_acc.shape[1],))
self.Z[:,is_acc] = Z_prime[:,is_acc]
            if torch.cuda.is_available():
                self.Z_mapped_decoder = self.torch2num_cuda(self.decoder(self.num2torch_cuda(self.Z.T))).T
            else:
                self.Z_mapped_decoder = self.torch2num(self.decoder(self.num2torch(self.Z.T))).T
self.speech_var = self.Z_mapped_decoder*self.a
if n > burnin - 1:
Z_sampled[:,:,cpt] = self.Z
cpt += 1
return Z_sampled
def run(self, hop, wlen, win, tol=1e-4):
F, N = self.X.shape
X_abs_2 = np.abs(self.X)**2
cost_after_M_step = np.zeros((self.niter_MCEM, 1))
for n in np.arange(self.niter_MCEM):
# MC-Step
# print('Metropolis-Hastings')
Z_sampled = self.metropolis_hastings(self.niter_MH, self.burnin)
Z_sampled_mapped_decoder = np.zeros((F, N, self.niter_MH-self.burnin))
for i in range(self.niter_MH-self.burnin):
                if torch.cuda.is_available():  # fix: was `torch.cuda.is_available` (function reference, always truthy)
                    Z_sampled_mapped_decoder[:,:,i] = self.torch2num_cuda(self.decoder(self.num2torch_cuda(Z_sampled[:,:,i].T))).T
                else:
                    Z_sampled_mapped_decoder[:,:,i] = self.torch2num(self.decoder(self.num2torch(Z_sampled[:,:,i].T))).T
speech_var_multi_samples = (Z_sampled_mapped_decoder*
self.a[:,:,None]) # shape (F,N,R)
# M-Step
V_plus_Z_mapped = self.V[:,:,None] + speech_var_multi_samples
# print('Update W')
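            # Multiplicative IS-divergence-style update, marginalised over the R
            # latent samples: W <- W * sqrt( ((|X|^2 * sum_r V_r^-2) @ H.T)
            #                               / ((sum_r V_r^-1) @ H.T) ),
            # where V_r = W @ H + s_r is the total variance under sample r; H and
            # the gain a receive the analogous updates below.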
self.W = self.W*(
((X_abs_2*np.sum(V_plus_Z_mapped**-2,
axis=-1)) @ self.H.T)
/ (np.sum(V_plus_Z_mapped**-1, axis=-1) @ self.H.T)
)**.5
self.V = self.W @ self.H
V_plus_Z_mapped = self.V[:,:,None] + speech_var_multi_samples
# print('Update H')
self.H = self.H*(
(self.W.T @ (X_abs_2 * np.sum(V_plus_Z_mapped**-2,
axis=-1)))
/ (self.W.T @ np.sum(V_plus_Z_mapped**-1, axis=-1))
)**.5
self.V = self.W @ self.H
V_plus_Z_mapped = self.V[:,:,None] + speech_var_multi_samples
# print('Update gain')
self.a = self.a*(
(np.sum(X_abs_2 * np.sum(
Z_sampled_mapped_decoder*(V_plus_Z_mapped**-2),
axis=-1), axis=0) )
/(np.sum(np.sum(
Z_sampled_mapped_decoder*(V_plus_Z_mapped**-1),
axis=-1), axis=0) ) )**.5
speech_var_multi_samples = (Z_sampled_mapped_decoder*
self.a[:,:,None]) # shape (F,N,R)
V_plus_Z_mapped = self.V[:,:,None] + speech_var_multi_samples
cost_after_M_step[n] = np.mean(
np.log(V_plus_Z_mapped)
+ X_abs_2[:,:,None]/V_plus_Z_mapped )
print("iter %d/%d - cost=%.4f\n" %
(n+1, self.niter_MCEM, cost_after_M_step[n]))
if n>0 and np.abs(cost_after_M_step[n-1] - cost_after_M_step[n]) < tol:
print('tolerance achieved')
break
return cost_after_M_step, n
def separate(self, niter_MH=None, burnin=None):
        if niter_MH is None:
            niter_MH = self.niter_MH
        if burnin is None:
            burnin = self.burnin
F, N = self.X.shape
Z_sampled = self.metropolis_hastings(niter_MH, burnin)
Z_sampled_mapped_decoder = np.zeros((F, N, self.niter_MH-self.burnin))
for i in range(self.niter_MH-self.burnin):
            if torch.cuda.is_available():
                Z_sampled_mapped_decoder[:,:,i] = self.torch2num_cuda(self.decoder(self.num2torch_cuda(Z_sampled[:,:,i].T))).T
            else:
                Z_sampled_mapped_decoder[:,:,i] = self.torch2num(self.decoder(self.num2torch(Z_sampled[:,:,i].T))).T
speech_var_multi_samples = (Z_sampled_mapped_decoder*
self.a[:,:,None]) # shape (F,N,R)
self.S_hat = np.mean(
(speech_var_multi_samples/(speech_var_multi_samples
+ self.V[:,:,None])),
axis=-1) * self.X
self.N_hat = np.mean(
(self.V[:,:,None]/(speech_var_multi_samples
+ self.V[:,:,None])) , axis=-1) * self.X
#%% The following implements the MCEM algorithm for audio-visual CVAE
class VAE_Decoder_Eval(nn.Module):
def __init__(self, vae):
super(VAE_Decoder_Eval, self).__init__()
self.latent_dim = vae.latent_dim
self.activation = vae.activation
self.output_layer = None
self.build(vae)
def build(self, vae):
self.output_layer = vae.output_layer
self.decoder_layerZ = vae.decoder_layerZ
def forward(self, z):
zv = self.decoder_layerZ(z)
hdd = torch.tanh(zv)
return torch.exp(self.output_layer(hdd))
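# Hedged usage sketch with synthetic data (the toy decoder and all shapes below
# are assumptions for illustration only, not the real trained VAE):
def _mcem_smoke_test():
    F, N, K, D = 64, 16, 4, 8
    X = np.random.randn(F, N) + 1j * np.random.randn(F, N)   # stand-in mixture STFT
    decoder = nn.Sequential(nn.Linear(D, F), nn.Softplus())  # toy positive decoder
    if torch.cuda.is_available():
        decoder = decoder.cuda()
    mcem = MCEM_algo(X=X, W=np.abs(np.random.randn(F, K)),
                     H=np.abs(np.random.randn(K, N)),
                     Z=np.random.randn(D, N), decoder=decoder,
                     niter_MCEM=2, niter_MH=5, burnin=3)
    mcem.run(hop=None, wlen=None, win=None)  # hop/wlen/win are unused by run()
    mcem.separate()
    return mcem.S_hat, mcem.N_hat  # Wiener-like speech and noise estimates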
|
{"hexsha": "628f49d5f7d1917639ba5db10588f0caec0e67cf", "size": 8877, "ext": "py", "lang": "Python", "max_stars_repo_path": "asteroid/masknn/MCEM_algo.py", "max_stars_repo_name": "flyingleafe/asteroid", "max_stars_repo_head_hexsha": "1c3c68ffc83f4b0bf7b00893083e4eff1f577b88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "asteroid/masknn/MCEM_algo.py", "max_issues_repo_name": "flyingleafe/asteroid", "max_issues_repo_head_hexsha": "1c3c68ffc83f4b0bf7b00893083e4eff1f577b88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "asteroid/masknn/MCEM_algo.py", "max_forks_repo_name": "flyingleafe/asteroid", "max_forks_repo_head_hexsha": "1c3c68ffc83f4b0bf7b00893083e4eff1f577b88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1674208145, "max_line_length": 163, "alphanum_fraction": 0.5487214149, "include": true, "reason": "import numpy", "num_tokens": 2284}
|
"""
Code edited from:
https://pythonprogramming.net/training-deep-q-learning-dqn-reinforcement-learning-python-tutorial/?completed=/deep-q-learning-dqn-reinforcement-learning-python-tutorial/
Train the Agent in a simulated environment -> Much faster than training by playing on the emulator directly.
"""
import os
import numpy as np
import random
import tensorflow as tf
import time
from tqdm import tqdm
from DQNAgentSimulation import DQNAgentSimulation
from DQNAgentSimulation import BlobEnv
# Environment settings
EPISODES = 50000
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001
MODEL_NAME = 'Yoyo'
# epsilon = 0
# EPSILON_DECAY = 0
# MIN_EPSILON = 0
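# With EPSILON_DECAY = 0.99975, epsilon falls from 1 to MIN_EPSILON = 0.001
# after roughly ln(0.001) / ln(0.99975) ~ 27,600 episodes, i.e. just past the
# midpoint of the 50,000-episode run.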
# Stats settings
AGGREGATE_STATS_EVERY = 1 # episodes
m = DQNAgentSimulation().create_model()
agent = DQNAgentSimulation()
env = BlobEnv()
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.random.set_seed(1)
# Create models folder
if not os.path.isdir('models_simulation'):
os.makedirs('models_simulation')
# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
# Restarting episode - reset episode reward and step number
episode_reward = 0
step = 1
# Reset environment and get initial state
current_state = env.reset()
path = [(current_state[0], current_state[1], None)]
done = False
while not done:
# This part stays mostly the same, the change is to query a model for Q values
if np.random.random() > epsilon:
# Get action from Q table
action = np.argmax(agent.get_qs(current_state))
else:
# Get random action
action = np.random.randint(0, env.ACTION_SPACE_SIZE)
print('Current state:', current_state)
print('Location memory:', env.location_memory)
print('Action:', action)
new_state, reward, done, same = env.step(current_state, action)
path.append((new_state[0], new_state[1], action))
print('New state:', new_state)
print('Reward: ', reward)
print('Done: ', done)
print('Step: ', step)
print('Path:', path)
print('Epsilon: ', epsilon)
print('-------')
# Transform new continuous state to new discrete state and count reward
episode_reward += reward
# if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
# env.render()
# Every step we update replay memory and train main network
agent.update_replay_memory((current_state, action, reward, new_state, done))
agent.train(done)
current_state = new_state
step += 1
# Append episode reward to a list and log stats (every given number of episodes)
ep_rewards.append(episode_reward)
average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
agent.model.save(f'models_simulation/{MODEL_NAME}__{max_reward:_>7.2f}max_'
f'{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
# Decay epsilon
if epsilon > MIN_EPSILON:
epsilon *= EPSILON_DECAY
epsilon = max(MIN_EPSILON, epsilon)
|
{"hexsha": "7a20c8b548c5b183ba2c5783180db9acf530cedf", "size": 3403, "ext": "py", "lang": "Python", "max_stars_repo_path": "OutOfMyRoom/train_simulation.py", "max_stars_repo_name": "uncleman11/pokemonBot", "max_stars_repo_head_hexsha": "86975557ecef0ffe8a0f154f21ba2bf3fab69b8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OutOfMyRoom/train_simulation.py", "max_issues_repo_name": "uncleman11/pokemonBot", "max_issues_repo_head_hexsha": "86975557ecef0ffe8a0f154f21ba2bf3fab69b8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OutOfMyRoom/train_simulation.py", "max_forks_repo_name": "uncleman11/pokemonBot", "max_forks_repo_head_hexsha": "86975557ecef0ffe8a0f154f21ba2bf3fab69b8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3362068966, "max_line_length": 169, "alphanum_fraction": 0.6829268293, "include": true, "reason": "import numpy", "num_tokens": 843}
|
program number_guess
!! not every compiler inits a random seed (gfortran yes, flang no)
use, intrinsic:: iso_fortran_env, only: stdin=>input_unit, stdout=>output_unit
use numerical, only: isprime
implicit none
integer :: secret, i
integer :: guess = 0  ! initialised so asking for a hint before any guess is safe
real :: r
character(20) :: msg, buf
call random_init(.false., .false.)
call random_number(r)
secret = int((r*99+1))
msg = 'guess my number '
main: do
write(stdout,'(A)', advance='no') msg
read(stdin,*, iostat=i) buf
if (i/=0) stop 'goodbye'
if (buf == 'h' .or. buf == 'hint' .or. buf == 'help') then
if (isprime(secret)) then
print *,'my secret number is prime'
else
print *,'my secret number is NOT prime'
endif
else
read(buf,*, iostat=i) guess
if (i/=0) stop 'goodbye'
endif
if(guess < secret) then
msg = 'try bigger '
elseif(guess > secret) then
msg = 'try smaller '
elseif(guess == secret) then
print *,'you guessed my secret number -- hooray!'
exit main
else
error stop 'impossible'
endif
enddo main
end program
|
{"hexsha": "e9ed88adb2b2cfdd2268f9c2f99e2c3b87cc8d19", "size": 1042, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "number_guess.f90", "max_stars_repo_name": "scivision/preschool-coding", "max_stars_repo_head_hexsha": "0cd63b0693b31b1381aaa484a3b8b3671b036270", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "number_guess.f90", "max_issues_repo_name": "scivision/preschool-coding", "max_issues_repo_head_hexsha": "0cd63b0693b31b1381aaa484a3b8b3671b036270", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "number_guess.f90", "max_forks_repo_name": "scivision/preschool-coding", "max_forks_repo_head_hexsha": "0cd63b0693b31b1381aaa484a3b8b3671b036270", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.2653061224, "max_line_length": 78, "alphanum_fraction": 0.6449136276, "num_tokens": 321}
|
"""Summarizes `astro_utils` test results."""
import os
import numpy as np
import pandas as pd
_DATA_DIR_PATH = r'C:\Users\Harold\Desktop\NFC\Data\USNO Tables'
# _DATA_DIR_PATH = '/Users/Harold/Desktop/NFC/Data/USNO Tables'
_CSV_FILE_NAME = 'Rise Set Data.csv'
def _main():
csv_file_path = os.path.join(_DATA_DIR_PATH, _CSV_FILE_NAME)
df = pd.read_csv(csv_file_path)
r = _get_diff_data(df, 'Risings')
s = _get_diff_data(df, 'Settings')
keys = (
'Event Type', 'Number of Events', 'Percent Diffs 0',
'Percent Diffs 1', 'Percent Diffs 2 or More')
items = [_item(keys[i], i, r, s) for i in range(5)]
    # pd.DataFrame.from_items was removed in pandas 1.0; a plain dict preserves
    # column insertion order on Python 3.7+
    df = pd.DataFrame(dict(items))
print(df)
def _get_diff_data(df, name):
column_names = _get_diff_column_names(name)
d = df[column_names].sum()
n = d.sum()
counts = np.array([d[2], d[1] + d[3], d[0] + d[4]])
percentages = 100. * counts / float(n)
return [name[:-1], n] + list(percentages)
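# `counts` above buckets the signed diffs -2..2 as [exact, off by one, off by
# two or more]; e.g. d = [1, 3, 90, 5, 1] gives counts = [90, 8, 2], reported
# as percentages of n = 100.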
def _get_diff_column_names(name):
return [name + ' Diff ' + str(d) for d in range(-2, 3)]
def _item(name, index, r, s):
return (name, (r[index], s[index]))
if __name__ == '__main__':
_main()
|
{"hexsha": "3653a58c0e0b4216235f0b1413d6cc18feccf369", "size": 1219, "ext": "py", "lang": "Python", "max_stars_repo_path": "vesper/ephem/tests/scripts/summarize_astro_utils_test_results.py", "max_stars_repo_name": "RichardLitt/Vesper", "max_stars_repo_head_hexsha": "5360844f42a06942e7684121c650b08cf8616285", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-26T23:03:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-26T23:03:17.000Z", "max_issues_repo_path": "vesper/ephem/tests/scripts/summarize_astro_utils_test_results.py", "max_issues_repo_name": "Tubbz-alt/Vesper", "max_issues_repo_head_hexsha": "76e5931ca0c7fbe070c53b1362ec246ec9007beb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vesper/ephem/tests/scripts/summarize_astro_utils_test_results.py", "max_forks_repo_name": "Tubbz-alt/Vesper", "max_forks_repo_head_hexsha": "76e5931ca0c7fbe070c53b1362ec246ec9007beb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.0, "max_line_length": 64, "alphanum_fraction": 0.6177194422, "include": true, "reason": "import numpy", "num_tokens": 370}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) BaseDetection, Inc. and its affiliates. All Rights Reserved
import json
import os
import numpy as np
PERSON_CLASSES = ['background', 'person']
class Image(object):
def __init__(self, mode):
self.ID = None
self._width = None
self._height = None
self.dtboxes = None
self.gtboxes = None
self.eval_mode = mode
self._ignNum = None
self._gtNum = None
self._dtNum = None
def load(self, record, body_key, head_key, class_names, gtflag):
"""
:meth: read the object from a dict
"""
if "ID" in record and self.ID is None:
self.ID = record['ID']
if "width" in record and self._width is None:
self._width = record["width"]
if "height" in record and self._height is None:
self._height = record["height"]
if gtflag:
self._gtNum = len(record["gtboxes"])
body_bbox, head_bbox = self.load_gt_boxes(record, 'gtboxes', class_names)
if self.eval_mode == 0:
self.gtboxes = body_bbox
self._ignNum = (body_bbox[:, -1] == -1).sum()
elif self.eval_mode == 1:
self.gtboxes = head_bbox
self._ignNum = (head_bbox[:, -1] == -1).sum()
elif self.eval_mode == 2:
gt_tag = np.array(
[body_bbox[i, -1] != -1 and head_bbox[i, -1] != -1
for i in range(len(body_bbox))]
)
self._ignNum = (gt_tag == 0).sum()
self.gtboxes = np.hstack(
(body_bbox[:, :-1], head_bbox[:, :-1], gt_tag.reshape(-1, 1))
)
else:
raise Exception('Unknown evaluation mode!')
if not gtflag:
self._dtNum = len(record["dtboxes"])
if self.eval_mode == 0:
self.dtboxes = self.load_det_boxes(record, 'dtboxes', body_key, 'score')
elif self.eval_mode == 1:
self.dtboxes = self.load_det_boxes(record, 'dtboxes', head_key, 'score')
elif self.eval_mode == 2:
body_dtboxes = self.load_det_boxes(record, 'dtboxes', body_key)
head_dtboxes = self.load_det_boxes(record, 'dtboxes', head_key, 'score')
self.dtboxes = np.hstack((body_dtboxes, head_dtboxes))
else:
raise Exception('Unknown evaluation mode!')
def compare_caltech(self, thres):
"""
:meth: match the detection results with the groundtruth by Caltech matching strategy
:param thres: iou threshold
:type thres: float
        :return: a list of tuples (dtbox, match_flag, imageID), in the descending sort of dtbox.score
"""
if self.dtboxes is None or self.gtboxes is None:
return list()
dtboxes = self.dtboxes if self.dtboxes is not None else list()
gtboxes = self.gtboxes if self.gtboxes is not None else list()
dt_matched = np.zeros(dtboxes.shape[0])
gt_matched = np.zeros(gtboxes.shape[0])
dtboxes = np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True))
gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True))
if len(dtboxes):
overlap_iou = self.box_overlap_opr(dtboxes, gtboxes, True)
overlap_ioa = self.box_overlap_opr(dtboxes, gtboxes, False)
else:
return list()
scorelist = list()
for i, dt in enumerate(dtboxes):
maxpos = -1
maxiou = thres
for j, gt in enumerate(gtboxes):
if gt_matched[j] == 1:
continue
if gt[-1] > 0:
overlap = overlap_iou[i][j]
if overlap > maxiou:
maxiou = overlap
maxpos = j
else:
if maxpos >= 0:
break
else:
overlap = overlap_ioa[i][j]
if overlap > thres:
maxiou = overlap
maxpos = j
if maxpos >= 0:
if gtboxes[maxpos, -1] > 0:
gt_matched[maxpos] = 1
dt_matched[i] = 1
scorelist.append((dt, 1, self.ID))
else:
dt_matched[i] = -1
else:
dt_matched[i] = 0
scorelist.append((dt, 0, self.ID))
return scorelist
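    # Matching semantics: detections are visited in descending score order and
    # greedily claim the best still-unmatched GT with overlap above `thres`;
    # hits on ignore regions (tag -1, matched by IoA) are excluded from scoring.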
def compare_caltech_union(self, thres):
"""
:meth: match the detection results with the groundtruth by Caltech matching strategy
:param thres: iou threshold
:type thres: float
        :return: a list of tuples (dtbox, match_flag, imageID), in the descending sort of dtbox.score
"""
dtboxes = self.dtboxes if self.dtboxes is not None else list()
gtboxes = self.gtboxes if self.gtboxes is not None else list()
if len(dtboxes) == 0:
return list()
dt_matched = np.zeros(dtboxes.shape[0])
gt_matched = np.zeros(gtboxes.shape[0])
dtboxes = np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True))
gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True))
dt_body_boxes = np.hstack((dtboxes[:, :4], dtboxes[:, -1][:, None]))
dt_head_boxes = dtboxes[:, 4:8]
gt_body_boxes = np.hstack((gtboxes[:, :4], gtboxes[:, -1][:, None]))
gt_head_boxes = gtboxes[:, 4:8]
overlap_iou = self.box_overlap_opr(dt_body_boxes, gt_body_boxes, True)
overlap_head = self.box_overlap_opr(dt_head_boxes, gt_head_boxes, True)
overlap_ioa = self.box_overlap_opr(dt_body_boxes, gt_body_boxes, False)
scorelist = list()
for i, dt in enumerate(dtboxes):
maxpos = -1
maxiou = thres
for j, gt in enumerate(gtboxes):
if gt_matched[j] == 1:
continue
if gt[-1] > 0:
o_body = overlap_iou[i][j]
o_head = overlap_head[i][j]
if o_body > maxiou and o_head > maxiou:
maxiou = o_body
maxpos = j
else:
if maxpos >= 0:
break
else:
o_body = overlap_ioa[i][j]
if o_body > thres:
maxiou = o_body
maxpos = j
if maxpos >= 0:
if gtboxes[maxpos, -1] > 0:
gt_matched[maxpos] = 1
dt_matched[i] = 1
scorelist.append((dt, 1, self.ID))
else:
dt_matched[i] = -1
else:
dt_matched[i] = 0
scorelist.append((dt, 0, self.ID))
return scorelist
def box_overlap_opr(self, dboxes: np.ndarray, gboxes: np.ndarray, if_iou):
eps = 1e-6
assert dboxes.shape[-1] >= 4 and gboxes.shape[-1] >= 4
N, K = dboxes.shape[0], gboxes.shape[0]
dtboxes = np.tile(np.expand_dims(dboxes, axis=1), (1, K, 1))
gtboxes = np.tile(np.expand_dims(gboxes, axis=0), (N, 1, 1))
iw = (np.minimum(dtboxes[:, :, 2], gtboxes[:, :, 2])
- np.maximum(dtboxes[:, :, 0], gtboxes[:, :, 0]))
ih = (np.minimum(dtboxes[:, :, 3], gtboxes[:, :, 3])
- np.maximum(dtboxes[:, :, 1], gtboxes[:, :, 1]))
inter = np.maximum(0, iw) * np.maximum(0, ih)
dtarea = (dtboxes[:, :, 2] - dtboxes[:, :, 0]) * (dtboxes[:, :, 3] - dtboxes[:, :, 1])
if if_iou:
gtarea = (gtboxes[:, :, 2] - gtboxes[:, :, 0]) * (gtboxes[:, :, 3] - gtboxes[:, :, 1])
ious = inter / (dtarea + gtarea - inter + eps)
else:
ious = inter / (dtarea + eps)
return ious
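    # Worked example (xyxy boxes): d = [0, 0, 2, 2], g = [1, 1, 3, 3] ->
    # intersection 1*1 = 1, union 4 + 4 - 1 = 7, so IoU = 1/7 ~ 0.143;
    # with if_iou=False the denominator is the detection area alone: IoA = 1/4.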
def clip_all_boader(self):
def _clip_boundary(boxes, height, width):
assert boxes.shape[-1] >= 4
boxes[:, 0] = np.minimum(np.maximum(boxes[:, 0], 0), width - 1)
boxes[:, 1] = np.minimum(np.maximum(boxes[:, 1], 0), height - 1)
boxes[:, 2] = np.maximum(np.minimum(boxes[:, 2], width), 0)
boxes[:, 3] = np.maximum(np.minimum(boxes[:, 3], height), 0)
return boxes
assert self.dtboxes.shape[-1] >= 4
assert self.gtboxes.shape[-1] >= 4
assert self._width is not None and self._height is not None
if self.eval_mode == 2:
self.dtboxes[:, :4] = _clip_boundary(self.dtboxes[:, :4], self._height, self._width)
self.gtboxes[:, :4] = _clip_boundary(self.gtboxes[:, :4], self._height, self._width)
self.dtboxes[:, 4:8] = _clip_boundary(self.dtboxes[:, 4:8], self._height, self._width)
self.gtboxes[:, 4:8] = _clip_boundary(self.gtboxes[:, 4:8], self._height, self._width)
else:
self.dtboxes = _clip_boundary(self.dtboxes, self._height, self._width)
self.gtboxes = _clip_boundary(self.gtboxes, self._height, self._width)
def load_gt_boxes(self, dict_input, key_name, class_names):
assert key_name in dict_input
        if len(dict_input[key_name]) < 1:
            # return an empty pair so the caller's tuple unpacking still works
            return np.empty([0, 5]), np.empty([0, 5])
head_bbox = []
body_bbox = []
for rb in dict_input[key_name]:
if rb['tag'] in class_names:
body_tag = class_names.index(rb['tag'])
head_tag = 1
else:
body_tag = -1
head_tag = -1
if 'extra' in rb:
if 'ignore' in rb['extra']:
if rb['extra']['ignore'] != 0:
body_tag = -1
head_tag = -1
if 'head_attr' in rb:
if 'ignore' in rb['head_attr']:
if rb['head_attr']['ignore'] != 0:
head_tag = -1
head_bbox.append(np.hstack((rb['hbox'], head_tag)))
body_bbox.append(np.hstack((rb['fbox'], body_tag)))
head_bbox = np.array(head_bbox)
head_bbox[:, 2:4] += head_bbox[:, :2]
body_bbox = np.array(body_bbox)
body_bbox[:, 2:4] += body_bbox[:, :2]
return body_bbox, head_bbox
def load_det_boxes(self, dict_input, key_name, key_box, key_score=None, key_tag=None):
assert key_name in dict_input
if len(dict_input[key_name]) < 1:
return np.empty([0, 5])
else:
assert key_box in dict_input[key_name][0]
if key_score:
assert key_score in dict_input[key_name][0]
if key_tag:
assert key_tag in dict_input[key_name][0]
if key_score:
if key_tag:
bboxes = np.vstack(
[
np.hstack(
(rb[key_box], rb[key_score], rb[key_tag])
) for rb in dict_input[key_name]
]
)
else:
bboxes = np.vstack(
[np.hstack((rb[key_box], rb[key_score])) for rb in dict_input[key_name]]
)
else:
if key_tag:
bboxes = np.vstack(
[np.hstack((rb[key_box], rb[key_tag])) for rb in dict_input[key_name]]
)
else:
bboxes = np.vstack([rb[key_box] for rb in dict_input[key_name]])
bboxes[:, 2:4] += bboxes[:, :2]
return bboxes
def compare_voc(self, thres):
"""
:meth: match the detection results with the groundtruth by VOC matching strategy
:param thres: iou threshold
:type thres: float
:return: a list of tuples (dtbox, imageID), in the descending sort of dtbox.score
"""
if self.dtboxes is None:
return list()
dtboxes = self.dtboxes
gtboxes = self.gtboxes if self.gtboxes is not None else list()
dtboxes.sort(key=lambda x: x.score, reverse=True)
gtboxes.sort(key=lambda x: x.ign)
scorelist = list()
for i, dt in enumerate(dtboxes):
maxpos = -1
maxiou = thres
for j, gt in enumerate(gtboxes):
overlap = dt.iou(gt)
if overlap > maxiou:
maxiou = overlap
maxpos = j
if maxpos >= 0:
if gtboxes[maxpos].ign == 0:
gtboxes[maxpos].matched = 1
dtboxes[i].matched = 1
scorelist.append((dt, self.ID))
else:
dtboxes[i].matched = -1
else:
dtboxes[i].matched = 0
scorelist.append((dt, self.ID))
return scorelist
class Database(object):
def __init__(self, gtpath=None, dtpath=None, body_key=None, head_key=None, mode=0):
"""
mode=0: only body; mode=1: only head
"""
self.images = dict()
self.eval_mode = mode
self.loadData(gtpath, body_key, head_key, if_gt=True)
self.loadData(dtpath, body_key, head_key, if_gt=False)
self._ignNum = sum([self.images[i]._ignNum for i in self.images])
self._gtNum = sum([self.images[i]._gtNum for i in self.images])
self._imageNum = len(self.images)
self.scorelist = None
def loadData(self, fpath, body_key=None, head_key=None, if_gt=True):
assert os.path.isfile(fpath), fpath + " does not exist!"
with open(fpath, "r") as f:
lines = f.readlines()
records = [json.loads(line.strip('\n')) for line in lines]
if if_gt:
for record in records:
self.images[record["ID"]] = Image(self.eval_mode)
self.images[record["ID"]].load(record, body_key, head_key, PERSON_CLASSES, True)
else:
for record in records:
self.images[record["ID"]].load(record, body_key, head_key, PERSON_CLASSES, False)
self.images[record["ID"]].clip_all_boader()
def compare(self, thres=0.5, matching=None):
"""
match the detection results with the groundtruth in the whole database
"""
assert matching is None or matching == "VOC", matching
scorelist = list()
for ID in self.images:
if matching == "VOC":
result = self.images[ID].compare_voc(thres)
else:
result = self.images[ID].compare_caltech(thres)
scorelist.extend(result)
# In the descending sort of dtbox score.
scorelist.sort(key=lambda x: x[0][-1], reverse=True)
self.scorelist = scorelist
def eval_MR(self, ref="CALTECH_-2"):
"""
evaluate by Caltech-style log-average miss rate
ref: str - "CALTECH_-2"/"CALTECH_-4"
"""
# find greater_than
def _find_gt(lst, target):
for idx, item in enumerate(lst):
if item >= target:
return idx
return len(lst) - 1
assert ref == "CALTECH_-2" or ref == "CALTECH_-4", ref
if ref == "CALTECH_-2":
# CALTECH_MRREF_2: anchor points (from 10^-2 to 1) as in P.Dollar's paper
ref = [0.0100, 0.0178, 0.03160, 0.0562, 0.1000, 0.1778, 0.3162, 0.5623, 1.000]
else:
# CALTECH_MRREF_4: anchor points (from 10^-4 to 1) as in S.Zhang's paper
ref = [0.0001, 0.0003, 0.00100, 0.0032, 0.0100, 0.0316, 0.1000, 0.3162, 1.000]
if self.scorelist is None:
self.compare()
tp, fp = 0.0, 0.0
fppiX, fppiY = list(), list()
for i, item in enumerate(self.scorelist):
if item[1] == 1:
tp += 1.0
elif item[1] == 0:
fp += 1.0
fn = (self._gtNum - self._ignNum) - tp
recall = tp / (tp + fn)
missrate = 1.0 - recall
fppi = fp / self._imageNum
fppiX.append(fppi)
fppiY.append(missrate)
score = list()
for pos in ref:
argmin = _find_gt(fppiX, pos)
if argmin >= 0:
score.append(fppiY[argmin])
score = np.array(score)
MR = np.exp(np.log(score).mean())
return MR, (fppiX, fppiY)
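    # The log-average miss rate is the geometric mean of the miss rates sampled
    # at the reference FPPI anchors: MR = exp(mean_i log mr(fppi_i)), which is
    # what np.exp(np.log(score).mean()) computes above.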
def eval_AP(self):
"""
:meth: evaluate by average precision
"""
# calculate general ap score
def _calculate_map(recall, precision):
assert len(recall) == len(precision)
area = 0
for i in range(1, len(recall)):
delta_h = (precision[i - 1] + precision[i]) / 2
delta_w = recall[i] - recall[i - 1]
area += delta_w * delta_h
return area
tp, fp = 0.0, 0.0
rpX, rpY = list(), list()
total_gt = self._gtNum - self._ignNum
total_images = self._imageNum
fpn = []
recalln = []
thr = []
fppi = []
for i, item in enumerate(self.scorelist):
if item[1] == 1:
tp += 1.0
elif item[1] == 0:
fp += 1.0
fn = total_gt - tp
recall = tp / (tp + fn)
precision = tp / (tp + fp)
rpX.append(recall)
rpY.append(precision)
fpn.append(fp)
recalln.append(tp)
thr.append(item[0][-1])
fppi.append(fp / total_images)
AP = _calculate_map(rpX, rpY)
return AP, recall, (rpX, rpY, thr, fpn, recalln, fppi)
|
{"hexsha": "4c4910924502fe76c5d854f5c8327931905772a6", "size": 17803, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvpods/evaluation/crowdhumantools.py", "max_stars_repo_name": "reinforcementdriving/cvpods", "max_stars_repo_head_hexsha": "32d98b74745020be035a0e20337ad934201615c4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 758, "max_stars_repo_stars_event_min_datetime": "2021-03-11T08:14:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:24:13.000Z", "max_issues_repo_path": "cvpods/evaluation/crowdhumantools.py", "max_issues_repo_name": "wondervictor/cvpods", "max_issues_repo_head_hexsha": "614a975e5425bbaeb66bbd1ffca552d633ba89ca", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 58, "max_issues_repo_issues_event_min_datetime": "2020-12-04T19:47:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T06:52:13.000Z", "max_forks_repo_path": "cvpods/evaluation/crowdhumantools.py", "max_forks_repo_name": "wondervictor/cvpods", "max_forks_repo_head_hexsha": "614a975e5425bbaeb66bbd1ffca552d633ba89ca", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 110, "max_forks_repo_forks_event_min_datetime": "2021-03-18T01:59:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T21:26:56.000Z", "avg_line_length": 38.9562363239, "max_line_length": 98, "alphanum_fraction": 0.5071617143, "include": true, "reason": "import numpy", "num_tokens": 4502}
|
import unittest
from collections import Counter
from math import sqrt
import scipy.stats
from multinomial import binomial, int_sqrt, sample_binomial, sample_binomial_p, sample_multinomial_p
def get_tuples(length, total):
"""
Computes all possible multinomial draws, the support of the multinomial distribution
:param length: number of categories
:param total: sum of the multinomial
:return: list with all possible multinomial draws
"""
if length == 1:
yield (total,)
return
for i in range(total + 1):
for t in get_tuples(length - 1, total - i):
yield (i,) + t
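# Example: list(get_tuples(3, 2)) enumerates all C(4, 2) = 6 possible draws of
# a 3-category multinomial with total 2:
#   (0, 0, 2), (0, 1, 1), (0, 2, 0), (1, 0, 1), (1, 1, 0), (2, 0, 0)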
class TestMultinomial(unittest.TestCase):
def test_binomial(self):
self.assertEqual(binomial(10, 5), 252)
self.assertEqual(binomial(10, 0), 1)
self.assertEqual(binomial(10, 10), 1)
self.assertEqual(sum(binomial(20, i) for i in range(0, 21)), 2 ** 20)
def test_sqrt(self):
for n in range(1, 2000, 17):
s = int_sqrt(n)
self.assertLess(sqrt(n) - 1, s)
self.assertLess(s, sqrt(n) + 1)
m = int_sqrt(2 * n) + 1
self.assertGreaterEqual(m, sqrt(2 * n))
self.assertLessEqual(m, sqrt(2 * n) + 3)
def sample_binomial_tester(self, n, N):
"""
Helper function to test sample_binomial using the chi-square test
:param n: parameter n of the binomial
:param N: number of draws from the binomial distribution B(n, 1/2)
:return: nothing, just makes assertions
"""
k = [sample_binomial(n) for _ in range(0, N)]
c = Counter(k)
if n == 0:
self.assertEqual(c, Counter({0: N}))
return
pmf = scipy.stats.binom(n=n, p=0.5).pmf
test = scipy.stats.chisquare([c[i] / N for i in range(0, n + 1)], [pmf(i) for i in range(0, n + 1)])
self.assertGreater(test.pvalue, 0.001) # yes this is not very sound
def test_sample_binomial(self):
self.sample_binomial_tester(20, 1000)
self.sample_binomial_tester(0, 1000)
self.sample_binomial_tester(1, 2000)
self.sample_binomial_tester(13, 2000)
pass
def sample_binomial_p_tester(self, n, N, p, q):
"""
Helper function to test sample_binomial_p using the chi-square test
:param n: parameter n of sample_binomial_p
:param N: number of draws from the binomial distribution B(n, p/q)
:param p: parameter p of sample_binomial_p
:param q: parameter q of sample_binomial_p
"""
k = [sample_binomial_p(n, p, q) for _ in range(0, N)]
pmf = scipy.stats.binom(n=n, p=p / q).pmf
c = Counter(k)
        test = scipy.stats.chisquare([c[i] for i in range(0, n + 1)], [pmf(i) * N for i in range(0, n + 1)])
self.assertGreater(test.pvalue, 0.001) # yes this is not very sound
def test_sample_binomial_p(self):
self.sample_binomial_p_tester(20, 1000, 716221, 1000000)
self.sample_binomial_p_tester(103, 2000, 1, 100)
self.sample_binomial_p_tester(103, 2000, 99, 100)
def sample_multinomial_p_tester(self, n, N, rs):
"""
Helper function to test sample_multinomial_p using the chi-square test
:param n: parameter n of sample_multinomial_p
:param N: number of draws from the multinomial distribution M(n, [r/sum(rs) for r in rs])
        :param rs: parameter rs of sample_multinomial_p
"""
        k = [tuple(sample_multinomial_p(n, rs)) for _ in range(0, N)]
s = sum(rs)
p = [r / s for r in rs]
pmf = scipy.stats.multinomial(n, p).pmf
c = Counter(k)
events = list(get_tuples(len(rs), n))
        test = scipy.stats.chisquare([c[e] for e in events], [pmf(e) * N for e in events])
self.assertGreater(test.pvalue, 0.001) # yes this is not very sound
def test_sample_multinomial_p(self):
self.sample_multinomial_p_tester(10, 2000, [14, 5, 7])
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "6ab894d96e8da7718b8e322ece06dc2bd6c9fa93", "size": 4030, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tests.py", "max_stars_repo_name": "murbard/multinomial", "max_stars_repo_head_hexsha": "a3afddb51158fa6dd5218ecd286d1756daaecc4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/tests.py", "max_issues_repo_name": "murbard/multinomial", "max_issues_repo_head_hexsha": "a3afddb51158fa6dd5218ecd286d1756daaecc4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/tests.py", "max_forks_repo_name": "murbard/multinomial", "max_forks_repo_head_hexsha": "a3afddb51158fa6dd5218ecd286d1756daaecc4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.380952381, "max_line_length": 108, "alphanum_fraction": 0.6200992556, "include": true, "reason": "import scipy", "num_tokens": 1149}
|
import itertools
import numpy as np
import torch
import detectron2.lib.ops as ops
def make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
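# Illustration (not from the source): make_divisible(37, 8) == 40 and
# make_divisible(17, 8) == 16 -- values snap to a multiple of 8 while never
# dropping more than 10% below the original channel count.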
def convert_conv2convsamepadding_model(module, process_group=None, channel_last=False):
mod = module
if isinstance(module, torch.nn.modules.conv._ConvNd):
if isinstance(module.bias, torch.Tensor):
bias = True
else:
bias = False
mod = ops.Conv2dSamePadding(module.in_channels, module.out_channels, module.kernel_size, module.stride,
module.dilation, module.groups, bias=bias)
mod.weight.data = module.weight.data.clone().detach()
if bias:
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_conv2convsamepadding_model(child, process_group=process_group,
channel_last=channel_last))
# TODO(jie) should I delete model explicitly?
del module
return mod
def convert_conv2convws_model(module, process_group=None, channel_last=False):
mod = module
if isinstance(module, torch.nn.modules.conv._ConvNd):
if isinstance(module.bias, torch.Tensor):
bias = True
else:
bias = False
mod = ops.Conv2dWS(module.in_channels, module.out_channels, module.kernel_size, module.stride, module.padding,
module.dilation, module.groups, bias=bias)
mod.weight.data = module.weight.data.clone().detach()
if bias:
mod.bias.data = module.bias.data.clone().detach()
for name, child in module.named_children():
mod.add_module(name, convert_conv2convws_model(child, process_group=process_group, channel_last=channel_last))
# TODO(jie) should I delete model explicitly?
del module
return mod
def convert_bn2affine_model(module, process_group=None, channel_last=False, merge=True):
"""
This function is learned from the NVIDIA/apex.
It can be seen here:
https://github.com/NVIDIA/apex/blob/master/apex/parallel/sync_batchnorm.py
Recursively traverse module and its children to replace all instances of
``torch.nn.modules.batchnorm._BatchNorm`` with `ops.AffineChannel2d`.
"""
mod = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and not isinstance(module, ops.MixtureBatchNorm2d):
# print(module.weight.cpu().detach().numpy().shape)
mod = ops.AffineChannel2d(module.num_features)
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
freeze_params(mod) # freeze affine params
if merge:
gamma = module.weight.data.clone().detach().numpy()
beta = module.bias.data.clone().detach().numpy()
mu = module.running_mean.data.clone().detach().numpy()
var = module.running_var.data.clone().detach().numpy()
eps = module.eps
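            # Fold the BN statistics into a single affine map:
            # y = gamma*(x - mu)/sqrt(var + eps) + beta = new_gamma*x + new_beta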
new_gamma = gamma / (np.power(var + eps, 0.5)) # new bn.weight
new_beta = beta - gamma * mu / (np.power(var + eps, 0.5)) # new bn.bias
mod.weight.data = torch.from_numpy(new_gamma)
mod.bias.data = torch.from_numpy(new_beta)
for name, child in module.named_children():
mod.add_module(name, convert_bn2affine_model(child, process_group=process_group, channel_last=channel_last,
merge=merge))
del module
return mod
def convert_bn2syncbn_model(module, process_group=None):
mod = module
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and not isinstance(module, ops.MixtureBatchNorm2d):
# frozen backbone bn layers which do not require grad
for p in module.parameters():
if not p.requires_grad:
return convert_bn2frozenbn_model(mod)
mod = ops.NaiveSyncBatchNorm(module.num_features, module.eps, module.momentum, module.affine,
module.track_running_stats)
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
            # keep requires_grad unchanged
mod.weight.requires_grad = module.weight.requires_grad
mod.bias.requires_grad = module.bias.requires_grad
mod.running_mean = module.running_mean
mod.running_var = module.running_var
mod.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
mod.add_module(name, convert_bn2syncbn_model(child, process_group=process_group))
del module
return mod
def convert_bn2frozenbn_model(module):
bn_module = (torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm)
mod = module
if isinstance(module, bn_module):
mod = ops.FrozenBatchNorm2d(module.num_features)
if module.affine:
mod.weight.data = module.weight.data.clone().detach()
mod.bias.data = module.bias.data.clone().detach()
mod.running_mean.data = module.running_mean.data
mod.running_var.data = module.running_var.data
mod.eps = module.eps
freeze_params(mod)
else:
for name, child in module.named_children():
new_child = convert_bn2frozenbn_model(child)
if new_child is not child:
mod.add_module(name, new_child)
del module
return mod
@torch.no_grad()
def update_bn_stats(model, data_loader, device, num_iters=200):
assert model.training
bn_layers = get_bn_modules(model)
if len(bn_layers) == 0:
return
momentum_actual = [bn.momentum for bn in bn_layers]
for bn in bn_layers:
bn.momentum = 1.0
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
for ind, (images, targets, _) in enumerate(itertools.islice(data_loader, num_iters)):
images = images.to(device)
targets = [target.to(device) for target in targets]
model(images, targets)
for i, bn in enumerate(bn_layers):
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)
assert ind == num_iters - 1, (
"update_bn_stats is meant to run for {} iterations, "
"but the dataloader stops at {} iterations.".format(num_iters, ind)
)
for i, bn in enumerate(bn_layers):
# Sets the precise bn stats.
bn.running_mean = running_mean[i]
bn.running_var = running_var[i]
bn.momentum = momentum_actual[i]
def get_bn_modules(model):
types = (
torch.nn.BatchNorm1d,
torch.nn.BatchNorm2d,
torch.nn.BatchNorm3d,
torch.nn.SyncBatchNorm,
)
bn_layers = [
m
for m in model.modules()
if m.training and isinstance(m, types)
]
return bn_layers
def freeze_params(m):
"""Freeze all the weights by setting requires_grad to False
"""
for p in m.parameters():
p.requires_grad = False
def mismatch_params_filter(s):
l = []
for i in s:
if i.split('.')[-1] in ['num_batches_tracked', 'running_mean', 'running_var']:
continue
else:
l.append(i)
return l
def reduce_tensor(tensor, world_size=1):
rt = tensor.clone()
torch.distributed.all_reduce(rt, op=torch.distributed.ReduceOp.SUM)
rt /= world_size
return rt
|
{"hexsha": "16ae70280365285e9eeb307a908b86d588ff06fa", "size": 8286, "ext": "py", "lang": "Python", "max_stars_repo_path": "detectron2/lib/utils/net.py", "max_stars_repo_name": "BUPT-PRIV/detectron2", "max_stars_repo_head_hexsha": "3163664cd5f43d50ea1966f410dc82410b9ccbf4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "detectron2/lib/utils/net.py", "max_issues_repo_name": "BUPT-PRIV/detectron2", "max_issues_repo_head_hexsha": "3163664cd5f43d50ea1966f410dc82410b9ccbf4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "detectron2/lib/utils/net.py", "max_forks_repo_name": "BUPT-PRIV/detectron2", "max_forks_repo_head_hexsha": "3163664cd5f43d50ea1966f410dc82410b9ccbf4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6636363636, "max_line_length": 118, "alphanum_fraction": 0.6477190442, "include": true, "reason": "import numpy", "num_tokens": 1868}
|
using SHA
using Random
block_string = "test5"
block_string = randstring(10)
#println(bytes2hex(sha256("test")))
max_nonce = 2 ^ 32 # 4 billion
max_nonce = 2 ^ 16
is_hash_found = 0
for nonce in 0:max_nonce
hash = bytes2hex(sha2_256(string(nonce) * block_string))
#println(hash)
if startswith(hash, "0000")
global is_hash_found = 1
println(hash)
break
end
end
if is_hash_found == 0
println("no proper hash found, please try again later")
end
|
{"hexsha": "b9e05e1ecb05a8af5256102ae78c06e525649d45", "size": 499, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/proof_of_work.jl", "max_stars_repo_name": "marchunter/litcoin", "max_stars_repo_head_hexsha": "b8dca6afc507915df8b0a07c06f7235a351d4a62", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/proof_of_work.jl", "max_issues_repo_name": "marchunter/litcoin", "max_issues_repo_head_hexsha": "b8dca6afc507915df8b0a07c06f7235a351d4a62", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/proof_of_work.jl", "max_forks_repo_name": "marchunter/litcoin", "max_forks_repo_head_hexsha": "b8dca6afc507915df8b0a07c06f7235a351d4a62", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.8214285714, "max_line_length": 60, "alphanum_fraction": 0.6633266533, "num_tokens": 148}
|
import numpy as np
class Policy():
    """Linear policy used by Augmented Random Search (ARS)."""
    def __init__(self, num_observations, num_actions, lr, num_dirs, num_dirs_best, noise):
        self.theta = np.zeros((num_actions, num_observations))  # linear policy weights
        self.learning_rate = lr
        self.num_directions = num_dirs            # perturbation directions sampled per step
        self.num_best_directions = num_dirs_best  # top-scoring directions kept for the update
        self.noise = noise                        # exploration noise scale
    def evaluate(self, state, delta=None, direction=None):
        # Act with the unperturbed policy, or with theta perturbed by +/- noise*delta.
        if direction is None:
            return self.theta.dot(state)
        elif direction == "positive":
            return (self.theta + self.noise * delta).dot(state)
        else:
            return (self.theta - self.noise * delta).dot(state)
    def sample_deltas(self):
        return [np.random.randn(*self.theta.shape) for _ in range(self.num_directions)]
    def update(self, rollouts, sigma_r):
        # ARS update rule: theta += lr / (b * sigma_R) * sum_k (r_k+ - r_k-) * delta_k
        step = np.zeros(self.theta.shape)
        for positive_reward, negative_reward, d in rollouts:
            step += (positive_reward - negative_reward) * d
        self.theta += self.learning_rate / (self.num_best_directions * sigma_r) * step
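# Illustrative usage sketch (dimensions and reward values below are assumptions
# for demonstration, not part of this module): one ARS update step.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    policy = Policy(num_observations=4, num_actions=2, lr=0.02,
                    num_dirs=8, num_dirs_best=4, noise=0.03)
    deltas = policy.sample_deltas()
    state = rng.normal(size=4)
    action = policy.evaluate(state, deltas[0], "positive")  # perturbed rollout action
    # Stand-in rollout returns; a real loop would collect these from an environment.
    rollouts = [(rng.normal(), rng.normal(), d) for d in deltas[:policy.num_best_directions]]
    sigma_r = np.std([r for pr, nr, _ in rollouts for r in (pr, nr)])
    policy.update(rollouts, sigma_r)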
|
{"hexsha": "e1e8187f11ac24458dc5a37c4b76c0a9518821d6", "size": 1030, "ext": "py", "lang": "Python", "max_stars_repo_path": "ARS/policy.py", "max_stars_repo_name": "7enTropy7/BipedalWalker", "max_stars_repo_head_hexsha": "be699026fd556ad242896412c34af1401582ba50", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-12-14T06:57:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-16T05:16:18.000Z", "max_issues_repo_path": "ARS/policy.py", "max_issues_repo_name": "7enTropy7/BipedalWalker", "max_issues_repo_head_hexsha": "be699026fd556ad242896412c34af1401582ba50", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ARS/policy.py", "max_forks_repo_name": "7enTropy7/BipedalWalker", "max_forks_repo_head_hexsha": "be699026fd556ad242896412c34af1401582ba50", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-01-10T06:59:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-05T21:07:42.000Z", "avg_line_length": 38.1481481481, "max_line_length": 87, "alphanum_fraction": 0.6689320388, "include": true, "reason": "import numpy", "num_tokens": 228}
|
import os
import time
from collections import deque
import pickle
from baselines.ddpg_custom.ddpg import DDPG
import baselines.common.tf_util as U
from baselines import logger
import numpy as np
import tensorflow as tf
from mpi4py import MPI
def print_n_txt(_f,_chars,_addNewLine=True,_DO_PRINT=True,_DO_SAVE=True):
"""
Usage:
txtName = ('../res/res_%s.txt'%(self.name))
f = open(txtName,'w') # Open txt file
print_n_txt(_f=f,_chars='Text name: '+txtName,_DO_PRINT=True,_DO_SAVE=True)
"""
if _DO_SAVE:
if _addNewLine: _f.write(_chars+'\n')
else: _f.write(_chars)
_f.flush();os.fsync(_f.fileno()) # Write to txt
if _DO_PRINT:
print (_chars)
def train(seed, env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,
normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise,
popart, gamma, tau,clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
eval_env=None, param_noise_adaption_interval=50, _maxSec=5.0, _f=None):
rank = MPI.COMM_WORLD.Get_rank()
assert (np.abs(env.action_space.low) == env.action_space.high).all() # we assume symmetric actions.
max_action = env.action_space.high
logger.info('scaling actions by {} before executing in env'.format(max_action))
print ("\n\n actor_lr:%.4f critic_lr:%.4f\n\n"%(actor_lr,critic_lr))
agent = DDPG(actor, critic, memory, env.observation_space.shape, env.action_space.shape,
gamma=gamma, tau=tau, normalize_returns=normalize_returns, normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
# Set up logging stuff only for a single worker.
if rank == 0:
saver = tf.train.Saver()
else:
saver = None
step = 0
episode = 0
eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
with U.single_threaded_session() as sess:
# Prepare everything.
agent.initialize(sess)
sess.graph.finalize()
agent.reset()
obs = env.reset()
if eval_env is not None:
eval_obs = eval_env.reset()
done = False
episode_reward = 0.
episode_step = 0
episodes = 0
t = 0
epoch = 0
start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_episode_eval_rewards = []
epoch_episode_eval_steps = []
epoch_start_time = time.time()
epoch_actions = []
epoch_qs = []
epoch_episodes = 0
for epoch in range(nb_epochs):
for cycle in range(nb_epoch_cycles):
# Perform rollouts.
for t_rollout in range(nb_rollout_steps):
# Predict next action.
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape == env.action_space.shape
# Execute next action.
if rank == 0 and render:
env.render()
assert max_action.shape == action.shape
new_obs, r, done, info = env.step(max_action * action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
t += 1
if rank == 0 and render:
env.render()
episode_reward += r
episode_step += 1
# Book-keeping.
epoch_actions.append(action)
epoch_qs.append(q)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
sec = env.sim.data.time
if sec > _maxSec:
done = True
if done:
# Episode done.
epoch_episode_rewards.append(episode_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(episode_step)
episode_reward = 0.
episode_step = 0
epoch_episodes += 1
episodes += 1
agent.reset()
obs = env.reset()
# Train.
epoch_actor_losses = []
epoch_critic_losses = []
epoch_adaptive_distances = []
for t_train in range(nb_train_steps):
# Adapt param noise, if necessary.
if memory.nb_entries >= batch_size and t_train % param_noise_adaption_interval == 0:
distance = agent.adapt_param_noise()
epoch_adaptive_distances.append(distance)
cl, al = agent.train()
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
# Evaluate.
eval_episode_rewards = []
eval_qs = []
if eval_env is not None:
eval_episode_reward = 0.
eval_obs = eval_env.reset() # reset here
for t_rollout in range(nb_eval_steps):
eval_action, eval_q = agent.pi(eval_obs, apply_noise=False, compute_Q=True) # get action
eval_obs, eval_r, eval_done, eval_info = eval_env.step(max_action * eval_action) # scale for execution in env (as far as DDPG is concerned, every action is in [-1, 1])
if render_eval: # render
eval_env.render()
eval_episode_reward += eval_r # Compute the sum of reward
# Maximum rollout second
sec = eval_env.sim.data.time
if sec > _maxSec:
eval_done = True
eval_qs.append(eval_q)
if eval_done:
# eval_obs = eval_env.reset() # do reset at start
eval_episode_rewards.append(eval_episode_reward)
eval_episode_rewards_history.append(eval_episode_reward)
# eval_episode_reward = 0.
# Printout eval
_chars = ('[%d/%d] rSumAvg:[%.3f]'%(epoch,nb_epochs,eval_episode_reward))
print_n_txt(_f,_chars,_addNewLine=True,_DO_PRINT=True,_DO_SAVE=True)
mpi_size = MPI.COMM_WORLD.Get_size()
# Log stats.
# XXX shouldn't call np.mean on variable length lists
duration = time.time() - start_time
stats = agent.get_stats()
combined_stats = stats.copy()
combined_stats['rollout/return'] = np.mean(epoch_episode_rewards)
combined_stats['rollout/return_history'] = np.mean(episode_rewards_history)
combined_stats['rollout/episode_steps'] = np.mean(epoch_episode_steps)
combined_stats['rollout/actions_mean'] = np.mean(epoch_actions)
combined_stats['rollout/Q_mean'] = np.mean(epoch_qs)
combined_stats['train/loss_actor'] = np.mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = np.mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = np.mean(epoch_adaptive_distances)
combined_stats['total/duration'] = duration
combined_stats['total/steps_per_second'] = float(t) / float(duration)
combined_stats['total/episodes'] = episodes
combined_stats['rollout/episodes'] = epoch_episodes
combined_stats['rollout/actions_std'] = np.std(epoch_actions)
# Evaluation statistics.
if eval_env is not None:
"""
combined_stats['eval/return'] = np.mean(eval_episode_rewards)
combined_stats['eval/return_history'] = np.mean(eval_episode_rewards_history)
combined_stats['eval/Q'] = eval_qs
combined_stats['eval/episodes'] = len(eval_episode_rewards)
"""
combined_stats['eval/return'] = eval_episode_reward
combined_stats['eval/return_history'] = 0
combined_stats['eval/Q'] = 0
combined_stats['eval/episodes'] = 0
def as_scalar(x):
if isinstance(x, np.ndarray):
assert x.size == 1
return x[0]
elif np.isscalar(x):
return x
else:
raise ValueError('expected scalar, got %s'%x)
combined_stats_sums = MPI.COMM_WORLD.allreduce(np.array([as_scalar(x) for x in combined_stats.values()]))
combined_stats = {k : v / mpi_size for (k,v) in zip(combined_stats.keys(), combined_stats_sums)}
# Total statistics.
combined_stats['total/epochs'] = epoch + 1
combined_stats['total/steps'] = t
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
# with open('/home/dlxhrl/Projects/baselines/baselines/ddpg_custom/results/ddpg_'+env.env_name+'_seed'+str(seed)+'.pickle','wb') as f:
with open('baselines/ddpg_custom/results/ddpg_'+env.env_name+'_seed'+str(seed)+'.pickle','wb') as f:
pickle.dump({'epoch_episode_rewards':epoch_episode_rewards},f)
|
{"hexsha": "7a02171533dd31591a50b9db3c0d5e131ad4cd79", "size": 10565, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/ddpg_custom/training.py", "max_stars_repo_name": "kyungjaelee/customized_open_ai_baselines", "max_stars_repo_head_hexsha": "f10dd63d00efa3653377272662581c493da60417", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "baselines/ddpg_custom/training.py", "max_issues_repo_name": "kyungjaelee/customized_open_ai_baselines", "max_issues_repo_head_hexsha": "f10dd63d00efa3653377272662581c493da60417", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/ddpg_custom/training.py", "max_forks_repo_name": "kyungjaelee/customized_open_ai_baselines", "max_forks_repo_head_hexsha": "f10dd63d00efa3653377272662581c493da60417", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-09-06T00:17:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-10T22:30:11.000Z", "avg_line_length": 45.1495726496, "max_line_length": 192, "alphanum_fraction": 0.5663038334, "include": true, "reason": "import numpy", "num_tokens": 2187}
|
\section{Control Plane on Power Line}
|
{"hexsha": "64a9cb21c91b4a08308877b89bfda2b662a8016d", "size": 37, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/paper/sections/control.tex", "max_stars_repo_name": "HKUST-SING/p4mr", "max_stars_repo_head_hexsha": "82f0916d9a9ab8036123742061d5b21779277800", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-09T13:05:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-09T13:05:39.000Z", "max_issues_repo_path": "docs/paper/sections/control.tex", "max_issues_repo_name": "li-ch/p4mr", "max_issues_repo_head_hexsha": "82f0916d9a9ab8036123742061d5b21779277800", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/paper/sections/control.tex", "max_forks_repo_name": "li-ch/p4mr", "max_forks_repo_head_hexsha": "82f0916d9a9ab8036123742061d5b21779277800", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0, "max_line_length": 37, "alphanum_fraction": 0.8108108108, "num_tokens": 9}
|
/*
Part of the Fluid Corpus Manipulation Project (http://www.flucoma.org/)
Copyright 2017-2019 University of Huddersfield.
Licensed under the BSD-3 License.
See license.md file in the project root for full license information.
This project has received funding from the European Research Council (ERC)
under the European Union’s Horizon 2020 research and innovation programme
(grant agreement No 725899).
*/
#pragma once
#include <Eigen/Core>
#include <Eigen/Sparse>
#include <Spectra/MatOp/SparseSymMatProd.h>
#include <Spectra/SymEigsSolver.h>
namespace fluid {
namespace algorithm {
class SpectralEmbedding
{
public:
using MatrixXd = Eigen::MatrixXd;
using ArrayXXd = Eigen::ArrayXXd;
using SparseMatrixXd = Eigen::SparseMatrix<double>;
ArrayXXd process(SparseMatrixXd graph, index dims)
{
using namespace Eigen;
using namespace Spectra;
using namespace std;
VectorXd diagData = graph * VectorXd::Ones(graph.cols());
diagData = (1 / diagData.array().sqrt());
SparseMatrixXd D = SparseMatrixXd(graph.rows(), graph.cols());
D.reserve(graph.rows());
for (index i = 0; i < D.rows(); i++) { D.insert(i, i) = diagData(i); }
SparseMatrixXd I = SparseMatrixXd(D.rows(), D.cols());
I.setIdentity();
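    // Symmetric normalized graph Laplacian: L = I - D^{-1/2} * W * D^{-1/2},
    // with W the affinity graph and D its degree matrix; the embedding keeps
    // the eigenvectors of the smallest nontrivial eigenvalues (the constant
    // eigenvector in column 0 is dropped below).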
SparseMatrixXd L = I - (D * (graph * D));
int k = static_cast<int>(dims + 1);
index ncv = max(2 * k + 1, int(round(sqrt(L.rows()))));
VectorXd initV = VectorXd::Ones(L.rows());
SparseSymMatProd<double> op(L);
SymEigsSolver<double, SMALLEST_MAGN, SparseSymMatProd<double>> eigs(&op, k,
ncv);
eigs.init(initV.data());
    auto nConverged = eigs.compute(
        D.cols(), 1e-4, SMALLEST_MAGN); // TODO: fallback if not converging
MatrixXd U = eigs.eigenvectors();
ArrayXXd Y = U.block(0, 1, U.rows(), dims).array();
return Y;
}
};
}; // namespace algorithm
}; // namespace fluid
|
{"hexsha": "ce6af5b955d6f2fbdd109af77b17b557422f34a0", "size": 2004, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/algorithms/util/SpectralEmbedding.hpp", "max_stars_repo_name": "elgiano/flucoma-core", "max_stars_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/algorithms/util/SpectralEmbedding.hpp", "max_issues_repo_name": "elgiano/flucoma-core", "max_issues_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/algorithms/util/SpectralEmbedding.hpp", "max_forks_repo_name": "elgiano/flucoma-core", "max_forks_repo_head_hexsha": "d34a04e7a68f24eaf09b24df57020d45664061fc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7857142857, "max_line_length": 79, "alphanum_fraction": 0.6432135729, "num_tokens": 505}
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Base class for EoS modules.
Raises NotImplementedErrors for all expected methods in case developer misses
some. EoS developers should overload all these methods.
"""
from pyomo.environ import units as pyunits
from idaes.core.util.constants import Constants as const
from idaes.generic_models.properties.core.generic.utility import (
get_method, get_component_object as cobj)
from idaes.core.util.exceptions import (
PropertyNotSupportedError, ConfigurationError)
class EoSBase():
@staticmethod
def gas_constant(b):
# Utility method to convert gas constant to base units
base_units = b.params.get_metadata().default_units
r_units = (base_units["mass"] *
base_units["length"]**2 *
base_units["temperature"]**-1 *
base_units["amount"]**-1 *
base_units["time"]**-2)
return pyunits.convert(const.gas_constant, to_units=r_units)
@staticmethod
def common(b, pobj):
raise NotImplementedError(_msg(b, "common"))
@staticmethod
def calculate_scaling_factors(b, pobj):
raise NotImplementedError(_msg(b, "calculate_scaling_factors"))
@staticmethod
def build_parameters(b):
raise NotImplementedError(_msg(b, "build_parameters"))
@staticmethod
def act_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "act_phase_comp"))
@staticmethod
def act_phase_comp_true(b, p, j):
raise NotImplementedError(_msg(b, "act_phase_comp_true"))
@staticmethod
def act_phase_comp_appr(b, p, j):
raise NotImplementedError(_msg(b, "act_phase_comp_appr"))
@staticmethod
def log_act_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "log_act_phase_comp"))
@staticmethod
def log_act_phase_comp_true(b, p, j):
raise NotImplementedError(_msg(b, "log_act_phase_comp_true"))
@staticmethod
def log_act_phase_comp_appr(b, p, j):
raise NotImplementedError(_msg(b, "log_act_phase_comp_appr"))
@staticmethod
def act_coeff_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "act_coeff_phase_comp"))
@staticmethod
def act_coeff_phase_comp_true(b, p, j):
raise NotImplementedError(_msg(b, "act_coeff_phase_comp_true"))
@staticmethod
def act_coeff_phase_comp_appr(b, p, j):
raise NotImplementedError(_msg(b, "act_coeff_phase_comp_appr"))
@staticmethod
def cp_mol_phase(b, p):
raise NotImplementedError(_msg(b, "cp_mol_phase"))
@staticmethod
def cp_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "cp_mol_phase_comp"))
@staticmethod
def cv_mol_phase(b, p):
raise NotImplementedError(_msg(b, "cv_mol_phase"))
@staticmethod
def cv_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "cv_mol_phase_comp"))
@staticmethod
def heat_capacity_ratio_phase(b, p):
return (b.cp_mol_phase[p] /
b.cv_mol_phase[p])
@staticmethod
def cv_mol_ig_comp_pure(b, j):
# Method for calculating pure component ideal gas cv from cp
# For ideal gases, cv = cp - R
units = b.params.get_metadata().derived_units
R = pyunits.convert(const.gas_constant,
to_units=units["heat_capacity_mole"])
return (get_method(b, "cp_mol_ig_comp", j)(
b, cobj(b, j), b.temperature) - R)
@staticmethod
def cv_mol_ls_comp_pure(b, p, j):
# Method for calculating pure component liquid and solid cv from cp
# For ideal (incompressible) liquids and solids, cv = cp
pobj = b.params.get_phase(p)
if pobj.is_liquid_phase():
return get_method(b, "cp_mol_liq_comp", j)(
b, cobj(b, j), b.temperature)
elif pobj.is_solid_phase():
return get_method(b, "cp_mol_sol_comp", j)(
b, cobj(b, j), b.temperature)
@staticmethod
def dens_mass_phase(b, p):
raise NotImplementedError(_msg(b, "dens_mass_phase"))
@staticmethod
def dens_mol_phase(b, p):
raise NotImplementedError(_msg(b, "dens_mol_phase"))
@staticmethod
def energy_internal_mol_phase(b, p):
raise NotImplementedError(_msg(b, "energy_internal_mol_phase"))
@staticmethod
def energy_internal_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "energy_internal_mol_phase_comp"))
@staticmethod
def energy_internal_mol_ig_comp_pure(b, j):
# Method for calculating pure component U from H for ideal gases
units = b.params.get_metadata().derived_units
R = pyunits.convert(const.gas_constant,
to_units=units["heat_capacity_mole"])
if cobj(b, j).parent_block().config.include_enthalpy_of_formation:
# First, need to determine correction between U_form and H_form
# U_form = H_form - delta_n*R*T
ele_comp = cobj(b, j).config.elemental_composition
if ele_comp is None:
raise ConfigurationError(
"{} calculation of internal energy requires elemental "
"composition of all species. Please set this using the "
"elemental_composition argument in the component "
"declaration ({}).".format(b.name, j))
delta_n = 0
for e, s in ele_comp.items():
# Check for any element which is vapor at standard state
if e in ["He", "Ne", "Ar", "Kr", "Xe", "Ra"]:
delta_n += -s
elif e in ["F", "Cl", "H", "N", "O"]:
delta_n += -s/2 # These are diatomic at standard state
delta_n += 1 # One mole of gaseous compound is formed
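            # Illustration (not from the source): for gaseous H2O with
            # elemental_composition {"H": 2, "O": 1}, delta_n = -2/2 - 1/2 + 1
            # = -0.5, matching H2 + 1/2 O2 -> H2O(g).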
dU_form = delta_n*R*b.params.temperature_ref
else:
dU_form = 0 # No heat of formation to correct
# For ideal gases, U = H - R(T-T_ref) + dU_form
return (get_method(b, "enth_mol_ig_comp", j)(
b, cobj(b, j), b.temperature) -
R*(b.temperature-b.params.temperature_ref) +
dU_form)
@staticmethod
def energy_internal_mol_ls_comp_pure(b, p, j):
pobj = b.params.get_phase(p)
if pobj.is_liquid_phase():
mthd = get_method(b, "enth_mol_liq_comp", j)
elif pobj.is_solid_phase():
mthd = get_method(b, "enth_mol_sol_comp", j)
# Method for calculating pure component U from H for liquids & solids
units = b.params.get_metadata().derived_units
R = pyunits.convert(const.gas_constant,
to_units=units["heat_capacity_mole"])
if cobj(b, j).parent_block().config.include_enthalpy_of_formation:
# First, need to determine correction between U_form and H_form
# U_form = H_form - delta_n*R*T
ele_comp = cobj(b, j).config.elemental_composition
if ele_comp is None:
raise ConfigurationError(
"{} calculation of internal energy requires elemental "
"composition of all species. Please set this using the "
"elemental_composition argument in the component "
"declaration ({}).".format(b.name, j))
delta_n = 0
for e, s in ele_comp.items():
# Check for any element which is vapor at standard state
if e in ["He", "Ne", "Ar", "Kr", "Xe", "Ra"]:
delta_n += -s
elif e in ["F", "Cl", "H", "N", "O"]:
delta_n += -s/2 # These are diatomic at standard state
dU_form = delta_n*R*b.params.temperature_ref
# For ideal (incompressible) liquids and solids, U = H + dU_form
return (mthd(b, cobj(b, j), b.temperature) + dU_form)
else:
# If not including heat of formation, U = H
return mthd(b, cobj(b, j), b.temperature)
@staticmethod
def enth_mol_phase(b, p):
raise NotImplementedError(_msg(b, "enth_mol_phase"))
@staticmethod
def enth_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "enth_mol_phase_comp"))
@staticmethod
def entr_mol_phase(b, p):
raise NotImplementedError(_msg(b, "entr_mol_phase"))
@staticmethod
def entr_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "entr_mol_phase_comp"))
@staticmethod
def fug_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "fug_phase_comp"))
@staticmethod
def fug_phase_comp_eq(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_phase_comp_eq"))
@staticmethod
def log_fug_phase_comp_eq(b, p, j, pp):
raise NotImplementedError(_msg(b, "log_fug_phase_comp_eq"))
@staticmethod
def fug_coeff_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "fug_coeff_phase_comp"))
@staticmethod
def fug_coeff_phase_comp_eq(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_coeff_phase_comp_eq"))
@staticmethod
def fug_phase_comp_Tbub(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_phase_comp_Tbub"))
@staticmethod
def fug_phase_comp_Tdew(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_phase_comp_Tdew"))
@staticmethod
def fug_phase_comp_Pbub(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_phase_comp_Pbub"))
@staticmethod
def fug_phase_comp_Pdew(b, p, j, pp):
raise NotImplementedError(_msg(b, "fug_phase_comp_Pdew"))
@staticmethod
def gibbs_mol_phase(b, p):
raise NotImplementedError(_msg(b, "gibbs_mol_phase"))
@staticmethod
def gibbs_mol_phase_comp(b, p, j):
raise NotImplementedError(_msg(b, "gibbs_mol_phase_comp"))
def _msg(b, attr):
return ("{} Equation of State module has not implemented a method for {}. "
"Please contact the EoS developer or use a different module."
.format(b.name, attr))
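# Illustration only (an assumption, not part of the documented API): a concrete
# EoS module is expected to subclass EoSBase and overload the methods it
# supports, e.g.
#     class MyIdealEoS(EoSBase):
#         @staticmethod
#         def dens_mol_phase(b, p):
#             # Ideal gas molar density: n/V = P/(R*T)
#             return b.pressure / (EoSBase.gas_constant(b) * b.temperature)
# Properties left unimplemented fall through to the NotImplementedError
# messages raised above.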
|
{"hexsha": "714e1d177c62313e6366c5fedf865ab1f82a1e11", "size": 10847, "ext": "py", "lang": "Python", "max_stars_repo_path": "idaes/generic_models/properties/core/eos/eos_base.py", "max_stars_repo_name": "dangunter/idaes-pse", "max_stars_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_stars_repo_licenses": ["RSA-MD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "idaes/generic_models/properties/core/eos/eos_base.py", "max_issues_repo_name": "dangunter/idaes-pse", "max_issues_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_issues_repo_licenses": ["RSA-MD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "idaes/generic_models/properties/core/eos/eos_base.py", "max_forks_repo_name": "dangunter/idaes-pse", "max_forks_repo_head_hexsha": "8f63b4ad8000af8a3eb0316a5f61c32e206925d0", "max_forks_repo_licenses": ["RSA-MD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4034482759, "max_line_length": 81, "alphanum_fraction": 0.6304047202, "include": true, "reason": "from pyomo", "num_tokens": 2544}
|
[STATEMENT]
lemma Let_is_action:
"(relation_of A;;
(R(true \<turnstile> (\<lambda> (A, A'). tr A' = tr A \<and> \<not>wait A' \<and> more A' = (decrease v (more A)))))) \<in> {p. is_CSP_process p}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (action.relation_of A ;; R (true \<turnstile> \<lambda>(A, A'). tr A' = tr A \<and> \<not> wait A' \<and> alpha_rp.more A' = decrease v (alpha_rp.more A))) \<in> {p. is_CSP_process p}
[PROOF STEP]
apply (simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_CSP_process (action.relation_of A ;; R (true \<turnstile> \<lambda>(A, A'). tr A' = tr A \<and> \<not> wait A' \<and> alpha_rp.more A' = decrease v (alpha_rp.more A)))
[PROOF STEP]
apply (rule seq_CSP)
[PROOF STATE]
proof (prove)
goal (3 subgoals):
1. action.relation_of A is CSP1 healthy
2. action.relation_of A is R healthy
3. is_CSP_process (R (true \<turnstile> \<lambda>(A, A'). tr A' = tr A \<and> \<not> wait A' \<and> alpha_rp.more A' = decrease v (alpha_rp.more A)))
[PROOF STEP]
apply (auto simp: relation_of_CSP1 relation_of_R)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_CSP_process (R (true \<turnstile> \<lambda>(A, A'). tr A' = tr A \<and> \<not> wait A' \<and> alpha_rp.more A' = decrease v (alpha_rp.more A)))
[PROOF STEP]
apply (rule rd_is_CSP)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>a b. true (a, b\<lparr>ok := True\<rparr>) \<longrightarrow> true (a, b\<lparr>ok := False\<rparr>)
[PROOF STEP]
apply (auto)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 658, "file": "Circus_Denotational_Semantics", "length": 6}
|
\documentclass{sig-alternate-05-2015}
% \usepackage{subfigure}
\usepackage{subfig}
\usepackage{balance}
\usepackage{multirow}
\usepackage{color}
\usepackage{chngpage}
\usepackage{url}
\usepackage{amsmath}
\usepackage{caption}
\usepackage{algorithm}
\usepackage{algpseudocode}
\usepackage{hyperref}
\newtheorem{theorem}{Theorem}[section]
\newcommand{\para}[1]{{\vspace{2pt} \bf \noindent #1 \hspace{8pt}}}
\newenvironment{packed_itemize}{
\begin{itemize}
}{\end{itemize}}
\makeatletter
\def\@copyrightspace{\relax}
\makeatother
\begin{document}
\captionsetup[subfigure]{labelformat=empty}
\title{Gaussian Process Regression}
\subtitle{A practical overview}
\author{
\alignauthor \color{blue}\href{http://lzhbrian.me}{Tzu-Heng Lin}\color{black}, 2014011054, W42\\
\affaddr{Department of Electronic Engineering, Tsinghua
University, Beijing, China\\}
\email{\color{blue}\href{mailto:lzhbrian@gmail.com}{lzhbrian@gmail.com}}
}
\maketitle
\begin{abstract}
\footnote{\color{blue}\href{http://lzhbrian.me}{Tzu-Heng Lin} \color{black} is currently an undergraduate student in the Department of Electronic Engineering, Tsinghua University. His research interests include Big Data Mining, Machine Learning, etc. For more information, please see \color{blue}\href{http://lzhbrian.me}{his personal website}\color{black}.
Please feel free to contact him at any time via \color{blue}\href{mailto:lzhbrian@gmail.com}{email}.}Gaussian Process Regression (GPR) is a powerful, nonparametric tool developed on the basis of Bayesian theory and statistical learning theory. Choosing the right \emph{Mean Function}, \emph{Kernel Function}, \emph{Likelihood Function}, and \emph{Inference Method} is critical to the performance of the model. However, these choices are often hard to make and require considerable expertise and experience.
In this paper, we first give an introduction to the overall process of GPR.
Subsequently, we give a precise explanation of some recent works that emphasize the choice of the \emph{Kernel Function}.
In addition, we run a sufficient number of experiments to systematically analyze the performance of GPR with different \emph{Likelihood Functions} and \emph{Inference Methods}. Our experiments are conducted on two interesting datasets. The best MSEs we obtain in the two experiments are 0.2112 \& 0.0262.
We seek to provide a comprehensive practical overview of the field of Gaussian Process Regression.
\end{abstract}
%
% Use this command to print the description
%
\printccsdesc
\input{introduction}
\input{GPR}
\input{related}
\input{ABCD}
\input{experiments}
% Section: Conclusion
\section{Conclusion} \label{sec:conclusion}
In this paper, we discuss Gaussian Process Regression.
We give a comprehensive introduction to the concept of Gaussian Process Regression.
Specifically, we explain in detail the recent works on automatic kernel construction.
We also conduct two experiments, in which we systematically compare the performance of two Likelihood Functions and three Inference Methods.
\textbf{The best MSEs we obtain in the two experiments are 0.2112 \& 0.0262.}
As future work, we would like to add more material on GPR to this paper, such as the details of how the hyperparameters are derived and a deeper discussion of the Inference Methods, and, if possible, to propose improvements to the available algorithms.
% Last
\renewcommand{\baselinestretch}{1.1}
\balance
% \small
% Acknowledgement
\section{Acknowledgement} \label{sec:acknowledgement}
I would like to thank Yuanxin Zhang and XueChao Wang for discussing the algorithms with me. Without them, I would not have been able to accomplish this work in such a short time. This paper is a project of the Stochastic Process course at Tsinghua University, taught by Prof. Zhijian Ou.\\
\para{Source Code and Dataset} The source code to reproduce all experiments, along with the datasets used in this paper, can be found in my GitHub repository\footnote{Available at \color{blue}\href{http://github.com/lzhbrian/gpr}{http://github.com/lzhbrian/gpr}}. Due to my limited knowledge, there might be some mistakes and flaws in this paper as well as in the code; please do not hesitate to contact and correct me.
% Reference
\bibliographystyle{abbrv}
\bibliography{ref}
\end{document}
|
{"hexsha": "e17c76bdad00d7530c2522855eed37810a7ac137", "size": 4328, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tex/head-full.tex", "max_stars_repo_name": "lzhbrian/gpr", "max_stars_repo_head_hexsha": "912c530fec02e4fe1a4d49b96e6fc3a25b2bdf3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-02-08T13:38:19.000Z", "max_stars_repo_stars_event_max_datetime": "2018-08-07T02:26:12.000Z", "max_issues_repo_path": "tex/head-full.tex", "max_issues_repo_name": "lzhbrian/gpr", "max_issues_repo_head_hexsha": "912c530fec02e4fe1a4d49b96e6fc3a25b2bdf3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tex/head-full.tex", "max_forks_repo_name": "lzhbrian/gpr", "max_forks_repo_head_hexsha": "912c530fec02e4fe1a4d49b96e6fc3a25b2bdf3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4485981308, "max_line_length": 514, "alphanum_fraction": 0.7853512015, "num_tokens": 1098}
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# -------------------------------------------#
# author: sean lee #
# email: xmlee97@gmail.com #
#--------------------------------------------#
"""MIT License
Copyright (c) 2018 Sean
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
if sys.version_info[0] == 2:
reload(sys)
sys.setdefaultencoding('utf8')
range = xrange
import numpy as np
from ..utils.bm25 import BM25
class KeywordTextRank(object):
def __init__(self, words, window=5, alpha=0.85, iters=300):
self.words = words
self.window = window
self.d = alpha
self.vertex = set(words)
self.edge = {}
self.iters = iters
self.build_edge()
self.build_matrix()
self.calc_pr()
def build_edge(self):
N = len(self.words)
for idx, word in enumerate(self.words):
if word not in self.edge.keys():
tmp = set()
l = idx - self.window + 1 # left win
r = idx + self.window # right win
l = 0 if l<0 else l
r = N if r>=N else r
# get window
for i in range(l, r):
if i == idx:
continue
tmp.add(self.words[i])
self.edge[word] = tmp
def build_matrix(self):
self.matrix = np.zeros([len(self.vertex), len(self.vertex)])
        self.word_idx = {}  # word -> node index
        self.idx_dict = {}  # node index -> word
for i, v in enumerate(self.vertex):
self.word_idx[v] = i
self.idx_dict[i] = v
for key in self.edge.keys():
for w in self.edge[key]:
self.matrix[self.word_idx[key]][self.word_idx[w]] = 1
self.matrix[self.word_idx[w]][self.word_idx[key]] = 1
for j in range(self.matrix.shape[1]):
Z = 0
for i in range(self.matrix.shape[0]):
Z += self.matrix[i][j]
for i in range(self.matrix.shape[0]):
self.matrix[i][j] /= Z
def calc_pr(self):
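        # Damped PageRank iteration on the word graph:
        # PR = (1 - d) + d * M * PR, with M the column-normalized adjacency.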
self.PR = np.ones([len(self.vertex), 1])
for i in range(self.iters):
self.PR = (1 - self.d) + self.d * np.dot(self.matrix, self.PR)
    # return the top-k words together with their weights
def topk(self, k):
word_pr = {}
for i in range(len(self.PR)):
word_pr[self.idx_dict[i]] = self.PR[i][0]
res = sorted(word_pr.items(), key = lambda x : x[1], reverse=True)
return res[:k]
class TextRank(object):
def __init__(self, docs, alpha=0.85, min_diff=1e-2, iters=500):
self.docs = docs
self.bm25 = BM25(docs)
self.N = len(docs)
self.d = alpha
self.weight = []
self.weight_sum = []
self.vertex = []
self.iters = iters
self.min_diff = min_diff
self.build()
self.calc_pr()
def build(self):
for idx, doc in enumerate(self.docs):
scores = self.bm25.get_sims(doc)
self.weight.append(scores)
self.weight_sum.append(sum(scores)-scores[idx])
self.vertex.append(1.0)
def calc_pr(self):
for _ in range(self.iters):
m = []
max_diff = 0
for i in range(self.N):
m.append(1 - self.d)
for j in range(self.N):
if j == i or self.weight_sum[j] == 0:
continue
m[-1] += (self.d * self.weight[j][i] / self.weight_sum[j]*self.vertex[j])
if abs(m[-1] - self.vertex[i]) > max_diff:
max_diff = abs(m[-1] - self.vertex[i])
self.vertex = m
if max_diff <= self.min_diff:
break
def topk(self, k):
res = list(enumerate(self.vertex))
        res = sorted(res, key=lambda x: x[1], reverse=True)  # sort by TextRank score
return list(map(lambda x: x[0], res))[:k]
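# Illustrative usage sketch (the tokens and documents below are made-up inputs,
# not part of this module; run where the relative BM25 import resolves):
#     words = ['graph', 'rank', 'graph', 'word', 'rank', 'score']
#     ktr = KeywordTextRank(words, window=3)
#     print(ktr.topk(2))   # top-2 keywords with their PageRank weights
#     docs = [['a', 'b', 'c'], ['b', 'c', 'd'], ['c', 'd', 'e']]
#     tr = TextRank(docs)
#     print(tr.topk(2))    # indices of the top-2 ranked sentences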
|
{"hexsha": "b5545600ac784bff913827191a6083d735515b84", "size": 5133, "ext": "py", "lang": "Python", "max_stars_repo_path": "xmnlp/summary/textrank.py", "max_stars_repo_name": "cukuangjiangjun/Sebastian0606", "max_stars_repo_head_hexsha": "d7bb38ae23f22f95d555b2505411473440bde298", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-12T07:19:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-12T07:19:58.000Z", "max_issues_repo_path": "xmnlp/summary/textrank.py", "max_issues_repo_name": "cukuangjiangjun/Sebastian0606", "max_issues_repo_head_hexsha": "d7bb38ae23f22f95d555b2505411473440bde298", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-14T10:08:37.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-14T10:08:37.000Z", "max_forks_repo_path": "xmnlp/summary/textrank.py", "max_forks_repo_name": "cukuangjiangjun/Sebastian0606", "max_forks_repo_head_hexsha": "d7bb38ae23f22f95d555b2505411473440bde298", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-20T08:58:45.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-20T08:58:45.000Z", "avg_line_length": 34.4496644295, "max_line_length": 93, "alphanum_fraction": 0.5439314241, "include": true, "reason": "import numpy", "num_tokens": 1217}
|
*> \brief \b STRSM
*
* =========== DOCUMENTATION ===========
*
* Online html documentation available at
* http://www.netlib.org/lapack/explore-html/
*
* Definition:
* ===========
*
* SUBROUTINE STRSM(SIDE,UPLO,TRANSA,DIAG,M,N,ALPHA,A,LDA,B,LDB)
*
* .. Scalar Arguments ..
* REAL ALPHA
* INTEGER LDA,LDB,M,N
* CHARACTER DIAG,SIDE,TRANSA,UPLO
* ..
* .. Array Arguments ..
* REAL A(LDA,*),B(LDB,*)
* ..
*
*
*> \par Purpose:
* =============
*>
*> \verbatim
*>
*> STRSM solves one of the matrix equations
*>
*> op( A )*X = alpha*B, or X*op( A ) = alpha*B,
*>
*> where alpha is a scalar, X and B are m by n matrices, A is a unit, or
*> non-unit, upper or lower triangular matrix and op( A ) is one of
*>
*> op( A ) = A or op( A ) = A**T.
*>
*> The matrix X is overwritten on B.
*> \endverbatim
*
* Arguments:
* ==========
*
*> \param[in] SIDE
*> \verbatim
*> SIDE is CHARACTER*1
*> On entry, SIDE specifies whether op( A ) appears on the left
*> or right of X as follows:
*>
*> SIDE = 'L' or 'l' op( A )*X = alpha*B.
*>
*> SIDE = 'R' or 'r' X*op( A ) = alpha*B.
*> \endverbatim
*>
*> \param[in] UPLO
*> \verbatim
*> UPLO is CHARACTER*1
*> On entry, UPLO specifies whether the matrix A is an upper or
*> lower triangular matrix as follows:
*>
*> UPLO = 'U' or 'u' A is an upper triangular matrix.
*>
*> UPLO = 'L' or 'l' A is a lower triangular matrix.
*> \endverbatim
*>
*> \param[in] TRANSA
*> \verbatim
*> TRANSA is CHARACTER*1
*> On entry, TRANSA specifies the form of op( A ) to be used in
*> the matrix multiplication as follows:
*>
*> TRANSA = 'N' or 'n' op( A ) = A.
*>
*> TRANSA = 'T' or 't' op( A ) = A**T.
*>
*> TRANSA = 'C' or 'c' op( A ) = A**T.
*> \endverbatim
*>
*> \param[in] DIAG
*> \verbatim
*> DIAG is CHARACTER*1
*> On entry, DIAG specifies whether or not A is unit triangular
*> as follows:
*>
*> DIAG = 'U' or 'u' A is assumed to be unit triangular.
*>
*> DIAG = 'N' or 'n' A is not assumed to be unit
*> triangular.
*> \endverbatim
*>
*> \param[in] M
*> \verbatim
*> M is INTEGER
*> On entry, M specifies the number of rows of B. M must be at
*> least zero.
*> \endverbatim
*>
*> \param[in] N
*> \verbatim
*> N is INTEGER
*> On entry, N specifies the number of columns of B. N must be
*> at least zero.
*> \endverbatim
*>
*> \param[in] ALPHA
*> \verbatim
*> ALPHA is REAL
*> On entry, ALPHA specifies the scalar alpha. When alpha is
*> zero then A is not referenced and B need not be set before
*> entry.
*> \endverbatim
*>
*> \param[in] A
*> \verbatim
*> A is REAL array, dimension ( LDA, k ),
*> where k is m when SIDE = 'L' or 'l'
*> and k is n when SIDE = 'R' or 'r'.
*> Before entry with UPLO = 'U' or 'u', the leading k by k
*> upper triangular part of the array A must contain the upper
*> triangular matrix and the strictly lower triangular part of
*> A is not referenced.
*> Before entry with UPLO = 'L' or 'l', the leading k by k
*> lower triangular part of the array A must contain the lower
*> triangular matrix and the strictly upper triangular part of
*> A is not referenced.
*> Note that when DIAG = 'U' or 'u', the diagonal elements of
*> A are not referenced either, but are assumed to be unity.
*> \endverbatim
*>
*> \param[in] LDA
*> \verbatim
*> LDA is INTEGER
*> On entry, LDA specifies the first dimension of A as declared
*> in the calling (sub) program. When SIDE = 'L' or 'l' then
*> LDA must be at least max( 1, m ), when SIDE = 'R' or 'r'
*> then LDA must be at least max( 1, n ).
*> \endverbatim
*>
*> \param[in,out] B
*> \verbatim
*> B is REAL array, dimension ( LDB, N )
*> Before entry, the leading m by n part of the array B must
*> contain the right-hand side matrix B, and on exit is
*> overwritten by the solution matrix X.
*> \endverbatim
*>
*> \param[in] LDB
*> \verbatim
*> LDB is INTEGER
*> On entry, LDB specifies the first dimension of B as declared
*> in the calling (sub) program. LDB must be at least
*> max( 1, m ).
*> \endverbatim
*
* Authors:
* ========
*
*> \author Univ. of Tennessee
*> \author Univ. of California Berkeley
*> \author Univ. of Colorado Denver
*> \author NAG Ltd.
*
*> \date December 2016
*
*> \ingroup single_blas_level3
*
*> \par Further Details:
* =====================
*>
*> \verbatim
*>
*> Level 3 Blas routine.
*>
*>
*> -- Written on 8-February-1989.
*> Jack Dongarra, Argonne National Laboratory.
*> Iain Duff, AERE Harwell.
*> Jeremy Du Croz, Numerical Algorithms Group Ltd.
*> Sven Hammarling, Numerical Algorithms Group Ltd.
*> \endverbatim
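*>
*> Usage sketch (illustrative only, not part of the reference text):
*> to solve A*X = 2*B for X, with A upper triangular, non-unit
*> diagonal, op( A ) = A, and B overwritten by the solution:
*>
*>    CALL STRSM('L','U','N','N',M,N,2.0,A,LDA,B,LDB)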
*>
* =====================================================================
SUBROUTINE STRSM(SIDE,UPLO,TRANSA,DIAG,M,N,ALPHA,A,LDA,B,LDB)
*
* -- Reference BLAS level3 routine (version 3.7.0) --
* -- Reference BLAS is a software package provided by Univ. of Tennessee, --
* -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
* December 2016
*
* .. Scalar Arguments ..
REAL ALPHA
INTEGER LDA,LDB,M,N
CHARACTER DIAG,SIDE,TRANSA,UPLO
* ..
* .. Array Arguments ..
REAL A(LDA,*),B(LDB,*)
* ..
*
* =====================================================================
*
* .. External Functions ..
LOGICAL LSAME
EXTERNAL LSAME
* ..
* .. External Subroutines ..
EXTERNAL XERBLA
* ..
* .. Intrinsic Functions ..
INTRINSIC MAX
* ..
* .. Local Scalars ..
REAL TEMP
INTEGER I,INFO,J,K,NROWA
LOGICAL LSIDE,NOUNIT,UPPER
* ..
* .. Parameters ..
REAL ONE,ZERO
PARAMETER (ONE=1.0E+0,ZERO=0.0E+0)
* ..
*
* Test the input parameters.
*
LSIDE = LSAME(SIDE,'L')
IF (LSIDE) THEN
NROWA = M
ELSE
NROWA = N
END IF
NOUNIT = LSAME(DIAG,'N')
UPPER = LSAME(UPLO,'U')
*
INFO = 0
IF ((.NOT.LSIDE) .AND. (.NOT.LSAME(SIDE,'R'))) THEN
INFO = 1
ELSE IF ((.NOT.UPPER) .AND. (.NOT.LSAME(UPLO,'L'))) THEN
INFO = 2
ELSE IF ((.NOT.LSAME(TRANSA,'N')) .AND.
+ (.NOT.LSAME(TRANSA,'T')) .AND.
+ (.NOT.LSAME(TRANSA,'C'))) THEN
INFO = 3
ELSE IF ((.NOT.LSAME(DIAG,'U')) .AND. (.NOT.LSAME(DIAG,'N'))) THEN
INFO = 4
ELSE IF (M.LT.0) THEN
INFO = 5
ELSE IF (N.LT.0) THEN
INFO = 6
ELSE IF (LDA.LT.MAX(1,NROWA)) THEN
INFO = 9
ELSE IF (LDB.LT.MAX(1,M)) THEN
INFO = 11
END IF
IF (INFO.NE.0) THEN
CALL XERBLA('STRSM ',INFO)
RETURN
END IF
*
* Quick return if possible.
*
IF (M.EQ.0 .OR. N.EQ.0) RETURN
*
* And when alpha.eq.zero.
*
IF (ALPHA.EQ.ZERO) THEN
DO 20 J = 1,N
DO 10 I = 1,M
B(I,J) = ZERO
10 CONTINUE
20 CONTINUE
RETURN
END IF
*
* Start the operations.
*
IF (LSIDE) THEN
IF (LSAME(TRANSA,'N')) THEN
*
* Form B := alpha*inv( A )*B.
*
IF (UPPER) THEN
DO 60 J = 1,N
IF (ALPHA.NE.ONE) THEN
DO 30 I = 1,M
B(I,J) = ALPHA*B(I,J)
30 CONTINUE
END IF
DO 50 K = M,1,-1
IF (B(K,J).NE.ZERO) THEN
IF (NOUNIT) B(K,J) = B(K,J)/A(K,K)
DO 40 I = 1,K - 1
B(I,J) = B(I,J) - B(K,J)*A(I,K)
40 CONTINUE
END IF
50 CONTINUE
60 CONTINUE
ELSE
DO 100 J = 1,N
IF (ALPHA.NE.ONE) THEN
DO 70 I = 1,M
B(I,J) = ALPHA*B(I,J)
70 CONTINUE
END IF
DO 90 K = 1,M
IF (B(K,J).NE.ZERO) THEN
IF (NOUNIT) B(K,J) = B(K,J)/A(K,K)
DO 80 I = K + 1,M
B(I,J) = B(I,J) - B(K,J)*A(I,K)
80 CONTINUE
END IF
90 CONTINUE
100 CONTINUE
END IF
ELSE
*
* Form B := alpha*inv( A**T )*B.
*
IF (UPPER) THEN
DO 130 J = 1,N
DO 120 I = 1,M
TEMP = ALPHA*B(I,J)
DO 110 K = 1,I - 1
TEMP = TEMP - A(K,I)*B(K,J)
110 CONTINUE
IF (NOUNIT) TEMP = TEMP/A(I,I)
B(I,J) = TEMP
120 CONTINUE
130 CONTINUE
ELSE
DO 160 J = 1,N
DO 150 I = M,1,-1
TEMP = ALPHA*B(I,J)
DO 140 K = I + 1,M
TEMP = TEMP - A(K,I)*B(K,J)
140 CONTINUE
IF (NOUNIT) TEMP = TEMP/A(I,I)
B(I,J) = TEMP
150 CONTINUE
160 CONTINUE
END IF
END IF
ELSE
IF (LSAME(TRANSA,'N')) THEN
*
* Form B := alpha*B*inv( A ).
*
IF (UPPER) THEN
DO 210 J = 1,N
IF (ALPHA.NE.ONE) THEN
DO 170 I = 1,M
B(I,J) = ALPHA*B(I,J)
170 CONTINUE
END IF
DO 190 K = 1,J - 1
IF (A(K,J).NE.ZERO) THEN
DO 180 I = 1,M
B(I,J) = B(I,J) - A(K,J)*B(I,K)
180 CONTINUE
END IF
190 CONTINUE
IF (NOUNIT) THEN
TEMP = ONE/A(J,J)
DO 200 I = 1,M
B(I,J) = TEMP*B(I,J)
200 CONTINUE
END IF
210 CONTINUE
ELSE
DO 260 J = N,1,-1
IF (ALPHA.NE.ONE) THEN
DO 220 I = 1,M
B(I,J) = ALPHA*B(I,J)
220 CONTINUE
END IF
DO 240 K = J + 1,N
IF (A(K,J).NE.ZERO) THEN
DO 230 I = 1,M
B(I,J) = B(I,J) - A(K,J)*B(I,K)
230 CONTINUE
END IF
240 CONTINUE
IF (NOUNIT) THEN
TEMP = ONE/A(J,J)
DO 250 I = 1,M
B(I,J) = TEMP*B(I,J)
250 CONTINUE
END IF
260 CONTINUE
END IF
ELSE
*
* Form B := alpha*B*inv( A**T ).
*
IF (UPPER) THEN
DO 310 K = N,1,-1
IF (NOUNIT) THEN
TEMP = ONE/A(K,K)
DO 270 I = 1,M
B(I,K) = TEMP*B(I,K)
270 CONTINUE
END IF
DO 290 J = 1,K - 1
IF (A(J,K).NE.ZERO) THEN
TEMP = A(J,K)
DO 280 I = 1,M
B(I,J) = B(I,J) - TEMP*B(I,K)
280 CONTINUE
END IF
290 CONTINUE
IF (ALPHA.NE.ONE) THEN
DO 300 I = 1,M
B(I,K) = ALPHA*B(I,K)
300 CONTINUE
END IF
310 CONTINUE
ELSE
DO 360 K = 1,N
IF (NOUNIT) THEN
TEMP = ONE/A(K,K)
DO 320 I = 1,M
B(I,K) = TEMP*B(I,K)
320 CONTINUE
END IF
DO 340 J = K + 1,N
IF (A(J,K).NE.ZERO) THEN
TEMP = A(J,K)
DO 330 I = 1,M
B(I,J) = B(I,J) - TEMP*B(I,K)
330 CONTINUE
END IF
340 CONTINUE
IF (ALPHA.NE.ONE) THEN
DO 350 I = 1,M
B(I,K) = ALPHA*B(I,K)
350 CONTINUE
END IF
360 CONTINUE
END IF
END IF
END IF
*
RETURN
*
* End of STRSM .
*
END
|
{"hexsha": "aa805f6b6c4b06d81eca64985c0bf62a0c94d8a7", "size": 13679, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lapack-netlib/BLAS/SRC/strsm.f", "max_stars_repo_name": "drhpc/OpenBLAS", "max_stars_repo_head_hexsha": "9721b57ecfd194f1a4aaa08d715735cd9e8ad8b6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4392, "max_stars_repo_stars_event_min_datetime": "2015-01-02T18:15:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T12:14:38.000Z", "max_issues_repo_path": "lapack-netlib/BLAS/SRC/strsm.f", "max_issues_repo_name": "drhpc/OpenBLAS", "max_issues_repo_head_hexsha": "9721b57ecfd194f1a4aaa08d715735cd9e8ad8b6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2067, "max_issues_repo_issues_event_min_datetime": "2015-01-01T03:50:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:59:43.000Z", "max_forks_repo_path": "lapack-netlib/BLAS/SRC/strsm.f", "max_forks_repo_name": "drhpc/OpenBLAS", "max_forks_repo_head_hexsha": "9721b57ecfd194f1a4aaa08d715735cd9e8ad8b6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1564, "max_forks_repo_forks_event_min_datetime": "2015-01-01T01:32:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:12:54.000Z", "avg_line_length": 30.8085585586, "max_line_length": 80, "alphanum_fraction": 0.3982747277, "num_tokens": 3700}
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Monte carlo replay with support for auxiliary MC rewards."""
from typing import Sequence
from dopamine.replay_memory import circular_replay_buffer as crb
import gin
import numpy as np
ReplayElement = crb.ReplayElement
@gin.configurable
class MultiMCReplayBuffer(crb.OutOfGraphReplayBuffer):
"""This is an extension of the circular RB that handles Monte Carlo rollouts.
Specifically, it supports two kinds of sampling:
- Regular n-step sampling.
- MonteCarlo rollout sampling (for a different value of n).
"""
def __init__(self,
observation_shape,
stack_size,
replay_capacity = 1000000,
batch_size = 32,
update_horizon = 10,
gamma = 0.99,
max_sample_attempts = 1000,
extra_storage_types = (),
extra_reward_storage_types = (),
observation_dtype = np.uint8,
num_additional_discount_factors = 32):
"""Initializes OutOfGraphReplayBufferWithMC.
Note that not all constructor parameters are replicated here. The rest can
be set via a gin config file.
Args:
observation_shape: tuple of ints.
stack_size: int, number of frames to use in state stack.
replay_capacity: int, number of transitions to keep in memory.
batch_size: int.
update_horizon: int, the length of the MC horizon.
gamma: int, the discount factor.
max_sample_attempts: int, the maximum number of attempts allowed to
get a sample.
extra_storage_types: list of ReplayElements defining the type of the extra
contents that will be stored and returned by sample_transition_batch.
extra_reward_storage_types: list of ReplayElements defining the type of
the extra contents that will be stored and returned by
sample_transition_batch. These elements will also be subject to
the MC return calculation.
observation_dtype: np.dtype, type of the observations. Defaults to
np.uint8 for Atari 2600.
num_additional_discount_factors: The number of additional discount
factors to train with. These additional discount factors must
be supplied to sample_transition_batch.
"""
self._num_discount_factors = num_additional_discount_factors
# TODO(joshgreaves): Implement.
if extra_reward_storage_types:
raise NotImplementedError(
'extra_reward_storage_types hasn\'t been implemented yet.')
extra_storage_types = list(extra_storage_types)
extra_storage_types.extend([
ReplayElement('additional_discount_factor_returns',
(self._num_discount_factors,),
np.float32),
])
super().__init__(
observation_shape,
stack_size,
replay_capacity,
batch_size,
update_horizon=update_horizon,
gamma=gamma,
max_sample_attempts=max_sample_attempts,
extra_storage_types=extra_storage_types,
observation_dtype=observation_dtype)
def sample_transition_batch(self,
batch_size=None,
indices=None,
*,
extra_discounts):
"""Returns a batch of transitions (including any extra contents).
There are two different horizons being considered here, one for the regular
transitions, and one for doing Monte Carlo rollouts for estimating returns.
Args:
batch_size: int, number of transitions returned. If None, the default
batch_size will be used.
indices: None or list of ints, the indices of every transition in the
batch. If None, sample the indices uniformly.
extra_discounts: If supplied, will compute the return for each discount
in the sequence.
Returns:
transition_batch: tuple of np.arrays with the shape and type as in
get_transition_elements().
Raises:
ValueError: If an element to be sampled is missing from the replay buffer.
"""
if len(extra_discounts) != self._num_discount_factors:
raise ValueError(
          f'The number of supplied discount factors ({len(extra_discounts)}) '
'must be equal to num_discount_factors '
f'({self._num_discount_factors})')
if batch_size is None:
batch_size = self._batch_size
if indices is None:
indices = self.sample_index_batch(batch_size)
assert len(indices) == batch_size
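    # Build a (num_discounts, update_horizon) matrix of cumulative discounts:
    # after the tile/cumprod below, row i is [1, g_i, g_i**2, ...] for extra
    # discount g_i, so a dot product with the trajectory rewards gives the
    # discounted return under each extra discount factor.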
extra_discounts = np.asarray(extra_discounts).reshape(-1, 1)
extra_discounts = np.tile(
extra_discounts, (1, self._update_horizon))
extra_discounts[:, 0] = 1.0 # Required to make first step undiscounted.
extra_discounts = np.cumprod(extra_discounts, axis=1)
transition_elements = self.get_transition_elements(batch_size)
batch_arrays = self._create_batch_arrays(batch_size)
for batch_index, state_index in enumerate(indices):
# Get transitions for regular updates.
trajectory_indices = [(state_index + j) % self._replay_capacity
for j in range(self._update_horizon)]
trajectory_terminals = self._store['terminal'][trajectory_indices]
is_terminal_transition = trajectory_terminals.any()
if not is_terminal_transition:
trajectory_length = self._update_horizon
else:
# np.argmax of a bool array returns the index of the first True.
trajectory_length = (
np.argmax(trajectory_terminals.astype(bool), 0) + 1)
next_state_index = state_index + trajectory_length
trajectory_discount_vector = (
self._cumulative_discount_vector[:trajectory_length])
extra_discount_vector = extra_discounts[:, :trajectory_length]
trajectory_rewards = self.get_range(self._store['reward'],
state_index,
next_state_index)
# Fill the contents of each array in the sampled batch.
assert len(transition_elements) == len(batch_arrays)
for element_array, element in zip(batch_arrays, transition_elements):
if element.name == 'state':
element_array[batch_index] = self.get_observation_stack(state_index)
elif element.name == 'reward':
# compute the discounted sum of rewards in the trajectory.
element_array[batch_index] = trajectory_discount_vector.dot(
trajectory_rewards)
elif element.name == 'additional_discount_factor_returns':
element_array[batch_index] = (
extra_discount_vector.dot(trajectory_rewards))
elif element.name == 'next_state':
element_array[batch_index] = self.get_observation_stack(
(next_state_index) % self._replay_capacity)
elif element.name == 'terminal':
element_array[batch_index] = is_terminal_transition
elif element.name == 'indices':
element_array[batch_index] = state_index
elif element.name in list(self._store.keys()):
element_array[batch_index] = (
self._store[element.name][state_index])
# We assume the other elements are filled in by the subclass.
return batch_arrays
def get_add_args_signature(self):
add_args = [
ReplayElement('observation', self._observation_shape,
self._observation_dtype),
ReplayElement('action', self._action_shape, self._action_dtype),
ReplayElement('reward', self._reward_shape, self._reward_dtype),
ReplayElement('terminal', (), self._terminal_dtype)
]
for extra_replay_element in self._extra_storage_types:
if extra_replay_element.name == 'additional_discount_factor_returns':
continue
add_args.append(extra_replay_element)
return add_args
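# Illustrative smoke test (not part of the original module): fill a small
# buffer with random transitions and draw one batch. The shapes, counts and
# discount values below are assumptions chosen only to exercise the API.
if __name__ == '__main__':
  buf = MultiMCReplayBuffer(
      observation_shape=(4, 4),
      stack_size=1,
      replay_capacity=100,
      batch_size=2,
      update_horizon=3,
      num_additional_discount_factors=2)
  rng = np.random.default_rng(0)
  for step in range(20):
    # Random observation, dummy action/reward, terminal every 10th step.
    buf.add(rng.integers(0, 256, size=(4, 4), dtype=np.uint8),
            0, 1.0, step % 10 == 9)
  batch = buf.sample_transition_batch(extra_discounts=[0.9, 0.99])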
|
{"hexsha": "2b39cdde44ece0a2c40f586b6baf8f44c74d6b7d", "size": 8464, "ext": "py", "lang": "Python", "max_stars_repo_path": "aux_tasks/auxiliary_mc/multi_mc_replay_buffer.py", "max_stars_repo_name": "dumpmemory/google-research", "max_stars_repo_head_hexsha": "bc87d010ab9086b6e92c3f075410fa6e1f27251b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aux_tasks/auxiliary_mc/multi_mc_replay_buffer.py", "max_issues_repo_name": "dumpmemory/google-research", "max_issues_repo_head_hexsha": "bc87d010ab9086b6e92c3f075410fa6e1f27251b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aux_tasks/auxiliary_mc/multi_mc_replay_buffer.py", "max_forks_repo_name": "dumpmemory/google-research", "max_forks_repo_head_hexsha": "bc87d010ab9086b6e92c3f075410fa6e1f27251b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6923076923, "max_line_length": 80, "alphanum_fraction": 0.6813563327, "include": true, "reason": "import numpy", "num_tokens": 1702}
|
import mysql.connector
from asyncore import read
from PyQt5 import uic
from PyQt5 import QtWidgets
from numpy import save
from reportlab.pdfgen import canvas
c = 0
# Connecting to the database
con = mysql.connector.connect(
host='localhost', database='cadastro_estoque', user='andre2', password='anova123')
# Inserts the values typed into the form fields into the SQL database
def insert():
linha1 = formulario.lineEdit.text()
linha2 = formulario.lineEdit_2.text()
linha3 = formulario.lineEdit_3.text()
linha4 = formulario.lineEdit_4.text()
categoria = ("")
if formulario.checkBox.isChecked():
print("Item adicionado à categoria Informática.")
categoria = ("Informática")
elif formulario.checkBox_2.isChecked():
print("Item adicionado à categoria Alimentos.")
categoria = ("Alimentos")
elif formulario.checkBox_3.isChecked():
print("Item adicionado à categoria Eletrodomésticos.")
categoria = ("Eletrodomésticos")
elif formulario.checkBox_4.isChecked():
print("Item adicionado à categoria Cama, Mesa e Banho.")
categoria = ("Cama, Mesa e Banho")
elif formulario.checkBox_5.isChecked():
print("Item adicionado à categoria Brinquedos.")
categoria = ("Brinquedos")
elif formulario.checkBox_6.isChecked():
print("Item adicionado à categoria Produtos de Limpeza.")
categoria = ("Produtos de Limpeza")
elif formulario.checkBox_7.isChecked():
print("Item adicionado à categoria Higiene Pessoal.")
categoria = ("Higiene Pessoal")
    # Print the inserted items to the terminal for a quick sanity check.
print("Codigo ", linha1)
print("Descrição ", linha2)
print("Preço ", linha3)
print("Categoria", linha4)
    # SQL insert statement
cursor = con.cursor()
query = (
"INSERT INTO produtos (codigo, descricao, preco, categoria, quantidade) VALUES (%s,%s,%s,%s,%s)")
dados = (str(linha1), str(linha2), str(linha3), categoria, str(linha4))
cursor.execute(query, dados)
con.commit()
    # Clear the text fields after each registration
formulario.lineEdit.setText("")
formulario.lineEdit_2.setText("")
formulario.lineEdit_3.setText("")
formulario.lineEdit_4.setText("")
# Query the database
def consult():
consultar.show()
cursor = con.cursor()
query = ("SELECT codigo, descricao, preco, categoria, quantidade FROM produtos;")
cursor.execute(query)
readed_data = cursor.fetchall()
consultar.tableWidget.setRowCount(len(readed_data))
consultar.tableWidget.setColumnCount(5)
for i in range(0, len(readed_data)):
for j in range(0, 5):
consultar.tableWidget.setItem(
i, j, QtWidgets.QTableWidgetItem(str(readed_data[i][j])))
# Export to a file
def export():
cursor = con.cursor()
query = ("SELECT * FROM produtos")
cursor.execute(query)
readed_data = cursor.fetchall()
y = 0
pdf = canvas.Canvas("cadastro_produtos.pdf")
pdf.setFont("Times-Bold", 20)
pdf.drawString(200, 800, "Produtos Cadastrados:")
pdf.setFont("Times-Bold", 12)
    # Header positions in the generated PDF, given as "x, y"
    # x is the horizontal offset between headers on the same row
pdf.drawString(10, 750, "CÓD")
pdf.drawString(50, 750, "PRODUTO")
pdf.drawString(280, 750, "PREÇO")
pdf.drawString(330, 750, "CATEGORIA")
pdf.drawString(480, 750, "QTD")
    # The 15-unit increment below sets the spacing between rows.
for i in range(0, len(readed_data)):
y = y + 15
pdf.drawString(10, 750 - y, str(readed_data[i][0]))
pdf.drawString(50, 750 - y, str(readed_data[i][1]))
pdf.drawString(280, 750 - y, str(readed_data[i][2]))
pdf.drawString(330, 750 - y, str(readed_data[i][3]))
pdf.drawString(480, 750 - y, str(readed_data[i][4]))
#pdf.drawString(420, 750 - y, str(readed_data[i][5]))
pdf.save()
print("Planilha gerada com sucesso.")
def delete():
line = consultar.tableWidget.currentRow()
consultar.tableWidget.removeRow(line)
cursor = con.cursor()
selectquery = ("Select codigo FROM produtos")
cursor.execute(selectquery)
readed_data = cursor.fetchall()
codigo = readed_data[line][0]
print(codigo)
cursor.close()
cursor = con.cursor()
deletequery = (
"DELETE FROM cadastro_estoque.produtos WHERE codigo = " + str(codigo) + (";"))
cursor.execute(deletequery)
con.commit()
print("Item excluido da lista.")
# Edit any record stored in the DB
def edit():
global c
line = consultar.tableWidget.currentRow()
cursor = con.cursor()
selectquery = ("Select codigo FROM produtos")
cursor.execute(selectquery)
readed_data = cursor.fetchall()
codigo = readed_data[line][0]
cursor = con.cursor()
selectquery2 = (
"SELECT * FROM produtos WHERE codigo = " + str(codigo) + (";"))
cursor.execute(selectquery2)
produto = cursor.fetchall()
editwindow.show()
c = codigo
editwindow.lineEdit.setText(str(produto[0][0]))
editwindow.lineEdit_2.setText(str(produto[0][1]))
editwindow.lineEdit_3.setText(str(produto[0][2]))
editwindow.lineEdit_4.setText(str(produto[0][3]))
editwindow.lineEdit_5.setText(str(produto[0][4]))
# Save the edited data
def save():
    # Product code of the row being edited
global c
    # Values typed into the text boxes for editing
descricao = editwindow.lineEdit_2.text()
preco = editwindow.lineEdit_3.text()
categoria = editwindow.lineEdit_4.text()
quantidade = editwindow.lineEdit_5.text()
    # Updating the database
cursor = con.cursor()
editquery = ("UPDATE produtos SET descricao = '{}', preco = '{}', categoria = '{}', quantidade = '{}' WHERE codigo = {}".format(
descricao, preco, categoria, quantidade, c))
cursor.execute(editquery)
con.commit()
print("Dados alterados com sucesso!")
editwindow.close()
consultar.close()
consult()
# Build the UI and wire the button signals
app = QtWidgets.QApplication([])
formulario = uic.loadUi(
"/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/formulario.ui")
consultar = uic.loadUi(
"/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/consultar.ui")
editwindow = uic.loadUi(
"/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/editwindow.ui")
formulario.pushButton.clicked.connect(insert)
formulario.pushButton_2.clicked.connect(consult)
consultar.pushButton.clicked.connect(export)
consultar.pushButton_2.clicked.connect(delete)
consultar.pushButton_3.clicked.connect(edit)
editwindow.pushButton.clicked.connect(save)
formulario.show()
app.exec()
""" Finalmente, com o pouco tempo que eu tenho consegui finalizar essa parte, à partir daqui é que começa a parte de praticar, ainda assim, como estou iniciando tive muita dificuldade em conseguir que todas as funções funcionassem, pesquisei por conta própria, tirei dúvidas em grupos do Telegram, o GitHub me ajudou demais, várias das dúvidas e vários problemas que eu tive foram possíveis encontrar a solução lá.
A ideia agora, é criar uma tela de Login e com esse login se conectar ao banco de dados, delegando menos atribuições e deixando as funções de editar e excluir dados apenas para o administrador, além do mais, será acrescentada uma tela que será basicamente a tela de um caixa de comércio, que irá buscar o produto pelo código lido, inserir em um display e fazer a somatória dos valores de cada produto."""
|
{"hexsha": "af9f9905b7162b507306b46c8df041976601f3ff", "size": 7611, "ext": "py", "lang": "Python", "max_stars_repo_path": "cadastro.py", "max_stars_repo_name": "Andreambu23/Cadastro-de-Produtos-main", "max_stars_repo_head_hexsha": "34ee6d6bf1016defae6ad62fb1301bc7bc203bd9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cadastro.py", "max_issues_repo_name": "Andreambu23/Cadastro-de-Produtos-main", "max_issues_repo_head_hexsha": "34ee6d6bf1016defae6ad62fb1301bc7bc203bd9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cadastro.py", "max_forks_repo_name": "Andreambu23/Cadastro-de-Produtos-main", "max_forks_repo_head_hexsha": "34ee6d6bf1016defae6ad62fb1301bc7bc203bd9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5954545455, "max_line_length": 414, "alphanum_fraction": 0.6909735909, "include": true, "reason": "from numpy", "num_tokens": 1884}
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import sys
import os
def harmonicProps(_direction, _frequency):
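    """Piecewise-linear pedestrian-load envelope for a footbridge.

    Returns (reduction factor, harmonic force amplitude, harmonic number)
    for the given vibration direction and natural frequency; outside the
    frequency bands the load is zero. The bands and the 280/70 amplitudes
    are hard-coded here and appear to follow a Setra/HIVOSS-style
    pedestrian loading model (an assumption, not stated in the source).
    """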
if _direction == 'vertical':
if _frequency > 1 and _frequency < 2.6:
if _frequency >= 1.7 and _frequency <= 2.1:
return 1, 280, '1'
elif _frequency < 1.7:
return 1/0.7*(_frequency - 1), 280, '1'
else:
return 1 - 1/0.5*(_frequency - 2.1), 280, '1'
elif _frequency > 2.6 and _frequency < 5:
if _frequency >= 3.4 and _frequency <= 4.2:
return 1, 70, '2'
elif _frequency < 3.4:
return 1/0.8*(_frequency - 2.6), 70, '2'
else:
return 1 - 1/0.8*(_frequency - 4.2), 70, '2'
else:
return 0, 0, 'Nenhum'
elif _direction == 'lateral':
if _frequency > 0.3 and _frequency < 1.3:
if _frequency >= 0.5 and _frequency <= 1.1:
return 1, 280, '1'
elif _frequency < 0.5:
return 1/0.2*(_frequency - 0.3), 280, '1'
else:
return 1 - 1/0.2*(_frequency - 1.1), 280, '1'
elif _frequency > 1.3 and _frequency < 2.5:
if _frequency >= 1.7 and _frequency <= 2.1:
return 1, 70, '2'
elif _frequency < 1.7:
return 1/0.4*(_frequency - 1.3), 70, '2'
else:
return 1 - 1/0.4*(_frequency - 2.1), 70, '2'
else:
return 0, 0, 'Nenhum'
def formatPath(_path):
return _path.replace('\\', '/')
def displaceAndNormalize(_data):
newData = list()
minValue = _data[0]
maxValue = _data[0]
for value in _data:
if abs(value) < abs(minValue):
minValue = value
if abs(value) > abs(maxValue):
maxValue = value
for value in _data:
newData.append((value - minValue)/(maxValue - minValue))
return newData
def convertDataToFloat(_data):
newFloatData = list()
for value in _data:
if ',' in str(value):
newFloatData.append(float(str(value).replace(',', '.')))
else:
newFloatData.append(float(value))
return newFloatData
def newtonInterpol(_point, _xdata, _ydata):
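    """Cubic interpolation of _ydata at _point by Newton divided differences.

    Picks the four nodes surrounding _point (falling back to the first or
    last four near the boundaries), builds the divided-difference table
    delt11..delt31 and evaluates the degree-3 Newton form directly.
    """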
for i, value in enumerate(_xdata):
if value == _point:
return _ydata[i]
elif i > 0 and i < len(_xdata) - 3:
if _point > value and _point < _xdata[i + 1]:
x1 = _xdata[i - 1]
y1 = _ydata[i - 1]
x2 = _xdata[i]
y2 = _ydata[i]
x3 = _xdata[i + 1]
y3 = _ydata[i + 1]
x4 = _xdata[i + 2]
y4 = _ydata[i + 2]
delt11 = (y2 - y1)/(x2 - x1)
delt12 = (y3 - y2)/(x3 - x2)
delt13 = (y4 - y3)/(x4 - x3)
delt21 = (delt12 - delt11)/(x3 - x1)
delt22 = (delt13 - delt12)/(x4 - x2)
delt31 = (delt22 - delt21)/(x4 - x1)
return y1 + (_point - x1)*delt11 + (_point - x1)*(_point - x2)*delt21 + (_point - x1)*(_point - x2)*(_point - x3)*delt31
if i <= 1 and _point < value:
x1 = _xdata[i + 3]
y1 = _ydata[i + 3]
x2 = _xdata[i + 2]
y2 = _ydata[i + 2]
x3 = _xdata[i + 1]
y3 = _ydata[i + 1]
x4 = _xdata[i]
y4 = _ydata[i]
delt11 = (y2 - y1)/(x2 - x1)
delt12 = (y3 - y2)/(x3 - x2)
delt13 = (y4 - y3)/(x4 - x3)
delt21 = (delt12 - delt11)/(x3 - x1)
delt22 = (delt13 - delt12)/(x4 - x2)
delt31 = (delt22 - delt21)/(x4 - x1)
return y1 + (_point - x1)*delt11 + (_point - x1)*(_point - x2)*delt21 + (_point - x1)*(_point - x2)*(_point - x3)*delt31
elif i >= len(_xdata) - 3:
x1 = _xdata[i - 3]
y1 = _ydata[i - 3]
x2 = _xdata[i - 2]
y2 = _ydata[i - 2]
x3 = _xdata[i - 1]
y3 = _ydata[i - 1]
x4 = _xdata[i]
y4 = _ydata[i]
delt11 = (y2 - y1)/(x2 - x1)
delt12 = (y3 - y2)/(x3 - x2)
delt13 = (y4 - y3)/(x4 - x3)
delt21 = (delt12 - delt11)/(x3 - x1)
delt22 = (delt13 - delt12)/(x4 - x2)
delt31 = (delt22 - delt21)/(x4 - x1)
return y1 + (_point - x1)*delt11 + (_point - x1)*(_point - x2)*delt21 + (_point - x1)*(_point - x2)*(_point - x3)*delt31
def gaussianQuadrature1(_numPoints, _supLimit, _infLimit):
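    """Gauss-Legendre quadrature of the interpolated mode shape over the span.

    Relies on the module-level xPosition and fi arrays. Note the
    (sup, inf) argument order, the mirror of gaussianQuadrature2: called
    below as (ptGauss, 0, lenght), the Jacobian (_supLimit - _infLimit)/2
    is negative, so the result carries the opposite sign of the integral
    over [0, lenght].
    """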
points, weights = np.polynomial.legendre.leggauss(_numPoints)
solToPoint = np.zeros((_numPoints))
sol = 0
for i, value in enumerate(solToPoint):
x = (_supLimit - _infLimit) / 2 * points[i] + (_supLimit + _infLimit) / 2
value = (_supLimit - _infLimit) / 2 * newtonInterpol(x, xPosition, fi)
sol += weights[i] * value
return sol
def gaussianQuadrature2(_numPoints, _infLimit, _supLimit):
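    """Gauss-Legendre quadrature of fi**2 over [_infLimit, _supLimit] (modal-mass integral)."""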
points, weights = np.polynomial.legendre.leggauss(_numPoints)
solToPoint = np.zeros((_numPoints))
sol = 0
for i, value in enumerate(solToPoint):
x = (_supLimit - _infLimit) / 2 * points[i] + (_supLimit + _infLimit) / 2
value = (_supLimit - _infLimit) / 2 * newtonInterpol(x, xPosition, fi)**2
sol += weights[i] * value
return sol
def generateGraph(_xdata, _ydata, _xlegend, _ylegend, _name):
fig, ax = plt.subplots()
plt.xlabel(_xlegend)
plt.ylabel(_ylegend)
ax.plot(_xdata, _ydata, color='blue', linewidth=0.4)
ax.set_xlim(min(_xdata), max(_xdata) + (max(_xdata) - min(_xdata))/20)
if min(_ydata) == 0:
ax.set_ylim(min(_ydata), max(_ydata) + max(_ydata)/20)
plt.ticklabel_format(style='sci', axis='both', scilimits=(-3,4))
plt.grid(True)
plt.savefig(f'{userDataPath}/data/{_name}.png')
def modalForce(_t):
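    # Harmonic pedestrian load: d (pedestrians/m^2) * psi envelope (param) *
    # harmonic amplitude (unitWeight) * cos(w*t) * 1.85 * sqrt(1/n) gives the
    # equivalent surface load; multiplying by the deck width and the
    # mode-shape integral (factor1) projects it onto the modal coordinate.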
forceValue = d*param*unitWeight*math.cos(w*_t)*1.85*(1/n)**0.5
modalForce = width*forceValue*factor1
return modalForce
def f(_x, _y):
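    # State vector _y = [u, u']; returns its derivative for the modal SDOF
    # equation m*u'' + 2*xi*w*m*u' + k*u = F(t), i.e.
    # u'' = (F - 2*w*damping*modalMass*u' - modalK*u) / modalMass.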
force = modalForce(_x)
valuek0 = _y[1]
valuek1 = ((-2*w*damping*modalMass*_y[1]) - _y[0]*modalK + force)/modalMass
return np.array([valuek0, valuek1])
def RK4(_inicialx, _finalx, _nInterval, _inicialy0, _inicialy1):
sol = np.asmatrix(np.zeros((_nInterval + 1, 4)))
h = (_finalx - _inicialx)/_nInterval
x = _inicialx
y0 = _inicialy0
y1 = _inicialy1
y = np.array([y0, y1])
sol[0, 0] = x
sol[0, 1] = y[0]
sol[0, 2] = y[1]
sol[0, 3] = 0
for i in range(_nInterval):
yante = y
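        # Classic fourth-order Runge-Kutta stages: slope at the start (K1),
        # two midpoint estimates (K2, K3) and the end of the step (K4),
        # combined with weights 1-2-2-1.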
K1 = f(x, y)
K2 = f(x + h/2, y + h/2*K1)
K3 = f(x + h/2, y + h/2*K2)
K4 = f(x + h, y + h*K3)
y = y + h/6*(K1 + 2*K2 + 2*K3 + K4)
x = x + h
sol[i + 1, 0] = x
sol[i + 1, 1] = y[0]
sol[i + 1, 2] = y[1]
sol[i + 1, 3] = (y[1] - yante[1])/h
return sol
# Getting analysis data
tablePath = sys.argv[1]
naturalFrequency = float(sys.argv[2])
mass = float(sys.argv[3])
damping = float(sys.argv[4])
lenght = float(sys.argv[5])
width = float(sys.argv[6])
direction = sys.argv[7]
ptGauss = int(sys.argv[8])
t0 = int(sys.argv[9])
tf = int(sys.argv[10])
nInterval = int(sys.argv[11])
userDataPath = sys.argv[12]
# direction = 'vertical'
# naturalFrequency = 3.96
# lenght = 34.087
# mass = 9724
# damping = 0.84/100
# width = 3
# Getting data from table
data = pd.read_excel(tablePath)
displacementLabel = 'u'
coordenateLabel = 'x'
try:
data['u']
except Exception:
displacementLabel = 'U'
try:
data['x']
except Exception:
    coordenateLabel = 'X'
displacement = convertDataToFloat(data[displacementLabel])
fi = np.array(displaceAndNormalize(displacement))
xPosition = np.array(convertDataToFloat(data[coordenateLabel]))
w = 2*(math.pi)*naturalFrequency
d = 1
n = width*lenght*d
param, unitWeight, harmonicNumber = harmonicProps(direction, naturalFrequency)
factor1 = gaussianQuadrature1(ptGauss, 0, lenght)
factor2 = gaussianQuadrature2(ptGauss, 0, lenght)
modalMass = mass/lenght*factor2
modalK = modalMass*w**2
sol = RK4(t0, tf, nInterval, 0, 0)
# t = np.linspace(t0, (tf - t0)/nInterval, tf)
# N = len(sol[:,3])//2
# fft = np.fft.fft(sol[:,3][N:])
# T = (tf - t0)/nInterval
# f = np.fft.fftfreq(N//2, T)
# xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
# frequencias = f[:N // 2]
# amplitudes = np.abs(fft)[:N // 2] * 1 / N
# Y = numpy.fft.fft(sol[:,3][1800:])
# freq = numpy.fft.fftfreq(2500-1800, T)
generateGraph(xPosition, fi, 'Posição (m)', 'Φ', 'autoVetorGraph')
generateGraph(sol[:,0], sol[:,1], 'Tempo (s)', 'Deslocamento (m)', 'deslocGraph')
generateGraph(sol[:,0], sol[:,2], 'Tempo (s)', 'Velocidade (m/s)', 'velocGraph')
generateGraph(sol[:,0], sol[:,3], 'Tempo (s)', 'Aceleração (m/s²)', 'acelGraph')
generateGraph(xPosition, fi*(max(sol[:,1])[0, 0]), 'Posição (m)', 'Deslocamento máximo (m)', 'maxDeslocGraph')
generateGraph(xPosition, fi*(max(sol[:,2])[0, 0]), 'Posição (m)', 'Velocidade máxima (m)', 'maxVelocGraph')
generateGraph(xPosition, fi*(max(sol[:,3])[0, 0]), 'Posição (m)', 'Aceleração máxima (m)', 'maxAcelGraph')
# generateGraph(freq, Y, 'Amplitude', 'Frequência (Hz)', 'FFTGraph')
# sys.stdin.read(1)
solution = {"massaModal": modalMass, "rigidezModal": modalK, "harmonico": harmonicNumber, "deslocMax": max(sol[:,1])[0, 0], "velMax": max(sol[:,2])[0, 0], "acelMax": max(sol[:,3])[0, 0]}
print(solution)
|
{"hexsha": "577e0ed68a5bbd175ffa4e1d3fc9505da02ac0a8", "size": 9497, "ext": "py", "lang": "Python", "max_stars_repo_path": "engine/main.py", "max_stars_repo_name": "Rfaelv/Dinpass", "max_stars_repo_head_hexsha": "d2191e9a243b3620c715205b3e499f56abf98ddb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "engine/main.py", "max_issues_repo_name": "Rfaelv/Dinpass", "max_issues_repo_head_hexsha": "d2191e9a243b3620c715205b3e499f56abf98ddb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "engine/main.py", "max_forks_repo_name": "Rfaelv/Dinpass", "max_forks_repo_head_hexsha": "d2191e9a243b3620c715205b3e499f56abf98ddb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5514950166, "max_line_length": 186, "alphanum_fraction": 0.5476466253, "include": true, "reason": "import numpy", "num_tokens": 3257}
|
import os
import numpy as np
from PIL import Image
from pprint import pprint
from torchvision import datasets, transforms
import shutil
BASE_DIR = "./mnist_data/"
IMG_DIR = "imgs/"
def download_mnist_image_files(data_num, save_dir=BASE_DIR):
transform = transforms.Compose([transforms.ToTensor()])
data = datasets.MNIST(save_dir, download=True, transform=transform)
print("preparing img files...")
if os.path.exists(BASE_DIR + IMG_DIR):
shutil.rmtree(BASE_DIR + IMG_DIR)
print("removed stored img")
os.mkdir(BASE_DIR + IMG_DIR)
d_count = dict()
for i in range(data_num):
x, y = data.__getitem__(i)
save_dir = BASE_DIR + IMG_DIR + str(y) + "/"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
arr_x = np.squeeze(x.numpy().transpose((1, 2, 0)))
im = Image.fromarray(arr_x * 255).convert("L")
if not str(y) in d_count.keys():
d_count[str(y)] = int(0)
d_count[str(y)] += int(1)
im.save(save_dir + "{}_{}.jpg".format(y, str(d_count[str(y)]).zfill(4)))
print("Completed")
return d_count
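# Illustrative entry point (not part of the original module): saves the first
# 100 MNIST digits as JPEGs under ./mnist_data/imgs/<label>/ and prints the
# per-label counts.
if __name__ == "__main__":
    counts = download_mnist_image_files(data_num=100)
    pprint(counts)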
|
{"hexsha": "6a896200126fd1d215296eb076c3ee7cb2a27023", "size": 1158, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/download_mnist_data.py", "max_stars_repo_name": "sari-rev00/pytorch_image_clissifier", "max_stars_repo_head_hexsha": "08698b1023e08cdde561d492074e7ee8c41be8ac", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-25T01:43:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T01:43:44.000Z", "max_issues_repo_path": "utils/download_mnist_data.py", "max_issues_repo_name": "sari-rev00/pytorch_image_clissifier", "max_issues_repo_head_hexsha": "08698b1023e08cdde561d492074e7ee8c41be8ac", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-02-13T13:46:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T01:20:43.000Z", "max_forks_repo_path": "utils/download_mnist_data.py", "max_forks_repo_name": "sari-rev00/pytorch_image_classifier", "max_forks_repo_head_hexsha": "08698b1023e08cdde561d492074e7ee8c41be8ac", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0909090909, "max_line_length": 81, "alphanum_fraction": 0.6208981002, "include": true, "reason": "import numpy", "num_tokens": 291}
|
// Copyright (c) 2017-2019 The Blocknet developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <rpc/server.h>
#include <xbridge/util/logger.h>
#include <xbridge/util/settings.h>
#include <xbridge/util/xbridgeerror.h>
#include <xbridge/util/xseries.h>
#include <xbridge/util/xutil.h>
#include <xbridge/xbridgeapp.h>
#include <xbridge/xbridgeexchange.h>
#include <xbridge/xbridgetransaction.h>
#include <xbridge/xbridgetransactiondescr.h>
#include <xbridge/xuiconnector.h>
#include <init.h>
#include <rpc/util.h>
#include <shutdown.h>
#include <validation.h>
#include <array>
#include <atomic>
#include <math.h>
#include <numeric>
#include <stdio.h>
#include <json/json_spirit_reader_template.h>
#include <json/json_spirit_writer_template.h>
#include <json/json_spirit_utils.h>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/iostreams/concepts.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string/predicate.hpp>
using namespace json_spirit;
using namespace std;
using namespace boost;
using TransactionMap = std::map<uint256, xbridge::TransactionDescrPtr>;
using TransactionPair = std::pair<uint256, xbridge::TransactionDescrPtr>;
using RealVector = std::vector<double>;
using TransactionVector = std::vector<xbridge::TransactionDescrPtr>;
using ArrayValue = Array::value_type;
using ArrayIL = std::initializer_list<ArrayValue>;
UniValue uret(const json_spirit::Value & o) {
UniValue uv;
if (!uv.read(json_spirit::write_string(o, json_spirit::none, 8)))
throw runtime_error("Unknown server error: failed to process request");
return uv;
}
std::string parseParentId(const uint256 & parentId) {
if (parentId.IsNull())
return "";
return parentId.GetHex();
}
/**
 * @brief TxOutToCurrencyPair inspects a transaction's outputs and returns currency pair transaction info
 * @param vout - transaction outputs with possible multisig/op_return order data
* @param snode_pubkey - (output) the service node public key
* @return - currency pair transaction details
*/
CurrencyPair TxOutToCurrencyPair(const std::vector<CTxOut> & vout, std::string& snode_pubkey)
{
snode_pubkey.clear();
if (vout.empty())
return {};
bool foundOpData{false};
std::string json;
for (const CTxOut & out : vout) {
if (out.scriptPubKey.empty())
continue;
std::vector<std::vector<unsigned char> > solutions;
txnouttype type = Solver(out.scriptPubKey, solutions);
if (type == TX_MULTISIG) {
if (solutions.size() < 4)
continue;
snode_pubkey = EncodeDestination(CTxDestination(CPubKey(solutions[1]).GetID()));
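            // The order JSON is packed into the remaining fake pubkeys of the
            // multisig output: each 65-byte "key" contributes 64 payload
            // bytes (the leading type byte is skipped below).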
for (size_t i = 2; i < solutions.size()-1; ++i) {
const auto& sol = solutions[i];
if (sol.size() != 65)
break;
std::copy(sol.begin()+1, sol.end(), std::back_inserter(json));
}
} else if (type == TX_NULL_DATA) {
if (out.nValue != 0 || !out.scriptPubKey.IsUnspendable())
continue;
std::vector<unsigned char> data;
CScript::const_iterator pc = out.scriptPubKey.begin();
while (pc < out.scriptPubKey.end()) { // look for order data
opcodetype opcode;
if (!out.scriptPubKey.GetOp(pc, opcode, data))
break;
if (data.size() != 0) {
std::copy(data.begin(), data.end(), std::back_inserter(json));
foundOpData = true;
break;
}
}
}
}
if (json.empty())
return {}; // no data found
if (foundOpData && vout.size() >= 2) {
CTxDestination snodeAddr;
if (ExtractDestination(vout[1].scriptPubKey, snodeAddr))
snode_pubkey = EncodeDestination(snodeAddr);
}
json_spirit::Value val;
if (not json_spirit::read_string(json, val) || val.type() != json_spirit::array_type)
return {}; // not order data, ignore
json_spirit::Array xtx = val.get_array();
if (xtx.size() != 5)
return {"Unknown chain data, bad records count"};
// validate chain inputs
try { xtx[0].get_str(); } catch(...) {
return {"Bad ID" }; }
try { xtx[1].get_str(); } catch(...) {
return {"Bad from token" }; }
try { xtx[2].get_uint64(); } catch(...) {
return {"Bad from amount" }; }
try { xtx[3].get_str(); } catch(...) {
return {"Bad to token" }; }
try { xtx[4].get_uint64(); } catch(...) {
return {"Bad to amount" }; }
return CurrencyPair{
xtx[0].get_str(), // xid
{ccy::Currency{xtx[1].get_str(),xbridge::TransactionDescr::COIN}, // fromCurrency
xtx[2].get_uint64()}, // fromAmount
{ccy::Currency{xtx[3].get_str(),xbridge::TransactionDescr::COIN}, // toCurrency
xtx[4].get_uint64()} // toAmount
};
}
UniValue dxGetNewTokenAddress(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetNewTokenAddress",
"\nReturns a new address for the specified asset.\n",
{
{"ticker", RPCArg::Type::STR, RPCArg::Optional::NO, "The ticker symbol of the asset you want to generate an address for (e.g. LTC)."},
},
RPCResult{
R"(
[
"SVTbaYZ8oApVn3uNyimst3GKyvvfzXQgdK"
]
Key | Type | Description
-----------------------|------|----------------------------------------------
Array | arr | An array containing the newly generated
| | address for the given asset.
)"
},
RPCExamples{
HelpExampleCli("dxGetNewTokenAddress", "BTC")
+ HelpExampleRpc("dxGetNewTokenAddress", "\"BTC\"")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() != 1)
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "(ticker)"));
const auto currency = params[0].get_str();
Array res;
xbridge::WalletConnectorPtr conn = xbridge::App::instance().connectorByCurrency(currency);
if (conn) {
const auto addr = conn->getNewTokenAddress();
if (!addr.empty())
res.emplace_back(addr);
}
return uret(res);
}
UniValue dxLoadXBridgeConf(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxLoadXBridgeConf",
"\nHot loads the xbridge.conf file. Note, this may disrupt trades in progress.\n",
{},
RPCResult{
R"(
true
Type | Description
-----|----------------------------------------------
bool | `true`: Successfully reloaded file.
)"
},
RPCExamples{
HelpExampleCli("dxLoadXBridgeConf", "")
+ HelpExampleRpc("dxLoadXBridgeConf", "")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() > 0)
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"This function does not accept any parameter."));
if (ShutdownRequested())
throw runtime_error("dxLoadXBridgeConf\nFailed to reload the config because a shutdown request is in progress.");
auto & app = xbridge::App::instance();
if (app.isUpdatingWallets()) // let the user know if wallets are being actively updated
throw runtime_error("dxLoadXBridgeConf\nAn existing wallet update is currently in progress, please wait until it is completed.");
auto success = app.loadSettings();
app.clearBadWallets(); // clear any bad wallet designations b/c user is explicitly requesting a wallet update
app.updateActiveWallets();
if (!settings().showAllOrders())
app.clearNonLocalOrders();
return uret(success);
}
UniValue dxGetLocalTokens(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetLocalTokens",
"\nReturns a list of assets supported by your node. "
"You can only trade on markets with assets returned in both dxGetNetworkTokens and dxGetLocalTokens.\n",
{},
RPCResult{
R"(
[
"BLOCK",
"LTC",
"MONA",
"SYS"
]
Key | Type | Description
-----------------------|------|----------------------------------------------
Array | arr | An array of all the assets supported by the
| | local client.
)"
},
RPCExamples{
HelpExampleCli("dxGetLocalTokens", "")
+ HelpExampleRpc("dxGetLocalTokens", "")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() > 0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"This function does not accept any parameter."));
}
Array r;
std::vector<std::string> currencies = xbridge::App::instance().availableCurrencies();
for (std::string currency : currencies) {
r.emplace_back(currency);
}
return uret(r);
}
UniValue dxGetNetworkTokens(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetNetworkTokens",
"\nReturns a list of all the assets currently supported by the network. "
"You can only trade on markets with assets returned in both dxGetNetworkTokens and dxGetLocalTokens.\n",
{},
RPCResult{
R"(
[
"BLOCK",
"BTC",
"DGB",
"LTC",
"MONA",
"PIVX",
"SYS"
]
Key | Type | Description
-----------------------|------|----------------------------------------------
Array | arr | An array of all the assets supported by the
| | network.
)"
},
RPCExamples{
HelpExampleCli("dxGetNetworkTokens", "")
+ HelpExampleRpc("dxGetNetworkTokens", "")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() > 0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"This function does not accept any parameters."));
}
std::set<std::string> services;
auto ws = xbridge::App::instance().walletServices();
for (auto & serviceItem : ws) {
auto s = serviceItem.second.services();
services.insert(s.begin(), s.end());
}
return uret(Array{services.begin(), services.end()});
}
/** \brief Returns the list of open and pending transactions
* \param params A list of input params.
* \param request.fHelp For debug purposes, throw the exception describing parameters.
* \return A list of open(they go first) and pending transactions.
*
* Returns the list of open and pending transactions as JSON structures.
* The open transactions go first.
*/
UniValue dxGetOrders(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetOrders",
"\nReturns a list of all orders of every market pair. \n"
"It will only return orders for assets returned in dxGetLocalTokens.\n",
{},
RPCResult{
R"(
[
{
"id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"maker": "SYS",
"maker_size": "100.000000",
"taker": "LTC",
"taker_size": "10.500000",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "partial",
"partial_minimum": "10.000000",
"partial_orig_maker_size": "100.000000",
"partial_orig_taker_size": "10.500000",
"partial_repost": false,
"partial_parent_id": "",
"status": "open"
},
{
"id": "a1f40d53f75357eb914554359b207b7b745cf096dbcb028eb77b7b7e4043c6b4",
"maker": "SYS",
"maker_size": "0.100000",
"taker": "LTC",
"taker_size": "0.010000",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "exact",
"partial_minimum": "0.000000",
"partial_orig_maker_size": "0.000000",
"partial_orig_taker_size": "0.000000",
"partial_repost": false,
"partial_parent_id": "",
"status": "open"
}
]
Key | Type | Description
------------------------|------|---------------------------------------------
Array | arr | An array of all orders with each order
| | having the following parameters.
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost | str | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxGetOrders", "")
+ HelpExampleRpc("dxGetOrders", "")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (!params.empty()) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"This function does not accept any parameters."));
}
auto &xapp = xbridge::App::instance();
TransactionMap trlist = xapp.transactions();
auto currentTime = boost::posix_time::second_clock::universal_time();
bool nowalletswitch = gArgs.GetBoolArg("-dxnowallets", settings().showAllOrders());
Array result;
for (const auto& trEntry : trlist) {
const auto &tr = trEntry.second;
// Skip canceled, finished, and expired orders older than 1 minute
if ((currentTime - tr->txtime).total_seconds() > 60) {
if (tr->state == xbridge::TransactionDescr::trCancelled
|| tr->state == xbridge::TransactionDescr::trFinished
|| tr->state == xbridge::TransactionDescr::trExpired)
continue;
}
xbridge::WalletConnectorPtr connFrom = xapp.connectorByCurrency(tr->fromCurrency);
xbridge::WalletConnectorPtr connTo = xapp.connectorByCurrency(tr->toCurrency);
if ((!connFrom || !connTo) && !nowalletswitch ){
continue;
}
Object jtr;
jtr.emplace_back(Pair("id", tr->id.GetHex()));
jtr.emplace_back(Pair("maker", tr->fromCurrency));
jtr.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(tr->fromAmount)));
jtr.emplace_back(Pair("taker", tr->toCurrency));
jtr.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(tr->toAmount)));
jtr.emplace_back(Pair("updated_at", xbridge::iso8601(tr->txtime)));
jtr.emplace_back(Pair("created_at", xbridge::iso8601(tr->created)));
jtr.emplace_back(Pair("order_type", tr->orderType()));
jtr.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(tr->minFromAmount)));
jtr.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(tr->origFromAmount)));
jtr.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(tr->origToAmount)));
jtr.emplace_back(Pair("partial_repost", tr->repostOrder));
jtr.emplace_back(Pair("partial_parent_id", parseParentId(tr->getParentOrder())));
jtr.emplace_back(Pair("status", tr->strState()));
result.emplace_back(jtr);
}
return uret(result);
}
UniValue dxGetOrderFills(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetOrderFills",
"\nReturns all the recent trades by trade pair that have been filled (i.e. completed). "
"This will only return orders that have been filled in your current session.\n",
{
{"maker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset sold by the maker (e.g. LTC)."},
{"taker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset sold by the taker (e.g. BLOCK)."},
{"combined", RPCArg::Type::BOOL, "true", "If true, combines the results to return orders with the maker and taker as specified as well as orders of the inverse market. If false, only returns filled orders with the maker and taker assets as specified."},
},
RPCResult{
R"(
[
{
"id": "a1f40d53f75357eb914554359b207b7b745cf096dbcb028eb77b7b7e4043c6b4",
"time": "2018-01-16T13:15:05.12345Z",
"maker": "SYS",
"maker_size": "101.00000000",
"taker": "LTC",
"taker_size": "0.01000000"
},
{
"id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"time": "2018-01-16T13:15:05.12345Z",
"maker": "LTC",
"maker_size": "0.01000000",
"taker": "SYS",
"taker_size": "101.00000000"
}
]
Key | Type | Description
----------------|------|-----------------------------------------------------
Array | arr | Array of orders sorted by date descending.
id | str | The order ID.
time | str | Time the order was filled.
maker | str | Maker trading asset; the ticker of the asset being
| | sold by the maker.
maker_size | str | Maker trading size. String is used to preserve
| | precision.
taker | str | Taker trading asset; the ticker of the asset being
| | sold by the taker.
taker_size | str | Taker trading size. String is used to preserve
| | precision.
)"
},
RPCExamples{
HelpExampleCli("dxGetOrderFills", "BLOCK LTC")
+ HelpExampleRpc("dxGetOrderFills", "\"BLOCK\", \"LTC\"")
+ HelpExampleCli("dxGetOrderFills", "BLOCK LTC true")
+ HelpExampleRpc("dxGetOrderFills", "\"BLOCK\", \"LTC\", true")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
bool invalidParams = ((params.size() != 2) &&
(params.size() != 3));
if (invalidParams) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"(maker) (taker) (combined, default=true)[optional]"));
}
bool combined = params.size() == 3 ? params[2].get_bool() : true;
const auto maker = params[0].get_str();
const auto taker = params[1].get_str();
TransactionMap history = xbridge::App::instance().history();
TransactionVector result;
for (auto &item : history) {
const xbridge::TransactionDescrPtr &ptr = item.second;
if ((ptr->state == xbridge::TransactionDescr::trFinished) &&
(combined ? ((ptr->fromCurrency == maker && ptr->toCurrency == taker) || (ptr->toCurrency == maker && ptr->fromCurrency == taker)) : (ptr->fromCurrency == maker && ptr->toCurrency == taker))) {
result.push_back(ptr);
}
}
std::sort(result.begin(), result.end(),
[](const xbridge::TransactionDescrPtr &a, const xbridge::TransactionDescrPtr &b)
{
return (a->txtime) > (b->txtime);
});
Array arr;
for(const auto &transaction : result) {
Object tmp;
tmp.emplace_back(Pair("id", transaction->id.GetHex()));
tmp.emplace_back(Pair("time", xbridge::iso8601(transaction->txtime)));
tmp.emplace_back(Pair("maker", transaction->fromCurrency));
tmp.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(transaction->fromAmount)));
tmp.emplace_back(Pair("taker", transaction->toCurrency));
tmp.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(transaction->toAmount)));
tmp.emplace_back(Pair("order_type", transaction->orderType()));
tmp.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(transaction->minFromAmount)));
tmp.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(transaction->origFromAmount)));
tmp.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(transaction->origToAmount)));
tmp.emplace_back(Pair("partial_repost", transaction->repostOrder));
tmp.emplace_back(Pair("partial_parent_id", parseParentId(transaction->getParentOrder())));
arr.emplace_back(tmp);
}
return uret(arr);
}
UniValue dxGetOrderHistory(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetOrderHistory",
"\nReturns the OHLCV data by trade pair for a specified time range and interval. "
"It can return the order history for any asset since all trade history is stored on-chain.\n",
{
{"maker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset sold by the maker (e.g. LTC)."},
{"taker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset sold by the taker (e.g. BLOCK)."},
{"start_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "The Unix time in seconds for the start time boundary to search."},
{"end_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "The Unix time in seconds for the end time boundary to search."},
{"granularity", RPCArg::Type::NUM, RPCArg::Optional::NO, "Time interval slice in seconds. The slice options are: " + xQuery::supported_seconds_csv()},
{"order_ids", RPCArg::Type::BOOL, "false", "If true, returns the IDs of all filled orders in each slice. If false, IDs are omitted."},
{"with_inverse", RPCArg::Type::BOOL, "false", "If false, returns the order history for the specified market. If true, also returns the orders in the inverse pair too (e.g. if LTC SYS then SYS LTC would be returned as well)."},
{"limit", RPCArg::Type::NUM, std::to_string(xQuery::IntervalLimit{}.count()), "The max number of interval slices returned. maximum=" + std::to_string(xQuery::IntervalLimit::max())},
// {"interval_timestamp", RPCArg::Type::STR, "at_start", "The timestamp at start of the interval. The options are [at_start | at_end]."},
},
RPCResult{
R"(
[
//[ time, low, high, open, close, volume, id(s) ],
[ "2018-01-16T13:15:05.12345Z", 1.10, 2.0, 1.10, 1.4, 1000, [ "0cc2e8a7222f1416cda996031ca21f67b53431614e89651887bc300499a6f83e" ] ],
[ "2018-01-16T14:15:05.12345Z", 0, 0, 0, 0, 0, [] ],
[ "2018-01-16T15:15:05.12345Z", 1.12, 2.2, 1.10, 1.4, 1000, [ "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9", "0cc2e8a7222f1416cda996031ca21f67b53431614e89651887bc300499a6f83e", "a1f40d53f75357eb914554359b207b7b745cf096dbcb028eb77b7b7e4043c6b4" ] ],
[ "2018-01-16T16:15:05.12345Z", 1.14, 2.0, 1.10, 1.4, 1000, [ "a1f40d53f75357eb914554359b207b7b745cf096dbcb028eb77b7b7e4043c6b4" ] ],
[ "2018-01-16T17:15:05.12345Z", 1.15, 2.0, 1.10, 1.4, 1000, [ "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a" ] ]
]
Key | Type | Description
--------------|-------|------------------------------------------------------
time | str | ISO 8601 datetime, with microseconds, of the time at
| | the beginning of the time slice.
low | float | Exchange rate lower bound within the time slice.
high | float | Exchange rate upper bound within the time slice.
open | float | Exchange rate of first filled order at the beginning
| | of the time slice.
close | float | Exchange rate of last filled order at the end of the
| | time slice.
volume | int | Total volume of the taker asset within the time
| | slice.
order_ids | arr | Array of GUIDs of all filled orders within the time
| | slice.
)"
},
RPCExamples{
HelpExampleCli("dxGetOrderHistory", "SYS LTC 1540660180 1540660420 60")
+ HelpExampleRpc("dxGetOrderHistory", "\"SYS\", \"LTC\", 1540660180, 1540660420, 60")
+ HelpExampleCli("dxGetOrderHistory", "SYS LTC 1540660180 1540660420 60 true false 18000")
+ HelpExampleRpc("dxGetOrderHistory", "\"SYS\", \"LTC\", 1540660180, 1540660420, 60, true, false, 18000")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
//--Validate query parameters
if (params.size() < 5 || params.size() > 8)
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"(maker) (taker) (start time) (end time) (granularity) "
"(order_ids, default=false)[optional] "
"(with_inverse, default=false)[optional] "
"(limit, default="+std::to_string(xQuery::IntervalLimit{}.count())+")[optional]"
// "(interval_timestamp, one of [at_start | at_end])[optional] "
));
const xQuery query{
params[0].get_str(), // maker
params[1].get_str(), // taker
params[4].get_int(), // granularity (need before start/end time)
params[2].get_int64(), // start time
params[3].get_int64(), // end time
params.size() > 5 && params[5].get_bool()
? xQuery::WithTxids::Included
: xQuery::WithTxids::Excluded,
params.size() > 6 && params[6].get_bool()
? xQuery::WithInverse::Included
: xQuery::WithInverse::Excluded,
params.size() > 7
? xQuery::IntervalLimit{params[7].get_int()}
: xQuery::IntervalLimit{},
params.size() > 8
? xQuery::IntervalTimestamp{params[8].get_str()}
: xQuery::IntervalTimestamp{}
};
if (query.error())
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, query.what() ));
try {
//--Process query, get result
auto& xseries = xbridge::App::instance().getXSeriesCache();
std::vector<xAggregate> result = xseries.getXAggregateSeries(query);
//--Serialize result
Array arr{};
const boost::posix_time::time_duration offset = query.interval_timestamp.at_start()
? query.granularity
: boost::posix_time::seconds{0};
for (const auto& x : result) {
double volume = x.fromVolume.amount<double>();
Array ohlc{
ArrayIL{xbridge::iso8601(x.timeEnd - offset), x.low, x.high, x.open, x.close, volume}
};
if (query.with_txids == xQuery::WithTxids::Included) {
Array orderIds{};
for (const auto& id : x.orderIds)
orderIds.emplace_back(id);
ohlc.emplace_back(orderIds);
}
arr.emplace_back(ohlc);
}
return uret(arr);
} catch(const std::exception& e) {
return uret(xbridge::makeError(xbridge::UNKNOWN_ERROR, __FUNCTION__, e.what() ));
} catch( ... ) {
return uret(xbridge::makeError(xbridge::UNKNOWN_ERROR, __FUNCTION__, "unknown exception" ));
}
}
UniValue dxGetOrder(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetOrder",
"\nReturns order info by order ID.\n",
{
{"id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The order ID."},
},
RPCResult{
R"(
{
"id": "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a",
"maker": "SYS",
"maker_size": "0.100",
"taker": "LTC",
"taker_size": "0.01",
"updated_at": "1970-01-01T00:00:00.00000Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "exact",
"partial_minimum": "0.000000",
"partial_orig_maker_size": "0.000000",
"partial_orig_taker_size": "0.000000",
"partial_repost": false,
"partial_parent_id": "",
"status": "open"
}
Key | Type | Description
------------------------|------|---------------------------------------------
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost | bool | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxGetOrder", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432")
+ HelpExampleRpc("dxGetOrder", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\"")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
if (params.size() != 1) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "(id)"));
}
uint256 id = uint256S(params[0].get_str());
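// A malformed id string parses to the null uint256 and simply fails the
// lookup below with TRANSACTION_NOT_FOUND.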
auto &xapp = xbridge::App::instance();
const xbridge::TransactionDescrPtr order = xapp.transaction(id);
if(order == nullptr) {
return uret(xbridge::makeError(xbridge::TRANSACTION_NOT_FOUND, __FUNCTION__, id.ToString()));
}
xbridge::WalletConnectorPtr connFrom = xapp.connectorByCurrency(order->fromCurrency);
xbridge::WalletConnectorPtr connTo = xapp.connectorByCurrency(order->toCurrency);
if(!connFrom) {
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, order->fromCurrency));
}
if (!connTo) {
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, order->toCurrency));
}
Object result;
result.emplace_back(Pair("id", order->id.GetHex()));
result.emplace_back(Pair("maker", order->fromCurrency));
result.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(order->fromAmount)));
result.emplace_back(Pair("taker", order->toCurrency));
result.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(order->toAmount)));
result.emplace_back(Pair("updated_at", xbridge::iso8601(order->txtime)));
result.emplace_back(Pair("created_at", xbridge::iso8601(order->created)));
result.emplace_back(Pair("order_type", order->orderType()));
result.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(order->minFromAmount)));
result.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(order->origFromAmount)));
result.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(order->origToAmount)));
result.emplace_back(Pair("partial_repost", order->repostOrder));
result.emplace_back(Pair("partial_parent_id", parseParentId(order->getParentOrder())));
result.emplace_back(Pair("status", order->strState()));
return uret(result);
}
UniValue dxMakeOrder(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxMakeOrder",
"\nCreate a new exact order. Exact orders must be taken for the full order amount. "
"For partial orders, see dxMakePartialOrder.\n"
"You can only create orders for markets with assets supported by your node (view with dxGetLocalTokens) "
"and the network (view with dxGetNetworkTokens). There are no fees to make orders.\n"
"\nNote:\n"
"XBridge will first attempt to use funds from the specified maker address. "
"If this address does not have sufficient funds to cover the order, then "
"it will pull funds from other addresses in the wallet. Change is "
"deposited to the address with the largest input used.\n",
{
{"maker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset being sold by the maker (e.g. LTC)."},
{"maker_size", RPCArg::Type::STR, RPCArg::Optional::NO, "The amount of the maker asset being sent."},
{"maker_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The maker address containing asset being sent."},
{"taker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset being bought by the maker (e.g. BLOCK)."},
{"taker_size", RPCArg::Type::STR, RPCArg::Optional::NO, "The amount of the taker asset to be received."},
{"taker_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The taker address for the receiving asset."},
{"type", RPCArg::Type::STR, RPCArg::Optional::NO, "The order type. Options: exact"},
{"dryrun", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Simulate the order submission without actually submitting the order, i.e. a test run. Options: dryrun"},
},
RPCResult{
R"(
{
"id": "4306a107113c4562afa6273ecd9a3990ead53a0227f74ddd9122272e453ae07d",
"maker": "SYS",
"maker_size": "1.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "0.100000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-16T00:00:00.00000Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"block_id": "38729344720548447445023782734923740427863289632489723984723",
"order_type": "exact",
"partial_minimum": "0.000000",
"partial_orig_maker_size": "0.000000",
"partial_orig_taker_size": "0.000000",
"partial_repost": false,
"partial_parent_id": "",
"status": "created"
}
Key | Type | Description
------------------------|------|---------------------------------------------
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost | bool | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxMakeOrder", "LTC 25 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BLOCK 1000 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR exact")
+ HelpExampleRpc("dxMakeOrder", "\"LTC\", \"25\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BLOCK\", \"1000\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"exact\"")
+ HelpExampleCli("dxMakeOrder", "LTC 25 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BLOCK 1000 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR exact dryrun")
+ HelpExampleRpc("dxMakeOrder", "\"LTC\", \"25\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BLOCK\", \"1000\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"exact\", \"dryrun\"")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
if (params.size() < 7) {
throw runtime_error("dxMakeOrder (maker) (maker size) (maker address) (taker) (taker size)\n"
"(taker address) (type) (dryrun)[optional]\n"
"Create a new order. You can only create orders for markets with tokens\n"
"supported by your node. There are no fees to make orders. [dryrun] will\n"
"validate the order without submitting the order to the network (test run).");
}
if (!xbridge::xBridgeValidCoin(params[1].get_str())) {
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS,
"The maker_size is too precise. The maximum precision supported is " +
std::to_string(xbridge::xBridgeSignificantDigits(xbridge::TransactionDescr::COIN)) + " digits.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
if (!xbridge::xBridgeValidCoin(params[4].get_str())) {
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS,
"The taker_size is too precise. The maximum precision supported is " +
std::to_string(xbridge::xBridgeSignificantDigits(xbridge::TransactionDescr::COIN)) + " digits.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
std::string fromCurrency = params[0].get_str();
double fromAmount = boost::lexical_cast<double>(params[1].get_str());
std::string fromAddress = params[2].get_str();
std::string toCurrency = params[3].get_str();
double toAmount = boost::lexical_cast<double>(params[4].get_str());
std::string toAddress = params[5].get_str();
std::string type = params[6].get_str();
// Validate the order type
if (type != "exact") {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"Only the exact type is supported at this time."));
}
// Check that addresses are not the same
if (fromAddress == toAddress) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maker_address and taker_address cannot be the same: " + fromAddress));
}
// Check upper limits
if (fromAmount > (double)xbridge::TransactionDescr::MAX_COIN ||
toAmount > (double)xbridge::TransactionDescr::MAX_COIN) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maximum supported size is " + std::to_string(xbridge::TransactionDescr::MAX_COIN)));
}
// Check lower limits
if (fromAmount <= 0 || toAmount <= 0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The minimum supported size is " + xbridge::xBridgeStringValueFromPrice(1.0/xbridge::TransactionDescr::COIN)));
}
// Validate addresses
xbridge::WalletConnectorPtr connFrom = xbridge::App::instance().connectorByCurrency(fromCurrency);
xbridge::WalletConnectorPtr connTo = xbridge::App::instance().connectorByCurrency(toCurrency);
if (!connFrom) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + fromCurrency));
if (!connTo) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + toCurrency));
xbridge::App &app = xbridge::App::instance();
if (!app.isValidAddress(fromAddress, connFrom)) {
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__, fromAddress));
}
if (!app.isValidAddress(toAddress, connTo)) {
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__, toAddress));
}
if(fromAmount <= .0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maker_size must be greater than 0."));
}
if(toAmount <= .0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The taker_size must be greater than 0."));
}
// Require the exact string "dryrun" so a misspelled flag does not submit a live order
bool dryrun = false;
if (params.size() == 8) {
std::string dryrunParam = params[7].get_str();
if (dryrunParam != "dryrun") {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, dryrunParam));
}
dryrun = true;
}
Object result;
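// Validate currencies, wallet sessions, and maker funding before creating the order.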
auto statusCode = app.checkCreateParams(fromCurrency, toCurrency,
xbridge::xBridgeAmountFromReal(fromAmount), fromAddress);
switch (statusCode) {
case xbridge::SUCCESS:{
// If dryrun
if (dryrun) {
result.emplace_back(Pair("id", uint256().GetHex()));
result.emplace_back(Pair("maker", fromCurrency));
result.emplace_back(Pair("maker_size",
xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
result.emplace_back(Pair("maker_address", fromAddress));
result.emplace_back(Pair("taker", toCurrency));
result.emplace_back(Pair("taker_size",
xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
result.emplace_back(Pair("taker_address", toAddress));
result.emplace_back(Pair("order_type", "exact"));
result.emplace_back(Pair("partial_minimum","0"));
result.emplace_back(Pair("partial_orig_maker_size", "0"));
result.emplace_back(Pair("partial_orig_taker_size", "0"));
result.emplace_back(Pair("partial_repost", false));
result.emplace_back(Pair("partial_parent_id", parseParentId(uint256())));
result.emplace_back(Pair("status", "created"));
return uret(result);
}
break;
}
case xbridge::INVALID_CURRENCY: {
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromCurrency));
}
case xbridge::NO_SESSION:{
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromCurrency));
}
case xbridge::INSIFFICIENT_FUNDS:{
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromAddress));
}
default:
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
uint256 id = uint256();
uint256 blockHash = uint256();
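// id and blockHash are output parameters populated by sendXBridgeTransaction.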
statusCode = xbridge::App::instance().sendXBridgeTransaction
(fromAddress, fromCurrency, xbridge::xBridgeAmountFromReal(fromAmount),
toAddress, toCurrency, xbridge::xBridgeAmountFromReal(toAmount), id, blockHash);
if (statusCode == xbridge::SUCCESS) {
Object obj;
obj.emplace_back(Pair("id", id.GetHex()));
obj.emplace_back(Pair("maker_address", fromAddress));
obj.emplace_back(Pair("maker", fromCurrency));
obj.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
obj.emplace_back(Pair("taker_address", toAddress));
obj.emplace_back(Pair("taker", toCurrency));
obj.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
const auto &createdTime = xbridge::App::instance().transaction(id)->created;
obj.emplace_back(Pair("created_at", xbridge::iso8601(createdTime)));
obj.emplace_back(Pair("updated_at", xbridge::iso8601(boost::posix_time::microsec_clock::universal_time()))); // TODO Need actual updated time, this is just estimate
obj.emplace_back(Pair("block_id", blockHash.GetHex()));
obj.emplace_back(Pair("order_type", "exact"));
obj.emplace_back(Pair("partial_minimum","0"));
obj.emplace_back(Pair("partial_orig_maker_size", "0"));
obj.emplace_back(Pair("partial_orig_taker_size", "0"));
obj.emplace_back(Pair("partial_repost", false));
obj.emplace_back(Pair("partial_parent_id", parseParentId(uint256())));
obj.emplace_back(Pair("status", "created"));
return uret(obj);
} else {
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
}
UniValue dxTakeOrder(const JSONRPCRequest& request) {
if (request.fHelp || request.params.size() < 3 || request.params.size() > 5)
throw std::runtime_error(
RPCHelpMan{"dxTakeOrder",
"\nThis call is used to take an order. You can only take orders for assets supported "
"by your node (view with dxGetLocalTokens). Taking your own order is not supported. "
"Taking an order has a 0.015 BLOCK fee.\n"
"\nNote:\n"
"XBridge will first attempt to use funds from the specified from_address. "
"If this address does not have sufficient funds to cover the order, then "
"it will pull funds from other addresses in the wallet. Change is "
"deposited to the address with the largest input used.\n",
{
{"id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The ID of the order being filled."},
{"from_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The address containing asset being sent."},
{"to_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The address for the receiving asset."},
{"amount", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "The amount to take (allowed only on partial orders)"},
{"dryrun", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Simulate the order submission without actually submitting the order, i.e. a test run. Options: dryrun"},
},
RPCResult{
R"(
{
"id": "4306aa07113c4562ffa6278ecd9a3990ead53a0227f74ddd9122272e453ae07d",
"maker": "SYS",
"maker_size": "0.100",
"taker": "LTC",
"taker_size": "0.01",
"updated_at": "1970-01-01T00:00:00.00000Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "exact",
"partial_minimum": "0.000000",
"partial_repost": false,
"status": "accepting"
}
Key | Type | Description
----------------|------|-----------------------------------------------------
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset being
| | sold by the maker.
maker_size | str | Maker trading size. String is used to preserve
| | precision.
taker | str | Taker trading asset; the ticker of the asset being
| | sold by the taker.
taker_size | str | Taker trading size. String is used to preserve
| | precision.
updated_at | str | ISO 8601 datetime, with microseconds, of the last
| | time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of when the
| | order was created.
status | str | The order status.
)"
},
RPCExamples{
HelpExampleCli("dxTakeOrder", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR")
+ HelpExampleRpc("dxTakeOrder", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\"")
+ HelpExampleCli("dxTakeOrder", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR 0.5")
+ HelpExampleRpc("dxTakeOrder", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"0.5\"")
+ HelpExampleCli("dxTakeOrder", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR 0.5 dryrun")
+ HelpExampleRpc("dxTakeOrder", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"0.5\", \"dryrun\"")
},
}.ToString());
uint256 id = uint256S(request.params[0].get_str());
std::string fromAddress = request.params[1].get_str();
std::string toAddress = request.params[2].get_str();
xbridge::App &app = xbridge::App::instance();
// Check that addresses are not the same
if (fromAddress == toAddress) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The from_address and to_address cannot be the same: " + fromAddress));
}
double amount{0};
if (request.params.size() >= 4) {
const auto amountStr = request.params[3].get_str();
if (!amountStr.empty()) {
amount = boost::lexical_cast<double>(amountStr);
if (amount <= 0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The amount cannot be less than or equal to 0: " + request.params[3].get_str()));
}
}
}
// Require the exact string "dryrun" so a misspelled flag does not submit a live order
bool dryrun = false;
if (request.params.size() == 5) {
std::string dryrunParam = request.params[4].get_str();
if (dryrunParam != "dryrun") {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, dryrunParam));
}
dryrun = true;
}
Object result;
xbridge::Error statusCode;
xbridge::TransactionDescrPtr txDescr = app.transaction(id);
if (!txDescr) {
WARN() << "transaction not found " << __FUNCTION__;
return uret(xbridge::makeError(xbridge::TRANSACTION_NOT_FOUND, __FUNCTION__));
}
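// Taker's perspective: the taker sends the order's "to" asset and receives its
// "from" asset, so the sizes are crossed here; the currency swap itself happens
// further below.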
CAmount fromSize = txDescr->toAmount;
CAmount toSize = txDescr->fromAmount;
// If no amount is specified on a partial order by default use the full
// order sizes (will result in the entire partial order being taken).
if (txDescr->isPartialOrderAllowed() && xbridge::xBridgeAmountFromReal(amount) > 0) {
if (xbridge::xBridgeAmountFromReal(amount) < txDescr->minFromAmount) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "The minimum amount for this order is: " +
xbridge::xBridgeStringValueFromAmount(txDescr->minFromAmount)));
} else if (xbridge::xBridgeAmountFromReal(amount) > txDescr->fromAmount) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "The maximum amount for this order is: " +
xbridge::xBridgeStringValueFromAmount(txDescr->fromAmount)));
}
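// Scale the taker's send amount so a partial fill preserves the order's
// original price ratio.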
if (xbridge::xBridgeAmountFromReal(amount) < toSize) {
toSize = xbridge::xBridgeAmountFromReal(amount);
fromSize = xbridge::xBridgeSourceAmountFromPrice(toSize, txDescr->toAmount, txDescr->fromAmount);
}
} else if (amount > 0) {
WARN() << "partial orders are not allowed for this order " << __FUNCTION__;
return uret(xbridge::makeError(xbridge::INVALID_PARTIAL_ORDER, __FUNCTION__));
}
// Check taker sending coin balance (toCurrency here because the swap frame of reference hasn't occurred yet)
statusCode = app.checkAcceptParams(txDescr->toCurrency, fromSize);
switch (statusCode)
{
case xbridge::SUCCESS: {
if (txDescr->isLocal()) // no self trades
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "Unable to accept your own order."));
// taker [to] will match order [from] currency (due to pair swap happening later)
xbridge::WalletConnectorPtr connTo = xbridge::App::instance().connectorByCurrency(txDescr->fromCurrency);
// taker [from] will match order [to] currency (due to pair swap happening later)
xbridge::WalletConnectorPtr connFrom = xbridge::App::instance().connectorByCurrency(txDescr->toCurrency);
if (!connFrom) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + txDescr->toCurrency));
if (!connTo) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + txDescr->fromCurrency));
// Check for valid toAddress
if (!app.isValidAddress(toAddress, connTo))
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__,
": " + txDescr->fromCurrency + " address is bad. Are you using the correct address?"));
// Check for valid fromAddress
if (!app.isValidAddress(fromAddress, connFrom))
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__,
": " + txDescr->toCurrency + " address is bad. Are you using the correct address?"));
if (dryrun) {
result.emplace_back(Pair("id", uint256().GetHex()));
result.emplace_back(Pair("maker", txDescr->fromCurrency));
result.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(fromSize)));
result.emplace_back(Pair("taker", txDescr->toCurrency));
result.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(toSize)));
result.emplace_back(Pair("updated_at", xbridge::iso8601(boost::posix_time::microsec_clock::universal_time())));
result.emplace_back(Pair("created_at", xbridge::iso8601(txDescr->created)));
result.emplace_back(Pair("order_type", txDescr->orderType()));
result.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(txDescr->minFromAmount)));
result.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(txDescr->origFromAmount)));
result.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(txDescr->origToAmount)));
result.emplace_back(Pair("partial_repost", txDescr->repostOrder));
result.emplace_back(Pair("partial_parent_id", parseParentId(txDescr->getParentOrder())));
result.emplace_back(Pair("status", "filled"));
return uret(result);
}
break;
}
case xbridge::TRANSACTION_NOT_FOUND:
{
return uret(xbridge::makeError(xbridge::TRANSACTION_NOT_FOUND, __FUNCTION__, id.ToString()));
}
case xbridge::NO_SESSION:
{
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, txDescr->toCurrency));
}
case xbridge::INSIFFICIENT_FUNDS:
{
return uret(xbridge::makeError(xbridge::INSIFFICIENT_FUNDS, __FUNCTION__, fromAddress));
}
default:
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
// TODO swap is destructive on state (also complicates historical data)
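// From this point the descriptor reflects the taker's frame of reference;
// the original values are restored below if the accept fails.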
std::swap(txDescr->fromCurrency, txDescr->toCurrency);
std::swap(txDescr->fromAmount, txDescr->toAmount);
statusCode = app.acceptXBridgeTransaction(id, fromAddress, toAddress, fromSize, toSize);
if (statusCode == xbridge::SUCCESS) {
result.emplace_back(Pair("id", id.GetHex()));
result.emplace_back(Pair("maker", txDescr->fromCurrency));
result.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(fromSize)));
result.emplace_back(Pair("taker", txDescr->toCurrency));
result.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(toSize)));
result.emplace_back(Pair("updated_at", xbridge::iso8601(boost::posix_time::microsec_clock::universal_time())));
result.emplace_back(Pair("created_at", xbridge::iso8601(txDescr->created)));
result.emplace_back(Pair("order_type", txDescr->orderType()));
result.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(txDescr->minFromAmount)));
result.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(txDescr->origFromAmount)));
result.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(txDescr->origToAmount)));
result.emplace_back(Pair("partial_repost", txDescr->repostOrder));
result.emplace_back(Pair("partial_parent_id", parseParentId(txDescr->getParentOrder())));
result.emplace_back(Pair("status", txDescr->strState()));
return uret(result);
} else {
// restore state on error
txDescr->fromCurrency = txDescr->origFromCurrency;
txDescr->fromAmount = txDescr->origFromAmount;
txDescr->toCurrency = txDescr->origToCurrency;
txDescr->toAmount = txDescr->origToAmount;
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
}
UniValue dxCancelOrder(const JSONRPCRequest& request)
{
if(request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxCancelOrder",
"\nThis call is used to cancel one of your own orders. This automatically "
"rolls back the order if a trade is in process.\n",
{
{"id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The ID of the order to cancel."},
},
RPCResult{
R"(
{
"id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"maker": "SYS",
"maker_size": "0.100",
"maker_address": "SVTbaYZ8oApVn3uNyimst3GKyvvfzXQgdK",
"taker": "LTC",
"taker_size": "0.01",
"taker_address": "LVvFhzRoMRGTtGihHp7jVew3YoZRX8y35Z",
"updated_at": "1970-01-01T00:00:00.00000Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"status": "canceled"
}
Key | Type | Description
----------------|------|-----------------------------------------------------
id | str | The order ID.
maker | str | Sending asset of party cancelling the order.
maker_size | str | Sending trading size. String is used to preserve
| | precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Receiving asset of party cancelling the order.
taker_size | str | Receiving trading size. String is used to preserve
| | precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the last
| | time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of when the
| | order was created.
status | str | The order status (canceled).
)"
},
RPCExamples{
HelpExampleCli("dxCancelOrder", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432")
+ HelpExampleRpc("dxCancelOrder", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\"")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
if (params.size() != 1)
{
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "(id)"));
}
LOG() << "rpc cancel order " << __FUNCTION__;
const auto sid = params[0].get_str();
const uint256 id = uint256S(sid);
if (id.IsNull())
    return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, strprintf("Invalid order id [%s]", sid)));
xbridge::TransactionDescrPtr tx = xbridge::App::instance().transaction(id);
if (!tx)
{
return uret(xbridge::makeError(xbridge::TRANSACTION_NOT_FOUND, __FUNCTION__, id.ToString()));
}
if (tx->state >= xbridge::TransactionDescr::trCreated)
{
return uret(xbridge::makeError(xbridge::INVALID_STATE, __FUNCTION__, "The order is already " + tx->strState()));
}
const auto res = xbridge::App::instance().cancelXBridgeTransaction(id, crRpcRequest);
if (res != xbridge::SUCCESS)
{
return uret(xbridge::makeError(res, __FUNCTION__));
}
xbridge::WalletConnectorPtr connFrom = xbridge::App::instance().connectorByCurrency(tx->fromCurrency);
xbridge::WalletConnectorPtr connTo = xbridge::App::instance().connectorByCurrency(tx->toCurrency);
if (!connFrom) {
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, tx->fromCurrency));
}
if (!connTo) {
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, tx->toCurrency));
}
Object obj;
obj.emplace_back(Pair("id", id.GetHex()));
obj.emplace_back(Pair("maker", tx->fromCurrency));
obj.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(tx->fromAmount)));
obj.emplace_back(Pair("maker_address", connFrom->fromXAddr(tx->from)));
obj.emplace_back(Pair("taker", tx->toCurrency));
obj.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(tx->toAmount)));
obj.emplace_back(Pair("taker_address", connTo->fromXAddr(tx->to)));
obj.emplace_back(Pair("refund_tx", tx->refTx));
obj.emplace_back(Pair("updated_at", xbridge::iso8601(tx->txtime)));
obj.emplace_back(Pair("created_at", xbridge::iso8601(tx->created)));
obj.emplace_back(Pair("status", tx->strState()));
return uret(obj);
}
UniValue dxFlushCancelledOrders(const JSONRPCRequest& request)
{
if(request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxFlushCancelledOrders",
"\nThis call is used to remove your cancelled orders that are older than the specified amount of time.\n",
{
{"ageMillis", RPCArg::Type::NUM, "0", "Remove cancelled orders older than this amount of milliseconds."},
},
RPCResult{
R"(
{
"ageMillis": 0,
"now": "20191126T024005.352285",
"durationMicrosec": 0,
"flushedOrders": [
{
"id": "582a02ada05c8a4bb39b34de0eb54767bcb95a7792e5865d3a0babece4715f47",
"txtime": "20191126T023945.855058",
"use_count": 1
},
{
"id": "a508cd8d110bdc0b1fd819a89d94cdbf702e3aa40edbe654af5d556ff3c43a0a",
"txtime": "20191126T023956.270409",
"use_count": 1
}
]
}
Key | Type | Description
------------------|------|---------------------------------------------------
ageMillis | int | Millisecond value specified when making the call.
now | str | ISO 8601 datetime, with microseconds, of when the
| | call was executed.
durationMicrosec* | int | The amount of time in microseconds it took to
| | process the call.
flushedOrders | arr | Array of cancelled orders that were removed.
id | str | The order ID.
txtime | str | ISO 8601 datetime, with microseconds, of when the
| | order was created.
use_count* | int | This value is strictly for debugging purposes.
)"
},
RPCExamples{
HelpExampleCli("dxFlushCancelledOrders", "")
+ HelpExampleRpc("dxFlushCancelledOrders", "")
+ HelpExampleCli("dxFlushCancelledOrders", "600000")
+ HelpExampleRpc("dxFlushCancelledOrders", "600000")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
const int ageMillis = params.size() == 0
? 0
: (params.size() == 1 ? params[0].get_int() : -1);
if (ageMillis < 0)
{
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"ageMillis must be an integer >= 0"));
}
const auto minAge = boost::posix_time::millisec{ageMillis};
LOG() << "rpc flush cancelled orders older than " << minAge << ": " << __FUNCTION__;
const auto now = boost::posix_time::microsec_clock::universal_time();
const auto list = xbridge::App::instance().flushCancelledOrders(minAge);
const auto micros = boost::posix_time::time_duration{ boost::posix_time::microsec_clock::universal_time() - now };
Object result{
Pair{"ageMillis", ageMillis},
Pair{"now", xbridge::iso8601(now)},
Pair{"durationMicrosec", static_cast<int>(micros.total_microseconds())},
};
Array a;
for(const auto & it : list) {
a.emplace_back(
ArrayValue{Object{
Pair{"id", it.id.GetHex()},
Pair{"txtime", xbridge::iso8601(it.txtime)},
Pair{"use_count", it.use_count},
}}
);
}
result.emplace_back("flushedOrders", a);
return uret(result);
}
UniValue dxGetOrderBook(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetOrderBook",
"\nThis call is used to retrieve open orders at various detail levels:\n"
"\nDetail 1 - Returns the best bid and ask.\n"
"Detail 2 - Returns a list of aggregated orders. This is useful for charting.\n"
"Detail 3 - Returns a list of non-aggregated orders. This is useful for bot trading.\n"
"Detail 4 - Returns the best bid and ask with the order IDs.\n"
"\nNote:\n"
"This call will only return orders for markets with both assets supported by your "
"node (view with dxGetLocalTokens). To view all orders, set ShowAllOrders=true in "
"your xbridge.conf header and reload it with dxLoadXBridgeConf.\n",
{
{"detail", RPCArg::Type::NUM, RPCArg::Optional::NO, "The detail level."},
{"maker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the token being sold by the maker (e.g. LTC)."},
{"taker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the token being sold by the taker (e.g. BLOCK)."},
{"max_orders", RPCArg::Type::NUM, "50", "The maximum total orders to display for bids and asks combined."},
},
RPCResult{
"\n"
},
RPCExamples{
HelpExampleCli("dxGetOrderBook", "3 BLOCK LTC")
+ HelpExampleRpc("dxGetOrderBook", "3, \"BLOCK\", \"LTC\"")
+ HelpExampleCli("dxGetOrderBook", "3 BLOCK LTC 60")
+ HelpExampleRpc("dxGetOrderBook", "3, \"BLOCK\", \"LTC\", 60")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
if (params.size() < 3 || params.size() > 4)
{
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"(detail, 1-4) (maker) (taker) (max_orders, default=50)[optional]"));
}
Object res;
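// Work from a snapshot of the current order list; entries are filtered per market below.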
TransactionMap trList = xbridge::App::instance().transactions();
{
/**
* @brief detailLevel - Get a list of open orders for a product.
* The amount of detail shown can be customized with the level parameter.
*/
const auto detailLevel = params[0].get_int();
const auto fromCurrency = params[1].get_str();
const auto toCurrency = params[2].get_str();
std::size_t maxOrders = 50;
if (params.size() == 4)
maxOrders = params[3].get_int();
if (maxOrders < 1)
maxOrders = 1;
if (detailLevel < 1 || detailLevel > 4)
{
return uret(xbridge::makeError(xbridge::INVALID_DETAIL_LEVEL, __FUNCTION__));
}
res.emplace_back(Pair("detail", detailLevel));
res.emplace_back(Pair("maker", fromCurrency));
res.emplace_back(Pair("taker", toCurrency));
/**
* @brief bids - array with bids
*/
Array bids;
/**
* @brief asks - array with asks
*/
Array asks;
if(trList.empty())
{
LOG() << "empty transactions list";
res.emplace_back(Pair("asks", asks));
res.emplace_back(Pair("bids", bids));
return uret(res);
}
TransactionMap asksList;
TransactionMap bidsList;
// copy all orders for the currencies specified in the parameters
// ask orders are denominated in the first token of the trading pair
std::copy_if(trList.begin(), trList.end(), std::inserter(asksList, asksList.end()),
[&toCurrency, &fromCurrency](const TransactionPair &transaction)
{
if(transaction.second == nullptr)
return false;
if (transaction.second->fromAmount <= 0 || transaction.second->toAmount <= 0)
return false;
if (transaction.second->state != xbridge::TransactionDescr::trPending)
return false;
return ( boost::iequals(transaction.second->toCurrency, toCurrency) &&
boost::iequals(transaction.second->fromCurrency, fromCurrency) );
});
// bid orders are denominated in the second token of the trading pair (inverse of asks)
std::copy_if(trList.begin(), trList.end(), std::inserter(bidsList, bidsList.end()),
[&toCurrency, &fromCurrency](const TransactionPair &transaction)
{
if(transaction.second == nullptr)
return false;
if (transaction.second->fromAmount <= 0 || transaction.second->toAmount <= 0)
return false;
if (transaction.second->state != xbridge::TransactionDescr::trPending)
return false;
return ( boost::iequals(transaction.second->toCurrency, fromCurrency) &&
boost::iequals(transaction.second->fromCurrency, toCurrency));
});
std::vector<xbridge::TransactionDescrPtr> asksVector;
std::vector<xbridge::TransactionDescrPtr> bidsVector;
for (const auto &trEntry : asksList)
asksVector.emplace_back(trEntry.second);
for (const auto &trEntry : bidsList)
bidsVector.emplace_back(trEntry.second);
// sort asks descending
std::sort(asksVector.begin(), asksVector.end(),
[](const xbridge::TransactionDescrPtr &a, const xbridge::TransactionDescrPtr &b)
{
const auto priceA = xbridge::price(a);
const auto priceB = xbridge::price(b);
return priceA > priceB;
});
//sort bids descending
std::sort(bidsVector.begin(), bidsVector.end(),
[](const xbridge::TransactionDescrPtr &a, const xbridge::TransactionDescrPtr &b)
{
const auto priceA = xbridge::priceBid(a);
const auto priceB = xbridge::priceBid(b);
return priceA > priceB;
});
// floating point comparisons
// see Knuth 4.2.2 Eq 36
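// note: relative-epsilon comparison assumes non-zero prices (divides by |a| and |b|)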
auto floatCompare = [](const double a, const double b) -> bool
{
const auto epsilon = std::numeric_limits<double>::epsilon();
return (fabs(a - b) / fabs(a) <= epsilon) && (fabs(a - b) / fabs(b) <= epsilon);
};
switch (detailLevel)
{
case 1:
{
//return only the best bid and ask
if (!bidsList.empty()) {
const auto bidsItem = std::max_element(bidsList.begin(), bidsList.end(),
[](const TransactionPair &a, const TransactionPair &b)
{
//find transaction with best bids
const auto &tr1 = a.second;
const auto &tr2 = b.second;
if(tr1 == nullptr)
return true;
if(tr2 == nullptr)
return false;
const auto priceA = xbridge::priceBid(tr1);
const auto priceB = xbridge::priceBid(tr2);
return priceA < priceB;
});
const auto bidsCount = std::count_if(bidsList.begin(), bidsList.end(),
[bidsItem, floatCompare](const TransactionPair &a)
{
const auto &tr = a.second;
if(tr == nullptr)
return false;
const auto price = xbridge::priceBid(tr);
const auto &bestTr = bidsItem->second;
if (bestTr != nullptr)
{
const auto bestBidPrice = xbridge::priceBid(bestTr);
return floatCompare(price, bestBidPrice);
}
return false;
});
const auto &tr = bidsItem->second;
if (tr != nullptr)
{
const auto bidPrice = xbridge::priceBid(tr);
bids.emplace_back(Array{xbridge::xBridgeStringValueFromPrice(bidPrice),
xbridge::xBridgeStringValueFromAmount(tr->toAmount),
static_cast<int64_t>(bidsCount)});
}
}
if (!asksList.empty()) {
const auto asksItem = std::min_element(asksList.begin(), asksList.end(),
[](const TransactionPair &a, const TransactionPair &b)
{
//find transactions with best asks
const auto &tr1 = a.second;
const auto &tr2 = b.second;
if(tr1 == nullptr)
return true;
if(tr2 == nullptr)
return false;
const auto priceA = xbridge::price(tr1);
const auto priceB = xbridge::price(tr2);
return priceA < priceB;
});
const auto asksCount = std::count_if(asksList.begin(), asksList.end(),
[asksItem, floatCompare](const TransactionPair &a)
{
const auto &tr = a.second;
if(tr == nullptr)
return false;
const auto price = xbridge::price(tr);
const auto &bestTr = asksItem->second;
if (bestTr != nullptr)
{
const auto bestAskPrice = xbridge::price(bestTr);
return floatCompare(price, bestAskPrice);
}
return false;
});
const auto &tr = asksItem->second;
if (tr != nullptr)
{
const auto askPrice = xbridge::price(tr);
asks.emplace_back(Array{xbridge::xBridgeStringValueFromPrice(askPrice),
xbridge::xBridgeStringValueFromAmount(tr->fromAmount),
static_cast<int64_t>(asksCount)});
}
}
res.emplace_back(Pair("asks", asks));
res.emplace_back(Pair("bids", bids));
return uret(res);
}
case 2:
{
//Top X bids and asks (aggregated)
/**
* @brief bound - calculate upper bound
*/
auto bound = std::min<int32_t>(maxOrders, bidsVector.size());
for (int32_t i = 0; i < bound; ++i) // Best bids are at the beginning of the stack (sorted descending, highest price better)
{
if(bidsVector[i] == nullptr)
continue;
Array bid;
//calculate bids and push to array
const auto bidAmount = bidsVector[i]->toAmount;
const auto bidPrice = xbridge::priceBid(bidsVector[i]);
auto bidSize = bidAmount;
const auto bidsCount = std::count_if(bidsList.begin(), bidsList.end(),
[bidPrice, floatCompare](const TransactionPair &a)
{
const auto &tr = a.second;
if(tr == nullptr)
return false;
const auto price = xbridge::priceBid(tr);
return floatCompare(price, bidPrice);
});
// the vector is sorted by bid price, so aggregate the sizes of consecutive
// orders at an equal bid price without skipping the first differently priced order
while ((i + 1 < bound) && bidsVector[i + 1] != nullptr &&
       floatCompare(xbridge::priceBid(bidsVector[i + 1]), bidPrice)) {
    ++i;
    bidSize += bidsVector[i]->toAmount;
}
bid.emplace_back(xbridge::xBridgeStringValueFromPrice(bidPrice));
bid.emplace_back(xbridge::xBridgeStringValueFromAmount(bidSize));
bid.emplace_back(static_cast<int64_t>(bidsCount));
bids.emplace_back(bid);
}
bound = std::min<int32_t>(maxOrders, asksVector.size());
const auto asks_len = static_cast<int32_t>(asksVector.size());
for (int32_t i = asks_len - bound; i < asks_len; ++i) // Best asks are at the back of the stack (sorted descending, lowest price better)
{
if(asksVector[i] == nullptr)
continue;
Array ask;
//calculate asks and push to array
const auto askAmount = asksVector[i]->fromAmount;
const auto askPrice = xbridge::price(asksVector[i]);
auto askSize = askAmount;
const auto asksCount = std::count_if(asksList.begin(), asksList.end(),
[askPrice, floatCompare](const TransactionPair &a)
{
const auto &tr = a.second;
if(tr == nullptr)
return false;
const auto price = xbridge::price(tr);
return floatCompare(price, askPrice);
});
// the vector is sorted by price, so aggregate the sizes of consecutive
// orders at an equal ask price (iterate within asks_len, not bound)
while ((i + 1 < asks_len) && asksVector[i + 1] != nullptr &&
       floatCompare(xbridge::price(asksVector[i + 1]), askPrice)) {
    ++i;
    askSize += asksVector[i]->fromAmount;
}
ask.emplace_back(xbridge::xBridgeStringValueFromPrice(askPrice));
ask.emplace_back(xbridge::xBridgeStringValueFromAmount(askSize));
ask.emplace_back(static_cast<int64_t>(asksCount));
asks.emplace_back(ask);
}
res.emplace_back(Pair("asks", asks));
res.emplace_back(Pair("bids", bids));
return uret(res);
}
case 3:
{
//Full order book (non aggregated)
auto bound = std::min<int32_t>(maxOrders, bidsVector.size());
for (int32_t i = 0; i < bound; ++i) // Best bids are at the beginning of the stack (sorted descending, highest price better)
{
if(bidsVector[i] == nullptr)
continue;
Array bid;
const auto bidAmount = bidsVector[i]->toAmount;
const auto bidPrice = xbridge::priceBid(bidsVector[i]);
bid.emplace_back(xbridge::xBridgeStringValueFromPrice(bidPrice));
bid.emplace_back(xbridge::xBridgeStringValueFromAmount(bidAmount));
bid.emplace_back(bidsVector[i]->id.GetHex());
bids.emplace_back(bid);
}
bound = std::min<int32_t>(maxOrders, asksVector.size());
const auto asks_len = static_cast<int32_t>(asksVector.size());
for (int32_t i = asks_len - bound; i < asks_len; ++i) // Best asks are at the back of the stack (sorted descending, lowest price better)
{
if(asksVector[i] == nullptr)
continue;
Array ask;
const auto askAmount = asksVector[i]->fromAmount;
const auto askPrice = xbridge::price(asksVector[i]);
ask.emplace_back(xbridge::xBridgeStringValueFromPrice(askPrice));
ask.emplace_back(xbridge::xBridgeStringValueFromAmount(askAmount));
ask.emplace_back(asksVector[i]->id.GetHex());
asks.emplace_back(ask);
}
res.emplace_back(Pair("asks", asks));
res.emplace_back(Pair("bids", bids));
return uret(res);
}
case 4:
{
// return only the best bid and ask
if (!bidsList.empty()) {
const auto bidsItem = std::max_element(bidsList.begin(), bidsList.end(),
[](const TransactionPair &a, const TransactionPair &b)
{
//find transaction with best bids
const auto &tr1 = a.second;
const auto &tr2 = b.second;
if(tr1 == nullptr)
return true;
if(tr2 == nullptr)
return false;
const auto priceA = xbridge::priceBid(tr1);
const auto priceB = xbridge::priceBid(tr2);
return priceA < priceB;
});
const auto &tr = bidsItem->second;
if (tr != nullptr)
{
const auto bidPrice = xbridge::priceBid(tr);
bids.emplace_back(xbridge::xBridgeStringValueFromPrice(bidPrice));
bids.emplace_back(xbridge::xBridgeStringValueFromAmount(tr->toAmount));
Array bidsIds;
bidsIds.emplace_back(tr->id.GetHex());
for(const TransactionPair &tp : bidsList)
{
const auto &otherTr = tp.second;
if(otherTr == nullptr)
continue;
if(tr->id == otherTr->id)
continue;
const auto otherTrBidPrice = xbridge::priceBid(otherTr);
if(!floatCompare(bidPrice, otherTrBidPrice))
continue;
bidsIds.emplace_back(otherTr->id.GetHex());
}
bids.emplace_back(bidsIds);
}
}
if (!asksList.empty()) {
const auto asksItem = std::min_element(asksList.begin(), asksList.end(),
[](const TransactionPair &a, const TransactionPair &b)
{
//find transactions with best asks
const auto &tr1 = a.second;
const auto &tr2 = b.second;
if(tr1 == nullptr)
return true;
if(tr2 == nullptr)
return false;
const auto priceA = xbridge::price(tr1);
const auto priceB = xbridge::price(tr2);
return priceA < priceB;
});
const auto &tr = asksItem->second;
if (tr != nullptr)
{
const auto askPrice = xbridge::price(tr);
asks.emplace_back(xbridge::xBridgeStringValueFromPrice(askPrice));
asks.emplace_back(xbridge::xBridgeStringValueFromAmount(tr->fromAmount));
Array asksIds;
asksIds.emplace_back(tr->id.GetHex());
for(const TransactionPair &tp : asksList)
{
const auto &otherTr = tp.second;
if(otherTr == nullptr)
continue;
if(tr->id == otherTr->id)
continue;
const auto otherTrAskPrice = xbridge::price(otherTr);
if(!floatCompare(askPrice, otherTrAskPrice))
continue;
asksIds.emplace_back(otherTr->id.GetHex());
}
asks.emplace_back(asksIds);
}
}
res.emplace_back(Pair("asks", asks));
res.emplace_back(Pair("bids", bids));
return uret(res);
}
default:
return uret(xbridge::makeError(xbridge::INVALID_DETAIL_LEVEL, __FUNCTION__));
}
}
}
UniValue dxGetMyOrders(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetMyOrders",
"\nReturns a list of all of your orders (of all states). "
"It will only return orders from your current session.\n",
{},
RPCResult{
R"(
[
{
"id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"maker": "SYS",
"maker_size": "100.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "10.500000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "partial",
"partial_minimum": "10.000000",
"partial_orig_maker_size": "100.000000",
"partial_orig_taker_size": "10.500000",
"partial_repost": true,
"partial_parent_id": "",
"status": "open"
},
{
"id": "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a",
"maker": "SYS",
"maker_size": "4.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "0.400000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "partial",
"partial_minimum": "0.400000",
"partial_orig_maker_size": "4.000000",
"partial_orig_taker_size": "0.400000",
"partial_repost": true,
"partial_parent_id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"status": "open"
}
]
Key | Type | Description
------------------------|------|---------------------------------------------
Array | arr | An array of all orders with each order
| | having the following parameters.
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost | bool | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxGetMyOrders", "")
+ HelpExampleRpc("dxGetMyOrders", "")
},
}.ToString());
Value js;
json_spirit::read_string(request.params.write(), js);
Array params = js.get_array();
if (!params.empty()) {
Object error;
error.emplace_back(Pair("error",
xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS,
"This function does not accept any parameters.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
xbridge::App & xapp = xbridge::App::instance();
Array r;
TransactionVector orders;
TransactionMap trList = xbridge::App::instance().transactions();
// Filter local orders
for (const auto & i : trList) {
const xbridge::TransactionDescrPtr &t = i.second;
if(!t->isLocal())
continue;
orders.push_back(t);
}
// Add historical orders
TransactionMap history = xbridge::App::instance().history();
// Filter local orders only
for (auto &item : history) {
const xbridge::TransactionDescrPtr &ptr = item.second;
if (ptr->isLocal() &&
(ptr->state == xbridge::TransactionDescr::trFinished ||
ptr->state == xbridge::TransactionDescr::trCancelled)) {
orders.push_back(ptr);
}
}
// Return if no records
if (orders.empty())
return uret(r);
// sort ascending by updated time
std::sort(orders.begin(), orders.end(),
[](const xbridge::TransactionDescrPtr &a, const xbridge::TransactionDescrPtr &b) {
return (a->txtime) < (b->txtime);
});
std::map<std::string, bool> seen;
for (const auto &t : orders) {
// do not process already seen orders
if (seen.count(t->id.GetHex()))
continue;
seen[t->id.GetHex()] = true;
xbridge::WalletConnectorPtr connFrom = xapp.connectorByCurrency(t->fromCurrency);
xbridge::WalletConnectorPtr connTo = xapp.connectorByCurrency(t->toCurrency);
std::string makerAddress;
std::string takerAddress;
if (connFrom)
makerAddress = connFrom->fromXAddr(t->from);
if (connTo)
takerAddress = connTo->fromXAddr(t->to);
Object o;
o.emplace_back(Pair("id", t->id.GetHex()));
// maker data
o.emplace_back(Pair("maker", t->fromCurrency));
o.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(t->fromAmount)));
o.emplace_back(Pair("maker_address", makerAddress));
// taker data
o.emplace_back(Pair("taker", t->toCurrency));
o.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(t->toAmount)));
o.emplace_back(Pair("taker_address", takerAddress));
// dates
o.emplace_back(Pair("updated_at", xbridge::iso8601(t->txtime)));
o.emplace_back(Pair("created_at", xbridge::iso8601(t->created)));
// partial order details
o.emplace_back(Pair("order_type", t->orderType()));
o.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(t->minFromAmount)));
o.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(t->origFromAmount)));
o.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(t->origToAmount)));
o.emplace_back(Pair("partial_repost", t->repostOrder));
o.emplace_back(Pair("partial_parent_id", parseParentId(t->getParentOrder())));
o.emplace_back(Pair("status", t->strState()));
r.emplace_back(o);
}
return uret(r);
}
UniValue dxGetMyPartialOrderChain(const JSONRPCRequest& request) {
if (request.fHelp || request.params.empty() || request.params.size() > 1)
throw std::runtime_error(
RPCHelpMan{"dxGetMyPartialOrderChain",
"\nReturns a list of all orders related to the specified "
"order id. This includes partial orders that were repost "
"from a parent order.\n",
{
{"order_id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Order id"},
},
RPCResult{
R"(
[
{
"id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"maker": "SYS",
"maker_size": "100.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "10.500000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "partial",
"partial_minimum": "10.000000",
"partial_orig_maker_size": "100.000000",
"partial_orig_taker_size": "10.500000",
"partial_repost": true,
"partial_parent_id": "",
"status": "open"
},
{
"id": "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a",
"maker": "SYS",
"maker_size": "4.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "0.400000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-15T18:25:05.12345Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"order_type": "partial",
"partial_minimum": "0.400000",
"partial_orig_maker_size": "4.000000",
"partial_orig_taker_size": "0.400000",
"partial_repost": true,
"partial_parent_id": "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"status": "open"
}
]
Key | Type | Description
------------------------|------|---------------------------------------------
Array | arr | An array of all orders with each order
| | having the following parameters.
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost | bool | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxGetMyPartialOrderChain", "\"6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a\"")
+ HelpExampleRpc("dxGetMyPartialOrderChain", "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a")
},
}.ToString());
RPCTypeCheck(request.params, {UniValue::VSTR});
const auto orderid = uint256S(request.params[0].get_str());
if (orderid.IsNull())
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "bad order id"));
UniValue r(UniValue::VARR);
xbridge::App & xapp = xbridge::App::instance();
auto orderChain = xapp.getPartialOrderChain(orderid);
std::map<std::string, bool> seen;
for (const auto & t : orderChain) {
// do not process already seen orders
if (seen.count(t->id.GetHex()))
continue;
seen[t->id.GetHex()] = true;
xbridge::WalletConnectorPtr connFrom = xapp.connectorByCurrency(t->fromCurrency);
xbridge::WalletConnectorPtr connTo = xapp.connectorByCurrency(t->toCurrency);
std::string makerAddress;
std::string takerAddress;
if (connFrom)
makerAddress = connFrom->fromXAddr(t->from);
if (connTo)
takerAddress = connTo->fromXAddr(t->to);
UniValue o(UniValue::VOBJ);
o.pushKV("id", t->id.GetHex());
// maker data
o.pushKV("maker", t->fromCurrency);
o.pushKV("maker_size", xbridge::xBridgeStringValueFromAmount(t->fromAmount));
o.pushKV("maker_address", makerAddress);
// taker data
o.pushKV("taker", t->toCurrency);
o.pushKV("taker_size", xbridge::xBridgeStringValueFromAmount(t->toAmount));
o.pushKV("taker_address", takerAddress);
// dates
o.pushKV("updated_at", xbridge::iso8601(t->txtime));
o.pushKV("created_at", xbridge::iso8601(t->created));
// partial order details
o.pushKV("order_type", t->orderType());
o.pushKV("partial_minimum", xbridge::xBridgeStringValueFromAmount(t->minFromAmount));
o.pushKV("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(t->origFromAmount));
o.pushKV("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(t->origToAmount));
o.pushKV("partial_repost", t->repostOrder);
o.pushKV("partial_parent_id", parseParentId(t->getParentOrder()));
o.pushKV("status", t->strState());
r.push_back(o);
}
return r;
}
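// A minimal client-side sketch of how the partial_parent_id field returned
// above can be used to rebuild a chain from a flat order list. This helper
// and its name are illustrative assumptions, not an existing XBridge API:
// start at the order with an empty parent id, then repeatedly append the
// order whose parent is the current tail.
#include <map>
#include <string>
#include <vector>
static std::vector<std::string> orderChainFromParents(
        const std::map<std::string, std::string> & parents) // id -> parent id
{
    std::vector<std::string> chain;
    for (const auto & kv : parents) { // the root is the order with no parent
        if (kv.second.empty()) {
            chain.push_back(kv.first);
            break;
        }
    }
    if (chain.empty())
        return chain; // no root found
    bool extended = true;
    // Append the child whose parent is the current tail; the size guard
    // bounds the walk so malformed (cyclic) input cannot loop forever.
    while (extended && chain.size() < parents.size()) {
        extended = false;
        for (const auto & kv : parents) {
            if (kv.second == chain.back()) {
                chain.push_back(kv.first);
                extended = true;
                break;
            }
        }
    }
    return chain;
}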
UniValue dxPartialOrderChainDetails(const JSONRPCRequest& request) {
if (request.fHelp || request.params.empty() || request.params.size() > 1)
throw std::runtime_error(
RPCHelpMan{"dxPartialOrderChainDetails",
"\nReturns detailed information about a partial order "
"chain. This includes original amounts, total amount, "
"reported amounts sent and received and other information.\n",
{
{"order_id", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "Order id"},
},
RPCResult{
R"(
{
"first_order_id": "0b28e7c7de9a048dd2cb28b7d91062a052d16adf6d1a2154aa99ab2321c29770",
"maker": "BLOCK",
"maker_address": "y4Fn5z58KFA4qLcktBFCrKc8UHrWnNaVym",
"taker": "LTC",
"taker_address": "LWvt2ygq8QDkVEcCkMWHR4qXCqL2gC9D2B",
"partial_minimum": "0.100000",
"partial_orig_maker_size": "0.100000",
"partial_orig_taker_size": "0.000100",
"first_order_time": "2020-07-23T23:52:05.999Z",
"last_order_time": "2020-07-23T23:58:34.604Z",
"total_reported_sent": "0.200000",
"total_reported_received": "0.000200",
"total_reported_notsent": "0.800000",
"total_reported_notreceived": "0.000800",
"total_orders_open": 0,
"total_orders_finished": 2,
"total_orders_canceled": 1,
"orders": [
"0b28e7c7de9a048dd2cb28b7d91062a052d16adf6d1a2154aa99ab2321c29770",
"d3afd3b5faf604245a6962214bd0460bec88ff275236480d24b9e5cd45d44c41",
"5d4bde2de3d6982ce40da82da3b55803f82e11672b2292c611aec9b54cc4c4c9"
],
"p2sh_deposits": [
"a3bd9b849696946a06ad90b5e03337dba326400192d5b9b96ce0faf2cb513377",
"a29c4d06941877b501d0b5fe6dc054ca177f723e30a987f67a5871df8b14bfa5",
""
],
"p2sh_deposits_counterparty": [
"41e106c3668d097166cc4a5cce283a9079e769859c4a5467826506fc2547725e",
"c2f86465d26b3f90f559e3fe56a4a0aa44ee01e07f1e27b2236f16be92991f25",
""
]
}
Key | Type | Description
---------------------------|------|-----------------------------------------------------
first_order_id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset being
| | sold by the maker.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset being
| | sold by the taker.
taker_address | str | Address for receiving the incoming asset.
partial_minimum | str | The minimum amount that can be taken. This applies
| | to `partial` order types and will show `0` on
| | `exact` order types.
partial_orig_maker_size | str | The partial order original maker_size.
partial_orig_taker_size | str | The partial order original taker_size.
first_order_time           | str  | ISO 8601 datetime, with microseconds, of when
                           |      | the first order in the chain was created.
last_order_time            | str  | ISO 8601 datetime, with microseconds, of the last
                           |      | time the most recent order in the chain was updated.
total_reported_sent | str | Total amount of maker coin sent to traders.
total_reported_received | str | Total amount of taker coin received from traders.
total_reported_notsent | str | Total amount of maker coin not yet sent to traders.
total_reported_notreceived | str | Total amount of taker coin not yet received from traders.
total_orders_open | int | Total number of open orders.
total_orders_finished | int | Total number of completed orders.
total_orders_canceled | int | Total number of canceled orders.
orders | arr | All orders in the partial order chain.
p2sh_deposits | arr | All p2sh deposit txids sorted by "orders" data (1 for each order)
p2sh_deposits_counterparty | arr | All p2sh counterparty deposit txids sorted by "orders" data (1 for each order)
)"
},
RPCExamples{
HelpExampleCli("dxPartialOrderChainDetails", "\"6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a\"")
+ HelpExampleRpc("dxPartialOrderChainDetails", "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a")
},
}.ToString());
RPCTypeCheck(request.params, {UniValue::VSTR});
const auto orderid = uint256S(request.params[0].get_str());
if (orderid.IsNull())
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, "bad order id"));
xbridge::App & xapp = xbridge::App::instance();
auto orderChain = xapp.getPartialOrderChain(orderid);
if (orderChain.empty())
return UniValue(UniValue::VOBJ);
const auto firstOrder = orderChain[0];
const auto lastOrder = orderChain[orderChain.size()-1];
xbridge::WalletConnectorPtr connFrom = xapp.connectorByCurrency(firstOrder->fromCurrency);
xbridge::WalletConnectorPtr connTo = xapp.connectorByCurrency(firstOrder->toCurrency);
const auto firstOrderId = firstOrder->id;
const auto maker = firstOrder->fromCurrency;
const auto taker = firstOrder->toCurrency;
const auto makerAddress = connFrom ? connFrom->fromXAddr(firstOrder->from) : "";
const auto takerAddress = connTo ? connTo->fromXAddr(firstOrder->to) : "";
const auto partialMinimum = xbridge::xBridgeStringValueFromAmount(firstOrder->minFromAmount);
const auto makerOrigSize = xbridge::xBridgeStringValueFromAmount(firstOrder->origFromAmount);
const auto takerOrigSize = xbridge::xBridgeStringValueFromAmount(firstOrder->origToAmount);
const auto firstOrderTime = xbridge::iso8601(firstOrder->created);
const auto lastOrderTime = xbridge::iso8601(lastOrder->txtime);
int64_t totalSent{0}, totalReceived{0}, totalNotSent{0}, totalNotReceived{0};
int totalOpen{0}, totalInProgress{0}, totalFinished{0}, totalCanceled{0};
UniValue uvorders(UniValue::VARR);
UniValue uvp2sh(UniValue::VARR);
UniValue uvp2shcparty(UniValue::VARR);
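    // Tallying convention for the loop below (mirroring the state checks):
    // finished orders accumulate the sent/received totals, everything else
    // accumulates the notsent/notreceived totals; orders at or before
    // trPending count as open, those strictly between trPending and
    // trFinished as in progress, and trCancelled as canceled. Note that
    // totalInProgress is tallied but not included in the result object.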
for (const auto & t : orderChain) {
if (t->state == xbridge::TransactionDescr::trFinished) {
totalSent += t->fromAmount;
totalReceived += t->toAmount;
++totalFinished;
} else {
totalNotSent += t->fromAmount;
totalNotReceived += t->toAmount;
}
if (t->state <= xbridge::TransactionDescr::trPending)
++totalOpen;
if (t->state > xbridge::TransactionDescr::trPending && t->state < xbridge::TransactionDescr::trFinished)
++totalInProgress;
if (t->state == xbridge::TransactionDescr::trCancelled)
++totalCanceled;
uvorders.push_back(t->id.GetHex());
uvp2sh.push_back(t->binTxId);
uvp2shcparty.push_back(t->oBinTxId);
}
UniValue o(UniValue::VOBJ);
o.pushKV("first_order_id", firstOrderId.GetHex());
o.pushKV("maker", maker);
o.pushKV("maker_address", makerAddress);
o.pushKV("taker", taker);
o.pushKV("taker_address", takerAddress);
o.pushKV("partial_minimum", partialMinimum);
o.pushKV("partial_orig_maker_size", makerOrigSize);
o.pushKV("partial_orig_taker_size", takerOrigSize);
o.pushKV("first_order_time", firstOrderTime);
o.pushKV("last_order_time", lastOrderTime);
o.pushKV("total_reported_sent", xbridge::xBridgeStringValueFromAmount(totalSent));
o.pushKV("total_reported_received", xbridge::xBridgeStringValueFromAmount(totalReceived));
o.pushKV("total_reported_notsent", xbridge::xBridgeStringValueFromAmount(totalNotSent));
o.pushKV("total_reported_notreceived", xbridge::xBridgeStringValueFromAmount(totalNotReceived));
o.pushKV("total_orders_open", totalOpen);
o.pushKV("total_orders_finished", totalFinished);
o.pushKV("total_orders_canceled", totalCanceled);
o.pushKV("orders", uvorders);
o.pushKV("p2sh_deposits", uvp2sh);
o.pushKV("p2sh_deposits_counterparty", uvp2shcparty);
return o;
}
UniValue dxGetTokenBalances(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetTokenBalances",
"\nReturns a list of available balances for all connected wallets on your "
"node (view with dxGetLocalTokens).\n"
"\nNote:\n"
"These balances do not include Segwit UTXOs or those being used in open or in process orders. "
"XBridge works best with pre-sliced UTXOs so that your entire wallet balance is capable of "
"multiple simultaneous trades. Use dxSplitInputs or dxSplitAddress to generate trading inputs.\n",
{},
RPCResult{
R"(
{
"BLOCK": "250.83492174",
"LTC": "0.568942",
"MONA": "3.452",
"SYS": "1050.128493"
}
Key | Type | Description
-------------|------|--------------------------------------------------------
Object | obj | Key-value object of the assets and respective balances.
-- key | str | The asset symbol.
-- value | str | The available wallet balance amount.
)"
},
RPCExamples{
HelpExampleCli("dxGetTokenBalances", "")
+ HelpExampleRpc("dxGetTokenBalances", "")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() != 0)
{
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS, "This function does not accept any parameters.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
Object res;
// Wallet balance
double walletBalance = boost::numeric_cast<double>(xbridge::availableBalance()) / boost::numeric_cast<double>(COIN);
res.emplace_back("Wallet", xbridge::xBridgeStringValueFromPrice(walletBalance));
// Add connected wallet balances (fetch balances concurrently)
const auto &connectors = xbridge::App::instance().connectors();
std::condition_variable cv;
Mutex cv_mu;
Mutex mu; // lock writes to res
int cores = GetNumCores()/2;
if (cores > connectors.size())
cores = connectors.size();
if (cores <= 0)
cores = 1;
int count = 0;
boost::thread_group tg;
    for(const auto &connector : connectors)
    {
        {
            // Block while the worker queue is full. The slot counter is read
            // and updated under cv_mu, and the predicate is re-checked after
            // every wakeup, so a notify between the check and the wait cannot
            // be lost.
            WAIT_LOCK(cv_mu, lock);
            while (count >= cores)
                cv.wait(lock);
            count++;
        }
        tg.create_thread([&cv,&cv_mu,&mu,&count,&connector,&res]() {
            RenameThread("blocknet-balance-check");
            const auto & excluded = xbridge::App::instance().getAllLockedUtxos(connector->currency);
            const auto balance = connector->getWalletBalance(excluded);
            {
                LOCK(mu);
                if (balance >= 0) // Ignore results from disconnected wallets
                    res.emplace_back(connector->currency, xbridge::xBridgeStringValueFromPrice(balance));
            }
            {
                LOCK(cv_mu); // release the worker slot
                count--;
            }
            cv.notify_one();
        });
    }
tg.join_all(); // wait for all to complete
return uret(res);
}
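// A self-contained sketch of the bounded-concurrency pattern used in
// dxGetTokenBalances above; runBounded and its signature are illustrative
// assumptions, not project API. At most maxWorkers (which must be >= 1)
// tasks run at once; the producer re-checks the slot counter under the same
// mutex the workers use to release slots, so notifications cannot be lost.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>
static void runBounded(const std::vector<std::function<void()>> & tasks,
                       unsigned maxWorkers)
{
    std::mutex slotMu;
    std::condition_variable slotCv;
    unsigned running = 0;
    std::vector<std::thread> workers;
    workers.reserve(tasks.size());
    for (const auto & task : tasks) {
        {
            std::unique_lock<std::mutex> lock(slotMu);
            slotCv.wait(lock, [&] { return running < maxWorkers; }); // wait for a slot
            ++running;
        }
        workers.emplace_back([&slotMu, &slotCv, &running, task]() {
            task(); // the per-wallet work, e.g. a balance query
            {
                std::lock_guard<std::mutex> lock(slotMu);
                --running;
            }
            slotCv.notify_one(); // release the slot
        });
    }
    for (auto & w : workers)
        w.join();
}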
UniValue dxGetLockedUtxos(const JSONRPCRequest& request)
{
if (request.fHelp)
throw std::runtime_error(
RPCHelpMan{"dxGetLockedUtxos",
"\nReturns a list of locked UTXOs used in orders. You can only use "
"this call if you have a Service Node setup.\n",
{
{"id", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "The order ID. If omitted, a list of UTXOs used in all orders will be returned."},
},
RPCResult{
R"(
[
{
"id" : "91d0ea83edc79b9a2041c51d08037cff87c181efb311a095dfdd4edbcc7993a9",
"LTC" : [
    "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a",
    "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a",
    "6be548bc46a3dcc69b6d56529948f7e679dd96657f85f5870a017e005caa050a"
]
}
]
Key | Type | Description
----------------|------|-----------------------------------------------------
id | str | The order ID.
Object | obj | Key-value object of the asset and UTXOs for the
                |      | aforementioned order.
-- key | str | The asset symbol.
-- value | arr | The UTXOs locked for the given order ID.
)"
},
RPCExamples{
HelpExampleCli("dxGetLockedUtxos", "")
+ HelpExampleRpc("dxGetLockedUtxos", "")
+ HelpExampleCli("dxGetLockedUtxos", "524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432")
+ HelpExampleRpc("dxGetLockedUtxos", "\"524137449d9a35fa707ee395abab32bedae91aa2aefb6e3611fcd8574863e432\"")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
if (params.size() > 1)
{
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS, "Too many parameters.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
xbridge::Exchange & e = xbridge::Exchange::instance();
if (!e.isStarted())
{
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::Error::NOT_EXCHANGE_NODE)));
error.emplace_back(Pair("code", xbridge::Error::NOT_EXCHANGE_NODE));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
uint256 id;
if(params.size() == 1)
id = uint256S(params[0].get_str());
std::vector<xbridge::wallet::UtxoEntry> items;
if(!e.getUtxoItems(id, items))
{
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::Error::TRANSACTION_NOT_FOUND, id.GetHex())));
error.emplace_back(Pair("code", xbridge::Error::TRANSACTION_NOT_FOUND));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
Array utxo;
for(const xbridge::wallet::UtxoEntry & entry : items)
utxo.emplace_back(entry.toString());
Object obj;
if(id.IsNull())
{
obj.emplace_back(Pair("all_locked_utxo", utxo));
return uret(obj);
}
xbridge::TransactionPtr pendingTx = e.pendingTransaction(id);
xbridge::TransactionPtr acceptedTx = e.transaction(id);
if (!pendingTx->isValid() && !acceptedTx->isValid())
{
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::Error::TRANSACTION_NOT_FOUND, id.GetHex())));
error.emplace_back(Pair("code", xbridge::Error::TRANSACTION_NOT_FOUND));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
obj.emplace_back(Pair("id", id.GetHex()));
if(pendingTx->isValid())
obj.emplace_back(Pair(pendingTx->a_currency(), utxo));
else if(acceptedTx->isValid())
obj.emplace_back(Pair(acceptedTx->a_currency() + "_and_" + acceptedTx->b_currency(), utxo));
return uret(obj);
}
UniValue gettradingdata(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() > 2)
throw std::runtime_error(
RPCHelpMan{"gettradingdata",
"\nReturns an object of XBridge trading records. This information is "
"pulled from on-chain history so pulling a large amount of blocks will "
"result in longer response times.\n",
{
{"blocks", RPCArg::Type::NUM, "43200", "The number of blocks to return trade records for (60s block time)."},
{"errors", RPCArg::Type::BOOL, "false", "show errors"},
},
RPCResult{
"{\n"
" \"timestamp\": \"1559970139\", (uint64) Unix epoch timestamp in seconds of when the trade took place.\n"
" \"txid\": \"4b409r5c5fb1986p30cf7c19afec2c8\", (string) The Blocknet trade fee transaction ID.\n"
" \"to\": \"Bqtes8j14rE65kcpsEors5JDzDaHiaMtLG\", (string) The address of the Service Node that received the trade fee.\n"
" \"xid\": \"9eb57bas331eab3zf3daefd8364cdbL\", (string) The XBridge transaction ID.\n"
" \"from\": \"BLOCK\", (string) The symbol of the token bought by the maker.\n"
" \"fromAmount\": 0.001111, (uint64) The amount of the token that was bought by the maker.\n"
" \"to\": \"SYS\", (string) The symbol of the token sold by the maker.\n"
" \"toAmount\": 0.001000, (uint64) The amount of the token that was sold by the maker.\n"
"}\n"
},
RPCExamples{
HelpExampleCli("gettradingdata", "")
+ HelpExampleRpc("gettradingdata", "")
+ HelpExampleCli("gettradingdata", "86400")
+ HelpExampleRpc("gettradingdata", "86400")
+ HelpExampleCli("gettradingdata", "86400 true")
+ HelpExampleRpc("gettradingdata", "86400, true")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
uint32_t countOfBlocks = 43200;
bool showErrors = false;
if (params.size() >= 1) {
if (params.size() == 2) {
RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VBOOL});
showErrors = params[1].get_bool();
} else
RPCTypeCheck(request.params, {UniValue::VNUM});
countOfBlocks = params[0].get_int();
}
LOCK(cs_main);
Array records;
CBlockIndex * pindex = chainActive.Tip();
int64_t timeBegin = chainActive.Tip()->GetBlockTime();
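    // Walk back from the tip until either countOfBlocks blocks have been
    // visited or 30 days of block time have elapsed, whichever comes first.
    // At the 60-second target spacing noted in the help text, the default
    // 43200 blocks spans 43200 * 60 s = 30 days, so both limits coincide
    // for a default request.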
for (; pindex->pprev && pindex->GetBlockTime() > (timeBegin-30*24*60*60) && countOfBlocks > 0;
pindex = pindex->pprev, --countOfBlocks)
{
CBlock block;
if (!ReadBlockFromDisk(block, pindex, Params().GetConsensus()))
{
            // skip blocks that fail to read from disk; an error here is non-fatal
continue;
}
const auto timestamp = block.GetBlockTime();
for (const CTransactionRef & tx : block.vtx)
{
const auto txid = tx->GetHash().GetHex();
std::string snode_pubkey{};
const CurrencyPair p = TxOutToCurrencyPair(tx->vout, snode_pubkey);
switch(p.tag) {
case CurrencyPair::Tag::Error:
// Show errors
if (showErrors)
records.emplace_back(Object{
Pair{"timestamp", timestamp},
Pair{"txid", txid},
Pair{"xid", p.error()}
});
break;
case CurrencyPair::Tag::Valid:
records.emplace_back(Object{
Pair{"timestamp", timestamp},
Pair{"txid", txid},
Pair{"to", snode_pubkey},
Pair{"xid", p.xid()},
Pair{"from", p.from.currency().to_string()},
Pair{"fromAmount", p.from.amount<double>()},
Pair{"to", p.to.currency().to_string()},
Pair{"toAmount", p.to.amount<double>()},
});
break;
case CurrencyPair::Tag::Empty:
default:
break;
}
}
}
return uret(records);
}
UniValue dxGetTradingData(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() > 2)
throw std::runtime_error(
RPCHelpMan{"dxGetTradingData",
"\nReturns an object of XBridge trading records. This information is "
"pulled from on-chain history so pulling a large amount of blocks will "
"result in longer response times.\n",
{
{"blocks", RPCArg::Type::NUM, "43200", "The number of blocks to return trade records for (60s block time)."},
{"errors", RPCArg::Type::BOOL, "false", "Shows an error if an error is detected."},
},
RPCResult{
R"(
[
{
"timestamp": 1559970139,
"fee_txid": "4b409e5c5fb1986930cf7c19afec2c89ac2ad4fddc13c1d5479b66ddf4a8fefb",
"nodepubkey": "Bqtms8j1zrE65kcpsEorE5JDzDaHidMtLG",
"id": "9eb57bac331eab34f3daefd8364cdb2bb05259c407d805d0bd0c",
"taker": "BLOCK",
"taker_size": 0.001111,
"maker": "SYS",
"maker_size": 0.001000
},
{
"timestamp": 1559970139,
"fee_txid": "3de7479e8a88ebed986d3b7e7e135291d3fd10e4e6d4c6238663db42c5019286",
"nodepubkey": "Bqtms8j1zrE65kcpsEorE5JDzDaHidMtLG",
"id": "fd0fed3ee9fe557d5735768c9bdcd4ab2908165353e0f0cef0d5",
"taker": "BLOCK",
"taker_size": 0.001577,
"maker": "SYS",
"maker_size": 0.001420
}
]
Key | Type | Description
------------|------|---------------------------------------------------------
timestamp | int | Unix epoch timestamp of when the trade took place.
fee_txid | str | The Blocknet trade fee transaction ID.
nodepubkey | str | The pubkey of the service node that received the trade
| | fee.
id | str | The order ID.
taker | str | Taker trading asset; the ticker of the asset being sold
| | by the taker.
taker_size  | float | Taker trading size.
maker | str | Maker trading asset; the ticker of the asset being sold
| | by the maker.
maker_size  | float | Maker trading size.
)"
},
RPCExamples{
HelpExampleCli("dxGetTradingData", "")
+ HelpExampleRpc("dxGetTradingData", "")
+ HelpExampleCli("dxGetTradingData", "43200")
+ HelpExampleRpc("dxGetTradingData", "43200")
+ HelpExampleCli("dxGetTradingData", "43200 true")
+ HelpExampleRpc("dxGetTradingData", "43200, true")
},
}.ToString());
Value js; json_spirit::read_string(request.params.write(), js); Array params = js.get_array();
uint32_t countOfBlocks = 43200;
bool showErrors = false;
if (params.size() >= 1) {
if (params.size() == 2) {
RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VBOOL});
showErrors = params[1].get_bool();
} else
RPCTypeCheck(request.params, {UniValue::VNUM});
countOfBlocks = params[0].get_int();
}
LOCK(cs_main);
Array records;
CBlockIndex * pindex = chainActive.Tip();
int64_t timeBegin = chainActive.Tip()->GetBlockTime();
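    // Same block scan and 30-day cap as gettradingdata above; this variant
    // reports distinct field names (fee_txid, nodepubkey, taker, maker).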
for (; pindex->pprev && pindex->GetBlockTime() > (timeBegin-30*24*60*60) && countOfBlocks > 0;
pindex = pindex->pprev, --countOfBlocks)
{
CBlock block;
if (!ReadBlockFromDisk(block, pindex, Params().GetConsensus()))
{
            // skip blocks that fail to read from disk; an error here is non-fatal
continue;
}
const auto timestamp = block.GetBlockTime();
for (const CTransactionRef & tx : block.vtx)
{
const auto txid = tx->GetHash().GetHex();
std::string snode_pubkey{};
const CurrencyPair p = TxOutToCurrencyPair(tx->vout, snode_pubkey);
switch(p.tag) {
case CurrencyPair::Tag::Error:
// Show errors
if (showErrors)
records.emplace_back(Object{
Pair{"timestamp", timestamp},
Pair{"fee_txid", txid},
Pair{"id", p.error()}
});
break;
case CurrencyPair::Tag::Valid:
records.emplace_back(Object{
Pair{"timestamp", timestamp},
Pair{"fee_txid", txid},
Pair{"nodepubkey", snode_pubkey},
Pair{"id", p.xid()},
Pair{"taker", p.from.currency().to_string()},
Pair{"taker_size", p.from.amount<double>()},
Pair{"maker", p.to.currency().to_string()},
Pair{"maker_size", p.to.amount<double>()},
});
break;
case CurrencyPair::Tag::Empty:
default:
break;
}
}
}
return uret(records);
}
UniValue dxMakePartialOrder(const JSONRPCRequest& request)
{
    if (request.fHelp || request.params.size() < 7 || request.params.size() > 9)
throw std::runtime_error(
RPCHelpMan{"dxMakePartialOrder",
"\nCreate a new partial order. Partial orders don't require the entire order to be filled. "
"For exact orders, see dxMakeOrder.\n"
"You can only create orders for markets with assets supported by your node (view with dxGetLocalTokens) "
"and the network (view with dxGetNetworkTokens). There are no fees to make orders. \n"
"\nWhen a partial order is created, multiple inputs will be selected or "
"generated. Using multiple inputs is optimal for allowing partial orders of "
"varying sizes while minimizing the amount of change (change not reposted). "
"This maximizes the amount remaining that can be immediately reposted.\n"
"\nThe way input selection/generation is done depends on your total "
"`maker_size` and `minimum_size`. XBridge will first attempt to find "
"existing inputs that are properly sized for the order. If needed, existing "
"inputs will automatically be split into the proper size at the time the "
"order is posted. While the inputs are being generated, the order will "
"remain in the `new` state. Once the generated inputs have 1 confirmation "
"the order will proceed to the `open` state.\n"
"\nNote:\n"
"XBridge will first attempt to use funds from the specified maker address. "
"If this address does not have sufficient funds to cover the order, then "
"it will pull funds from other addresses in the wallet. Change is "
"deposited to the address with the largest input used.\n",
{
{"maker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset being sold by the maker (e.g. LTC)."},
{"maker_size", RPCArg::Type::STR, RPCArg::Optional::NO, "The amount of the maker asset being sent."},
{"maker_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The maker address containing asset being sent."},
{"taker", RPCArg::Type::STR, RPCArg::Optional::NO, "The symbol of the asset being bought by the maker (e.g. BLOCK)."},
{"taker_size", RPCArg::Type::STR, RPCArg::Optional::NO, "The amount of the taker asset to be received."},
{"taker_address", RPCArg::Type::STR, RPCArg::Optional::NO, "The taker address for the receiving asset."},
{"minimum_size", RPCArg::Type::STR, RPCArg::Optional::NO, "Minimum maker_size that can be traded in the partial order."},
{"repost", RPCArg::Type::STR, "true", "Repost partial order remainder after taken. Options: true/false"},
{"dryrun", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "Simulate the order submission without actually submitting the order, i.e. a test run. Options: dryrun"},
},
RPCResult{
R"(
{
"id": "4306a107113c4562afa6273ecd9a3990ead53a0227f74ddd9122272e453ae07d",
"maker": "SYS",
"maker_size": "1.000000",
"maker_address": "SVTbaYZ8olpVn3uNyImst3GKyrvfzXQgdK",
"taker": "LTC",
"taker_size": "0.100000",
"taker_address": "LVvFhZroMRGTtg1hHp7jVew3YoZRX8y35Z",
"updated_at": "2018-01-16T00:00:00.00000Z",
"created_at": "2018-01-15T18:15:30.12345Z",
"block_id": "38729344720548447445023782734923740427863289632489723984723",
"order_type": "partial",
"partial_minimum": "0.200000",
"partial_orig_maker_size": "2.000000",
"partial_orig_taker_size": "0.200000",
"partial_repost": true,
"partial_parent_id": "1faeba06827929f16490c61ba633522158e8d44163c47f735078eac0304c5eb6",
"status": "created"
}
Key | Type | Description
------------------------|------|---------------------------------------------
 Object                  | obj  | The created order, with the
                         |      | following parameters.
id | str | The order ID.
maker | str | Maker trading asset; the ticker of the asset
| | being sold by the maker.
maker_size | str | Maker trading size. String is used to
| | preserve precision.
maker_address | str | Address for sending the outgoing asset.
taker | str | Taker trading asset; the ticker of the asset
| | being sold by the taker.
taker_size | str | Taker trading size. String is used to
| | preserve precision.
taker_address | str | Address for receiving the incoming asset.
updated_at | str | ISO 8601 datetime, with microseconds, of the
| | last time the order was updated.
created_at | str | ISO 8601 datetime, with microseconds, of
| | when the order was created.
order_type | str | The order type.
partial_minimum* | str | The minimum amount that can be taken.
partial_orig_maker_size*| str | The partial order original maker_size.
partial_orig_taker_size*| str | The partial order original taker_size.
partial_repost          | bool | Whether the order will be reposted or not.
| | This applies to `partial` order types and
| | will show `false` for `exact` order types.
partial_parent_id | str | The previous order id of a reposted partial
| | order. This will return an empty string if
| | there is no parent order.
status | str | The order status.
* This only applies to `partial` order types and will show `0` on `exact`
order types.
)"
},
RPCExamples{
HelpExampleCli("dxMakePartialOrder", "LTC 25 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BLOCK 1000 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR 100")
+ HelpExampleRpc("dxMakePartialOrder", "\"LTC\", \"25\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BLOCK\", \"1000\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"100\"")
+ HelpExampleCli("dxMakePartialOrder", "LTC 25 LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H BLOCK 1000 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR 100 true dryrun")
+ HelpExampleRpc("dxMakePartialOrder", "\"LTC\", \"25\", \"LLZ1pgb6Jqx8hu84fcr5WC5HMoKRUsRE8H\", \"BLOCK\", \"1000\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", \"100\", \"true\", \"dryrun\"")
},
}.ToString());
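    // Illustrative walk-through of the behavior described in the help text
    // above (the numbers are assumptions, not from the source): with
    // maker_size = 100 and minimum_size = 10, XBridge selects or generates
    // inputs sized so that any fill between 10 and 100 leaves minimal
    // change; while generated inputs await 1 confirmation the order sits in
    // the `new` state, then moves to `open`.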
if (!xbridge::xBridgeValidCoin(request.params[1].get_str())) {
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS,
"The maker_size is too precise. The maximum precision supported is " +
std::to_string(xbridge::xBridgeSignificantDigits(xbridge::TransactionDescr::COIN)) + " digits.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
if (!xbridge::xBridgeValidCoin(request.params[4].get_str())) {
Object error;
error.emplace_back(Pair("error", xbridge::xbridgeErrorText(xbridge::INVALID_PARAMETERS,
"The taker_size is too precise. The maximum precision supported is " +
std::to_string(xbridge::xBridgeSignificantDigits(xbridge::TransactionDescr::COIN)) + " digits.")));
error.emplace_back(Pair("code", xbridge::INVALID_PARAMETERS));
error.emplace_back(Pair("name", __FUNCTION__));
return uret(error);
}
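    // Worked example of the precision checks above, assuming
    // xbridge::TransactionDescr::COIN == 1e6 (6 significant decimal digits):
    // a size of "1.234567" passes, while "1.2345678" is rejected as too
    // precise.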
std::string fromCurrency = request.params[0].get_str();
double fromAmount = boost::lexical_cast<double>(request.params[1].get_str());
std::string fromAddress = request.params[2].get_str();
std::string toCurrency = request.params[3].get_str();
double toAmount = boost::lexical_cast<double>(request.params[4].get_str());
std::string toAddress = request.params[5].get_str();
double partialMinimum = boost::lexical_cast<double>(request.params[6].get_str());
// Check if min_size > maker_size
if (partialMinimum > fromAmount) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The minimum_size can't be more than maker_size"));
}
// Check that addresses are not the same
if (fromAddress == toAddress) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maker_address and taker_address cannot be the same: " + fromAddress));
}
// Check upper limits
if (fromAmount > (double)xbridge::TransactionDescr::MAX_COIN ||
toAmount > (double)xbridge::TransactionDescr::MAX_COIN) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maximum supported size is " + std::to_string(xbridge::TransactionDescr::MAX_COIN)));
}
// Check lower limits
if (fromAmount <= 0 || toAmount <= 0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The minimum supported size is " + xbridge::xBridgeStringValueFromPrice(1.0/xbridge::TransactionDescr::COIN)));
}
// Validate addresses
xbridge::WalletConnectorPtr connFrom = xbridge::App::instance().connectorByCurrency(fromCurrency);
xbridge::WalletConnectorPtr connTo = xbridge::App::instance().connectorByCurrency(toCurrency);
if (!connFrom) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + fromCurrency));
if (!connTo) return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, "Unable to connect to wallet: " + toCurrency));
xbridge::App &app = xbridge::App::instance();
if (!app.isValidAddress(fromAddress, connFrom)) {
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__, fromAddress));
}
if (!app.isValidAddress(toAddress, connTo)) {
return uret(xbridge::makeError(xbridge::INVALID_ADDRESS, __FUNCTION__, toAddress));
}
if(fromAmount <= .0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The maker_size must be greater than 0."));
}
if(toAmount <= .0) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The taker_size must be greater than 0."));
}
if (connFrom->isDustAmount(partialMinimum)) {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__,
"The partial minimum_size is dust, i.e. it's too small."));
}
bool repost{true};
if (request.params.size() >= 8)
        repost = request.params[7].get_str() != "false";
// Perform explicit check on dryrun to avoid executing order on bad spelling
bool dryrun = false;
if (request.params.size() == 9) {
std::string dryrunParam = request.params[8].get_str();
if (dryrunParam != "dryrun") {
return uret(xbridge::makeError(xbridge::INVALID_PARAMETERS, __FUNCTION__, dryrunParam));
}
dryrun = true;
}
Object result;
auto statusCode = app.checkCreateParams(fromCurrency, toCurrency,
xbridge::xBridgeAmountFromReal(fromAmount), fromAddress);
switch (statusCode) {
case xbridge::SUCCESS:{
// If dryrun
if (dryrun) {
result.emplace_back(Pair("id", uint256().GetHex()));
result.emplace_back(Pair("maker", fromCurrency));
result.emplace_back(Pair("maker_size",
xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
result.emplace_back(Pair("maker_address", fromAddress));
result.emplace_back(Pair("taker", toCurrency));
result.emplace_back(Pair("taker_size",
xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
result.emplace_back(Pair("taker_address", toAddress));
result.emplace_back(Pair("order_type", "partial"));
result.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(partialMinimum))));
result.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
result.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
result.emplace_back(Pair("partial_repost", repost));
result.emplace_back(Pair("partial_parent_id", parseParentId(uint256())));
result.emplace_back(Pair("status", "created"));
return uret(result);
}
break;
}
case xbridge::INVALID_CURRENCY: {
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromCurrency));
}
case xbridge::NO_SESSION:{
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromCurrency));
}
case xbridge::INSIFFICIENT_FUNDS:{
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromAddress));
}
case xbridge::NO_SERVICE_NODE:{
return uret(xbridge::makeError(statusCode, __FUNCTION__, fromCurrency + "/" + toCurrency));
}
default:
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
uint256 id = uint256();
uint256 blockHash = uint256();
statusCode = xbridge::App::instance().sendXBridgeTransaction(fromAddress, fromCurrency,
xbridge::xBridgeAmountFromReal(fromAmount), toAddress, toCurrency,
xbridge::xBridgeAmountFromReal(toAmount), std::vector<xbridge::wallet::UtxoEntry>{},
true, repost, xbridge::xBridgeAmountFromReal(partialMinimum), id, blockHash);
if (statusCode == xbridge::SUCCESS) {
Object obj;
obj.emplace_back(Pair("id", id.GetHex()));
obj.emplace_back(Pair("maker_address", fromAddress));
obj.emplace_back(Pair("maker", fromCurrency));
obj.emplace_back(Pair("maker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
obj.emplace_back(Pair("taker_address", toAddress));
obj.emplace_back(Pair("taker", toCurrency));
obj.emplace_back(Pair("taker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
const auto &createdTime = xbridge::App::instance().transaction(id)->created;
obj.emplace_back(Pair("created_at", xbridge::iso8601(createdTime)));
obj.emplace_back(Pair("updated_at", xbridge::iso8601(boost::posix_time::microsec_clock::universal_time()))); // TODO Need actual updated time, this is just estimate
obj.emplace_back(Pair("block_id", blockHash.GetHex()));
obj.emplace_back(Pair("order_type", "partial"));
obj.emplace_back(Pair("partial_minimum", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(partialMinimum))));
obj.emplace_back(Pair("partial_orig_maker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(fromAmount))));
obj.emplace_back(Pair("partial_orig_taker_size", xbridge::xBridgeStringValueFromAmount(xbridge::xBridgeAmountFromReal(toAmount))));
obj.emplace_back(Pair("partial_repost", repost));
obj.emplace_back(Pair("partial_parent_id", parseParentId(uint256())));
obj.emplace_back(Pair("status", "created"));
return uret(obj);
} else {
return uret(xbridge::makeError(statusCode, __FUNCTION__));
}
}
UniValue dxSplitAddress(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 3 || request.params.size() > 6)
throw std::runtime_error(
RPCHelpMan{"dxSplitAddress",
"\nSplits unused coin in the given address into the specified size. Left over amounts "
"end up in change. UTXOs being used in existing orders will not be included by the "
"splitter (view with dxGetUtxos). You can only split UTXOs for assets supported by "
"your node (view with dxGetLocalTokens).\n",
{
{"token", RPCArg::Type::STR, RPCArg::Optional::NO, "The ticker of the asset you want to split UTXOs for."},
{"split_amount", RPCArg::Type::STR, RPCArg::Optional::NO, "The desired UTXO output size."},
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The address to split UTXOs in. Only coin in this address will be split."},
{"include_fees", RPCArg::Type::BOOL, "true", "Include the trade P2SH deposit fees in the split UTXO (add deposit fee to `spit_amount` value."},
{"show_rawtx", RPCArg::Type::BOOL, "false", "Include the raw transaction in the response (rawtx can be submitted manually)."},
{"submit", RPCArg::Type::BOOL, "true", "Submit the raw transaction to the network."},
},
RPCResult{
R"(
{
"token": "BLOCK",
"include_fees": true,
"split_amount_requested": "4.0",
"split_amount_with_fees": "4.00040000",
"split_utxo_count": 6,
"split_total": "24.44852981",
"txid": "7f87cba104b3c19f6e25fbc82b3cde5d73714e01d6a54943d3c8fb07ce315db4",
"rawtx": ""
}
Key | Type | Description
-----------------------|------|----------------------------------------------
token | str | Asset you are splitting UTXOs for.
include_fees | bool | Whether you requested to include the fees.
split_amount_requested | str | Requested split amount.
split_amount_with_fees | str | Requested split amount with fees included.
split_utxo_count | int | Amount of resulting split UTXOs.
split_total            | str  | Total amount of coin in the address prior to
                       |      | splitting.
txid | str | Hex string of the splitting transaction.
rawtx | str | Hex string of the raw splitting transaction.
)"
},
RPCExamples{
HelpExampleCli("dxSplitAddress", "BLOCK 10.5 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR")
+ HelpExampleRpc("dxSplitAddress", "\"BLOCK\", \"10.5\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\"")
+ HelpExampleCli("dxSplitAddress", "BLOCK 10.5 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR true false true")
+ HelpExampleRpc("dxSplitAddress", "\"BLOCK\", \"10.5\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", true, false, true")
},
}.ToString());
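    // Worked example from the sample output in the help text above: with
    // split_amount "4.0" and include_fees = true, each resulting UTXO is
    // sized 4.0 plus the P2SH deposit fee allowance (4.00040000 in the
    // sample, i.e. 0.0004 per output), so a single split output can fund
    // one trade deposit without pulling in extra inputs.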
auto token = request.params[0].get_str();
auto splitAmount = request.params[1].get_str();
auto address = request.params[2].get_str();
bool includeFees{true};
bool showRawTx{false};
bool submitTx{true};
if (!request.params[3].isNull())
includeFees = request.params[3].get_bool();
if (!request.params[4].isNull())
showRawTx = request.params[4].get_bool();
if (!request.params[5].isNull())
submitTx = request.params[5].get_bool();
auto & xapp = xbridge::App::instance();
xbridge::WalletConnectorPtr conn = xapp.connectorByCurrency(token);
if (!conn)
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, token));
auto utxos = xapp.getAllLockedUtxos(token);
const CAmount sa = xbridge::xBridgeIntFromReal(boost::lexical_cast<double>(splitAmount));
std::string txid, rawtx, failReason;
CAmount totalSplit{0};
CAmount splitInclFees{0};
int splitCount{0};
if (!conn->splitUtxos(sa, address, includeFees, utxos, std::set<COutPoint>{}, totalSplit, splitInclFees, splitCount, txid, rawtx, failReason))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, failReason));
int errorcode{0};
std::string txid2, errmsg;
if (submitTx && !conn->sendRawTransaction(rawtx, txid2, errorcode, errmsg))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, errmsg));
UniValue r(UniValue::VOBJ);
r.pushKV("token", token);
r.pushKV("include_fees", includeFees);
r.pushKV("split_amount_requested", xbridge::xBridgeStringValueFromAmount(sa));
r.pushKV("split_amount_with_fees", xbridge::xBridgeStringValueFromAmount(splitInclFees));
r.pushKV("split_utxo_count", splitCount);
r.pushKV("split_total", xbridge::xBridgeStringValueFromAmount(totalSplit));
r.pushKV("txid", txid);
r.pushKV("rawtx", showRawTx ? rawtx : "");
return r;
}
UniValue dxSplitInputs(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 3 || request.params.size() > 7)
throw std::runtime_error(
RPCHelpMan{"dxSplitInputs",
"\nSplits specified UTXOs into the given size and address. Left over amounts "
"end up in change. UTXOs being used in existing orders will not be included "
"by the splitter (view with dxGetUtxos). You can only split UTXOs for assets "
"supported by your node (view with dxGetLocalTokens).\n",
{
{"token", RPCArg::Type::STR, RPCArg::Optional::NO, "The ticker of the asset you want to split UTXOs for."},
{"split_amount", RPCArg::Type::STR, RPCArg::Optional::NO, "The desired UTXO output size."},
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The address split UTXOs and change will be sent to."},
{"include_fees", RPCArg::Type::BOOL, RPCArg::Optional::NO, "Include the trade P2SH deposit fees in the split UTXO (add deposit fee to `spit_amount` value."},
{"show_rawtx", RPCArg::Type::BOOL, RPCArg::Optional::NO, "Include the raw transaction in the response (can be submitted manually)."},
{"submit", RPCArg::Type::BOOL, RPCArg::Optional::NO, "Submit the raw transaction to the network."},
{"utxos", RPCArg::Type::ARR, RPCArg::Optional::NO, "List of UTXO inputs.",
{
{"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
{
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The UTXO transaction ID."},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The UTXO output index."},
},
},
}}
},
RPCResult{
R"(
{
"token": "BLOCK",
"include_fees": true,
"split_amount_requested": "4.0",
"split_amount_with_fees": "4.00040000",
"split_utxo_count": 6,
"split_total": "24.44852981",
"txid": "7f87cba104b2c19f6e25fbc82b3cde5d73714e01d6a54943d3c8fb07ce315db4",
"rawtx": ""
}
Key | Type | Description
-----------------------|------|----------------------------------------------
token | str | The asset you are splitting UTXOs for.
include_fees | bool | Whether you requested to include the fees.
split_amount_requested | str | The requested split amount.
split_amount_with_fees | str | The requested split amount with fee included.
split_utxo_count | int | The amount of resulting split UTXOs.
split_total            | str  | The total amount of coin in the address prior
                       |      | to splitting.
txid | str | The hex string of the splitting transaction.
rawtx | str | The hex string of the raw splitting
| | transaction.
)"
},
RPCExamples{
HelpExampleCli("dxSplitInputs", "BLOCK 10.5 BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR true false true [{\"txid\":\"ed7d16abd5c0bf42dec36335d0f63938f1d9c10e7202bc780b888a51d291d3dc\",\"vout\":0},{\"txid\":\"ed7d16abd5c0bf42dec36335d0f63938f1d9c10e7202bc780b888a51d291d3dc\",\"vout\":1}]")
+ HelpExampleRpc("dxSplitInputs", "\"BLOCK\", \"10.5\", \"BWQrvmuHB4C68KH5V7fcn9bFtWN8y5hBmR\", true, false, true, [{\"txid\":\"ed7d16abd5c0bf42dec36335d0f63938f1d9c10e7202bc780b888a51d291d3dc\",\"vout\":0},{\"txid\":\"ed7d16abd5c0bf42dec36335d0f63938f1d9c10e7202bc780b888a51d291d3dc\",\"vout\":1}]")
},
}.ToString());
auto token = request.params[0].get_str();
auto splitAmount = request.params[1].get_str();
auto address = request.params[2].get_str();
bool includeFees = request.params[3].get_bool();
bool showRawTx = request.params[4].get_bool();
bool submitTx = request.params[5].get_bool();
const auto paramUtxos = request.params[6].get_array();
if (paramUtxos.empty())
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, "No utxos were specified"));
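    // Collect the caller-supplied outpoints into a set; only these inputs
    // may be consumed by the splitter, subject to the locked-UTXO overlap
    // check below.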
std::set<COutPoint> userUtxos;
for (const auto & val : paramUtxos.getValues()) {
std::map<std::string, UniValue> utxo;
val.getObjMap(utxo);
userUtxos.insert(COutPoint{uint256S(utxo["txid"].get_str()), (uint32_t)utxo["vout"].get_int()});
}
auto & xapp = xbridge::App::instance();
xbridge::WalletConnectorPtr conn = xapp.connectorByCurrency(token);
if (!conn)
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, token));
auto excludedUtxos = xapp.getAllLockedUtxos(token);
for (const auto & utxo : excludedUtxos) {
COutPoint vout{uint256S(utxo.txId), utxo.vout};
if (userUtxos.count(vout))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, "Cannot split utxo already in use: " + vout.ToString()));
}
const CAmount sa = xbridge::xBridgeIntFromReal(boost::lexical_cast<double>(splitAmount));
std::string txid, rawtx, failReason;
CAmount totalSplit{0};
CAmount splitInclFees{0};
int splitCount{0};
if (!conn->splitUtxos(sa, address, includeFees, excludedUtxos, userUtxos, totalSplit, splitInclFees, splitCount, txid, rawtx, failReason))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, failReason));
int errorcode{0};
std::string txid2, errmsg;
if (submitTx && !conn->sendRawTransaction(rawtx, txid2, errorcode, errmsg))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, errmsg));
UniValue r(UniValue::VOBJ);
r.pushKV("token", token);
r.pushKV("include_fees", includeFees);
r.pushKV("split_amount_requested", xbridge::xBridgeStringValueFromAmount(sa));
r.pushKV("split_amount_with_fees", xbridge::xBridgeStringValueFromAmount(splitInclFees));
r.pushKV("split_utxo_count", splitCount);
r.pushKV("split_total", xbridge::xBridgeStringValueFromAmount(totalSplit));
r.pushKV("txid", txid);
r.pushKV("rawtx", showRawTx ? rawtx : "");
return r;
}
UniValue dxGetUtxos(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() < 1 || request.params.size() > 2)
throw std::runtime_error(
RPCHelpMan{"dxGetUtxos",
"\nReturns all compatible and unlocked UTXOs for the specified asset. "
"Currently only P2PKH UTXOs are supported (Segwit UTXOs not supported). "
"You can only view UTXOs for assets supported by your node (view with dxGetLocalTokens).\n",
{
{"token", RPCArg::Type::STR, RPCArg::Optional::NO, "The ticker of the asset you want to view UTXOs for."},
{"include_used", RPCArg::Type::BOOL, "false", "Include UTXOs used in existing orders."},
},
RPCResult{
R"(
[
{
"txid": "c019edf2a71efcfc9b1ec50cd0d9db54c55b74acd0bcc81cefd6ffbba359a210",
"vout": 2,
"amount": "3.26211780",
"address": "BrPHj12ZSm7roD2gvrjRG2gD4TzeP1YDXG",
"scriptPubKey": "7b1ef56a92cec50cd0d147876a914ffd6fcbb4c5724a4057de",
"confirmations": 11904,
"orderid": ""
},
{
"txid": "a91c224c0725745cd0bcc81cefd6ffbba3f6cc36956cd566c50cd0d9db5c55b7",
"vout": 0,
"amount": "2.44485198",
"address": "BJYS5dd4Mx5bFxfYDX136SLrv5kGCZaUtF",
"scriptPubKey": "7e36ab914fc645b2b9fd5ce704f54bc34a59a56c9671eb355b",
"confirmations": 20690,
"orderid": "e1b0f4bf05e6c47506abf5d717c95baa1b6de79dd1758673a8cdd171ddad6578"
}
]
Key | Type | Description
----------------|------|-----------------------------------------------------
txid | str | Transaction ID of the UTXO.
vout | int | Vout index of the UTXO.
amount | str | UTXO amount.
address | str | UTXO address.
scriptPubKey | str | UTXO address script pubkey.
confirmations | int | UTXO blockchain confirmation count.
orderid | str | The order ID if the UTXO is currently being used in
| | an order.
)"
},
RPCExamples{
HelpExampleCli("dxGetUtxos", "BLOCK")
+ HelpExampleRpc("dxGetUtxos", "\"BLOCK\"")
+ HelpExampleCli("dxGetUtxos", "BTC")
+ HelpExampleRpc("dxGetUtxos", "\"BTC\"")
+ HelpExampleCli("dxGetUtxos", "BLOCK true")
+ HelpExampleRpc("dxGetUtxos", "\"BLOCK\", true")
},
}.ToString());
const auto token = request.params[0].get_str();
bool includeUsed{false};
if (!request.params[1].isNull())
includeUsed = request.params[1].get_bool();
auto & xapp = xbridge::App::instance();
xbridge::WalletConnectorPtr conn = xapp.connectorByCurrency(token);
if (!conn)
return uret(xbridge::makeError(xbridge::NO_SESSION, __FUNCTION__, token));
std::set<xbridge::wallet::UtxoEntry> excluded = xapp.getAllLockedUtxos(token);
std::vector<xbridge::wallet::UtxoEntry> unspent;
if (!conn->getUnspent(unspent, !includeUsed ? excluded : std::set<xbridge::wallet::UtxoEntry>{}))
return uret(xbridge::makeError(xbridge::BAD_REQUEST, __FUNCTION__, "failed to get unspent transaction outputs"));
UniValue r(UniValue::VARR);
for (const auto & utxo : unspent) {
UniValue o(UniValue::VOBJ);
o.pushKV("txid", utxo.txId);
o.pushKV("vout", static_cast<int>(utxo.vout));
o.pushKV("amount", xbridge::xBridgeStringValueFromPrice(utxo.amount, conn->COIN));
o.pushKV("address", utxo.address);
o.pushKV("scriptPubKey", utxo.scriptPubKey);
o.pushKV("confirmations", static_cast<int>(utxo.confirmations));
o.pushKV("orderid", "");
if (excluded.count(utxo) > 0) {
auto orderid = xapp.orderWithUtxo(utxo);
o.pushKV("orderid", orderid.IsNull() ? "" : orderid.GetHex());
}
r.push_back(o);
}
return r;
}
// clang-format off
static const CRPCCommand commands[] =
{ // category name actor (function) argNames
// -------------------- ----------------------------- ----------------------------- ----------
{ "xbridge", "dxGetOrderFills", &dxGetOrderFills, {} },
{ "xbridge", "dxGetOrders", &dxGetOrders, {} },
{ "xbridge", "dxGetOrder", &dxGetOrder, {} },
{ "xbridge", "dxGetLocalTokens", &dxGetLocalTokens, {} },
{ "xbridge", "dxLoadXBridgeConf", &dxLoadXBridgeConf, {} },
{ "xbridge", "dxGetNewTokenAddress", &dxGetNewTokenAddress, {} },
{ "xbridge", "dxGetNetworkTokens", &dxGetNetworkTokens, {} },
{ "xbridge", "dxMakeOrder", &dxMakeOrder, {} },
{ "xbridge", "dxMakePartialOrder", &dxMakePartialOrder, {} },
{ "xbridge", "dxTakeOrder", &dxTakeOrder, {} },
{ "xbridge", "dxCancelOrder", &dxCancelOrder, {} },
{ "xbridge", "dxGetOrderHistory", &dxGetOrderHistory, {} },
{ "xbridge", "dxGetOrderBook", &dxGetOrderBook, {} },
{ "xbridge", "dxGetTokenBalances", &dxGetTokenBalances, {} },
{ "xbridge", "dxGetMyOrders", &dxGetMyOrders, {} },
{ "xbridge", "dxGetMyPartialOrderChain", &dxGetMyPartialOrderChain, {"order_id"} },
{ "xbridge", "dxPartialOrderChainDetails", &dxPartialOrderChainDetails, {"order_id"} },
{ "xbridge", "dxGetLockedUtxos", &dxGetLockedUtxos, {} },
{ "xbridge", "dxFlushCancelledOrders", &dxFlushCancelledOrders, {} },
{ "xbridge", "gettradingdata", &gettradingdata, {} },
{ "xbridge", "dxGetTradingData", &dxGetTradingData, {} },
{ "xbridge", "dxSplitAddress", &dxSplitAddress, {"token", "splitamount", "address", "include_fees", "show_rawtx", "submit"} },
{ "xbridge", "dxSplitInputs", &dxSplitInputs, {"token", "splitamount", "address", "include_fees", "show_rawtx", "submit", "utxos"} },
{ "xbridge", "dxGetUtxos", &dxGetUtxos, {"token", "include_used"} },
};
// clang-format on
void RegisterXBridgeRPCCommands(CRPCTable &t)
{
for (const auto & command : commands)
t.appendCommand(command.name, &command);
}
|
{"hexsha": "6d31a761de6552b8f302eb64521b0cd5505aac37", "size": 169380, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/xbridge/rpcxbridge.cpp", "max_stars_repo_name": "shrnkld/blocknet", "max_stars_repo_head_hexsha": "f85bdf3eeebb1ed8c2321ebd928232d4885b30b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 122.0, "max_stars_repo_stars_event_min_datetime": "2019-05-08T22:15:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T08:12:48.000Z", "max_issues_repo_path": "src/xbridge/rpcxbridge.cpp", "max_issues_repo_name": "shrnkld/blocknet", "max_issues_repo_head_hexsha": "f85bdf3eeebb1ed8c2321ebd928232d4885b30b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 67.0, "max_issues_repo_issues_event_min_datetime": "2019-10-30T19:12:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T14:42:00.000Z", "max_forks_repo_path": "src/xbridge/rpcxbridge.cpp", "max_forks_repo_name": "shrnkld/blocknet", "max_forks_repo_head_hexsha": "f85bdf3eeebb1ed8c2321ebd928232d4885b30b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 58.0, "max_forks_repo_forks_event_min_datetime": "2019-05-24T10:27:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-10T20:55:29.000Z", "avg_line_length": 48.1056518035, "max_line_length": 319, "alphanum_fraction": 0.5746546227, "num_tokens": 41095}
|
*deck l2opxv
subroutine l2opxv( lmn, v, itl, itu, lmnv, lenscr, scr, eval,
& leig )
c
c compute the matrix vector product, (L^2) * v, in an unnormalized
c cartesian basis, and determine if v(*) is an eigenvector of the
c total angular momentum operator.
c
c input:
c lmn = (l + m + n) where l, m, and n are the exponents of x, y, and z
c respectively. lmn=0 for cartesian "s" functions, 1 for
c cartesian "p" functions, 2 for cartesian "d" functions, etc.
c v(1:xyzdim) = input vector. the elements are coefficients of the
c cartesian basis functions (x**l * y**m * z**n).
c where xyzdim = ((lmn + 1) * (lmn + 2))/2
c lmnv(1:3,1:lmnvmx) = cartesian exponents of each basis function.
c lenscr = scratch vector length.
c scr(1:lenscr) = scratch work vector. This must be at least as large
c as the cartesian subspace dimension.
c lenscr >= xyzdim
c
c output:
c eval = vector expectation value = <v| L^2 |v> / <v|v>
c leig = eigenvector return code.
c = -4 for vector norm error.
c = -3 for lmnv(*,*) inconsistency.
c = -2 for lenscr error.
c = -1 if v(*) is not an eigenvector of L^2
c = principal quantum number if v(*) is an eigenvector.
c 0 for s-type vectors, 1 for p-type vectors, etc.
c the eigenvalue is given by (leig*(leig+1)) to within numerical
c precision.
c 28-oct-96 account for nonorthogonality of cartesian gaussians (rmp)
c 11-jun-91 written by ron shepard with suggestions from r. m. pitzer.
c
implicit logical(a-z)
c
c # dummy:
c integer lmn, lenv, lenscr, leig
integer itl, itu, lmn, lenscr, leig
integer lmnv(3,*)
real*8 eval
real*8 v(*), scr(*)
c
c # local:
integer l, m, n, i, j, it, jt, xyzdim, ll, mm, nn, p, q
real*8 vi, rnorm, vnorm2
c
c # small is used to determine if a vector is close enough to be
c # called an eigenvector. this should be about 1000x the
c # machine epsilon to avoid sqrt() precision problems.
c
real*8 zero, one, two, fourth, small
parameter(zero=0d0, one=1d0, two=2d0, fourth=0.25d0, small=1d-10)
c
c # bummer error types.
integer wrnerr, nfterr, faterr
parameter (wrnerr=0,nfterr=1,faterr=2)
c
integer iodfac
external iodfac
c
c # the [l,m,n] basis function is assigned the internal index:
c # (lx*(lx+1))/2 + n + 1 with lx=(lmn-l)=(m+n)
c # this convention is inconsistent with the rest of this program,
c # but it is used anyway since it is convenient and since the
c # computed results are local.
c
integer llp1
llp1(l) = (l * (l + 1)) / 2 + 1
c
leig = -1
eval = -one
c
c # check to make sure scr(*) is large enough.
xyzdim = llp1( lmn) + lmn
if ( xyzdim .gt. lenscr ) then
call bummer('l2opxv: need larger scr(*), (xyzdim-lenscr)=',
& (xyzdim-lenscr), wrnerr )
leig = -2
return
endif
c
c # initialize the scratch vector.
call wzero(xyzdim,scr,1)
c
c # compute the matrix-vector product and the vector norm.
c # matrix elements of lop(*,*) are computed from the operator
c # definition in cartesian space.
c
vnorm2 = zero
do 30 it = itl, itu
i = it - (itl - 1)
l = lmnv(1,it)
m = lmnv(2,it)
n = lmnv(3,it)
vi = v(i)
do 20 jt = itl, it-1
j = jt - (itl - 1)
vnorm2 = vnorm2 + (two * vi) * v(j) *
& iodfac(l,lmnv(1,jt),m,lmnv(2,jt),n,lmnv(3,jt))
20 continue
vnorm2 = vnorm2 + vi * vi * iodfac(l,l,m,m,n,n)
c
if ( (l+m+n) .ne. lmn ) then
c # inconsistent exponents in the basis function.
call bummer('l2opxv: (l+m+n-lmn)=', (l+m+n-lmn), wrnerr )
leig = -3
return
endif
c
c # L^2 is sparse in this representation. each vi contributes
c # to, at most, only 7 elements in the matrix-vector product.
c
if ( l .ge. 2 ) then
c # include -l*(l-1) * ( [l-2,m,n+2] + [l-2,m+2,n] ) terms.
ll = l * (l - 1)
p = llp1( lmn - l + 2 ) + n
scr(p) = scr(p) - vi * (ll)
p = p + 2
scr(p) = scr(p) - vi * (ll)
endif
c
if ( m .ge. 2 ) then
c # include -m*(m-1) * ( [l,m-2,n+2] + [l+2,m-2,n] ) terms.
mm = m * (m - 1)
p = llp1( lmn - l ) + n + 2
scr(p) = scr(p) - vi * (mm)
p = llp1( lmn - l - 2 ) + n
scr(p) = scr(p) - vi * (mm)
endif
c
if ( n .ge. 2 ) then
c # include -n*(n-1) * ( [l,m+2,n-2] + [l+2,m,n-2] ) terms.
nn = n * (n - 1)
p = llp1( lmn - l ) + n - 2
scr(p) = scr(p) - vi * (nn)
p = llp1( lmn - l - 2 ) + n - 2
scr(p) = scr(p) - vi * (nn)
endif
c
c # include the 2*(l*m+l*n+m*n+l+m+n)*[l,m,n] diagonal term.
c
p = llp1( lmn - l ) + n
scr(p) = scr(p) + vi * (2 * (l * (m + n) + m * n + l + m + n))
30 continue
c
if ( vnorm2 .le. small ) then
         call bummer('l2opxv: small vector norm, lmn=', lmn, wrnerr)
leig = -4
return
endif
c
c # compute the expectation value.
c
eval = zero
do 50 it = itl, itu
i = it - (itl - 1)
l = lmnv(1,it)
m = lmnv(2,it)
n = lmnv(3,it)
p = llp1( lmn - l ) + n
do 40 jt = itl, itu
j = jt - (itl - 1)
eval = eval + v(j) * scr(p) *
& iodfac(l,lmnv(1,jt),m,lmnv(2,jt),n,lmnv(3,jt))
40 continue
50 continue
eval = eval / vnorm2
c
c # compute the residual norm.
c #
rnorm = zero
c
do 70 it = itl, itu
i = it - (itl - 1)
l = lmnv(1,it)
m = lmnv(2,it)
n = lmnv(3,it)
p = llp1( lmn - l ) + n
do 60 jt = itl, it-1
j = jt - (itl - 1)
q = llp1( lmn - lmnv(1,jt) ) + lmnv(3,jt)
rnorm = rnorm + two * ( scr(p) - eval * v(i) ) *
& ( scr(q) - eval * v(j) ) *
& iodfac(l,lmnv(1,jt),m,lmnv(2,jt),n,lmnv(3,jt))
60 continue
rnorm = rnorm + (scr(p) - eval * v(i))**2 * iodfac(l,l,m,m,n,n)
70 continue
c
c # normalize w.r.t. |v|=1.
rnorm = rnorm / vnorm2
c
rnorm = sqrt( rnorm )
if ( rnorm .gt. small ) then
c
c # v(*) is not an eigenvector.
c
leig = -1
else
c
c # v(*) is an eigenvector.
c
c # determine leig such that eval = leig*(leig+1)
c
c # the following assignment should truncate to the
c # next smaller integer value...
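c        # for an eigenvector, eval = l*(l+1), so
c        #    sqrt( eval + 1/4 ) = sqrt( (l + 1/2)**2 ) = l + 1/2
c        # and truncating (l + 1/2) to an integer recovers l exactly,
c        # to within the numerical tolerance implied by small.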
         leig = int( sqrt( eval + fourth ) )
endif
c
return
end
|
{"hexsha": "1ffa37bf753839b839d2c0a9c78aa15fb918b9e0", "size": 6885, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "source/argosecp/argos1e/l2opxv.f", "max_stars_repo_name": "MOLFDIR/MOLFDIR", "max_stars_repo_head_hexsha": "e71c7ecf77ee018bbdeaa004dda04369ce02be89", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2016-09-18T11:34:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-16T08:57:01.000Z", "max_issues_repo_path": "source/argosecp/argos1e/l2opxv.f", "max_issues_repo_name": "yingxingcheng/MOLFDIR", "max_issues_repo_head_hexsha": "0802a5b93150e7db22a2bcfe6941dad62a55f4d0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2016-10-26T12:26:36.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-20T05:53:35.000Z", "max_forks_repo_path": "source/argosecp/argos1e/l2opxv.f", "max_forks_repo_name": "yingxingcheng/MOLFDIR", "max_forks_repo_head_hexsha": "0802a5b93150e7db22a2bcfe6941dad62a55f4d0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-09-18T12:34:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-16T08:57:13.000Z", "avg_line_length": 32.323943662, "max_line_length": 72, "alphanum_fraction": 0.5061728395, "num_tokens": 2442}
|
"""
Main file to run for each experiment with the correct config.yml file as the argument.
"""
import argparse
import copy
import json
import logging
from pathlib import Path
import numpy as np
import torch
import yaml
from aif360.algorithms.postprocessing import (CalibratedEqOddsPostprocessing,
EqOddsPostprocessing,
RejectOptionClassification)
from sklearn.metrics import balanced_accuracy_score
from sklearn.preprocessing import StandardScaler
from utils import get_data, get_valid_objective, get_test_objective
from tabular_models import load_model, train_model
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f'device: {device}')
logger = logging.getLogger("Debiasing")
log_handler = logging.StreamHandler()
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
log_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.propagate = False
class Data(object):
def __init__(self, config, seed):
self.train, self.valid, self.test, self.priv, self.unpriv = get_data(config['dataset'], config['protected'], seed=seed)
        # priv_index is the index of the privileged column.
priv_index = self.train.protected_attribute_names.index(list(self.priv[0].keys())[0])
scale_orig = StandardScaler()
self.X_train = torch.tensor(scale_orig.fit_transform(self.train.features), dtype=torch.float32)
self.y_train = torch.tensor(self.train.labels.ravel(), dtype=torch.float32)
self.p_train = self.train.protected_attributes[:, priv_index]
self.X_valid = torch.tensor(scale_orig.transform(self.valid.features), dtype=torch.float32)
self.X_valid_gpu = self.X_valid.to(device)
self.y_valid = torch.tensor(self.valid.labels.ravel(), dtype=torch.float32)
self.y_valid_gpu = self.y_valid.to(device)
self.p_valid = self.valid.protected_attributes[:, priv_index]
self.p_valid_gpu = torch.tensor(self.p_valid).to(device)
valid_train_indices, valid_valid_indices = torch.split(torch.randperm(self.X_valid.size(0)), int(0.7*self.X_valid.size(0)))
self.X_valid_train, self.X_valid_valid = self.X_valid[valid_train_indices, :], self.X_valid[valid_valid_indices, :]
self.y_valid_train, self.y_valid_valid = self.y_valid[valid_train_indices], self.y_valid[valid_valid_indices]
self.p_valid_train, self.p_valid_valid = self.p_valid[valid_train_indices], self.p_valid[valid_valid_indices]
self.X_test = torch.tensor(scale_orig.transform(self.test.features), dtype=torch.float32)
self.X_test_gpu = self.X_test.to(device)
self.y_test = torch.tensor(self.test.labels.ravel(), dtype=torch.float32)
self.y_test_gpu = self.y_test.to(device)
self.p_test = self.test.protected_attributes[:, priv_index]
self.p_test_gpu = torch.tensor(self.p_test).to(device)
self.num_features = self.X_train.size(1)
def main(config):
seed = np.random.randint(0, high=10000)
if 'seed' in config:
seed = config['seed']
torch.manual_seed(seed)
np.random.seed(seed)
# Setup directories to save models and results
Path('models').mkdir(exist_ok=True)
Path('results').mkdir(exist_ok=True)
# Get Data
logger.info(f'Loading Data from dataset: {config["dataset"]}.')
data = Data(config, seed)
# Get trained model
model = load_model(data.num_features, config.get('hyperparameters', {}))
model_path = (Path('models') / Path(config['modelpath']))
if model_path.is_file():
logger.info(f'Loading Model from {model_path}.')
model.load_state_dict(torch.load(model_path))
else:
logger.info(f'{model_path} does not exist. Retraining model from scratch.')
train_model(model, data, epochs=config.get('epochs', 1001))
torch.save(model.state_dict(), model_path)
model_state_dict = copy.deepcopy(model.state_dict())
# Preliminaries
logger.info('Setting up preliminaries.')
model.eval()
with torch.no_grad():
valid_pred = data.valid.copy(deepcopy=True)
valid_pred.scores = model(data.X_valid)[:, 0].reshape(-1, 1).numpy()
valid_pred.labels = np.array(valid_pred.scores > 0.5)
test_pred = data.test.copy(deepcopy=True)
test_pred.scores = model(data.X_test)[:, 0].reshape(-1, 1).numpy()
test_pred.labels = np.array(test_pred.scores > 0.5)
results_valid = {}
results_test = {}
# Evaluate default model
if 'default' in config['models']:
        logger.info('Finding best threshold for default model by maximizing balanced accuracy')
threshs = np.linspace(0, 1, 1001)
performances = []
for thresh in threshs:
perf = balanced_accuracy_score(data.y_valid, valid_pred.scores > thresh)
performances.append(perf)
best_thresh = threshs[np.argmax(performances)]
logger.info('Evaluating default model with best threshold.')
results_valid['default'] = get_valid_objective(valid_pred.scores > best_thresh, data, config)
logger.info(f'Results: {results_valid["default"]}')
results_test['default'] = get_test_objective(test_pred.scores > best_thresh, data, config)
# Evaluate ROC
if 'ROC' in config['models']:
metric_map = {
'spd': 'Statistical parity difference',
'aod': 'Average odds difference',
'eod': 'Equal opportunity difference'
}
ROC = RejectOptionClassification(unprivileged_groups=data.unpriv,
privileged_groups=data.priv,
low_class_thresh=0.01, high_class_thresh=0.99,
num_class_thresh=100, num_ROC_margin=50,
metric_name=metric_map[config['metric']],
metric_ub=0.05, metric_lb=-0.05)
logger.info('Training ROC model with validation dataset.')
ROC = ROC.fit(data.valid, valid_pred)
logger.info('Evaluating ROC model.')
y_pred = ROC.predict(valid_pred).labels.reshape(-1)
results_valid['ROC'] = get_valid_objective(y_pred, data, config)
logger.info(f'Results: {results_valid["ROC"]}')
y_pred = ROC.predict(test_pred).labels.reshape(-1)
results_test['ROC'] = get_test_objective(y_pred, data, config)
ROC = None
# Evaluate Equality of Odds
if 'EqOdds' in config['models']:
eqodds = EqOddsPostprocessing(privileged_groups=data.priv,
unprivileged_groups=data.unpriv)
logger.info('Training Equality of Odds model with validation dataset.')
eqodds = eqodds.fit(data.valid, valid_pred)
logger.info('Evaluating Equality of Odds model.')
y_pred = eqodds.predict(valid_pred).labels.reshape(-1)
results_valid['EqOdds'] = get_valid_objective(y_pred, data, config)
logger.info(f'Results: {results_valid["EqOdds"]}')
y_pred = eqodds.predict(test_pred).labels.reshape(-1)
results_test['EqOdds'] = get_test_objective(y_pred, data, config)
eqodds = None
# Evaluate Calibrated Equality of Odds
if 'CalibEqOdds' in config['models']:
cost_constraint = config['CalibEqOdds']['cost_constraint']
cpp = CalibratedEqOddsPostprocessing(privileged_groups=data.priv,
unprivileged_groups=data.unpriv,
cost_constraint=cost_constraint)
logger.info('Training Calibrated Equality of Odds model with validation dataset.')
cpp = cpp.fit(data.valid, valid_pred)
logger.info('Evaluating Calibrated Equality of Odds model.')
y_pred = cpp.predict(valid_pred).labels.reshape(-1)
results_valid['CalibEqOdds'] = get_valid_objective(y_pred, data, config)
logger.info(f'Results: {results_valid["CalibEqOdds"]}')
y_pred = cpp.predict(test_pred).labels.reshape(-1)
results_test['CalibEqOdds'] = get_test_objective(y_pred, data, config)
cpp = None
# Evaluate Random Debiasing
if 'random' in config['models']:
from algorithms.random import random_debiasing
results_valid['random'], results_test['random'] = random_debiasing(model_state_dict, data, config, device)
# Evaluate fairBO
if 'fairBO' in config['models']:
from algorithms.fairBO import fairBO_debiasing
results_valid['fairBO'], results_test['fairBO'] = fairBO_debiasing(model_state_dict, data, config, device)
# Evaluate Layerwise Optimizer
if 'layerwiseOpt' in config['models']:
from algorithms.layerwiseOpt import layerwiseOpt_debiasing
results_valid['layerwiseOpt'], results_test['layerwiseOpt'] = layerwiseOpt_debiasing(model_state_dict, data, config, device)
# Evaluate Adversarial
if 'adversarial' in config['models']:
from algorithms.adversarial import adversarial_debiasing
results_valid['adversarial'], results_test['adversarial'] = adversarial_debiasing(model_state_dict, data, config, device)
# Mitigating Unwanted Biases with Adversarial Learning
if 'mitigating' in config['models']:
from algorithms.mitigating import mitigating_debiasing
results_valid['mitigating'], results_test['mitigating'] = mitigating_debiasing(model_state_dict, data, config, device)
# Save Results
results_valid['config'] = config
logger.info(f'Validation Results: {results_valid}')
logger.info(f'Saving validation results to {config["experiment_name"]}_valid_output.json')
with open(Path('results') / f'{config["experiment_name"]}_valid_output.json', 'w') as fh:
json.dump(results_valid, fh)
results_test['config'] = config
logger.info(f'Test Results: {results_test}')
    logger.info(f'Saving test results to {config["experiment_name"]}_test_output.json')
with open(Path('results') / f'{config["experiment_name"]}_test_output.json', 'w') as fh:
json.dump(results_test, fh)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config', help='Path to configuration yaml file.')
args = parser.parse_args()
with open(args.config, 'r') as fh:
config = yaml.load(fh, Loader=yaml.FullLoader)
main(config)
|
{"hexsha": "68affcde54ae506cc86aa72f9550a6404a3d4f49", "size": 10497, "ext": "py", "lang": "Python", "max_stars_repo_path": "intraproc_tabular.py", "max_stars_repo_name": "abacusai/intraprocessing_debiasing", "max_stars_repo_head_hexsha": "b4f0c35e299022b1e71e26686220e90440687100", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-12-06T17:05:37.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T10:59:42.000Z", "max_issues_repo_path": "intraproc_tabular.py", "max_issues_repo_name": "abacusai/intraprocessing_debiasing", "max_issues_repo_head_hexsha": "b4f0c35e299022b1e71e26686220e90440687100", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-03-01T19:59:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-25T22:23:27.000Z", "max_forks_repo_path": "intraproc_tabular.py", "max_forks_repo_name": "abacusai/intraprocessing_debiasing", "max_forks_repo_head_hexsha": "b4f0c35e299022b1e71e26686220e90440687100", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-06T12:13:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T13:50:00.000Z", "avg_line_length": 44.2911392405, "max_line_length": 132, "alphanum_fraction": 0.6756216062, "include": true, "reason": "import numpy", "num_tokens": 2356}
|
# MIT license
# Copyright (c) Microsoft Corporation. All rights reserved.
# See LICENSE in the project root for full license information.
module NotebooksUtils
# import Pluto
import PlutoUI
# import Markdown
import Format
import Makie
import Makie.AbstractPlotting
import Makie.AbstractPlotting.MakieLayout
import WGLMakie
import GLMakie
mutable struct Defs
authors::String # authors
desc::String # description of the notebook
width::Int64
html::String # work area to accumulate html syntaxt in order to save cell space (ouput at the end of a single cell)
end
Defs() = Defs("?Authors?")
Defs(authors) = Defs(authors, "")
Defs(authors, desc) = Defs(authors, desc, 1000, "")
function fix_path(path)
return replace(abspath(path), "\\"=>"/")
end
"""
function run(notebook_filename)
Launch Pluto and allow the user to open a specific notebook.
"""
function run(notebook_filename)
run(; path=notebook_filename)
end
"""
    function run(; port=nothing, path=nothing, sysimage_file=nothing, auto_detect_sysimage=false)
Launch Pluto and allow the user to open a specific notebook.
Also allow the usage of a sysimage file for faster loading.
"""
function run(; port=nothing, path=nothing, sysimage_file=nothing, auto_detect_sysimage = false )
@eval begin
import Pluto
local file_path = $path
local sysimage_file_path = $sysimage_file
if (file_path === nothing)
@info "Launching Pluto"
else
file_path = fix_path(file_path)
if (!isfile(file_path))
@warn "Can't find the notebook file [$(file_path)] - trying to locate it in samples"
base_filename = splitdir(file_path)[2]
file_path = fix_path(joinpath(splitdir(splitdir(@__DIR__)[1])[1], "samples", "notebooks", base_filename))
if (!isfile(file_path))
@warn "Can't find the notebook file [$(file_path)] in the samples folder - launching Pluto without a file"
else
@info "Launching Notebook [$(file_path)]"
end
else
@info "Launching Notebook [$(file_path)]"
end
end
# handling the sysimage file
if (sysimage_file_path !== nothing)
if (!isfile(sysimage_file_path))
@warn "Can't find the sysimage file [$(sysimage_file_path)] - launching Pluto without a sysimage"
sysimage_file_path = nothing
end
else
if ($auto_detect_sysimage)
if Sys.iswindows()
if (!isfile("JuliaSysimage.dll"))
sysimage_file_path = "JuliaSysimage.dll"
end
elseif Sys.islinux()
if (!isfile("JuliaSysimage.so"))
sysimage_file_path = "JuliaSysimage.so"
end
end
end
end
local options = Pluto.Configuration.from_flat_kwargs(
port=$port,
notebook=file_path,
launch_browser=true,
host="127.0.0.1",
require_secret_for_open_links = false,
require_secret_for_access = false,
run_notebook_on_load = true,
sysimage=sysimage_file_path,
# banner = "yes",
)
session = Pluto.ServerSession(options=options)
Pluto.run(session)
end
end
"""
    run_sample(sample_name::String, edit_original::Bool = false)
Launch Pluto and allow the user to open a specific sample notebook. If a notebook of the same name exists in the current working folder,
it will be opened in Pluto; otherwise, the original sample notebook will be copied to the current folder and used.
This behaviour prevents users from modifying the original sample notebook.
"""
function run_sample(sample_name::String, edit_original::Bool = false)
folder, basename = splitdir(sample_name)
filename = "DummyFile,jl"
if isempty(folder)
if (isfile(basename))
# file already exists in current folder - launch pluto and point to it
filename = fix_path(abspath(basename))
else
        # file does not exist in the current directory
file_in_samples = fix_path(joinpath(splitdir(splitdir(@__DIR__)[1])[1], "samples", "notebooks", basename))
if (isfile(file_in_samples))
if (edit_original)
@warn "You are about to edit the original sample notebook"
filename = file_in_samples
else
# file exists - need to be copied into the current folder
src_fn = file_in_samples
dst_fn = fix_path(abspath(basename))
@info "Copying the sample notebook [$basename] from the samples directory to the current one\n$src_fn ->\n$dst_fn"
cp(src_fn, dst_fn)
filename = dst_fn
end
else
# file does not exist in samples - need to issue an error
# @error "Can't find a sample notebook named [file_in_samples] - please check again"
throw(ArgumentError("Can't find a sample notebook named [$file_in_samples] - please check again" ))
end
end
end
@info "Running sample notebook from [$filename]"
run(; path=filename)
end
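# Usage sketch (hypothetical notebook name, not one shipped with this package):
#   NotebooksUtils.run_sample("MyNotebook.jl")        # copies the sample into pwd and opens it
#   NotebooksUtils.run_sample("MyNotebook.jl", true)  # opens the original sample for editing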
mutable struct VariableInfo
bond::Any
html::String
end
function GetVarInfo(bond)
res = VariableInfo(bond, HTMLFromObj(bond))
return res
end
struct UISlider
range::AbstractRange
default::Number
dec::Int16
end
UISlider() = UISlider(1:10, 1, -1)
UISlider(r) = UISlider(r, r.start, -1)
UISlider(r, def) = UISlider(r, def, -1)
# UISlider(r, def, format::String) = UISlider(r, def, format)
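# Example: UISlider(0.0:0.1:2.0, 1.0, 2) renders an HTML slider over [0, 2]
# whose live readout is formatted to two decimal places (dec == -1 leaves it unformatted).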
function Base.show(io::IO, ::MIME"text/html", slider::UISlider)
if (slider.dec == -1)
print(io, """
<input type="range"
min="$(first(slider.range))"
step="$(step(slider.range))"
max="$(last(slider.range))"
value="$(slider.default)"
oninput="this.nextElementSibling.value=this.value">
<output>$(slider.default)</output>""")
else
fmt = "%.$(slider.dec)f"
print(io, """
<input type="range"
min="$(first(slider.range))"
step="$(step(slider.range))"
max="$(last(slider.range))"
value="$(slider.default)"
oninput="this.nextElementSibling.value=parseFloat(this.value).toFixed($(slider.dec))">
<output>$(Format.cfmt( fmt, slider.default ))</output>""")
end
end
function SetDefs(defs::Defs)
@info "I'm in SetDefs"
ret = PlutoUI.Show(MIME"text/html"(), """
<style>
main {
max-width: $(defs.width)px;
}
</style>
""")
#@info "The return type $(typeof(ret))"
return ret
end
function DefsClearHTML(defs::Defs)
defs.html = ""
end
function DefsAddHTML(defs::Defs, html::String)
defs.html = defs.html * html
end
function DefsHTML(defs::Defs)
return defs.html
end
# we don't need to use this function anymore - we can extract the html of items and use it using the PlutoUI.show command
# function SetHTMLMarkdown(val::Bool)
# if (val)
# @info "SetHTMLMarkdown: Allow Raw HTML Tags in Markdown"
# @eval Markdown.htmlesc(io::IO, s::AbstractString) = print(io,s)
# else
# @info "SetHTMLMarkdown: Original process - Raw HTML is not allowed"
# @eval Markdown.htmlesc(io::IO, s::AbstractString) =
# for ch in s
# print(io, get(Markdown._htmlescape_chars, ch, ch))
# end
# end
# end
"""
function SetBackend(defs::Defs, be::String)
Set the Makie backend: "Web" activates WGLMakie; any other value activates the static GLMakie backend.
"""
function SetBackend(defs::Defs, be::String)
if (be == "Web")
@info "Makie backend set to WEB (WGLMakie)"
WGLMakie.activate!()
AbstractPlotting.__init__()
AbstractPlotting.inline!(true)
else
@info "Makie backend set to STATIC (GLMakie)"
GLMakie.activate!()
AbstractPlotting.__init__()
AbstractPlotting.inline!(true)
end
end
"""
InitNotebook(; port=8449)
initialize the JSServe package.
"""
function InitNotebook(; port=8449)
@eval begin
try
import JSServe
            local port = $port # use the keyword argument passed to InitNotebook
JSServe.JSSERVE_CONFIGURATION.listen_port[] = port
JSServe.JSSERVE_CONFIGURATION.external_url[] = "http://localhost:$(port)"
JSServe.JSSERVE_CONFIGURATION.content_delivery_url[] = "http://localhost:$(port)"
return JSServe.Page() # needs to get displayed by Pluto
catch e
@warn "Can't initialize the JSServe package\n$e"
end
end
end
function HTMLFromObj(obj)
io = IOBuffer()
Base.show(io, MIME"text/html"(), obj)
res = String(take!(io))
return res
end
# function MDFromString(str)
# # SetHTMLMarkdown(true)
# io = IOBuffer(str)
# res_md = Markdown.parse(io, )
# # SetHTMLMarkdown(false)
# return res_md
# end
function HTMLFix(html::String)
html = replace(html, "\r" => "")
html = replace(html, "\n" => "")
return html
end
function HTMLFloatingBox(items; name="plutoui-genericfloatingbox", header="?? header ??", kwargs...)
res = ""
res = res *
"""<nav class="$name aside indent">""" *
"""<header>$header</header>""" *
"""<section>""" *
"""<span>"""
for item in items
item_level = 1
if (startswith(item, "@ "))
item_level = 2
elseif (startswith(item, "@@ "))
item_level = 3
elseif (startswith(item, "@@@ "))
item_level = 4
elseif (startswith(item, "@@@@ "))
item_level = 5
elseif (startswith(item, "@@@@@ "))
item_level = 6
elseif (startswith(item, "@@@@@@ "))
item_level = 7
end
if (startswith(item, "@"))
item2 = item[item_level+2:end]
else
item2 = item
end
# println("Item [$item] Level [$item_level]")
res = res *
"""<div class="params-row">""" *
"""<p class = "H$item_level">""" *
item2 *
"""</p>""" *
"""</div>""" *
"""\n"""
#Input: $(@bind nnnn MySlider(1:100, 10))
end
# closing the nav tags
res = res *
"""</span>""" *
"""</section>""" *
"""</nav>"""
# add the style for this floating box
res = res * HTMLFloatingBoxStyle(name; kwargs...)
return HTMLFix(res)
end
function HTMLFloatingBoxStyle(name::String; right="1rem", top="20rem", width="25%", kwargs...)
@info "HTMLFloatingBoxStyle: right=$right, top=$top, width=$width"
res = """<style>
@media screen and (min-width: 1081px) {
.$name.aside {
position: fixed;
right: $right;
top: $top;
width: $width;
padding: 10px;
border: 3px solid rgba(0, 0, 0, 0.15);
border-radius: 10px;
box-shadow: 0 0 11px 0px #00000010;
max-height: 500px;
overflow: auto;
z-index: 5;
background: white;
}
}
.$name header {
display: block;
font-size: 1.5em;
margin-top: 0.67em;
margin-bottom: 0.67em;
margin-left: 0;
margin-right: 0;
font-weight: bold;
border-bottom: 2px solid rgba(0, 0, 0, 0.15);
}
.$name section .params-row {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
padding-bottom: 2px;
}
.highlight-pluto-cell-shoulder {
background: rgba(0, 0, 0, 0.05);
background-clip: padding-box;
}
.$name section a {
text-decoration: none;
font-weight: normal;
color: gray;
}
/* hover */
.$name section a:hover {
color: black;
}
/* a-ref indentation */
.$name.indent section a.H1 {
font-weight: 700;
line-height: 1em;
}
.$name.indent section a.H1 {
padding-left: 0px;
}
.$name.indent section a.H2 {
padding-left: 10px;
}
.$name.indent section a.H3 {
padding-left: 20px;
}
.$name.indent section a.H4 {
padding-left: 30px;
}
.$name.indent section a.H5 {
padding-left: 40px;
}
.$name.indent section a.H6 {
padding-left: 50px;
}
/* paragraph indentation */
.$name.indent section p.H1 {
font-weight: 700;
line-height: 1em;
}
.$name.indent section p.H1 {
padding-left: 0px;
}
.$name.indent section p.H2 {
padding-left: 10px;
}
.$name.indent section p.H3 {
padding-left: 20px;
}
.$name.indent section p.H4 {
padding-left: 30px;
}
.$name.indent section p.H5 {
padding-left: 40px;
}
.$name.indent section p.H6 {
padding-left: 50px;
}
</style>"""
return HTMLFix(res)
end
function HTMLNewDocLayout()
res = """
<style>
body {
display: block;
}
main {
max-width: 73%;
padding-left: 50px;
width: 100%;
}
</style>
"""
return HTMLFix(res)
end
function HTMLFixTOC()
res = """
<style>
@media screen and (min-width: 1081px) {
.plutoui-toc.aside {
top: 4%;
max-height: 40%;
}
}
    </style>
"""
return HTMLFix(res)
end
end # module NotebooksUtils
export NotebooksUtils
|
{"hexsha": "942f3187a810d94eba68c0eeef4e8073492317a6", "size": 13961, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/NotebooksUtils/NotebooksUtils.jl", "max_stars_repo_name": "KristofferC/OpticSim.jl", "max_stars_repo_head_hexsha": "3dbe82a51fb3c7d2896f19318d3e4756e54fb6d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-04T03:42:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-04T03:42:27.000Z", "max_issues_repo_path": "src/NotebooksUtils/NotebooksUtils.jl", "max_issues_repo_name": "KristofferC/OpticSim.jl", "max_issues_repo_head_hexsha": "3dbe82a51fb3c7d2896f19318d3e4756e54fb6d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/NotebooksUtils/NotebooksUtils.jl", "max_forks_repo_name": "KristofferC/OpticSim.jl", "max_forks_repo_head_hexsha": "3dbe82a51fb3c7d2896f19318d3e4756e54fb6d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-20T23:30:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T23:30:02.000Z", "avg_line_length": 27.5909090909, "max_line_length": 136, "alphanum_fraction": 0.5581978368, "num_tokens": 3413}
|
# coding=utf-8
"""
This is the python implementation of the online learning method using Iterative Parameter Mixture.
This implementation is now supporting:
- Perceptron
- PA-I, PA-II
- CW
- AROW
- SCW-I, SCW-II
"""
import numpy as np
import scipy.sparse as sp
from joblib import Parallel, delayed
from update_func import Perceptron, PA_I, PA_II, CW, AROW, SCW_I, SCW_II
class Updater():
""" This class support some online learning methods, i.e. weight update method, using Iterative Parameter Mixture.
"""
def __init__(self, C=0.01, r=1.0, eta=0.1, process_num=1, method="PA-II"):
"""
Params:
C(float): Parameter to adjust the degree of penalty, aggressiveness parameter (C>=0)
r(float): regularization parameter for AROW
            eta(float): confidence parameter for CW and SCW
            process_num(int): number of parallel processes (default:1)
            method(str): learning method (Perceptron, PA-I, PA-II, CW, AROW, SCW-I, SCW-II)
"""
self.C = C # Parameter to adjust the degree of penalty on PA-II and SCW(C>=0)
self.r = r # regularization parameter for AROW
self.eta = eta # confidence parameter on CW and SCW
self.PROCESS_NUM = process_num
self.METHOD = method # default PA-II
        assert self.METHOD in ["Perceptron", "PA-I", "PA-II", "CW", "AROW", "SCW-I", "SCW-II"], "Invalid method name {name}".format(name=self.METHOD)
def __make_minibatch(self, x_list, y_list):
"""
Params:
x_list(csr_matrix): csr_matrix of feature vectors.
y_list(list): np.ndarray of labels corresponding to each feature vector
Returns:
x_batch(list): batch of feature vectors
y_batch(list): batch of labels
"""
x_batch = []
y_batch = []
        N = x_list.shape[0]  # number of data points
np.random.seed(0) # set seed for permutation
perm = np.random.permutation(N)
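        # e.g. N = 10 with PROCESS_NUM = 2 yields two batches of 5 shuffled
        # rows each, sliced as perm[N*p//P : N*(p+1)//P]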
        for p in range(self.PROCESS_NUM):
            ini = N * p // self.PROCESS_NUM
            fin = N * (p + 1) // self.PROCESS_NUM
x_batch.append(x_list[perm[ini:fin]])
y_batch.append(y_list[perm[ini:fin]])
return x_batch, y_batch
def __iterative_parameter_mixture(self, callback, weight):
"""
Params:
            callback: callback for the parallelized processes
weight(Weight): current weight class
Returns:
loss_list(list): list of loss value
"""
_w_sum = sp.csr_matrix((1, weight.dims), dtype=np.float32)
loss_list = []
for _w, _loss_list in callback:
_w_sum += _w
loss_list += _loss_list
# insert updated weight
weight.set_weight(1.0 / self.PROCESS_NUM * _w_sum)
weight.epoch += 1
return loss_list
def __iterative_parameter_mixture_for_distweight(self, callback, weight):
"""
Params:
            callback: callback for the parallelized processes
weight(Weight): current weight class
Returns:
loss_list(list): list of loss value
"""
_mu_sum = sp.csr_matrix((1, weight.dims), dtype=np.float32)
        _sigma_sum = sp.csr_matrix(([1.0 for _ in range(weight.dims)], ([0 for _ in range(weight.dims)], range(weight.dims))), (1, weight.dims), dtype=np.float32)
loss_list = []
for _loss_list, _mu, _sigma in callback:
_mu_sum += _mu
_sigma_sum += _sigma
loss_list += _loss_list
# insert updated weight
weight.set_weight(1.0 / self.PROCESS_NUM * _mu_sum)
weight.set_conf(1.0 / self.PROCESS_NUM * _sigma_sum)
weight.epoch += 1
return loss_list
def update(self, x_list, y_list, weight):
""" Update weight parameter according to {self.METHOD} update rule.
Params:
x_list(csr_matrix): csr_matrix of feature vectors.
y_list(list): np.ndarray of labels corresponding to each feature vector
weight(Weight, DistWeight): class of weight (if you want to use AROW, weight class should be DistWeight)
Returns:
loss_list(list): List of loss value
"""
assert x_list.shape[0] == len(y_list), "invalid shape: x_list, y_list"
# make minibatch for Iterative Parameter Mixture
#if self.METHOD == "Perceptrion" or \
# self.METHOD == "PA-I" or \
# self.METHOD == "PA-II":
# x_batch, y_batch = self.__make_minibatch(x_list, y_list)
x_batch, y_batch = self.__make_minibatch(x_list, y_list)
# choose learning method and run
if self.METHOD == "Perceptron":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(Perceptron)(i, x_batch[i], y_batch[i], weight.get_weight()) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture(callback, weight)
elif self.METHOD == "PA-I":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(PA_I)(i, x_batch[i], y_batch[i], weight.get_weight(), self.C) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture(callback, weight)
elif self.METHOD == "PA-II":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(PA_II)(i, x_batch[i], y_batch[i], weight.get_weight(), self.C) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture(callback, weight)
elif self.METHOD == "CW":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(CW)(x_batch[i], y_batch[i], weight.get_weight(), weight.get_conf(), self.eta) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture_for_distweight(callback, weight)
"""
loss_list, mu, sigma = CW(x_list, y_list, weight.get_weight(), weight.get_conf(), self.eta)
weight.set_weight(mu)
weight.set_conf(sigma)
weight.epoch += 1
"""
elif self.METHOD == "AROW":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(AROW)(x_batch[i], y_batch[i], weight.get_weight(), weight.get_conf(), self.r) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture_for_distweight(callback, weight)
"""
loss_list, mu, sigma = AROW(x_list, y_list, weight.get_weight(), weight.get_conf(), self.r)
weight.set_weight(mu)
weight.set_conf(sigma)
weight.epoch += 1
"""
elif self.METHOD == "SCW-I":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(SCW_I)(x_batch[i], y_batch[i], weight.get_weight(), weight.get_conf(), self.C, self.eta) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture_for_distweight(callback, weight)
"""
loss_list, mu, sigma = SCW_I(x_list, y_list, weight.get_weight(), weight.get_conf(), self.C, self.eta)
weight.set_weight(mu)
weight.set_conf(sigma)
weight.epoch += 1
"""
elif self.METHOD == "SCW-II":
callback = Parallel(n_jobs=self.PROCESS_NUM)( \
delayed(SCW_II)(x_batch[i], y_batch[i], weight.get_weight(), weight.get_conf(), self.C, self.eta) for i in range(self.PROCESS_NUM))
loss_list = self.__iterative_parameter_mixture_for_distweight(callback, weight)
"""
loss_list, mu, sigma = SCW_II(x_list, y_list, weight.get_weight(), weight.get_conf(), self.C, self.eta)
weight.set_weight(mu)
weight.set_conf(sigma)
weight.epoch += 1
"""
return loss_list
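# Minimal usage sketch (the `weight` object is assumed to be this project's
# Weight/DistWeight class, which is defined elsewhere and not imported here):
#
#   updater = Updater(C=0.01, process_num=2, method="PA-II")
#   for epoch in range(10):
#       loss_list = updater.update(x_csr, y_array, weight)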
|
{"hexsha": "4f336aa71ed8fcae38d3eb50706be74a78ce4eb9", "size": 7859, "ext": "py", "lang": "Python", "max_stars_repo_path": "libs/updater.py", "max_stars_repo_name": "AkihikoWatanabe/online_learning_libs", "max_stars_repo_head_hexsha": "e23d644728657914a3d2f0e13068ee2a2869815c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-05-24T04:17:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T01:07:04.000Z", "max_issues_repo_path": "libs/updater.py", "max_issues_repo_name": "AkihikoWatanabe/online_learning_libs", "max_issues_repo_head_hexsha": "e23d644728657914a3d2f0e13068ee2a2869815c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libs/updater.py", "max_forks_repo_name": "AkihikoWatanabe/online_learning_libs", "max_forks_repo_head_hexsha": "e23d644728657914a3d2f0e13068ee2a2869815c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.6611111111, "max_line_length": 164, "alphanum_fraction": 0.6031301692, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1897}
|
"""
Tamer Abousoud
Main Robot Controls
---
Functions robot can perform:
- Move in a straight path
- Turn to a given angle
- Take pictures/video
- Return GPS coordinates
- Return sensor data (e.g. distance sensors, accelerometer, gyro)
"""
import os
import sys
import time
import math
import datetime as dt
import numpy as np
from collections import deque, defaultdict
# os.getcwd()
# *** IMPORTANT! ***
# Add path to webots controller libraries to use external control
sys.path.append("/usr/local/webots/lib/controller/python38")
# import robot and devices
from controller import Robot, Motor, Camera, GPS, Accelerometer, Gyro, DistanceSensor
# Directory for captured images:
capturedImgDir = '/home/tamer/MSCA/MSCA32019_RealTimeSystems/Project/robotics/robot_images'
# Simulation parameters
TIME_STEP = 64
MAX_SPEED = 6.28 # rad/s
# E-Puck properties for use with various functions
# properties to calculate rotation angle
axle_length = 52e-3 # 52mm as provided in docs
motor_steps_per_sec = 1000 # provided in docs
# There is conflicting info in docs about e-puck max linear velocity
# 0.115 m/s was found to work best for resolving angular motion
max_linear_velocity = 0.115 # 0.125 # 6.28 * 20.5e-3 # 0.25 # m/s
# Angle traversed per motor step at max speed
max_angle_per_motorstep = (max_linear_velocity/motor_steps_per_sec) / (axle_length/2)
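# numerically: (0.115 / 1000) / (52e-3 / 2) ~= 4.42e-3 rad per motor step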
class deviceOffException(Exception):
'''Raise an error if a device is disabled'''
pass
class WriteError(Exception):
'''Raise if trying to write a read-only attribute'''
pass
# Create a class to define the robot and its functions
class epuckControl(Robot):
'''
E-Puck v1 control to use in the simulated world
'''
def initialize(self, time_step=TIME_STEP, max_speed=MAX_SPEED, starting_angle=0,\
history_size=25):
'''
time_step: simulation time step in milliseconds
max_speed: max rotational speed of robot wheel motors (rad/s)
history_size: Number of steps to keep in history
NOTE: Currently, history is only recorded when the robot is in motion
'''
self.time_step = time_step
self.max_speed = max_speed
# Robot devices
self.camera = self.getCamera('camera')
self.gps = self.getGPS('gps')
self.leftMotor = self.getMotor('left wheel motor')
self.rightMotor = self.getMotor('right wheel motor')
# Initialize motors
self.leftMotor.setPosition(float('inf'))
self.rightMotor.setPosition(float('inf'))
self.leftMotor.setVelocity(0.0)
self.rightMotor.setVelocity(0.0)
# Initial conditions
self.stopped = True
self.camera_on = False
self.camera_freq = 1
self.save_imgs = False
self.img_format = 'png'
self.save_img_every_nframes = 4
self.gps_on = False
self.gps_freq = 1
# Maintain robot history
self._current_angle = starting_angle # track orientation wrt world (radians)
self._history = deque([], maxlen=history_size)
self.__fmt = '%Y-%m-%d %H:%M:%S.%f' # timestamp format
@property
def robot_angle(self):
'''Robot's angle wrt to its forward +ve axis'''
return self._current_angle
@robot_angle.setter
def robot_angle(self, value):
raise WriteError(f'`robot_angle()` is read-only. If you want to rotate the robot use `turn({value})`')
@staticmethod
def angular_to_linear(value, reverse=False):
'''
Convert angular velocity to linear and vice-versa. Default is angular to linear.
Uses the e-puck wheel properties for conversion.
---
value: angular (rad/s) or linear velocity (m/s) to convert
reverse: convert from linear to angular if True
'''
wheel_radius = 20.5e-3 # 20.5mm per e-puck specs
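        # e.g. an angular speed of 6.28 rad/s maps to 6.28 * 0.0205 ~= 0.129 m/s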
if reverse:
return value / wheel_radius
return value * wheel_radius
def turn(self, angle, speed:float):
'''
Rotate the robot about the vertical (y) axis
---
angle: rotation angle in degrees
+ve (ccw) is left, -ve (cw) is right
speed: motor speed as fraction of top speed
float between [0, 1]
'''
        if speed > 1 or speed < 0:
raise ValueError('Speed must be float between [0, 1]')
# Set motor speed
motor_speed = speed * self.max_speed
# Convert angle to radians
angle_rad = np.radians(angle)
# Calculate angle increment per time step
motor_steps = (self.time_step * 1e-3) * motor_steps_per_sec * speed
delta_angle = motor_steps * max_angle_per_motorstep
total_angle = 0 # angle swept since rotation start
# After n_steps, turn more slowly for better accuracy
n_steps = int(0.95 * (abs(angle_rad) / delta_angle))
reduction = 0.05
step = 0
direction = 1 if angle_rad > 0 else -1
while True:
step_info = {}
if self.step(self.time_step) == -1 or abs(total_angle) > abs(angle_rad):
break
else:
step += 1
reduce = 1 if step < n_steps else reduction
step_info['time'] = dt.datetime.now().strftime(self.__fmt)
# Set motor speeds
leftSpeed = -motor_speed * direction * reduce
rightSpeed = motor_speed * direction * reduce
# Rotate robot
self.leftMotor.setVelocity(leftSpeed)
self.rightMotor.setVelocity(rightSpeed)
# Keep track of angle swept
total_angle += delta_angle * reduce
self._current_angle += delta_angle*reduce*direction
if self.gps_on and step % self.gps_freq == 0:
step_info['position'] = self.gps.getValues()
step_info['speed'] = self.gps.getSpeed()
                if (self.camera_on and self.save_imgs) and \
                        (step % (self.camera_freq * self.save_img_every_nframes) == 0):
self.save_camera_img(output=self.img_format)
step_info['image_data'] = self.camera.getImageArray()
self._history.append(step_info) # update history
def move(self, speed, distance, reverse=False):
'''
speed: motor speed as fraction of top speed
float between [0, 1]
distance: distance to travel in meters
'''
if speed > 1 or speed < 0:
raise ValueError('Speed must be float between [0, 1]')
motor_speed = speed * self.max_speed
# motor_speed is angular velocity; convert to linear increment
dist_increment = self.angular_to_linear(motor_speed) * (self.time_step * 1e-3)
dist_traveled = 0
# For accuracy, slow down before reaching endpoint at n_steps
n_steps = int(distance // dist_increment)
# After n_steps reduce speed
reduction = 0.2 # reduce to 1/5
self.stopped = False
rvrs = -1 if reverse else 1
step = 0 # counter for frequency-dependent functions
while True:
step_info = {} # collect info for history
if self.step(self.time_step) == -1 or dist_traveled > distance:
break
else:
step += 1
reduce = 1 if step < n_steps else reduction
# Adjust motor speeds to get to end
self.leftMotor.setVelocity(motor_speed * reduce * rvrs)
self.rightMotor.setVelocity(motor_speed * reduce * rvrs)
# Update distance
dist_traveled += dist_increment * reduce
step_info['time'] = dt.datetime.now().strftime(self.__fmt)
if self.gps_on and step % self.gps_freq == 0:
step_info['position'] = self.gps.getValues()
step_info['speed'] = self.gps.getSpeed()
                if (self.camera_on and self.save_imgs) and \
                        (step % (self.camera_freq * self.save_img_every_nframes) == 0):
                    self.save_camera_img(output=self.img_format)
step_info['image_data'] = self.camera.getImageArray()
self._history.append(step_info) # update history
def stop(self):
if not self.stopped:
self.leftMotor.setVelocity(0.0)
self.rightMotor.setVelocity(0.0)
self.stopped = True
def idle(self, wait_time):
'''Idle the robot for `wait_time` seconds'''
if not self.stopped:
self.stop()
time.sleep(wait_time)
### GPS FUNCTIONS ###
def start_gps(self, frequency:int=1):
'''
frequency: sampling period relative to time step.
e.g. `frequency` = 2 returns coordinates
every 2 time steps. Should be int >= 1
'''
if frequency < 1:
raise ValueError('`frequency` should be an int >= 1')
if not isinstance(frequency, int):
frequency = int(np.round(frequency))
print(f'Non-integer given for `frequency`, value rounded to {frequency}')
self.gps_freq = frequency
if not self.gps_on:
self.gps.enable(self.time_step * self.gps_freq)
self.gps_on = True
def stop_gps(self):
if self.gps_on:
self.gps.disable()
self.gps_on = False
### CAMERA FUNCTIONS ###
def start_camera(self, frequency:int=1, save_imgs=False):
'''
frequency: sampling period relative to time step.
e.g. `frequency` = 2 returns image
every 2 time steps. Should be int >= 1
'''
if frequency < 1:
raise ValueError('`frequency` should be an int >= 1')
if not isinstance(frequency, int):
frequency = int(np.round(frequency))
print(f'Non-integer given for `frequency`, value rounded to {frequency}')
self.camera_freq = frequency
self.camera.enable(self.time_step * frequency)
self.camera_on = True
self.save_imgs = save_imgs
def stop_camera(self):
self.camera.disable()
self.camera_on = False
def save_camera_img(self, img_dir=capturedImgDir, output:str='png', quality=90,\
every_nframes:int=4):
'''
img_dir: Save images to this directory
output: Either 'png' or 'jpg'
quality: Only for jpg, from 1 (worst) to 100 best
every_nframes: Save only the nth frame (e.g. 4 saves every 4th frame)
'''
img_num = len(os.listdir(img_dir)) + 1
img_name = 'robot_img' + str(img_num).zfill(5) + '.' + output
file_name = '/'.join([img_dir, img_name])
self.save_img_every_nframes = every_nframes
self.camera.saveImage(file_name, quality)
# ========================================================================================== #
# Some simple processes to test functions
# robot = epuckControl()
# robot.initialize()
# print(robot._history)
# robot.idle(2)
# robot.start_camera()
# robot.turn(90, 0.1)
# robot.move(0.4, 0.4)
# robot.idle(3)
# robot.start_gps(frequency=4)
# robot.gps_coords()
# robot.turn(-50, 0.05)
# robot.move(0.25, 1.0)
# robot.stop_camera()
# robot.stop_gps()
# robot.idle(2)
# # print(robot._history)
# robot.turn(30, 0.05)
|
{"hexsha": "dbda1262a5798c5109bacbe7168cd0b20d456b49", "size": 11875, "ext": "py", "lang": "Python", "max_stars_repo_path": "Webots_Object_Finding/controllers/simulation/robotControl.py", "max_stars_repo_name": "tsoud/robotics", "max_stars_repo_head_hexsha": "ca3626fd3fce67afb65fcb97f6df4b1112033a7b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Webots_Object_Finding/controllers/simulation/robotControl.py", "max_issues_repo_name": "tsoud/robotics", "max_issues_repo_head_hexsha": "ca3626fd3fce67afb65fcb97f6df4b1112033a7b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Webots_Object_Finding/controllers/simulation/robotControl.py", "max_forks_repo_name": "tsoud/robotics", "max_forks_repo_head_hexsha": "ca3626fd3fce67afb65fcb97f6df4b1112033a7b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7357954545, "max_line_length": 110, "alphanum_fraction": 0.5978947368, "include": true, "reason": "import numpy", "num_tokens": 2739}
|
import numpy as np
class BackendOperations(object):
"""A class for centralizing backend operations
This class will be growing systematically. This is probably not the best
solution but can be worked out later.
Parameters
----------
backend : object
A backend object: numpy, tensorflow, or pytorch.
"""
def __init__(self, backend):
self.backend = backend
self.name = self.backend.__name__
def dot(self, a, b):
"""Dot product"""
if self.name == "torch":
if isinstance(a, np.ndarray):
a = self.backend.Tensor(a).float()
if isinstance(b, np.ndarray):
b = self.backend.Tensor(b).float()
return self.backend.matmul(a, b)
else:
return self.backend.dot(a, b)
def logspace(self, a, b, num):
"""Logspace"""
if self.name == "torch":
return self.backend.logspace(start=float(a), end=float(b), steps=num)
else:
return self.backend.logspace(a, b, num)
def log10(self, a):
"""Log base 10"""
if self.name == "torch":
a = self.backend.Tensor([a])
return self.backend.log10(a)
def norm(self, a):
"""Norm between two vectors"""
if self.name == "torch":
return self.backend.norm(a).float()
else:
return self.backend.linalg.norm(a)
def exp(self, a):
"""Exponential of a number"""
return self.backend.exp(a)
def from_numpy(self, a):
"""Convert from numpy to right data type"""
a = np.array(a) # This is the safest way
return self.backend.from_numpy(a).float()
    def to_numpy(self, a):
        """Convert from the backend's data type to numpy"""
        if self.name == "torch":
            return a.numpy()
        # non-torch backends already hold numpy-compatible arrays
        return np.asarray(a)
    def divide(self, a, b):
        """Divide two vectors/tensors"""
        if self.name == "torch":
            return self.backend.div(a, b)
        # numpy-style fallback for the other backends
        return self.backend.divide(a, b)
    def sum(self, a):
        """Sum a list of values"""
        if self.name == "torch":
            return self.backend.sum(a)
        # numpy-style fallback for the other backends
        return self.backend.sum(a)
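# Minimal usage sketch with the numpy backend:
#
#   import numpy as np
#   ops = BackendOperations(np)
#   ops.dot(np.ones(3), np.ones(3))   # -> 3.0
#   ops.norm(np.array([3.0, 4.0]))    # -> 5.0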
|
{"hexsha": "f862270fe5d5c4679df9a8a2e06d6dd7d679103d", "size": 2124, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml4chem/backends/operations.py", "max_stars_repo_name": "muammar/mlchem", "max_stars_repo_head_hexsha": "365487c23ea3386657e178e56ab31adfe8d5d073", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 77, "max_stars_repo_stars_event_min_datetime": "2019-08-05T17:30:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T14:31:35.000Z", "max_issues_repo_path": "ml4chem/backends/operations.py", "max_issues_repo_name": "muammar/ml4chem", "max_issues_repo_head_hexsha": "365487c23ea3386657e178e56ab31adfe8d5d073", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-07-31T18:59:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-18T18:15:07.000Z", "max_forks_repo_path": "ml4chem/backends/operations.py", "max_forks_repo_name": "muammar/mlchem", "max_forks_repo_head_hexsha": "365487c23ea3386657e178e56ab31adfe8d5d073", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 15, "max_forks_repo_forks_event_min_datetime": "2020-02-28T10:11:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-01T13:45:33.000Z", "avg_line_length": 28.32, "max_line_length": 81, "alphanum_fraction": 0.549905838, "include": true, "reason": "import numpy,from numpy", "num_tokens": 472}
|
from manimlib import *
import copy
import networkx as nx
from .algo_vgroup import *
from .algo_node import *
import queue
class DataNode(object):
def __init__(self, id, k, v, raw):
self.id = id
self.k = k
self.v = v
self.raw = raw
class AlgoRBTreeNode(object):
def __init__(self, t, id, k, v, color):
self.id = id
self.tree = t
self.k = k
self.v = v
self.color = color
self.p = t.nil()
self.left = t.nil()
self.right = t.nil()
def addChild(self, t, z):
y = self
if y.isNil():
t.root = z
elif z.k < y.k:
y.left = z
else:
y.right = z
def isNil(self):
return self.k < 0
def isNotNil(self):
return not self.isNil()
def isLeaf(self):
return self.left.isNil() and self.right.isNil()
def isLeft(self):
return self == self.p.left
def isRight(self):
return self == self.p.right
def brother(self):
if self.isLeft():
return self.p.right
else:
return self.p.left
def replaceChild(self, t, u, v):
if u == self.left:
self.left = v
self.setLeft(t, v)
else:
self.right = v
self.setRight(t, v)
v.p = self
v.setParent(t, self)
def setLeft(self, t, x):
self.left = x
t.add_edge(self, x)
def setRight(self, t, x):
self.right = x
t.add_edge(self, x)
def setParent(self, t, x):
self.p = x
t.add_edge(self.p, self)
def setColor(self, c):
self.color = c
n = self.tree.get_node(self.id)
if n:
n.set_color(c)
class AlgoRBTreeContext():
def __init__(self):
self.animate = True
self.insert_message = True
self.delete_message = True
self.run_time = 1
self.wait_time = 2
class AlgoRBTree(AlgoVGroup):
def __init__(self, scene, **kwargs):
self.edge_objs = {}
self.node_objs = {}
self.ctx = AlgoRBTreeContext()
self.scene = scene
self.edges = []
self.nodes = []
super().__init__(**kwargs)
self.node_id = 0
self.raw_nil = None
self.raw_nil = AlgoRBTreeNode(self, 0, -1, 0, BLACK)
self.root = self.nil()
# self.add_node(nil)
self.center()
def nil(self):
id = self.get_node_id()
if self.raw_nil == None:
return None
n = copy.copy(self.raw_nil)
n.id = id
return n
def insert(self, z):
y = self.nil()
x = self.root
while x.isNotNil():
y = x
if z.k < x.k:
x = x.left
else:
x = x.right
z.p = y
y.addChild(self, z)
self.add_node(z)
self.scene.show_message("插入元素%d"%(z.k), animate=self.ctx.insert_message)
self.update_nodes()
self.insertFixup(z)
self.update_nodes()
def insertFixup(self, z:AlgoRBTreeNode):
while z.p.color == RED:
if z.p.isLeft():
self.scene.show_message("父节点在左边", animate=self.ctx.insert_message)
y = z.p.brother()
if y.color == RED:
print("insert left case 1")
self.scene.show_message("插入case 1:叔叔节点存在并且为红色", animate=self.ctx.insert_message)
z.p.setColor(BLACK)
y.setColor(BLACK)
z.p.p.setColor(RED)
z = z.p.p
self.update_nodes()
else:
if z.isRight():
print("insert left case 2")
self.scene.show_message("插入case 2:新节点在右边,左旋新节点再执行case 3", animate=self.ctx.insert_message)
z = z.p
self.leftRotate(z)
self.update_nodes()
print("insert left case 3")
self.scene.show_message("插入case 3:设置父节点为黑色,祖父节点为红色,右旋祖父节点%d"%(z.p.p.k), animate=self.ctx.insert_message)
z.p.setColor(BLACK)
z.p.p.setColor(RED)
self.rightRotate(z.p.p)
self.update_nodes()
else:
self.scene.show_message("父节点在右边", animate=self.ctx.insert_message)
y = z.p.brother()
if y.color == RED:
print("insert right case 1")
self.scene.show_message("插入case 1:叔叔节点存在并且为红色", animate=self.ctx.insert_message)
z.p.setColor(BLACK)
y.setColor(BLACK)
z.p.p.setColor(RED)
z = z.p.p
self.update_nodes()
else:
if z.isLeft():
print("insert right case 2")
self.scene.show_message("插入case 2:新节点在左边,右旋转再执行case 3", animate=self.ctx.insert_message)
z = z.p
self.rightRotate(z)
self.update_nodes()
print("insert right case 3")
self.scene.show_message("插入case 3:设置父节点为黑色,祖父节点为红色,左旋祖父节点%d"%(z.p.p.k), animate=self.ctx.insert_message)
z.p.setColor(BLACK)
z.p.p.setColor(RED)
self.leftRotate(z.p.p)
self.update_nodes()
self.root.setColor(BLACK)
def dumpInternal(self, n, d):
if (not n):
return
for _ in range(d):
print("--", end='')
print("%d(%d %d)"%(n.k, n.k, n.v))
self.dumpInternal(n.left, d + 1)
self.dumpInternal(n.right, d + 1)
def dump(self, n):
self.dumpInternal(n, 1)
def leftRotate(self, x):
y = x.right
# x.right = y.left
x.setRight(self, y.left)
if y.left.isNotNil():
# y.left.p = x
y.left.setParent(self, x)
# y.p = x.p
y.setParent(self, x.p)
self.transplant(x, y)
# y.left = x
y.setLeft(self, x)
# x.p = y
x.setParent(self, y)
def rightRotate(self, x):
y = x.left
# x.left = y.right
x.setLeft(self, y.right)
if y.right.isNotNil():
# y.right.p = x
y.right.setParent(self, x)
# y.p = x.p
y.setParent(self, x.p)
self.transplant(x, y)
# y.right = x
y.setRight(self, x)
# x.p = y
x.setParent(self, y)
def transplant(self, u, v):
if u.p.isNil():
self.root = v
self.root.setParent(self, self.nil())
else:
u.p.replaceChild(self, u, v)
def calc_networkx(self, nodes, edges):
self.g = nx.Graph()
for k in nodes:
self.g.add_node(k.id)
for k in edges:
self.g.add_edge(*k)
self.pos_infos = nx.nx_agraph.graphviz_layout(self.g, prog='dot', args='-Grankdir="TB"')
points = self.pos_infos
x = [points[k][0] for k in points]
y = [points[k][1] for k in points]
c = (sum(x) / len(points), sum(y) / len(points))
for k in self.pos_infos:
self.pos_infos[k] = np.array(self.pos_infos[k])-np.array(c)
return self.pos_infos
def calc_tree_data(self, root):
q = []
q.append(root)
nodes = []
edges = []
while len(q)>0:
p = q.pop(0)
nodes.append(DataNode(p.id, p.k, p.v, p))
if p.left:
self.check_node(p.left)
self.check_edge(p, p.left)
edges.append((p.id, p.left.id))
q.append(p.left)
if p.right:
self.check_node(p.right)
self.check_edge(p, p.right)
edges.append((p.id, p.right.id))
q.append(p.right)
return nodes, edges
def check_edge(self, x, y):
if (x.id, y.id) not in self.edge_objs:
self.add_edge(x, y)
def update_nodes(self):
if not self.ctx.animate:
return
        # data layer
nodes, edges = self.calc_tree_data(self.root)
# layout
pos_infos = self.calc_networkx(nodes, edges)
#
self.move_nodes(pos_infos, nodes, edges)
    # build the tree layout
def move_nodes(self, pos_infos, nodes, edges):
self.nodes = nodes
self.edges = edges
# remove unused nodes
keys = list(self.node_objs.keys())
for k in keys:
if k not in [x.id for x in self.nodes]:
o = self.node_objs[k]
self.remove(o)
self.scene.remove(o)
del self.node_objs[k]
node_animations = []
for k in self.nodes:
n = self.get_node(k.id)
p = self.get_node_pos(k.id)
n.set_color(k.raw.color)
if self.ctx.animate:
node_animations.append(ApplyMethod(n.move_to, p, run_time=self.ctx.run_time))
else:
node_animations.append(ApplyMethod(n.move_to, p, run_time=0.01))
# remove edges
keys = list(self.edge_objs.keys())
for k in keys:
if k not in self.edges:
e = self.edge_objs[k]
self.remove(e)
self.scene.remove(e)
del self.edge_objs[k]
animations = []
for k in self.edges:
e = self.get_edge(*k)
p1 = np.array(self.get_node_pos(k[0]))+DOWN*0.25
p2 = np.array(self.get_node_pos(k[1]))+UP*0.25
if self.ctx.animate:
animations.append(ApplyMethod(e.put_start_and_end_on, p1, p2, run_time=self.ctx.run_time))
else:
animations.append(ApplyMethod(e.put_start_and_end_on, p1, p2, run_time=0.01))
self.scene.play(*node_animations, *animations)
if self.ctx.animate:
self.scene.wait(self.ctx.wait_time)
def set(self, k, v):
if self.root.isNil():
self.scene.show_message("插入元素%d"%(k), animate=self.ctx.insert_message, delay=0)
z = AlgoRBTreeNode(self, self.get_node_id(), k, v, BLACK)
self.root = z
self.add_node(z)
self.update_nodes()
else:
z = AlgoRBTreeNode(self, self.get_node_id(), k, v, RED)
self.insert(z)
def add_node(self, z):
if z.id in self.node_objs:
return
n = AlgoNode(str(z.k))
if z.isNil():
n.scale(0.5)
tri = Triangle().scale(0.2)
n.add(tri)
tri.center()
n.outline_obj.scale(0)
n.text_obj.scale(0)
n.set_color(RED)
self.node_objs[z.id] = n
self.add(n)
# add edges
self.add_edge(z.p, z)
self.add_edge(z, z.left)
self.add_edge(z, z.right)
def add_edge(self, n, t):
if not n or not t:
return
a = Arrow(ORIGIN, ORIGIN, thickness=0.03, buff=1.25)
a.set_color(GREEN_C)
self.add(a)
self.edge_objs[(n.id, t.id)] = a
def get_node(self, id):
if id not in self.node_objs:
return None
return self.node_objs[id]
def get_nil_nodes(self):
n = []
for k in self.nodes:
if k.k == -1:
n.append(self.get_node(k.id))
return n
def get_edge(self, i, j):
return self.edge_objs[(i, j)]
def getInternal(self, n, k):
if not n or n.isNil():
return None
if n.k == k:
return n
if k < n.k:
return self.getInternal(n.left, k)
return self.getInternal(n.right, k)
def treeMinimum(self, x):
p = x
while p.left.isNotNil():
p = p.left
return p
def deleteInternal(self, z):
self.scene.show_message("删除节点%d"%(z.k), animate=self.ctx.delete_message)
y = z
origin_color = y.color
x = None
if (z.left.isNil()):
x = z.right
self.transplant(z, z.right)
elif (z.right.isNil()):
x = z.left
self.transplant(z, z.left)
else:
y = self.treeMinimum(z.right)
origin_color = y.color
x = y.right
if (y.p == z):
x.p = y
else:
self.transplant(y, y.right)
y.right = z.right
y.right.p = y
self.transplant(z, y)
y.left = z.left
y.left.p = y
y.color = z.color
self.update_nodes()
if (origin_color == BLACK):
self.deleteFixUp(x)
def deleteFixUp(self, x):
while (x != self.root and x.color == BLACK):
if (x.isLeft()):
self.scene.show_message("替换节点在左边", animate=self.ctx.delete_message)
w = x.brother()
if (w.color == RED):
print("delete left case 1")
self.scene.show_message("删除case 1:兄弟节点是红色,设置兄弟节点为黑色,设置父节点为红色", animate=self.ctx.delete_message)
self.scene.show_message("左旋父节点%d"%(x.p.k), animate=self.ctx.delete_message)
w.setColor(BLACK)
x.p.setColor(RED)
self.leftRotate(x.p)
w = x.p.right
self.update_nodes()
if (w.left.color == BLACK and w.right.color == BLACK):
print("delete left case 2")
self.scene.show_message("删除case 2:兄弟节点的子节点都是黑色,设置父节点为红色", animate=self.ctx.delete_message)
w.setColor(RED)
x = x.p
else:
if (w.right.color == BLACK):
print("delete left case 3")
self.scene.show_message("删除case 3:兄弟节点的右孩子为黑色,右旋兄弟节点", animate=self.ctx.delete_message)
self.scene.show_message("设置兄弟节点的左孩子为红色,兄弟节点为红色", animate=self.ctx.delete_message)
w.left.setColor(BLACK)
w.setColor(RED)
self.scene.show_message("右旋兄弟节点", animate=self.ctx.delete_message)
self.rightRotate(w)
w = x.p.right
self.update_nodes()
print("delete left case 4")
self.scene.show_message("删除case 4:设置兄弟节点为父节点的颜色,父节点为黑色,兄弟节点右孩子黑色,左旋父节点",
animate=self.ctx.delete_message)
w.color = x.p.color
x.p.setColor(BLACK)
w.right.setColor(BLACK)
self.leftRotate(x.p)
x = self.root
self.update_nodes()
else:
self.scene.show_message("节点在右边", animate=self.ctx.delete_message)
w = x.p.left
if (w.color == RED):
print("delete right case 1")
self.scene.show_message("删除case 1:兄弟节点是红色,设置兄弟节点为黑色,设置父节点为红色", animate=self.ctx.delete_message)
self.scene.show_message("右旋父节点%d"%(x.p.k), animate=self.ctx.delete_message)
w.setColor(BLACK)
x.p.setColor(RED)
self.rightRotate(x.p)
w = x.p.left
self.update_nodes()
if (w.right.color == BLACK and w.left.color == BLACK):
print("delete right case 2")
self.scene.show_message("删除case 2:兄弟节点的子节点都是黑色,设置父节点为红色", animate=self.ctx.delete_message)
w.setColor(RED)
x = x.p
self.update_nodes()
else:
if (w.left.color == BLACK):
print("delete right case 3")
self.scene.show_message("删除case 3:兄弟节点的左孩子为黑色,右旋兄弟节点", animate=self.ctx.delete_message)
self.scene.show_message("设置兄弟节点的右孩子为红色,兄弟节点为红色", animate=self.ctx.delete_message)
w.right.setColor(BLACK)
w.setColor(RED)
self.leftRotate(w)
w = x.p.left
self.update_nodes()
print("delete right case 4")
self.scene.show_message("删除case 4:设置兄弟节点为父节点的颜色,父节点为黑色,兄弟节点左孩子黑色,右旋父节点", animate=self.ctx.delete_message)
w.color = x.p.color
x.p.setColor(BLACK)
w.left.setColor(BLACK)
self.rightRotate(x.p)
x = self.root
self.update_nodes()
x.setColor(BLACK)
def delete(self, k):
# print("remove ", k)
z = self.getInternal(self.root, k)
if z:
self.deleteInternal(z)
def get_node_id(self):
self.node_id += 1
return self.node_id
def check_node(self, p):
if p.id not in self.node_objs:
self.add_node(p)
def hide_all(self):
for k in self.node_objs:
self.remove(self.node_objs[k])
for k in self.edge_objs:
self.remove(self.edge_objs[k])
def show_node(self, id):
n = self.get_node(id)
self.scene.play(FadeIn(n))
def show_edge(self, i, j):
a = self.edge_objs[(i, j)]
self.scene.play(FadeIn(a))
def get_node_pos(self, k):
p = self.pos_infos[k]
ratio = 60
return [p[0]/ratio, p[1]/ratio, 0]
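# Minimal usage sketch (`scene` is assumed to be the manim scene object this
# class expects, providing show_message/play/wait):
#
#   tree = AlgoRBTree(scene)
#   for k in [10, 5, 20, 15]:
#       tree.set(k, k)   # animated insert with red-black fix-ups
#   tree.delete(5)       # animated delete with red-black fix-ups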
|
{"hexsha": "943c686523141e285e7866c7304060517d0e52dc", "size": 17794, "ext": "py", "lang": "Python", "max_stars_repo_path": "animations/src/algo_rbtree.py", "max_stars_repo_name": "mckm2000/algorithm-stone", "max_stars_repo_head_hexsha": "23bad1c093093e311d7fe7cc57c6877b26a711c7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "animations/src/algo_rbtree.py", "max_issues_repo_name": "mckm2000/algorithm-stone", "max_issues_repo_head_hexsha": "23bad1c093093e311d7fe7cc57c6877b26a711c7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "animations/src/algo_rbtree.py", "max_forks_repo_name": "mckm2000/algorithm-stone", "max_forks_repo_head_hexsha": "23bad1c093093e311d7fe7cc57c6877b26a711c7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8888888889, "max_line_length": 125, "alphanum_fraction": 0.4872428909, "include": true, "reason": "import networkx", "num_tokens": 4429}
|
import sys
import os
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, os.path.join(ROOT_DIR, "src"))
import util
import torch
import numpy as np
from model import make_model
from render import NeRFRenderer
import torchvision.transforms as T
import imageio
import matplotlib.pyplot as plt
import skimage.io
from skimage.transform import resize
from scipy.ndimage import rotate
from pydicom import dcmread
from tqdm import tqdm
import cv2
## Hyperparams
elevation = 0.0
num_views = 48
## Radius and focal length set as in Sec. 2.4.f of https://iopscience.iop.org/article/10.1088/0031-9155/45/10/305/pdf
radius = 100 # how far away the x-ray source is from the centre of the patient in cm
focal = 140 # how far away the x-ray source is from the detector in cm
## Resolution and sensor size can be set independently
W = H = width_pixels = height_pixels = 512 # number of pixels over width/height
width = height = 60 # width/height of detector in cm
gif = True
device = 'cuda'
output = os.path.join(ROOT_DIR, "output")
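# Derived from the settings above: the detector pixel pitch is
# width / width_pixels = 60 cm / 512 ≈ 1.17 mm, and the cone-beam geometry
# magnifies the central patient plane by focal / radius = 140 / 100 = 1.4x.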
def normalize(arr):
    denominator = arr.max() - arr.min()
    if denominator == 0:
        return arr
    return (arr - arr.min()) / denominator
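# Usage sketch: normalize rescales into [0, 1] and returns constant arrays
# unchanged, e.g. normalize(np.array([2.0, 3.0, 4.0])) -> array([0. , 0.5, 1. ]).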
## Load in DICOM
# The z slice thickness is 3 mm, which is far coarser than we want. A Covid-19
# dataset with 1.25 mm thickness would be much better if we could calibrate it
# properly (or clamp so the minimum is -1000?).
arrs = []
for i in range(130):
    path = f"../data/manifest-OtXaMwL56190865641215613043/QIN LUNG CT/QIN-LSC-0003/08-06-2003-1-CT Thorax wContrast-41946/2.000000-THORAX W 3.0 B41 Soft Tissue-71225/1-{i+1:03}.dcm"
    ds = dcmread(path)
    arr = ds.pixel_array
    arrs.append(arr)
arr = np.array(arrs).astype(np.float32) # 130, 512, 512
arr = np.swapaxes(arr, 0, 1) # swap axes for nicer orientation
x_lim, y_lim, z_lim = 511, 129, 511 # replace with ct_shape or vice versa
## Extract parameters from metadata
# voxel size in mm
voxel_size = torch.tensor([float(ds.PixelSpacing[0]), float(ds.SliceThickness), float(ds.PixelSpacing[1])])
# HU rescaling params
rescale_intercept = float(ds.RescaleIntercept)
rescale_slope = float(ds.RescaleSlope)
## Rescale HU and calculate real world patient sizes in cm (attenuation coefficients are in cm^{-1})
# rescale to standard HU
arr = rescale_intercept + (arr * rescale_slope)
# size of ct scan in cm (= voxel size * num voxels)
ct_shape = torch.tensor([x_lim, y_lim, z_lim])+1
ct_size = voxel_size * ct_shape / 10
ct_size = ct_size.to(device)
# nearest and furthest z values based on radius of source and size of ct scan
z_near = radius - (ct_size[-1].item() / 2)
z_far = radius + (ct_size[-1].item() / 2)
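# Illustrative arithmetic (assuming ~1.0 mm in-plane pixel spacing, which this
# scan may not have): ct_size[-1] ≈ 512 * 1.0 mm / 10 = 51.2 cm, giving
# z_near ≈ 100 - 25.6 = 74.4 cm and z_far ≈ 100 + 25.6 = 125.6 cm.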
## Calculate x-ray source positions
_coord_from_blender = util.coord_from_blender()
render_poses = torch.stack(
    [
        _coord_from_blender @ util.pose_spherical(angle, elevation, radius)
        for angle in np.linspace(-180, 180, num_views + 1)[:-1]
    ],
    0,
)
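# The linspace sweeps a full circle at fixed elevation; the trailing [:-1]
# drops the duplicate endpoint so -180 and +180 degrees are not both rendered,
# leaving exactly num_views evenly spaced source positions.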
## Wrapper to get closest CT voxel for any xyz coordinate
class CTImage(torch.nn.Module):
    def __init__(self, img, water_coeff=0.08):
        super().__init__()
        # Convert from HU to linear attenuation coefficients.
        # Changing the water attenuation coefficient changes the contrast.
        self.water_coeff = water_coeff
        self.img = ((img.clamp(min=-1000) / 1000) + 1) * water_coeff

    def forward(self, xyz, coarse=True, viewdirs=None, far=False):
        # xyz is in range -0.5*ct_size to 0.5*ct_size. Scale to be in range [0, 1].
        xyz = xyz.squeeze(0)
        xyz = (xyz + (ct_size.unsqueeze(0) / 2)) / ct_size.unsqueeze(0)
        # Scale xyz to the nearest value in pixel space.
        xyz[:, 0] *= x_lim
        xyz[:, 1] *= y_lim
        xyz[:, 2] *= z_lim
        xyz = xyz.long().transpose(0, 1)
        # Find rows where values are out of bounds and put them back in bounds.
        mask = (xyz[0, :] < 0) | (xyz[1, :] < 0) | (xyz[2, :] < 0) | (xyz[0, :] > x_lim) | (xyz[1, :] > y_lim) | (xyz[2, :] > z_lim)
        xyz[:, mask] = 0
        sigma = self.img[tuple(xyz)]
        # Anything out of bounds is treated as air.
        sigma[mask] = 0
        sigma = sigma.reshape(1, -1, 1)
        rgb = torch.ones(1, sigma.size(1), 3).to(device)
        return torch.cat((rgb, sigma), dim=-1).to(device)
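# Worked example for the HU -> attenuation mapping in CTImage above:
# mu = (clamp(HU, min=-1000) / 1000 + 1) * water_coeff, so with the default
# water_coeff = 0.08 cm^-1: air (HU = -1000) gives mu = 0, water (HU = 0)
# gives mu = 0.08, and HU = +1000 gives mu = 0.16, consistent with
# HU = 1000 * (mu - mu_water) / mu_water.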
focal = torch.tensor(focal, dtype=torch.float32, device=device)
# TODO: Change num coarse and fine to take into account each voxel exactly once
image = CTImage(torch.tensor(arr).to(device))
renderer = NeRFRenderer(
    n_coarse=512, depth_std=0.01, sched=[],
    white_bkgd=False, composite_x_ray=True, eval_batch_size=50000, lindisp=True,
).to(device=device)
render_par = renderer.bind_parallel(image, [0], simple_output=True).eval()
render_rays = util.gen_rays_variable_sensor(render_poses, width_pixels, height_pixels, width, height, focal, z_near, z_far).to(device)
all_rgb_fine = []
for rays in tqdm(torch.split(render_rays.view(-1, 8), 80000, dim=0)):
    rgb, _depth = render_par(rays[None])
    all_rgb_fine.append(rgb[0])
_depth = None
rgb_fine = torch.cat(all_rgb_fine)
# rgb_fine = 1-normalize(rgb_fine)
rgb_fine = torch.clamp(1 - rgb_fine, 0, 1)
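# With composite_x_ray=True the renderer appears to return transmission-like
# values per ray, so 1 - rgb_fine flips them into a radiograph-style image
# (denser tissue brighter), clamped to [0, 1] before the uint8 conversion below.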
frames = (rgb_fine.view(num_views, H, W).cpu().numpy() * 255).astype(np.uint8)
im_name = "raw_data"
frames_dir_name = os.path.join(output, im_name + "_frames")
os.makedirs(frames_dir_name, exist_ok=True)
for i in range(num_views):
    frm_path = os.path.join(frames_dir_name, "{:04}.png".format(i))
    imageio.imwrite(frm_path, frames[i])
if gif:
    vid_path = os.path.join(output, im_name + "_vid.gif")
    imageio.mimwrite(vid_path, frames, fps=24)
else:
    vid_path = os.path.join(output, im_name + "_vid.mp4")
    imageio.mimwrite(vid_path, frames, fps=24, quality=8)
print("Wrote to", vid_path)
|
{"hexsha": "d5025445e42c34d9e16ef50ac906b6bb39482c8a", "size": 5814, "ext": "py", "lang": "Python", "max_stars_repo_path": "eval/render_ct.py", "max_stars_repo_name": "abrilcf/pixel-nerf", "max_stars_repo_head_hexsha": "9a6a8ab6c39ec01d52df3bf4c03830f7162cc679", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "eval/render_ct.py", "max_issues_repo_name": "abrilcf/pixel-nerf", "max_issues_repo_head_hexsha": "9a6a8ab6c39ec01d52df3bf4c03830f7162cc679", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "eval/render_ct.py", "max_forks_repo_name": "abrilcf/pixel-nerf", "max_forks_repo_head_hexsha": "9a6a8ab6c39ec01d52df3bf4c03830f7162cc679", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-04T12:10:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-04T12:10:20.000Z", "avg_line_length": 34.8143712575, "max_line_length": 183, "alphanum_fraction": 0.6945304438, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1693}
|