| text | meta |
|---|---|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from bokeh import mpl
from bokeh.plotting import show
# Generate random data
data = 1 + np.random.randn(20, 6)
# Then simply call Seaborn's violinplot
sns.violinplot(data, color="Set3")
plt.title("Seaborn violin plot in bokeh.")
show(mpl.to_bokeh(name="violin"))
|
{"hexsha": "bc138ffd8c9a65c021e4aea619087e4c5e498a5f", "size": 353, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/compat/seaborn/violin.py", "max_stars_repo_name": "timelyportfolio/bokeh", "max_stars_repo_head_hexsha": "a976a85535cf137c6238ce9e90b41ab14ae8ce22", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-07T18:55:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-07T18:55:59.000Z", "max_issues_repo_path": "examples/compat/seaborn/violin.py", "max_issues_repo_name": "timelyportfolio/bokeh", "max_issues_repo_head_hexsha": "a976a85535cf137c6238ce9e90b41ab14ae8ce22", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/compat/seaborn/violin.py", "max_forks_repo_name": "timelyportfolio/bokeh", "max_forks_repo_head_hexsha": "a976a85535cf137c6238ce9e90b41ab14ae8ce22", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-01T08:38:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T08:38:53.000Z", "avg_line_length": 22.0625, "max_line_length": 48, "alphanum_fraction": 0.7620396601, "include": true, "reason": "import numpy", "num_tokens": 98}
|
// The template and inlines for the -*- C++ -*- rational number classes.
// Initially implemented by Wai-Shing Luk <luk036@gmail.com>
//
/** @file include/rational.hpp
* This is a C++ Library header.
*/
#ifndef FUN_RATIONAL_HPP
#define FUN_RATIONAL_HPP 1
#include <cassert>
#include <type_traits> // is_integral<T>
#include <boost/operators.hpp>
namespace fun
{
/**
* @defgroup rational (extended) Rational Number
* @ingroup arithmetic
*
 * Classes and functions for (extended) rational numbers.
* Reference: MF103-
* @{
*/
// Forward declarations.
//template<typename _Z> struct rational;
/// greatest common divisor
template<typename _Z, class = typename
std::enable_if<std::is_integral<_Z>::value>::type>
//xxx requires is_integral<_Z>::value
inline constexpr _Z gcd(const _Z& a, const _Z& b) noexcept
{ return b == _Z(0) ? (a < _Z(0) ? -a : a) : gcd(b, a % b); }
/**
* Rational number.
*
 * @param _Z Type of the rational number elements
* @todo unit testing
*/
template <typename _Z, class = typename
std::enable_if<std::is_integral<_Z>::value>::type>
//xxx requires is_integral<_Z>::value
struct rational : boost::ordered_field_operators<rational<_Z>,
boost::ordered_field_operators2<rational<_Z>, _Z> >
{
/// Value typedef.
typedef _Z value_type;
    /// Default constructor.
    /// The numerator defaults to 0 and the denominator to 1.
explicit
rational(const _Z& p = _Z(), const _Z& q = _Z(1))
: _num{p}, _denom{q}
{
assert(!(_num == _Z(0) && _denom == _Z(0)));
normalize();
}
// Lets the compiler synthesize the copy constructor
//rational (const rational<_Z>&) = default;
/// Copy constructor
template<typename _Up>
explicit constexpr
rational(const rational<_Up>& s) noexcept
: _num{s.num()}, _denom{s.denom()}
{ }
    /// Return the numerator.
    constexpr _Z num() const noexcept { return _num; }
    /// Return the denominator.
    constexpr _Z denom() const noexcept { return _denom; }
// Lets the compiler synthesize the assignment operator
// rational<_Z>& operator= (const rational<_Z>&);
    /// Assign rational number @a s to this rational number.
template<typename _Up>
rational<_Z>& operator=(const rational<_Up>& s)
{ _num = s.num(); _denom = s.denom(); return *this; }
/// Increase this rational number (prefix operator)
rational<_Z>& operator++()
{ _num += _denom; return *this; }
/// Decrease this rational number (prefix operator)
rational<_Z>& operator--()
{ _num -= _denom; return *this; }
/// Increase this rational number (postfix operator)
rational<_Z> operator++(int)
{ rational<_Z> res(*this); ++(*this); return res; }
/// Decrease this rational number (postfix operator)
rational<_Z> operator--(int)
{ rational<_Z> res(*this); --(*this); return res; }
    /// Add @a a to this rational number.
rational<_Z>& operator+=(const _Z& a)
{ _num += _denom * a; return *this; }
    /// Subtract @a a from this rational number.
rational<_Z>& operator-=(const _Z& a)
{ _num -= _denom * a; return *this; }
/// Multiply this rational number by @a a.
rational<_Z>& operator*=(const _Z& a)
{ _num *= a; normalize(); return *this; }
/// Divide this rational number by @a a.
rational<_Z>& operator/=(const _Z& a)
{ _denom *= a; normalize(); return *this; }
/// Add @a s to this rational number.
template<typename _Up>
rational<_Z>& operator+=(const rational<_Up>& s)
{
_num = _num * s.denom() + _denom * s.num();
_denom *= s.denom();
normalize();
return *this;
}
/// Subtract @a s from this rational number.
template<typename _Up>
rational<_Z>& operator-=(const rational<_Up>& s)
{
_num = _num * s.denom() - _denom * s.num();
_denom *= s.denom();
normalize();
return *this;
}
    /// Multiply this rational number by @a s.
template<typename _Up>
rational<_Z>& operator*=(const rational<_Up>& s)
{
_num *= s.num();
_denom *= s.denom();
normalize();
return *this;
}
    /// Divide this rational number by @a s.
template<typename _Up>
rational<_Z>& operator/=(const rational<_Up>& s)
{
*this *= rational<_Z>(s.denom(), s.num());
return *this;
}
/// Cast to double
operator double () const { return double(num()) / denom(); }
private:
/// Normalize rational number.
void normalize() {
if (_denom < _Z()) {
_num = -_num;
_denom = -_denom;
}
_Z g = gcd(_num, _denom);
_num /= g;
_denom /= g;
}
private:
_Z _num;
_Z _denom;
};
// Operators:
/// Return new rational number @a r plus @a s.
template<typename _Z, typename _Up>
inline auto
operator+(const rational<_Z>& r, const rational<_Up>& s)
-> rational<decltype(r.num()*s.denom())>
{
auto num = r.num() * s.denom() + r.denom() * s.num();
decltype(num) denom = r.denom() * s.denom();
return rational<decltype(num)> {num, denom};
}
/// Return new rational number @a r minus @a s.
template<typename _Z, typename _Up>
inline auto
operator-(const rational<_Z>& r, const rational<_Up>& s)
-> rational<decltype(r.denom()*s.denom())>
{
//auto num = r.num() * s.denom() - r.denom() * s.num();
//decltype(num) denom = r.denom() * s.denom();
return rational<decltype(r.denom()*s.denom())>
{ r.num() * s.denom() - r.denom() * s.num(),
r.denom() * s.denom() };
}
/// Return new rational number @a r times @a s.
template<typename _Z, typename _Up>
inline auto
operator*(const rational<_Z>& r, const rational<_Up>& s)
-> rational<decltype(r.num()*s.num())>
{
return rational<decltype(r.num()*s.num())>
{ r.num()*s.num(), r.denom()*s.denom() };
}
/// Return new rational number @a r times @a s.
template<typename _Z, typename _Up>
inline auto
operator/(const rational<_Z>& r, const rational<_Up>& s)
-> decltype(r * s)
{
return r * rational<_Up>(s.denom(), s.num());
}
//xxx /// Return new rational number @a r minus @a s.
//xxx template<typename _Z>
//xxx inline rational<_Z>
//xxx operator-(rational<_Z> r, const rational<_Z>& s) { return r -= s; }
//xxx
//xxx //@{
//xxx /// Return new rational number @a r times @a a.
//xxx template<typename _Z>
//xxx inline rational<_Z>
//xxx operator*(rational<_Z> r, const rational<_Z>& s) { return r *= s; }
//xxx
//xxx template<typename _Z>
//xxx inline rational<_Z>
//xxx operator*(rational<_Z> r, const _Z& a) { return r *= a; }
//xxx
//xxx template<typename _Z>
//xxx inline rational<_Z>
//xxx operator*(const _Z& a, rational<_Z> r) { return r *= a; }
//xxx //@}
//xxx
//xxx /// Return new rational number @a r divided by @a a.
//xxx template<typename _Z>
//xxx inline rational<_Z>
//xxx operator/(rational<_Z> r, const _Z& a) { return r /= a; }
/// Return @a r.
template<typename _Z>
inline constexpr rational<_Z>
operator+(const rational<_Z>& r) noexcept { return r; }
/// Return negation of @a r
template<typename _Z>
inline constexpr rational<_Z>
operator-(const rational<_Z>& r) noexcept
{ return rational<_Z>(-r.num(), r.denom()); }
/// Return true if @a r is equal to @a s.
template<typename _Z, typename _Up>
inline bool
operator==(const rational<_Z>& r, const rational<_Up>& s)
{
assert(!(r.num() == 0 && r.denom() == 0)); // NaN
assert(!(s.num() == 0 && s.denom() == 0)); // NaN
return r.num() == s.num() && s.denom() == r.denom();
}
/// Return true if @a r is not equal to @a s.
template<typename _Z, typename _Up>
inline constexpr bool
operator!=(const rational<_Z>& r, const rational<_Up>& s) noexcept
{ return !(r == s); }
/// Return true if @a r is less than @a s.
template<typename _Z, typename _Up>
inline constexpr bool
operator<(const rational<_Z>& r, const rational<_Up>& s) noexcept
{ return r.num()*s.denom() < r.denom()*s.num(); }
/// Insertion operator for rational number values.
template<typename _Z, class _Stream>
_Stream& operator<<(_Stream& os, const rational<_Z>& r)
{
const auto& a = r.num();
const auto& b = r.denom();
_Z zero(0), one(1);
if (b == one) { os << a; return os; }
if (b != zero) { os << '(' << a << '/' << b << ')'; return os; }
if (a < zero) { os << "-Inf"; return os; }
if (a > zero) { os << "Inf"; return os; }
os << "NaN"; return os;
}
}
#endif
|
{"hexsha": "55642e2519886ad0fd5489ec8e67271b5414f54c", "size": 8699, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "lib/include/fun/rational.hpp", "max_stars_repo_name": "luk036/fun", "max_stars_repo_head_hexsha": "ac3896eb8741767324d6b400d38573a66f0917b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/include/fun/rational.hpp", "max_issues_repo_name": "luk036/fun", "max_issues_repo_head_hexsha": "ac3896eb8741767324d6b400d38573a66f0917b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/include/fun/rational.hpp", "max_forks_repo_name": "luk036/fun", "max_forks_repo_head_hexsha": "ac3896eb8741767324d6b400d38573a66f0917b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4881355932, "max_line_length": 75, "alphanum_fraction": 0.5931716289, "num_tokens": 2539}
|
import os
import numpy as np
try:
import numba
HAS_NUMBA = True
except ImportError:
HAS_NUMBA = False
List = list
Dict = dict
def set_list_type_for_jit():
global List, Dict
List = numba.typed.List
Dict = numba.typed.Dict
return
def create_nb_List(py_list):
nb_List = List()
    if not py_list:
        # numba cannot handle None, so return an empty typed List instead;
        # appending and popping one element lets the element type be inferred
        empty_list = List()
        empty_list.append("a")
        empty_list.pop()
        return empty_list
for val in py_list:
nb_List.append(val)
return nb_List
def create_nb_Dict(py_dict):
nb_Dict = Dict()
for tag, val in py_dict.items():
nb_Dict[tag] = val
return nb_Dict
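# A minimal usage sketch (illustrative only; the function and variable names below
# are not part of the original module). It shows how the helpers above turn plain
# Python containers into typed containers: after set_list_type_for_jit() has been
# called, List/Dict refer to numba.typed.List/Dict, otherwise they remain the
# built-in list/dict and the helpers simply copy their input.
def _example_typed_containers():
    tags = ["Na+", "Cl-"]
    charges = {"Na+": 1.0, "Cl-": -1.0}
    nb_tags = create_nb_List(tags)
    nb_charges = create_nb_Dict(charges)
    return nb_tags, nb_charges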
# @numba.njit(cache=True)
def root_finding_newton(fun, J, x, eps, max_iter, args):
"""
Solve nonlinear system fun(x)=0 by Newton's method.
J is the Jacobian of fun(x). Both fun(x) and J must be functions of x.
At input, x holds the start value. The iteration continues
until ||F|| < eps.
"""
F_value = fun(x, args)
F_value_ = F_value.reshape((-1, 1))
F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector
iteration_counter = 0
while abs(F_norm) > eps and iteration_counter < max_iter:
delta = np.linalg.solve(J(x, args), -F_value_)
        for i in range(x.size):  # element-wise in-place update (form that numba's njit accepts)
            x[i] += delta[i, 0]
F_value = fun(x, args)
F_value_ = F_value.reshape((-1, 1))
F_norm = np.linalg.norm(F_value, 2)
iteration_counter += 1
# Here, either a solution is found, or too many iterations
if abs(F_norm) > eps:
iteration_counter = -1
raise ValueError("Maximum iteration reached in newton root finding!")
return x, iteration_counter
# @numba.njit#(cache=True)
def numeric_jacobian(fun, x, diff_eps, args):
J = np.zeros((len(x), len(x)))
for i in range(len(x)):
x1 = x.copy()
x2 = x.copy()
x1[i] += diff_eps
x2[i] -= diff_eps
f1 = fun(x1, args)
f2 = fun(x2, args)
J[:, i] = (f1 - f2) / (2 * diff_eps)
return J
def create_jacobian(fun):
# @numba.njit()
def numba_J(x, args):
return numeric_jacobian(fun, x, 1e-8, args)
if HAS_NUMBA:
numba_J = numba.njit(cache=True)(numba_J)
return numba_J
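# A minimal usage sketch (illustrative only; the residual system and names below are
# hypothetical). It pairs create_jacobian with root_finding_newton on a small nonlinear
# system and assumes the pure-Python path (numba not installed); with numba installed
# the residual function itself would also need to be njit-compiled so the jitted
# solver can call it.
def _example_newton_solve():
    def residual(x, args):
        # root at x = (sqrt(2), sqrt(2))
        return np.array([x[0] ** 2 - 2.0, x[0] * x[1] - 2.0])
    jac = create_jacobian(residual)  # central finite-difference Jacobian
    x0 = np.array([1.0, 1.0])
    x_sol, n_iter = root_finding_newton(residual, jac, x0, 1e-10, 100, (0.0,))
    return x_sol, n_iter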
# @numba.njit(cache=True)
def root_finding_newton_previously(fun, J, x, eps, max_iter, args):
"""
Solve nonlinear system fun(x)=0 by Newton's method.
J is the Jacobian of fun(x). Both fun(x) and J must be functions of x.
At input, x holds the start value. The iteration continues
until ||F|| < eps.
"""
F_value = fun(x, args)
# F_value_ = F_value.reshape((-1, 1))
F_norm = np.linalg.norm(F_value, 2) # l2 norm of vector
iteration_counter = 0
while abs(F_norm) > eps and iteration_counter < max_iter:
delta = np.linalg.solve(J(x, args), -F_value)
x = x + delta
F_value = fun(x, args)
# F_value_ = F_value.reshape((-1, 1))
F_norm = np.linalg.norm(F_value, 2)
iteration_counter += 1
# Here, either a solution is found, or too many iterations
if abs(F_norm) > eps:
iteration_counter = -1
raise ValueError("Maximum iteration reached in newton root finding!")
return x, iteration_counter
# # Testing:
# @numba.njit(cache=True)
# def F(x, args):
# return np.array(
# [x[0]**2 - x[1] + x[0]*np.cos(args[0]*x[0]),
# x[0]*x[1] + np.exp(-x[1]) - x[0]**(-1)])
# @numba.njit(cache=True)
# def J(x, args):
# return np.array(
# [[2*x[0] + np.cos(args[0]*x[0]) - args[0]*x[0]*np.sin(args[0]*x[0]), -1],
# [x[1] + x[0]**(-2), x[0] - np.exp(-x[1])]])
# if __name__ == "__main__":
# expected = np.array([1.0, 0.0])
# tol = 1e-4
# x_guess = np.array([2.0, -1.0])
# args = (np.pi,)
# J_num = numeric_jacobian(F, x_guess, 1e-8, args)
# J_ext = J(x_guess, args)
# J_numba = create_jacobian(F)
# x, n = root_finding_newton(F, J_numba, x_guess, 0.0001, 100, args)
# print(n, x)
# error_norm = np.linalg.norm(expected - x, ord=2)
# assert error_norm < tol, 'norm of error =%g' % error_norm
# print('norm of error =%g' % error_norm)
if HAS_NUMBA:
root_finding_newton = numba.njit(cache=True)(root_finding_newton)
numeric_jacobian = numba.njit(cache=True)(numeric_jacobian)
root_finding_newton_previously = numba.njit(cache=True)(
root_finding_newton_previously
)
|
{"hexsha": "3d690df8a865a976e74688ab5355cb97930b98b6", "size": 4477, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyequion/utils_for_numba.py", "max_stars_repo_name": "caiofcm/pyequion", "max_stars_repo_head_hexsha": "762ce1fb68cbbf35e52f7d4db2c34bd29f1dd18c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-12-08T19:54:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T23:51:52.000Z", "max_issues_repo_path": "pyequion/utils_for_numba.py", "max_issues_repo_name": "caiofcm/pyequion", "max_issues_repo_head_hexsha": "762ce1fb68cbbf35e52f7d4db2c34bd29f1dd18c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-04T13:21:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-10T19:26:08.000Z", "max_forks_repo_path": "pyequion/utils_for_numba.py", "max_forks_repo_name": "caiofcm/pyequion", "max_forks_repo_head_hexsha": "762ce1fb68cbbf35e52f7d4db2c34bd29f1dd18c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-02-05T11:11:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-13T22:25:39.000Z", "avg_line_length": 27.4662576687, "max_line_length": 83, "alphanum_fraction": 0.5988385079, "include": true, "reason": "import numpy,import numba", "num_tokens": 1374}
|
library(tidyverse)
library(doParallel)
library(randomForest)
library(ggthemes)
library(data.table)
setwd('your_working_directory')
# Load dataset
df <- fread("file_name.csv") %>%
dplyr::select(-axiv_index_b, -axiv_index_t)
df$transition_noTransition <- as.factor(df$transition_noTransition)
# Set up parallel processing
cl <- makeCluster(detectCores() - 1)
registerDoParallel(cl)
res <- list()
# Fitting a random forest model (with a leave-one-out)
for (i in unique(df$subject)) {
cat('Testing on:', i, " ")
t <- proc.time()
  # Build the training set: keep all transition rows and downsample the (much more
  # frequent) noTransition rows to 30000; this cutoff can be changed.
  train_df <- df %>% dplyr::filter(subject != i)
  transition_rows <- train_df %>%
    dplyr::filter(transition_noTransition == 1) %>%
    dplyr::select(-subject, -video_time, -X)
  no_transition_rows <- train_df %>%
    dplyr::filter(transition_noTransition == 0) %>%
    dplyr::select(-subject, -video_time, -X)
  dat <- rbind(transition_rows,
               no_transition_rows[sample(nrow(no_transition_rows), 30000), ])
model.rf <- foreach(ntree = rep(29, 7), .combine = combine, .multicombine = TRUE,
.packages="randomForest") %dopar%
randomForest(transition_noTransition ~ .,
data = dat,
ntree = ntree,
mtry = 2,
importance = TRUE,
trim = TRUE,
returnData = FALSE,
)
pred <- dplyr::filter(df, subject == i)
res[[i]] <- data.frame(subject = i, X = pred$X, obs = pred$transition_noTransition, pred = predict(model.rf, pred),
predict(model.rf, dplyr::filter(df, subject == i), type = "prob"))
cat('(finished in: ', round((proc.time() - t)[[3]]/60, 2), " min)\n", sep = "")
}
stopCluster(cl)
# Joining all together
res <- bind_rows(res)
# Observing the results in a confusion matrix
caret::confusionMatrix(res$obs, res$pred)
|
{"hexsha": "e55ecbbb7311d424a103026774e43d43218f8707", "size": 2180, "ext": "r", "lang": "R", "max_stars_repo_path": "2_classifier_creation.r", "max_stars_repo_name": "neebul/activityShiftDetector", "max_stars_repo_head_hexsha": "5a9ffa03e8dcbd17e207f0dc657179f338665154", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "2_classifier_creation.r", "max_issues_repo_name": "neebul/activityShiftDetector", "max_issues_repo_head_hexsha": "5a9ffa03e8dcbd17e207f0dc657179f338665154", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2_classifier_creation.r", "max_forks_repo_name": "neebul/activityShiftDetector", "max_forks_repo_head_hexsha": "5a9ffa03e8dcbd17e207f0dc657179f338665154", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0588235294, "max_line_length": 119, "alphanum_fraction": 0.5811926606, "num_tokens": 563}
|
# python imports
import os, shutil
from string import Template
from math import log10
import subprocess
import time
# global library imports
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# local imports
from krg_utils import *
from utils import plot_2d_image
from tinti import tinti
from srf import FaultSegment, PointSource, FiniteFaultSource, write
from flat_earth import convert_local_idx_to_geo
meters_per_kilometer = 1e3
centimeters_per_meter = 1e2
kgm_to_gcm = 1e-3
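# Illustrative helper (a sketch, not called anywhere in this script): main() converts
# the summed seismic moment to moment magnitude with Mw = 2/3 * (log10(M0) - 9.05),
# with M0 in N*m. For the target moment of 8.62e18 N*m used below this gives Mw ~ 6.6.
def moment_to_mw(m0):
    """Convert seismic moment (N*m) to moment magnitude Mw."""
    return 2.0 / 3.0 * (log10(m0) - 9.05)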
def main(kwargs=None):
print('Generating rupture model using SO-KRG v1.0')
print('========================================\n')
params = kwargs or {}
plot_on = params['plot_on']
tapering = params['tapering']
writing = params['writing']
layered = params['layered']
generate_fields = params['generate_fields']
resample = False
write_template = False
force_slip_to_zero = True
debug = False
if params:
for k, v in params.items():
print(f'{k}: {v}')
print('resample: ' + str(resample))
print('write_template: ' + str(write_template))
print('force_slip_to_zero: ' + str(force_slip_to_zero))
print('debug: ' + str(debug))
print()
if generate_fields:
print('Generating random fields...')
cmnd = [
"RScript",
"--vanilla",
"generic_sim_tottori.R",
str(params['output_path']),
str(params['seed']),
str(params['nsim']),
str(params['dx']),
str(params['fault_length']),
str(params['fault_width'])
]
return_code = subprocess.run(" ".join(cmnd), capture_output=True, shell=True)
print(" ".join(cmnd))
print(return_code.stdout.decode('utf-8'))
if return_code.returncode != 0:
print('\tError generating random fields. Exiting program.')
print(return_code.stderr.decode('utf-8'))
exit(1)
else:
print('Skipping random field generation. Using pre-existing simulations.')
# generate strike, dip, and rake
nhat1 = np.fromfile("nhat1", "f").reshape(801, 2601)
nhat2 = np.absolute(np.fromfile("nhat2", "f").reshape(801, 2601))
nhat3 = np.fromfile("nhat3", "f").reshape(801, 2601) # make vector point "up"
# note: starting at x=1000 to reduce model size for small model
# should implement this calculation outside
nhat1 = nhat1[::4]
nhat2 = nhat2[::4]
nhat3 = nhat3[::4]
#fienen "the three-point problem"
# project onto horizontal plane, calculate angle between
print('Computing strike dip and rake...')
dip = get_dip(nhat1, nhat2, nhat3)
strike = get_strike(nhat1, nhat3, mean_strike=params['strike'])
# rake = np.ones(strike.shape)*180.0 # constant rake
rake = strike - 90 # strike is 270 and rake is 180
# using array 1 index
for src_idx in range(1, params['nsim']+1):
print(f'Preparing source model {src_idx}...')
src_dir = f'./source_models/'
output_name = f'sokrg-bbp_source{src_idx}'
out_dir = f'./source_models/source{src_idx}'
if not os.path.isdir( out_dir ):
os.makedirs( out_dir )
        # resampling is not expected to be used to improve simulation times, so it is hard-coded off here
params['nx'] = params['fault_length'] // params['dx'] + 1
params['nz'] = params['fault_width'] // params['dx'] + 1
params['inx'] = params['nx']
params['inz'] = params['nz']
# read normal score transforms, change to quantile transform
slip_sc = pd.read_csv('slip_nscore_transform_table.csv')
psv_sc = pd.read_csv('psv_nscore_transform_table.csv')
vrup_sc = pd.read_csv('vrup_nscore_transform_table.csv')
# extract data
slip = np.fromfile(src_dir + f'slip_sim{src_idx}.bin').reshape(params['inz'], params['inx'])
# flag used when choosing magnitudes
if force_slip_to_zero:
slip = slip - slip.mean()
if not debug:
psv = np.fromfile(src_dir + f'psv_sim{src_idx}.bin').reshape(params['inz'], params['inx'])
vrup = np.fromfile(src_dir + f'vrup_sim{src_idx}.bin').reshape(params['inz'], params['inx'])
if resample:
slip=resample2d(slip, shape=[params['nz'],params['nx']])
psv=resample2d(psv, shape=[params['nz'],params['nx']])
vrup=resample2d(vrup, shape=[params['nz'],params['nx']])
else:
params['nx'] = params['inx']
params['nz'] = params['inz']
# cut size of model down for computational ease
slip = slip[:-1, :-1]
psv = psv[:-1, :-1]
# psv=(psv-psv.mean())/psv.std()
vrup = vrup[:-1, :-1]
# update parameters
params['nx'] -= 1
params['nz'] -= 1
if layered:
            # bbp model is stored using kilometers
material = expand_bbp_velocity_model(
np.loadtxt(params['velocity_model_path']),
params['nx'],
params['nz'],
params['dx'] * 1e-3
)
# convert to meters
vp = material[0]*1e3
vs = material[1]*1e3
rho = material[2]*1e3
else:
vs = 3464*np.ones((params['nz'], params['nx']))
rho = 2700*np.ones((params['nz'], params['nx']))
# transform from normal-scores change this to normalized versions
slip = transform_normal_scores(slip, slip_sc)
psv = transform_normal_scores(psv, psv_sc)
vrup = transform_normal_scores(vrup, vrup_sc)
if tapering:
avg_slip_pre = slip.mean()
# from simulations, slip tapers larger
taper_width = params['taper_width_slip']
slip = boundary_taper(slip,
taper_width=taper_width,
free_surface=True,
values=np.array(((0.60, 0.05), (0.05, 0.05))) )
avg_slip_post = slip.mean()
slip_taper_ratio = avg_slip_pre / avg_slip_post
slip = slip * slip_taper_ratio
# taper to 30% of mean along-strike psv at z = taper_width * dx
taper_width = params['taper_width_psv']
ny,nx=psv.shape
baseline = np.ones( (ny-4*taper_width, nx-2*taper_width) )
padded = np.pad( baseline,
((3*taper_width,taper_width), (taper_width,taper_width)),
'linear_ramp',
end_values=np.array(((0.30, 0.05), (0.05, 0.05))) )
psv = padded * psv
vrup = vrup * vs
else:
vrup = vrup * vs
# compute moment
print('Computing moment...')
moment = get_moment(slip, vs, rho, params)
moment_ratio = params['target_moment'] / moment.sum()
if moment_ratio >= 1.1 or moment_ratio <= 0.9:
            print('Warning: greater than 10 percent difference between simulated moment and target moment. Consider adjusting fault area.')
print(f"\tTarget moment: {params['target_moment']}, Simulated moment: {moment.sum()}, Ratio: {moment_ratio}")
# material model and fault area is constant; therefore, only change comes from slip
slip = slip * moment_ratio
moment = get_moment(slip, vs, rho, params)
print(f'moment: {moment.sum()}\nmw: {2./3 * (log10(moment.sum()) - 9.05)}')
print()
trup = compute_trup(vrup, params)
# replace large nan values with large number; ie., they didn't rupture
inds = np.where(np.isnan(trup))
trup[inds] = 999.
# compute new psv given tinti kinematic parameters
# 1) cap max(slip/psv) = 2
psv_eff=psv.copy()
inds=np.where(slip/psv_eff > 2)
psv_eff[inds]=slip[inds] / 2
# 2) cap min(psv) = 0.1
inds=np.where(psv_eff < 0.1)
psv_eff[inds]=0.1
# estimate dcp based on mean of psv_eff and regression analysis, where vpeak/dcp = 2.46*fs_max
fs_max = params['fs_max']
ratio_vpeak_dcp = fs_max*2.46
dc_est = 1.0/ratio_vpeak_dcp * psv_eff.mean()
# compute ts on fault using dc_est
ratio_dcp_est_psv_eff = dc_est / psv_eff
ts = 1.55 * ratio_dcp_est_psv_eff
# this was chosen as a reasonable upper bound, but this needs to be defined more explicitly
# truptot=18.5
# savran and olsen, 2020 defines this as the average trup on the fault boundary
per = 97.5
truptot = np.percentile(
np.hstack([trup[0,:], trup[-1,:], trup[:,0], trup[:,-1]]),
per
)
treff=truptot-trup
# compute tr
        tr = 3.62 * slip + 0.07 * treff
tr[tr < 0] = 0
# prevent bad parameters
inds = np.where(tr < ts)
tr[inds] = 1.5 * ts[inds]
# compute test tr using eq.11 from tinti et al, 2005
tr_eq11 = (1.3*ts)/(dc_est/slip)**2
tr_eq7 = slip**2 / psv_eff**2 / (1.3*ts)
# compute psv of tinti functions
inds=np.where(tr > 0)
psv_tinti=np.zeros(psv_eff.shape)
psv_tinti[inds] = 1.04*slip[inds] / ( ((1.3*ts[inds])**0.54) * (tr[inds]**0.47) )
print(f'slip: min, max, mean ({slip.min():.2f}, {slip.max():.2f}, {slip.mean():.2f})')
print(f'psv: min, max ({psv.min():.2f}, {psv.max():.2f})')
print(f'psv/dcp: ({ratio_vpeak_dcp:.2f}) mean(psv_eff): ({psv_eff.mean():.2f}) dcp: ({dc_est:.2f})')
print(f'vrup: min, max ({vrup.min():.2f}, {vrup.max():.2f})')
print(f'trup: min, max ({trup.min():.2f}, {trup.max():.2f})')
print(f'ts: min, max, mean ({ts.min():.2f}, {ts.max():.2f}, {ts.mean():.2f})')
print(f'tr: min, max, mean ({tr.min():.2f}, {tr.max():.2f}, {tr.mean():.2f})')
print(f'psv_tinti: min, max, mean ({psv_tinti.min():.2f}, {psv_tinti.max():.2f}, {psv_tinti.mean():.2f})')
        print(f'tr_eq11: min, max, mean ({tr_eq11.min():.2f}, {tr_eq11.max():.2f}, {tr_eq11.mean():.2f})')
        print(f'tr_eq7: min, max, mean ({tr_eq7.min():.2f}, {tr_eq7.max():.2f}, {tr_eq7.mean():.2f})')
print(f'truptot: {truptot}')
print(f'avg strike: {strike.mean():.2f}')
print()
if plot_on:
x = np.arange(0,params['nx'])
z = np.arange(0,params['nz'])
plotting_data = {'data': slip, 'contour': trup}
plot_2d_image( plotting_data, out_dir + "/slip-" + output_name + ".pdf" , nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = "Slip (m)", xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = True, clim=(0, slip.max()), cmap='jet')
plot_2d_image( psv, out_dir + "/psv-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V^{peak}$ (m/s)', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, clim=(0, psv.max()), cmap='jet' )
plot_2d_image( trup, out_dir + "/trup-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r"$t_{0}$ (s)", xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = True, clim=(0,12.5) )
plot_2d_image( vrup/vs, out_dir + "/vrup-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V_{rup}/c_s$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='viridis', clim=(0, 1.0) )
plot_2d_image( vs, out_dir + "/vs-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$c_s$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet' )
plot_2d_image( ts, out_dir + "/ts-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$\tau_s$ (s)', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet' )
plot_2d_image( tr, out_dir + "/tr-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$\tau_r$ (s)', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet' )
plot_2d_image( tr_eq11, out_dir + "/treq11-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$tr_{eq11}$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet' )
plot_2d_image( tr_eq7, out_dir + "/treq7-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$tr_{eq7}$', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet' )
plot_2d_image( psv_tinti, out_dir + "/psv_tinti-" + output_name + ".pdf", nx = params['nx'], nz = params['nz'], dx = params['dx']*1e-3,
clabel = r'$V^{peak}$ (m/s)', xlabel = "Distance (km)", ylabel = "Distance (km)",
surface_plot = False, contour_plot = False, cmap='jet', clim=(0,psv_tinti.max()), show_plots=params['show_plots'])
plt.close('all')
# write to file for input
if writing:
dtype = '<f4'
print('Writing files...')
# start at 1000 to reduce the size of the source simulation.
vs.astype(dtype).tofile(os.path.join(out_dir, output_name + '_vs.bin'))
rho.astype(dtype).tofile(os.path.join(out_dir, output_name + '_rho.bin'))
slip.astype(dtype).tofile(os.path.join(out_dir, output_name + '_slip.bin'))
psv.astype(dtype).tofile(os.path.join(out_dir, output_name + '_psv.bin'))
vrup.astype(dtype).tofile(os.path.join(out_dir, output_name + '_vrup.bin'))
trup.astype(dtype).tofile(os.path.join(out_dir, output_name + '_trup.bin'))
strike.astype(dtype).tofile(os.path.join(out_dir, output_name + '_strike.bin'))
dip.astype(dtype).tofile(os.path.join(out_dir, output_name + '_dip.bin'))
rake.astype(dtype).tofile(os.path.join(out_dir, output_name + '_rake.bin'))
moment.astype(dtype).tofile(os.path.join(out_dir, output_name + '_moment.bin'))
ts.astype(dtype).tofile(os.path.join(out_dir, output_name + '_ts.bin'))
tr.astype(dtype).tofile(os.path.join(out_dir, output_name + '_tr.bin'))
# making params.txt file
if write_template:
print('Writing parameter file to params.txt')
fin=open( 'params.tmpl' )
template=Template( fin.read() )
fin.close()
d = {
'psv_file': 'in/' + output_name + '_psv.bin',
'vs_file': 'in/' + output_name + '_vs.bin',
'rho_file': 'in/' + output_name + '_rho.bin',
'trup_file': 'in/' + output_name + '_trup.bin',
'strike_file': 'in/' + output_name + '_strike.bin',
'dip_file': 'in/' + output_name + '_dip.bin',
'rake_file': 'in/' + output_name + '_rake.bin',
'slip_file': 'in/' + output_name + '_slip.bin',
'momentrate_file': '../stripe_count_160/' + output_name + '_source.bin',
'coord_file': 'in/fault_coords.bin',
'dc': dc_est,
'median_ts': 0.05333333,
'truptot': truptot,
}
template=template.substitute(d)
fout=open(os.path.join(out_dir,'params.txt'),'w')
fout.write(template)
fout.close()
# copying fault_coords.bin
print('copying fault_coords.bin into out_dir')
shutil.copy2('./fault_coords.bin', out_dir)
# write SRF files for source model
if params['generate_srf']:
srf = FiniteFaultSource()
srf.version = 2.0
seg = FaultSegment()
# segment information
# fault center lon and lat
seg.elon = params['lon_top_center']
seg.elat = params['lat_top_center']
# num points along strike and dip
seg.nstk = params['nx']
seg.ndip = params['nz']
# fault length (km)
seg.len = params['fault_length'] / meters_per_kilometer
# fault width (km)
seg.wid = params['fault_width'] / meters_per_kilometer
# fault strike
seg.stk = np.mean(strike)
# fault dip
seg.dip = np.mean(dip)
# depth to top of fault (km)
seg.dtop = params['fault_top'] / meters_per_kilometer
# along strike hypo center (km)
seg.shyp = params['ihypo'][1] * params['dx'] / meters_per_kilometer
# along dip hypo center (km)
seg.dhyp = params['ihypo'][0] * params['dx'] / meters_per_kilometer
# add to finite source
srf.segment_headers.append(seg)
# prepare subfaults
lat0 = params['lat_top_center']
lon0 = params['lon_top_center']
stk = np.deg2rad(params['strike'])
length = params['fault_length'] / meters_per_kilometer
dx = params['dx']
t = np.arange(0.0, 25.0, 0.025)
srcs = []
for idz in range(params['nz']):
for idx in range(params['nx']):
p = PointSource()
# uses flat-earth approximation centered on fault
p.lat, p.lon = convert_local_idx_to_geo(idx, lat0, lon0, params['fault_length'], params['dx'], stk)
# subfault depth (km)
p.dep = (params['fault_top'] + idz * dx) / meters_per_kilometer
# strike and dip (planar)
p.stk = strike[idz, idx]
p.dip = dip[idz, idx]
# subfault area (cm^2)
p.area = params['dx'] * params['dx'] * centimeters_per_meter * centimeters_per_meter
# t_init of source time function
p.tinit = trup[idz,idx]
# extract material parameters
# vs (cm/s)
p.vs = vs[idz, idx] * centimeters_per_meter
p.dt = params['dt']
# rho (g/cm^3)
p.den = rho[idz, idx] * kgm_to_gcm
p.rake = params['rake']
# decompose using strike, dip, and rake
stf = centimeters_per_meter * slip[idz,idx] * tinti(t, ts[idz,idx], tr[idz,idx], 0.0)
if np.any(np.isnan(stf)):
                    print(f'nan found in stf with params {ts[idz,idx]:.2f}, {tr[idz,idx]:.2f}')
# prepare source-time function
stf_trimmed = np.trim_zeros(stf, trim='b')
new_length = stf_trimmed.shape[0]
# slip1, nt1
p.sr1 = stf_trimmed
p.slip1 = slip[idz,idx] * centimeters_per_meter
p.nt1 = new_length
# slip2, nt2
p.slip2 = 0 * centimeters_per_meter
p.nt2 = 0
# slip3, nt3
p.slip3 = 0 * centimeters_per_meter
p.nt3 = 0
srcs.append(p)
srf.point_sources.append(srcs)
# write out to file
print(f'Writing source {src_idx}')
write(f'./srf/tottori-sokrg_v4-src{src_idx-1:04d}.srf', srf)
if __name__ == "__main__":
# values with units are provided using kg/m/s
params = {
'fault_length': 27000,
'fault_width': 21000,
'dx' : 100,
'target_moment': 8.62e+18,
'ihypo' : (121, 135),
'fault_top' : 100,
'taper_width_slip': 30,
'taper_width_psv': 10,
'fs_max': 12.5,
'output_path': '/Users/kxu4143/Documents/Codes_KRG_SO/outputs',
'seed': 123456,
'nsim': 64,
'velocity_model_path': './central_japan_bbp1d.txt',
'plot_on': True,
'show_plots': False,
'tapering': True,
'writing': True,
'layered': True,
'generate_fields': True,
'generate_srf': True,
'lat_top_center': 35.269,
'lon_top_center': 133.357,
'hypo_along_stk': 0.00,
'hypo_along_dip': 14.00,
'strike': 150,
'rake': 180,
'dip': 90,
'dt': 0.025
}
t0 = time.time()
main(kwargs=params)
t1 = time.time()
print()
print(f"Generated {params['nsim']} source models in {t1-t0} seconds.")
# # plot histograms of border rupture times
# borders = np.hstack([trup[0,:],trup[-1,:], trup[:,0], trup[:,-1]])
# p90 = np.percentile(borders, 90)
# p95 = np.percentile(borders, 95)
# p97p5 = np.percentile(borders, 97.5)
# p99 = np.percentile(borders, 99)
# plt.figure()
# plt.hist(borders, bins=np.arange(0, 12, 0.1))
# plt.title(f'Source {src_idx}')
# plt.xlabel('t$_{rup}$ [s]')
# plt.ylabel('Count')
# plt.axvline(p90, color='k', label='[p90, p95, p97.5, p99]')
# plt.axvline(x=p95, color='k')
# plt.axvline(x=p97p5, color='k')
# plt.axvline(x=p99, color='k')
# plt.legend(loc='upper left')
# plt.show()
|
{"hexsha": "3151c83f417f75f1f8f5991374b08b7b871064e1", "size": 22516, "ext": "py", "lang": "Python", "max_stars_repo_path": "generate_scale_truncated_resample.py", "max_stars_repo_name": "longyearxuk/sokrg", "max_stars_repo_head_hexsha": "001fcf8275eb158765de4e99e0d442b1712aa061", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "generate_scale_truncated_resample.py", "max_issues_repo_name": "longyearxuk/sokrg", "max_issues_repo_head_hexsha": "001fcf8275eb158765de4e99e0d442b1712aa061", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "generate_scale_truncated_resample.py", "max_forks_repo_name": "longyearxuk/sokrg", "max_forks_repo_head_hexsha": "001fcf8275eb158765de4e99e0d442b1712aa061", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.4102564103, "max_line_length": 164, "alphanum_fraction": 0.5304228104, "include": true, "reason": "import numpy", "num_tokens": 6039}
|
-- Andreas, 2018-05-09, issue 2636, reported by nad
-- {-# OPTIONS -v tc.pos:10 #-}
id : (A : Set₁) → A → A
id A x = x
A : Set₁
A = Set
where
F : Set₁ → Set₁
F X = X
data D : Set₁ where
c : F D → D
lemma : F (D → Set) → D → Set
lemma fp d = id (F (D → Set)) fp d
-- Problem was:
-- Positivity checker for D complained when lemma was present.
|
{"hexsha": "43d754f32d616b014889d1742f3524a0e80f325a", "size": 364, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Bugs/Issue2636.agda", "max_stars_repo_name": "cruhland/agda", "max_stars_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Bugs/Issue2636.agda", "max_issues_repo_name": "cruhland/agda", "max_issues_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Bugs/Issue2636.agda", "max_forks_repo_name": "cruhland/agda", "max_forks_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 16.5454545455, "max_line_length": 62, "alphanum_fraction": 0.5549450549, "num_tokens": 139}
|
import csv
from iacorpus import load_dataset
from gensim import corpora, models
import numpy as np
import pandas as pd
from sqlalchemy import Table, Column, Integer, sql
from sklearn.model_selection import train_test_split
# read data
dataset = load_dataset('fourforums', host='localhost', port='3306', username='root', password='symwrm')
view = Table('psl', dataset.connection.metadata, Column('id', Integer, primary_key=True), autoload=True,
autoload_with=dataset.connection.engine)
query = dataset.connection.session.query(view)
qrs = pd.read_sql(query.statement, query.session.bind)
qrs_train, qrs_test = train_test_split(qrs, test_size=0.2)
# relations
quote = qrs[['id']]
quote.to_csv('PSL/Quote.txt', sep="\t", index=False, header=False)
quote_discussion = qrs[['id', 'discussion_id']].dropna()
quote_discussion.to_csv('PSL/Quote_Discussion.txt', sep="\t", index=False, header=False)
quote_discussion.discussion_id.drop_duplicates().to_csv('PSL/Discussion.txt', sep="\t", index=False, header=False)
quote_post = qrs[['id', 'discussion_id', 'post_id']].dropna()
quote_post['post_uid'] = quote_post.discussion_id.apply(str) + "_" + quote_post.post_id.apply(str)
quote_post = quote_post[['id', 'post_uid']]
quote_post.to_csv('PSL/Quote_Post.txt', sep="\t", index=False, header=False)
quote_post.post_uid.drop_duplicates().to_csv('PSL/Post.txt', sep="\t", index=False, header=False)
quote_author = qrs[['id', 'author_id']].dropna()
quote_author.to_csv('PSL/Quote_Author.txt', sep="\t", index=False, header=False)
quote_author.author_id.drop_duplicates().to_csv('PSL/Author.txt', sep="\t", index=False, header=False)
quote_topic = qrs[['id', 'topic']].dropna()
quote_topic.topic = quote_topic.topic.apply(lambda x: x.replace(" ", "_"))
quote_topic.to_csv('PSL/Quote_Topic.txt', sep="\t", index=False, header=False)
quote_topic.topic.drop_duplicates().to_csv('PSL/Topic.txt', sep="\t", index=False, header=False)
quote_response = qrs[['id', 'discussion_id', 'source_post_id']].dropna()
quote_response['source_post_uid'] = quote_response.discussion_id.apply(str) + "_" + quote_response.source_post_id.apply(
str)
quote_response = quote_response[~quote_response.source_post_uid.isin(quote_post.post_uid)]
quote_response.to_csv('PSL/Quote_Response.txt', sep="\t", index=False, header=False)
# labels
quote_stance = qrs[['id', 'topic_stance_votes_1', 'topic_stance_votes_2']].dropna()
quote_stance['stance'] = [1 if x >= 2 else (0 if x <= .5 else np.nan) for x in
quote_stance.topic_stance_votes_1 / quote_stance.topic_stance_votes_2]
quote_stance = quote_stance[['id', 'stance']].dropna()
quote_stance.to_csv('PSL/Quote_Stance.txt', sep="\t", index=False, header=False)
quote_stance.stance.drop_duplicates().to_csv('PSL/Stance.txt', sep="\t", index=False, header=False)
quote_tag = qrs[['id', 'disagree_agree', 'emotion_fact', 'nasty_nice', 'attacking_respectful', 'sarcasm_yes']].copy()
quote_tag['agree'] = quote_tag.disagree_agree >= 1
quote_tag['disagree'] = quote_tag.disagree_agree <= -1
quote_tag['fact'] = quote_tag.emotion_fact >= 1
quote_tag['emotion'] = quote_tag.emotion_fact <= -1
quote_tag['nice'] = quote_tag.nasty_nice >= 1
quote_tag['nasty'] = quote_tag.nasty_nice <= -1
quote_tag['respectful'] = quote_tag.attacking_respectful >= 1
quote_tag['attacking'] = quote_tag.attacking_respectful <= -1
quote_tag['sarcasm'] = quote_tag.sarcasm_yes >= 0.5
quote_tag[['id', 'disagree', 'agree', 'emotion', 'fact', 'nasty', 'nice', 'attacking', 'respectful', 'sarcasm']].to_csv(
'rawtag.txt', index=False, )
quote_tag = quote_tag.melt(id_vars=['id'],
value_vars=['disagree', 'agree', 'emotion', 'fact', 'nasty', 'nice', 'attacking',
'respectful', 'sarcasm'])
quote_tag[quote_tag.id.isin(qrs_test.id)][['id', 'variable']].to_csv('PSL/Tagging_targets.txt', sep="\t", index=False,
header=False)
quote_tag = quote_tag[quote_tag.value][['id', 'variable']]
quote_tag.variable.drop_duplicates().to_csv('PSL/Tag.txt', sep="\t", index=False, header=False)
quote_tag[quote_tag.id.isin(qrs_train.id)].to_csv('PSL/Tagging.txt', sep="\t", index=False, header=False)
quote_tag[quote_tag.id.isin(qrs_test.id)].to_csv('PSL/Tagging_truth.txt', sep="\t", index=False, header=False)
# SVM corpora
presented_quote = qrs.presented_quote.values
presented_response = qrs.presented_response.values
combined = np.concatenate((presented_quote, presented_response))
splited = [sentence.lower().split() for sentence in combined]
vocab = corpora.Dictionary(splited)
vocab.filter_extremes(no_below=5, no_above=0.5, keep_n=None)
corpus = [vocab.doc2bow(text) for text in splited]
tfidf = models.TfidfModel(corpus)
corpus = [tfidf[doc] for doc in corpus]
corpora.SvmLightCorpus.serialize("corpus.txt", corpus)
ids = qrs[['id']].copy()
ids['train'] = ids.id.isin(qrs_train.id)
ids.to_csv('ids.txt', index=False)
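# A minimal read-back sketch (illustrative only; not part of the original pipeline).
# It shows how the artifacts written above could be reloaded for the downstream SVM
# step: the tf-idf SvmLight corpus via gensim and the train/test membership from ids.txt.
def _load_svm_inputs():
    corpus_back = corpora.SvmLightCorpus("corpus.txt")  # one tf-idf document per line
    split = pd.read_csv("ids.txt")  # columns: id, train (True for training rows)
    return corpus_back, split["train"].values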
|
{"hexsha": "298d91a4f677ae2844cfc2018402f29caffaa743", "size": 4984, "ext": "py", "lang": "Python", "max_stars_repo_path": "psldatagen.py", "max_stars_repo_name": "eosW/CMPS290C", "max_stars_repo_head_hexsha": "c47fcf4469445256975c43e99a15bcde61153da7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "psldatagen.py", "max_issues_repo_name": "eosW/CMPS290C", "max_issues_repo_head_hexsha": "c47fcf4469445256975c43e99a15bcde61153da7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "psldatagen.py", "max_forks_repo_name": "eosW/CMPS290C", "max_forks_repo_head_hexsha": "c47fcf4469445256975c43e99a15bcde61153da7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.3434343434, "max_line_length": 120, "alphanum_fraction": 0.7174959872, "include": true, "reason": "import numpy", "num_tokens": 1355}
|
import numpy as np
import torch
from mlp import MLP
from torch import optim
from utils import cosine_distance_torch
# https://github.com/kimiandj/gsw
class GSW_NN:
def __init__(self, din=2, nofprojections=10, model_depth=3, num_filters=32, use_cuda=True):
self.nofprojections = nofprojections
if torch.cuda.is_available() and use_cuda:
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.parameters = None # This is for max-GSW
self.din = din
self.dout = nofprojections
self.model_depth = model_depth
self.num_filters = num_filters
self.model = MLP(din=self.din, dout=self.dout, num_filters=self.num_filters)
if torch.cuda.is_available() and use_cuda:
self.model.cuda()
def gsw(self, X, Y, random=True):
"""
Calculates GSW between two empirical distributions.
Note that the number of samples is assumed to be equal
(This is however not necessary and could be easily extended
for empirical distributions with different number of samples)
"""
N, dn = X.shape
M, dm = Y.shape
assert dn == dm and M == N
if random:
self.model.reset()
Xslices = self.model(X.to(self.device))
Yslices = self.model(Y.to(self.device))
Xslices_sorted = torch.sort(Xslices, dim=0)[0]
Yslices_sorted = torch.sort(Yslices, dim=0)[0]
return torch.sqrt(torch.sum((Xslices_sorted - Yslices_sorted) ** 2))
def dgsw(self, X, Y, iterations=50, lam=1, lr=1e-4):
"""
Calculates GSW between two empirical distributions.
Note that the number of samples is assumed to be equal
(This is however not necessary and could be easily extended
for empirical distributions with different number of samples)
"""
N, dn = X.shape
M, dm = Y.shape
assert dn == dm and M == N
self.model.reset()
optimizer = optim.Adam(self.model.parameters(), lr=lr)
for i in range(iterations):
optimizer.zero_grad()
Xslices = self.model(X.to(self.device))
Yslices = self.model(Y.to(self.device))
Xslices_sorted = torch.sort(Xslices, dim=0)[0]
Yslices_sorted = torch.sort(Yslices, dim=0)[0]
loss = -torch.sqrt(torch.sum((Xslices_sorted - Yslices_sorted) ** 2)) + lam * cosine_distance_torch(
Xslices, Yslices
)
loss.backward(retain_graph=True)
optimizer.step()
return self.gsw(X.to(self.device), Y.to(self.device), random=False)
def max_gsw(self, X, Y, iterations=50, lr=1e-4):
N, dn = X.shape
M, dm = Y.shape
assert dn == dm and M == N
self.model.reset()
optimizer = optim.Adam(self.model.parameters(), lr=lr)
total_loss = np.zeros((iterations,))
for i in range(iterations):
optimizer.zero_grad()
loss = -self.gsw(X.to(self.device), Y.to(self.device), random=False)
total_loss[i] = loss.item()
loss.backward(retain_graph=True)
optimizer.step()
return self.gsw(X.to(self.device), Y.to(self.device), random=False)
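# A minimal usage sketch (illustrative only): compute the GSW distance between two
# random 2-D point clouds on the CPU. It assumes the MLP defined in mlp.py accepts
# the constructor arguments already used by GSW_NN above.
if __name__ == "__main__":
    torch.manual_seed(0)
    X = torch.randn(256, 2)
    Y = torch.randn(256, 2) + 1.0  # shifted cloud, so the distance should be > 0
    gsw_nn = GSW_NN(din=2, nofprojections=10, use_cuda=False)
    print("GSW(X, Y) =", gsw_nn.gsw(X, Y).item())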
|
{"hexsha": "144e0c6166675870a2184ca8da611e16859dc4be", "size": 3304, "ext": "py", "lang": "Python", "max_stars_repo_path": "gswnn.py", "max_stars_repo_name": "ttaa9/DSW", "max_stars_repo_head_hexsha": "ca29d425d4a535f53e70a7a45ebd13a7b196f8c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2020-02-18T04:01:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T07:34:33.000Z", "max_issues_repo_path": "gswnn.py", "max_issues_repo_name": "ttaa9/DSW", "max_issues_repo_head_hexsha": "ca29d425d4a535f53e70a7a45ebd13a7b196f8c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-04-20T06:02:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T13:05:13.000Z", "max_forks_repo_path": "gswnn.py", "max_forks_repo_name": "ttaa9/DSW", "max_forks_repo_head_hexsha": "ca29d425d4a535f53e70a7a45ebd13a7b196f8c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-03-22T18:19:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T00:04:56.000Z", "avg_line_length": 35.1489361702, "max_line_length": 112, "alphanum_fraction": 0.60562954, "include": true, "reason": "import numpy", "num_tokens": 794}
|
#!/usr/bin/env python
import rospy
import rospkg
from generation import Generation
import random
import matplotlib.pyplot as plt
import datetime
import time
import numpy as np
import os
import copy
from annealing import annealing
def get_or_error(string):
if rospy.has_param(string):
return rospy.get_param(string)
else:
rospy.logerr("%s/%s not found ! exit", rospy.get_namespace(), string)
def mean(values):
    ret = np.array(values)
    return ret.mean()
if __name__ == '__main__':
rospy.init_node('manipulation_optimizer')
n_ind = rospy.get_param("individuals_number" )
n_trial = rospy.get_param("max_generations_number")
mutation_rate = rospy.get_param("mutation_rate" )
last_elements = rospy.get_param("last_elements" )
pc = rospy.get_param("probability_rank" )
weight = rospy.get_param("weight" )
final_annealing = rospy.get_param("final_annealing" )
write_data = rospy.get_param("save_data_to_txt" )
rospack = rospkg.RosPack()
pat = rospack.get_path('position_optimizer')
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
if write_data:
os.mkdir(pat + "/data/"+st)
file1 = open(pat + "/data/"+st+"/base_info.txt","w+")
file1.write("n_ind: " + str(n_ind)+", n_trial: "+str(n_trial)+", mutation rate: "+str(mutation_rate)
+ ", last element: "+str(last_elements)+", Pc: "+str(pc)+", weight: "+str(weight))
file1.close()
generation = Generation(n_ind)
generation.set_pc(pc)
for ind in generation.population:
ind.set_weight(weight)
old_generation = copy.deepcopy(generation)
best_of_all = []
fitness_of_all_time = []
iter_time = []
mean_of_all_gen = []
generation_unsuccess = []
use_precomputed_path = False
end = False
n_gen=0
jj = 0
while jj < n_trial:
    # while True:  # TODO: only for timing tests
unsuccess = 0
time_iter_start = time.time()
n_gen += 1
jj = jj+1
if write_data:
file2 = open(pat + "/data/" + st + "/gen_"+str(n_gen) + ".txt", "w+")
file2.write("Generation,Fitness,Rank,Pick_x,Pick_y,Pick_theta,Place_x,Place_y,Place_theta,Pick_manip,Place_manip\n")
rospy.loginfo("---------------------- generation number: "+str(n_gen)+" -----------------------------")
for ind in generation.population:
ind.set_gen_n(n_gen)
if not ind.set_pick_place_fitness():
unsuccess = unsuccess+1
if not generation.probability():
rospy.logfatal("something not working")
generation.distribution()
iter = 0
maxiter = 2
# maxiter = rospy.get_param("max_planning_trial")
planned = False
while iter <= maxiter:
if not generation.population[-1].planTrajectory(use_precomputed_path):
iter = iter+1
use_precomputed_path = False
rospy.logdebug("iter number: "+str(iter))
else:
planned = True
break
rospy.logdebug("total iter number: "+str(iter))
if not planned:
rospy.loginfo("trying planning with old best solution")
if old_generation.population[-1].planTrajectory():
planned = True
generation.population[-1] = old_generation.population[-1]
else:
planned = False
if not planned:
rospy.logerr("solution not found, regenerate individuals")
if jj==1:
generation = Generation(n_ind)
generation.set_pc(pc)
for ind in generation.population:
ind.set_weight(weight)
jj = 0
else:
generation = copy.deepcopy(old_generation)
generation.selection()
for i in range(0, int(n_ind * mutation_rate)):
ind = random.randint(0, len(generation.children) - 1)
rospy.logdebug("mutant individual " + str(ind))
generation.mutation(ind)
if write_data:
file2.write( str(0)+", "+ str(0) + ", " + str(0) + ", " + str(0) + ", " + str(0) + ", "
+str(0)+", "+ str(0) + ", " + str(0)+ ", " + str(0) + ", " + str(0)+ ", " + str(0) + "\n")
file2.close()
continue
else:
rospy.logdebug("well done")
use_precomputed_path = True
generation.rank()
fitness_of_this_gen = []
for ind in generation.population:
# print ("id: " + str(ind.id) + ", fitness: " + str(ind.fitness) + ", probability: " + str(ind.prob) \
# + ", rank: " + str(ind.rank_probability) + ", manip: [ " + str(ind.pick_manipulability) + ", " +str(ind.place_manipulability) + "]")
if write_data:
file2.write( str(ind.id)+", "+ str(ind.fitness) + ", " + str(ind.rank_probability)
+ ", " + str(ind.chromosome[0])
+ ", " + str(ind.chromosome[1])
+ ", " + str(ind.chromosome[2])
+ ", " + str(ind.chromosome[3])
+ ", " + str(ind.chromosome[4])
+ ", " + str(ind.chromosome[5])
+ ", " + str(ind.pick_manipulability)+ ", " + str(ind.place_manipulability) + "\n")
fitness_of_all_time.append(ind.fitness)
fitness_of_this_gen.append(ind.fitness)
mean_of_all_gen.append(mean(fitness_of_this_gen))
best_of_all.append(copy.deepcopy(generation.population[-1]))
if jj == n_trial:
break
if len(best_of_all) > last_elements:
count = 0
for i in range(1, last_elements):
if best_of_all[-i].chromosome == best_of_all[-i-1].chromosome:
count += 1
if count == last_elements - 1:
end = True
if end == True:
rospy.loginfo("last " + str(last_elements) + " are equal. break")
break
generation.selection()
for i in range(0, int(n_ind*mutation_rate)):
ind = random.randint(0, len(generation.children) - 1)
rospy.loginfo("individuo "+str(ind)+" mutante")
generation.mutation(ind)
best_of_all[-1].set_pick_place_fitness()
old_generation = copy.deepcopy(generation)
iter_time.append(time.time()-time_iter_start)
generation_unsuccess.append(unsuccess)
old_best = copy.deepcopy(best_of_all[-1])
anneal_time = time.time()
if final_annealing:
h_pick, man_pi = annealing(best_of_all[-1],True)
h_place, man_pl = annealing(best_of_all[-1],False)
best_of_all[-1].chromosome_to_pose()
best_of_all[-1].seed_pick_joints = h_pick.joints[-1]
best_of_all[-1].seed_place_joints = h_place.joints[-1]
best_of_all[-1].set_pick_place_fitness()
# print "fitness: " + str(old_best.fitness) + " crom: " + str(old_best.chromosome) + " pick: "+ str(
# old_best.pick_manipulability) + " place: " + str(old_best.place_manipulability)
# print "fitness: " + str(best_of_all[-1].fitness) + " crom: " + str(best_of_all[-1].chromosome) + " pick: " + str(
# best_of_all[-1].pick_manipulability) + " place: " + str(best_of_all[-1].place_manipulability)
# print str(h_pick.manip)
# print str(h_pick.crom)
# print str(h_pick.joints)
# print str(h_place.manip)
# print str(h_place.crom)
# print str(h_place.joints)
if not best_of_all[-1].planTrajectory():
rospy.logfatal("something is wrong with the final position")
# print "fitness: " + str(best_of_all[-1].fitness) + " crom: " + str(best_of_all[-1].chromosome) + " pick: " + str(
# best_of_all[-1].pick_manipulability) + " place: " + str(best_of_all[-1].place_manipulability)
if write_data:
file3 = open(pat + "/data/"+st+"/pick_annealing.txt","w+")
file3.write("Pick_x,Pick_y,Pick_theta,Place_x,Place_y,Place_theta,manip\n")
for i in range(1,len(h_pick.manip)):
file3.write( str(h_pick.crom[i][0])
+ ", " + str(h_pick.crom[i][1])
+ ", " + str(h_pick.crom[i][2])
+ ", " + str(h_pick.crom[i][3])
+ ", " + str(h_pick.crom[i][4])
+ ", " + str(h_pick.crom[i][5])
+ ", " + str(h_pick.manip[i]) + "\n")
file3.close()
file4 = open(pat + "/data/"+st+"/place_annealing.txt","w+")
file4.write("Pick_x,Pick_y,Pick_theta,Place_x,Place_y,Place_theta,manip\n")
for i in range(1,len(h_place.manip)):
file4.write( str(h_place.crom[i][0])
+ ", " + str(h_place.crom[i][1])
+ ", " + str(h_place.crom[i][2])
+ ", " + str(h_place.crom[i][3])
+ ", " + str(h_place.crom[i][4])
+ ", " + str(h_place.crom[i][5])
+ ", " + str(h_place.manip[i]) + "\n")
file4.close()
end_time = time.time()
if write_data:
file5 = open(pat + "/data/"+st+"/time.txt","w+")
file5.write("total_time: " + str(end_time-ts)+", GA_time: "+str(anneal_time-ts)+", SA_time: "+str(end_time-anneal_time))
for t in iter_time:
file5.write("\niter time: " + str(t))
for u in generation_unsuccess:
file5.write("\nunsuccesfull individuals: " + str(u))
file5.close()
rospy.loginfo("best pick pose: \n " + str(best_of_all[-1].pick_pose ))
rospy.loginfo("\nbest place pose: \n " + str(best_of_all[-1].place_pose ))
rospy.loginfo("\nseed pick pose: \n " + str(best_of_all[-1].seed_pick_joints ))
rospy.loginfo("\nseed place pose: \n " + str(best_of_all[-1].seed_place_joints ))
file10 = open(pat + "/data/optimized_poses.txt", "w+")
file10.write("best pick pose: \n " + str(best_of_all[-1].pick_pose)
+ "\n\nbest place pose: \n " + str(best_of_all[-1].place_pose)
+ "\n\nseed pick pose: \n " + str(best_of_all[-1].seed_pick_joints)
+ "\n\nseed place pose: \n " + str(best_of_all[-1].seed_place_joints))
file10.close()
#
# n=0
# plt.figure()
# for i in range(0,len(mean_of_all_gen)):
# n+=1
# print "gen "+str(n)+" max fit: "+str(best_of_all[i].fitness) + " mean: " + str(mean_of_all_gen[i])+ " crom: " + str(best_of_all[i].chromosome)
# plt.plot(n, mean_of_all_gen[i], "ro")
# plt.plot(n, best_of_all[i].fitness, "bo")
# plt.xlabel("x generation")
# plt.ylabel("y mean, fitness")
# plt.legend()
# plt.grid()
# plt.show()
#
#
# plt.figure()
# n=0
# for i in range(0,len(h_pick.manip)):
# n+=1
# plt.plot(n, h_pick.manip[i], "ro")
# plt.plot(n, h_place.manip[i], "bo")
# plt.xlabel("x generation")
# plt.ylabel("y maip pick")
# plt.legend()
# plt.grid()
# plt.show()
|
{"hexsha": "fd8fef2c5de1f92f757bc0cf5f715e81523eea28", "size": 11735, "ext": "py", "lang": "Python", "max_stars_repo_path": "position_optimizer/script/genetic_alg.py", "max_stars_repo_name": "CNR-STIIMA-IRAS/position_optimizer", "max_stars_repo_head_hexsha": "b246cd02de6e3f3d1098eb4f7171f5a29b6b4f36", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-07T09:13:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-07T09:13:21.000Z", "max_issues_repo_path": "position_optimizer/script/genetic_alg.py", "max_issues_repo_name": "CNR-STIIMA-IRAS/position_optimizer", "max_issues_repo_head_hexsha": "b246cd02de6e3f3d1098eb4f7171f5a29b6b4f36", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "position_optimizer/script/genetic_alg.py", "max_forks_repo_name": "CNR-STIIMA-IRAS/position_optimizer", "max_forks_repo_head_hexsha": "b246cd02de6e3f3d1098eb4f7171f5a29b6b4f36", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.4920127796, "max_line_length": 152, "alphanum_fraction": 0.5290157648, "include": true, "reason": "import numpy", "num_tokens": 2917}
|
#include <boost/mpl/aux_/common_name_wknd.hpp>
|
{"hexsha": "c1c608eb40904a2fcf70ab3b0c9e805ba3125bff", "size": 47, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_mpl_aux__common_name_wknd.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_mpl_aux__common_name_wknd.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_mpl_aux__common_name_wknd.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.5, "max_line_length": 46, "alphanum_fraction": 0.8085106383, "num_tokens": 15}
|
from functools import reduce
import logging
import numpy as np
def prune_sum_eq_len(domain):
""" Prune if sum(val) or sum(pos * val) can't equal length """
min_sum, max_sum = domain.estimate("sum")
min_mult, max_mult = domain.estimate("mult")
constraints = [
min_sum > domain.length,
max_sum < domain.length,
min_mult > domain.length,
max_mult < domain.length,
]
pad = reduce(lambda x, y: x | y, constraints)
pad = pad & np.isnan(domain.grid)
domain.grid = np.where(pad, ~pad, domain.grid)
return pad.any()
def prune_fill_last_col(domain):
""" Fill with 1 if last available position in column """
pad = np.isnan(domain.grid) * (np.isnan(domain.grid).sum(0) == 1)
domain.grid = np.where(pad, pad, domain.grid)
return pad.any()
def prune_fill_last_number(domain):
""" Fill last missing number """
if np.isnan(domain.to_numbers()).sum() == 1:
position = np.nanmax(np.where(
np.isnan(domain.to_numbers()),
domain.numbers, # index
np.nan)).astype(int)
domain[position] = domain.length - np.nansum(domain.to_numbers())
return True
return False
def prune_less_than_possible(domain):
""" Fill with 0 values less than current number of occurences """
row_sum = np.nansum(domain.grid, 1)
pad = domain.missing_values() < row_sum
domain.grid = np.where(pad, ~pad, domain.grid)
return pad.any()
def prune_row_ready(domain):
""" Decide number at position if already filled corresponding row """
ready_rows = np.where(
np.isnan(domain.to_numbers()),
np.isnan(domain.grid).sum(1) == 0,
0)
row_sum = np.nansum(domain.grid, 1)
filler = np.where(ready_rows, row_sum, np.nan)
for position, value in enumerate(filler):
if ~np.isnan(value):
domain[position] = value
return False
def prune(domain):
""" Prune using listed functions """
logging.debug("Input domain:\n%s" % str(domain))
constraints = [
prune_less_than_possible,
prune_sum_eq_len,
prune_fill_last_col,
prune_fill_last_number,
prune_row_ready,
]
changed = feasible = True
while changed and feasible:
changed = False
not_changed_cnt = 0 # Exit if doesn't prune
for func in constraints:
not_changed_cnt += 1
logging.debug(
"Domain before {}:\n{}".format(func.__name__, str(domain)))
pruned = func(domain)
if pruned:
not_changed_cnt = 0
changed = changed or pruned
logging.debug(
"Domain after {}:\n{}".format(func.__name__, str(domain)))
feasible = domain.feasibility_test().all()
too_long_pruning = not_changed_cnt > len(constraints)
filled = np.isnan(domain.grid).sum() == 0
logging.debug("Filled:\n{}".format(filled))
if too_long_pruning or filled or not feasible:
changed = False
break
logging.debug("Feasible: {}".format(feasible))
logging.debug("Domain:\n%s" % str(domain))
return feasible
|
{"hexsha": "618b95091af2636414fab559e5d29123e1c26eb9", "size": 3207, "ext": "py", "lang": "Python", "max_stars_repo_path": "matrices/prune.py", "max_stars_repo_name": "Kopytok/magic_series", "max_stars_repo_head_hexsha": "679ee7d52c93871cbdf3c499656892a18c36ad2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "matrices/prune.py", "max_issues_repo_name": "Kopytok/magic_series", "max_issues_repo_head_hexsha": "679ee7d52c93871cbdf3c499656892a18c36ad2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matrices/prune.py", "max_forks_repo_name": "Kopytok/magic_series", "max_forks_repo_head_hexsha": "679ee7d52c93871cbdf3c499656892a18c36ad2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1170212766, "max_line_length": 75, "alphanum_fraction": 0.6083567197, "include": true, "reason": "import numpy", "num_tokens": 739}
|
from __future__ import print_function
import os
import sys
import random
from time import strftime, gmtime, time
from report_result import ReportResult
from configuration import Conf
from archive_results import ArchiveResults
import argparse
import shutil
import pickle
import json
from keras.preprocessing.text import Tokenizer
from keras import backend as K
from keras.callbacks import EarlyStopping
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
import threading
from scipy.stats import rankdata
import logging
import numpy as np
import tensorflow as tf
import pandas as pd
def clear_session():
K.clear_session()
def remove_plots():
shutil.rmtree('plots')
class Evaluator:
def __init__(self, conf_json, model, optimizer=None, name=None):
try:
data_path = os.environ['STACK_OVER_FLOW_QA']
except KeyError:
print("STACK_OVER_FLOW_QA is not set. Set it to your clone of https://github.com/mrezende/stack_over_flow_python")
sys.exit(1)
self.conf = Conf(conf_json)
self.model = model(self.conf)
if name is None:
self.name = self.conf.name() + '_' + model.__name__
logger.info(f'Initializing Evaluator ...')
logger.info(f'Name: {self.name}')
else:
self.name = name
self.path = data_path
self.params = self.conf.training_params()
optimizer = self.params['optimizer'] if optimizer is None else optimizer
self.model.compile(optimizer)
self.answers = self.load('answers.json') # self.load('generated')
self.training_data = self.load('training.json')
self.dev_data = self.load('dev.json')
self.eval_data = self.load('eval.json')
self._vocab = None
self._reverse_vocab = None
self._eval_sets = None
self.top1_ls = []
self.mrr_ls = []
##### Resources #####
def save_conf(self):
self.conf.save_conf()
def load(self, name):
return json.load(open(os.path.join(self.path, name), 'r'))
def vocab(self):
if self._vocab is None:
reverse_vocab = self.reverse_vocab()
self._vocab = dict((v, k.lower()) for k, v in reverse_vocab.items())
return self._vocab
def reverse_vocab(self):
if self._reverse_vocab is None:
samples = self.load('samples_for_tokenizer.json')
tokenizer = Tokenizer()
tokenizer.fit_on_texts(samples)
self._reverse_vocab = tokenizer.word_index
return self._reverse_vocab
##### Loading / saving #####
def save_epoch(self, name = None):
if not os.path.exists('models/'):
os.makedirs('models/')
suffix = self.name if name is None else name
logger.info(f'Saving weights: models/weights_epoch_{suffix}.h5')
self.model.save_weights(f'models/weights_epoch_{suffix}.h5', overwrite=True)
def load_epoch(self, name = None):
suffix = self.name if name is None else name
assert os.path.exists(f'models/weights_epoch_{suffix}.h5'), f'Weights at epoch {suffix} not found'
logger.info(f'Loading weights: models/weights_epoch_{suffix}.h5')
self.model.load_weights(f'models/weights_epoch_{suffix}.h5')
##### Converting / reverting #####
def convert(self, words):
rvocab = self.reverse_vocab()
if type(words) == str:
words = words.strip().lower().split(' ')
return [rvocab.get(w, 0) for w in words]
def revert(self, indices):
vocab = self.vocab()
return [vocab.get(i, 'X') for i in indices]
##### Padding #####
def padq(self, data):
return self.pad(data, self.conf.question_len())
def pada(self, data):
return self.pad(data, self.conf.answer_len())
def pad(self, data, len=None):
from keras.preprocessing.sequence import pad_sequences
return pad_sequences(data, maxlen=len, padding='post', truncating='post', value=0)
##### Training #####
def get_time(self):
return strftime('%Y-%m-%d %H:%M:%S', gmtime())
def train_and_evaluate(self, mode='train'):
val_losses = []
if mode == 'train':
val_loss = self.train(self.training_data)
val_losses.append(val_loss)
logger.info(f'Val loss: {val_loss}')
elif mode == 'evaluate':
results = {'top1': [], 'mrr': [], 'positions' : []}
logger.info('Evaluating...')
for i in range(0, 20):
top1, mrr, positions = self.evaluate(shuffle=True)
results['top1'].append(top1)
results['mrr'].append(mrr)
results['positions'].append(positions)
logger.info(f'Iteration: {i}: Top-1 Precision {top1}, MRR {mrr}, Positions: {positions}')
df = pd.DataFrame(results)
top1_desc = df.describe()['top1']
mrr_desc = df.describe()['mrr']
# save histogram plot
report = ReportResult({'positions': np.append([], results['positions'])}, index=[i for i in range(1, len(np.append([], results['positions'])) + 1)], plot_name = f'histogram_{self.name}')
report.generate_histogram()
report.save_plot()
logger.info(f'Top1 Description: {top1_desc}')
logger.info(f'MRR Description: {mrr_desc}')
def evaluate(self, X = None, name = None, shuffle=False):
self.load_epoch(name)
data = self.eval_data if X is None else X
top1, mrr, positions = self.get_score(data, verbose=True, shuffle=shuffle)
return top1, mrr, positions
def train(self, X):
batch_size = self.params['batch_size']
validation_split = self.params['validation_split']
nb_epoch = self.params['nb_epoch']
# top_50 = self.load('top_50')
questions = list()
good_answers = list()
for j, q in enumerate(X):
questions += [q['question']] * len(q['good_answers'])
good_answers += q['good_answers']
logger.info('Began training at %s on %d samples' % (self.get_time(), len(questions)))
questions = self.padq(questions)
good_answers = self.pada(good_answers)
# According to NN Design Book:
# For this reason it is best to try several different initial guesses in order to ensure that
# a global minimum has been obtained.
best_top1_mrr = {'top1': 0, 'mrr': 0}
hist_losses = {'val_loss': [], 'loss': []}
for i in range(1, nb_epoch + 1):
bad_answers = self.pada(random.sample(self.answers, len(good_answers)))
logger.info(f'Fitting epoch {i}')
hist = self.model.fit([questions, good_answers, bad_answers], epochs=1, batch_size=batch_size,
validation_split=validation_split, verbose=1)
val_loss = hist.history['val_loss'][0]
loss = hist.history['loss'][0]
hist_losses['val_loss'].append(val_loss)
hist_losses['loss'].append(loss)
# temporary weights from last training
self.save_epoch('aux')
# check MRR
top1, mrr, positions = self.evaluate(self.dev_data, 'aux')
if mrr > best_top1_mrr['mrr']:
best_top1_mrr['top1'] = top1
best_top1_mrr['mrr'] = mrr
logger.info(f'Epoch {i} Loss = {loss}, Validation Loss = {val_loss} ' +
f'(Best: TOP1 = {top1}, MRR = {mrr})')
# saving weights
self.save_epoch()
# Article: "Summarizing Source Code using a Neural Attention Model"
            # terminate training when the loss goes
            # below 0.001.
if loss < 0.001:
break
# save plot val_loss, loss
report = ReportResult(hist_losses, [i for i in range(1, len(hist_losses['loss']) + 1)], self.name)
plot = report.generate_line_report()
report.save_plot()
logger.info(f'saving loss, val_loss plot')
# save conf
self.save_conf()
clear_session()
return val_loss
def get_score(self, X, verbose=False, shuffle=False):
c_1, c_2 = 0, 0
random_bad_answers = random.sample(self.answers, 49)
logger.info(f'len X: {len(X)}')
positions = []
for i, d in enumerate(X):
bad_answers = d['bad_answers'] if shuffle is False else random_bad_answers
answers = d['good_answers'] + bad_answers
answers = self.pada(answers)
question = self.padq([d['question']] * len(answers))
sims = self.model.predict([question, answers])
n_good = len(d['good_answers'])
max_r = np.argmax(sims)
max_n = np.argmax(sims[:n_good])
r = rankdata(sims, method='max')
if verbose:
min_r = np.argmin(sims)
amin_r = answers[min_r]
amax_r = answers[max_r]
amax_n = answers[max_n]
logger.info(' ----- begin question ----- ')
logger.info(' '.join(self.revert(d['question'])))
logger.info('Predicted: ({}) '.format(sims[max_r]) + ' '.join(self.revert(amax_r)))
logger.info('Expected: ({}) Rank = {} '.format(sims[max_n], r[max_n]) + ' '.join(self.revert(amax_n)))
logger.info('Worst: ({})'.format(sims[min_r]) + ' '.join(self.revert(amin_r)))
logger.info(' ----- end question ----- ')
c_1 += 1 if max_r == max_n else 0
position = r[max_r] - r[max_n] + 1
c_2 += 1 / float(position)
positions.append(position)
top1 = c_1 / float(len(X))
mrr = c_2 / float(len(X))
print('Top-1 Precision: %f' % top1)
print('MRR: %f' % mrr)
return top1, mrr, positions
def save_score(self):
with open('results_conf.txt', 'a+') as append_file:
conf_json, name = self.conf.conf_json_and_name()
top1_precisions = ','.join(self.top1_ls)
mrrs = ','.join(self.mrr_ls)
append_file.write(f'{name}; {conf_json}; top-1 precision: {top1_precisions}; MRR: {mrrs}\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='run question answer selection')
parser.add_argument('--conf_file', metavar='CONF_FILE', type=str, default="stack_over_flow_conf.json", help='conf json file: stack_over_flow_conf.json')
parser.add_argument('--mode', metavar='MODE', type=str, default="train", help='mode: train|evaluate')
parser.add_argument('--conf_name', metavar='CONF_NAME', type=str, default=None, help='conf_name: part of name of weights file')
parser.add_argument('--model', metavar='MODEL', type=str, default='cnn-lstm',
help='model name: embedding|cnn|cnn-lstm|rnn-attention')
args = parser.parse_args()
# configure logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
logging.root.setLevel(level=logging.INFO)
logger.info('running %s' % ' '.join(sys.argv))
conf_file = args.conf_file
mode = args.mode
conf_name = args.conf_name
model = args.model
confs = json.load(open(conf_file, 'r'))
from keras_models import EmbeddingModel, ConvolutionModel, ConvolutionalLSTM, AttentionModel
for conf in confs:
logger.info(f'Conf.json: {conf}')
evaluator = None
if model == 'cnn-lstm':
evaluator = Evaluator(conf, model=ConvolutionalLSTM, name=conf_name)
elif model == 'embedding':
evaluator = Evaluator(conf, model=EmbeddingModel, name=conf_name)
elif model == 'cnn':
evaluator = Evaluator(conf, model=ConvolutionModel, name=conf_name)
elif model == 'rnn-attention':
evaluator = Evaluator(conf, model=AttentionModel, name=conf_name)
# train and evaluate the model
if evaluator is not None:
evaluator.train_and_evaluate(mode)
else:
parser.print_help()
sys.exit()
|
{"hexsha": "fee715223af227da11653dfca410100964d206df", "size": 12345, "ext": "py", "lang": "Python", "max_stars_repo_path": "stack_over_flow_qa_eval.py", "max_stars_repo_name": "mrezende/keras-language-modeling", "max_stars_repo_head_hexsha": "4cbfc161d55c5b903e667eedb476f9c8f1473894", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "stack_over_flow_qa_eval.py", "max_issues_repo_name": "mrezende/keras-language-modeling", "max_issues_repo_head_hexsha": "4cbfc161d55c5b903e667eedb476f9c8f1473894", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stack_over_flow_qa_eval.py", "max_forks_repo_name": "mrezende/keras-language-modeling", "max_forks_repo_head_hexsha": "4cbfc161d55c5b903e667eedb476f9c8f1473894", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2916666667, "max_line_length": 198, "alphanum_fraction": 0.5977318753, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2870}
|
extract.controls <- function(rg, probes, verbose=F) {
stopifnot(is.rg(rg))
x.mean <- function(x, na.rm=T) {
if (length(x) <= 1)
stop("It seems that the IDAT files do not match the supplied chip annotation.")
mean(x,na.rm=na.rm)
}
x.which <- function(x) {
i <- which(x)
if (length(i) == 0)
stop("It seems that the IDAT files do not match the supplied chip annotation")
i
}
msg(verbose=verbose)
probes.G <- probes[x.which(probes$dye == "G"),]
probes.R <- probes[x.which(probes$dye == "R"),]
rg$R <- rg$R[match(probes.R$address, rownames(rg$R)),]
rg$G <- rg$G[match(probes.G$address, rownames(rg$G)),]
bisulfite2 <- x.mean(rg$R[x.which(probes.R$target == "BISULFITE CONVERSION II"), "Mean"])
bisulfite1.G <- rg$G[x.which(probes.G$target == "BISULFITE CONVERSION I"
& probes.G$name
%in% sprintf("BS Conversion I%sC%s", c(" ", "-", "-"), 1:3)),"Mean"]
bisulfite1.R <- rg$R[x.which(probes.R$target == "BISULFITE CONVERSION I"
& probes.R$name %in% sprintf("BS Conversion I-C%s", 4:6)),"Mean"]
bisulfite1 <- x.mean(bisulfite1.G + bisulfite1.R)
stain.G <- rg$G[x.which(probes.G$target == "STAINING" & probes.G$name == "Biotin (High)"),"Mean"]
stain.R <- rg$R[x.which(probes.R$target == "STAINING" & probes.R$name == "DNP (High)"),"Mean"]
extension.R <- rg$R[x.which(probes.R$target == "EXTENSION"
& probes.R$name %in% sprintf("Extension (%s)", c("A", "T"))),"Mean"]
extension.G <- rg$G[x.which(probes.G$target == "EXTENSION"
& probes.G$name %in% sprintf("Extension (%s)", c("C", "G"))),"Mean"]
hybe <- rg$G[x.which(probes.G$target == "HYBRIDIZATION"),"Mean"]
targetrem <- rg$G[x.which(probes.G$target %in% "TARGET REMOVAL"),"Mean"]
nonpoly.R <- rg$R[x.which(probes.R$target == "NON-POLYMORPHIC"
& probes.R$name %in% sprintf("NP (%s)", c("A", "T"))),"Mean"]
nonpoly.G <- rg$G[x.which(probes.G$target == "NON-POLYMORPHIC"
& probes.G$name %in% sprintf("NP (%s)", c("C", "G"))),"Mean"]
spec2.G <- rg$G[x.which(probes.G$target == "SPECIFICITY II"),"Mean"]
spec2.R <- rg$R[x.which(probes.R$target == "SPECIFICITY II"),"Mean"]
spec2.ratio <- x.mean(spec2.G,na.rm=T)/x.mean(spec2.R,na.rm=T)
name <- sprintf("GT Mismatch %s (PM)", 1:3)
spec1.G <- rg$G[x.which(probes.G$target == "SPECIFICITY I" & probes.G$name %in% name),"Mean"]
spec1.Rp <- rg$R[x.which(probes.R$target == "SPECIFICITY I" & probes.R$name %in% name),"Mean"]
spec1.ratio1 <- x.mean(spec1.Rp,na.rm=T)/x.mean(spec1.G,na.rm=T)
name <- sprintf("GT Mismatch %s (PM)", 4:6)
spec1.Gp <- rg$G[x.which(probes.G$target == "SPECIFICITY I" & probes.G$name %in% name),"Mean"]
spec1.R <- rg$R[x.which(probes.R$target == "SPECIFICITY I" & probes.R$name %in% name),"Mean"]
spec1.ratio2 <- x.mean(spec1.Gp,na.rm=T)/x.mean(spec1.R,na.rm=T)
spec1.ratio <- (spec1.ratio1 + spec1.ratio2)/2
normA <- x.mean(rg$R[x.which(probes.R$target == "NORM_A"),"Mean"], na.rm = TRUE)
normT <- x.mean(rg$R[x.which(probes.R$target == "NORM_T"),"Mean"], na.rm = TRUE)
normC <- x.mean(rg$G[x.which(probes.G$target == "NORM_C"),"Mean"], na.rm = TRUE)
normG <- x.mean(rg$G[x.which(probes.G$target == "NORM_G"),"Mean"], na.rm = TRUE)
dye.bias <- (normC + normG)/(normA + normT)
probs <- c(0.01, 0.5, 0.99)
oob.G <- quantile(rg$G[with(probes.G, x.which(target == "OOB")),"Mean"], na.rm=T, probs=probs)
oob.R <- quantile(rg$R[with(probes.R, x.which(target == "OOB")),"Mean"], na.rm=T, probs=probs)
if (oob.R[["50%"]] < 1)
oob.R[["50%"]] <- 1
oob.ratio <- oob.G[["50%"]]/oob.R[["50%"]]
c(bisulfite1=bisulfite1,
bisulfite2=bisulfite2,
extension.G=extension.G,
extension.R=extension.R,
hybe=hybe,
stain.G=stain.G,
stain.R=stain.R,
nonpoly.G=nonpoly.G,
nonpoly.R=nonpoly.R,
targetrem=targetrem,
spec1.G=spec1.G,
spec1.R=spec1.R,
spec2.G=spec2.G,
spec2.R=spec2.R,
spec1.ratio1=spec1.ratio1,
spec1.ratio=spec1.ratio,
spec2.ratio=spec2.ratio,
spec1.ratio2=spec1.ratio2,
normA=normA,
normC=normC,
normT=normT,
normG=normG,
dye.bias=dye.bias,
oob.G=oob.G,
oob.ratio=oob.ratio)
}
|
{"hexsha": "234f8de084db7a0811c171d754d2be3bb980441a", "size": 4512, "ext": "r", "lang": "R", "max_stars_repo_path": "R/extract-controls.r", "max_stars_repo_name": "RichardJActon/meffil", "max_stars_repo_head_hexsha": "8cb1d18fb1f5e350a6774116c5b9571fed1c5067", "max_stars_repo_licenses": ["Artistic-2.0"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2015-04-21T18:35:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T10:48:31.000Z", "max_issues_repo_path": "R/extract-controls.r", "max_issues_repo_name": "RichardJActon/meffil", "max_issues_repo_head_hexsha": "8cb1d18fb1f5e350a6774116c5b9571fed1c5067", "max_issues_repo_licenses": ["Artistic-2.0"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2015-02-17T11:13:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T21:48:56.000Z", "max_forks_repo_path": "R/extract-controls.r", "max_forks_repo_name": "RichardJActon/meffil", "max_forks_repo_head_hexsha": "8cb1d18fb1f5e350a6774116c5b9571fed1c5067", "max_forks_repo_licenses": ["Artistic-2.0"], "max_forks_count": 20, "max_forks_repo_forks_event_min_datetime": "2015-11-17T22:40:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T16:10:42.000Z", "avg_line_length": 40.6486486486, "max_line_length": 101, "alphanum_fraction": 0.5598404255, "num_tokens": 1503}
|
import glob
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
plt.rc("text", usetex=True)
import numpy as np
from astropy.table import Table
from astropy.cosmology import Planck15
def plot_lc(f, name=None):
dt = []
lum = []
with open(f, "r") as inputf:
for line in inputf.readlines():
if len(line) > 40:
dt_s = float(line.split('\t')[0])
dt.append(dt_s/86400)
flux = float(line.split('\t')[3])
lum.append(flux * 4 * np.pi * d**2)
if name:
plt.plot(dt, lum, c='grey', alpha=1.0, lw=2, zorder=1)
else:
# just thin grey
plt.plot(dt, lum, c='grey', alpha=0.1, lw=1)
return dt, lum
# ZTF18abukavn
d = Planck15.luminosity_distance(z=0.033).cgs.value
ratio = 3.27E-11 # count-to-flux rate, erg/cm2/ct
xrtlc = Table.read(
"/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data/from_brad.dat",
format='ascii')
# Brad's zero-point is: MJD=58370.588137 = 2018 Sep 09 at 14:06:55.064 UT.
# Our zero-point is 2458370.6473
# so the difference is 0.44 days, or 38088.31681399606 sec
dt_sec = xrtlc['col1']+38088.31681399606
ct = xrtlc['col4']
lum = ct * ratio * 4 * np.pi * d**2
dt_day = dt_sec/86400
plt.scatter(dt_day, lum, marker='v', c='k', zorder=5)
print(dt_day)
plt.text(dt_day[0], lum[0]*1.2, 'AT2018gep', fontsize=14,
horizontalalignment='center', verticalalignment='bottom')
# Chandra
plt.scatter(16, 3E-15*4*np.pi*d**2, marker='v', c='k', zorder=5)
print(3E-15*4*np.pi*d**2)
plt.scatter(70, 3E-15*4*np.pi*d**2, marker='v', c='k', zorder=5)
# All the GRBs
data_dir = "/Users/annaho/Dropbox/Projects/Research/ZTF18abukavn/data/xrtlc"
# flist = glob.glob(data_dir + "/*_xrt_bin.txt")
# for f in flist:
# plot_lc(f)
# Individual LLGRBs
f = data_dir + "/060218_xrt_bin.txt"
dt, lum = plot_lc(f, name="060218")
plt.text(
0.06, 1.26E46, "060218",
horizontalalignment='center',
verticalalignment='bottom', fontsize=12)
f = data_dir + "/100316d_xrt_bin.txt"
dt, lum = plot_lc(f, name="100316D")
plt.text(
dt[0], lum[0]/2, '100316D',
horizontalalignment='left',
verticalalignment='top', fontsize=12)
f = data_dir + "/030329_xray.dat"
# this one has a different file
dat = np.loadtxt(f)
dt = dat[:,0]
flux = dat[:,1] * 1E-12
lum = flux * 4 * np.pi * d**2
plt.plot(dt, lum, c='grey', lw=2, zorder=1)
plt.text(
dt[0], lum[0], '030329',
horizontalalignment='center',
verticalalignment='bottom', fontsize=12)
# 2009bb
# Soderberg 2009
# detection from Chandra
d_09bb = 40*3.086E24
plt.scatter(31, 4.4E39, marker='o', c='grey', s=20)
plt.text(31, 4.4E39*2, "2009bb", fontsize=12,
horizontalalignment='center', verticalalignment='center')
# upper limits from Swift/XRT
# dt = np.array([5, 19, 23, 31])
# flim = np.array([1.3E-13, 1.7E-13, 2.5E-13, 3E-13])
# llim = 4 * np.pi * d_09bb**2 * flim
# plt.scatter(dt, llim, marker='v', c='grey', s=20)
# plt.plot(dt, llim, c='grey', lw=2)
# 1998bw
# The original four points of 980425 reported by Pian et al. (2000)
# Fig 7b
d_98bw = 38*3.086E24
dt = np.array([1, 2, 7.5, 200])
f = np.array([4.3E-13, 4.2E-13, 2.8E-13, 1.7E-13])
lum = 4 * np.pi * d_98bw**2 * f
# Kouvelioutou 2004, Chandra: Day 1281, 1.2E39
# Sixth point is XMM measurement reported by Pian et al. (2004) from 2002 March 28,
# but XMM can't resolve two sources, so this luminosity value includes two sources
# They re-analyzed this data...
# dt = 1281
# lum = 1.2E39
plt.plot(dt, lum, c='grey', lw=2, zorder=1)
plt.text(
dt[0], lum[0], '1998bw',
horizontalalignment='right',
verticalalignment='center', fontsize=12)
# 16asu limits
dt = np.array([7.4, 13.4, 19.2])
llim = np.array([2.5E43, 1.1E43, 1.5E43])
plt.scatter(dt, llim, marker='v', c='grey', s=40, facecolor='white')
plt.text(dt[0]*1.2, llim[0], '16asu', horizontalalignment='left', fontsize=12)
# One point for iPTF17cw
# Chandra: 8 Feb 2017
plt.scatter(41.8, 1E41, marker='o', c='grey', s=20)
plt.text(41.8, 1E41, '17cw', fontsize=12, horizontalalignment='right')
# Limits for 2012ap
plt.scatter(24, 2.4E39, marker='v', c='grey', s=40, facecolor='white')
plt.text(
24/1.1, 2.4E39, '2012ap', verticalalignment='center',
horizontalalignment='right', fontsize=12)
# Formatting
plt.xlabel(r"$\Delta t$ [days]", fontsize=14)
plt.ylabel(
r"X-ray luminosity [0.3-10 keV, erg\,s${}^{-1}$]", fontsize=14)
plt.yscale('log')
#plt.xscale('log')
plt.ylim(1E39, 1E47)
plt.xlim(-5, 46)
plt.tick_params(axis='both', labelsize=14)
plt.tight_layout()
plt.show()
#plt.savefig("xray_lc.png")
|
{"hexsha": "65e1ea93cc1010b22d6b6fbb8ec8525dcb2ba477", "size": 4587, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/extra_plots/xray_lc.py", "max_stars_repo_name": "annayqho/SN2018gep", "max_stars_repo_head_hexsha": "93cd64a1aab326771199f9093339df5bc4eb8002", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-02T09:51:36.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-17T22:16:24.000Z", "max_issues_repo_path": "code/extra_plots/xray_lc.py", "max_issues_repo_name": "steveschulze/SN2018gep", "max_issues_repo_head_hexsha": "93cd64a1aab326771199f9093339df5bc4eb8002", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/extra_plots/xray_lc.py", "max_forks_repo_name": "steveschulze/SN2018gep", "max_forks_repo_head_hexsha": "93cd64a1aab326771199f9093339df5bc4eb8002", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-11T18:43:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-11T18:43:21.000Z", "avg_line_length": 30.9932432432, "max_line_length": 83, "alphanum_fraction": 0.6468279922, "include": true, "reason": "import numpy,from astropy", "num_tokens": 1700}
|
#include <boost/serialization/ephemeral.hpp>
|
{"hexsha": "89d0a3de9a783cd7a96fdf523f9950fbe5f45ffd", "size": 45, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_serialization_ephemeral.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_serialization_ephemeral.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_serialization_ephemeral.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 22.5, "max_line_length": 44, "alphanum_fraction": 0.8222222222, "num_tokens": 11}
|
\documentclass[preprint]{sigplanconf}
% The following \documentclass options may be useful:
% preprint Remove this option only once the paper is in final form.
% 10pt To set in 10-point type instead of 9-point.
% 11pt To set in 11-point type instead of 9-point.
% numbers To obtain numeric citation style instead of author/year.
\usepackage{amsmath}
\usepackage[pdftex]{graphicx}
\usepackage{tipa}
\usepackage{float}
\graphicspath{{images/}}
\newcommand{\cL}{{\cal L}}
\begin{document}
\special{papersize=8.5in,11in}
\setlength{\pdfpageheight}{\paperheight}
\setlength{\pdfpagewidth}{\paperwidth}
\conferenceinfo{CONF 'yy}{Month d--d, 20yy, City, ST, Country}
\copyrightyear{20yy}
\copyrightdata{978-1-nnnn-nnnn-n/yy/mm}
\copyrightdoi{nnnnnnn.nnnnnnn}
% Uncomment the publication rights you want to use.
%\publicationrights{transferred}
%\publicationrights{licensed} % this is the default
%\publicationrights{author-pays}
\titlebanner{banner above paper title} % These are ignored unless
\preprintfooter{short description of paper} % 'preprint' option specified.
\title{High-Performance Persistent Graphs}
\subtitle{Storing graphs in key-mapped tries with lazy copying persistence}
\authorinfo{John Moody}
{Colorado College '16}
{john.moody@coloradocollege.edu}
\authorinfo{Benjamin Ylvisaker}
{Assistant Professor, Colorado College}
{ben.ylvisaker@coloradocollege.edu}
\maketitle
\begin{abstract}
In the world of persistent data structures, there exist few high-performance graph libraries.
We propose in this paper a C library which stores an application-controlled persistent graph in a key-mapped trie, using chunking and lazy copying to conserve memory and increase performance.
We evaluate the memory footprint and read/write performance of this structure under both mutable and persistent updates.
\end{abstract}
\category{CR-number}{subcategory}{third-level}
\keywords
persistent data structures, graphs, hash array mapped trie
\section{Introduction}
Graphs are one of the basic data structures in the programmer's arsenal.
A graph defines some number of nodes and edges, which connect nodes together.
Graphs have wide-ranging applications, from computational models to databases, networking and pathfinding.
To get information about nodes or edges in a graph within a program we typically use an array or some manner of key-value store.
A graph node's value is usually a list of adjacent nodes, which are either predecessors to that node or successors.
The value associated with an edge is typically just two identifiers for its predecessor and successor.
This paper explores storing graphs as a persistent data structure, which has some benefits over the traditional way of storing data for certain applications.
We will give a background on persistent data structures, and then propose a structure for persistent graphs with strong performance characteristics for various operations and optimized use of memory.
\section{Persistence}
What does it mean for data to be stored in a persistent way?
A piece of data is persistent if it does not change.
Consider a linked list in memory, Jeff:
\begin{figure}[H]
\includegraphics[scale=.35]{linkedlist}
\centering
\end{figure}
There are a number of ways to make an edit to this structure.
If we wanted to change the frontal value of Jeff from 1 to 5, and we do not need the original any longer, we may simply change it:
\begin{figure}[H]
\includegraphics[scale=.35]{linkedlist2}
\centering
\end{figure}
If we want this notion of persistence to apply to Jeff, however, Jeff cannot change.
Instead, we need to come up with a way to change the front value of Jeff to 5 while keeping the original version of Jeff with 1 at the front intact.
Enter "New Jeff":
\begin{figure}[H]
\includegraphics[scale=.35]{linkedlist3}
\centering
\end{figure}
Jeff, we notice, has not changed.
New Jeff preserves the parts of Jeff's structure that they have in common.
Persistent data structures, then, refer to structures like Jeff and New Jeff, which, after being created, will always remain the same.
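As a minimal sketch of this idea in C (the cell layout here is hypothetical, not the interface of the library described later), producing ``New Jeff'' allocates a single fresh head cell and shares the rest of the original spine:
\begin{verbatim}
#include <stdlib.h>
/* Illustrative persistent cons cell; the layout is a placeholder. */
typedef struct cell {
    int value;
    struct cell *next;   /* shared tail, never mutated once built */
} cell;
static cell *cons(int value, cell *next) {
    cell *c = malloc(sizeof *c);
    c->value = value;
    c->next = next;
    return c;
}
/* "Change" the front of a list without touching the original:
   one new cell is allocated and the entire tail is shared. */
static cell *set_front(cell *jeff, int value) {
    return cons(value, jeff->next);
}
\end{verbatim}
Calling \texttt{set\_front(jeff, 5)} yields New Jeff while Jeff itself is left untouched.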
\subsection{Trees and Reference Counting}
Let us now consider an example using a simple binary search tree, where we have D, and a separate copy D':
\begin{figure}[H]
\includegraphics[scale=.43]{treefig}
\centering
\end{figure}
For us to be able to make edits to D' without changing D, we must introduce the concept of reference counting.
A reference count keeps track of how many objects point to a given node.
Here, since B and F have reference counts greater than one, we know that we can't modify those nodes without changing another version of the data structure.
Therefore, when we insert G into D', we will copy any nodes that have reference counts greater than one and adjust the tree as necessary:
\begin{figure}[H]
\includegraphics[scale=.43]{treefig2}
\centering
\end{figure}
The same concept applies in deleting or modifying nodes.
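The following C sketch combines reference counting with this path copying for a persistent insert; the node layout is illustrative only, and \texttt{make} takes ownership of the child references passed to it:
\begin{verbatim}
#include <stdlib.h>
typedef struct node {
    int key;
    int refcount;
    struct node *left, *right;
} node;
static node *retain(node *n) {
    if (n) n->refcount++;
    return n;
}
/* make() takes ownership of the references passed as children. */
static node *make(int key, node *left, node *right) {
    node *n = malloc(sizeof *n);
    n->key = key;
    n->refcount = 1;
    n->left = left;
    n->right = right;
    return n;
}
/* Persistent insert: nodes on the search path are copied, while
   the untouched sibling subtree is shared and its count bumped. */
static node *insert(node *root, int key) {
    if (!root) return make(key, NULL, NULL);
    if (key < root->key)
        return make(root->key, insert(root->left, key),
                    retain(root->right));
    if (key > root->key)
        return make(root->key, retain(root->left),
                    insert(root->right, key));
    return retain(root);   /* already present: share as-is */
}
\end{verbatim}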
\subsection{Why?}
Why are persistent data structures interesting, or valuable?
In a broad sense, persistent data structures offer a way to do quick, cheap analysis on multiple versions of a large data structure.
In a mutable system, analyzing multiple versions of a data structure typically involves expensive wholesale copying of the structure, with no easy way to reverse changes that have been performed.
With persistent data structures, copies of a structure can be very small in size relative to the entire structure.
Reverting changes to that structure is simple, shown with the reference counting scheme from above.
If systems that perform analysis on large data sets are concerned with change to that data set over time, persistent data structures can be a powerful tool both in terms of how much space is used as well as performance.
Further, persistent data structures are more easily guaranteed to be thread-safe, since operations on persistent structures will never write to the parts of their structure that they share with other versions.
This characteristic makes persistent data structures well suited to concurrent use by multiple processes and threads, which dovetails with the increasing prevalence of multi-core consumer processors.
\section{Tries}
So, what is the best way to represent a graph persistently?
We know that whatever structure we use, if we want to efficiently store memory between versions, we imagine it to have elements of tree-like structure, with pointers between discrete parts.
A simple binary search tree is possible, and is used for some persistent graph libraries.
However, the use of a binary search tree introduces significant memory inefficiency.
If a node can only point to two other nodes, trees become very deep very quickly, which means lookups become costly.
Rather, our paper discusses the use of a wide-fanout key-mapped trie \textipa{[t\textturnr a\textsci]}, a derivative of the hash array mapped trie.
Our structure has the following characteristics:
\begin{itemize}
\item The library performs no hashing of values (nodes or edges of the graph).
Rather, each value is given a unique key either during or prior to insertion according to the current balance of the trie.
\item Values are stored only in leaves.
\item Wide fanout, to minimize trie depth.
\item Array compression, with bitmaps to indicate non-null positions.
\item Values are chunked together with their parent nodes.
\item Nodes without children are combined with their parents, to reduce the number of pointers.
\end{itemize}
The combining together of nodes without children with their parents means that nodes are effectively stored by the first unique bits of their key rather than the entire key itself.
To understand how this works, consider a hypothetical insert, with keys of length 8 and 4-way fanout among nodes, of a value with the key \texttt{11000110}.
Since our fanout is 4, we will consider two bits of the key at a time in determining which branch of the trie to pursue.
\begin{figure}[H]
\includegraphics[scale=.5]{trie1}
\centering
\end{figure}
Here, we could create nodes that span the entire depth of the trie to insert the value, but since we would chunk these nodes together later, we will simply store the value in the root node.
Since there are no other keys that begin with \texttt{10} in the set of keys, we don't need to create an interstitial node.
Inserting a node with a key that does begin with \texttt{10} results in the following adjustments:
\begin{figure}[H]
\includegraphics[scale=.5]{trie2}
\centering
\end{figure}
When we look up one of our nodes by the key, the library will examine the first two bits of the key, \texttt{10}.
Further, since all values are stored in the leaves of the trie, we will actually store the bolded values in arrays at the tail end of each parent node. Hence, our current trie will actually appear like this in memory:
\begin{figure}[H]
\includegraphics[scale=.47]{trie2actual}
\centering
\end{figure}
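A lookup under this scheme consumes the key two bits at a time, starting from the least significant end (matching the \texttt{10} branch taken for the key \texttt{11000110} above), and stops at the first node that stores the value directly. The sketch below is illustrative only: it assumes a plain, uncompressed child array (the next subsection compresses these arrays) and omits the final comparison against the full stored key.
\begin{verbatim}
#include <stdint.h>
#include <stddef.h>
#define BITS_PER_LEVEL 2
#define FANOUT (1u << BITS_PER_LEVEL)      /* 4-way branching */
typedef struct tnode {
    struct tnode *child[FANOUT]; /* NULL where no subtrie exists */
    const void   *value[FANOUT]; /* values chunked into the node */
} tnode;
static const void *lookup(const tnode *n, uint8_t key) {
    for (unsigned level = 0; n != NULL; level++) {
        unsigned slot =
            (key >> (level * BITS_PER_LEVEL)) & (FANOUT - 1);
        if (n->value[slot] != NULL)
            return n->value[slot];   /* stored at this depth */
        n = n->child[slot];          /* descend one level */
    }
    return NULL;                     /* key not present */
}
\end{verbatim}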
\subsection{Array Compression}
Storing these pointers and values in arrays means that, for nodes with low populations, we waste a lot of space on empty array slots.
We compensate for this with an array compression scheme borrowed from Phil Bagwell's hash array mapped trie.
In this scheme, the actual arrays that store pointers and values are dense and dynamically sized.
Each node stores two bitmaps that store data about which spots in our hypothetically complete array are occupied.
If we want to access a value or pointer at a particular position, we will perform some bitwise arithmetic to determine in which dense array slot our desired value lies.
To illustrate an example, let us consider a bitmap 12 bits in length, which tells us about an array with seven values, \texttt{011011101010}.
For ease of comprehension, the array will be reversed so that the least significant bits of the bitmap correspond to the lowest indices in the array:
\begin{figure}[H]
\includegraphics[scale=.43]{bitmask}
\centering
\end{figure}
The bitmap indicates that the keys 2, 4, 6, 7, 8, 10, and 11 are currently occupied by values.
Let's imagine the key we want to use for insertion or lookup is 9, or, in other words, that the five bits of the key we are concerned with are \texttt{01001}.
This means we want to check the 8th index of the hypothetical array.
To verify whether this spot is empty, we will simply bitwise \texttt{AND} together the bitmask and a number whose 8 least significant bits are 1s.
The number of 1s in the result represents how many spots are occupied in the compressed array prior to the one we want to insert into (or, in other words, the array index of our desired spot).
We will now use the population count instruction, which is built in to most modern processor architectures, to derive the index we need in our dense array, 5:
\begin{figure}[H]
\includegraphics[scale=.45]{bitmask2}
\centering
\end{figure}
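In C, this index computation is a mask followed by a population count. The helper below is a sketch using the numbers from the worked example (bitmap \texttt{011011101010}, zero-based position 8, dense index 5) and the GCC/Clang \texttt{\_\_builtin\_popcount} intrinsic as a stand-in for the hardware instruction:
\begin{verbatim}
#include <stdint.h>
/* Is the hypothetical (sparse) slot at position pos occupied? */
static int occupied(uint32_t bitmap, unsigned pos) {
    return (bitmap >> pos) & 1u;
}
/* Index into the dense array for sparse position pos:
   count the occupied slots strictly below it. */
static unsigned dense_index(uint32_t bitmap, unsigned pos) {
    uint32_t below = bitmap & ((1u << pos) - 1u);
    return (unsigned)__builtin_popcount(below);
}
/* dense_index(0x6EA, 8) == 5   (0x6EA is 011011101010) */
\end{verbatim}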
Using this scheme we conserve the memory that would normally be occupied by empty array slots.
This conservation is particularly important when we update the structure persistently, since creating copies of empty array slots would introduce large amounts of waste.
Let's take a look at our nodes in memory using the value bitmap from above, and the child bitmap \texttt{100010011001}.
Note that we have put the arrays back in the usual order to reflect how the values and pointers are actually stored in memory:
\begin{figure}[H]
\includegraphics[scale=.45]{nodewithbitmaps}
\centering
\end{figure}
\subsection{Trie Balancing}
To ensure good asymptotics when looking up values at random in our graph, as might happen during a traversal of the graph, we want to keep our trie as balanced as possible.
'Balance,' in the context of tree-like structures, means that values are equally distributed in branches.
If no part of the trie is deeper than any other part, we have a uniform lookup speed for all values, ensuring that we don't spend a long time finding certain values nested deep in the trie.
For our trie to retain balance, we have to create keys for new values that place them in the appropriately least-populated section of the trie.
We employ a simple scheme which could be optimized for greater performance.
In this scheme, a node stores the index of the least populated of its children, as well as the total size of everything beneath that node in the trie.
After each discrete insertion or deletion from our trie, the total size value is adjusted for each node affected, and, if necessary, the index of the least populated node changed.
In this figure, a value of size 5 is inserted into the right-most branch of the trie, which causes a new least-populated child to be designated:
\begin{figure}[H]
\includegraphics[scale=.45]{balancing}
\centering
\end{figure}
After the insert is performed, the child at 10 becomes the new least populated child:
\begin{figure}[H]
\includegraphics[scale=.45]{balancing2}
\centering
\end{figure}
The cost this bookkeeping adds to each write is quantified in the Results section.
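A minimal version of this bookkeeping, again with a hypothetical node layout, keeps one population counter per node and rescans the children of every node touched by an insert:
\begin{verbatim}
#include <stddef.h>
#define NCHILD 4
typedef struct bnode {
    struct bnode *child[NCHILD]; /* NULL children have size 0 */
    size_t total;                /* values stored at or below here */
    unsigned least;              /* index of least populated child */
} bnode;
static size_t child_size(const bnode *n, unsigned i) {
    return n->child[i] ? n->child[i]->total : 0;
}
/* Called on every node along the path of an insert of added
   values; a deletion would subtract instead. */
static void note_insert(bnode *n, size_t added) {
    n->total += added;
    n->least = 0;
    for (unsigned i = 1; i < NCHILD; i++)
        if (child_size(n, i) < child_size(n, n->least))
            n->least = i;
}
\end{verbatim}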
\subsection{Buffers}
With the densely packed arrays from section 3.1, every time we wish to insert into our trie structure, we need to resize the array of the node we insert into.
Since structs in C are not dynamically sized, the addition of a new value necessitates re-allocating the entire struct instance to compensate for the newly resized array.
This results in huge amounts of churning memory, which means very expensive writes.
To reduce the complexity and churn of the average write, we introduce \texttt{n}-sized buffers at the end of our dense arrays.
These buffers, which may be resized by the application, represent free space in which a node may store \texttt{n} values or children before the struct instance will be full and in need of reallocation.
This significantly decreases the amount of memory being allocated and freed on each insert on average. The exact effects of the various sizes of these buffers are detailed in our Results section.
Revisiting our diagram from the previous section, we can see how the buffers behave in our implementation:
\begin{figure}[H]
\includegraphics[scale=.42]{buffers}
\centering
\end{figure}
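Sketched in C, a buffered value array reallocates only when its free slots are exhausted; \texttt{VALUE\_BUFFER\_SIZE} below is a stand-in for the library's configurable buffer size rather than its actual definition, and error handling is elided:
\begin{verbatim}
#include <stdlib.h>
#include <string.h>
#define VALUE_BUFFER_SIZE 4
typedef struct varray {
    size_t used;      /* occupied slots                         */
    size_t capacity;  /* allocated slots: used plus free buffer */
    void  *slot[];    /* dense array (flexible array member)    */
} varray;
static varray *varray_grow(varray *a) {
    size_t cap = (a ? a->capacity : 0) + VALUE_BUFFER_SIZE;
    varray *b = realloc(a, sizeof(varray) + cap * sizeof(void *));
    if (!a) b->used = 0;
    b->capacity = cap;
    return b;
}
/* Insert at dense index at; a reallocation now happens once
   every VALUE_BUFFER_SIZE insertions, not on every insert. */
static varray *varray_insert(varray *a, size_t at, void *v) {
    if (a == NULL || a->used == a->capacity)
        a = varray_grow(a);
    memmove(&a->slot[at + 1], &a->slot[at],
            (a->used - at) * sizeof a->slot[0]);
    a->slot[at] = v;
    a->used++;
    return a;
}
\end{verbatim}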
\subsection{Lazy Copying}
Next we will discuss the exact flavor of persistence that our library implements, ``lazy copying.''
Using lazy copying, an update to a trie structure \texttt{n} will, by default, be performed in place, without preserving the previous version.
Creating a copy of \texttt{n} causes a copy of \texttt{n}, \texttt{n'}, to be created, and increments the reference counts of its children.
Thenceforth, if an update to either \texttt{n} or \texttt{n'} changes a node with a reference count greater than one, that node is first copied to prevent other versions of the structure from being modified.
A high-level example of how this works is seen in the earlier Trees and Reference Counting section. \par
Our chunking together of values with nodes leads to redundancy using lazy copying.
If an update to a branch of the trie requires that branch to be copied, and the nodes in that branch are full of values, we will make many redundant copies of the values in that branch.
We consider this an acceptable tradeoff considering the advantages in read-performance associated with chunking and reducing our overall number of pointers, though this solution does increase our memory footprint as we modify persistent copies of our graph.
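The decision of whether an update may proceed in place reduces to a small copy-on-write helper. The fragment below is a sketch that assumes a node layout whose first field is the reference count, and it leaves out the detail that a full copy must also retain the children it carries over:
\begin{verbatim}
#include <stdlib.h>
#include <string.h>
typedef struct cnode {
    unsigned refcount;
    /* bitmaps, values and child pointers would follow here */
} cnode;
/* Return a node this version may mutate: the original if we are
   the sole owner, otherwise a private copy, so that other
   versions of the structure are never modified. */
static cnode *ensure_unshared(cnode *n, size_t bytes) {
    if (n->refcount == 1)
        return n;                /* sole owner: update in place */
    cnode *copy = malloc(bytes);
    memcpy(copy, n, bytes);
    copy->refcount = 1;
    n->refcount--;               /* this version drops its share */
    return copy;
}
\end{verbatim}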
\section{Results}
We include tests that show our memory usage and read/write performance for mutable and persistent updates using various configurations of the library (32-way, 64-way, and possibly 16-way fanout).
Next, we include a comparison of random insertion/deletion performance and memory footprint for different values of \texttt{VALUE\_BUFFER\_SIZE} and \texttt{CHILD\_BUFFER\_SIZE}.
Lastly, for the time complexities of a random traversal of a randomly generated Erd\H{o}s--R\'enyi graph, as well as a random sequence of insertions, deletions, and lookups, we compare against the C++ Boost Graph Library.
\appendix
\section{Appendix Title}
This is the text of the appendix, if you need one.
\acks
Acknowledgments, if needed.
% We recommend abbrvnat bibliography style.
\bibliographystyle{abbrvnat}
% The bibliography should be embedded for final submission.
\end{document}
|
{"hexsha": "5558e33c1f3c87fc79316fe203eb175451495245", "size": 16532, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "writeup/thesis.tex", "max_stars_repo_name": "benjaminy/TenaciousCalf", "max_stars_repo_head_hexsha": "b90c002ce16fee579b04f5c9b92d1cc4b4f96b94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-11-09T19:22:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-11T08:18:34.000Z", "max_issues_repo_path": "writeup/thesis.tex", "max_issues_repo_name": "benjaminy/TenaciousCalf", "max_issues_repo_head_hexsha": "b90c002ce16fee579b04f5c9b92d1cc4b4f96b94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "writeup/thesis.tex", "max_forks_repo_name": "benjaminy/TenaciousCalf", "max_forks_repo_head_hexsha": "b90c002ce16fee579b04f5c9b92d1cc4b4f96b94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 64.831372549, "max_line_length": 257, "alphanum_fraction": 0.7915557706, "num_tokens": 3710}
|
"""Test the percentage column difference transformer."""
import numpy as np
import numpy.testing as nt
import pandas as pd
import pandas.testing as pt
import pytest
import src.preprocessing as pp
@pytest.fixture
def data():
data = {
'f1': np.array([100, 110, 98, 1500, 30]),
'f2': 100 * np.ones((5, )),
'f3': np.zeros((5, )),
'target1': 100 + np.arange(5),
'target2': 200 + np.arange(5),
}
return pd.DataFrame(data)
def test_it_checks_init_params(data: pd.DataFrame):
with pytest.raises(TypeError):
pp.TwoColPercentDiffTransformer((True, 'age'))
with pytest.raises(TypeError):
pp.TwoColPercentDiffTransformer((0, 1.4))
def test_it_checks_columns_in_df(data: pd.DataFrame):
with pytest.raises(ValueError):
pt = pp.TwoColPercentDiffTransformer(['f1', 'target3'])
pt.fit(data)
with pytest.raises(ValueError):
pt = pp.TwoColPercentDiffTransformer(['target3', 'f1'])
pt.fit(data)
def test_it_checks_no_zeros_in_a(data):
with pytest.raises(ValueError):
pt = pp.TwoColPercentDiffTransformer(['f3', 'target1'])
pt.fit(data)
def test_it_transforms_data(data: pd.DataFrame):
pt = pp.TwoColPercentDiffTransformer(['f2', 'f1'])
result = pt.fit_transform(data)
expected = pd.DataFrame(
data=np.array([0.0, 0.1, -0.02, 14.0, -0.7]),
columns=['delta_percent_f1_f2'])
nt.assert_array_equal(result, expected)
|
{"hexsha": "bcf40bba9f0a8476c7a6a0fc593404f84de2b4c6", "size": 1477, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_two_col_percent_diff_transformer.py", "max_stars_repo_name": "PieCampi/dl-toolkit", "max_stars_repo_head_hexsha": "6d212f22ed97af9b9e59b6c2e77198e472c3f628", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_two_col_percent_diff_transformer.py", "max_issues_repo_name": "PieCampi/dl-toolkit", "max_issues_repo_head_hexsha": "6d212f22ed97af9b9e59b6c2e77198e472c3f628", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_two_col_percent_diff_transformer.py", "max_forks_repo_name": "PieCampi/dl-toolkit", "max_forks_repo_head_hexsha": "6d212f22ed97af9b9e59b6c2e77198e472c3f628", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4655172414, "max_line_length": 63, "alphanum_fraction": 0.6519972918, "include": true, "reason": "import numpy", "num_tokens": 384}
|
# -*- coding: utf-8 -*-
"""
cloud_att_intermediate_values.py
Created on Tue Jun 30 8:53:09 2020
Determined the error between the published L_red value in the ITU-R validation data,
sheet P840-8 Lred, and the calculated L_red value using iturpy
@author: MAW32652
"""
import itur
from itur.models.itu840 import columnar_content_reduced_liquid, specific_attenuation_coefficients
from itur.utils import prepare_quantity, prepare_output_array,\
prepare_input_array, load_data, dataset_dir, memory
import os
import numpy as np
import xlrd
def cloud_attenuation_validation():
########## VARIABLE INITIALIZATION ###########
###excel variables
workbook = xlrd.open_workbook("H:/ITU-Rpy/CG-3M3J-13-ValEx-Rev5_0.xlsx")
sheet = workbook.sheet_by_name("P840-8 A_Clouds")
### ITU_rpy variables
# Validation data V=variables
epData = [] #epislon prime expected values
eppData= [] #epsilon prime prime expected values
etaData = [] #eta expected values
klData = [] #kl expected values
LredData = [] #L_red expected values
# Input variables
latList = [] #Latitude
lonList = [] #Longitude
fList = [] #frequency
eleList = [] #Elevation angle
pList = [] #Probability
#Output Variables
epList = [] #epsilon prime calculated values
epErrorList = [] #error for epsilon prime (Validation - calculated)
epPEList = [] #Epsilon prime percent error
eppList = [] #epsilon prime prime calculated values
eppErrorList = [] #error for epsilon prime prime (Validation - calculated)
eppPEList = [] #epsilon prime prime percent error
etaList= [] #eta calculated values
etaErrorList = [] #error for eta (Validation - calculated)
etaPEList = [] #eta percent error
klList = [] #kl calculated values
klErrorList = [] #error for kl (Validation - calculated)
klPEList = [] # kl percent error
LredList = [] #L_red calculated values
LredErrorList= [] #error for L_red (Validation - calculated)
LredPEList = [] # L_red percent error
########## Computation ##########
for i in range(63):
#populate the input variable lists
#data entries start on row 21
#expected outputs
epData.append(sheet.cell_value(i + 20, 8)) #Published epsilon prime data in column I
eppData.append(sheet.cell_value(i + 20, 9)) #Published epsilon prime prime data in column J
etaData.append(sheet.cell_value(i + 20, 10)) #Published eta data in column K
klData.append(sheet.cell_value(i + 20, 11)) #Published kl data in column L
LredData.append(sheet.cell_value(i + 20, 12)) #Published L_red data in column M
#input data
        latList.append(sheet.cell_value(i + 20, 3)) #Latitude inputs in column D
lonList.append(sheet.cell_value(i + 20, 4)) #Longitude inputs in column E
fList.append(sheet.cell_value(i + 20, 5)) #Frequency inputs in column F
eleList.append(sheet.cell_value(i + 20, 6)) #Elevation angle inputs in column G
pList.append(sheet.cell_value(i + 20, 7)) #probability inputs in column H
### kl ###
        #determine kl and the associated intermediate values using P.840 functions
ep, epp, eta, kl = specific_attenuation_coefficients(fList[-1], T = 0)
epList.append(ep)
eppList.append(epp)
etaList.append(eta)
klList.append(kl)
#determine the error of the new calculated values
epError = epData [-1] - ep
epPE = (ep - epData[-1]) / epData[-1]
epErrorList.append(epError)
epPEList.append(epPE)
eppError = eppData [-1] - epp
eppPE = (epp - eppData[-1]) / eppData[-1]
eppErrorList.append(eppError)
eppPEList.append(eppPE)
etaError = etaData [-1] - eta
etaPE = (eta - etaData[-1]) / etaData[-1]
etaErrorList.append(etaError)
etaPEList.append(etaPE)
klError = klData[-1] - kl
klPE = (kl - klData[-1]) / klData[-1]
klErrorList.append(klError)
klPEList.append(klPE)
### L_red ###
#Calculate L red and put the value into the output list
L_red = columnar_content_reduced_liquid(latList[-1], lonList[-1], pList[-1])
LredList.append(L_red.value)
#determine the error for the new calculated value
LredError = LredData[-1] - L_red.value
LredPE = (L_red.value - LredData[-1])/ LredData[-1]
LredErrorList.append(LredError)
LredPEList.append(LredPE)
########### ERROR ANALYSIS ##########
epAvg = sum(epErrorList)/len(epErrorList)
eppAvg = sum(eppErrorList)/len(eppErrorList)
etaAvg = sum(etaErrorList)/len(etaErrorList)
klAvg = sum(klErrorList)/len(klErrorList)
LredAvg = sum(LredErrorList)/len(LredErrorList)
epMax = max(list(map(abs, epErrorList)))
eppMax = max(list(map(abs, eppErrorList)))
etaMax = max(list(map(abs, etaErrorList)))
klMax = max(list(map(abs, klErrorList)))
LredMax = max(list(map(abs, LredErrorList)))
epAvgPE = sum(epPEList)/len(epPEList)
eppAvgPE = sum(eppPEList)/len(eppPEList)
etaAvgPE = sum(etaPEList)/len(etaPEList)
klAvgPE = sum(klPEList)/len(klPEList)
LredAvgPE = sum(LredPEList)/len(LredPEList)
print()
print("Epsilon prime: ")
print("Average Error: " + '{:0.2e}'.format(epAvg))
print("Max Error: " + '{:0.2e}'.format(epMax))
print("Average Percent Error: " + '{:0.2e}'.format(epAvgPE))
print()
print("Epsilon prime prime: ")
print("Average Error: " + '{:0.2e}'.format(eppAvg))
print("Max Error: " + '{:0.2e}'.format(eppMax))
print("Average Percent Error: " + '{:0.2e}'.format(eppAvgPE))
print()
print("Eta: ")
print("Average Error: " + '{:0.2e}'.format(etaAvg))
print("Max Error: " + '{:0.2e}'.format(etaMax))
print("Average Percent Error: " + '{:0.2e}'.format(etaAvgPE))
print()
print("Note: Kl is a function of epsilon prime, epsilon prime prime, and eta")
print()
print("Kl: ")
print("Average Error: " + '{:0.2e}'.format(klAvg))
print("Max Error: " + '{:0.2e}'.format(klMax))
print("Average Percent Error: " + '{:0.2e}'.format(klAvgPE))
print()
print("Note: L_red is not related to Kl")
print()
print("L_red: ")
print("Average Error: " + '{:0.2e}'.format(LredAvg))
print("Max Error: " + '{:0.2e}'.format(LredMax))
print("Average Percent Error: " + '{:0.2e}'.format(LredAvgPE))
|
{"hexsha": "91c6b5f042e1cfdf8f5dc0f63a692da867bebea2", "size": 6769, "ext": "py", "lang": "Python", "max_stars_repo_path": "itur/validation/validation_scripts/cloud_att_intermediate_values.py", "max_stars_repo_name": "the-aerospace-corporation/ITU-Rpy", "max_stars_repo_head_hexsha": "4456da2db9f28453d5a08339c84fe5bf25b999d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "itur/validation/validation_scripts/cloud_att_intermediate_values.py", "max_issues_repo_name": "the-aerospace-corporation/ITU-Rpy", "max_issues_repo_head_hexsha": "4456da2db9f28453d5a08339c84fe5bf25b999d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "itur/validation/validation_scripts/cloud_att_intermediate_values.py", "max_forks_repo_name": "the-aerospace-corporation/ITU-Rpy", "max_forks_repo_head_hexsha": "4456da2db9f28453d5a08339c84fe5bf25b999d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0150753769, "max_line_length": 99, "alphanum_fraction": 0.6117594918, "include": true, "reason": "import numpy", "num_tokens": 1906}
|
# """
# idris *.idr -o out.qb --codegen qb --cg-opt "--javaName" --cg-opt "--symemu"
#
# Requirements:
# - (v1.3) pkg> add ArgParse
# - (v1.3) pkg> add MLStyle
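#
# Usage (inferred from the argument parser at the bottom of this file):
#   julia backend.jl <input.qb> <output.jl>
#   Pass "std" as the output name to print the generated code to stdout instead.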
using MLStyle
using ArgParse
literal_map(kind, x) =
@match String(kind) begin
"float" => parse(Float64, x)
"int" => parse(Int64, x)
"bigInt" => parse(BigInt, x)
"char" => x[1]
"string" => x
"bool" => parse(Bool, x)
"unit" => nothing
"symbol" => QuoteNode(Symbol(x))
_ => error(kind)
end
function ExternalCall(name, args)
name = Symbol(name)
:(__RTS.$name($(args...)))
end
function ExternalVar(name)
name = Symbol(name)
return :(__RTS.$name)
end
function Var(name)
name = Symbol(name)
return name
end
function Call(name, args)
name = Symbol(name)
return :($name($(args...)))
end
function Defun(name, args, body)
name = Symbol(name)
args = Symbol.(args)
:(function $name($(args...)); $(body...) end)
end
function Introduction(name)
name = Symbol(name)
:($name = nothing)
end
function Update(name, exp)
name = Symbol(name)
:($name = $exp)
end
function Constant(n)
n
end
function Switch(var, xs, body)
isempty(xs) && return Expr(:block, body...)
body = Expr(:block, body...)
for (cc, stmts) in reverse(xs)
body = Expr(:elseif, Expr(:block, :($var == $cc)), Expr(:block, stmts...), body)
end
body.head = :if
return body
end
function If(cond, t, e)
return Expr(:if, cond, Expr(:block, t...), Expr(:block, e...))
end
function EffectExpr(exp)
exp
end
function Return(exp)
:(return $exp)
end
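# Sketch of the QB wire format, as inferred from the parser below: each header
# line is "constructor <name> <arity>", "list <n>", or "literal <kind> <byte-length>";
# a literal's payload follows verbatim for exactly <byte-length> bytes and is
# terminated by a newline. Completed nodes are folded bottom-up on obj_stack
# until a single top-level value remains.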
function read_and_gen(io)
ctor_stack = []
obj_stack = []
left_stack = []
left = 1
while true
while (left !== 0)
s = readuntil(io, '\n'; keep=false)
if isempty(s)
continue
end
pats = split(s)
dispatch = pats[1]
left -= 1
if dispatch == "constructor"
cons = pats[2]
n = parse(Int, pats[3])
push!(left_stack, left)
left = n
ctor = getproperty(@__MODULE__, Symbol(cons))
push!(ctor_stack, (ctor, n))
elseif dispatch == "literal"
kind = pats[2]
length = parse(Int, pats[3])
buf = String(read(io, length))
readuntil(io, '\n'; keep=false)
buf = literal_map(kind, buf)
push!(obj_stack, buf)
elseif dispatch == "list"
n = parse(Int, pats[2])
push!(left_stack, left)
left = n
push!(ctor_stack, (nothing, n))
else
error("malformed qb format")
end
end
isempty(ctor_stack) && begin
@assert length(obj_stack) === 1
return obj_stack[1]
end
(ctor, n) = pop!(ctor_stack)
args = []
for _ in 1:n
push!(args, pop!(obj_stack))
end
reverse!(args)
if ctor === nothing
v = args
else
v = ctor(args...)
end
push!(obj_stack, v)
left = pop!(left_stack)
end
end
s = ArgParseSettings()
@add_arg_table! s begin
"filename"
help = "input QB file"
required = true
"out"
help = "output Julia file"
required = true
end
parsed_args = parse_args(ARGS, s)
f = open(parsed_args["filename"])
big = read_and_gen(f)
close(f)
out = parsed_args["out"]
if out == "std"
println(Expr(:block, :(include("rts.jl")), big...))
else
open(out, "w") do f
println(f, Expr(:block, :(include("rts.jl")), big...))
end
end
|
{"hexsha": "f5526d04df765d4483988a4444b3f7ca04a4c2ac", "size": 3963, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "backend.jl", "max_stars_repo_name": "thautwarm/PPL2020-quick-and-reusable-code-generation-for-idris", "max_stars_repo_head_hexsha": "a532c6c47f4f0faaf7588bf74776a0d9835d2ba5", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2020-03-03T19:13:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T14:47:05.000Z", "max_issues_repo_path": "backend.jl", "max_issues_repo_name": "thautwarm/PPL2020-quick-and-reusable-code-generation-for-idris", "max_issues_repo_head_hexsha": "a532c6c47f4f0faaf7588bf74776a0d9835d2ba5", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-11T21:46:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-07T11:43:22.000Z", "max_forks_repo_path": "backend.jl", "max_forks_repo_name": "thautwarm/PPL2020-quick-and-reusable-code-generation-for-idris", "max_forks_repo_head_hexsha": "a532c6c47f4f0faaf7588bf74776a0d9835d2ba5", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-09-14T09:58:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T16:21:42.000Z", "avg_line_length": 23.1754385965, "max_line_length": 89, "alphanum_fraction": 0.4900328034, "num_tokens": 1019}
|
import argparse
import os
from typing import Generator, Iterable
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from .grammar import q_learner
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def main(results_dir: str, input_size: int, traces_per_attack: int):
qstore = q_learner.QValues()
qstore.load_q_values(os.path.join(results_dir, 'qlearner_logs', 'q_values.csv'))
replay_dic = pd.read_csv(os.path.join(results_dir, 'qlearner_logs', 'replay_database.csv')).drop_duplicates(
subset='countermeasures', keep='first')
replay_dic_last = pd.read_csv(os.path.join(results_dir, 'qlearner_logs', 'replay_database.csv')).drop_duplicates(
subset='countermeasures', keep='last')
ssp = AttrDict({
'input_size': input_size,
'output_states': 256,
'init_utility': 0.3,
'countermeasures_budget': 5,
})
ql = q_learner.QLearner(AttrDict({'ssp': ssp, 'TRACES_PER_ATTACK': traces_per_attack}),
ssp,
0.0,
qstore=qstore,
replay_dictionary=replay_dic)
metrics = ["cost", "GE at 10% traces", "GE at 50% traces", "GE #traces to 0", "reward"]
(cm_string,
countermeasures,
cost,
guessing_entropy_at_10_percent,
guessing_entropy_at_50_percent,
guessing_entropy_no_to_0) = ql.generate_countermeasures()
reward = ql.metrics_to_reward(
cost, guessing_entropy_at_10_percent, guessing_entropy_at_50_percent, guessing_entropy_no_to_0
)
iteration = replay_dic[replay_dic['countermeasures'] == cm_string]['ix_q_value_update'].values[0]
replay_dic['reward'] = replay_dic.apply(
lambda row: ql.metrics_to_reward(*ql.get_metrics_from_replay(row['countermeasures'])),
axis='columns'
)
results_sorted = replay_dic.sort_values(by=['reward'], ascending=False)
title = os.path.join(*results_dir.split(os.path.sep)[-2:])
with open(os.path.join(results_dir, 'results_overview.txt'), mode="w") as file:
file.write(f"Results for {os.path.join(*results_dir.split(os.path.sep)[-2:])}\n\n")
file.write("Best countermeasures according to Q-Learning:\n")
file.write(f"{cm_string}\n")
file.write(f"First found at iteration: {iteration}\n")
file.write("Metrics:\n")
file.writelines(
iterable_as_list(metrics, [
cost, guessing_entropy_at_10_percent, guessing_entropy_at_50_percent, guessing_entropy_no_to_0, reward
])
)
q_values = ql.qstore.to_dataframe()
file.write(f"\n\nAverage q_value: {q_values['utility'].mean()}\n")
file.write(
f"Average (filtered) q_value: {q_values[q_values['utility'] != ssp.init_utility]['utility'].mean()}\n")
with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 2000,
'display.max_colwidth', 200):
file.write("\n\nTop 20 total reward countermeasures:\n")
file.write(str(results_sorted.head(20)))
file.write("\n\nBottom 20 total reward countermeasures:\n")
file.write(str(results_sorted.tail(20)))
plt.style.use(os.path.dirname(__file__)+'/scatter_plot.mplstyle')
replay_dic.plot.scatter(x='reward', y='cost', c='epsilon', colormap='viridis',
figsize=(10, 9), xlim=(-0.02, 1.02), ylim=(-0.1, 5.1))
plt.xlabel('Q-Learning reward')
plt.ylabel('Relative Countermeasure Cost')
ax = plt.gca()
ax.figure.axes[-1].set_ylabel('Epsilon When First Generated')
max_beaten = replay_dic.loc[replay_dic['guessing_entropy_no_to_0'].idxmax()]
ax.figure.axes[0].axvline(max_beaten['reward'], color='red', lw=1.0)
plt.savefig(
os.path.join(results_dir, f'{title.replace(os.path.sep, "_")}_first_scatter.svg'),
format='svg', dpi=150, bbox_inches='tight'
)
plt.close()
replay_dic_last['reward'] = replay_dic_last.apply(
lambda row: ql.metrics_to_reward(*ql.get_metrics_from_replay(row['countermeasures'])),
axis='columns'
)
plt.style.use(os.path.dirname(__file__)+'/scatter_plot.mplstyle')
replay_dic_last.plot.scatter(x='reward', y='cost', c='epsilon', colormap='viridis',
figsize=(10, 9), xlim=(-0.02, 1.02), ylim=(-0.1, 5.1))
plt.xlabel('Q-Learning reward')
plt.ylabel('Relative Countermeasure Cost')
ax = plt.gca()
ax.figure.axes[-1].set_ylabel('Epsilon When Last Generated')
plt.savefig(
os.path.join(results_dir, f'{title.replace(os.path.sep, "_")}_last_scatter.svg'),
format='svg', dpi=150, bbox_inches='tight'
)
plt.close()
return replay_dic
def iterable_as_list(descriptions: Iterable, dictionary: Iterable) -> Generator[str, None, None]:
for description, el in zip(descriptions, dictionary):
yield f"- {description}: {el}\n"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'results_dir',
help='Directory with results of an experiment'
)
parser.add_argument(
'input_size',
help='The input layer size',
default=700,
type=int
)
parser.add_argument(
'traces_per_attack',
help='The number of traces used per attack',
type=int
)
args = parser.parse_args()
subdirs = next(os.walk(args.results_dir))[1]
if np.isin(subdirs, ["graphs", "trained_models", "qlearner_logs"]).all():
results = main(args.results_dir, args.input_size, args.traces_per_attack)
else:
print(f"Results dir {args.results_dir} does not contain the required graphs, trained_models and qlearner_logs "
"subfolders")
|
{"hexsha": "28d6cc87784b5b3cc1a8791cc873030c99efa826", "size": 5930, "ext": "py", "lang": "Python", "max_stars_repo_path": "countermeasures/display_results.py", "max_stars_repo_name": "AISyLab/RL-based-countermeasure-design-for-SCA", "max_stars_repo_head_hexsha": "f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "countermeasures/display_results.py", "max_issues_repo_name": "AISyLab/RL-based-countermeasure-design-for-SCA", "max_issues_repo_head_hexsha": "f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "countermeasures/display_results.py", "max_forks_repo_name": "AISyLab/RL-based-countermeasure-design-for-SCA", "max_forks_repo_head_hexsha": "f03895b4b13b0397f0cc7014d9e7d2738ff2a6a2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.5333333333, "max_line_length": 119, "alphanum_fraction": 0.6413153457, "include": true, "reason": "import numpy", "num_tokens": 1452}
|
from base import StructuredModel
import numpy as np
import sys
import heapq
import time
import random
import math
import multiprocessing
import copy
class Utils(object):
    def greeting(self, name):
        print("Hello, " + name)
def getData(self,path, Num):
file1 = open(path, 'r')
lineNum = 1
X = []
Y = []
X_influence = []
Y_influence = []
while True:
line = file1.readline()
if not line:
break
seedset = set(line.split())
if len(Y) < Num:
if lineNum % 5 == 1:
X.append(seedset)
if lineNum % 5 == 2:
Y.append(seedset)
if lineNum % 5 == 3:
X_influence.append(float(line))
if lineNum % 5 == 4:
Y_influence.append(float(line))
lineNum += 1
if (len(X) != Num) or (len(Y) != Num):
sys.exit("getData: data fetch failed with sizes: {} {}".format(
len(X),len(Y)))
return X, Y, X_influence, Y_influence
def getDataTrainTest(self, path, trainNum, testNum):
file1 = open(path, 'r')
lineNum = 1
X_train = []
Y_train = []
X_train_influence = []
Y_train_influence = []
X_test = []
Y_test = []
X_test_influence = []
Y_test_influence = []
while True:
line = file1.readline()
if not line:
break
seedset = set(line.split())
if len(Y_train) < trainNum:
if lineNum % 5 == 1:
X_train.append(seedset)
if lineNum % 5 == 2:
Y_train.append(seedset)
if lineNum % 5 == 3:
X_train_influence.append(float(line))
if lineNum % 5 == 4:
Y_train_influence.append(float(line))
else:
if len(Y_test) < testNum:
if lineNum % 5 == 1:
X_test.append(seedset)
if lineNum % 5 == 2:
Y_test.append(seedset)
if lineNum % 5 == 3:
X_test_influence.append(float(line))
if lineNum % 5 == 4:
Y_test_influence.append(float(line))
lineNum += 1
if (len(X_train) != trainNum) or (len(Y_test) != testNum):
sys.exit("getData: data fetch failed with sizes: {} {}".format(
len(X_train),len(Y_train)))
return X_train, Y_train, X_train_influence, Y_train_influence, X_test, Y_test, X_test_influence, Y_test_influence
def getDataTrainTestRandom(self,path, trainNum, testNum, Max):
lineNums=(np.random.permutation(Max)*5)[0:(trainNum+testNum)]
lineNums.sort()
file1 = open(path, 'r')
lineNum = 0
X_train, Y_train, X_train_influence, Y_train_influence = ([] for i in range(4))
X_test, Y_test, X_test_influence, Y_test_influence= ([] for i in range(4))
while len(lineNums)>0:
line = file1.readline()
if not line:
break
if lineNum != lineNums[0]:
lineNum += 1
else:
if(len(Y_train)<trainNum):
seedset = set(line.split())
X_train.append(seedset)
lineNum += 1
line = file1.readline()
seedset = set(line.split())
Y_train.append(seedset)
lineNum += 1
line = file1.readline()
X_train_influence.append(float(line))
lineNum += 1
line = file1.readline()
Y_train_influence.append(float(line))
lineNum += 1
lineNums=np.delete(lineNums, 0)
#print(Y_train)
#print("train++", len(Y_train),len(lineNums))
else:
seedset = set(line.split())
X_test.append(seedset)
lineNum += 1
line = file1.readline()
seedset = set(line.split())
Y_test.append(seedset)
lineNum += 1
line = file1.readline()
X_test_influence.append(float(line))
lineNum += 1
line = file1.readline()
Y_test_influence.append(float(line))
lineNum += 1
lineNums=np.delete(lineNums, 0)
#print("test++ {}"+format(len(lineNums)))
if (len(X_train) != trainNum) or (len(Y_test) != testNum):
sys.exit("getDataRandom: data fetch failed with sizes: X_train {} Y_test {}".format(
len(X_train),len(Y_test)))
return X_train, Y_train, X_train_influence, Y_train_influence, X_test, Y_test, X_test_influence, Y_test_influence
def getDataRandom(self,path, Num, Max):
lineNums=(np.random.permutation(Max)*5)[0:Num]
'''
lineNums = []
while len(lineNums)<Num:
num = math.ceil(random.uniform(0, 1)*Max)
if 5*num not in lineNums:
lineNums.append(5*num)
'''
lineNums.sort()
#print(lineNums)
file1 = open(path, 'r')
lineNum = 0
X = []
Y = []
X_influence = []
Y_influence = []
while len(lineNums)>0:
line = file1.readline()
if not line:
break
if lineNum != lineNums[0]:
lineNum += 1
else:
seedset = set(line.split())
X.append(seedset)
lineNum += 1
line = file1.readline()
seedset = set(line.split())
Y.append(seedset)
lineNum += 1
line = file1.readline()
X_influence.append(float(line))
lineNum += 1
line = file1.readline()
Y_influence.append(float(line))
lineNum += 1
lineNums=np.delete(lineNums, 0)
if (len(X) != Num) or (len(Y) != Num):
sys.exit("getDataRandom: data fetch failed with sizes: {} {}".format(
len(X),len(Y)))
return X, Y, X_influence, Y_influence
def testFunction(self, model, testNum,thread,X_test,Y_test, Y_pred, infTimes, random_pred = False):
if random_pred :
Y_pred =[]
for x in X_test:
w=np.random.random((1, model.size_joint_feature))
Y_pred.append(model.inference(x, w))
block_size =int (testNum/thread);
p = multiprocessing.Pool(thread)
influence_Xs = p.starmap(model.instance.testInfluence_0_block, ((X_test[i*block_size:(i+1)*block_size], infTimes) for i in range(thread)))
p.close()
p.join()
p = multiprocessing.Pool(thread)
influence_Ys = p.starmap(model.instance.testInfluence_0_block, ((X_test[i*block_size:(i+1)*block_size], infTimes, Y_test[i*block_size:(i+1)*block_size]) for i in range(thread)))
p.close()
p.join()
p = multiprocessing.Pool(thread)
influence_Y_preds = p.starmap(model.instance.testInfluence_0_block, ((X_test[i*block_size:(i+1)*block_size], infTimes, Y_pred[i*block_size:(i+1)*block_size]) for i in range(thread)))
p.close()
p.join()
influence_X=[]
influence_Y=[]
influence_Y_pred=[]
for i in range(thread):
influence_X.extend(influence_Xs[i])
influence_Y.extend(influence_Ys[i])
influence_Y_pred.extend(influence_Y_preds[i])
reduce_percent_opt=[]
reduce_percent_pre = []
com_to_opt = []
error_abs = []
error_ratio = []
for influence_x, influence_y, influence_y_pred in zip(influence_X, influence_Y, influence_Y_pred):
#print("{} {} {} {} {}".format(influence_x,influence_y,influence_y_pred, influence_x_read, influence_y_read))
reduce_percent_opt.append((influence_x-influence_y)/influence_x)
reduce_percent_pre.append( (influence_x-influence_y_pred)/influence_x)
com_to_opt.append((influence_x-influence_y_pred)/(influence_x-influence_y+0.01))
error_abs.append((influence_y_pred-influence_y))
error_ratio.append((influence_y_pred-influence_y)/influence_y)
#print()
print("error_abs: {} +- {}".format(np.mean(np.array(error_abs)), np.std(np.array(error_abs))))
print("com_to_opt: {} +- {}".format(np.mean(np.array(com_to_opt)), np.std(np.array(com_to_opt))))
def pooling(self,one_slack_svm, ratio):
maxWeight=max(one_slack_svm.w)
indexList=[]
for i in range(len(one_slack_svm.w)):
if one_slack_svm.w[i]>ratio*maxWeight:
indexList.append(i)
new_diffusionGraphs=[]
for i in indexList:
new_diffusionGraphs.append(copy.deepcopy(one_slack_svm.model.instance.diffusionGraphs[i]))
one_slack_svm.model.instance.diffusionGraphs=new_diffusionGraphs
one_slack_svm.model.instance.featureNum=len(indexList)
one_slack_svm.model.size_joint_feature=len(indexList)
class Train(object):
def __init__(self, attack, protect, a_influence, p_influence):
self.attack=attack
self.protect=protect
self.a_influence=a_influence
self.p_influence=p_influence
class SocialGraph(object):
class Node(object):
def __init__(self,index):
self.index = index
self.neighbor = {}
self.in_degree = 0
self.out_degree = 0
def print(self):
print(self.index)
for node in self.neighbor:
print("{} {} {} {}".format(str(self.index), str(node)
, str(self.neighbor[node][0]), str(self.neighbor[node][1])))
def __init__(self, path, vNum):
self.nodes={}
self.vNum = vNum
for v in range(self.vNum):
node = self.Node(str(v))
node.neighbor={}
self.nodes[str(v)]=node
file1 = open(path, 'r')
while True:
line = file1.readline()
if not line:
break
ints = line.split()
node1 = ints[0]
node2 = ints[1]
para_1 = float(ints[2])
para_2 = float(ints[3])
if node1 in self.nodes:
self.nodes[node1].neighbor[node2]=[para_1, para_2]
self.nodes[node1].out_degree += 1
self.nodes[node2].in_degree += 1
else:
sys.exit("non existing node")
if node2 not in self.nodes:
sys.exit("non existing node")
#print(path + " read")
def print(self):
for node in self.nodes:
self.nodes[node].print()
def getNeighborsByHot(self, y, hotNum):
temp = y.copy()
neighbors = y.copy()
for _ in range(hotNum):
for current in neighbors:
for current_to in self.nodes[current].neighbor:
temp.add(current_to)
neighbors = temp.copy()
return neighbors
def spreadMulti_n0(self,x,y,times):
return self.vNum-self.spreadMulti(x,y,times)
def spreadMulti(self, x,y,times):
local_state = random.Random()
count = 0
for _ in range(times):
count += self.spreadOnce(x,y,local_state)
return count/times
def spreadMulti_P(self, x,y,times, thread):
if not isinstance(thread, int):
sys.exit("thread should be int")
if thread >1:
p = multiprocessing.Pool(thread)
counts = sum(p.starmap(self.spreadMulti, ((x,y,int(times/thread))for _ in range(thread) )))
p.close()
p.join()
#counts = Parallel(n_jobs=thread, verbose=0 )(delayed(self.spreadMulti)(x,y,int(times/thread))for _ in range(thread))
return counts/thread
else:
sys.exit("spreadMulti_P wrong")
def spreadOnce(self, seedSet_x, seedSet_y, local_state ):
#local_state = np.random.RandomState()
#local_state.seed()
'''return # of 0-active nodes'''
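        # Competitive diffusion: seeds in x start in state 0, seeds in y in state 1;
        # every other node adopts the state of whichever active neighbour reaches it
        # first (earliest activation time), with ties resolved in favour of state 0
        # in spreadLocal below.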
tstate = {} # current state
fstate = {} # final time
tTime = dict() # best time
actTime = [] # all updated time
for x in seedSet_x:
tstate[x]=0
heapq.heappush(actTime, (0, x))
tTime[x]=0.0
for y in seedSet_y:
if y not in seedSet_x:
tstate[y]=1
heapq.heappush(actTime, (0, y))
tTime[y]=0.0
#print(tTime)
while len(actTime)>0:
current_node_time, current_node = heapq.heappop(actTime)
if current_node not in fstate:
if current_node_time != tTime[current_node]:
sys.exit("current_node_time != tTime[current_node]")
fstate[current_node]=current_node_time
self.spreadLocal(tstate, fstate, actTime, tTime, current_node, current_node_time, local_state)
count = 0
for x in tstate:
if tstate[x]==0:
count += 1
#print(self.vNum-count)
return count
def spreadLocal(self,tstate, fstate, actTime, tTime, current_node, current_node_time,local_state):
#print(tTime)
#print(self.nodes[current_node].neighbor)
for to_node, para in self.nodes[current_node].neighbor.items():
if (to_node in fstate) or (not self.isSuccess(self.nodes[to_node], local_state)):
pass
else:
transTime = self.getWeibull(para[0], para[1])
new_time = current_node_time+ transTime
if to_node in tstate:
if new_time <tTime[to_node]:
tTime[to_node]=new_time
tstate[to_node]=tstate[current_node]
heapq.heappush(actTime, (new_time , to_node))
if new_time == tTime[to_node]:
if tstate[current_node]==0:
tstate[to_node]=0
if to_node not in tstate:
# print(tTime)
tTime[to_node]=new_time
tstate[to_node]=tstate[current_node]
heapq.heappush(actTime, (new_time, to_node))
def isSuccess(self, to_node, local_state):
#seed = np.random.seed()
#local_state = np.random.RandomState(seed)
randnum= local_state.uniform(0, 1)
if randnum< 1.0/to_node.in_degree:
#if np.random.uniform(0,1)< 1.0/to_node.in_degree:
return True
else:
return False
def getWeibull(self, alpha, beta):
time = alpha*math.pow(-math.log(1-random.uniform(0, 1)), beta);
if time >= 0:
return math.ceil(time)+1
else:
sys.exit("time <0")
return None
def genTrains(self, pairsNum, path, simutimes, thread):
file1 = open('../data/power_list.txt', 'r')
seedSizes = []
while len(seedSizes)<pairsNum:
line = file1.readline()
if not line:
sys.exit("genTrains wrong")
break
seedSizes.append(int(line))
with open(path, 'w') as the_file:
p = multiprocessing.Pool(thread)
trains = p.starmap(self.getOneTrain, ((seedSizes[i],simutimes) for i in range(pairsNum) ))
print("pairs generated ")
p.close()
p.join()
for train in trains:
for x in train.attack:
the_file.write(x)
the_file.write(" ")
the_file.write("\n")
for x in train.protect:
the_file.write(x)
the_file.write(" ")
the_file.write("\n")
the_file.write("{}\n".format(train.a_influence))
the_file.write("{}\n".format(train.p_influence))
the_file.write("\n")
def getOneTrain(self, seedSize, simutimes):
a = self.getRandomSeed(seedSize)
p,_,_ = self.greedyMP(a,len(a),simutimes)
a_influence=self.spreadMulti(a, {}, simutimes)
p_influence=self.spreadMulti(a, p, simutimes)
return Train(a, p, a_influence, p_influence)
def getRandomSeed(self, seedSize):
a=set()
while len(a)<seedSize:
index = str(math.floor(random.uniform(0, 1)*self.vNum))
if index not in a:
a.add(index)
return a
def greedyMP(self,a,seedSize, simutimes):
c_score = self.spreadMulti_n0(a,{},simutimes)
#print("Initial: {}".format(c_score))
scores = [c_score]
gains = []
for node in range(self.vNum):
gain = self.spreadMulti_n0(a, [str(node)], simutimes) - c_score
#print(gain);
heapq.heappush(gains, (-gain, str(node)))
score_gain, node = heapq.heappop(gains)
solution = [node]
#score = -score
c_score = c_score - (-score_gain)
#print("{} + {} + {}".format(node, c_score, -score_gain))
scores.append(c_score)
# record the number of times the spread is computed
lookups = [self.vNum]
for _ in range(seedSize - 1):
node_lookup = 0
matched = False
while not matched:
node_lookup += 1
# here we need to compute the marginal gain of adding the current node
# to the solution, instead of just the gain, i.e. we need to subtract
# the spread without adding the current node
_, current_node = heapq.heappop(gains)
score_gain = self.spreadMulti_n0(a, solution + [current_node], simutimes) - c_score
# check if the previous top node stayed on the top after pushing
# the marginal gain to the heap
heapq.heappush(gains, (-score_gain, current_node))
matched = gains[0][1] == current_node
#print(node_lookup)
# spread stores the cumulative spread
score_gain, node = heapq.heappop(gains)
c_score = c_score - score_gain
solution.append(node)
#print("{} + {} + {}".format(node, c_score, -score_gain))
scores.append(c_score)
lookups.append(node_lookup)
return solution, scores, lookups
class DiffusionGraph(object):
'''
class Node(object):
def __init__(self,index):
self.index = index
self.neighbor = {}
def print(self):
for node in self.neighbor:
print(str(self.index)+" "+str(node)+" "+str(self.neighbor[node]))
'''
def __init__(self, path_graph, path_distance, vNum):
self.tranTimes={}
self.distance = {}
self.nodes=set()
self.vNum = vNum
for v in range(self.vNum):
#node = self.Node(str(v))
#node.neighbor={}
neighbor_1={}
self.tranTimes[str(v)]=neighbor_1
neighbor_2={}
self.distance[str(v)]=neighbor_2
self.nodes.add(str(v))
file1 = open(path_graph, 'r')
while True:
line = file1.readline()
if not line:
break
strings = line.split()
node1 = (strings[0])
node2 = (strings[1])
time = float(strings[2])
if node1 in self.tranTimes:
self.tranTimes[node1][node2]=time
else:
sys.exit("non existing node")
if node2 not in self.nodes:
sys.exit("non existing node")
file1 = open(path_distance, 'r')
while True:
line = file1.readline()
if not line:
break
strings = line.split()
node1 = (strings[0])
node2 = (strings[1])
time = float(strings[2])
if node1 in self.distance:
self.distance[node1][node2]=time
else:
sys.exit("non existing node")
if node2 not in self.nodes:
sys.exit("non existing node")
def print(self):
#for node in self.nodes:
#print(self.tranTimes[node])
for node in self.nodes:
print(self.distance[node])
def spread(self, seedSet_x, seedSet_y, getCover=False):
'''return # of 0-active nodes'''
tstate = {} # current state
fstate = {} # final time
tTime = dict() # best time
actTime = [] # all updated time
for x in seedSet_x:
tstate[x]=0
heapq.heappush(actTime, (0, x))
tTime[x]=0.0
for y in seedSet_y:
if y not in seedSet_x:
try:
tstate[y]=1
heapq.heappush(actTime, (0, y))
except:
print(y)
print(seedSet_y)
input("Press Enter to continue...")
tTime[y]=0.0
#print(tTime)
while len(actTime)>0:
current_node_time, current_node = heapq.heappop(actTime)
if current_node not in fstate:
if current_node_time != tTime[current_node]:
sys.exit("current_node_time != tTime[current_node]")
fstate[current_node]=current_node_time
self.spreadLocal(tstate, fstate, actTime, tTime, current_node, current_node_time)
count = 0
cover={}
for x in tstate:
if tstate[x]==0:
count += 1
cover[x]=tTime[x]
#print(self.vNum-count)
if getCover:
return count, cover
else:
return count
def spreadLocal(self,tstate, fstate, actTime, tTime, current_node, current_node_time):
#print(tTime)
#print(self.nodes[current_node].neighbor)
for to_node in self.tranTimes[current_node]:
tranTime=self.tranTimes[current_node][to_node]
if to_node in fstate:
pass
else:
new_time = current_node_time+ tranTime
if to_node in tstate:
if new_time <tTime[to_node]:
tTime[to_node]=new_time
tstate[to_node]=tstate[current_node]
heapq.heappush(actTime, (new_time , to_node))
if new_time == tTime[to_node]:
if tstate[current_node]==0:
tstate[to_node]=0
if to_node not in tstate:
# print(tTime)
tTime[to_node]=new_time
tstate[to_node]=tstate[current_node]
heapq.heappush(actTime, (new_time, to_node))
def getDistance(self, oneset, node):
distance = sys.maxsize
for x in oneset:
if node in self.distance[x]:
if self.distance[x][node]<distance:
distance=self.distance[x][node]
return distance
#class Edge(object):
# pass
class InputInstance(object):
def __init__(self, socialGraphPath, featurePath, featureNum, vNum, effectAreaHotNum, balance_para, loss_type,
featureRandom = False, maxFeatureNum = 500, thread = 1, LAI_method = None, indexes=None):
#self.graphs=[];
self.featureNum = featureNum
self.vNum = vNum
self.socialGraph = SocialGraph(socialGraphPath, vNum);
#self.socialGraph.print()
#print(self.socialGraph.nodes)
self.effectAreaHotNum = effectAreaHotNum;
self.balance_para = balance_para
self.thread=thread
self.LAI_method = LAI_method
        # default to no configured loss; only the "hamming" loss carries a weight
        self.loss_type = None
        self.hammingWeight = None
        if loss_type != None:
            self.loss_type = loss_type.name
            if loss_type.name == "hamming":
                self.hammingWeight = loss_type.weight
self.featureRandom = featureRandom
self.maxFeatureNum = maxFeatureNum
# self.readFeatures(path, featureNum)
# read social graph
#read features
self.diffusionGraphs = [];
if indexes != None:
lineNums=indexes
self.featureIndexes=lineNums
#print("lineNums: {}".format(lineNums))
for i in lineNums:
path_graph="{}{}_graph.txt".format(featurePath, i)
path_distance="{}{}_distance.txt".format(featurePath, i)
diffusionGraph = DiffusionGraph(path_graph,path_distance, vNum)
self.diffusionGraphs.append(diffusionGraph)
else:
if self.featureRandom:
lineNums=(np.random.permutation(maxFeatureNum))[0:featureNum]
self.featureIndexes=lineNums
#print("lineNums: {}".format(lineNums))
for i in lineNums:
path_graph="{}{}_graph.txt".format(featurePath, i)
path_distance="{}{}_distance.txt".format(featurePath, i)
diffusionGraph = DiffusionGraph(path_graph,path_distance, vNum)
self.diffusionGraphs.append(diffusionGraph)
else:
for i in range(featureNum):
path_graph="{}{}_graph.txt".format(featurePath, i)
path_distance="{}{}_distance.txt".format(featurePath, i)
diffusionGraph = DiffusionGraph(path_graph,path_distance, vNum)
self.diffusionGraphs.append(diffusionGraph)
#diffusionGraph.print()
def computeFeature(self, x, y):
feature = [];
for graph in self.diffusionGraphs:
feature.append(self.computeScoreOneGraph(x, y, graph))
return np.array(feature)
def computeScore(self, x, y, w):
feature = self.computeFeature(x, y)
return w.dot(feature)
def computeScoreOneGraph(self, x, y, graph):
'''compute f^g(M,P)'''
return self.vNum-graph.spread(x, y)
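    # In other words, the model's score is a weighted sum of per-graph "saved node"
    # counts: score(x, y; w) = sum_g w_g * (|V| - spread_g(x, y)), where spread_g
    # counts the nodes that end up attacker-active in deterministic graph g.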
def inference(self, x, w):
#print("inference")
start_time = time.time()
c_score = self.computeScore(x, [ ], w)
#print("Initial: {}".format(c_score))
scores = [c_score]
gains = []
for node in range(self.vNum):
gain = self.computeScore(x, [str(node)], w) - c_score
#print(gain);
heapq.heappush(gains, (-gain, str(node)))
score_gain, node = heapq.heappop(gains)
solution = [node]
#score = -score
c_score = c_score - (score_gain)
#print("{} {}".format(node, -score_gain))
#print("{} + {} + {}".format(node, c_score, -score_gain))
scores.append(c_score)
# record the number of times the spread is computed
lookups = [self.vNum]
elapsed = [round(time.time() - start_time, 3)]
for _ in range(len(x) - 1):
node_lookup = 0
matched = False
while not matched:
node_lookup += 1
# here we need to compute the marginal gain of adding the current node
# to the solution, instead of just the gain, i.e. we need to subtract
# the spread without adding the current node
_, current_node = heapq.heappop(gains)
score_gain = self.computeScore(x, solution + [current_node], w) - c_score
# check if the previous top node stayed on the top after pushing
# the marginal gain to the heap
heapq.heappush(gains, (-score_gain, current_node))
matched = gains[0][1] == current_node
#print(node_lookup)
# spread stores the cumulative spread
score_gain, node = heapq.heappop(gains)
c_score = c_score - score_gain
solution.append(node)
#print("{} {}".format(node, -score_gain))
#print("{} + {} + {}".format(node, c_score, -score_gain))
scores.append(c_score)
lookups.append(node_lookup)
elapse = round(time.time() - start_time, 3)
elapsed.append(elapse)
return solution, scores, elapsed, lookups
def inferenceRandom(self,x,w):
solution = []
nodeSet = []
for i in range(self.vNum):
nodeSet.append(str(i))
while len(solution) < len(x):
node = random.choice(nodeSet)
if node not in x:
solution.append(node)
#print(self.computeScore(x, solution , w) )
return solution
def loss(self, y, y_hat):
if self.loss_type == None:
sys.exit("loss method not speficied.")
if self.loss_type == "area":
return self.similarity(y, y)-self.similarity(y, y_hat)
if self.loss_type == "hamming":
if y == y_hat:
return 0
else:
if self.hammingWeight == None:
sys.exit("hammingWeight == None")
return self.hammingWeight
def similarity(self, x, y):
set1 = self.socialGraph.getNeighborsByHot(x, 1)
set2 = self.socialGraph.getNeighborsByHot(y, 1)
return len(set1.intersection(set2))
def loss_augmented_inference(self, x, y ,w):
if self.loss_type == None:
sys.exit("loss_augmented_inference method not speficied.")
if self.loss_type == "area":
if self.LAI_method == "greedy":
return self.loss_augmented_inference_area_greedy(x, y, w)
if self.LAI_method == "lazy":
return self.loss_augmented_inference_area_greedy_lazy(x, y, w)
if self.LAI_method == "fastLazy":
return self.loss_augmented_inference_area_greedy_lazy_fast(x, y, w)
if self.LAI_method == "fastGreedy":
return self.loss_augmented_inference_area_greedy_fast(x, y, w)
if self.loss_type == "hamming":
return self.loss_augmented_inference_hamming(x, y, w)
def loss_augmented_inference_objective(self, x, y, y_pre, w):
inference = self.computeScore(x,y_pre,w)
loss = self.loss(y,y_pre)
#print("{} + {}".format(inference, loss))
return inference+self.balance_para*loss
def loss_augmented_inference_area_greedy(self, x, y ,w):
#print("loss_augmented_inference_greedy")
solution = set()
for i in range(len(x)):
c_value = 0
c_index = None
for v in range(self.vNum):
value = self.loss_augmented_inference_objective(x, y, solution.union({str(v)}), w)
if value>=c_value:
c_index=str(v)
c_value=value
#print("{} {}".format(c_index,c_value-self.loss_augmented_inference_objective(x, y, solution, w)))
solution.add(c_index)
#print(solution)
#print(self.loss_augmented_inference_objective(x, y, solution, w))
return solution
def loss_augmented_inference_area_greedy_fast(self, x, y ,w):
#print("loss_augmented_inference_greedy_fast")
solution = set()
c_cover = []
temp = []
for graph in self.diffusionGraphs:
tempp, c_coverOneGraph=graph.spread(x,{}, getCover=True)
c_cover.append(c_coverOneGraph)
temp.append(tempp)
#print(c_coverOneGraph)
for i in range(len(x)):
c_value = 0
c_index = None
t_cover = {};
for v in range(self.vNum):
value, node_cover = self.loss_augmented_inference_fast_scoreGain(x, y, solution, {str(v)}, w, c_cover)
if value>=c_value:
c_index=str(v)
c_value=value
t_cover = node_cover
#print("{} {}".format(c_index,c_value))
solution.add(c_index)
c_cover = t_cover
#print(solution)
#print(self.loss_augmented_inference_objective(x, y, solution, w))
return solution
def loss_augmented_inference_hamming(self, x, y ,w):
y1, scores,_,_ = self.inference(x, w)
score2 = self.computeScore(x, y, w)
if scores[-1]+self.loss(y, y1) > score2:
return y1
else:
return y
def loss_augmented_inference_area_greedy_lazy(self, x, y ,w):
#print("loss_augmented_inference_area_fakeLazeGreedy")
solution = set()
gains = []
for node in range(self.vNum):
gain = self.loss_augmented_inference_objective(x, y, solution.union({str(node)}), w)
#print(gain);
heapq.heappush(gains, (-gain, str(node)))
score_gain, node = heapq.heappop(gains)
solution.add(node)
c_score = -score_gain
#print("{} {}".format(node, -score_gain))
for _ in range(len(x) - 1):
matched = False
while not matched:
_, current_node = heapq.heappop(gains)
score_gain = self.loss_augmented_inference_objective(x, y, solution.union({current_node}), w) - c_score
heapq.heappush(gains, (-score_gain, current_node))
matched = gains[0][1] == current_node
score_gain, node = heapq.heappop(gains)
c_score = c_score - score_gain
solution.add(node)
#print("{} {}".format(node, -score_gain))
#print(self.loss_augmented_inference_objective(x, y, solution, w))
return solution
def loss_augmented_inference_area_greedy_lazy_fast(self, x, y ,w):
#print("loss_augmented_inference_area_fast")
solution = set()
gains = []
c_cover = []
temp = []
for graph in self.diffusionGraphs:
tempp, c_coverOneGraph=graph.spread(x,{}, getCover=True)
c_cover.append(c_coverOneGraph)
temp.append(tempp)
#print(c_coverOneGraph)
for node in range(self.vNum):
gain, node_cover = self.loss_augmented_inference_fast_scoreGain(x, y, solution, {str(node)}, w, c_cover)
#input("Press Enter to continue...")
#print(-gain)
heapq.heappush(gains, (-gain, str(node), node_cover))
#
score_gain, node, node_cover = heapq.heappop(gains)
solution.add(node)
c_score = -score_gain
#print("{} {}".format(node, -score_gain))
c_cover=node_cover
for _ in range(len(x) - 1):
matched = False
while not matched:
_, current_node, _ = heapq.heappop(gains)
score_gain, new_cover = self.loss_augmented_inference_fast_scoreGain(x, y, solution, {str(current_node)}, w, c_cover)
#if score_gain <0:
#print("score_gain {}".format(score_gain))
heapq.heappush(gains, (-score_gain, current_node, new_cover))
matched = gains[0][1] == current_node
score_gain, node, c_cover = heapq.heappop(gains)
c_score = c_score - score_gain
solution.add(node)
#print("{} {}".format(node, -score_gain))
#print(self.loss_augmented_inference_objective(x, y, solution, w))
return solution
def loss_augmented_inference_fast_scoreGain(self, x, y, current, newset, w, c_cover):
inferenceGain, new_cover = self.computeScoreGain(x, y, current, newset, w, c_cover)
lossGain = self.loss(y,current.union(newset))-self.loss(y,current)
#print("{} + {}".format(inference, loss))
return inferenceGain+self.balance_para*lossGain, new_cover
def computeScoreGain(self, x, y,current, newset, w, c_cover):
scoreGain = []
new_cover = []
#print(c_cover)
for graph, c_coverOneGraph in zip(self.diffusionGraphs, c_cover):
gain, newcoverOneGraph=self.computeScoreGainOneGraph(x, y,current, newset, c_coverOneGraph, graph)
#print("gain {}".format(gain))
scoreGain.append(gain)
new_cover.append(newcoverOneGraph)
#print("scoreGain {}".format(scoreGain))
return w.dot(np.array(scoreGain)), new_cover
def computeScoreGainOneGraph(self, x, y, current, newset, c_coverOneGraph, graph):
dnames=[]
newcoverOneGraph=c_coverOneGraph.copy()
for node in c_coverOneGraph:
#print(c_coverOneGraph[node])
if graph.getDistance(newset,node)<c_coverOneGraph[node]:
#input("Press Enter to continue...")
dnames.append(node)
for name in dnames:
del newcoverOneGraph[name]
#if len(c_coverOneGraph) != len(newcoverOneGraph):
# print("!!!! {} {}".format(len(c_coverOneGraph),len(newcoverOneGraph)))
return len(c_coverOneGraph)-len(newcoverOneGraph), newcoverOneGraph
def testInfluence_0(self, x, y, times, thread):
if thread>1:
return self.socialGraph.spreadMulti_P(x,y,times, thread)
else:
return self.socialGraph.spreadMulti(x,y,times)
def testInfluence_0_block(self, X, times, Y = None):
result = []
if Y == None:
for x in X:
result.append(self.socialGraph.spreadMulti(x,{},times))
else:
for x, y in zip(X,Y):
result.append(self.socialGraph.spreadMulti(x,y,times))
return result
class StratLearn(StructuredModel):
"""Interface definition for Structured Learners.
This class defines what is necessary to use the structured svm.
You have to implement at least joint_feature and inference.
"""
def __repr__(self):
return ("%s, size_joint_feature: %d"
% (type(self).__name__, self.size_joint_feature))
def __init__(self):
"""Initialize the model.
Needs to set self.size_joint_feature, the dimensionality of the joint
features for an instance with labeling (x, y).
"""
self.size_joint_feature = None
def _check_size_w(self, w):
if w.shape != (self.size_joint_feature,):
raise ValueError("Got w of wrong shape. Expected %s, got %s" %
(self.size_joint_feature, w.shape))
def initialize(self, X, Y, instance):
# set any data-specific parameters in the model
#self.featureNum = instance.featureNum
self.size_joint_feature= instance.featureNum
self.instance = instance
self.inference_calls = 0
#if self.n_features is None:
# self.n_features = n_features
#elif self.n_features != n_features:
# raise ValueError("Expected %d features, got %d"
# % (self.n_features, n_features))
#n_labels = Y.shape[1]
#if self.n_labels is None:
# self.n_labels = n_labels
#elif self.n_labels != n_labels:
# raise ValueError("Expected %d labels, got %d"
# % (self.n_labels, n_labels))
#self._set_size_joint_feature()
#self._set_class_weight()
pass
"""
def joint_feature(self, x, y):
raise NotImplementedError()
"""
def joint_feature(self, x, y):
return self.instance.computeFeature(x,y)
'''
feature = np.zeros(self.featureNum)
index = 0
for graph in self.instance.graphs:
distance_matrix = np.zeros( (2, 3) )
for v in range(self.instance.nNUm):
x_min=sys.maxsize
for u in x:
if distance_matrix[v][u]<x_min:
x_min=distance_matrix[v][u]
y_min=sys.maxsize
for u in y:
if distance_matrix[v][u]<y_min:
y_min=distance_matrix[v][u]
if y_min<x_min:
feature[index] += 1
index += 1
return feature
'''
def batch_joint_feature(self, X, Y, Y_true=None):
#print("batch_joint_feature running")
joint_feature_ = np.zeros(self.size_joint_feature)
if getattr(self, 'rescale_C', False):
for x, y, y_true in zip(X, Y, Y_true):
joint_feature_ += self.joint_feature(x, y, y_true)
else:
for x, y in zip(X, Y):
joint_feature_ += self.joint_feature(x, y)
#print("batch_joint_feature done")
return joint_feature_
def _loss_augmented_djoint_feature(self, x, y, y_hat, w):
# debugging only!
x_loss_augmented = self.loss_augment(x, y, w)
return (self.joint_feature(x_loss_augmented, y)
- self.joint_feature(x_loss_augmented, y_hat))
def inference_block(self, X, w,relaxed=None, constraints=None):
Y = []
for x in X:
Y.append(self.inference(x, w, relaxed, constraints))
return Y
def inference(self, x, w, relaxed=None, constraints=None):
self.inference_calls += 1
solution,_,_,_ = self.instance.inference(x,w)
return solution
#raise NotImplementedError()
def batch_inference(self, X, w, relaxed=None, constraints=None):
# default implementation of batch inference
if constraints:
return [self.inference(x, w, relaxed=relaxed, constraints=c)
for x, c in zip(X, constraints)]
return [self.inference(x, w, relaxed=relaxed)
for x in X]
def loss(self, y, y_hat):
'''
# hamming loss:
if isinstance(y_hat, tuple):
return self.continuous_loss(y, y_hat[0])
if hasattr(self, 'class_weight'):
return np.sum(self.class_weight[y] * (y != y_hat))
return np.sum(y != y_hat)
'''
return self.instance.loss(y,y_hat)
def batch_loss(self, Y, Y_hat):
# default implementation of batch loss
return [self.loss(y, y_hat) for y, y_hat in zip(Y, Y_hat)]
def max_loss(self, y):
# maximum possible los on y for macro averages
sys.exit("max_loss not implemented")
if hasattr(self, 'class_weight'): return np.sum(self.class_weight[y])
return y.size
def continuous_loss(self, y, y_hat):
# continuous version of the loss
# y is the result of linear programming
sys.exit("continuous_loss not implemented")
if y.ndim == 2:
raise ValueError("FIXME!")
gx = np.indices(y.shape)
# all entries minus correct ones
result = 1 - y_hat[gx, y]
if hasattr(self, 'class_weight'):
return np.sum(self.class_weight[y] * result)
return np.sum(result)
def loss_augmented_inference(self, x, y, w, relaxed=None):
#print("FALLBACK no loss augmented inference found")
#return self.inference(x, w)
#print("loss_augmented_inference RUNNING")
self.inference_calls += 1
y_pre = self.instance.loss_augmented_inference(x,y,w)
#print("loss_augmented_inference DONE")
return y_pre
def loss_augmented_inference_block(self, X, Y, w, relaxed=None):
#print("FALLBACK no loss augmented inference found")
#return self.inference(x, w)
#print("loss_augmented_inference RUNNING")
self.inference_calls += len(X)
result =[]
for x, y in zip(X,Y):
result.append(self.instance.loss_augmented_inference(x,y,w))
return result
def batch_loss_augmented_inference(self, X, Y, w, relaxed=None):
#sys.exit("batch_loss_augmented_inference not implemented")
# default implementation of batch loss augmented inference
return [self.loss_augmented_inference(x, y, w, relaxed=relaxed)
for x, y in zip(X, Y)]
def _set_class_weight(self):
sys.exit("_set_class_weight not implemented")
if not hasattr(self, 'size_joint_feature'):
# we are not initialized yet
return
if hasattr(self, 'n_labels'):
n_things = self.n_labels
else:
n_things = self.n_states
if self.class_weight is not None:
if len(self.class_weight) != n_things:
raise ValueError("class_weight must have length n_states or"
" be None")
self.class_weight = np.array(self.class_weight)
self.uniform_class_weight = False
else:
self.class_weight = np.ones(n_things)
self.uniform_class_weight = True
|
{"hexsha": "6d1aebda74ebfcd2a54d17bbc3f5756c528ca372", "size": 47096, "ext": "py", "lang": "Python", "max_stars_repo_path": "StratLearner/stratLearner.py", "max_stars_repo_name": "cdslabamotong/stratLearner", "max_stars_repo_head_hexsha": "58f278d438eed92683a7daac2605ec39abd18c94", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-12-02T06:58:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T01:21:59.000Z", "max_issues_repo_path": "StratLearner/stratLearner.py", "max_issues_repo_name": "dm-ytlds/stratLearner", "max_issues_repo_head_hexsha": "3ad880a5ca0472a3a5823fa27db7dd2bc8ba0f33", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "StratLearner/stratLearner.py", "max_forks_repo_name": "dm-ytlds/stratLearner", "max_forks_repo_head_hexsha": "3ad880a5ca0472a3a5823fa27db7dd2bc8ba0f33", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-02T06:58:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T06:58:32.000Z", "avg_line_length": 37.2006319115, "max_line_length": 190, "alphanum_fraction": 0.5298326822, "include": true, "reason": "import numpy", "num_tokens": 10530}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 07:02:58 2020
@author: Sourabh Bhat ( https://spbhat.in/ )
"""
import numpy as np
import scipy.special
# Neural network class definition
class NeuralNetwork:
# initialize the neural network
def __init__(self, numInputNodes, numHiddenNodes, numOutputNodes, learningRate):
# set number of nodes in each input, hidden, output layer
self.inodes = numInputNodes
self.hnodes = numHiddenNodes
self.onodes = numOutputNodes
# learning rate
self.lr = learningRate
# link weight matrices, wih and who
# weights inside the arrays are w_i_j, where link is
# from node i to node j in the next layer
# w11 w21
# w12 w22 etc.
self.wih = np.random.normal(0.0, 1.0 / np.sqrt(self.inodes),
(self.hnodes, self.inodes))
self.who = np.random.normal(0.0, 1.0 / np.sqrt(self.hnodes),
(self.onodes, self.hnodes))
# activation function is the sigmoid function
self.activation_function = lambda x: scipy.special.expit(x)
pass
# train the neural network
def train(self, inputs_list, targets_list):
# convert lists to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate signals emerging from the final output layer
final_outputs = self.activation_function(final_inputs)
# output layer error is the (target - actual)
output_errors = targets - final_outputs
# hidden layer error is the output_errors, split by weights
# and recombined at hidden nodes
hidden_errors = np.dot(self.who.T, output_errors)
# update the weights for links between the hidden and output layers
self.who += self.lr * \
np.dot(output_errors * final_outputs * (1.0 - final_outputs),
hidden_outputs.T)
# update the weights for links between the input and hidden layers
self.wih += self.lr * \
np.dot(hidden_errors * hidden_outputs * (1.0 - hidden_outputs),
inputs.T)
pass
# query the neural network
def query(self, inputs_list):
# convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
# calculate signals into hidden layer
hidden_inputs = np.dot(self.wih, inputs)
# calculate the signals emerging from hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
# calculate signals into final output layer
final_inputs = np.dot(self.who, hidden_outputs)
# calculate signals emerging from the final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
if __name__ == "__main__":
nn = NeuralNetwork(3, 3, 3, 0.3)
print(nn.query([1, 2, 3]))
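    # Illustrative only (the targets below are made up): a single backprop step
    # nudges the output for [1, 2, 3] towards [0.9, 0.1, 0.1]; uncomment to try it.
    # nn.train([1, 2, 3], [0.9, 0.1, 0.1])
    # print(nn.query([1, 2, 3]))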
print("test")
|
{"hexsha": "1f4fb29a577be900f7e9bd1529fec28472ef3ec9", "size": 3467, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/python/nn/NeuralNetwork.py", "max_stars_repo_name": "heySourabh/NeuralNetwork", "max_stars_repo_head_hexsha": "aafc7081ce9db9a5422b418e9a1586d3b3df041f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-09-23T02:12:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-23T02:12:00.000Z", "max_issues_repo_path": "code/python/nn/NeuralNetwork.py", "max_issues_repo_name": "heySourabh/NeuralNetwork", "max_issues_repo_head_hexsha": "aafc7081ce9db9a5422b418e9a1586d3b3df041f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/python/nn/NeuralNetwork.py", "max_forks_repo_name": "heySourabh/NeuralNetwork", "max_forks_repo_head_hexsha": "aafc7081ce9db9a5422b418e9a1586d3b3df041f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1145833333, "max_line_length": 84, "alphanum_fraction": 0.6140755697, "include": true, "reason": "import numpy,import scipy", "num_tokens": 773}
|
import numpy as np
import Ray
array = np.array([
[
[ 0, 1, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[-2, 0,-2, 0],
],
])
oldarray = np.array([
[
[ 0, 0, 0, 0],
[ 0, 1, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
],
[
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0, 0, 0],
[ 0, 0,-1, 0],
],
])
array = np.array([
[
[ 0, 0, 0, 6],
[ 0, 2, 0, 2],
[ 0, 0, 0, 0],
[-6,-3, 0, 0],
],
])
array = np.array([
[
[-6, 0, 0, 0, 0, 6],
[ 0, 0, 0, 0, 2 ,0],
[ 0, 0, 0, 0, 3 ,0],
[ 0, 0, 2, 0, 0 ,0],
],
])
array = np.array([
[
[-6, 0, 0, 0, 0, 6],
[-4, 0,-1, 0, 0 ,0],
[ 0, 0, 0, 0, 3 ,0],
[ 0, 0,-3, 0, 0 ,0],
],
])
guy = Ray.Chess_AI(np.ascontiguousarray(array), 1, False)
moves = guy.get_moves()
print(guy.best_move(1))
if not moves:
if check:
print('CHECKMATE')
else:
print('DRAW')
else:
print(f'{len(moves)} moves available')
|
{"hexsha": "09fe78eb24be0c093591ecb4b9e115c513c5af5d", "size": 941, "ext": "py", "lang": "Python", "max_stars_repo_path": "c++ test.py", "max_stars_repo_name": "guille0/space-chess", "max_stars_repo_head_hexsha": "3e8a3c8c8b91fbcbc00fbb4b35596a3b2ad1a37c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2019-08-02T16:52:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-21T15:32:14.000Z", "max_issues_repo_path": "c++ test.py", "max_issues_repo_name": "guille0/space-chess", "max_issues_repo_head_hexsha": "3e8a3c8c8b91fbcbc00fbb4b35596a3b2ad1a37c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-01-02T07:44:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-02T07:44:22.000Z", "max_forks_repo_path": "c++ test.py", "max_forks_repo_name": "guille0/space-chess", "max_forks_repo_head_hexsha": "3e8a3c8c8b91fbcbc00fbb4b35596a3b2ad1a37c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-10T17:52:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T14:55:48.000Z", "avg_line_length": 14.2575757576, "max_line_length": 57, "alphanum_fraction": 0.3761955367, "include": true, "reason": "import numpy", "num_tokens": 501}
|
\documentclass{scarv-report}
\usepackage{scarvsoc}
\title{SCARV-SoC\\Technical Report and User Guide}
\date{Version $0.0.1$ (\today)}
\author{Ben Marshall}
\affil{
Department of Computer Science, University of Bristol,\\
Merchant Venturers Building, Woodland Road,\\
Bristol, BS8 1UB, United Kingdom.\\
\url{{ben.marshall}@bristol.ac.uk}
}
\begin{document}
% =============================================================================
\MKPROLOGUE
% =============================================================================
\section{Introduction}
\label{sec:intro}
\import{./tex/}{intro.tex}
\section{Features}
\label{sec:features}
\import{./tex/}{features.tex}
\section{Project Organisation}
\label{sec:organisation}
\import{./tex/}{organisation.tex}
\section{SoC Design}
\label{sec:design}
\import{./tex/}{design-overview.tex}
\subsection{Memory Map}
\label{sec:design:memory-map}
\import{./tex/}{design-memory-map.tex}
\subsection{SCARV-CPU}
\import{./tex/}{design-scarv-cpu.tex}
\subsection{Local Interconnect}
\label{sec:design:block:local-ic}
\import{./tex/}{design-local-ic.tex}
\subsection{Local Memories}
\import{./tex/}{design-local-mem.tex}
\subsection{AXI Bus Bridge}
\label{sec:design:axi-bridge}
\import{./tex/}{design-axi-bridge.tex}
\subsection{Xilinx Peripherals}
\import{./tex/}{design-xilinx.tex}
\section{Hardware Development Flows}
\label{sec:hw-flows}
\subsection{Getting Started}
\label{sec:flow:getting-started}
\import{./tex/}{flow-getting-started.tex}
\subsection{Make Flow Overview}
\import{./tex/}{flow-make.tex}
\subsection{Verilator Simulation}
\label{sec:flow:verilator}
\import{./tex/}{flow-verilator.tex}
\subsection{Symbiyosys Formal Verification Flow}
\import{./tex/}{flow-symbiyosys.tex}
\subsection{Yosys Synthesis}
\import{./tex/}{flow-yosys-synth.tex}
\subsection{Xilinx Vivado Project}
\import{./tex/}{flow-xilinx-vivado.tex}
\section{Software Development Guide}
\label{sec:sw-development}
\subsection{Selfchecking Tests}
\import{./tex/}{sw-selfcheck.tex}
\subsection{First Stage Boot Loader}
\label{sec:sw:fsbl}
\import{./tex/}{sw-fsbl.tex}
\subsection{Board Support Package}
\label{sec:sw:bsp}
\import{./tex/}{sw-bsp.tex}
\subsection{Example Programs}
\import{./tex/}{sw-examples.tex}
\subsection{Developing Programs}
\import{./tex/}{sw-development.tex}
\section{Acknowledgements}
This work has been supported in part by EPSRC via grant EP/R012288/1,
under the RISE (\url{http://www.ukrise.org}) programme.
% =============================================================================
\MKEPILOGUE
% =============================================================================
\end{document}
|
{"hexsha": "35e804cd5eaccbf804ca41e9ceb1302f9de2029e", "size": 2680, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/scarvsoc.tex", "max_stars_repo_name": "scottwedge/scarv-soc", "max_stars_repo_head_hexsha": "6e29e7a103a0b2ac67deb2701044332917230c27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/scarvsoc.tex", "max_issues_repo_name": "scottwedge/scarv-soc", "max_issues_repo_head_hexsha": "6e29e7a103a0b2ac67deb2701044332917230c27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/scarvsoc.tex", "max_forks_repo_name": "scottwedge/scarv-soc", "max_forks_repo_head_hexsha": "6e29e7a103a0b2ac67deb2701044332917230c27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5210084034, "max_line_length": 79, "alphanum_fraction": 0.6541044776, "num_tokens": 737}
|
#pragma once
#include <boost/multiprecision/cpp_dec_float.hpp>
#include <boost/multiprecision/cpp_int.hpp>
#include <boost/numeric/conversion/cast.hpp>
//typedef boost::multiprecision::cpp_dec_float_50 xmc_float;
typedef boost::multiprecision::number<boost::multiprecision::cpp_dec_float<64> > xmc_float;
typedef boost::multiprecision::cpp_int xmc_int;
typedef boost::multiprecision::uint128_t xmc_uint_128;
const xmc_float XMC_UINT = xmc_float(1000000000000.0);
const xmc_uint_128 XMC_INT_MAX = xmc_uint_128((uint64_t)10000000000000000000ull);
// Convert an amount given in atomic units (10^12 atomic units per coin, cf. XMC_UINT)
// into a double suitable for display. Amounts below XMC_INT_MAX (10^19) are converted
// through 64-bit integer arithmetic; larger amounts fall back to a 64-digit decimal float.
inline double xmc_int_to_double(xmc_int amount) {
xmc_uint_128 amount_128 = amount.convert_to<xmc_uint_128>();
//std::cout<<"uint128_t amount:" << amount_128 << std::endl;
if(amount_128 < XMC_INT_MAX)
{
uint64_t int_amount = amount_128.convert_to<uint64_t>();
//std::cout<< "amount < XMC_DEFAULT_DECIMAL :" << int_amount << std::endl;
double ret = int_amount / 1000000000000.0;
//std::cout<< "****** return value:" << ret << std::endl;
return ret;
}
//std::setprecision(std::numeric_limits<xmc_float>::max_digits10);
//std::cout<<"# XMC int to double ==> xmc_int:" << amount << std::endl;
//xmc_float amount_float = amount_128.convert_to<xmc_float>();
xmc_float amount_float = xmc_float(amount_128);
//std::cout<<"# XMC int to double ==> xmc_float:" << amount_float << std::endl;
xmc_float amount_xmc = amount_float / XMC_UINT;
//std::cout<<"#XMC int to double ==> amount_xmc:" << amount_xmc << std::endl;
double ret = amount_xmc.convert_to<double>();
//std::cout<<"#XMC int to double ==> result:" << ret << std::endl;
return ret;
}
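// Illustrative usage (hypothetical values, not part of the wallet code):
//   xmc_int atomic("1234500000000");             // 1.2345 coins in atomic units
//   double display = xmc_int_to_double(atomic);  // ~1.2345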
|
{"hexsha": "29987c6ad2100a67b3e57a8eeb29b885fbc7ff8b", "size": 1608, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/libwalletqt/xmc_int_to_double.hpp", "max_stars_repo_name": "toints/monero-GUI", "max_stars_repo_head_hexsha": "ea29f0ae0e1bc9e00b8a9b69679e302513c3dfcd", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-04-25T18:02:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-30T06:24:55.000Z", "max_issues_repo_path": "src/libwalletqt/xmc_int_to_double.hpp", "max_issues_repo_name": "toints/monero-GUI", "max_issues_repo_head_hexsha": "ea29f0ae0e1bc9e00b8a9b69679e302513c3dfcd", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/libwalletqt/xmc_int_to_double.hpp", "max_forks_repo_name": "toints/monero-GUI", "max_forks_repo_head_hexsha": "ea29f0ae0e1bc9e00b8a9b69679e302513c3dfcd", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9.0, "max_forks_repo_forks_event_min_datetime": "2018-04-25T01:34:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T01:55:13.000Z", "avg_line_length": 44.6666666667, "max_line_length": 91, "alphanum_fraction": 0.7232587065, "num_tokens": 466}
|
'''
Functions for preprocessing/transforming data between extraction from the database and input to the model.
'''
import numpy as np
import pandas as pd
import dataset
def encode_labels_str2int(data, y_col='family'):
'''
Create 'label' column in data_df that features integer values corresponding to text labels contained in y_col.
Arguments:
data: dataset.util.ResultIter, Should be the returned result from loading data from the leavesdb database (e.g. data = leavesdb.db_query.load_data(db)).
y_col: str, name of the columns containing text labels for each sample in data.
Returns:
data_df: pd.DataFrame, Contains 3 columns, one for paths, one for str labels, and one for int labels.
'''
data = pd.DataFrame(data)
data['label'] = pd.Categorical(data[y_col])
data['label'] = data['label'].cat.codes
return data
encode_labels = encode_labels_str2int #For backwards compatibility
def generate_encoding_map(data, text_label_col='family', int_label_col='label'):
'''
Returns a dictionary mapping integer labels to their corresponding text label
{0:'Annonaceae',
...
19:'Passifloraceae'}
'''
#Review the below TODO, may be unnecessary because a similar process is used in encode_labels_str2int
#TODO Potentially filter for unique text_label_col values instead, ensures the int representation remains the same in the case of changes.
int_labels, label_indices = np.unique(data[int_label_col], return_index=True)
text_labels = data[text_label_col].iloc[label_indices].values
return {int_label:text_label for int_label, text_label in zip(int_labels, text_labels)}
def one_hot_encode_labels(labels):
'''
Arguments:
labels, list(int): list of labels encoded in scalar integers
Returns:
encoded_labels, np.array: numpy array with shape = (num_samples, num_classes) and elements of value 0 or 1.
'''
num_samples = len(labels)
num_classes = np.max(labels)+1
encoded_labels = np.zeros((num_samples, num_classes))
for i, label in enumerate(labels):
encoded_labels[i,label] = 1
return encoded_labels
def one_hot_decode_labels(one_hot_labels):
'''
Arguments:
one_hot_labels, np.array: one_hot_encoded labels with features on axis=1 and samples on axis=0
Returns:
np.array: shape=(num_samples,1), integer values indicating label
'''
return np.argmax(one_hot_labels, axis=1).reshape(-1,1)
def get_class_counts(data_df, verbose=True):
labels, label_counts = np.unique(data_df['label'], return_counts=True)
if verbose:
print('label : count')
for label, count in zip(labels, label_counts):
print(label,' : ', count)
return labels, label_counts
def filter_low_count_labels(data_df, threshold=2, y_col='family', verbose = True):
'''
Function for omitting samples that belong to a class with a population size below the threshold. Used primarily for omitting classes with only 1 sample.
'''
data_df = encode_labels(data_df, y_col=y_col)
labels, label_counts = np.unique(data_df['label'], return_counts=True)
filtered_labels = np.where(label_counts >= threshold)[0]
filtered_data = data_df[data_df['label'].isin(filtered_labels)]
if verbose:
print(f"filter_low_count_labels(data_df, threshold={threshold}, y_col={y_col}, verbose = {verbose})")
print(f'Selecting only samples that belong to a class with population >= {threshold} samples')
print(f'Previous num_classes = {len(label_counts)}, new num_classes = {len(filtered_labels)}')
print(f'Previous data_df.shape = {data_df.shape}, new data_df.shape = {filtered_data.shape}')
return filtered_data
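if __name__ == '__main__':
    # Minimal sketch with made-up records (not data from the leavesdb database),
    # illustrating the label-encoding helpers defined above.
    demo = pd.DataFrame({'path': ['a.jpg', 'b.jpg', 'c.jpg'],
                         'family': ['Annonaceae', 'Fagaceae', 'Annonaceae']})
    demo = encode_labels_str2int(demo)
    print(generate_encoding_map(demo))                # {0: 'Annonaceae', 1: 'Fagaceae'}
    print(one_hot_encode_labels(demo['label'].values))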
|
{"hexsha": "2c9efd9c725bd0cb695340aa4906107920057cdd", "size": 3752, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyleaves/data_pipeline/.ipynb_checkpoints/preprocessing-checkpoint.py", "max_stars_repo_name": "JacobARose/pyleaves", "max_stars_repo_head_hexsha": "27b4016c850148981f3d021028c9272f18df121d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-25T14:50:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-27T06:54:46.000Z", "max_issues_repo_path": "pyleaves/data_pipeline/.ipynb_checkpoints/preprocessing-checkpoint.py", "max_issues_repo_name": "JacobARose/pyleaves", "max_issues_repo_head_hexsha": "27b4016c850148981f3d021028c9272f18df121d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-11-21T06:24:37.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-19T14:49:14.000Z", "max_forks_repo_path": "pyleaves/data_pipeline/.ipynb_checkpoints/preprocessing-checkpoint.py", "max_forks_repo_name": "JacobARose/pyleaves", "max_forks_repo_head_hexsha": "27b4016c850148981f3d021028c9272f18df121d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4947368421, "max_line_length": 160, "alphanum_fraction": 0.7158848614, "include": true, "reason": "import numpy", "num_tokens": 847}
|
import numpy as np
import matplotlib.pyplot as pl
import h5py
import platform
import os
import pickle
import seaborn as sns
import json
from ipdb import set_trace as stop
class plotDNN(object):
def __init__(self, root, noise):
self.root = root
self.noise = noise
self.dataFile = "/scratch1/deepLearning/DNMilne/database/database_BigBear.h5"
f = h5py.File(self.dataFile, 'r')
self.pars = f.get("parameters")
self.nModels, _ = self.pars.shape
self.lower = np.min(self.pars, axis=0)
self.upper = np.max(self.pars, axis=0)
self.nTraining = int(self.nModels * 0.9)
self.nValidation = int(self.nModels * 0.1)
def readDNNResults(self):
with open("{0}_{1}_prob.pkl".format(self.root, self.noise), "rb") as outfile:
self.prob = pickle.load(outfile)
def plotLoss(self):
labels = ['out_BField', 'out_theta', 'out_chi', 'out_vmac', 'out_a', 'out_B0', 'out_B1', 'out_doppler', 'out_kl']
labelsTxt = ['B', 'theta', 'chi', 'vmac', 'a', 'B0', 'B1', 'doppler', 'kl']
with open("{0}_loss.json".format(self.root), 'r') as f:
tmp = json.load(f)
n = len(tmp)
loss = np.zeros((n,5,2))
for i in range(5):
for j in range(n):
loss[j,i,0] = tmp[j]['{0}_acc'.format(labels[i])]
loss[j,i,1] = tmp[j]['val_{0}_acc'.format(labels[i])]
pl.close('all')
f, ax = pl.subplots(ncols=2, nrows=1, figsize=(12,6))
for i in range(5):
ax[0].plot(loss[:,i,0], label=labelsTxt[i])
ax[0].legend()
ax[0].set_title('Training set')
for i in range(5):
ax[1].plot(loss[:,i,1], label=labelsTxt[i])
ax[1].legend()
ax[1].set_title('Validation set')
pl.tight_layout()
stop()
def plot(self):
pl.close('all')
f, ax = pl.subplots(nrows=3, ncols=3, figsize=(12,10))
ax = ax.flatten()
labelsTxt = ['B [G]', r'$\theta_B$', r'$\phi_B$', r'$v_\mathrm{mac}$', 'a', 'B$_0$', 'B$_1$', r'$\Delta \lambda_D$ [m$\AA$]', r'$\eta$']
cmap = sns.color_palette()
for i in range(9):
prob = self.prob[i][0:1000,:]
nCases, nClasses = prob.shape
x = self.pars[self.nTraining:self.nTraining+1000,i][:,None] * np.ones((1,nClasses))
# x = self.pars[0:1000,i][:,None] * np.ones((1,nClasses))
y = np.linspace(self.lower[i], self.upper[i], nClasses)[None,:] * np.ones((nCases,1))
rgba = np.zeros((nCases,nClasses,4))
rgba[:,:,0:3] = cmap[0]
rgba[:,:,3] = prob * 0.1
x = x.reshape((nCases*nClasses))
y = y.reshape((nCases*nClasses))
rgba = rgba.reshape((nCases*nClasses,4))
ax[i].scatter(x, y, color=rgba)
ax[i].set_xlabel("Original {0}".format(labelsTxt[i]))
ax[i].set_ylabel("Recovered {0}".format(labelsTxt[i]))
ax[i].set_xlim([self.lower[i], self.upper[i]])
ax[i].set_ylim([self.lower[i], self.upper[i]])
pl.tight_layout()
pl.savefig("{0}_{1}_comparison.png".format(self.root, self.noise))
if (__name__ == '__main__'):
root = 'cnns/bigbear2'
noise = 1e-4
out = plotDNN(root, noise)
# out.plotLoss()
out.readDNNResults()
out.plot()
#
|
{"hexsha": "72fd856b84384845e21adabc22688e3860c0c5e0", "size": 3422, "ext": "py", "lang": "Python", "max_stars_repo_path": "DNMilne/training/doPlot.py", "max_stars_repo_name": "aasensio/DeepLearning", "max_stars_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DNMilne/training/doPlot.py", "max_issues_repo_name": "aasensio/DeepLearning", "max_issues_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DNMilne/training/doPlot.py", "max_forks_repo_name": "aasensio/DeepLearning", "max_forks_repo_head_hexsha": "71838115ce93e0ca96c8314cff3f07de1d64c235", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5, "max_line_length": 144, "alphanum_fraction": 0.5388661601, "include": true, "reason": "import numpy", "num_tokens": 996}
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 10:52:36 2015
Description:
@author: sacha gobeyn (sacha.gobeyn@ugent.be or sachagobeyn@gmail.com)
"""
import pandas as pd
import numpy as np
def load_and_preproces_data(inputdata,taxon,filter_parameters,variables,res,nan_value):
""" Load data and variables list
Parameters
----------
'inputdata' (str): name of inputdata
'variables' (str): name of variables
'taxon' (str): name of the taxon
'res' (str): name of directory to write output
Returns
-------
'inputdata' (pandas df): Biological and environmental measurements
columns: ["ID","taxon","abundance","variable","value"]
'variables' (pandas df): List of considered variables
columns: ["variable","name_sim","consider"]
"""
"Load inputdata"
[inputdata,variables,filter_parameters] = load_data([inputdata,variables,filter_parameters])
variables = variables["name_sim"][variables["consider"]==1].unique().tolist()
"Fix nan in inputdata"
inputdata = fix_nan(inputdata,nan_value)
"Extract data for considered taxon and variables"
inputdata = extract(inputdata,variables,taxon)
from resample import resample_data
" In earlier versions of code, automatic resampling option was available"
" However, now resampling should be done outside SDMIT"
"Part of code is left to assign unique ID's to each biological sample"
resample = "False"
inputdata = resample_data(inputdata,resample)
"Extract parameters for considered variables"
filter_parameters = filter_parameters[filter_parameters["variable"].isin(variables)]
return inputdata,filter_parameters,variables
def load_data(files):
"""Load multiple ".csv" files in pandas dataframes
Parameters
----------
'files' (list): Files (str) which should be read to pandas dataframes
Returns
-------
'data' (list): Pandas dataframes
"""
data = [0]*len(files)
for i in range(len(files)):
data[i] = pd.read_csv(files[i],encoding = "ISO-8859-1")
return data
def extract(inputdata,variables,taxon):
"""Extract/filter inputdata for the specified variables and taxon
Parameters
----------
'inputdata' (pandas df): Biological and environmental measurements
columns: ["ID","taxon","abundance","variable","value"]
'variables' (pandas df): List of considered variables
columns: ["variable","name_sim',"consider"]
'taxon' (str): name of the taxon
Returns
-------
'inputdata' (pandas df): Biological and environmental measurements
columns: ["ID","taxon","abundance","variable","value"]
"""
"Extract data for taxon and variables "
inputdata = inputdata[inputdata["variable"].isin(variables)]
inputdata = inputdata[inputdata["taxon"]==taxon]
"Remove NaN values"
inputdata = inputdata[~inputdata["value"].isnull()]
#inputdata["value"] = inputdata["value"].astype(float)
return inputdata
def fix_nan(inputdata,nan_value):
"""insert nan_value for empty records in X, Y and date
Parameters
----------
'inputdata' (pandas df): Biological and environmental measurements
columns: ["ID","taxon","abundance","variable","value"]
'nan_value' (float): value for nan value
Returns
-------
'inputdata' (pandas df): Biological and environmental measurements
columns: ["ID","taxon","abundance","variable","value"]
"""
col = ["date","X","Y"]
for i in col:
if np.sum(inputdata[i].isnull())>0:
inputdata.loc[:,i] = inputdata.loc[:,i].fillna(-nan_value)
return inputdata
#
#def prepare_dynamic_grid(filter_parameters,inputdata,runs):
#
# un_var = filter_parameters["variable"].unique()
#
# filter_parameters["lower_b"] = 0.
# filter_parameters["upper_b"] = 0.
# for i in un_var:
#
# b2 = filter_parameters.loc[filter_parameters["variable"]==i,"b2"].values[0]
# data_i = inputdata["value"][(inputdata["variable"]==i)]
# filter_parameters.loc[filter_parameters["variable"]==i,"lower_b"] = Grid([np.percentile(data_i[inputdata["value"]>b2],i) for i in np.arange(0,runs,1)])
# filter_parameters.loc[filter_parameters["variable"]==i,"upper_b"] = Grid([np.percentile(data_i[inputdata["value"]<b2],i) for i in np.arange(0,runs,1)])
#
# return filter_parameters
#
#class Grid():
#
# def __init__(self,values):
#
# self.values = values
#
# def sample(self,n):
#
# return self.values[n]
#
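if __name__ == "__main__":
    # Minimal sketch with made-up records (not SDMIT data), illustrating fix_nan() and extract().
    demo = pd.DataFrame({"ID": [1, 1, 2], "taxon": ["Baetis", "Baetis", "Baetis"],
                         "abundance": [3, 3, 1],
                         "variable": ["temperature", "pH", "temperature"],
                         "value": [12.5, np.nan, 14.0],
                         "date": ["2015-10-09", None, "2015-10-09"],
                         "X": [0.0, 0.0, np.nan], "Y": [1.0, 1.0, 1.0]})
    demo = fix_nan(demo, nan_value=999)
    print(extract(demo, ["temperature"], "Baetis"))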
|
{"hexsha": "598ac050aa153d001fea7ecc3b3d4aab5d20fee0", "size": 5008, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/data_processing.py", "max_stars_repo_name": "Sachagobeyn/SDMIT", "max_stars_repo_head_hexsha": "86a5ca5f9e920528b967ac7976b6068f77c614bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/data_processing.py", "max_issues_repo_name": "Sachagobeyn/SDMIT", "max_issues_repo_head_hexsha": "86a5ca5f9e920528b967ac7976b6068f77c614bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/data_processing.py", "max_forks_repo_name": "Sachagobeyn/SDMIT", "max_forks_repo_head_hexsha": "86a5ca5f9e920528b967ac7976b6068f77c614bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-26T11:03:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-26T11:03:12.000Z", "avg_line_length": 33.8378378378, "max_line_length": 160, "alphanum_fraction": 0.5986421725, "include": true, "reason": "import numpy", "num_tokens": 1176}
|
[STATEMENT]
lemma \<psi>_im : "\<psi> ` GRepHomSet (\<star>) W \<subseteq> HRepHomSet (\<star>) W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<psi> ` GRepHomSet (\<star>) W \<subseteq> HRepHomSet (\<star>) W
[PROOF STEP]
using \<psi>T_W \<psi>T_hom FGModuleHomSetI
[PROOF STATE]
proof (prove)
using this:
?T \<in> GRepHomSet (\<star>) W \<Longrightarrow> \<psi> ?T ` indV \<subseteq> W
?T \<in> GRepHomSet (\<star>) W \<Longrightarrow> HRepHom (\<star>) (\<psi> ?T)
\<lbrakk>FGModuleHom ?G ?fgsmult ?V ?fgsmult' ?T; ?T ` ?V \<subseteq> ?W\<rbrakk> \<Longrightarrow> ?T \<in> FGModuleHomSet ?G ?fgsmult ?V ?fgsmult' ?W
goal (1 subgoal):
1. \<psi> ` GRepHomSet (\<star>) W \<subseteq> HRepHomSet (\<star>) W
[PROOF STEP]
by fastforce
|
{"llama_tokens": 302, "file": "Rep_Fin_Groups_Rep_Fin_Groups", "length": 2}
|
/*
* (C) Copyright 2015 ETH Zurich Systems Group (http://www.systems.ethz.ch/) and others.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributors:
* Markus Pilman <mpilman@inf.ethz.ch>
* Simon Loesing <sloesing@inf.ethz.ch>
* Thomas Etter <etterth@gmail.com>
* Kevin Bocksrocker <kevin.bocksrocker@gmail.com>
* Lucas Braun <braunl@inf.ethz.ch>
*/
#include <memory>
#include <array>
#include <unordered_map>
#include <unordered_set>
#include <thread>
#include <boost/asio.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/any.hpp>
#include <crossbow/logger.hpp>
#include <crossbow/program_options.hpp>
#include <crossbow/allocator.hpp>
#include <tellstore/ClientConfig.hpp>
#include <tellstore/ClientManager.hpp>
namespace ycsb {
using namespace boost::asio;
using error_code = boost::system::error_code;
namespace cmd {
constexpr int READ = 1;
constexpr int SCAN = 2;
constexpr int UPDATE = 3;
constexpr int INSERT = 4;
constexpr int DELETE = 5;
}
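// Wire format, as implied by Client::read() and the get*() helpers below (a summary of the
// existing code, not a separate protocol specification): each request starts with an int32
// giving the number of payload bytes that follow, then an int32 command code; the payload is
// a sequence of length-prefixed strings (int32 length + bytes) plus int32 counts for sets and
// maps. Responses are either a single status byte or length-prefixed values produced by
// ResultWriter. Note that SCAN is declared above but not dispatched in Client::read().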
struct ClientBuffer {
std::unique_ptr<char[]> buffer;
size_t size;
ClientBuffer(size_t sz)
: buffer(new char[sz])
, size(sz)
{}
    void grow() {
        std::unique_ptr<char[]> nBuffer(new char[size + 1024]);
        memcpy(nBuffer.get(), buffer.get(), size);
        nBuffer.swap(buffer);
        size += 1024;
    }
};
template<class Client, class Fun>
void read_all(size_t reqSize, size_t bt, size_t readTo, Client& client, Fun f) {
ClientBuffer& clientBuffer = client.buf();
if (bt - readTo >= reqSize) {
f(error_code(), bt, readTo);
return;
}
    // resize if necessary (grow until the whole request fits into the buffer)
    while (readTo + reqSize > clientBuffer.size) {
        clientBuffer.grow();
    }
client.socket().async_read_some(buffer(clientBuffer.buffer.get() + bt, clientBuffer.size - bt),
[reqSize, bt, readTo, &client, f](const error_code& ec, size_t numBytes){
if (ec) {
f(ec, numBytes + bt, readTo);
return;
}
ycsb::read_all(reqSize, bt + numBytes, readTo, client, f);
});
}
struct ResultWriter {
std::unique_ptr<char[]> data;
size_t size;
size_t offset;
ResultWriter(size_t sz)
: data(new char[sz])
, size(sz)
, offset(0)
{}
void grow() {
std::unique_ptr<char[]> nData(new char[size + 1024]);
memcpy(nData.get(), data.get(), size);
nData.swap(data);
size += 1024;
}
void write(size_t sz, const char* str) {
while (size - offset < sz + sizeof(int32_t)) {
grow();
}
*reinterpret_cast<int32_t*>(data.get() + offset) = int32_t(sz);
offset += sizeof(int32_t);
memcpy(data.get() + offset, str, sz);
offset += sz;
}
void write(const char* str) {
size_t len = *reinterpret_cast<const int32_t*>(str);
write(len, str + sizeof(int32_t));
}
void write(const crossbow::string& str) {
write(str.size(), str.data());
}
void write(int32_t value) {
while (size - offset < sizeof(value)) {
grow();
}
*reinterpret_cast<int32_t*>(data.get() + offset) = value;
offset += sizeof(value);
}
};
class Client : public std::enable_shared_from_this<Client> {
ip::tcp::socket mSocket;
tell::store::ClientManager<void>& mClientManager;
ClientBuffer mClientBuffer;
size_t mPrefixLength;
std::unordered_map<crossbow::string, tell::store::Table> mTableIds;
private: // commands
int32_t getInt(size_t& offset) {
auto res = *reinterpret_cast<int32_t*>(mClientBuffer.buffer.get() + offset);
offset += sizeof(int32_t);
return res;
}
crossbow::string getString(size_t& offset) {
int sz = getInt(offset);
crossbow::string res(mClientBuffer.buffer.get() + offset, sz);
offset += sz;
return res;
}
std::unordered_set<crossbow::string> getSet(size_t& offset) {
int num = getInt(offset);
std::unordered_set<crossbow::string> res;
res.reserve(num);
for (int i = 0; i < num; ++i) {
res.emplace(getString(offset));
}
return res;
}
std::unordered_map<crossbow::string, boost::any> getMap(size_t& offset) {
int num = getInt(offset);
std::unordered_map<crossbow::string, boost::any> result;
for (int i = 0; i < num; ++i) {
auto k = getString(offset);
auto v = getString(offset);
result.emplace(std::move(k), std::move(v));
}
return result;
}
tell::store::Table tableId(const crossbow::string& name, tell::store::ClientHandle& handle) {
auto iter = mTableIds.find(name);
if (iter != mTableIds.end()) {
return iter->second;
}
auto f = handle.getTable(name);
auto res = f->get();
mTableIds.emplace(name, res);
return res;
}
void doRead() {
auto self = shared_from_this();
mClientManager.execute([self](tell::store::ClientHandle& handle) {
size_t offset = 2*sizeof(int32_t);
auto tableName = self->getString(offset);
auto tId = self->tableId(tableName, handle);
auto keyStr = self->getString(offset);
uint64_t key = boost::lexical_cast<uint64_t>(keyStr.substr(self->mPrefixLength));
auto fields = self->getSet(offset);
auto resF = handle.get(tId, key);
auto res = resF->get();
auto& rec = tId.record();
ResultWriter result(1024);
if (fields.empty()) {
auto& fields = rec.schema().varSizeFields();
result.write(fields.size());
tell::store::GenericTuple tuple;
for (unsigned short i = 0; i < fields.size(); ++i) {
result.write(fields[i].name());
bool isNull;
tell::store::FieldType type;
const char* str = rec.data(res->data(), i, isNull, &type);
result.write(str);
}
} else {
result.write(int32_t(fields.size()));
}
for (auto& field : fields) {
result.write(field);
unsigned short fId;
#ifndef NDEBUG
assert(rec.idOf(field, fId));
#else
rec.idOf(field, fId);
#endif
bool isNull;
tell::store::FieldType type;
const char* str = rec.data(res->data(), fId, isNull, &type);
assert(type == tell::store::FieldType::TEXT);
result.write(str);
}
size_t size = result.offset;
char* resArr = result.data.release();
self->mSocket.get_io_service().post([self, size, resArr](){
async_write(self->mSocket, buffer(resArr, size), [self, resArr](const error_code& ec, size_t){
delete[] resArr;
if (ec) {
LOG_ERROR(ec.message());
return;
}
self->read();
});
});
});
}
void doInsert() {
auto self = shared_from_this();
mClientManager.execute([self](tell::store::ClientHandle& handle) {
size_t offset = 2*sizeof(int32_t);
auto tableName = self->getString(offset);
auto keyStr = self->getString(offset);
uint64_t key = boost::lexical_cast<uint64_t>(keyStr.substr(self->mPrefixLength));
auto values = self->getMap(offset);
auto tId = self->tableId(tableName, handle);
auto r = handle.insert(tId, key, uint64_t(1), values);
r->wait();
self->mSocket.get_io_service().post([self](){
self->mClientBuffer.buffer[0] = 0;
async_write(self->socket(), buffer(self->mClientBuffer.buffer.get(), 1), [self](const error_code& ec, size_t) {
if (ec) {
LOG_ERROR(ec.message());
return;
}
self->read();
});
});
});
}
void doUpdate() {
auto self = shared_from_this();
mClientManager.execute([self](tell::store::ClientHandle& handle) {
size_t offset = 2*sizeof(int32_t);
char errcode = 0;
auto tableName = self->getString(offset);
auto keyStr = self->getString(offset);
uint64_t key = boost::lexical_cast<uint64_t>(keyStr.substr(self->mPrefixLength));
auto values = self->getMap(offset);
auto tId = self->tableId(tableName, handle);
auto resF = handle.get(tId, key);
auto e = resF->error();
if (e) {
errcode = 2;
} else {
auto res = resF->get();
auto version = res->version();
auto& rec = tId.record();
auto& fields = rec.schema().varSizeFields();
tell::store::GenericTuple tuple;
for (unsigned short i = 0; i < fields.size(); ++i) {
if (values.count(fields[i].name()) == 0) {
bool isNull;
tell::store::FieldType type;
const char* str = rec.data(res->data(), i, isNull, &type);
values.emplace(fields[i].name(),
crossbow::string(str + sizeof(int32_t), *reinterpret_cast<const int32_t*>(str)));
}
}
auto r = handle.update(tId, key, version + 1, values);
r->wait();
}
self->mSocket.get_io_service().post([self, errcode](){
self->mClientBuffer.buffer[0] = errcode;
async_write(self->socket(), buffer(self->mClientBuffer.buffer.get(), 1), [self](const error_code& ec, size_t) {
if (ec) {
LOG_ERROR(ec.message());
return;
}
self->read();
});
});
});
}
void doDelete() {
auto self = shared_from_this();
mClientManager.execute([self](tell::store::ClientHandle& handle) {
size_t offset = 2*sizeof(int32_t);
auto tableName = self->getString(offset);
auto keyStr = self->getString(offset);
uint64_t key = boost::lexical_cast<uint64_t>(keyStr.substr(self->mPrefixLength));
auto tId = self->tableId(tableName, handle);
auto resF = handle.get(tId, key);
auto tuple = resF->get();
auto resp = handle.remove(tId, key, tuple->version() + 1);
resp->wait();
self->mSocket.get_io_service().post([self](){
self->mClientBuffer.buffer[0] = 1;
async_write(self->socket(), buffer(self->mClientBuffer.buffer.get(), 1), [self](const error_code& ec, size_t) {
if (ec) {
LOG_ERROR(ec.message());
return;
}
self->read();
});
});
});
}
public:
Client(io_service& service, tell::store::ClientManager<void>& clientManager)
: mSocket(service)
, mClientManager(clientManager)
, mClientBuffer(1024)
, mPrefixLength(4)
{}
ip::tcp::socket& socket() {
return mSocket;
}
ClientBuffer& buf() {
return mClientBuffer;
}
void read() {
auto self = shared_from_this();
mSocket.async_read_some(buffer(mClientBuffer.buffer.get(),
mClientBuffer.size), [self](const error_code& ec, size_t bt){
if (ec) {
LOG_ERROR(ec.message());
return;
}
if (bt < 4) {
LOG_ERROR("Could not read command, read only %1% bytes", bt);
std::terminate();
}
auto size = *reinterpret_cast<int32_t*>(self->mClientBuffer.buffer.get());
ycsb::read_all(size, bt, sizeof(int32_t), *self, [self](const error_code& ec, size_t bt, size_t rT) {
int32_t command = *reinterpret_cast<int32_t*>(self->mClientBuffer.buffer.get() + sizeof(int32_t));
switch (command) {
case cmd::READ:
self->doRead();
break;
case cmd::UPDATE:
self->doUpdate();
break;
case cmd::INSERT:
self->doInsert();
break;
case cmd::DELETE:
self->doDelete();
}
});
});
}
};
void accept(io_service& service, ip::tcp::acceptor& acceptor, tell::store::ClientManager<void>& clientManager) {
auto client = std::make_shared<Client>(service, clientManager);
acceptor.async_accept(client->socket(), [&service, &acceptor, &clientManager, client](const error_code& ec) {
if (ec) {
LOG_ERROR(ec.message());
return;
}
client->read();
accept(service, acceptor, clientManager);
});
}
} // namespace ycsb
int main(int argc, const char* argv[]) {
using namespace crossbow::program_options;
bool help = false;
bool createTable = false;
std::string host("0.0.0.0");
std::string port("8713");
crossbow::string logLevel("DEBUG");
crossbow::string storageNodes;
crossbow::string commitManager;
tell::store::ClientConfig config;
auto opts = create_options("tpcc_server",
value<'h'>("help", &help, tag::description{"print help"}),
value<'H'>("host", &host, tag::description{"Host to bind to"}),
value<'p'>("port", &port, tag::description{"Port to bind to"}),
value<'l'>("log-level", &logLevel, tag::description{"The log level"}),
value<'c'>("commit-manager", &commitManager, tag::description{"Address to the commit manager"}),
value<'C'>("create-table", &createTable, tag::description{"Client should create table on startup"}),
value<'s'>("storage-nodes", &storageNodes, tag::description{"Semicolon-separated list of storage node addresses"}),
value<-1>("network-threads", &config.numNetworkThreads, tag::ignore_short<true>{})
);
try {
parse(opts, argc, argv);
} catch (argument_not_found& e) {
std::cerr << e.what() << std::endl << std::endl;
print_help(std::cout, opts);
return 1;
}
if (help) {
print_help(std::cout, opts);
return 0;
}
crossbow::allocator::init();
crossbow::logger::logger->config.level = crossbow::logger::logLevelFromString(logLevel);
config.commitManager = config.parseCommitManager(commitManager);
config.tellStore = config.parseTellStore(storageNodes);
tell::store::ClientManager<void> clientManager(config);
if (createTable) {
bool done = false;
clientManager.execute([&done](tell::store::ClientHandle& handle) {
tell::store::Schema schema(tell::store::TableType::NON_TRANSACTIONAL);
schema.addField(tell::store::FieldType::TEXT, "field0", true);
schema.addField(tell::store::FieldType::TEXT, "field1", true);
schema.addField(tell::store::FieldType::TEXT, "field2", true);
schema.addField(tell::store::FieldType::TEXT, "field3", true);
schema.addField(tell::store::FieldType::TEXT, "field4", true);
schema.addField(tell::store::FieldType::TEXT, "field5", true);
schema.addField(tell::store::FieldType::TEXT, "field6", true);
schema.addField(tell::store::FieldType::TEXT, "field7", true);
schema.addField(tell::store::FieldType::TEXT, "field8", true);
schema.addField(tell::store::FieldType::TEXT, "field9", true);
handle.createTable("usertable", schema);
done = true;
});
while (!done) {
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
// initialize boost::asio
boost::asio::io_service service;
boost::asio::io_service::work work(service);
boost::asio::ip::tcp::acceptor a(service);
boost::asio::ip::tcp::acceptor::reuse_address option(true);
boost::asio::ip::tcp::resolver resolver(service);
boost::asio::ip::tcp::resolver::iterator iter;
if (host == "") {
iter = resolver.resolve(boost::asio::ip::tcp::resolver::query(port));
} else {
iter = resolver.resolve(boost::asio::ip::tcp::resolver::query(host, port));
}
boost::asio::ip::tcp::resolver::iterator end;
for (; iter != end; ++iter) {
boost::system::error_code err;
auto endpoint = iter->endpoint();
auto protocol = iter->endpoint().protocol();
a.open(protocol);
a.set_option(option);
a.bind(endpoint, err);
if (err) {
a.close();
LOG_WARN("Bind attempt failed " + err.message());
continue;
}
break;
}
if (!a.is_open()) {
LOG_ERROR("Could not bind");
return 1;
}
a.listen();
// we do not need to delete this object, it will delete itself
ycsb::accept(service, a, clientManager);
service.run();
}
|
{"hexsha": "acc62e20e610d117206e3d4066ee603c5d465987", "size": 18050, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "main.cpp", "max_stars_repo_name": "tellproject/ycsb-server", "max_stars_repo_head_hexsha": "8584d2b661221dbb172d267b664d1f7504d18c5c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.cpp", "max_issues_repo_name": "tellproject/ycsb-server", "max_issues_repo_head_hexsha": "8584d2b661221dbb172d267b664d1f7504d18c5c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.cpp", "max_forks_repo_name": "tellproject/ycsb-server", "max_forks_repo_head_hexsha": "8584d2b661221dbb172d267b664d1f7504d18c5c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8846918489, "max_line_length": 127, "alphanum_fraction": 0.5477562327, "num_tokens": 4151}
|
\documentclass{article}
\usepackage{graphicx}
\usepackage{titletoc}
\usepackage{titlesec}
\usepackage{geometry}
\usepackage{fontspec, xunicode, xltxtra}
\usepackage{float}
\usepackage{cite}
\usepackage{amsmath}
\usepackage{listings}
\usepackage{titletoc}
\usepackage{booktabs}
\geometry{left=3cm,right=3cm,top=3cm,bottom=3cm}
\DeclareMathOperator*{\argmin}{argmin}
\DeclareMathOperator*{\argmax}{argmax}
\DeclareMathOperator*{\logit}{logit}
\DeclareMathOperator*{\var}{var}
\DeclareMathOperator*{\cov}{cov}
\DeclareMathOperator*{\expec}{E}
\DeclareMathOperator*{\deriv}{d}
\DeclareMathOperator*{\const}{constant}
\begin{document}
\title{\textsf{Homework 7 for Bayesian Data Analysis}}
\author{Fan JIN\quad (2015011506)}
\maketitle
\section*{Importance Resampling}
{
Suppose you want to estimate $\expec_f h(\theta)$, with $f$ being some posterior distribution $P(\theta|y)$. Further suppose that you choose a proposal distribution $g(\theta)$, and get a sample $(x_1, \cdots, x_m)$ by importance resampling. Please prove that the average of $(h(x_1), \cdots, h(x_m))$ can be used as an estimator of $\expec_f h(\theta)$.
\textbf{Proof:}\quad Note that the probability density for $x_i$ is $w(x_i) g(x_i)$ for any $i$, which does not depend on the order in the sampling with replacement. It follows that
$$\expec_g {(h(x_i))} = \int{ h(x_i) w(x_i) g(x_i) \deriv{x_i} }$$
$$ = \int{ h(x_i) \frac{P(x_i|y)}{g(x_i)} g(x_i) \deriv{x_i} } = \int{ h(x_i) P(x_i|y) \deriv{x_i} } = \expec_f h(\theta)$$ for any $i$. Therefore, the average of $(h(x_1), \cdots, h(x_m))$ is an unbiased estimator of $\expec_f h(\theta)$:
$$\expec_g {\frac{1}{m} \sum_{i=1}^{m}{(h(x_i))}} = \frac{1}{m} \sum_{i=1}^{m} {\expec_g {h(x_i)}} = \frac{1}{m} \sum_{i=1}^{m} {\expec_f h(\theta)} = \expec_f h(\theta).$$
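As an illustrative numerical check (not part of the assigned derivation), the estimator can be simulated for a concrete choice such as $f = N(0,1)$, $g = t_3$ and $h(\theta) = \theta^2$, for which $\expec_f h(\theta) = 1$:
\begin{lstlisting}[language=Python]
import numpy as np
rng = np.random.default_rng(1)
m = 200000
x = rng.standard_t(df=3, size=m)                         # draws from g = t_3
f = np.exp(-x**2 / 2) / np.sqrt(2 * np.pi)               # target density f = N(0,1)
g = 2 / (np.pi * np.sqrt(3)) * (1 + x**2 / 3) ** -2      # t_3 density
w = f / g                                                # importance weights
idx = rng.choice(m, size=m, replace=True, p=w / w.sum()) # importance resampling
print(np.mean(x[idx] ** 2))                              # ~ 1.0 = E_f[h(theta)]
\end{lstlisting}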
}
\section*{Question 10.6d}
{
$$p(\theta|y) = f(\theta) = N(0, 3) = \frac{1}{\sqrt{6\pi}} \exp{\left( -\frac{\theta^2}{3} \right)}.$$
$$g(\theta) = t_3 = \frac{2}{\pi \sqrt{3}} \left( 1+\frac{\theta^2}{3} \right) ^{-2}.$$
It follows that
$$\expec_g{\left[ (\frac{f(\theta)}{g(\theta)})^2 \right]} = \int {(\frac{f(\theta)}{g(\theta)})^2 g(\theta) \deriv{\theta}}$$
$$= \int {\frac{1}{6\pi} \exp{\left( -\frac{2\theta^2}{3} \right)} \cdot \frac{\pi \sqrt{3}}{2} \left( 1+\frac{\theta^2}{3} \right)^2 \deriv{\theta}}$$
$$= \frac{\sqrt{3}}{12} \int { \exp{\left( -\frac{2\theta^2}{3} \right)} \cdot \left( 1+\frac{\theta^2}{3} \right)^2 \deriv{\theta}}$$
$$= \frac{\sqrt{2}}{12} \int { \exp{\left( -t^2 \right)} \cdot \left( 1+\frac{t^2}{2} \right)^2 \deriv{t}}$$
$$= \frac{\sqrt{2}}{12} \left[ \int { \exp{\left( -t^2 \right)} \deriv{t}} + \int { t^2 \cdot \exp{\left( -t^2 \right)} \deriv{t}} + \frac{1}{4} \int { t^4 \cdot \exp{\left( -t^2 \right)} \deriv{t}} \right] $$
$$= \frac{\sqrt{2}}{12} \left[ \sqrt{\pi} + \frac{1}{2}\sqrt{\pi} + \frac{3}{16}\sqrt{\pi} \right] = \frac{9\sqrt{2\pi}}{64} \approx 0.3525.$$
The effective sample size for $n=10000$ is
$$n_\mathrm{eff} = \frac{n}{\expec_g{\left[ (\frac{f(\theta)}{g(\theta)})^2 \right]}} = 10000 / \frac{9\sqrt{2\pi}}{64} \approx 28369.$$
}
\section*{Question 11.1}
{
\textbf{Lemma: (Detailed Balance condition)}\quad If a Markov chain with transition probability $p(\cdot|\cdot)$ that satisfies
$$\pi(\theta_a) \cdot p(\theta_b|\theta_a) = \pi(\theta_b) \cdot p(\theta_a|\theta_b)$$ for some distribution $\pi(\cdot)$, then $\pi(\cdot)$ is the stationary distribution of this Markov chain.
Using the lemma above, we only need to verify that the Detailed Balance condition is satisfied when $\pi(\cdot) = p(\cdot|y)$.
Note that $r(\theta_a, \theta_b) \cdot r(\theta_b, \theta_a) = 1$, for
$$r(\theta_a, \theta_b) = \frac{ p(\theta_b|y) \cdot g(\theta_a|\theta_b) }{ p(\theta_a|y) \cdot g(\theta_b|\theta_a) }$$ and
$$r(\theta_b, \theta_a) = \frac{ p(\theta_a|y) \cdot g(\theta_b|\theta_a) }{ p(\theta_b|y) \cdot g(\theta_a|\theta_b) },$$
which means it is safe to assume that $r(\theta_a, \theta_b) \geq 1$ \emph{without loss of generality}. Thus, $\theta_b$ is always accepted after generated from the previous value $\theta_a$, with the probability of $1$. On the contrary, $\theta_a$ is accepted after generated from $\theta_b$ with the probability of $r(\theta_b, \theta_a)$. It follows that
$$p(\theta_b|\theta_a) = g(\theta_b|\theta_a) \cdot 1$$ and
$$p(\theta_a|\theta_b) = g(\theta_a|\theta_b) \cdot r(\theta_b, \theta_a).$$
Plug them all in, and we obtain
$$\mathrm{LHS} = p(\theta_a|y) \cdot p(\theta_b|\theta_a) = p(\theta_a|y) \cdot g(\theta_b|\theta_a) \cdot 1$$
$$= p(\theta_a|y) \cdot g(\theta_b|\theta_a),$$ and
$$\mathrm{RHS} = p(\theta_b|y) \cdot p(\theta_a|\theta_b) = p(\theta_b|y) \cdot g(\theta_a|\theta_b) \cdot r(\theta_b, \theta_a)$$
$$= p(\theta_b|y) \cdot g(\theta_a|\theta_b) \cdot \frac{ p(\theta_a|y) \cdot g(\theta_b|\theta_a) }{ p(\theta_b|y) \cdot g(\theta_a|\theta_b) }$$
$$= p(\theta_a|y) \cdot g(\theta_b|\theta_a),$$
which gives $\mathrm{LHS} = \mathrm{RHS}$ and proves the Detailed Balance condition. Q.E.D.
}
\clearpage
\end{document}
|
{"hexsha": "7b6e6082fc5daab6bca2250cf4cbf3aa736e0c8c", "size": 5157, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "HW7/Homework7.tex", "max_stars_repo_name": "goldsail/BayesianHomework", "max_stars_repo_head_hexsha": "d5506faccbf4d0b7b696c7c2bcb42d020bb0d357", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-07T18:55:43.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-07T18:55:43.000Z", "max_issues_repo_path": "HW7/Homework7.tex", "max_issues_repo_name": "kingium/BayesianHomework", "max_issues_repo_head_hexsha": "d5506faccbf4d0b7b696c7c2bcb42d020bb0d357", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HW7/Homework7.tex", "max_forks_repo_name": "kingium/BayesianHomework", "max_forks_repo_head_hexsha": "d5506faccbf4d0b7b696c7c2bcb42d020bb0d357", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 62.8902439024, "max_line_length": 361, "alphanum_fraction": 0.6358347877, "num_tokens": 1940}
|
import os
import sys
from mnist import load_mnist
import numpy as np
(x_train, t_train), (x_test, t_test) = load_mnist(
normalize=True, one_hot_label=True)
print(x_train.shape)
# (60000, 784)
print(t_train.shape)
# (60000, 10)
train_size = x_train.shape[0]
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
print(np.random.choice(60000, 10))
# [24598 13497 47089 19298 35610 21929 59697 46776 8818 40623]
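# Illustrative only: since the labels are one-hot encoded, the class index of each
# sampled example can be recovered with argmax along axis 1.
print(t_batch.argmax(axis=1))
# prints 10 class indices between 0 and 9 (depends on the random batch)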
|
{"hexsha": "9c732c3b61d69049458d772f5b74c82a0d12cca8", "size": 493, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/Chapter3_demo/4.py", "max_stars_repo_name": "Gedanke/Getting_started_with_deep_learning", "max_stars_repo_head_hexsha": "18636fb8aaca83f2157c08815e9c1f0b0f55d91a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-06T12:54:51.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-06T12:54:51.000Z", "max_issues_repo_path": "demo/Chapter3_demo/4.py", "max_issues_repo_name": "Gedanke/Getting_started_with_deep_learning", "max_issues_repo_head_hexsha": "18636fb8aaca83f2157c08815e9c1f0b0f55d91a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/Chapter3_demo/4.py", "max_forks_repo_name": "Gedanke/Getting_started_with_deep_learning", "max_forks_repo_head_hexsha": "18636fb8aaca83f2157c08815e9c1f0b0f55d91a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.4761904762, "max_line_length": 63, "alphanum_fraction": 0.7586206897, "include": true, "reason": "import numpy", "num_tokens": 160}
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Convert stable set instances into Pauli list. We read instances in
the Gset format, see https://web.stanford.edu/~yyye/yyye/Gset/ , for
compatibility with the maxcut format, but the weights on the edges
as they are not really used and are always assumed to be 1. The
graph is represented by an adjacency matrix.
"""
import logging
import numpy as np
from qiskit.quantum_info import Pauli
from qiskit.aqua.operators import WeightedPauliOperator
logger = logging.getLogger(__name__)
def get_operator(w):
"""Generate Hamiltonian for the maximum stable set in a graph.
Args:
w (numpy.ndarray) : adjacency matrix.
Returns:
tuple(WeightedPauliOperator, float): operator for the Hamiltonian and a
constant shift for the obj function.
"""
num_nodes = len(w)
pauli_list = []
shift = 0
for i in range(num_nodes):
for j in range(i + 1, num_nodes):
if w[i, j] != 0:
x_p = np.zeros(num_nodes, dtype=np.bool)
z_p = np.zeros(num_nodes, dtype=np.bool)
z_p[i] = True
z_p[j] = True
pauli_list.append([1.0, Pauli(z_p, x_p)])
shift += 1
for i in range(num_nodes):
degree = np.sum(w[i, :])
x_p = np.zeros(num_nodes, dtype=np.bool)
z_p = np.zeros(num_nodes, dtype=np.bool)
z_p[i] = True
pauli_list.append([degree - 1 / 2, Pauli(z_p, x_p)])
return WeightedPauliOperator(paulis=pauli_list), shift - num_nodes / 2
def stable_set_value(x, w):
"""Compute the value of a stable set, and its feasibility.
Args:
x (numpy.ndarray): binary string in original format -- not
graph solution!.
w (numpy.ndarray): adjacency matrix.
Returns:
tuple(float, bool): size of the stable set, and Boolean indicating
feasibility.
"""
assert len(x) == w.shape[0]
feasible = True
num_nodes = w.shape[0]
for i in range(num_nodes):
for j in range(i + 1, num_nodes):
if w[i, j] != 0 and x[i] == 0 and x[j] == 0:
feasible = False
break
return len(x) - np.sum(x), feasible
def get_graph_solution(x):
"""Get graph solution from binary string.
Args:
x (numpy.ndarray) : binary string as numpy array.
Returns:
numpy.ndarray: graph solution as binary numpy array.
"""
return 1 - x
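if __name__ == '__main__':
    # Minimal sketch (assumes qiskit-aqua is available): a 3-node path graph 0-1-2.
    # In this encoding x[i] == 0 marks node i as selected (see get_graph_solution above).
    w = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
    qubit_op, offset = get_operator(w)
    x = np.array([0, 1, 0])                       # candidate: nodes 0 and 2 selected
    size, feasible = stable_set_value(x, w)
    print(get_graph_solution(x), size, feasible)  # [1 0 1] 2 True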
|
{"hexsha": "3f66800198892ea277dc870f00921ad7896990cc", "size": 2923, "ext": "py", "lang": "Python", "max_stars_repo_path": "qiskit/optimization/applications/ising/stable_set.py", "max_stars_repo_name": "johannes-weidenfeller/qiskit-aqua", "max_stars_repo_head_hexsha": "7775410f4dabf09ec9f933bf411ead434550accf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-06-29T08:33:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-12T00:28:51.000Z", "max_issues_repo_path": "qiskit/optimization/applications/ising/stable_set.py", "max_issues_repo_name": "johannes-weidenfeller/qiskit-aqua", "max_issues_repo_head_hexsha": "7775410f4dabf09ec9f933bf411ead434550accf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-27T09:34:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-30T21:13:41.000Z", "max_forks_repo_path": "qiskit/optimization/applications/ising/stable_set.py", "max_forks_repo_name": "johannes-weidenfeller/qiskit-aqua", "max_forks_repo_head_hexsha": "7775410f4dabf09ec9f933bf411ead434550accf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-06-29T08:40:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-24T17:39:16.000Z", "avg_line_length": 30.4479166667, "max_line_length": 79, "alphanum_fraction": 0.6339377352, "include": true, "reason": "import numpy", "num_tokens": 742}
|
macro linklibrary_modes()
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
esc(quote
set_nanmode = () -> ccall( (:set_nanmode, $path), Void, (Bool,), true )
set_infmode = () -> ccall( (:set_nanmode, $path), Void, (Bool,), false )
set_roundstozero = () -> ccall( (:set_underflow, $path), Void, (Bool,), true )
set_roundsfromzero = () -> ccall( (:set_underflow, $path), Void, (Bool,), false )
get_nanmode = () -> ccall( (:get_nanmode, $path), Bool, ())
get_roundstozero = () -> ccall( (:get_underflow, $path), Bool, ())
end)
end
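# The macro above generates thin wrappers around the C library's global NaN/underflow
# mode switches; that state is shared by all posit operations, which the tests below rely on.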
@linklibrary_modes
set_roundstozero()
#test that the interaction is stateful.
@test get_roundstozero();
println("testing underflow rounding mode")
#multiplication is the major operation that can cause underflow.
@linklibrary_mul(8,0)
@test pmul(0x01, 0x01) == 0x00
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x00
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(8,1)
@test pmul(0x01, 0x01) == 0x00
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x00
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(8,2)
@test pmul(0x01, 0x01) == 0x00
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x00
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(16,0)
@test pmul(0x0001, 0x0001) == 0x0000
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0000
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(16,1)
@test pmul(0x0001, 0x0001) == 0x0000
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0000
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(16,2)
@test pmul(0x0001, 0x0001) == 0x0000
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0000
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(32,0)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0000
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0000
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,1)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0000
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0000
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,2)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0000
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0000
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,3)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0000
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0000
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
set_roundsfromzero()
#test that the interaction is stateful.
@test !get_roundstozero();
println("testing non-underflowing rounding mode")
@linklibrary_mul(8,0)
@test pmul(0x01, 0x01) == 0x01
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x01
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(8,1)
@test pmul(0x01, 0x01) == 0x01
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x01
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(8,2)
@test pmul(0x01, 0x01) == 0x01
@test pmul(0x08, 0x08) == 0x01
@test pmul(0x08, 0x04) == 0x01
@test pmul(0x09, 0x04) == 0x01
@linklibrary_mul(16,0)
@test pmul(0x0001, 0x0001) == 0x0001
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0001
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(16,1)
@test pmul(0x0001, 0x0001) == 0x0001
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0001
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(16,2)
@test pmul(0x0001, 0x0001) == 0x0001
@test pmul(0x0080, 0x0080) == 0x0001
@test pmul(0x0080, 0x0040) == 0x0001
@test pmul(0x0081, 0x0040) == 0x0001
@linklibrary_mul(32,0)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0001
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,1)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0001
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,2)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0001
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
@linklibrary_mul(32,3)
@test pmul(0x0000_0001, 0x0000_0001) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_8000) == 0x0000_0001
@test pmul(0x0000_8000, 0x0000_4000) == 0x0000_0001
@test pmul(0x0000_8001, 0x0000_4000) == 0x0000_0001
################################################################################
# nan mode testing.
macro linklibrary_gt(n, es)
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
fnname = QuoteNode(Symbol(:p, n, :e, es, :_gt))
posittype = Symbol(:UInt, n)
esc(:(pgt = (a, b) -> ccall( ($fnname, $path), Bool, ($posittype, $posittype), a, b )))
end
macro linklibrary_lt(n, es)
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
fnname = QuoteNode(Symbol(:p, n, :e, es, :_gt))
posittype = Symbol(:UInt, n)
esc(:(plt = (a, b) -> ccall( ($fnname, $path), Bool, ($posittype, $posittype), a, b )))
end
macro linklibrary_gte(n, es)
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
fnname = QuoteNode(Symbol(:p, n, :e, es, :_gt))
posittype = Symbol(:UInt, n)
esc(:(pgte = (a, b) -> ccall( ($fnname, $path), Bool, ($posittype, $posittype), a, b )))
end
macro linklibrary_lte(n, es)
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
fnname = QuoteNode(Symbol(:p, n, :e, es, :_gt))
posittype = Symbol(:UInt, n)
esc(:(plte = (a, b) -> ccall( ($fnname, $path), Bool, ($posittype, $posittype), a, b )))
end
macro linklibrary_eq(n, es)
path = normpath(Pkg.dir("FastSigmoid"),"c-src","libfastposit.so")
fnname = QuoteNode(Symbol(:p, n, :e, es, :_eq))
posittype = Symbol(:UInt, n)
esc(:(peq = (a, b) -> ccall( ($fnname, $path), Bool, ($posittype, $posittype), a, b )))
end
@linklibrary_add(8,0)
@linklibrary_mul(8,0)
@linklibrary_div(8,0)
@linklibrary_gt(8,0)
@linklibrary_lt(8,0)
@linklibrary_gte(8,0)
@linklibrary_lte(8,0)
@linklibrary_eq(8,0)
set_nanmode()
@test get_nanmode()
@test padd(0x80, 0x40) == 0x80
@test padd(0x80, 0x80) == 0x80
@test pmul(0x80, 0x40) == 0x80
@test pmul(0x80, 0x00) == 0x80
@test pmul(0x80, 0x80) == 0x80
@test pdiv(0x80, 0x40) == 0x80
@test pdiv(0x80, 0x80) == 0x80
@test pdiv(0x40, 0x80) == 0x80
@test pgt(0x80, 0x40) == false
@test plt(0x80, 0x40) == false
@test pgte(0x80, 0x40) == false
@test plte(0x80, 0x40) == false
@test peq(0x80, 0x80) == false
set_infmode()
@test !get_nanmode()
@test padd(0x80, 0x40) == 0x80
#@test padd(0x80, 0x80) == 0x80 <== this should set errno.
@test pmul(0x80, 0x40) == 0x80
#@test pmul(0x80, 0x00) == 0x80 <== this should set errno.
@test pmul(0x80, 0x80) == 0x80
@test pdiv(0x80, 0x40) == 0x80
#@test pdiv(0x80, 0x80) == 0x80 <== this should set errno.
@test pdiv(0x40, 0x80) == 0x00 # note this is different.
@test pgt(0x80, 0x40) == true
@test plt(0x80, 0x40) == true
@test pgte(0x80, 0x40) == true
@test plte(0x80, 0x40) == true
@test peq(0x80, 0x80) == true
|
{"hexsha": "e88fa8ceb9cd0b41779aacb006851d46144a64e9", "size": 7327, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/c-lib-mode-test.jl", "max_stars_repo_name": "Etaphase/FastSigmoids.jl", "max_stars_repo_head_hexsha": "ebea3f97be21a36fd628cc6a38dddc5b8eee713f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2017-06-14T03:00:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-22T15:19:54.000Z", "max_issues_repo_path": "test/c-lib-mode-test.jl", "max_issues_repo_name": "ityonemo/FastSigmoids.jl", "max_issues_repo_head_hexsha": "ebea3f97be21a36fd628cc6a38dddc5b8eee713f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-02-07T16:13:53.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-06T20:11:27.000Z", "max_forks_repo_path": "test/c-lib-mode-test.jl", "max_forks_repo_name": "ityonemo/FastSigmoids.jl", "max_forks_repo_head_hexsha": "ebea3f97be21a36fd628cc6a38dddc5b8eee713f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-02-01T14:32:12.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-01T14:32:12.000Z", "avg_line_length": 33.1538461538, "max_line_length": 90, "alphanum_fraction": 0.687866794, "num_tokens": 3508}
|
\section{Section 0}\label{sec:zero}
This is a reference \cite{tur38}. This is an acronym: \ac{MI}. Fun fact: when it is used again, only its short form is displayed: \ac{MI}.
Note, that the gray boxes on the cover page can be replaced. Simply replace the \code{logo.png} file in the \code{images} folder.
Cref demonstration: \code{Cref} is used at the beginning of a sentence, \code{cref} in all other cases. \Cref{fig:logo} shows a simple fact, although \cref{fig:logo} could also show something else. \Cref{tab:simple} shows a simple fact, although \cref{tab:simple} could also show something else. \Cref{sec:one} shows a simple fact, although \cref{sec:one} could also show something else.
\image{logo}{Simple Figure}
Brackets work as designed: <test>
\begin{inparaenum}
\item All these items...
\item ...appear in one line
\item This is enabled by the paralist package.
\end{inparaenum}
\javafile{SetOperation}{A simple Javafile as an example}
|
{"hexsha": "1873735baad5ee3253424fcbe33c6a4d1937f533", "size": 929, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/section-0.tex", "max_stars_repo_name": "christian-steinmeyer/theses-template", "max_stars_repo_head_hexsha": "d53d59f1d05025f4a3d2b3ee8f3ff22c69ce124f", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2016-05-03T16:55:59.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-17T11:33:50.000Z", "max_issues_repo_path": "sections/section-0.tex", "max_issues_repo_name": "christian-steinmeyer/theses-template", "max_issues_repo_head_hexsha": "d53d59f1d05025f4a3d2b3ee8f3ff22c69ce124f", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/section-0.tex", "max_forks_repo_name": "christian-steinmeyer/theses-template", "max_forks_repo_head_hexsha": "d53d59f1d05025f4a3d2b3ee8f3ff22c69ce124f", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-05-03T17:01:39.000Z", "max_forks_repo_forks_event_max_datetime": "2016-05-03T17:01:39.000Z", "avg_line_length": 44.2380952381, "max_line_length": 359, "alphanum_fraction": 0.7524219591, "num_tokens": 261}
|
function [M_est,U_est,V_est,L1_error] = RobustApproximation_M_UV_TraceNormReg(M,W,r,lambda,rho,maxIterIN,signM)
%% Robust low-rank matrix approximation with missing data and outliers
% min |W.*(M-E)|_1 + lambda*|V|_*
% s.t., E = UV, U'*U = I
%
%Input:
% M: m*n data matrix
%  W: m*n indicator matrix, where '1' means 'observed' and '0' means 'missing'.
%  r: the target rank of the low-rank approximation
%  lambda: the weighting factor of the trace-norm regularization, 1e-3 by default.
%  rho: increasing ratio of the penalty parameter mu, usually 1.05.
%  maxIterIN: maximum iteration number of the inner loop, usually 100.
% signM: if M>= 0, then signM = 1, otherwise, signM = 0;
%Output:
% M_est: m*n full matrix, such that M_est = U_est*V_est
% U_est: m*r matrix
% V_est: r*n matrix
% L1_error: the L1-norm error of observed data only.
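%Example (illustrative only, with synthetic data):
%  M = randn(100,50); W = double(rand(100,50) > 0.2); % roughly 20% missing entries
%  [M_est,U_est,V_est,err] = RobustApproximation_M_UV_TraceNormReg(M,W,4,1e-3,1.05,100,0);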
%% Normalization
scale = max(max(abs(M)));
M = M/scale;
%% Default parameters
[m, n] = size(M); %matrix dimensions
if nargin < 6
maxIterIN = 100;
end
if nargin < 5
rho = 1.05;
end
if nargin < 4
lambda = 1e-3;
end
if nargin < 3
    error('Please input the data matrix M, the indicator W and the rank r, and try again.');
end
maxIterOUT = 5000;
max_mu = 1e20;
mu = 1e-6;
M_norm = norm(M,'fro');
tol = 1e-8*M_norm;
cW = ones(size(W)) - W; %the complement of W.
display = 1; %display progress
%% Initializing optimization variables as zeros
E = zeros(m,n);
U = zeros(m,r);
V = zeros(r,n);
Y = zeros(m,n); %lagrange multiplier
%% Start main outer loop
iter_OUT = 0;
objs=[];
while iter_OUT < maxIterOUT
iter_OUT = iter_OUT + 1;
itr_IN = 0;
obj_pre = 1e20;
%start inner loop
while itr_IN < maxIterIN
%update U
temp = (E + Y/mu)*V';
[Us,sigma,Ud] = svd(temp,'econ'); % stable
%[Us,sigma,Ud] = svdecon(temp); % fastest
U = Us*Ud';
%update V
temp = U'*(E + Y/mu);
[Vs,sigma,Vd] = svd(temp,'econ'); % stable
%[Vs,sigma,Vd] = svdecon(temp); % fastest
sigma = diag(sigma);
svp = length(find(sigma > lambda/mu));
if svp >= 1
sigma = sigma(1:svp) - lambda/mu;
else
svp = 1;
sigma = 0;
end
V = Vs(:,1:svp)*diag(sigma)*Vd(:,1:svp)';
sigma0 = sigma;
UV = U*V;
%update E
temp1 = UV - Y/mu;
temp = M-temp1;
E = max(0,temp - 1/mu) + min(0,temp + 1/mu);
E = (M-E).*W + temp1.*cW;
if signM > 0
E(E<0) = 0;
end
%evaluate current objective
obj_cur = sum(sum(abs(W.*(M-E)))) + lambda*sum(sigma0) + sum(sum(Y.*(E-UV))) + mu/2*norm(E-UV,'fro')^2;
%check convergence of inner loop
if abs(obj_cur - obj_pre) <= 1e-8*abs(obj_pre)
break;
else
obj_pre = obj_cur;
itr_IN = itr_IN + 1;
end
end
leq = E - UV;
stopC = norm(leq,'fro');
if display
obj = sum(sum(abs(W.*(M-UV)))) + lambda*sum(sigma0);
objs = [objs,obj];
end
if display && (iter_OUT==1 || mod(iter_OUT,50)==0 || stopC<tol)
disp(['iter ' num2str(iter_OUT) ',mu=' num2str(mu,'%2.1e') ...
',obj=' num2str(obj) ',stopALM=' num2str(stopC,'%2.3e')]);
end
if stopC<tol
break;
else
        %update Lagrange multiplier
Y = Y + mu*leq;
%update penalty parameter
mu = min(max_mu,mu*rho);
end
end
%% Denormalization
U_est = sqrt(scale)*U; V_est = sqrt(scale)*V;
M_est = U_est*V_est;
L1_error = sum(sum(abs(W.*(scale*M-M_est))));
end
|
{"author": "andrewssobral", "repo": "lrslibrary", "sha": "06d457349cb5f1fc56a583cd61af9f1d5150e3a1", "save_path": "github-repos/MATLAB/andrewssobral-lrslibrary", "path": "github-repos/MATLAB/andrewssobral-lrslibrary/lrslibrary-06d457349cb5f1fc56a583cd61af9f1d5150e3a1/algorithms/rpca/RegL1-ALM/RobustApproximation_M_UV_TraceNormReg.m"}
|
# Replicate of the calcpath subroutine
# For each hour, path[i] is the reciprocal of the cosine of the solar incidence angle on the
# sloped surface (a beam path-length factor); path[i] = -1 flags hours when the sun is below
# the horizon. Note: zenang, sunazm and pid2 (pi/2) are assumed to exist in the calling
# environment, and the branching below follows the apparent intent of the original subroutine.
calcpath <- function(nohrs, slope, aspect, path){
  for (i in 1:nohrs){
    if (zenang[i] < pid2*0.998){
      # Sun above the horizon: compute the incidence cosine, guard against division by zero,
      # then invert to obtain the path-length factor
      path[i] <- cos(zenang[i])*cos(slope) + sin(zenang[i])*sin(slope)*cos(aspect - sunazm[i])
      if (path[i] == 0){
        path[i] <- 1.0e-11
      }
      path[i] <- 1.0/path[i]
      # Negative or absurdly large values also mean the beam does not reach the surface
      if (path[i] <= 0.0 | path[i] > 1.0e10){
        path[i] <- -1.0
      }
    } else {
      # Sun below horizon
      path[i] <- -1.0
    }
  }
  # R passes arguments by value, so the updated vector must be returned to the caller
  return(path)
}
|
{"hexsha": "2da4f842ddfc8a41055d55185535b14890a6e014", "size": 550, "ext": "r", "lang": "R", "max_stars_repo_path": "src/calcpath.r", "max_stars_repo_name": "hieulel/CUPID", "max_stars_repo_head_hexsha": "4b35e5f5cf338c6061763085b32359bc68866ae1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-28T14:24:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-28T14:24:48.000Z", "max_issues_repo_path": "src/calcpath.r", "max_issues_repo_name": "hieulel/CUPID", "max_issues_repo_head_hexsha": "4b35e5f5cf338c6061763085b32359bc68866ae1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/calcpath.r", "max_forks_repo_name": "hieulel/CUPID", "max_forks_repo_head_hexsha": "4b35e5f5cf338c6061763085b32359bc68866ae1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0, "max_line_length": 87, "alphanum_fraction": 0.4981818182, "num_tokens": 209}
|
#The files/folders that need to be in the executing folder are:
# yocto_api.py
# yocto_temperature.py
# folder: cdll
#Available from the Yoctopuce website:
#http://www.yoctopuce.com/EN/libraries.php Python libraries
#TODO:
#Make plotting function
#Buffer creation is commented out.
import os,sys
import time
import threading
import csv
import numpy as np
import collections
from yocto_api import *
from yocto_temperature import *
class FISH_thermistor():
"""
Class to initiate and read the temperature of a Yoctopuce Maxi Thermistor
"""
temperature = []
def __init__(self, logical_name = None, serial_number = None, **kwargs):
"""
        Instantiate sensor, check if it works and open temperature channels
        Input:
        `logical_name`: Logical name of the sensor (set in the Yoctopuce
        software)
        `serial_number`: Unchangeable serial number of the sensor (can be found
in the Yoctopuce software)
One of the two is required.
"""
self.logical_name = logical_name
self.serial_number = serial_number
self.errmsg=YRefParam()
# Setup the API to use local USB devices
if YAPI.RegisterHub("usb", self.errmsg)!= YAPI.SUCCESS:
sys.exit("init error"+self.errmsg.value)
if self.logical_name != None:
self.target = self.logical_name
elif self.serial_number != None:
self.target = self.serial_number
elif self.serial_number == None and self.logical_name == None:
self.thermistor_die('Specify logical name or serial number')
#Instantiate sensor
self.sensor= YTemperature.FindTemperature(self.target + '.temperature1')
#Check if sensor is valid and live
if self.sensor is None :
self.thermistor_die('No module connected, check connection and name')
if not(self.sensor.isOnline()):
self.thermistor_die('device not connected')
#Get sensor serial number
self.serial=self.sensor.get_module().get_serialNumber()
#Initiate channels
self.init_channels()
def thermistor_die(self, msg):
sys.exit(msg+' (check name or USB cable)')
def init_channels(self):
global channel1, channel2, channel3, channel4, channel5, channel6
channel1 = YTemperature.FindTemperature(self.serial + '.temperature1')
channel2 = YTemperature.FindTemperature(self.serial + '.temperature2')
channel3 = YTemperature.FindTemperature(self.serial + '.temperature3')
channel4 = YTemperature.FindTemperature(self.serial + '.temperature4')
channel5 = YTemperature.FindTemperature(self.serial + '.temperature5')
channel6 = YTemperature.FindTemperature(self.serial + '.temperature6')
def read_temperature(self):
"""
get the temperature values of the 6 channels.
        returns the 6 temperatures individually (not as a list).
"""
temp1 = channel1.get_currentValue()
temp2 = channel2.get_currentValue()
temp3 = channel3.get_currentValue()
temp4 = channel4.get_currentValue()
temp5 = channel5.get_currentValue()
temp6 = channel6.get_currentValue()
return temp1, temp2, temp3, temp4, temp5, temp6
def pr(self, time):
for i in range(time):
print(self.read_temperature())
YAPI.Sleep(1000)
class FISH_temperature_deamon():
"""
    Class that can run the Yoctopuce Maxi Thermistor in the background
    on a separate thread and live-plot the data. All data will be
    saved to a .csv file.
Input:
`logical_name`(str): logical name of the sensor
`serial_number(str): serial number of sensor
`exp_name`(str): Experiment name to track files
`buffer_size`(int): number of hours to plot in graph (default=2)
`log_interval`(int): Interval in seconds to save the temperature data.
default = 1 second
"""
def __init__(self, logical_name = None, serial_number = None,
exp_name = None, buffer_size=2, log_interval=1):
#Initiate sensor using the FISH_thermistor class
if logical_name != None:
self.sensor = FISH_thermistor(logical_name = logical_name)
if serial_number != None:
self.sensor = FISH_thermistor(serial_number = serial_number)
#Setup log file and exp name
self.exp_name = exp_name
#Creates log file and returns the file name
self.temp_log_filename = self.temp_log_file(self.exp_name)
#make buffers for graph with length in hours
#self.make_buffers(buffer_size)
self.log_interval = log_interval
    # Worker that reads the temp from the sensor, saves it to file, plots and makes it available, every second.
def worker(self): #Can not pass the sensor as argument here, threading will complain
"""
thread worker function. Reads the temperature, saves it to a file
and makes the data available for other programs using the get_temp()
        function.
"""
thread_name = threading.currentThread().getName()
print('Started FISH_temperature_deamon in the backround on thread: {}'.format(thread_name))
global current_temp
current_temp = []
count = 0
while True:
tic = time.time()
event_flag.clear()
#Get current temperature
current_temp = self.background_get_temp(self.sensor)
#write to file every interval
if count % self.log_interval == 0:
self.write_temp_log_file(self.temp_log_filename, current_temp)
count = 0
            #update data for plot
#self.update_temp_data_buffer(current_temp)
#update plot
count += 1
event_flag.set()
toc = time.time()
execute_time = toc - tic
if execute_time > 1:
execute_time = 0.001
time.sleep(1 - execute_time)
return current_temp
    # Low level functions used in __init__
def temp_log_file(self, exp_name):
"""Make temperature log file, return file name"""
if not os.path.exists('Temperature_log_files'):
os.makedirs('Temperature_log_files')
if exp_name != None:
file_name = ('Temperature_log_files/' + exp_name + '_temp_log_' +
str(time.strftime('%d-%m-%Y_%H-%M-%S')) + '.csv')
else:
file_name = ('Temperature_log_files/' +'temp_log_' +
time.strftime('%d-%m-%Y_%H-%M-%S') + '.csv')
print(file_name)
self.logger_path = file_name
with open(file_name, 'w', newline='') as temp_log:
writer = csv.writer(temp_log)
header = [['Timestamp','Sensor1','Sensor2','Sensor3','Sensor4',
'Sensor5','Sensor6']]
writer.writerows(header)
return file_name
def make_buffers(self, buffer_size):
"""
Make 7 buffers for time and temperature
Input:
`buffer_size`(int): number of hours to buffer (default=2)
Buffers are fixed size 'deque' objects
"""
buffer_size = buffer_size * 60 * 60 #buffer size in seconds
self.time_data = collections.deque([None], maxlen=buffer_size)
self.sensor1_data = collections.deque([None], maxlen=buffer_size)
self.sensor2_data = collections.deque([None], maxlen=buffer_size)
self.sensor3_data = collections.deque([None], maxlen=buffer_size)
self.sensor4_data = collections.deque([None], maxlen=buffer_size)
self.sensor5_data = collections.deque([None], maxlen=buffer_size)
self.sensor6_data = collections.deque([None], maxlen=buffer_size)
    # Low level functions used in the worker
def background_get_temp(self, sensor):
"""Get the current time and temperature from sensor"""
data = []
now = time.strftime('%d-%m-%Y_%H:%M:%S')
temperature = self.sensor.read_temperature()
data.append(now)
data += temperature
return data
def write_temp_log_file(self, file_name, data):
"""Write new data to temperature log file"""
with open(file_name, 'a', newline='') as temp_log:
writer = csv.writer(temp_log)
writer.writerows([data])
def update_temp_data_buffer(self, new_data):
"""Append new data to the data buffers"""
self.time_data.append(new_data[0])
self.sensor1_data.append(new_data[1])
self.sensor2_data.append(new_data[2])
self.sensor3_data.append(new_data[3])
self.sensor4_data.append(new_data[4])
self.sensor5_data.append(new_data[5])
self.sensor6_data.append(new_data[6])
    # Starting the daemon on a separate thread
def deamon_start(self):
global event_flag
event_flag = threading.Event()
temp_thread = threading.Thread(target=self.worker)#, args = self.sensor)
temp_thread.setDaemon(True) #It will end the thread when the main process is done or quit
temp_thread.start()
time.sleep(1)
    # Function to get the temperature from the main thread without interfering with the worker.
def get_temp(self):
"""Get the current time and temperature from deamon"""
while not event_flag.isSet():
event_is_set = event_flag.wait(0.1)
return current_temp
if __name__ == "__main__":
x = FISH_temperature_deamon(serial_number = 'THRMSTR2-629D5')
x.deamon_start()
print('This is a test function that will print the temp every sec for the next 10 seconds')
for i in range (10):
print(x.get_temp())
#print('data buffer: ', x.sensor1_data[-20:])
time.sleep(1)
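# --- Illustrative addition (not part of the original module) ---
# The CSV written by temp_log_file() has a 'Timestamp' column followed by 'Sensor1'..'Sensor6'.
# The helper below is one possible way to load such a log for offline analysis; it assumes
# pandas is available, which is an extra dependency the module above does not require.
def load_temperature_log(path):
    """Load a temperature log CSV written by FISH_temperature_deamon into a pandas DataFrame."""
    import pandas as pd
    df = pd.read_csv(path)
    # Timestamps are written as 'dd-mm-YYYY_HH:MM:SS' strings (see background_get_temp)
    df['Timestamp'] = pd.to_datetime(df['Timestamp'], format='%d-%m-%Y_%H:%M:%S')
    return df
# Hypothetical usage (the file name is only an example):
# log = load_temperature_log('Temperature_log_files/exp1_temp_log_01-01-2020_12-00-00.csv')
# print(log[['Sensor1', 'Sensor6']].describe())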
|
{"hexsha": "dd24717fd0d37a8ce528b5d3af09b45be3f8d93f", "size": 9890, "ext": "py", "lang": "Python", "max_stars_repo_path": "YoctoThermistor_FISH.py", "max_stars_repo_name": "linnarsson-lab/ROBOFISH", "max_stars_repo_head_hexsha": "cc54bf6c63565a8e2e1fdfbaa12255576cf0b784", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "YoctoThermistor_FISH.py", "max_issues_repo_name": "linnarsson-lab/ROBOFISH", "max_issues_repo_head_hexsha": "cc54bf6c63565a8e2e1fdfbaa12255576cf0b784", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "YoctoThermistor_FISH.py", "max_forks_repo_name": "linnarsson-lab/ROBOFISH", "max_forks_repo_head_hexsha": "cc54bf6c63565a8e2e1fdfbaa12255576cf0b784", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3602941176, "max_line_length": 107, "alphanum_fraction": 0.6385237614, "include": true, "reason": "import numpy", "num_tokens": 2204}
|
from typing import *
import pickle
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
class Affectdataset(Dataset):
def __init__(self, data: Dict, flatten_time_series: bool, aligned: bool = True, task: str = None) -> None:
self.dataset = data
self.flatten = flatten_time_series
self.aligned = aligned
self.task = task
def __getitem__(self, ind):
vision = torch.tensor(self.dataset['vision'][ind])
audio = torch.tensor(self.dataset['audio'][ind])
text = torch.tensor(self.dataset['text'][ind])
if self.aligned:
try:
start = text.nonzero()[0][0]
except:
print(text, ind)
exit()
vision = vision[start:].float()
audio = audio[start:].float()
text = text[start:].float()
else:
vision = vision[vision.nonzero()[0][0]:].float()
audio = audio[audio.nonzero()[0][0]:].float()
text = text[text.nonzero()[0][0]:].float()
label = torch.tensor(self.dataset['labels'][ind]).round().long()+3 if self.task == "classification" else\
torch.tensor(self.dataset['labels'][ind]).float()
if self.flatten:
return [vision.flatten(), audio.flatten(), text.flatten(), ind,
label]
else:
return [vision, audio, text, ind, label]
def __len__(self):
return self.dataset['vision'].shape[0]
def get_dataloader(
filepath: str, batch_size: int = 40, train_shuffle: bool = True,
num_workers: int = 8, flatten_time_series: bool = False, task=None) -> DataLoader:
with open(filepath, "rb") as f:
alldata = pickle.load(f)
for dataset in alldata:
drop = []
for ind, k in enumerate(alldata[dataset]["text"]):
if k.sum() == 0:
drop.append(ind)
alldata[dataset]["text"] = np.delete(alldata[dataset]["text"], drop, 0)
alldata[dataset]["vision"] = np.delete(
alldata[dataset]["vision"], drop, 0)
alldata[dataset]["audio"] = np.delete(
alldata[dataset]["audio"], drop, 0)
alldata[dataset]["labels"] = np.delete(
alldata[dataset]["labels"], drop, 0)
train = DataLoader(Affectdataset(alldata['train'], flatten_time_series, task=task),
shuffle=train_shuffle, num_workers=num_workers, batch_size=batch_size,
collate_fn=process)
valid = DataLoader(Affectdataset(alldata['valid'], flatten_time_series, task=task),
shuffle=False, num_workers=num_workers, batch_size=batch_size,
collate_fn=process)
test = DataLoader(Affectdataset(alldata['test'], flatten_time_series, task=task),
shuffle=False, num_workers=num_workers, batch_size=batch_size,
collate_fn=process)
return train, valid, test
def process(inputs: List):
processed_input = []
processed_input_lengths = []
inds = []
labels = []
for i in range(len(inputs[0])-2):
feature = []
for sample in inputs:
feature.append(sample[i])
processed_input_lengths.append(
torch.as_tensor([v.size(0) for v in feature]))
processed_input.append(pad_sequence(feature, batch_first=True))
for sample in inputs:
inds.append(sample[-2])
if len(sample[-1].shape) > 1:
labels.append(torch.where(sample[-1][:, 1] == 1)[0])
else:
labels.append(sample[-1])
return processed_input, processed_input_lengths, \
torch.tensor(inds).view(len(inputs), 1), torch.tensor(
labels).view(len(inputs))
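# --- Illustrative addition (not part of the original module) ---
# Minimal usage sketch for get_dataloader(). The pickle path is hypothetical; the file is
# assumed to hold a dict with 'train'/'valid'/'test' splits, each containing the
# 'vision', 'audio', 'text' and 'labels' arrays that Affectdataset expects.
if __name__ == "__main__":
    traindata, validdata, testdata = get_dataloader(
        "data/affect_raw.pkl",   # hypothetical path to the preprocessed pickle
        batch_size=32,
        num_workers=0,
        flatten_time_series=False,
        task="classification")
    inputs, lengths, inds, labels = next(iter(traindata))
    # inputs is a list of padded [batch, time, feature] tensors: vision, audio, text
    print([x.shape for x in inputs], labels.shape)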
|
{"hexsha": "d3f02f479cf50ea75e9bfefd0a0346909ec28f18", "size": 3822, "ext": "py", "lang": "Python", "max_stars_repo_path": "deprecated/dataloaders/affect/get_data.py", "max_stars_repo_name": "kapikantzari/MultiBench", "max_stars_repo_head_hexsha": "44ab6ea028682040a0c04de68239ce5cdf15123f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 148, "max_stars_repo_stars_event_min_datetime": "2021-03-06T06:54:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T19:27:21.000Z", "max_issues_repo_path": "deprecated/dataloaders/affect/get_data.py", "max_issues_repo_name": "kapikantzari/MultiBench", "max_issues_repo_head_hexsha": "44ab6ea028682040a0c04de68239ce5cdf15123f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-07-19T22:57:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T03:12:29.000Z", "max_forks_repo_path": "deprecated/dataloaders/affect/get_data.py", "max_forks_repo_name": "kapikantzari/MultiBench", "max_forks_repo_head_hexsha": "44ab6ea028682040a0c04de68239ce5cdf15123f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2021-07-22T07:17:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T16:11:40.000Z", "avg_line_length": 36.75, "max_line_length": 113, "alphanum_fraction": 0.5860805861, "include": true, "reason": "import numpy", "num_tokens": 855}
|
program LOOP
integer I,N,A(100),B(100)
do i = 1, 100
A(I) = 0.0
enddo
do i = 1, 100
call PRIV1(A,B,i)
call PRIV2(A,B,i)
enddo
end
subroutine PRIV1(V,W,N)
integer V(N),W,i
integer WORK(100)
save WORK
do i = 1,N
WORK(i) = V(i)
enddo
W = 0
do i = 1,N
W = W + WORK(n-i+1)
enddo
end
subroutine PRIV2(V,W,N)
integer V(N),W,i
integer WORK(100),j
common /toto/ WORK,j
do i = 1,N
WORK(i) = V(i)
enddo
W = 0
do i = 1,N
j = n-i+1
W = W + WORK(j)
enddo
end
|
{"hexsha": "14fd020adbb128f5dcf31a684cbd78d1e52292b3", "size": 696, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "packages/PIPS/validation/RegionPrivatization/declarations_priv.f", "max_stars_repo_name": "DVSR1966/par4all", "max_stars_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2015-01-31T01:51:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T02:01:50.000Z", "max_issues_repo_path": "packages/PIPS/validation/RegionPrivatization/declarations_priv.f", "max_issues_repo_name": "DVSR1966/par4all", "max_issues_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-05-29T09:29:00.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-11T16:01:39.000Z", "max_forks_repo_path": "packages/PIPS/validation/RegionPrivatization/declarations_priv.f", "max_forks_repo_name": "DVSR1966/par4all", "max_forks_repo_head_hexsha": "86b33ca9da736e832b568c5637a2381f360f1996", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2015-03-26T08:05:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-18T02:01:51.000Z", "avg_line_length": 16.9756097561, "max_line_length": 31, "alphanum_fraction": 0.3979885057, "num_tokens": 241}
|
import numpy as np;
# from make_ad_nvar import *
from make_ccm import *
import pprint
import pandas as pd
import operator
import pdb
import copy
import re
import collections
from scipy.stats import poisson
from ccm_ad_flex_tests import *
class Node:
def __init__(self,value, name, parent=None):
self.value = value
self.children = np.array([])
self.ancestors = np.array([])
self.desc = np.array([])
self.parent = parent
self.name = name
self.plot_name = name
self.desc_depth = 0
self.tier = 0
self.desc_idx = np.array([],np.int32)
if parent is not None:
parent.add_children([self])
else:
self.tier = 1
def add_children(self, children):
#children are node instances
#put single nodes into a list
if isinstance(children, list)==False and isinstance(children,np.ndarray) == False:
children = [children]
if self.children is not None:
for child in children:
self.children = np.append(self.children,child)
def remove_child(self, child_names):
#child_names is a string or string list
#put single node names into a list
if isinstance(child_names, basestring) or isinstance(child_names, int):
child_names = [child_names]
#list of indices to remove
to_delete = np.array([],np.int32)
for idx in range(len(self.children)):
if self.children[idx].name in child_names:
to_delete = np.append(to_delete, idx)
self.children = np.delete(self.children, to_delete)
return None
def gather_desc_nodes(self, top_node= None):
if top_node is None:
top_node = self
top_node.desc = np.array([])
if (len(self.children) > 0 ):
top_node.desc_depth += 1
for child in self.children:
top_node.desc = np.append(top_node.desc, child.name)
if len(child.children) > 0:
#get the names of the descendants of each child
child.gather_desc_nodes(top_node)
def gather_child_idx(self,top_node=None):
#top node is the node for which we will gather all of the descendant indices
if top_node is None:
top_node = self
for child in self.children:
#get the indices for each child
top_node.desc_idx = np.append(top_node.desc_idx, child.value)
if len(child.children) > 0:
#get the indices for the descendants of each child
child.gather_child_idx(top_node)
def collapse_children(self, full=True):
#ALL of the node's descendants (not just children) are merged into the cluster
if full == True:
top_node = self
top_node.desc_idx = np.array([], np.int32)
self.gather_child_idx(top_node)
#all of the node's descendants now become part of the cluster
self.value = np.append(self.value,self.desc_idx)
self.desc_idx = np.array([],np.int32)
self.children = np.array([])
class Tree:
def __init__(self,name, size=None, values=None):
self.nodes = []
if values is None:
root_idx = self.node_indices(size)
else:
root_idx = values
self.root = Node(root_idx,name)
self.nodes = [self.root]
self.node_tiers = None
self.tiers = 1
def node_indices(self,size):
#generates the mutation indices for a new node
idx = np.arange(size)
nssm = self.comp_nssms() #how many ssms are already in the tree
idx = idx + nssm #make an array of size as the value of the node
idx = idx.astype(int)
return idx
def create_node(self,name,parent=None,size=None, values=None):
if size is not None and values is None:
idx = self.node_indices(size)
if values is not None:
idx = values
node = Node(idx, name, parent)
self.nodes = np.append(self.nodes,node)
return node
def remove_node(self, name):
node = self.get_node(name)
if(len(node.children)>0):
[self.switch_parent(child.name, node.parent.name) for child in node.children]
self.nodes = np.delete(self.nodes, np.where(self.nodes == node))
return None
def comp_nssms(self):
nssms = reduce(lambda x,y: x + y.value.size,self.nodes ,0)
return(nssms)
def ccm(self, outfile=None):
nssms = self.comp_nssms()
out = np.zeros((nssms,len(self.nodes)))
i = 0
out2A = np.zeros(nssms)
struct = self.tree_struct(return_plot_labels=True, plot_labels=True)
for node in self.nodes:
print node.name
out[node.value,i] = 1
if outfile is not None:
if (isinstance(node.name, basestring)):
name = struct.loc[ struct['plot_names'] == node.name, 'nodes']
else:
name = node.name
out2A[node.value] = name
i += 1
if outfile is not None:
            #pseudo-2A output
np.savetxt(outfile, out2A, fmt='%i')
ccm = np.dot(out,out.T)
return ccm
def ad(self):
nssms = self.comp_nssms()
out = np.zeros((nssms,nssms))
for node in self.nodes:
#clear descendent idx array first
node.desc_idx = np.array([],np.int32)
#repopulate
node.gather_child_idx()
desc_idx = node.desc_idx
rows = []
if isinstance(node.value, np.int64):
rows = [node.value]
else:
rows = node.value
out[np.ix_(rows, desc_idx)] = 1
return out
def out_1C(self, outfile=None):
node_names = list()
node_sizes = list()
for node in self.nodes:
node_names.append(node.name)
node_sizes.append(len(node.value))
nodes_df = pd.DataFrame({'nodes': node_names, 'sizes': node_sizes})
if nodes_df.dtypes['nodes'] != 'int64':
nodes_df['nodes'] = nodes_df['nodes'].astype(str)
int_nodes = nodes_df[nodes_df['nodes'].str.contains(r'^[\d]+$')]
string_nodes = nodes_df[nodes_df['nodes'].str.contains(r'[a-zA-Z]')]
# str_idx = string_nodes.index()
node_names = dict()
if(len(int_nodes['nodes'])>0):
max_int = int(max(int_nodes['nodes']))
else:
max_int = 0
j = 1
for i in string_nodes.index:
node = string_nodes['nodes'][i]
node_names[node] = max_int + j
nodes_df['nodes'][i] = max_int + j
j += 1
nodes_df['nodes'] = nodes_df['nodes'].astype('int64')
print nodes_df
nodes_df = nodes_df.sort_values(by='nodes')
if(outfile is not None):
nodes_df.to_csv(outfile, header=None,index=None,sep="\t")
return nodes_df
def tree_struct(self,plot_labels=False, uniform = False, outfile = None, tier = False, return_plot_labels=False):
        #creates the 3A output with an optional plot-labels column that will be included in the printed outfile
#TODO add rename nodes to numbers function
node_names = list()
parent_names = list()
node_numbers = list()
parent_numbers = list()
node_tiers = list()
for node in self.nodes:
node_name = node.name
if node.parent is not None:
parent_name = node.parent.name
parent_names.append(node.parent.plot_name)
else:
parent_name = 0
parent_names.append(0)
if isinstance(node_name, basestring):
node_match = re.match(r'N\d',node_name)
if node_match is not None:
node_number = int(node_name.replace('N',''))
else:
node_number = node_name
else:
node_number = node_name
if isinstance(parent_name, basestring):
parent_match = re.match(r'N\d',parent_name)
if parent_match is not None:
parent_number = int(parent_name.replace('N',''))
else:
parent_number = parent_name
else:
parent_number = parent_name
node_numbers.append(node_number)
parent_numbers.append(parent_number)
node_names.append(node.plot_name)
node_tiers.append(node.tier)
if plot_labels == False:
tree_df = pd.DataFrame({'nodes' : node_numbers, 'parent' : parent_numbers, 'tier': node_tiers})
if plot_labels == True:
tree_df = pd.DataFrame({'nodes' : node_numbers, 'parent' : parent_numbers, 'plot_names' : node_names, 'parent_names': parent_names, 'tier': node_tiers})
#rename any nodes with string names as increasing integers
if tree_df.dtypes['nodes'] != 'int64':
tree_df['nodes'] = tree_df['nodes'].astype(str)
tree_df['parent'] = tree_df['parent'].astype(str)
# print tree_df.dtypes
int_nodes = tree_df[tree_df['nodes'].str.contains(r'^[\d]+$')]
string_nodes = tree_df[tree_df['nodes'].str.contains(r'[a-zA-Z]')]
# str_idx = string_nodes.index()
node_names = dict()
max_int = int(max(int_nodes['nodes']))
j = 1
for i in string_nodes.index:
node = string_nodes['nodes'][i]
node_names[node] = max_int + j
tree_df['nodes'][i] = max_int + j
j += 1
string_parents = tree_df[tree_df['parent'].str.contains(r'[a-zA-Z]')]
# str_idx = string_nodes.index()
for i in string_parents.index:
tree_df['parent'][i] = node_names[tree_df['parent'][i]]
tree_df['nodes'] = tree_df['nodes'].astype(int)
tree_df['parent'] = tree_df['parent'].astype(int)
if (uniform == True and tier == True):
tree_df = tree_df.sort_values(by='plot_names')
print tree_df[['nodes','parent','plot_names', 'parent_names', 'tier']]
return tree_df.as_matrix(columns=['plot_names', 'parent_names'])
if (uniform == True):
tree_df = tree_df.sort_values(by='plot_names')
# print tree_df[['nodes','parent','plot_names', 'parent_names', 'tier']]
return tree_df.as_matrix(columns=['plot_names', 'parent_names'])
if (tier == True):
print tree_df[['nodes','parent','tier']]
if outfile is not None:
tree_df = tree_df.sort_values(by='plot_names')
out_tree_df = tree_df[['nodes','parent','plot_names']]
out_tree_df.to_csv(outfile, header=None,index=None,sep="\t")
if plot_labels == True:
print tree_df
print tree_df[['nodes','parent','plot_names', 'parent_names']]
if return_plot_labels == True:
return tree_df[['nodes','parent','plot_names', 'parent_names']]
return tree_df.as_matrix(columns=['nodes','parent'])
def get_node(self,name, return_idx=False):
for idx in range(len(self.nodes)):
node = self.nodes[idx]
if node.name == name:
if return_idx == True:
return node, idx
else:
return node
def split_node(self,name,new_name, prop_split=0.5,same=True):
node = self.get_node(name)
node_idx = node.value
prop_keep = 1- prop_split
num_to_keep = int(prop_keep*len(node_idx))
if num_to_keep == 1:
idx_keep = [0]
else:
idx_keep = range(num_to_keep)
idx_split = range(num_to_keep, len(node_idx))
node.value = node_idx[idx_keep]
if same == True:
parent = node.parent
else:
parent = node
new_node = self.create_node(new_name,parent=parent,values=node_idx[idx_split])
def merge_nodes(self, node_name1, node_name2):
#merge node2 into node1 and transfer all children
node1 = self.get_node(node_name1)
#fetch the node and its index in the tree array
node2, idx2 = self.get_node(node_name2, return_idx=True)
if node1.name == node2.name:
return
#move over the values in that node
node1.value = np.concatenate((node1.value,node2.value))
#transfer any children
if len(node2.children) > 0 :
for child in node2.children:
print child.name, child.parent.name
self.switch_parent(child.name, node_name1)
#remove node2 from its parent's children array
node2.parent.remove_child(node_name2)
#remove node2 from the tree
self.nodes = np.delete(self.nodes, idx2)
def collapse_node(self, node_name):
node = self.get_node(node_name)
node.desc = np.array([])
node.gather_desc_nodes()
node.collapse_children()
for name in node.desc:
d_node, d_idx = self.get_node(name,return_idx = True)
self.nodes = np.delete(self.nodes, d_idx)
def _gather_tiered_nodes(self):
self.node_tiers = dict()
for tier in range(1,self.tiers+1):
self.node_tiers[tier] = list()
for node in self.nodes:
try:
self.node_tiers[node.tier].append(node)
except:
die
def _tier_assignment(self, node = None):
if node is None:
node = self.root
# print self.root.children
children = node.children
for child in children:
child.tier = child.parent.tier +1
child.ancestors = child.parent.ancestors
child.ancestors = np.append(child.ancestors,child.parent.name)
if child.tier > self.tiers:
self.tiers = child.tier
self._tier_assignment(node=child)
def assign_tiers(self):
self._tier_assignment()
self._gather_tiered_nodes()
def get_tier(self, tier):
if self.node_tiers is None:
self.assign_tiers()
return(self.node_tiers[tier])
def deg_of_separation(self,node1_name, node2_name):
#count the distance from each to the root,
is_ancestor = False
node1 = self.get_node(node1_name)
node2 = self.get_node(node2_name)
#if one of the nodes does not have a tier then assign tier to the tree
if any([node.tier == 0 and node is not self.root for node in [node1, node2]]):
self.assign_tiers()
if(node1_name in node2.ancestors or node2_name in node1.ancestors):
print "ancestors"
common = np.intersect1d(node1.ancestors, node2.ancestors)
print "common", common, len(common)+1
sep = (node1.tier-1) + (node2.tier-1) - 2*(len(common))
else:
common = np.intersect1d(node1.ancestors, node2.ancestors)
print "common", common, len(common)+1
sep = (node1.tier-1) + (node2.tier-1) - 2*(len(common)-1)
print node1.ancestors
print node2.ancestors
print "sep", sep
return sep
def standard_node_naming(self,as_plot_names=True):
self.assign_tiers()
max_node_num = 1
for tier in range(1,self.tiers+1):
# the first node is one
if tier == 1:
node = self.root
if as_plot_names is True:
node.plot_name = 1
else:
node.name = 1
self.max_node_num = 1
continue
#get all nodes for that tier
nodes = self.get_tier(tier)
n_children = map(lambda x: len(x.children),nodes)
map(lambda x: x.gather_desc_nodes(),nodes)
n_desc = map(lambda x: len(x.desc),nodes)
names = map(lambda x: x.name,nodes)
parent_names = map(lambda x: x.parent.plot_name,nodes)
            #for the nodes in that tier, number them according to the number of children, the number of descendants, and the names of the nodes themselves
sorted_nodes = [x for _,_,_,_,x in sorted(zip(n_children, n_desc,parent_names, names, nodes), key = operator.itemgetter(0,1,2))]
for node in sorted_nodes:
if as_plot_names is True:
node.plot_name = max_node_num + 1
else:
node.name = max_node_num +1
max_node_num += 1
# def uniform_node_naming(self,node=None):
# if node is None:
# node = self.root
# node.plot_name = 1
# self.max_node_num = 1
# print node.name
# # print "self.max_node_num", self.max_node_num
# children = node.children
# if ( len(children) == 1 ):
# children[0].plot_name = self.max_node_num +1
# self.max_node_num = children[0].plot_name
# self.uniform_node_naming(children[0])
# elif (len(children) == 2 ):
# n_children = [len(children[0].children), len(children[1].children)]
# children[0].gather_desc_nodes()
# children[1].gather_desc_nodes()
# # print children
# n_desc = [len(children[0].desc),len(children[1].desc)]
# names = [children[0].name, children[1].name]
# # print children[1].desc_depth
# print n_children
# print n_desc
# if (n_children[0] != n_children[1]):
# #if one has more children it will get the higher node number
# children[np.argmin(n_children)].plot_name = self.max_node_num + 1
# children[np.argmax(n_children)].plot_name = self.max_node_num + 2
# self.max_node_num = children[np.argmax(n_children)].plot_name
# children = [x for _,x in sorted(zip(n_desc,children))]
# elif (n_desc[0] != n_desc[1]):
# print "most desc ", children[np.argmax(n_desc)].name
# #if both have the name number of children the one with more descendents gets the higher number
# children[np.argmin(n_desc)].plot_name = self.max_node_num + 1
# children[np.argmax(n_desc)].plot_name = self.max_node_num + 2
# self.max_node_num = children[np.argmax(n_desc)].plot_name
# children = [x for _,x in sorted(zip(n_desc,children))]
# else:
# children = [x for _,x in sorted(zip(names,children))]
# children[0].plot_name = self.max_node_num + 1
# children[1].plot_name = self.max_node_num + 2
# self.max_node_num = children[1].plot_name
# children = [x for _,x in sorted(zip(names,children))]
# for child in children:
# # print "child ", child.name
# self.uniform_node_naming(child)
# else: #no children
# return
def switch_parent(self, node_name,new_parent_name):
node = self.get_node(node_name)
new_parent_node = self.get_node(new_parent_name)
old_parent_node = self.get_node(node.parent.name)
if new_parent_node == old_parent_node:
return
node.parent = new_parent_node
new_parent_node.add_children(node)
old_parent_node.remove_child(node.name)
#update descendants of the new parent node
if len(new_parent_node.desc) > 0:
new_parent_node.gather_desc_nodes()
if len(new_parent_node.desc_idx) > 0:
new_parent_node.gather_child_idx()
#update descendants of the old parent node
if len(old_parent_node.desc) > 0:
old_parent_node.gather_desc_nodes()
if len(old_parent_node.desc_idx) > 0:
old_parent_node.gather_child_idx()
def gather_down(self, node, prev_node, limit, sep, nodes_in_range):
children = node.children[np.where(node.children != prev_node)]
if( len(children) > 0 ):
branch_sep = copy.deepcopy(sep)
branch_sep += 1
if(sep < limit):
for child in children:
nodes_in_range[child.name] = branch_sep
self.gather_down(child, prev_node, limit, branch_sep, nodes_in_range)
return None
def gather_within_range(self, node,limit):
sep = 0
prev_node = node
parent = node.parent
nodes_in_range = dict()
while (parent != None) and (sep < limit):
sep += 1
if (parent not in nodes_in_range):
nodes_in_range[parent.name] = sep
# nodes_in_range[parent.name] = self.deg_of_separation(parent.name, node.name)
self.gather_down(parent,prev_node, limit,sep,nodes_in_range)
prev_node = parent
parent = parent.parent
self.gather_down(node,prev_node,limit,0,nodes_in_range)
return(nodes_in_range)
def extra_node(self,extra_prop, parent_name, new_node_name='X', max_dist=2, transfer_children = True, all_nodes=True, num_nodes = 2):
extra_idx = np.array([],np.int32)
if parent_name is not None:
parent_node = self.get_node(parent_name)
orig_children = parent_node.children
extra = self.create_node(new_node_name, parent=parent_node,values=extra_idx)
#transfer the parent's children to the extra node
if len(orig_children) > 0 and transfer_children == True:
[self.switch_parent(child.name,new_node_name) for child in orig_children]
else:
extra = self.create_node(new_node_name, values=extra_idx)
extra.add_children(self.root)
self.root = extra
drawn_nodes = []
#if drawing from all nodes
if all_nodes == True:
drawn_nodes = self.nodes
num_taken_per_node = [int(round(extra_prop*len(node.value))) for node in self.nodes]
#else drawing from nearby nodes
else:
#grab the nodes within the specified neighbourhood
node_ranges = self.gather_within_range(extra,max_dist)
od_node_ranges = collections.OrderedDict(sorted(node_ranges.items()))
pois = poisson(1)
node_probs = collections.OrderedDict()
for node_name in od_node_ranges.keys():
node_probs[node_name] = pois.pmf(od_node_ranges[node_name])
sum_probs = sum(node_probs.values())
for node_name in od_node_ranges:
node_probs[node_name] = node_probs[node_name]/sum_probs
tot_ssms = total_ssms(node_probs.keys(), self)
num_taken_per_node = np.random.multinomial(n=int(round(extra_prop*tot_ssms)),pvals=node_probs.values())
drawn_nodes = [self.get_node(node) for node in od_node_ranges.keys()]
# drawn_nodes = np.random.choice(self.nodes, num_nodes, replace = False)
i = 0
for node in drawn_nodes:
num_taken = num_taken_per_node[i]
taken_idx = []
# if the number of SSMs removed from the node is greater than the number of SSMs in the node just take all of the SSMs into the extra node and remove the original node
if(num_taken >= len(node.value)):
if (node is self.root):
num_taken = len(node.value)-1
else:
extra.value = np.append(extra.value, node.value)
self.remove_node(node.name)
i += 1
continue
if num_taken == 1:
taken_idx = 0
elif num_taken == 0:
i += 1
continue
else:
taken_idx = np.random.choice(np.arange(len(node.value)),num_taken, replace=False)
i += 1
taken = node.value[taken_idx]
extra.value = np.append(extra.value, taken)
node.value = np.delete(node.value,taken_idx)
return None
def total_ssms(node_names, tree):
tot=0
for node_name in node_names:
node = tree.get_node(node_name)
tot += len(node.value)
return(tot)
def baseline_ad(scenario,print_ad=False, print_ccm = False):
ad=get_ad_nvar(scenario,size_clusters=[3,2,2,3,2,4], extra_prop=2.0/12.0)
ccm, clusters = get_ccm_nvar(scenario,size_clusters=[3,2,2,3,2,4], extra_prop=2.0/12.0)
if print_ad == True:
print ad
if print_ccm == True:
print ccm
return ad,ccm
def make_truth():
tree = Tree(name='N1', size=3)
root = tree.root
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',tree.root,2)
node4 = tree.create_node('N4',node2,3)
node5 = tree.create_node('N5',node2,2)
node6 = tree.create_node('N6',node3,4)
return(tree)
def tree_from_df(tree_df, sizes = None):
if sizes is None:
sizes = np.repeat(2,tree_df.shape[0])
print("sizes")
print(sizes)
tree = Tree(name = tree_df[0,0], size = sizes[0])
nodes = dict()
nodes[1] = tree.root
for i in range(1,tree_df.shape[0]):
node = tree.create_node(tree_df[i,0],nodes[tree_df[i,1]], size = sizes[i])
nodes[tree_df[i,0]] = node
return(tree)
def tree_mistakes(tree, base):
print "orig"
print tree.tree_struct()
tree.ccm(outfile=base+"truth_2A.txt")
tree.tree_struct(plot_labels=True, outfile=base+"truth_3A.txt")
tree.out_1C(base+"truth_1C.txt")
split_tree = split_bottom(tree, base)
merged_bottom_tree = merge_bottom(tree,base)
merged_top_tree = merge_top(tree, base)
extra_intermediate_tree = add_intermediate_extra_bottom(tree, base)
merged_extra_tree = merge_top_and_add_extra(tree, base)
switched_tree = parent_is_grandparent(tree, base)
linearize_tree= linearize(tree, base)
# switched_tree = sibling_is_parent(tree, base)
def split_bottom(tree, base):
tree_df = tree.tree_struct()
# pdb.set_trace()
split_bottom_tree = copy.deepcopy(tree)
split_bottom_tree.split_node(max(tree_df[:,0]), max(tree_df[:,0])+1, same =False)
split_node = split_bottom_tree.get_node(max(tree_df[:,0])+1)
split_node.plot_name = max(tree_df[:,0])
print "split"
print split_bottom_tree.tree_struct(plot_labels = True)
outname = base + "split_bottom_3A.txt"
out1Cname= base+"split_bottom_1C.txt"
out2Aname= base+"split_bottom_2A.txt"
split_bottom_tree.out_1C(out1Cname)
split_bottom_tree.ccm(outfile=out2Aname)
print split_bottom_tree.tree_struct(plot_labels=True, outfile = outname )
return(split_bottom_tree)
def merge_bottom(tree,base):
tree_df = tree.tree_struct()
merged_bottom_tree = copy.deepcopy(tree)
bottom_siblings = tree_df[tree_df[:,1] == tree_df[np.argmax(tree_df[:,0]),1],:]
if bottom_siblings.shape[0] > 1:
node_1 = bottom_siblings[0,0]
node_2 = bottom_siblings[1,0]
else:
bottom_node = tree_df[np.argmax(tree_df[:,0]),:]
print bottom_node
node_1 = bottom_node[1]
node_2 = bottom_node[0]
merged_bottom_tree.merge_nodes(node_1, node_2)
merged_node = merged_bottom_tree.get_node(node_1)
merged_node.plot_name = str(node_1) +'/' + str(node_2)
print merged_bottom_tree.tree_struct(plot_labels = True)
outname = base + "merged_bottom_3A.txt"
print merged_bottom_tree.tree_struct(plot_labels=True, outfile = outname )
out1Cname= base+"merged_bottom_1C.txt"
out2Aname= base+"merged_bottom_2A.txt"
merged_bottom_tree.out_1C(out1Cname)
merged_bottom_tree.ccm(outfile=out2Aname)
return(merged_bottom_tree)
def merge_top(tree, base=None):
tree_df = tree.tree_struct()
merged_top_tree = copy.deepcopy(tree)
node_1 = 1
node_2 = 2
merged_top_tree.merge_nodes(node_1, node_2)
merged_node = merged_top_tree.get_node(node_1)
merged_node.plot_name = str(node_1) +'/' + str(node_2)
print merged_top_tree.tree_struct(plot_labels = True)
if (base is not None):
outname = base + "merged_top_3A.txt"
print merged_top_tree.tree_struct(plot_labels = True, outfile = outname)
out1Cname= base+"merged_top_1C.txt"
out2Aname= base+"merged_top_2A.txt"
merged_top_tree.out_1C(out1Cname)
merged_top_tree.ccm(outfile=out2Aname)
return(merged_top_tree)
def add_extra_bottom(tree, base=None):
tree_df = tree.tree_struct()
parent_name = max(tree_df[:,0])
extra_bottom_tree = copy.deepcopy(tree)
extra_bottom_tree.extra_node(0.18,parent_name,all_nodes=True, num_nodes=None)
parent_node = extra_bottom_tree.get_node('X')
parent_node.plot_name = 'X'
print extra_bottom_tree.tree_struct(plot_labels = True)
if (base is not None):
outname = base + "extra_bottom_3A.txt"
print extra_bottom_tree.tree_struct(plot_labels=True, outfile = outname)
out1Cname= base+"extra_bottom_1C.txt"
out2Aname= base+"extra_bottom_2A.txt"
extra_bottom_tree.out_1C(out1Cname)
extra_bottom_tree.ccm(outfile=out2Aname)
return extra_bottom_tree
def add_intermediate_extra_bottom(tree, base=None):
tree_df = tree.tree_struct()
bottom_node = tree.get_node(max(tree_df[:,0]))
if bottom_node.parent is None:
parent_name = bottom_node.name
else:
parent_name = bottom_node.parent.name
extra_bottom_tree = copy.deepcopy(tree)
extra_bottom_tree.extra_node(0.25, parent_name, max_dist=1, all_nodes=False)
parent_node = extra_bottom_tree.get_node('X')
parent_node.plot_name = 'X'
print extra_bottom_tree.tree_struct(plot_labels = True)
if (base is not None):
outname = base + "extra_intermediate_3A.txt"
print extra_bottom_tree.tree_struct(plot_labels=True, outfile = outname)
out1Cname= base+"extra_intermediate_1C.txt"
out2Aname= base+"extra_intermediate_2A.txt"
extra_bottom_tree.out_1C(out1Cname)
extra_bottom_tree.ccm(outfile=out2Aname)
return extra_bottom_tree
def merge_top_and_add_extra(tree, base):
merged_tree = merge_top(tree)
# print merged_tree.tree_struct()
extra_and_merged = add_intermediate_extra_bottom(merged_tree)
outname = base + "extra_merged_3A.txt"
out1Cname= base+"extra_merged_1C.txt"
out2Aname= base+"extra_merged_2A.txt"
extra_and_merged.out_1C(out1Cname)
extra_and_merged.ccm(outfile=out2Aname)
print extra_and_merged.tree_struct(plot_labels=True, outfile = outname)
return(extra_and_merged)
def parent_is_grandparent(tree, base):
tree_df = tree.tree_struct()
node_name = max(tree_df[:,0])
pig_tree = copy.deepcopy(tree)
node = pig_tree.get_node(node_name)
grandparent = node.parent.parent
print "parent is grandparent"
if grandparent is None :
if (len(node.parent.children) >0 ):
print "going to sibling is parent"
return sibling_is_parent(tree,base)
else:
return None
if( len(grandparent.children) > 1):
print "going to sibling is parent"
# sibling_is_parent(tree,base)
return sibling_is_parent(tree,base)
else:
pig_tree.switch_parent(node_name, grandparent.name)
print pig_tree.tree_struct(plot_labels = True)
outname = base + "pig_3A.txt"
out1Cname= base+"pig_1C.txt"
out2Aname= base+"pig_2A.txt"
pig_tree.out_1C(out1Cname)
pig_tree.ccm(out2Aname)
print pig_tree.tree_struct(plot_labels=True, outfile = outname)
return pig_tree
def linearize(tree,base):
# tree_df = tree.tree_struct(plot_labels=True)
num_nodes = tree.nodes.shape[0]
root_node = tree.root
lin_tree = Tree(name=1, size=root_node.value.shape[0])
for i in range(2,num_nodes+1):
orig_node = tree.get_node(i)
lin_tree.create_node(i, lin_tree.get_node(i-1),orig_node.value.shape[0])
# print("linearized")
outname = base + "linear_3A.txt"
out1Cname = base + "linear_1C.txt"
out2Aname = base + "linear_2A.txt"
lin_tree.tree_struct(plot_labels=True, outfile=outname)
lin_tree.out_1C(out1Cname)
lin_tree.ccm(out2Aname)
# lin_tree.out_1C()
return lin_tree
def sibling_is_parent(tree, base):
tree_df = tree.tree_struct()
print "orig",tree_df
node_name = max(tree_df[:,0])
sip_tree = copy.deepcopy(tree)
node = sip_tree.get_node(node_name)
print node.name
print node.parent.name
print len(node.parent.children)
if (len(node.parent.children) > 1):
#if the node has a direct sibling then make it its parent
sibling_name = tree_df[(tree_df[:,1] == node.parent.name) & (tree_df[:,0] != node_name),0]
else:
if node.parent.parent is None:
return
#if the node does not have a sibling
print "here"
sibling_name = tree_df[(tree_df[:,1] == node.parent.parent.name) & (tree_df[:,0] != node.parent.name),0]
node_name = node.parent.name
print "sibling", sibling_name
sip_tree.switch_parent(node_name, sibling_name)
print sip_tree.tree_struct(plot_labels = True)
outname = base + "sip_3A.txt"
out1Cname= base+"sip_1C.txt"
out2Aname= base+"sip_2A.txt"
sip_tree.out_1C(out1Cname)
sip_tree.ccm(out2Aname)
print sip_tree.tree_struct(plot_labels=True, outfile = outname)
return sip_tree
def one_cluster_full(tree, ordered =True):
nssm = tree.comp_nssms()
out_tree = Tree(name ='N0', size = nssm)
return out_tree
mistake_dict ={
'split_bottom': split_bottom,
'merge_bottom' : merge_bottom,
'merge_top' : merge_top,
'extra_intermediate': add_intermediate_extra_bottom,
'merged_extra' : merge_top_and_add_extra,
'wrong_parent': parent_is_grandparent,
'linear' : linearize,
'extra_bottom': add_extra_bottom,
'all_1clust': one_cluster_full
}
def run_scenario(tree,sc,base):
print sc
if(sc != 'Truth'):
func = mistake_dict[sc]
mistake_tree = func(tree,base)
return mistake_tree
else:
return tree
def n_cluster_one_lineage(nssm, ordered =True):
if ordered == True:
tree = Tree(name ='N0', size =1)
ssms = range(1,nssm)
last_ssm = tree.root
for ssm in ssms:
node = tree.create_node('N'+str(ssm), parent = last_ssm, size =1)
last_ssm = node
return tree
def one_cluster(nssm, ordered =True):
tree = Tree(name ='N0', size = nssm)
return tree
def n_cluster_two_lineages(nssm, ordered = True):
if ordered == True:
tree = Tree(name='N0', size =1)
l1_root = tree.create_node('N1', parent = tree.root, size =1)
l1_ssms = range(1,nssm/2)
last_ssm = l1_root
for ssm in l1_ssms:
node = tree.create_node('N'+str(ssm), parent = last_ssm, size =1)
last_ssm = node
l2_root = tree.create_node('N2', parent = tree.root, size =1)
l2_ssms = range((nssm/2+1),(nssm-1))
last_ssm = l2_root
for ssm in l2_ssms:
node = tree.create_node('N'+str(ssm), parent = last_ssm, size =1)
last_ssm = node
return tree
def ncluster_correct_lineage(tree=None):
if tree is None:
tree = make_truth()
cluster_last_node = {}
cluster_tree = []
for node_idx in range(len(tree.nodes)):
node = tree.nodes[node_idx]
last_node = None
for idx in range(len(node.value)):
parent = ''
if node.parent is None and idx == 0:
cluster_tree = Tree("N0",values = node.value[idx])
last_node = cluster_tree.root
continue
else:
if idx == 0:
parent = cluster_last_node[node.parent.name]
else:
parent = last_node
new_node = cluster_tree.create_node('N', parent = parent, values=node.value[idx])
if idx == (len(node.value)-1):
cluster_last_node[node.name] = new_node
last_node = new_node
# print cluster_tree.ccm()
return cluster_tree
def split_cluster(tree = None, name_split=None, name_new=None, same=True):
if tree is None:
tree = make_truth()
tree.split_node(name_split,name_new,same=same)
n6 = tree.get_node(name_split)
n7 = tree.get_node(name_new)
return tree
def merge_clusters(node_name1=None, node_name2=None):
tree = make_truth()
tree.merge_nodes(node_name1, node_name2)
n1 = tree.get_node(node_name1)
return tree
def collapse_clusters(node_name):
tree = make_truth()
tree.collapse_node(node_name)
return tree
def switch_parent_cluster(node_name=None, new_parent_name=None):
tree = make_truth()
tree.switch_parent(node_name, new_parent_name)
#print tree.ad()
return tree
def extra_cluster(extra_prop=1.0/6.0, parent_name=None, all_nodes = True, num_nodes = None):
tree = make_truth()
tree.extra_node(extra_prop,parent_name,all_nodes=all_nodes, num_nodes=num_nodes)
return tree
########################New mistake scenarios
def branching_test():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',tree.root,3)
node4 = tree.create_node('N4',node3,3)
return tree
def branching_test2():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',tree.root,3)
node4 = tree.create_node('N4',node2,3)
node5 = tree.create_node('N5',node2,3)
node6 = tree.create_node('N6',node3,3)
node7 = tree.create_node('N7',node3,3)
node8 = tree.create_node('N8',node6,3)
return tree
def branching_test3():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',tree.root,3)
node4 = tree.create_node('N4',node2,3)
node5 = tree.create_node('N5',node2,3)
node6 = tree.create_node('N6',node3,3)
node7 = tree.create_node('N7',node3,3)
node8 = tree.create_node('N8',node4,3)
return tree
def branching_test4():
#truth2
tree = Tree(name='N1', size=3)
node3 = tree.create_node('N3',tree.root,3)
node2 = tree.create_node('N2',tree.root,2)
node4 = tree.create_node('N4',node2,3)
node5 = tree.create_node('N5',node4,3)
return tree
def branching_test5():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',node2,3)
node4 = tree.create_node('N4',tree.root,3)
node5 = tree.create_node('N5',node4,3)
node6 = tree.create_node('N6',node5,3)
return tree
def branching_test6():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',node2,3)
node4 = tree.create_node('N4',node3,3)
node5 = tree.create_node('N5',tree.root,3)
return tree
def branching_test7():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',node2,3)
node4 = tree.create_node('N4',node3,3)
node5 = tree.create_node('N5',tree.root,3)
node6 = tree.create_node('N6',node2,3)
return tree
def branching_test8():
#truth2
tree = Tree(name='N1', size=3)
node2 = tree.create_node('N2',tree.root,2)
node3 = tree.create_node('N3',node2,3)
node4 = tree.create_node('N4',tree.root,3)
node5 = tree.create_node('N5',node2,3)
node6 = tree.create_node('N6',node3,3)
return tree
def linear_to_branching(tree):
#truth2 as a mistake for truth1
tree.switch_parent('N4','N2')
node = tree.get_node('N3')
node.gather_child_idx()
print tree.ad()
def matches(scenario, f, pass_l, scenarios, **kwargs):
orig_ad, orig_ccm = baseline_ad(scenario)
new_tree = f(**kwargs)
new_ad = new_tree.ad()
new_ccm = new_tree.ccm()
print scenario
pass_v = 0
if (np.array_equal(new_ad,orig_ad)):
print('Pass AD')
# return('Pass')
else:
print('Fail AD')
print "new"
print new_ad
print "orig"
print orig_ad
pass_v = 1
if (np.array_equal(new_ccm,orig_ccm)):
print('Pass CCM')
# return('Pass')
else:
print('Fail CCM')
print "new"
print new_ccm
print "orig"
print orig_ccm
pass_v = 1
pass_l.append((scenario, pass_v))
def test_all():
pass_l = []
scenarios = []
matches('Truth', make_truth, pass_l, scenarios)
matches('SplitClusterBotSame', split_cluster, pass_l, scenarios, name_split='N6', name_new='N7',same=True)
matches('SplitClusterBotDiff', split_cluster, pass_l, scenarios, name_split='N6', name_new='N7',same=False)
matches('SplitClusterMidOneChild', split_cluster, pass_l, scenarios,name_split='N3', name_new='N7',same=True)
matches('SplitClusterMidMultiChild', split_cluster, pass_l, scenarios, name_split='N2', name_new='N7',same=True)
matches('MergeClusterMid&BotOneChild', merge_clusters, pass_l, scenarios, node_name1='N3',node_name2='N6')
matches('MergeClusterBot', merge_clusters, pass_l, scenarios, node_name1='N4',node_name2='N5')
matches('MergeClusterTop&Mid', merge_clusters, pass_l, scenarios,node_name1='N1',node_name2='N2')
matches('MergeClusterMid&BotMultiChild', merge_clusters, pass_l, scenarios, node_name1='N2',node_name2='N5')
matches('ParentIsSibling', switch_parent_cluster, pass_l, scenarios, node_name='N5',new_parent_name='N4')
matches('ParentIsAunt', switch_parent_cluster, pass_l, scenarios, node_name='N5',new_parent_name='N3')
matches('ParentIsCousin', switch_parent_cluster, pass_l, scenarios, node_name='N5',new_parent_name='N6')
matches('ParentIsNieceWithChildren', switch_parent_cluster, pass_l, scenarios, node_name='N2',new_parent_name='N6')
matches('ParentIsGrandparent', switch_parent_cluster, pass_l, scenarios, node_name='N5',new_parent_name='N1')
matches('SmallExtraCurBot', extra_cluster, pass_l, scenarios, parent_name='N3')
#original SmallExtraMid is wrong
matches('SmallExtraMid', extra_cluster, pass_l, scenarios, parent_name='N1')
matches('SmallExtraTop', extra_cluster, pass_l, scenarios, parent_name=None)
matches('NClusterCorrectLineage', ncluster_correct_lineage, pass_l, scenarios)
matches('NClusterOneLineage', n_cluster_one_lineage, pass_l, scenarios,nssm = 16)
matches('NClusterTwoLineages', n_cluster_two_lineages, pass_l, scenarios,nssm=16)
matches('OneCluster', one_cluster, pass_l, scenarios,nssm=16)
fail = [sc[0] for sc in pass_l if sc[1] > 0]
if len(fail)>0:
for sc in fail:
print "failed at", sc #print scenarios#scenarios[np.where(pass_l>0)]
else:
print "all passed"
def test_scenario(t,f,**kwargs):
tree = t()
new_tree = f(tree, **kwargs)
new_ad = new_tree.ad()
new_ccm = new_tree.ccm()
print new_ad
print new_ccm
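# --- Illustrative addition (not part of the original module) ---
# Small demonstration of the Tree API above: build the six-node truth tree, apply one
# mistake scenario, and compare the resulting co-clustering (CCM) matrices. Nothing is
# written to disk; the *_1C/_2A/_3A helpers are only needed when output files are wanted.
def demo_truth_vs_mistake():
    truth = make_truth()
    truth_ccm = truth.ccm()
    truth_ad = truth.ad()
    # merge the two bottom siblings N4 and N5 to mimic an under-clustered reconstruction
    mistake = merge_clusters(node_name1='N4', node_name2='N5')
    mistake_ccm = mistake.ccm()
    # both matrices are nssm x nssm; differing entries are mutation pairs whose
    # co-clustering status changed between truth and mistake
    n_diff = int(np.sum(truth_ccm != mistake_ccm))
    print("CCM entries that changed: %d" % n_diff)
    return truth_ad, truth_ccm, mistake_ccm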
|
{"hexsha": "63d964a378dfff954aa0b5e5e40bd32e8a76c033", "size": 37565, "ext": "py", "lang": "Python", "max_stars_repo_path": "smc_het_eval/ccm_ad_flexible.py", "max_stars_repo_name": "asalcedo31/SMC-Het_Scoring", "max_stars_repo_head_hexsha": "8b072a22eeefa4cbac37b9d22fe732798b62d40a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-10T00:50:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-10T00:50:21.000Z", "max_issues_repo_path": "smc_het_eval/ccm_ad_flexible.py", "max_issues_repo_name": "asalcedo31/SMC-Het_Scoring", "max_issues_repo_head_hexsha": "8b072a22eeefa4cbac37b9d22fe732798b62d40a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smc_het_eval/ccm_ad_flexible.py", "max_forks_repo_name": "asalcedo31/SMC-Het_Scoring", "max_forks_repo_head_hexsha": "8b072a22eeefa4cbac37b9d22fe732798b62d40a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7808798646, "max_line_length": 171, "alphanum_fraction": 0.7140423266, "include": true, "reason": "import numpy,from scipy", "num_tokens": 10794}
|
#!/usr/bin/env python3
"""
Changelog:
New in v1_0:
- Create script
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Author:
Sleiman Safaoui
Email:
sleiman.safaoui@utdallas.edu
Github:
@The-SS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This script runs the RRT*-generated path, extracted by `opt_path.py` with an nmpc low level controller
Tested platform:
- Python 3.6.9 on Ubuntu 18.04 LTS (64 bit)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
###############################################################################
###############################################################################
# Import all the required libraries
import math
from casadi import *
import numpy as np
import numpy.linalg as la
import numpy.random as npr
import matplotlib.pyplot as plt
import time
import pickle
import os
from plotting import animate
import copy
from matplotlib.patches import Rectangle, Ellipse
from matplotlib.collections import EllipseCollection
from matplotlib.offsetbox import AnnotationBbox, AuxTransformBox
from collision_check import *
from drrrts_nmpc import SetUpSteeringLawParametersBigM
from drrrts_nmpc import nonlinsteerBigM, find_dr_padding, get_padded_edges
from drrrts_nmpc import disturbed_nmpc
import sys
sys.path.insert(0, '../unicycle')
sys.path.insert(0, '../rrtstar')
sys.path.insert(0, '../')
import UKF_Estimator as UKF_Estimator
from numpy import linalg as LA
import config
STEER_TIME = config.STEER_TIME # Maximum Steering Time Horizon
DT = config.DT # timestep between controls
SAVEPATH = config.SAVEPATH
GOALAREA = config.GOALAREA #[xmin,xmax,ymin,ymax]
RANDAREA = copy.copy(config.RANDAREA) # [xmin,xmax,ymin,ymax]
VELMAX = config.VELMAX
VELMIN = config.VELMIN
ANGVELMAX = config.ANGVELMAX
ANGVELMIN = config.ANGVELMIN
ROBRAD = config.ROBRAD # radius of robot (added as padding to environment bounds and the obstacles)
OBSTACLELIST = copy.copy(config.OBSTACLELIST) # [ox,oy,wd,ht]
SIGMAW = config.SIGMAW
SIGMAV = config.SIGMAV
CROSSCOR = config.CROSSCOR
ALFA = copy.copy(config.ALFA)
QLL = config.QLL
RLL = config.RLL
QTLL = config.QTLL
# copy last alfa (env alfa) to beginning and remove the last 4 (env) alfas from the end
lastalfa = ALFA[-1]
obsalfa = ALFA[0:-4]
obsalfa.insert(0, lastalfa)
ALFA = obsalfa
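# Illustrative example of the reordering above (hypothetical values): if config.ALFA were
# [a_obs1, a_obs2, a_env1, a_env2, a_env3, a_env4], the last (environment) entry is moved to
# the front and the four environment entries are dropped from the tail, giving
# ALFA = [a_env4, a_obs1, a_obs2]. Index 0 is then the environment risk bound and the
# remaining entries are the per-obstacle risk bounds, matching how delta[0] and delta[ob+1]
# are indexed in the DR padding code further below.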
def sim_state(T, x0, u, f):
f_value = f(x0, u)
st = x0+T * f_value.T
return st
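# sim_state above performs one explicit Euler step of the continuous-time dynamics,
#     x_{k+1} = x_k + T * f(x_k, u_k).
# For the unicycle model used in this file (states [x, y, theta], inputs [v, omega]) this is
#     x_{k+1}     = x_k     + T * v_k * cos(theta_k)
#     y_{k+1}     = y_k     + T * v_k * sin(theta_k)
#     theta_{k+1} = theta_k + T * omega_k
# e.g. one step of length T = 0.1 with u = [1.0, 0.0] from the origin moves the robot to
# roughly (0.1, 0.0) with unchanged heading (illustrative numbers only).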
############################# NMPC FUNCTIONS ##################################
#
# def SetUpSteeringLawParametersNoColAvoid(N, T, v_max, v_min, omega_max, omega_min):
# """
# Sets up an IPOPT NLP solver using Casadi Opti
# Inputs:
# N: horizon
# T: time step (sec)
# v_max, v_min: maximum and minimum linear velocities in m/s
# omega_max, omega_min: maximum and minimum angular velocities in rad/s
# Outputs:
# solver, f, n_states, n_controls, U, X, P, delta
# solver: Casadi NLP solver using bonmin
# f: Casadi continuous time dynamics function
# n_states, n_controls: number of states and controls
# U, X: Casadi input and state variables (N x n_controls and (N+1)x n_states matrices)
# P: Casadi desired state parameters ((N+1) x n_states matrix)
# Delta: Casadi 0-1 variables for constraints (4*num_obs vector)
# """
#
# # Define state and input cost matrices
# Q = QLL
# R = RLL
# QT = QTLL
#
# opti = casadi.Opti()
#
# # Define symbolic states using Casadi Opti
# x = opti.variable()
# y = opti.variable()
# theta = opti.variable()
# states = vertcat(x, y, theta) # all three states
# n_states = states.size()[0] # number of symbolic states
#
    # # Define symbolic inputs using Casadi Opti
# v = opti.variable()
# omega = opti.variable()
# controls = vertcat(v, omega) # both controls
# n_controls = controls.size()[0] # number of symbolic inputs
#
# # RHS of nonlinear unicycle dynamics (continuous time model)
# rhs = horzcat(v * cos(theta), v * sin(theta), omega)
#
# # Unicycle continuous time dynamics function
# f = Function('f', [states, controls], [rhs], ['input_state', 'control_input'], ['rhs'])
#
# # Casadi Opti trajectory variables/parameters for multiple shooting
# U = opti.variable(N, n_controls)
# X = opti.variable(N+1, n_states)
# P = opti.parameter(N+1, n_states)
# discrete = [False]*(N*n_controls + (N+1)*n_states) # specify U and X to be continuous variables
#
# # Cost function
# obj = 0 # objective/cost
# opti.subject_to(X[0, :].T == P[0, :].T)
# for i in range(N):
# # add to the cost the quadratic stage cost: (x-x_des)*Q*(x-x_des)^T + u*R*u^T
# obj += mtimes([U[i, :], R, U[i, :].T]) # quadratic penalty on control effort
# obj += mtimes([X[i, :] - P[i, :], Q, X[i, :].T - P[i, :].T]) # quadratic penalty on deviation from reference state
#
# # compute the next state from the dynamics
# x_next_ = f(X[i, :], U[i, :]) * T + X[i, :]
#
# # make the dynamics' next state the same as the i+1 trajectory state (multiple shooting) (satisfy dynamics)
# opti.subject_to(X[i + 1, :].T == x_next_.T)
#
# # we might not be able to get back to the original target goal state
# # alternatively, we have a large penalty of being away from it
# obj += mtimes([X[N, :] - P[N, :], QT, X[N, :].T - P[N, :].T])
#
# # minimize this objective
# opti.minimize(obj)
#
# # state environment constraints
# _, env_edges = get_padded_edges()
# x_max_env = env_edges["right"]
# x_min_env = env_edges["left"]
# y_max_env = env_edges["top"]
# y_min_env = env_edges["bottom"]
# opti.subject_to(opti.bounded(x_min_env, X[:, 0], x_max_env))
# opti.subject_to(opti.bounded(y_min_env, X[:, 1], y_max_env))
# opti.subject_to(opti.bounded(-casadi.inf, X[:,2], casadi.inf))
# # input constraints
# opti.subject_to(opti.bounded(v_min, U[:,0], v_max))
# opti.subject_to(opti.bounded(omega_min, U[:,1], omega_max))
#
# # create a dict of the discrete flags
# args = dict(discrete=discrete)
# # specify the solver
# # opti.solver("bonmin", args)
# opti.solver("ipopt", args)
#
# solver = opti # solver instance to return
#
# DELTA = []
# STARTIDX = []
# OBSPAD, ENVPAD = [], []
# return solver, f, n_states, n_controls, U, X, P, DELTA, STARTIDX, OBSPAD, ENVPAD
#
# def SetUpSteeringLawParametersBigM(N, T, v_max, v_min, omega_max, omega_min):
# """
# Sets up a BONMIN MINLP solver using Casadi Opti
# Collision avoidance is encoded with Big-M formulation
#
# Inputs:
# N: horizon
# T: time step (sec)
# v_max, v_min: maximum and minimum linear velocities in m/s
# omega_max, omega_min: maximum and minimum angular velocities in rad/s
# Outputs:
# solver, f, n_states, n_controls, U, X, P, delta
# solver: Casadi NLP solver using bonmin
# f: Casadi continuous time dynamics function
# n_states, n_controls: number of states and controls
# U, X: Casadi input and state variables (N x n_controls and (N+1)x n_states matrices)
# P: Casadi desired state parameters ((N+1) x n_states matrix)
# Delta: Casadi 0-1 variables for constraints (4*num_obs vector)
# """
#
# # Define state and input cost matrices
# Q = QLL
# R = RLL
# QT = QTLL
#
#
# opti = casadi.Opti()
#
# # Define symbolic states using Casadi Opti
# x = opti.variable()
# y = opti.variable()
# theta = opti.variable()
# states = vertcat(x, y, theta) # all three states
# n_states = states.size()[0] # number of symbolic states
#
    # # Define symbolic inputs using Casadi Opti
# v = opti.variable()
# omega = opti.variable()
# controls = vertcat(v, omega) # both controls
# n_controls = controls.size()[0] # number of symbolic inputs
#
# # RHS of nonlinear unicycle dynamics (continuous time model)
# rhs = horzcat(v * cos(theta), v * sin(theta), omega)
#
# # Unicycle continuous time dynamics function
# f = Function('f', [states, controls], [rhs], ['input_state', 'control_input'], ['rhs'])
#
# # Casadi Opti trajectory variables/parameters for multiple shooting
# U = opti.variable(N, n_controls)
# X = opti.variable(N+1, n_states)
# P = opti.parameter(N+1, n_states)
# discrete = [False]*(N*n_controls + (N+1)*n_states) # specify U and X to be continuous variables
#
# # Cost function
# obj = 0 # objective/cost
# opti.subject_to(X[0, :].T == P[0, :].T)
# for i in range(N):
# # add to the cost the quadratic stage cost: (x-x_des)*Q*(x-x_des)^T + u*R*u^T
# obj += mtimes([U[i, :], R, U[i, :].T]) # quadratic penalty on control effort
# obj += mtimes([X[i, :] - P[i, :], Q, X[i, :].T - P[i, :].T]) # quadratic penalty on deviation from reference state
#
# # compute the next state from the dynamics
# x_next_ = f(X[i, :], U[i, :]) * T + X[i, :]
#
# # make the dynamics' next state the same as the i+1 trajectory state (multiple shooting) (satisfy dynamics)
# opti.subject_to(X[i + 1, :].T == x_next_.T)
#
# # we might not be able to get back to the original target goal state
# # alternatively, we have a large penalty of being away from it
# obj += mtimes([X[N, :] - P[N, :], QT, X[N, :].T - P[N, :].T])
#
# # minimize this objective
# opti.minimize(obj)
#
# # state environment constraints
# opti.subject_to(opti.bounded(-casadi.inf, X[:,2], casadi.inf)) # theta only now (x,y states added later)
# # input constraints
# opti.subject_to(opti.bounded(v_min, U[:,0], v_max))
# opti.subject_to(opti.bounded(omega_min, U[:,1], omega_max))
#
#
# # obstacle constraints using Big-M formulation TODO: TRY THE CONVEX-HULL REFORMULATION https://optimization.mccormick.northwestern.edu/index.php/Disjunctive_inequalities (it might be faster)
# obs_edges, env_edges = get_padded_edges()
# x_max_env = env_edges["right"]
# x_min_env = env_edges["left"]
# y_max_env = env_edges["top"]
# y_min_env = env_edges["bottom"]
#
# num_obs = len(obs_edges)
# DELTA = opti.variable(4 * num_obs) # 0-1 variables to indicate if an obstacle is hit
# opti.subject_to(opti.bounded(0, DELTA, 1))
# discrete += [True] * (4 * num_obs) # specify the delta variables to be discrete (with above bound --> 0-1 variables)
# M = max(x_max_env-x_min_env, y_max_env-y_min_env) + 1 # 10 # a large upper bound on x and y
# STARTIDX = opti.parameter(1) # specify which points in the horizon should have collision avoidance enforced
# # DR padding values
# OBSPAD = opti.parameter(N+1, 4 * num_obs) # for each time step, each obstacle edge has its own dr padding (right, left, top, bottom)
# ENVPAD = opti.parameter(N+1, 4) # for each time step, the four environment edges have their own dr padding (xmax, xmin, ymax, ymin) = (right, left, top, bottom)
#
# opti.subject_to(opti.bounded(x_min_env + ENVPAD[:,1], X[:, 0], x_max_env - ENVPAD[:,0]))
# opti.subject_to(opti.bounded(y_min_env + ENVPAD[:,3], X[:, 1], y_max_env - ENVPAD[:,2]))
#
# for obs_num, obs in enumerate(obs_edges):
# # for every obstacle
# top = obs["top"]
# bottom = obs["bottom"]
# right = obs["right"]
# left = obs["left"]
#
# # add Big-M formulation disjunctive constraints
# opti.subject_to(opti.bounded(-M * (1 - DELTA[4 * obs_num + 0]) + right + OBSPAD[:, 0],
# X[:, 0],
# x_max_env - ENVPAD[:, 0] + M * (1 - DELTA[4 * obs_num + 0]))) # be to the right of the obstacle
# opti.subject_to(opti.bounded(-M * (1 - DELTA[4 * obs_num + 1]) + x_min_env + ENVPAD[:, 1],
# X[:, 0],
# left - OBSPAD[:, 1] + M * (1 - DELTA[4 * obs_num + 1]))) # be to the left of the obstacle
# opti.subject_to(opti.bounded(-M * (1 - DELTA[4 * obs_num + 2]) + top + OBSPAD[:, 2],
# X[:, 1],
# y_max_env - ENVPAD[:, 2] + M * (1 - DELTA[4 * obs_num + 2]))) # be to the top of the obstacle
# opti.subject_to(opti.bounded(-M * (1 - DELTA[4 * obs_num + 3]) + y_min_env + ENVPAD[:, 3],
# X[:, 1],
# bottom - OBSPAD[:, 3] + M * (1 - DELTA[4 * obs_num + 3]))) # be to the bottom of the obstacle
#
# # require at least one of these constraints to be true
# opti.subject_to(
# 1 <= DELTA[4 * obs_num + 0] + DELTA[4 * obs_num + 1] + DELTA[4 * obs_num + 2] + DELTA[4 * obs_num + 3])
#
# # create a dict of the discrete flags
# args = dict(discrete=discrete)
# # specify the solver
# opti.solver("bonmin", args)
#
# solver = opti # solver instance to return
#
# return solver, f, n_states, n_controls, U, X, P, DELTA, STARTIDX, OBSPAD, ENVPAD
#
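# Note on the Big-M encoding used in SetUpSteeringLawParametersBigM above (illustrative
# sketch with hypothetical numbers): each obstacle gets four 0-1 variables, one per edge, and
#     delta_right + delta_left + delta_top + delta_bottom >= 1
# forces at least one "stay on this side of the obstacle" condition to be active. With M
# larger than the environment extent, a lower bound such as
#     x >= right + pad - M * (1 - delta_right)
# binds only when delta_right = 1 and is vacuous otherwise. For example, with right = 2.0,
# pad = 0.3 and M = 11: delta_right = 1 gives x >= 2.3, while delta_right = 0 gives
# x >= -8.7, which every point in the environment satisfies.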
# def nonlinsteerNoColAvoid(solver, x0, xT, n_states, n_controls, N, T, U, X, P, DELTA, STARTIDX, OBSPAD, ENVPAD, current_ref_traj, current_ref_inputs, start_idx, obs_pad, env_pad):
# """
# Solves the nonlinear steering problem using the solver from SetUpSteeringLawParametersBigM
# Inputs:
# solver: Casadi NLP solver from SetUpSteeringLawParameters
# x0, xT: initial and final states as (n_states)x1 ndarrays e.g. [[2.], [4.], [3.14]]
# n_states, n_controls: number of states and controls
# N: horizon
# T: time step
# lbg, lbx, ubg, ubx: lower and upper (l,u) state and input (x,g) bounds
# current_ref_traj, current_ref_inputs: reference trajectory and reference inputs as Nx(n_states) ndarrays# TODO: add shapes
# Outputs:
# x_casadi, u_casadi: trajectory states and inputs returned by Casadi
# if solution found:
# states: (N+1)x(n_states) ndarray e.g. [[1 2 0], [1.2 2.4 0], [2 3.5 0]]
# controls: (N)x(n_controls) ndarray e.g. [[0.5 0], [1 0.01], [1.2 -0.01]]
# else, [],[] returned
# """
#
# # Create an initial state trajectory that roughly accomplishes the desired state transfer (by interpolating)
# init_states_param = np.linspace(0, 1, N + 1)
# init_states = np.zeros([N + 1, n_states])
# dx = xT - x0
# for i in range(N + 1):
# init_states[i] = (x0 + init_states_param[i] * dx).flatten()
#
# # Create an initial input trajectory that roughly accomplishes the desired state transfer
# # (using interpolated states to compute rough estimate of controls)
# dist = la.norm(xT[0:2] - x0[0:2])
# ang_dist = xT[2][0] - x0[2][0]
# total_time = N * T
# const_vel = dist / total_time
# const_ang_vel = ang_dist / total_time
# init_inputs = np.array([const_vel, const_ang_vel] * N).reshape(-1, 2)
#
# ## set parameter
# constraint_states = []
# constraint_states.append(x0.reshape(n_states))
#
# for ref_state in current_ref_traj:
# constraint_states.append(ref_state.reshape(n_states))
#
# init_inputs = []
# for ref_input in current_ref_inputs:
# init_inputs.append(ref_input.reshape(n_controls))
# init_inputs = np.array(init_inputs)
#
# constraint_states = np.array(constraint_states)
# solver.set_value(P, constraint_states)
# solver.set_initial(X, constraint_states)
# solver.set_initial(U, init_inputs)
# # solver.set_initial(X, init_states)
# # solver.set_initial(U, init_inputs)
# try:
# res = solver.solve()
# except:
# print('Steering NLP Failed')
# return [], []
#
# # Update the cost_total
# # cost_total = res.value(self.obj) # self.opti.debug.value(self.obj)
# # Obtain the optimal control input sequence
# u_casadi = res.value(U) # shape: (N, n_controls)
# # Get the predicted state trajectory for N time steps ahead
# x_casadi = res.value(X) # shape: # (N+1, n_states)
#
# return x_casadi, u_casadi
#
# def nonlinsteerBigM(solver, x0, xT, n_states, n_controls, N, T, U, X, P, DELTA, STARTIDX, OBSPAD, ENVPAD, current_ref_traj, current_ref_inputs, start_idx, obs_pad, env_pad):
# """
# Solves the nonlinear steering problem using the solver from SetUpSteeringLawParametersBigM
# Inputs:
# solver: Casadi NLP solver from SetUpSteeringLawParameters
# x0, xT: initial and final states as (n_states)x1 ndarrays e.g. [[2.], [4.], [3.14]]
# n_states, n_controls: number of states and controls
# N: horizon
# T: time step
# lbg, lbx, ubg, ubx: lower and upper (l,u) state and input (x,g) bounds
# current_ref_traj, current_ref_inputs: reference trajectory and reference inputs as Nx(n_states) ndarrays# TODO: add shapes
# Outputs:
# x_casadi, u_casadi: trajectory states and inputs returned by Casadi
# if solution found:
# states: (N+1)x(n_states) ndarray e.g. [[1 2 0], [1.2 2.4 0], [2 3.5 0]]
# controls: (N)x(n_controls) ndarray e.g. [[0.5 0], [1 0.01], [1.2 -0.01]]
# else, [],[] returned
# """
#
# # Create an initial state trajectory that roughly accomplishes the desired state transfer (by interpolating)
# init_states_param = np.linspace(0, 1, N + 1)
# init_states = np.zeros([N + 1, n_states])
# dx = xT - x0
# for i in range(N + 1):
# init_states[i] = (x0 + init_states_param[i] * dx).flatten()
#
# # Create an initial input trajectory that roughly accomplishes the desired state transfer
# # (using interpolated states to compute rough estimate of controls)
# dist = la.norm(xT[0:2] - x0[0:2])
# ang_dist = xT[2][0] - x0[2][0]
# total_time = N * T
# const_vel = dist / total_time
# const_ang_vel = ang_dist / total_time
# init_inputs = np.array([const_vel, const_ang_vel] * N).reshape(-1, 2)
#
# ## set parameter
# constraint_states = []
# constraint_states.append(x0.reshape(n_states))
#
#
# for ref_state in current_ref_traj:
# constraint_states.append(ref_state.reshape(n_states))
# constraint_states = np.array(constraint_states)
#
# init_inputs = []
# for ref_input in current_ref_inputs:
# init_inputs.append(ref_input.reshape(n_controls))
# init_inputs = np.array(init_inputs)
#
# solver.set_value(P, constraint_states)
# solver.set_value(STARTIDX, start_idx)
# solver.set_value(OBSPAD, obs_pad)
# solver.set_value(ENVPAD, env_pad)
# solver.set_initial(X, constraint_states)
# solver.set_initial(U, init_inputs)
# try:
# res = solver.solve()
# except:
# print('Steering NLP Failed')
# return [], []
#
# # Update the cost_total
# # cost_total = res.value(self.obj) # self.opti.debug.value(self.obj)
# # Obtain the optimal control input sequence
# u_casadi = res.value(U) # shape: (N, n_controls)
# # Get the predicted state trajectory for N time steps ahead
# x_casadi = res.value(X) # shape: # (N+1, n_states)
#
# print('delta', res.value(DELTA))
#
# return x_casadi, u_casadi
def nmpc(N,T, rrt_states, rrt_inputs, num_steps, num_states, num_inputs,
obstaclelist, envbounds, drnmpc):
w = np.zeros([num_steps, num_states])
return disturbed_nmpc(N, T, rrt_states, rrt_inputs, num_steps,
num_states, num_inputs, w, obstaclelist, envbounds, drnmpc)
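# A minimal usage note for the wrapper above: nmpc() is the disturbance-free special case of
# disturbed_nmpc(), i.e. the call below (with hypothetical plan arrays rrt_states, rrt_inputs
# of length num_steps) is equivalent to passing a zero noise sequence explicitly:
#     out = nmpc(N, DT, rrt_states, rrt_inputs, num_steps, 3, 2, OBSTACLELIST, RANDAREA, True)
#     # same as: disturbed_nmpc(N, DT, rrt_states, rrt_inputs, num_steps, 3, 2,
#     #                         np.zeros([num_steps, 3]), OBSTACLELIST, RANDAREA, True)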
# def disturbed_nmpc(N,T, rrt_states, rrt_inputs, num_steps, num_states, num_inputs, w, obstaclelist, envbounds, drnmpc):
#
# # if drnmpc --> use col avoidance pipeline, if not drnmpc --> just do no col avoid
#
# v_max = VELMAX
# v_min = VELMIN
# omega_max = ANGVELMAX
# omega_min = ANGVELMIN
#
# # TODO: remove x_min, x_max, y_min, y_max from inputs
# obs_edges, _ = get_padded_edges()
# # Set up the Casadi solver
#
# if drnmpc:
# [solver, f, _, _, U, X, P, DELTA, STARTIDX, OBSPAD, ENVPAD] = SetUpSteeringLawParametersBigM(N, T, v_max, v_min, omega_max, omega_min)
# # else:
# [solverN, _, _, _, UN, XN, PN, DELTAN, STARTIDXN, OBSPADN, ENVPADN] = SetUpSteeringLawParametersNoColAvoid(N, T, v_max, v_min, omega_max, omega_min)
# else:
# [solverN, f, _, _, UN, XN, PN, DELTAN, STARTIDXN, OBSPADN, ENVPADN] = SetUpSteeringLawParametersNoColAvoid(N, T,
# v_max,
# v_min,
# omega_max,
# omega_min)
#
# final_input = [0.0, 0.0] # final input
# final_state = sim_state(T, rrt_states[-1].reshape(3), rrt_inputs[-1], f).full().reshape(3) # final state
#
# # pad rest of inputs and states with last state and last input for the rest of the horizon (N-1 times)
# rrt_inputs = rrt_inputs.tolist() # num_steps x num_controls (e.g. 200x2)
# rrt_states = rrt_states.tolist() # num_steps x num_states (e.g. 200x3)
# rrt_states.append(final_state) # first append the last state; now we have (num_steps+1) x num_states (e.g. 201x3)
# for _ in range(N-1):
# rrt_inputs.append(final_input)
# rrt_states.append(final_state)
# rrt_inputs = np.array(rrt_inputs) # (num_steps+N-1) x num_controls (e.g. for N = 10: 209x2)
# rrt_states = np.array(rrt_states) # (num_steps+1+N-1) x num_states (e.g. for N = 10: 210x3)
#
# ######################
# # Start NMPC Tracker #
# ######################
# # Example: N = 2
# # repeat final input (= 0) N-1 times
# # |
# # (x0,u0)--->(x1,u1)--->(x2,u2)--->(x3,u3)--->(x4,u4)--->(x5,u5)--->(xT,u_f)--->(xT)
# # | | | | | |
# # current | | | | repeat final states
# # state |_______|__| | N-1 times
# # | | |
# # N horizon |___________|
# # next ref |
# # inputs N horizon next ref states
#
# # flags for function to terminate
# pt_obs_collision_detected = False
# line_obs_collision_detected = False
# nlp_failed_flag = False
#
# visited_states = []
# applied_controls = []
# current_state = rrt_states[0].reshape(num_states, 1) # x0
# # check if current state is safe
# collision_detected = PtObsColFlag(current_state, obstaclelist, envbounds, ROBRAD)
# if collision_detected:
# pt_obs_collision_detected = True
# return pt_obs_collision_detected, line_obs_collision_detected, nlp_failed_flag, [], [], []
#
# visited_states.append(current_state) # mark current state as visited states
#
# # set the same threshold for the environment and the obstacles; e.g. alfa = [env_alfa, obs1_alfa, ..., obs5_alfa]
# alfa = ALFA
#
# SigmaW = SIGMAW
# SigmaV = SIGMAV
# CrossCor = CROSSCOR
#
# # Note: num_steps = number of control steps available
# # The last control will take the system to the final state
# all_nmpc_planned_states = []
# for itr in range(num_steps):
# current_state = visited_states[-1] # Last visited state
# horizon_ref_states = rrt_states[itr+1:itr+N+1] # next N rrt-planned states (N x num_states starting after current state)
# horizon_ref_inputs = rrt_inputs[itr:itr+N] # next N rrt-planned inputs (N x num_controls starting at current state)
# current_goal_state = horizon_ref_states[-1].reshape(num_states, 1) # end of current reference horizon states
#
# # find covariance for all but the first state in the horizon
# # first state/current state is deterministic
# # covar of second state/next state is just SigmaW
# # (X[t+1] = f(X[t], U[t]) + W[t]; f(.,.) is deterministic since X[t] is realized)
# if drnmpc:
# horizon_covars = ukfCovars(list(horizon_ref_states), list(horizon_ref_inputs[1:]), N-1, num_states, num_states, SigmaW, SigmaV, CrossCor, SigmaW)
#
# env_pad, obs_pad = find_dr_padding(alfa, N, obs_edges, horizon_covars)
#
# # index of node in horizon that with which collision avoidance should start
# # (use at least 1 to avoid crashes due to state realizations in collision zone)
# start_idx = max(1, N)
# # obs_pad = 0*np.ones([N+1, 4 * num_obs])
# # env_pad = np.zeros([N+1, 4])
#
# # steer by solving NLP
# if drnmpc:
# x_casadi, u_casadi = nonlinsteerBigM(solver, current_state, current_goal_state, num_states, num_inputs, N, T, U,
# X, P, DELTA, STARTIDX, OBSPAD, ENVPAD, horizon_ref_states,
# horizon_ref_inputs, start_idx, obs_pad, env_pad)
# else:
            # x_casadi = [] # to activate the next condition and just do the no col avoid steering
#
# if x_casadi == []:
# obs_pad = []
# env_pad = []
# x_casadi, u_casadi = nonlinsteerNoColAvoid(solverN, current_state, current_goal_state, num_states,
# num_inputs, N,
# T, UN,
# XN, PN, DELTAN, STARTIDXN, OBSPADN, ENVPADN, horizon_ref_states,
# horizon_ref_inputs, start_idx, obs_pad, env_pad)
# # print("###################################################")
# # print("###################################################")
# # print("###################################################")
# # print("###################################################")
# # print("############### Switched solvers ##################")
# # print("###################################################")
# # print("###################################################")
# # print("###################################################")
# # print("###################################################")
# if x_casadi == []:
# nlp_failed_flag = True
# print("nmpc failed at itr: ", itr)
# break
#
# all_nmpc_planned_states.append(x_casadi)
#
# # NLP succeeded and trajectory found
# nmpc_input = u_casadi[0] # input to apply at current state
# nmpc_next_state = x_casadi[1] # next state after nmpc_input is applied
#
# # realized next state with noise
# realized_next_state = nmpc_next_state.reshape(num_states, 1) + w[itr].reshape(num_states, 1)
# # check if realized state is safe
# collision_detected = PtObsColFlag(realized_next_state, obstaclelist, envbounds, ROBRAD)
# if collision_detected:
# pt_obs_collision_detected = True
# break
#
# # check if line connecting previous state and realized state is safe
# collision_detected = LineObsColFlag(current_state, realized_next_state, obstaclelist, ROBRAD)
# if collision_detected:
# line_obs_collision_detected = True
# break
#
# # update the visited states and applied controls
# visited_states.append(realized_next_state)
# applied_controls.append(nmpc_input.reshape(num_inputs, 1))
#
# realized_states = visited_states
#
# print('Done with nmpc')
# visited_states = np.array(visited_states).reshape(len(visited_states), num_states)
# applied_controls = np.array(applied_controls).reshape(len(applied_controls), num_inputs)
# distance_error = la.norm(final_state[0:2] - visited_states[-1][0:2])
# print('Final error away from RRT* goal:', distance_error)
#
# result_data = {'pt_obs_collision_detected': pt_obs_collision_detected,
# 'line_obs_collision_detected': line_obs_collision_detected,
# 'nlp_failed_flag': nlp_failed_flag,
# 'visited_states': visited_states,
# 'applied_controls': applied_controls,
# 'all_nmpc_planned_states': all_nmpc_planned_states}
#
# return result_data
#
# def get_padded_edges():
# '''
# Finds the left, right, top, and bottom padded (by robot radius) edges for the obstacles and the environment
# Outputs:
# obs_edges = edges of obstacles in the form of a list where each element is a dictionary with "top","bottom", "right", and "left"
# env_edges = edges of environment in the form of a dictionary with "top","bottom", "right", and "left"
# obs_edges should be used as (x < "left") or (x > "right") or (y < "bottom") or (y > "top")
# env_edges should be used as (x > "left") and (x < "right") and (y > "bottom") and (y < "top")
# '''
# randArea1 = copy.copy(RANDAREA) # [xmin,xmax,ymin,ymax]
# obstacleList1 = copy.copy(OBSTACLELIST) # [ox,oy,wd,ht]
#
# # environment bounds
# xmin = randArea1[0]
# xmax = randArea1[1]
# ymin = randArea1[2]
# ymax = randArea1[3]
# # thickness of env edges (doesn't matter much, anything > 0 works)
# thickness = 0.1
# # original environment area - width and height
# width = xmax - xmin
# height = ymax - ymin
#
# env_edges = {"left": xmin+ROBRAD, "right": xmax-ROBRAD, "bottom": ymin+ROBRAD, "top": ymax-ROBRAD} # environment edges
# obs_edges = []
#
# # add enough padding for obstacles for robot radius
# for obs in obstacleList1:
# xmin = obs[0] - ROBRAD
# xmax = xmin + obs[2] + (2 * ROBRAD)
# ymin = obs[1] - ROBRAD
# ymax = ymin + obs[3] + (2 * ROBRAD)
# edges = {"left": xmin, "right": xmax, "bottom": ymin, "top": ymax}
# obs_edges.append(edges)
#
# return obs_edges, env_edges
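# Worked example of the padding above (hypothetical numbers, for intuition only): with
# ROBRAD = 0.5 and an obstacle [ox, oy, wd, ht] = [1.0, 1.0, 2.0, 1.0], the padded edges are
#     left = 1.0 - 0.5 = 0.5,  right = 0.5 + 2.0 + 2 * 0.5 = 3.5,
#     bottom = 1.0 - 0.5 = 0.5,  top = 0.5 + 1.0 + 2 * 0.5 = 2.5,
# while the environment box is shrunk by ROBRAD on every side, so the robot centre can then
# be treated as a point when checking constraints.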
def get_state_bounds(obs_edges, env_edges, state):
'''
Finds the position bounds on a state given a set of obstacles (find maximum padding along each direction before
colliding with an obstacle)
'''
eps = 0.00001 # arbitrarily small value
# current state
x = state[0]
y = state[1]
# environment bounds
x_max_env = env_edges["right"]
x_min_env = env_edges["left"]
y_max_env = env_edges["top"]
y_min_env = env_edges["bottom"]
# lists to add upper and lower bounds for x and y
# (min/max element selected from them later as the upper/lower bound)
# Initialize them with environment bounds
x_max_bounds = [x_max_env]
x_min_bounds = [x_min_env]
y_max_bounds = [y_max_env]
y_min_bounds = [y_min_env]
inside_obs_counter = 0 # check if state is inside multiple obstacles
for obs_num, obs in enumerate(obs_edges):
# obstacles
top = obs["top"]
bottom = obs["bottom"]
right = obs["right"]
left = obs["left"]
# TODO: This mess needs to be fixed. All obstacles need to be considered at once when the state could be inside
# an obstacle or when the state can be moved which might put it in an obstacle
# if the state is inside the obstacle, we have to move the state outside (to the closest bound)
if (left <= x <= right) and (bottom <= y <= top):
inside_obs_counter += 1
dr = abs(x - right) # x distance to right edge
dl = abs(x - left) # x distance to left edge
dt = abs(y - top) # y distance to top edge
db = abs(y - bottom) # y distance to bottom edge
d_list = [dr, dl, dt, db] # list of distances: right, left, top, bottom
idx = d_list.index(min(d_list)) # index of closest distance: 0-->right, 1-->left, 2-->top, 3-->bottom
if idx == 0:
x_min_bounds.append(right) # right edge is closest --> make right edge a lower bound for x
x = right + eps # move x to right edge
elif idx == 1:
x_max_bounds.append(left) # left edge is closest --> make left edge an upper bound for x
x = left - eps # move x to left edge
elif idx == 2:
y_min_bounds.append(top) # top edge is closest --> make top edge a lower bound for y
y = top + eps # move y to top edge
elif idx == 3:
y_max_bounds.append(bottom) # bottom edge is closest --> make bottom edge an upper bound for y
y = bottom - eps # move y to bottom edge
else:
print('ERROR: something is wrong')
# if drl < dtb: # state closer to right or left edge
# if dr < dl: # if x is closer to the right edge, add right edge as x lower bound
# x_min_bounds.append(right)
# else: # if x is closer to the left edge, add left edge as x upper bound
# x_max_bounds.append(left)
# else: # state closer to top or bottom edge
# if dt < db: # if y is closer to the top edge, add top edge as y lower bound
# y_min_bounds.append(top)
# else: # if y is closer to the bottom edge, add bottom edge as y upper bound
# y_max_bounds.append(bottom)
else: # state not inside an obstacle
# add left edge of obstacle to x upper bounds if current state is to the left of the obstacle
if (bottom <= y <= top) and (x <= left):
x_max_bounds.append(left)
# add right edge of obstacle to x lower bounds if current state is to the right of the obstacle
if (bottom <= y <= top) and (x >= right):
x_min_bounds.append(right)
# add bottom edge of obstacle to y upper bounds if current state is to the bottom of the obstacle
if (left <= x <= right) and (y <= bottom):
y_max_bounds.append(bottom)
# add top edge of obstacle to y lower bounds if current state is to the top of the obstacle
if (left <= x <= right) and (y >= top):
y_min_bounds.append(top)
# find maximum lower bound and minimum upper bound
xmax = min(x_max_bounds)
xmin = max(x_min_bounds)
ymax = min(y_max_bounds)
ymin = max(y_min_bounds)
for obs_num, obs in enumerate(obs_edges):
# obstacles
top = obs["top"]
bottom = obs["bottom"]
right = obs["right"]
left = obs["left"]
# TODO: This mess needs to be fixed. All obstacles need to be considered at once when the state could be inside
# an obstacle or when the state can be moved which might put it in an obstacle
# if the state is inside the obstacle, we have to move the state outside (to the closest bound)
if (left <= x <= right) and (bottom <= y <= top):
inside_obs_counter += 1
if inside_obs_counter > 1:
print('......................................................')
print('ERROR: INSIDE MULTIPLE OBSTACLES. THIS IS NOT RESOLVED')
print('******************************************************')
return []
return [xmin, xmax, ymin, ymax]
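# Illustrative example for get_state_bounds (hypothetical numbers): with
# env_edges = {"left": -4.8, "right": 4.8, "bottom": -4.8, "top": 4.8}, a single obstacle
# with edges {"left": 1.0, "right": 2.0, "bottom": -1.0, "top": 1.0}, and state = (0.0, 0.0),
# the state sits inside the obstacle's y-band and to its left, so the obstacle's left edge
# becomes the x upper bound while the environment supplies the remaining bounds:
#     get_state_bounds(obs_edges, env_edges, state) -> [-4.8, 1.0, -4.8, 4.8]   # [xmin, xmax, ymin, ymax]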
################# UKF #######################
def ukfCovars(xHist, uHist, N, numStates, numOutputs, SigmaW, SigmaV, CrossCor, start_node_covar):
'''
compute covariances at each state
xHist: list of states (list: N+1 elements each with num_states elements)
uHist: list of control inputs (list: N elements each with num_controls elements)
N: horizon length
numStates, numOutputs: number of states and outputs
SigmaW, SigmaV, CrossCor = process noise covariance, measurement noise covariance, and cross covariance between them
start_node_covar: covariance at the initial node
'''
ukf_params = {}
ukf_params["n_x"] = numStates
ukf_params["n_o"] = numOutputs
ukf_params["SigmaW"] = SigmaW
ukf_params["SigmaV"] = SigmaV
ukf_params["CrossCor"] = CrossCor
ukf_params["dT"] = DT
# Find covariances
SigmaE = start_node_covar # covariance at initial/from node
covarHist = [SigmaE]
for k in range(0, N):
x_hat = xHist[k]
u_k = uHist[k]
y_k = xHist[k+1] # (we assume perfect full state feedback so y = x)
ukf_params["x_hat"] = x_hat
ukf_params["u_k"] = u_k
ukf_params["SigmaE"] = SigmaE
ukf_params["y_k"] = y_k
ukf_estimator = UKF_Estimator.UKF() # initialize the state estimator
estimator_output = ukf_estimator.Estimate(ukf_params) # get the estimates
SigmaE = estimator_output["SigmaE"] # Unbox the covariance
covarHist.append(SigmaE.reshape(numStates, numStates))
return covarHist
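# Illustrative usage sketch (mirrors the call in the commented-out disturbed_nmpc above;
# variable names are placeholders): the first state of the horizon is already realized and
# hence deterministic, so the propagation starts from SigmaW and covers N-1 further steps:
#     horizon_covars = ukfCovars(list(horizon_ref_states), list(horizon_ref_inputs[1:]),
#                                N - 1, 3, 3, SIGMAW, SIGMAV, CROSSCOR, SIGMAW)
# horizon_covars[k] is then the 3x3 state covariance used to compute the DR padding at step k.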
################ DR Padding ##################
#
# def find_dr_padding(alfa, N, obs_edges, horizon_covars):
# '''
# Finds DR padding value for each environment and obstacle edge
# '''
# xDir = np.array([1, 0, 0]) # x direction
    # yDir = np.array([0, 1, 0]) # y direction
# num_obs = len(obs_edges)
#
# env_pad = np.zeros([N + 1, 4]) # for each time step, the four environment edges have their own dr padding (right, left, top, bottom)
# obs_pad = np.zeros([N + 1, 4 * num_obs]) # for each time step, each obstacle edge has its own dr padding (right, left, top, bottom)
#
# # find tightening value for all alfa values delta = sqrt((1-alfa)/alfa)
# alpha = np.array(alfa, float)
# delta = (1-alpha) / alpha
# delta = delta**(0.5)
# print("##############################")
# print(delta)
#
# for n in range(1,N+1): # skip the first time step (no DR padding there - it is already realized)
# sigma = horizon_covars[n-1] # this step's covariance
#
# # environment dr padding
# rl_pad = delta[0] * math.sqrt(xDir.T @ sigma @ xDir) # padding along right/left direction
# tb_pad = delta[0] * math.sqrt(yDir.T @ sigma @ yDir) # padding along top/bottom direction
# env_pad[n, 0] = rl_pad # right
# env_pad[n, 1] = rl_pad # left
# env_pad[n, 2] = tb_pad # top
# env_pad[n, 3] = tb_pad # bottom
#
# # obstacle padding
# for ob in range(num_obs): # for every obstacle, do the above
# rl_pad = delta[ob+1] * math.sqrt(xDir.T @ sigma @ xDir) # padding along right/left direction
# tb_pad = delta[ob+1] * math.sqrt(yDir.T @ sigma @ yDir) # padding along top/bottom direction
# obs_pad[n, 4 * ob + 0] = rl_pad # right
# obs_pad[n, 4 * ob + 1] = rl_pad # left
# obs_pad[n, 4 * ob + 2] = tb_pad # top
# obs_pad[n, 4 * ob + 3] = tb_pad # bottom
#
# return env_pad, obs_pad
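# Worked example of the DR padding formula above (hypothetical numbers, for intuition only):
# with a risk bound alfa_i = 0.05 and a step covariance Sigma = diag(0.004, 0.004, 0.004),
# the tightening factor is
#     delta_i = sqrt((1 - alfa_i) / alfa_i) = sqrt(0.95 / 0.05) ~ 4.36
# and the padding along the x direction a = [1, 0, 0] is
#     pad = delta_i * sqrt(a' Sigma a) = 4.36 * sqrt(0.004) ~ 0.28,
# i.e. the corresponding obstacle/environment edge is tightened by roughly 0.28 at that
# time step. The plotting code further below recomputes this same quantity from SIGMAW and
# ALFA[0] for a rough visualization of the padded obstacles.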
###############################################################################
####################### FUNCTION CALLED BY MAIN() #############################
###############################################################################
#TODO:change this to support different min and max values
def drrrtstar_with_nmpc(nmpc_horizon, x_ref_hist, u_ref_hist, n, m, num_steps, w=[],
save_plot = False, save_file_name = "", drnmpc = True):
"""
runs an nmpc low level controller for the rrt* path
Inputs:
input_file: file name (only) for the optimal inputs
file_path: path to input_file
v_max, omega_max, x_max, y_max, theta_max: maximum linear and angular velocity and maximum x,y,theta values
w: generated disturbance
animate_results: True --> animate, False --> don't animate
save_plot: True --> save plot, False --> don't save plot
ax_lim: axis limits for animation
robot_w: robot width for animation
robot_h: robot height for animation
wheel_w: robot wheel width for animation
wheel_h: robot wheel height for animation
"""
plotting_on = False
obstaclelist = copy.copy(OBSTACLELIST)
envbounds = copy.copy(RANDAREA)
robrad = ROBRAD
t_start = time.time()
# load inputs and states
rrt_states = x_ref_hist
rrt_inputs = u_ref_hist
x0 = [-2.,3.,0.]
xT = [-1.0, 3.0, 0.0]
num_steps = 1
nmpc_horizon = 1
u_guess = [0.1, 0.0]
rrt_states = [np.array(x0)]
rrt_states.append(np.array(xT))
rrt_inputs = [np.array(u_guess)]
results_data = disturbed_nmpc(nmpc_horizon, DT, rrt_states, rrt_inputs, num_steps, n, m, w, obstaclelist, envbounds, drnmpc)
pt_obs_collision_detected = results_data["pt_obs_collision_detected"]
line_obs_collision_detected = results_data["line_obs_collision_detected"]
nlp_failed_flag = results_data["nlp_failed_flag"]
all_states_cl = results_data["visited_states"]
all_inputs_cl = results_data["applied_controls"]
all_nmpc_planned_states = results_data["all_nmpc_planned_states"]
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
if pt_obs_collision_detected:
print('Collision between realized point and an obstacle/environment')
if line_obs_collision_detected:
print('Collision between line connecting realized point and previous point with an obstacle')
if nlp_failed_flag:
print('NLP failed for some reason')
crash_idx = -1 # index when NMPC failed completely (-1 --> didn't fail)
if pt_obs_collision_detected or line_obs_collision_detected or nlp_failed_flag:
crash_idx = len(all_states_cl)
# get last visited
last_state = all_states_cl[-1]
# pad states with last one and ctrl with nothing until num_steps
zero_ctrl = all_inputs_cl[-1] * 0
all_states_cl = list(all_states_cl)
all_inputs_cl = list(all_inputs_cl)
for padding_steps in range(crash_idx, num_steps+1):
all_states_cl.append(last_state)
all_inputs_cl.append(zero_ctrl)
all_states_cl = np.array(all_states_cl).reshape(num_steps+1, n)
all_inputs_cl = np.array(all_inputs_cl).reshape(num_steps, m)
t_end = time.time()
print('Total time: ', t_end - t_start)
# compute final state of rrt plan # TODO: this is overkill, fix later
opti = casadi.Opti()
x,y,theta = opti.variable(), opti.variable(), opti.variable()
states = vertcat(x, y, theta) # all three states
v, omega = opti.variable(), opti.variable()
controls = vertcat(v, omega) # both controls
rhs = horzcat(v * cos(theta), v * sin(theta), omega)
f = Function('f', [states, controls], [rhs], ['input_state', 'control_input'], ['rhs'])
    xtm1 = np.array(rrt_states[-1]).reshape(1, 3)  # rrt_states is a list of 1-D arrays here
    utm1 = np.array(rrt_inputs[-1]).reshape(1, 2)
last_rrt_state = f(xtm1, utm1) * DT + xtm1
# RRT* x,y states
# extract the x and y states in the rrt plan with computed last state appended
    x_orig = np.array(rrt_states).reshape(-1, n)[:, 0]
x_orig = list(x_orig)
x_orig.append(last_rrt_state[0])
x_orig = np.array(x_orig)
    y_orig = np.array(rrt_states).reshape(-1, n)[:, 1]
y_orig = list(y_orig)
y_orig.append(last_rrt_state[1])
y_orig = np.array(y_orig)
# NMPC Realized x,y states
# get the x,y states of nmpc
x_cl = np.array(all_states_cl)[:, 0]
y_cl = np.array(all_states_cl)[:, 1]
##################################################################
# PLOTTING
if plotting_on:
# environment rectangle bottom left and top right corners
xmin_randarea = RANDAREA[0]
xmax_randarea = RANDAREA[1]
ymin_randarea = RANDAREA[2]
ymax_randarea = RANDAREA[3]
# thickness of env edges (doesn't matter much, anything > 0 works)
thickness = 0.1
# original environment area - width and height
width_randarea = xmax_randarea - xmin_randarea
height_randarea = ymax_randarea - ymin_randarea
# top, bottom, right, and left rectangles for the env edges
env_bottom = [xmin_randarea - thickness, ymin_randarea - thickness, width_randarea + 2 * thickness, thickness]
env_top = [xmin_randarea - thickness, ymax_randarea, width_randarea + 2 * thickness, thickness]
env_right = [xmax_randarea, ymin_randarea - thickness, thickness, height_randarea + 2 * thickness]
env_left = [xmin_randarea - thickness, ymin_randarea - thickness, thickness, height_randarea + 2 * thickness]
# add env as obstacle
OBSTACLELIST.append(env_bottom)
OBSTACLELIST.append(env_top)
OBSTACLELIST.append(env_right)
OBSTACLELIST.append(env_left)
# Create figure
fig = plt.figure(figsize=[9, 9])
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
# ax.axis('equal')
plt.axis([-5.2, 5.2, -5.3, 5.3])
# Plot the environment boundary
xy, w, h = (-5.0, -5.0), 10.0, 10.0
r = Rectangle(xy, w, h, fc='none', ec='gold', lw=1)
offsetbox = AuxTransformBox(ax.transData)
offsetbox.add_artist(r)
        ab = AnnotationBbox(offsetbox, (xy[0] + w / 2., xy[1] + h / 2.),
boxcoords="data", pad=0.52, fontsize=20,
bboxprops=dict(facecolor="none", edgecolor='k', lw=20))
ax.add_artist(ab)
# Change ticklabel font size
plt.xticks(fontsize=32)
plt.yticks(fontsize=32)
# rough estimate of DR padding
xDir = np.array([1, 0, 0]) # x direction
        yDir = np.array([0, 1, 0])  # y direction
alpha = ALFA[0]
delta = (1-alpha)/alpha
delta = delta ** 0.5
xdrpad = delta * math.sqrt(xDir.T @ SIGMAW @ xDir)
ydrpad = delta * math.sqrt(yDir.T @ SIGMAW @ yDir)
# Plot the rectangle obstacles with DR padding
obstacles = [Rectangle(xy=[ox - ROBRAD - xdrpad, oy - ROBRAD - ydrpad],
width=wd + 2 * ROBRAD + 2*xdrpad,
height=ht + 2 * ROBRAD + 2*ydrpad,
angle=0,
color="palegoldenrod") for (ox, oy, wd, ht) in OBSTACLELIST]
for obstacle in obstacles:
ax.add_artist(obstacle)
# Plot the rectangle obstacles with robot radius padding
obstacles = [Rectangle(xy=[ox - ROBRAD, oy - ROBRAD],
width=wd +2 * ROBRAD,
height=ht +2 * ROBRAD,
angle=0,
color="mistyrose") for (ox, oy, wd, ht) in OBSTACLELIST]
for obstacle in obstacles:
ax.add_artist(obstacle)
# Plot the true rectangle obstacles
        obstacles = [Rectangle(xy=[ox, oy],
                               width=wd,
                               height=ht,
angle=0,
color="k") for (ox, oy, wd, ht) in OBSTACLELIST]
for obstacle in obstacles:
ax.add_artist(obstacle)
# plot RRT* sampled points
plt.plot(x_orig, y_orig, 'o', color='gray')
# plot NMPC realized points
plt.plot(x_cl, y_cl, 'x', color='red')
colorlist = ["blue", "green", "orangered", "purple", "lime", "coral"]
num_colors = len(colorlist)
for idx, nmpc_plan in enumerate(all_nmpc_planned_states):
nmpc_plan_x = nmpc_plan[:,0]
nmpc_plan_y = nmpc_plan[:,1]
plt.plot(nmpc_plan_x, nmpc_plan_y, color=colorlist[idx%num_colors])
if save_plot:
plot_name = save_file_name + '_plot_nmpc.png'
plt.savefig(plot_name)
plt.show()
# result_data = {'all_states_cl': all_states_cl,
# 'all_inputs_cl': all_inputs_cl,
# 'pt_obs_collision_detected': pt_obs_collision_detected,
# 'line_obs_collision_detected': line_obs_collision_detected,
# 'nlp_failed_flag': nlp_failed_flag,
# 'crash_idx': crash_idx,
# 'last_rrt_state': last_rrt_state}
collision_flag = pt_obs_collision_detected or line_obs_collision_detected or nlp_failed_flag
result_data = {'x_hist': all_states_cl,
'u_hist': all_inputs_cl,
'collision_flag': collision_flag,
'collision_idx': crash_idx}
return result_data
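# Illustrative usage sketch for drrrtstar_with_nmpc (hypothetical reference arrays). Note that,
# as currently written, the function overrides the passed-in plan, horizon and num_steps with a
# hard-coded one-step test case, so mainly n, m, w and drnmpc take effect:
#     x_ref = np.zeros((200, 3)); u_ref = np.zeros((200, 2))
#     out = drrrtstar_with_nmpc(nmpc_horizon=10, x_ref_hist=x_ref, u_ref_hist=u_ref,
#                               n=3, m=2, num_steps=200, w=np.zeros((200, 3)), drnmpc=True)
#     out["x_hist"], out["u_hist"]                  # realized closed-loop states / applied inputs
#     out["collision_flag"], out["collision_idx"]   # whether and when tracking failed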
# TODO: main is no longer up to date MUST UPDATE
if __name__ == '__main__':
dist = "nrm"
show_hist = False
SigmaW = SIGMAW
sigma1 = SigmaW[0, 0] # first entry in SigmaW
alfa = ALFA
plotting_on = False
obstaclelist = copy.copy(OBSTACLELIST)
envbounds = copy.copy(RANDAREA)
robrad = ROBRAD
t_start = time.time()
# load inputs and states
# x0 = [-2., 3., 0.]
# x1 = [-1.9, 3, 0]
# xT = [-1.8, 3.0, 0.0]
x0 = [-2., 3., 0.]
x1 = [-1.9, 3, 0]
x2 = [-1.8, 3, 0]
x3 = [-1.7, 3, 0]
x4 = [-1.6, 3, 0]
x5 = [-1.5, 3, 0]
x6 = [-1.4, 3, 0]
x7 = [-1.3, 3, 0]
x8 = [-1.2, 3, 0]
x9 = [-1.1, 3, 0]
x10 = [-1.0, 3, 0]
x11 = [-1.1, 3, 0]
x12 = [-1.2, 3, 0]
x13 = [-1.3, 3, 0]
x14 = [-1.4, 3, 0]
x15 = [-1.5, 3, 0]
x16 = [-1.6, 3, 0]
x17 = [-1.7, 3, 0]
x18 = [-1.8, 3, 0]
x19 = [-1.9, 3, 0]
# x0 = [-2., 3., 0.]
# x1 = [-1.95, 3, 0]
# x2 = [-1.9, 3, 0]
# x3 = [-1.85, 3, 0]
# x4 = [-1.8, 3, 0]
# x5 = [-1.75, 3, 0]
# x6 = [-1.7, 3, 0]
# x7 = [-1.65, 3, 0]
# x8 = [-1.6, 3, 0]
# x9 = [-1.55, 3, 0]
# x10 = [-1.5, 3, 0]
num_steps = 50
T = num_steps
nmpc_horizon = 5 #2
u_guess = [2.5, 0.0]
rrt_states = [np.array(x0)]
rrt_states.append(np.array(x1))
rrt_states.append(np.array(x2))
rrt_states.append(np.array(x3))
rrt_states.append(np.array(x4))
rrt_states.append(np.array(x5))
rrt_states.append(np.array(x6))
rrt_states.append(np.array(x7))
rrt_states.append(np.array(x8))
rrt_states.append(np.array(x9))
rrt_states.append(np.array(x10))
rrt_states.append(np.array(x11))
rrt_states.append(np.array(x12))
rrt_states.append(np.array(x13))
rrt_states.append(np.array(x14))
rrt_states.append(np.array(x15))
rrt_states.append(np.array(x16))
rrt_states.append(np.array(x17))
rrt_states.append(np.array(x18))
rrt_states.append(np.array(x19))
rrt_states.append(np.array(x0))
rrt_states.append(np.array(x1))
rrt_states.append(np.array(x2))
rrt_states.append(np.array(x3))
rrt_states.append(np.array(x4))
rrt_states.append(np.array(x5))
rrt_states.append(np.array(x6))
rrt_states.append(np.array(x7))
rrt_states.append(np.array(x8))
rrt_states.append(np.array(x9))
rrt_states.append(np.array(x10))
rrt_states.append(np.array(x11))
rrt_states.append(np.array(x12))
rrt_states.append(np.array(x13))
rrt_states.append(np.array(x14))
rrt_states.append(np.array(x15))
rrt_states.append(np.array(x16))
rrt_states.append(np.array(x17))
rrt_states.append(np.array(x18))
rrt_states.append(np.array(x19))
rrt_states.append(np.array(x0))
rrt_states.append(np.array(x1))
rrt_states.append(np.array(x2))
rrt_states.append(np.array(x3))
rrt_states.append(np.array(x4))
rrt_states.append(np.array(x5))
rrt_states.append(np.array(x6))
rrt_states.append(np.array(x7))
rrt_states.append(np.array(x8))
rrt_states.append(np.array(x9))
# rrt_states.append(np.array(xT))
rrt_inputs = [np.array(u_guess)]*num_steps
n, m = 3,2
num_runs = 1000
x_realized = []
y_realized = []
x2_realized = []
y2_realized = []
x3_realized = []
y3_realized = []
x4_realized = []
y4_realized = []
x5_realized = []
y5_realized = []
x6_realized = []
y6_realized = []
x7_realized = []
y7_realized = []
x8_realized = []
y8_realized = []
x9_realized = []
y9_realized = []
x10_realized = []
y10_realized = []
xT_realized = []
yT_realized = []
for i in range(num_runs):
if dist == "nrm":
w_base_hist = npr.multivariate_normal(mean=[0, 0, 0], cov=SigmaW, size=T)
if show_hist:
plt.hist(w_base_hist)
plt.show()
elif dist == "lap":
l = 0
b = (sigma1 / 2) ** 0.5
w_base_hist = npr.laplace(loc=l, scale=b, size=[T, 3]) # mean = loc, var = 2*scale^2
if show_hist:
plt.hist(w_base_hist)
plt.show()
elif dist == "gum":
b = (6 * sigma1) ** 0.5 / np.pi
l = -0.57721 * b
w_base_hist = npr.gumbel(loc=l, scale=b, size=[T, 3]) # mean = loc+0.57721*scale, var = pi^2/6 scale^2
if show_hist:
plt.hist(w_base_hist)
plt.show()
w = w_base_hist
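        # Why the scale parameters above give matching second moments (zero-mean target with
        # per-axis variance sigma1):
        #     Laplace: var = 2 * b^2            => b = sqrt(sigma1 / 2)
        #     Gumbel:  var = (pi^2 / 6) * b^2   => b = sqrt(6 * sigma1) / pi, and since the
        #              Gumbel mean is loc + gamma * b (gamma ~ 0.57721), loc = -0.57721 * b
        #              recentres the samples at zero.
        # All three disturbance models therefore share (approximately) the same mean and
        # variance as the Gaussian case and differ only in tail behaviour.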
results_data = disturbed_nmpc(nmpc_horizon, DT, np.array(rrt_states), np.array(rrt_inputs), num_steps, n, m, w, obstaclelist, envbounds, drnmpc=True, hnmpc=True)
# results_data = disturbed_nmpc(nmpc_horizon, DT, rrt_states, rrt_inputs, num_steps, n, m, w, obstaclelist, envbounds, drnmpc, hnmpc=hnmpc)
pt_obs_collision_detected = results_data["pt_obs_collision_detected"]
line_obs_collision_detected = results_data["line_obs_collision_detected"]
nlp_failed_flag = results_data["nlp_failed_flag"]
all_states_cl = results_data["visited_states"]
all_inputs_cl = results_data["applied_controls"]
all_nmpc_planned_states = results_data["all_nmpc_planned_states"]
x_realized.append(all_states_cl[1][0])
y_realized.append(all_states_cl[1][1])
x2_realized.append(all_states_cl[2][0])
y2_realized.append(all_states_cl[2][1])
x3_realized.append(all_states_cl[3][0])
y3_realized.append(all_states_cl[3][1])
x4_realized.append(all_states_cl[4][0])
y4_realized.append(all_states_cl[4][1])
x5_realized.append(all_states_cl[5][0])
y5_realized.append(all_states_cl[5][1])
x6_realized.append(all_states_cl[6][0])
y6_realized.append(all_states_cl[6][1])
x7_realized.append(all_states_cl[7][0])
y7_realized.append(all_states_cl[7][1])
x8_realized.append(all_states_cl[8][0])
y8_realized.append(all_states_cl[8][1])
x9_realized.append(all_states_cl[9][0])
y9_realized.append(all_states_cl[9][1])
x10_realized.append(all_states_cl[10][0])
y10_realized.append(all_states_cl[10][1])
xT_realized.append(all_states_cl[-1][0])
yT_realized.append(all_states_cl[-1][1])
fig = plt.figure(figsize=[9, 9])
ax = fig.add_subplot(1, 1, 1) # create an axes object in the figure
# ax.axis('equal')
plt.axis([-4, 0, -3, 4])
# elE, elV = LA.eig(SigmaW[0:2, 0:2])
# ellipse = Ellipse(xy=(x1[0], x1[1]), width=math.sqrt(elE[0])*2, height=math.sqrt(elE[1])*2,
# edgecolor='b', fc='None', lw=2)
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(x1[0], x1[1]), width=math.sqrt(elE[0])*6, height=math.sqrt(elE[1])*6,
# edgecolor='b', fc='None', lw=2)
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(xT[0], xT[1]), width=math.sqrt(elE[0]) * 6, height=math.sqrt(elE[1]) * 6,
# edgecolor='b', fc='None', lw=2)
# ax.add_patch(ellipse)
xDir = np.array([1, 0, 0]) # x direction
    yDir = np.array([0, 1, 0])  # y direction
# SIGMAW = np.diag([0.005, 0.005, 0.005])
# alpha = 0.07
alpha = alfa[0]
delta = (1 - alpha) / alpha
delta = delta ** 0.5
xdrpad = delta * math.sqrt(xDir.T @ SIGMAW @ xDir) * 2
ydrpad = delta * math.sqrt(yDir.T @ SIGMAW @ yDir) * 2
# plot first step ellipse and its realizations
ellipse = Ellipse(xy=(x1[0], x1[1]), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x_realized, y_realized, 'o', color='r', markersize=1)
plt.plot(x0[0], x0[1], 'x', color='k')
plt.plot(x1[0], x1[1], 'x', color='k')
plt.plot(x2[0], x2[1], 'x', color='k')
plt.plot(x3[0], x3[1], 'x', color='k')
plt.plot(x4[0], x4[1], 'x', color='k')
plt.plot(x5[0], x5[1], 'x', color='k')
plt.plot(x6[0], x6[1], 'x', color='k')
plt.plot(x7[0], x7[1], 'x', color='k')
plt.plot(x8[0], x8[1], 'x', color='k')
plt.plot(x9[0], x9[1], 'x', color='k')
plt.plot(x10[0], x10[1], 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -0.5
ellipse = Ellipse(xy=(x2[0], x2[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x2_realized, list(np.array(y2_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -1
ellipse = Ellipse(xy=(x3[0], x3[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x3_realized, list(np.array(y3_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -1.5
ellipse = Ellipse(xy=(x4[0], x4[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x4_realized, list(np.array(y4_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -2
ellipse = Ellipse(xy=(x5[0], x5[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x5_realized, list(np.array(y5_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -2.5
ellipse = Ellipse(xy=(x6[0], x6[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x6_realized, list(np.array(y6_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -3
ellipse = Ellipse(xy=(x7[0], x7[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x7_realized, list(np.array(y7_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -3.5
ellipse = Ellipse(xy=(x8[0], x8[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x8_realized, list(np.array(y8_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -4
ellipse = Ellipse(xy=(x9[0], x9[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x9_realized, list(np.array(y9_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x0[0], x0[1] + shift_value, 'x', color='k')
plt.plot(x1[0], x1[1] + shift_value, 'x', color='k')
plt.plot(x2[0], x2[1] + shift_value, 'x', color='k')
plt.plot(x3[0], x3[1] + shift_value, 'x', color='k')
plt.plot(x4[0], x4[1] + shift_value, 'x', color='k')
plt.plot(x5[0], x5[1] + shift_value, 'x', color='k')
plt.plot(x6[0], x6[1] + shift_value, 'x', color='k')
plt.plot(x7[0], x7[1] + shift_value, 'x', color='k')
plt.plot(x8[0], x8[1] + shift_value, 'x', color='k')
plt.plot(x9[0], x9[1] + shift_value, 'x', color='k')
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
# plot seconds step ellipse and its realizations (shift down by 1 for clarity)
shift_value = -4.5
ellipse = Ellipse(xy=(x10[0], x10[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(x10_realized, list(np.array(y10_realized) + shift_value), 'o', color='g', markersize=1)
# mark all planned waypoints at this shift level
for wp in waypoints:
    plt.plot(wp[0], wp[1] + shift_value, 'x', color='k')
# plot the ellipse at the goal x10 with the terminal-state realizations (shifted down by 5 for clarity)
shift_value = -5
ellipse = Ellipse(xy=(x10[0], x10[1] + shift_value), width=xdrpad, height=ydrpad,
edgecolor='b', fc='None', lw=2)
ax.add_patch(ellipse)
plt.plot(xT_realized, list(np.array(yT_realized) + shift_value), 'o', color='g', markersize=1)
plt.plot(x10[0], x10[1] + shift_value, 'x', color='k')
plt.show()
|
{"hexsha": "ab34e42aa62042f5e207ffc315081bf2d15dc5d7", "size": 67884, "ext": "py", "lang": "Python", "max_stars_repo_path": "Unicycle Simulation/scripts/destination_distribution_check.py", "max_stars_repo_name": "TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning", "max_stars_repo_head_hexsha": "717b9f07f4ed625ee33ab8ec22ce78dc2907d759", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-01-07T19:37:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T08:50:28.000Z", "max_issues_repo_path": "Unicycle Simulation/scripts/destination_distribution_check.py", "max_issues_repo_name": "TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning", "max_issues_repo_head_hexsha": "717b9f07f4ed625ee33ab8ec22ce78dc2907d759", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Unicycle Simulation/scripts/destination_distribution_check.py", "max_forks_repo_name": "TSummersLab/Risk_Bounded_Nonlinear_Robot_Motion_Planning", "max_forks_repo_head_hexsha": "717b9f07f4ed625ee33ab8ec22ce78dc2907d759", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1665582303, "max_line_length": 196, "alphanum_fraction": 0.5880030641, "include": true, "reason": "import numpy,from numpy", "num_tokens": 19789}
|
"""
Update claims-based hospitalization indicator.
Author: Maria Jahja
Created: 2020-09-27
"""
# standard packages
import logging
from multiprocessing import Pool, cpu_count
# third party
import numpy as np
import pandas as pd
from delphi_utils import GeoMapper
# first party
from delphi_utils import Weekday
from .config import Config, GeoConstants
from .load_data import load_data
from .indicator import ClaimsHospIndicator
class ClaimsHospIndicatorUpdater:
"""Updater class for claims-based hospitalization indicator."""
# pylint: disable=too-many-instance-attributes, too-many-arguments
# all variables are used
def __init__(self, startdate, enddate, dropdate, geo, parallel, weekday,
write_se, signal_name):
"""
Initialize updater for the claims-based hospitalization indicator.
Args:
startdate: first indicator date (YYYY-mm-dd)
enddate: last indicator date (YYYY-mm-dd)
dropdate: data drop date (YYYY-mm-dd)
geo: geographic resolution, one of ["county", "state", "msa", "hrr", "hhs", "nation"]
parallel: boolean to run the indicator update in parallel
weekday: boolean to adjust for weekday effects
write_se: boolean to write out standard errors, if true, use an obfuscated name
signal_name: string signal name
"""
self.startdate, self.enddate, self.dropdate = [pd.to_datetime(t) for t in
(startdate, enddate, dropdate)]
self.geo, self.parallel, self.weekday, self.write_se, self.signal_name = \
geo.lower(), parallel, weekday, write_se, signal_name
# init in shift_dates, declared here for pylint
self.burnindate, self.fit_dates, self.burn_in_dates, self.output_dates = \
[None] * 4
assert (
self.startdate > (Config.FIRST_DATA_DATE + Config.BURN_IN_PERIOD)
), f"not enough data to produce estimates starting {self.startdate}"
assert self.startdate < self.enddate, "start date >= end date"
assert self.enddate <= self.dropdate, "end date > drop date"
assert (
geo in ['county', 'state', 'msa', 'hrr', 'hhs', 'nation']
), f"{geo} is invalid, pick one of 'county', 'state', 'msa', 'hrr', 'hhs', 'nation'"
def shift_dates(self):
"""
Shift estimates forward to account for time lag.
Explanation:
We will shift estimates one day forward to account for a 1 day lag. For example,
we want to produce estimates for the time range May 2 to May 20, inclusive.
Given a drop on May 20, we have data up until May 19. We then train on data from
Jan 1 until May 19, storing only the values on May 1 to May 19. we then shift
the dates forward by 1, giving us values on May 2 to May 20. We shift the
startdate back by one day in order to get the proper estimate at May 1.
"""
drange = lambda s, e: pd.date_range(start=s, periods=(e - s).days, freq='D')
self.startdate = self.startdate - Config.DAY_SHIFT
self.burnindate = self.startdate - Config.BURN_IN_PERIOD
self.fit_dates = drange(Config.FIRST_DATA_DATE, self.dropdate)
self.burn_in_dates = drange(self.burnindate, self.dropdate)
self.output_dates = drange(self.startdate, self.enddate)
def geo_reindex(self, data):
"""
Reindex dataframe based on desired output geography.
Args:
data: dataframe, the output of load_data::load_data()
Returns:
reindexed dataframe
"""
geo_map = GeoMapper()
if self.geo == "county":
data_frame = geo_map.fips_to_megacounty(data,
Config.MIN_DEN,
Config.MAX_BACKWARDS_PAD_LENGTH,
thr_col="den",
mega_col=self.geo)
elif self.geo == "state":
data_frame = geo_map.replace_geocode(data,
from_code="fips",
new_col=self.geo,
new_code="state_id")
data_frame[self.geo] = data_frame[self.geo]
elif self.geo in ["msa", "hhs", "nation"]:
data_frame = geo_map.replace_geocode(data,
from_code="fips",
new_code=self.geo)
elif self.geo == "hrr":
data_frame = data # data is already adjusted in aggregation step above
else:
logging.error(
"%s is invalid, pick one of 'county', 'state', 'msa', 'hrr', 'hhs', nation'",
self.geo)
return False
unique_geo_ids = pd.unique(data_frame[self.geo])
data_frame.set_index([self.geo, "timestamp"], inplace=True)
# for each location, fill in all missing dates with 0 values
multiindex = pd.MultiIndex.from_product((unique_geo_ids, self.fit_dates),
names=[self.geo, Config.DATE_COL])
assert (
len(multiindex) <= (GeoConstants.MAX_GEO[self.geo] * len(self.fit_dates))
), "more loc-date pairs than maximum number of geographies x number of dates"
# fill dataframe with missing dates using 0
data_frame = data_frame.reindex(multiindex, fill_value=0)
data_frame.fillna(0, inplace=True)
return data_frame
def update_indicator(self, input_filepath, outpath, logger):
"""
Generate and output indicator values.
Args:
input_filepath: path to the aggregated claims data
outpath: output path for the csv results
logger: logger object used for progress and warning messages
"""
self.shift_dates()
final_output_inds = \
(self.burn_in_dates >= self.startdate) & (self.burn_in_dates <= self.enddate)
# load data
base_geo = Config.HRR_COL if self.geo == Config.HRR_COL else Config.FIPS_COL
data = load_data(input_filepath, self.dropdate, base_geo)
data_frame = self.geo_reindex(data)
# handle if we need to adjust by weekday
wd_params = Weekday.get_params(
data_frame,
"den",
["num"],
Config.DATE_COL,
[1, 1e5],
logger,
) if self.weekday else None
# run fitting code (maybe in parallel)
rates = {}
std_errs = {}
valid_inds = {}
if not self.parallel:
for geo_id, sub_data in data_frame.groupby(level=0):
sub_data.reset_index(inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(
wd_params, sub_data, ["num"], Config.DATE_COL)
sub_data.set_index(Config.DATE_COL, inplace=True)
res = ClaimsHospIndicator.fit(sub_data, self.burnindate, geo_id)
res = pd.DataFrame(res)
rates[geo_id] = np.array(res.loc[final_output_inds, "rate"])
std_errs[geo_id] = np.array(res.loc[final_output_inds, "se"])
valid_inds[geo_id] = np.array(res.loc[final_output_inds, "incl"])
else:
n_cpu = min(Config.MAX_CPU_POOL, cpu_count())
logging.debug("starting pool with %d workers", n_cpu)
with Pool(n_cpu) as pool:
pool_results = []
for geo_id, sub_data in data_frame.groupby(level=0, as_index=False):
sub_data.reset_index(inplace=True)
if self.weekday:
sub_data = Weekday.calc_adjustment(
wd_params, sub_data, ["num"], Config.DATE_COL)
sub_data.set_index(Config.DATE_COL, inplace=True)
pool_results.append(
pool.apply_async(
ClaimsHospIndicator.fit,
args=(sub_data, self.burnindate, geo_id,),
)
)
pool_results = [proc.get() for proc in pool_results]
for res in pool_results:
geo_id = res["geo_id"]
res = pd.DataFrame(res)
rates[geo_id] = np.array(res.loc[final_output_inds, "rate"])
std_errs[geo_id] = np.array(res.loc[final_output_inds, "se"])
valid_inds[geo_id] = np.array(res.loc[final_output_inds, "incl"])
# write out results
unique_geo_ids = list(rates.keys())
output_dict = {
"rates": rates,
"se": std_errs,
"dates": self.output_dates,
"geo_ids": unique_geo_ids,
"geo_level": self.geo,
"include": valid_inds,
}
self.write_to_csv(output_dict, outpath)
logging.debug("wrote files to %s", outpath)
def write_to_csv(self, output_dict, output_path="./receiving"):
"""
Write values to csv.
Args:
output_dict: dictionary containing values, se, unique dates, and unique geo_id
output_path: outfile path to write the csv
"""
if self.write_se:
logging.info("========= WARNING: WRITING SEs TO %s =========",
self.signal_name)
geo_level = output_dict["geo_level"]
dates = output_dict["dates"]
geo_ids = output_dict["geo_ids"]
all_rates = output_dict["rates"]
all_se = output_dict["se"]
all_include = output_dict["include"]
out_n = 0
for i, date in enumerate(dates):
filename = "%s/%s_%s_%s.csv" % (
output_path,
(date + Config.DAY_SHIFT).strftime("%Y%m%d"),
geo_level,
self.signal_name,
)
with open(filename, "w") as outfile:
outfile.write("geo_id,val,se,direction,sample_size\n")
for geo_id in geo_ids:
val = all_rates[geo_id][i]
se = all_se[geo_id][i]
if all_include[geo_id][i]:
assert not np.isnan(val), "value for included value is nan"
assert not np.isnan(se), "se for included rate is nan"
if val > 90:
logging.warning("value suspicious, %s: %d", geo_id, val)
assert se < 5, f"se suspicious, {geo_id}: {se}"
if self.write_se:
assert val > 0 and se > 0, "p=0, std_err=0 invalid"
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, val, se, "NA", "NA"))
else:
# for privacy reasons we will not report the standard error
outfile.write(
"%s,%f,%s,%s,%s\n" % (geo_id, val, "NA", "NA", "NA"))
out_n += 1
logging.debug("wrote %d rows for %d %s", out_n, len(geo_ids), geo_level)
|
{"hexsha": "b4169370d0d634fa8d8944bdd2c054734cf1b65f", "size": 11378, "ext": "py", "lang": "Python", "max_stars_repo_path": "claims_hosp/delphi_claims_hosp/update_indicator.py", "max_stars_repo_name": "jingjtang/covidcast-indicators", "max_stars_repo_head_hexsha": "34cb8786f78fbea2710b810a9500ee02c2379241", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-10-12T04:27:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T16:56:57.000Z", "max_issues_repo_path": "claims_hosp/delphi_claims_hosp/update_indicator.py", "max_issues_repo_name": "jingjtang/covidcast-indicators", "max_issues_repo_head_hexsha": "34cb8786f78fbea2710b810a9500ee02c2379241", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 666, "max_issues_repo_issues_event_min_datetime": "2020-09-30T21:18:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:37:12.000Z", "max_forks_repo_path": "claims_hosp/delphi_claims_hosp/update_indicator.py", "max_forks_repo_name": "jingjtang/covidcast-indicators", "max_forks_repo_head_hexsha": "34cb8786f78fbea2710b810a9500ee02c2379241", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-10-01T14:25:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-12T08:31:19.000Z", "avg_line_length": 42.4552238806, "max_line_length": 97, "alphanum_fraction": 0.5454385657, "include": true, "reason": "import numpy", "num_tokens": 2436}
|
module DataIO
include("filesystem/filesystem.jl")
end # module DataIO
|
{"hexsha": "1e3b269151c4aa93cdeee9b773d2ae58ee48eb84", "size": 73, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/dataio/dataio.jl", "max_stars_repo_name": "gitter-badger/NumericalDataManipulation.jl", "max_stars_repo_head_hexsha": "4f1bc43e8c2f94c3700c88619f51a9632f956306", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/dataio/dataio.jl", "max_issues_repo_name": "gitter-badger/NumericalDataManipulation.jl", "max_issues_repo_head_hexsha": "4f1bc43e8c2f94c3700c88619f51a9632f956306", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/dataio/dataio.jl", "max_forks_repo_name": "gitter-badger/NumericalDataManipulation.jl", "max_forks_repo_head_hexsha": "4f1bc43e8c2f94c3700c88619f51a9632f956306", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.1666666667, "max_line_length": 35, "alphanum_fraction": 0.7671232877, "num_tokens": 19}
|
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
'''
This package is to be used as a library. Please do not edit.
'''
def fpoly(x: np.float) -> np.float:
""" Simple polynomial of degree 5"""
return 0.009 * (x ** 5) + 0.02 * (x ** 4) - 0.32 * (x ** 3) - 0.54 * (x ** 2) + 3.2 * x - 1.0
def dfpoly(x: np.float) -> np.float:
"""Derivative of simple polynomial of degree 5"""
return 5.0 * 0.009 * (x ** 4) + 4.0 * 0.02 * (x ** 3) - 3.0 * 0.32 * (x ** 2) - 2.0 * 0.54 * x + 3.2
# ======================================================================
# Functions for Newton Fractal
def generate_sampling(borders: list, size: int) -> np.ndarray:
size_x = size
size_y = int(size * (borders[3] - borders[2]) / (borders[1] - borders[0]))
sx = np.linspace(borders[0], borders[1], size_x)
sy = np.linspace(borders[2], borders[3], size_y)
x, y = np.meshgrid(sx, sy)
sampling = x + 1j * y
return sampling, size_x, size_y
def get_colors(roots: np.ndarray) -> np.ndarray:
colors = np.zeros((roots.shape[0], 3))
c_idx = np.linspace(0.0, 1.0, roots.shape[0])
cm = matplotlib.cm.get_cmap('jet')
for idx, i in enumerate(c_idx):
colors[idx] = cm(i)[:3]
return colors
# Roots of unity
def rou(k):
def f(x):
return x ** k - 1
return f
def drou(k):
def f(x):
return k * x ** (k - 1)
return f
def rou_roots(k):
return np.array([np.exp(2.j * np.pi * i / k) for i in range(k)])
rou_borders = [-1.5, 1.5, -1.5, 1.5]
# Polynomial
def poly(x):
return x ** 3 - 2 * x + 2
def dpoly(x):
return 3 * x ** 2 - 2
poly_roots = np.array([np.complex128(-1.76929235423863), np.complex128(0.884646177119316 + 0.589742805022206j),
np.complex128(0.884646177119316 - 0.589742805022206j)])
poly_borders = [-1.5, 0.5, -1.0, 1.0]
# Sinus function
def sin(x):
return np.sin(x)
def dsin(x):
return np.cos(x)
sin_roots = np.array(np.linspace(-10 * np.pi, 10 * np.pi, 21))
sin_borders = [-np.pi, np.pi, -np.pi, np.pi]
fractal_functions = [[rou(4), drou(4), rou_roots(4), rou_borders, "roots_of_unity_4"],
[rou(7), drou(7), rou_roots(7), rou_borders, "roots_of_unity_7"],
[poly, dpoly, poly_roots, poly_borders, "polynomial"],
[sin, dsin, sin_roots, sin_borders, "sinus"]]
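# Hedged usage sketch (not part of the original library): one way to rasterize a
# Newton fractal from the (f, df, roots, borders) tuples collected above. The
# function name and the iteration count below are assumptions, not library API.
def newton_fractal_sketch(f, df, roots, borders, size=400, n_iter=40):
    sampling, size_x, size_y = generate_sampling(borders, size)
    z = sampling.copy()
    for _ in range(n_iter):
        # Newton step z <- z - f(z)/f'(z), applied elementwise on the complex grid
        z = z - f(z) / df(z)
    # label every grid point with the index of the nearest root
    dist = np.abs(z[..., None] - np.asarray(roots)[None, None, :])
    return np.argmin(dist, axis=-1)
# e.g. labels = newton_fractal_sketch(*fractal_functions[0][:4]) classifies the grid
# for the 4th roots of unity; colors for plotting can come from get_colors(roots).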
# ======================================================================
# Functions for Minimal Surfaces
def generate_cylinder(nc, nz, scale=0.8):
v = np.zeros((nc * nz, 3))
f = np.zeros((2 * nc * (nz - 1), 3), dtype=np.int)
phi = np.linspace(0.0, 2.0 * np.pi, endpoint=False, num=nc)
z = np.linspace(0.0, 1.0, endpoint=True, num=nz)
for i in range(nz):
for j in range(nc):
v[i * nc + j, :] = (scale * np.cos(phi[j]), scale * np.sin(phi[j]), z[i])
for i in range(nz - 1):
for j in range(nc):
vi = i * nc + j
ni = 1
if j + 1 >= nc:
ni -= nc
f[2 * vi, :] = (vi, vi + ni, vi + nc)
f[2 * vi + 1, :] = (vi + ni, vi + nc + ni, vi + nc)
c1 = list(range(0, nc))
c2 = list(range(v.shape[0] - nc, v.shape[0]))
c1.extend(c2)
c = np.array(c1)
return v, f, c
def load_object(name):
object = np.load(name + ".npz")
return object["v"], object["f"], object["c"]
def prepare_visualization(v, f):
fig = plt.figure()
cmap = plt.get_cmap('Blues')
norm = lambda x: x
ax = fig.gca(projection='3d')
limits = (np.min(v[:, 0]), np.max(v[:, 0]), np.min(v[:, 1]), np.max(v[:, 1]), np.min(v[:, 2]), np.max(v[:, 2]))
surf = ax.plot_trisurf(v[:, 0], v[:, 1], v[:, 2], triangles=f, cmap=cmap, linewidth=0.1, norm=norm, shade=False,
alpha=0.8)
ax.set_xlim3d(limits[0], limits[1])
ax.set_ylim3d(limits[2], limits[3])
ax.set_zlim3d(limits[4], limits[5])
ax.set_axis_off()
return fig, surf, ax, limits
def update_visualization(v, f, abs_gradient, limits, ax, normalize=False):
ax.clear()
if normalize:
abs_gradient = abs_gradient / abs_gradient.max()
cmap = plt.get_cmap('Blues')
norm = lambda x: x
surf = ax.plot_trisurf(v[:, 0], v[:, 1], v[:, 2], triangles=f, cmap=cmap, linewidth=0.1, norm=norm,
shade=False, alpha=0.8)
surf.set_array(abs_gradient)
ax.set_xlim3d(limits[0], limits[1])
ax.set_ylim3d(limits[2], limits[3])
ax.set_zlim3d(limits[4], limits[5])
ax.set_axis_off()
def calculate_abs_gradient(g, f, c):
indices = np.arange(g.shape[0])
indices = np.delete(indices, c)
grad_abs = np.sqrt(np.sum(g * g, axis=1))
grad_per_tri = np.zeros(f.shape[0])
for fa in range(f.shape[0]):
fgrad = 0.0
for idx in range(3):
vidx = f[fa, idx]
if vidx in indices:
fgrad += grad_abs[vidx]
grad_per_tri[fa] = fgrad
return grad_per_tri
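# Hedged usage sketch (not part of the original library): a plain fixed-step
# gradient descent toward a minimal surface. `area_gradient` is a user-supplied
# callable returning one 3D gradient vector per vertex; it is an assumption here,
# nothing in this module provides it.
def minimal_surface_descent_sketch(area_gradient, nc=30, nz=20, steps=100, eta=1e-2):
    v, f, c = generate_cylinder(nc, nz)
    fig, surf, ax, limits = prepare_visualization(v, f)
    for _ in range(steps):
        g = area_gradient(v, f)
        g[c] = 0.0                       # keep the two boundary rings fixed
        v = v - eta * g                  # descend on the interior vertices only
        update_visualization(v, f, calculate_abs_gradient(g, f, c), limits, ax)
    return v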
|
{"hexsha": "8d5ee936f0bcf3c5ccf6493e418f2b4fc99084df", "size": 5033, "ext": "py", "lang": "Python", "max_stars_repo_path": "Project6/lib.py", "max_stars_repo_name": "veronikadim99/Wissenschaftliches-Rechnen", "max_stars_repo_head_hexsha": "3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Project6/lib.py", "max_issues_repo_name": "veronikadim99/Wissenschaftliches-Rechnen", "max_issues_repo_head_hexsha": "3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project6/lib.py", "max_forks_repo_name": "veronikadim99/Wissenschaftliches-Rechnen", "max_forks_repo_head_hexsha": "3b7c86e9488bf434f3ad1d590f5b9bb9b4cdf218", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5965909091, "max_line_length": 116, "alphanum_fraction": 0.5434134711, "include": true, "reason": "import numpy", "num_tokens": 1667}
|
""" Provide a consistent set of constants to use through CLMM """
from enum import Enum
import astropy.constants as astropyconst
import astropy.units as u
class Constants(Enum):
""" A set of constants for consistency throughout the
code and dependencies. """
CLIGHT = 299792458.0
""" Speed of light (m/s)
Source: CODATA 2018
"""
CLIGHT_KMS = CLIGHT*1.0e-3
""" Speed of light (km/s)
Source: CODATA 2018
"""
GNEWT = 6.67430e-11
""" Newton's constant (m^3/kg/s^2)
Source: CODATA 2018
"""
PC_TO_METER = 3.085677581491367e16
""" parsec to meter (m)
Source: IAU 2015
"""
GNEWT_SOLAR_MASS = 1.3271244e20
""" G x Solar mass (m^3/s^2)
Source: IAU 2015
"""
SOLAR_MASS = GNEWT_SOLAR_MASS/GNEWT
""" Solar mass (kg)
Source: IAU 2015/CODATA 2018
"""
|
{"hexsha": "99600f2404d9ebdd062dc47bee88f4bedef4b1ce", "size": 854, "ext": "py", "lang": "Python", "max_stars_repo_path": "clmm/constants.py", "max_stars_repo_name": "96RadhikaJadhav/CLMM", "max_stars_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "clmm/constants.py", "max_issues_repo_name": "96RadhikaJadhav/CLMM", "max_issues_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "clmm/constants.py", "max_forks_repo_name": "96RadhikaJadhav/CLMM", "max_forks_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.5652173913, "max_line_length": 65, "alphanum_fraction": 0.6170960187, "include": true, "reason": "import astropy", "num_tokens": 282}
|
[STATEMENT]
lemma uminus_one_neq_one_double[simp]: "- 1 \<noteq> (1 :: double)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - 1 \<noteq> 1
[PROOF STEP]
by (transfer, transfer, simp)
|
{"llama_tokens": 82, "file": "MFODL_Monitor_Optimized_Code_Double", "length": 1}
|
import cv2
import numpy as np
import os
Dir = os.getcwd()
path = os.path.join(Dir, 'source')
images = os.listdir(path)
def rescale(frame, scale=0.20):
w = int(frame.shape[1] * scale)  # new width: shape[1] is the image width
h = int(frame.shape[0] * scale)  # new height: shape[0] is the image height
dim = (w, h)  # cv2.resize expects (width, height)
return cv2.resize(frame, dim)
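# For example (hypothetical input): with the default scale of 0.20, a
# 4000x3000 (width x height) source image is resized to 800x600.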
for file in images:
filename = file.split('.')[0]
image = cv2.imread(f"{path}/{file}")
resized_img = rescale(image)
cv2.imwrite(f"{os.path.join(Dir, 'target/rescale')}/{filename}1.jpg", resized_img)
|
{"hexsha": "14f152b70583e2eebcc2f1a7f48d8076edbc038e", "size": 492, "ext": "py", "lang": "Python", "max_stars_repo_path": "opencv_/rescale.py", "max_stars_repo_name": "AKSK16101999/Image_Processing", "max_stars_repo_head_hexsha": "d6b12e445ace1c7afff747f24d5028f506a96825", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-23T15:53:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-23T15:53:44.000Z", "max_issues_repo_path": "opencv_/rescale.py", "max_issues_repo_name": "AKSK16101999/Image_Processing", "max_issues_repo_head_hexsha": "d6b12e445ace1c7afff747f24d5028f506a96825", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "opencv_/rescale.py", "max_forks_repo_name": "AKSK16101999/Image_Processing", "max_forks_repo_head_hexsha": "d6b12e445ace1c7afff747f24d5028f506a96825", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6, "max_line_length": 86, "alphanum_fraction": 0.6402439024, "include": true, "reason": "import numpy", "num_tokens": 145}
|
import shutil
from os import path
import numpy as np
import logging
import yass
from yass import preprocess, detect, cluster, templates, deconvolute
from yass.batch import RecordingsReader
from yass import read_config
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
def test_threshold_output(path_to_tests):
"""Test that pipeline using threshold detector returns the same results
"""
logger = logging.getLogger(__name__)
yass.set_config(path.join(path_to_tests, 'config_threshold_49.yaml'))
CONFIG = read_config()
TMP = Path(CONFIG.data.root_folder, 'tmp')
logger.info('Removing %s', TMP)
shutil.rmtree(str(TMP))
PATH_TO_REF = '/home/Edu/data/threshold'
np.random.seed(0)
# run preprocess
(standardized_path, standardized_params,
whiten_filter) = preprocess.run()
# load preprocess output
path_to_standardized = path.join(PATH_TO_REF,
'preprocess', 'standardized.bin')
path_to_whitening = path.join(PATH_TO_REF, 'preprocess', 'whitening.npy')
whitening_saved = np.load(path_to_whitening)
standardized_saved = RecordingsReader(path_to_standardized,
loader='array').data
standardized = RecordingsReader(standardized_path, loader='array').data
# test preprocess
np.testing.assert_array_equal(whitening_saved, whiten_filter)
np.testing.assert_array_equal(standardized_saved, standardized)
# run detect
(score, spike_index_clear,
spike_index_all) = detect.run(standardized_path,
standardized_params,
whiten_filter)
# load detect output
path_to_scores = path.join(PATH_TO_REF, 'detect', 'scores_clear.npy')
path_to_spike_index_clear = path.join(PATH_TO_REF, 'detect',
'spike_index_clear.npy')
path_to_spike_index_all = path.join(PATH_TO_REF, 'detect',
'spike_index_all.npy')
scores_saved = np.load(path_to_scores)
spike_index_clear_saved = np.load(path_to_spike_index_clear)
spike_index_all_saved = np.load(path_to_spike_index_all)
# test detect output
np.testing.assert_array_equal(scores_saved, score)
np.testing.assert_array_equal(spike_index_clear_saved, spike_index_clear)
np.testing.assert_array_equal(spike_index_all_saved, spike_index_all)
# run cluster
(spike_train_clear,
tmp_loc, vbParam) = cluster.run(score, spike_index_clear)
# load cluster output
path_to_spike_train_cluster = path.join(PATH_TO_REF, 'cluster',
'spike_train_cluster.npy')
spike_train_cluster_saved = np.load(path_to_spike_train_cluster)
# test cluster
#np.testing.assert_array_equal(spike_train_cluster_saved, spike_train_clear)
# run templates
(templates_, spike_train,
groups, idx_good_templates) = templates.run(spike_train_clear, tmp_loc,
save_results=True)
# load templates output
path_to_templates = path.join(PATH_TO_REF, 'templates', 'templates.npy')
templates_saved = np.load(path_to_templates)
# test templates
np.testing.assert_array_equal(templates_saved, templates_)
# run deconvolution
spike_train = deconvolute.run(spike_index_all, templates_)
# load deconvolution output
path_to_spike_train = path.join(PATH_TO_REF, 'spike_train.npy')
spike_train_saved = np.load(path_to_spike_train)
# test deconvolution
np.testing.assert_array_equal(spike_train_saved, spike_train)
|
{"hexsha": "6a61280ffc25826bda003d7e0e2fed7cbb5e982b", "size": 3680, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/performance/test_threshold_output.py", "max_stars_repo_name": "jaib1/yass", "max_stars_repo_head_hexsha": "9899c7d63c522a26b160ac7a223c794dfd3e23c6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2017-10-29T02:21:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T01:12:27.000Z", "max_issues_repo_path": "tests/performance/test_threshold_output.py", "max_issues_repo_name": "jaib1/yass", "max_issues_repo_head_hexsha": "9899c7d63c522a26b160ac7a223c794dfd3e23c6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 257, "max_issues_repo_issues_event_min_datetime": "2017-10-25T17:11:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-21T19:12:00.000Z", "max_forks_repo_path": "tests/performance/test_threshold_output.py", "max_forks_repo_name": "jaib1/yass", "max_forks_repo_head_hexsha": "9899c7d63c522a26b160ac7a223c794dfd3e23c6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 24, "max_forks_repo_forks_event_min_datetime": "2017-10-28T19:59:44.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T09:56:45.000Z", "avg_line_length": 35.0476190476, "max_line_length": 80, "alphanum_fraction": 0.6940217391, "include": true, "reason": "import numpy", "num_tokens": 797}
|
import random
import copy
import math
from collections import defaultdict
import numpy as np
import scipy as sp
import h5py
import cyclus
import pickle
from cyclus.agents import Institution, Agent, Facility
from cyclus import lib
import cyclus.typesystem as ts
class ann_lwr(Facility):
fuel_incommod = ts.String(
doc="The commodity name for incoming fuel",
tooltip="Incoming fuel",
uilabel="Incoming fuel"
)
fuel_outcommod = ts.String(
doc="The commodity name for discharge fuel",
tooltip="Discharge Fuel",
uilabel="Discharge Fuel"
)
pickle_path = ts.String(
doc="Path to the pickle file",
tooltip="Absolute path to the pickle file"
)
# one row would be 2.1_30000 3.1_40000 4.1_50000 etc
enr_bu_matrix = ts.VectorString(
doc="enrichment and burnup matrix",
tooltip="enrichment_burnup column separated by space"
)
n_assem_core = ts.Int(
doc="Number of assemblies",
tooltip="Number of assemblies in core"
)
n_assem_batch = ts.Int(
doc="Number of assemblies per batch",
tooltip="Number of assemblies per batch"
)
assem_size = ts.Double(
doc="Assembly mass",
tooltip="Assembly mass"
)
power_cap = ts.Double(
doc="Power capacity of reactor",
tooltip="Power capacity of reactor",
)
cycle_time_eq = ts.String(
doc="cycle time of reactor equation",
tooltip="Cycle time of reactor equation"
)
refuel_time_eq = ts.String(
doc="Refuel time of reactor equation",
tooltip="Refuel time of reactor equation"
)
core = ts.ResBufMaterialInv()
waste = ts.ResBufMaterialInv()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def enter_notify(self):
super().enter_notify()
self.model_dict = pickle.load(open(self.pickle_path, 'rb'))
# change other to h-1
other_index = self.model_dict['iso_list'].index('other')
self.model_dict['iso_list'][other_index] = 'h-1'
self.iso_list = self.model_dict['iso_list']
# check if it's integer batches
if (self.n_assem_core / self.n_assem_batch)%1 != 0:
raise ValueError('Sorry can only do integer batches')
# input consistency checking
self.enr_matrix, self.bu_matrix = self.check_enr_bu_matrix()
# !!
self.f = open('f.txt', 'w')
# set initial cycle and refuel time
t = self.context.time
self.cycle_time = max(0, int(eval(self.cycle_time_eq)))
self.refuel_time = max(0, int(eval(self.refuel_time_eq)))
# set core capacity
self.core.capacity = self.n_assem_core * self.assem_size
self.cycle_step = 0
self.batch_gen = 0
self.n_batch = int(self.n_assem_core / self.n_assem_batch)
# if no exit time, exit time is 1e5
if self.exit_time == -1:
self.decom_time = 1e5
else:
self.decom_time = self.exit_time
def tick(self):
# If it is time to decommission, deplete the core and discharge it;
# when decommissioning mid-cycle, the burnup is prorated by the
# fraction of the cycle completed so far.
if self.context.time == self.decom_time:
# burnup is prorated by the ratio
cycle_step_ratio = self.cycle_step / self.cycle_time
for index, bu_list in enumerate(self.bu_matrix):
prorated_bu_list = bu_list * cycle_step_ratio
self.transmute_and_discharge(prorated_bu_list,
self.enr_matrix[index])
return
if self.cycle_step == self.cycle_time:
if self.batch_gen < self.n_batch:
i = self.batch_gen
else:
i = -1
bu_list = self.bu_matrix[i]
self.transmute_and_discharge(bu_list,
self.enr_matrix[i])
self.batch_gen += 1
def tock(self):
if (self.cycle_step >= self.cycle_time + self.refuel_time) and (self.is_core_full()):
t = self.context.time
self.cycle_time = max(0, int(eval(self.cycle_time_eq)))
self.refuel_time = max(0, int(eval(self.refuel_time_eq)))
self.cycle_step = 1
# produce power if core is full
if (self.cycle_step >= 0) and (self.cycle_step < self.cycle_time) and (self.is_core_full()):
self.produce_power(True)
else:
self.produce_power(False)
if self.cycle_step > 0 or self.is_core_full():
self.cycle_step += 1
def get_material_bids(self, requests):
""" Gets material bids that want its 'outcommod' and
returns bid portfolio
"""
bids = []
if self.fuel_outcommod in requests.keys():
reqs = requests[self.fuel_outcommod]
for req in reqs:
if self.waste.empty():
break
qty = min(req.target.quantity, self.waste.quantity
)
next_in_line = self.waste.peek()
mat = ts.Material.create_untracked(qty, next_in_line.comp())
bids.append({'request': req, 'offer': mat})
if len(bids) == 0:
return
port = {'bids': bids}
return port
def get_material_trades(self, trades):
""" Give out fuel_outcommod from waste buffer"""
responses = {}
for trade in trades:
commodity = trade.request.commodity
if commodity == self.fuel_outcommod:
mat_list = self.waste.pop_n(self.waste.count)
if len(mat_list) > 1:
for mat in mat_list[1:]:
mat_list[0].absorb(mat)
responses[trade] = mat_list[0]
return responses
def get_material_requests(self):
""" Ask for fuel_incommod"""
ports = []
if self.context.time == self.decom_time:
return []
if self.is_core_full():
return []
recipes = {}
qty = {}
mat = {}
t = self.context.time
# initial core loading
if self.batch_gen == 0:
enr_to_request = self.enr_matrix
for i in range(np.shape(enr_to_request)[0]):
for j in range(np.shape(enr_to_request)[1]):
enr = eval(enr_to_request[i,j])
comp = {'u-238': 100-enr,
'u-235': enr}
qty = self.assem_size
mat = ts.Material.create_untracked(qty, comp)
ports.append({'commodities': {self.fuel_incommod: mat},
'constraints': qty})
# subsequent equilibrium batch loading
else:
enr_to_request = self.enr_matrix[-1]
for enrichment in enr_to_request:
enr = eval(enrichment)
comp = {'u-238': 100-enr,
'u-235': enr}
qty = self.assem_size
mat = ts.Material.create_untracked(qty, comp)
ports.append({'commodities' : {self.fuel_incommod: mat},
'constraints': qty})
return ports
def accept_material_trades(self, responses):
""" Get fuel_incommod and store it into core"""
for key, mat in responses.items():
if key.request.commodity == self.fuel_incommod:
self.core.push(mat)
def is_core_full(self):
if self.core.count == self.n_assem_core:
return True
else:
return False
def predict(self, enr_bu):
model = self.model_dict['model']
x = self.model_dict['xscaler'].transform(enr_bu)
y = self.model_dict['yscaler'].inverse_transform(
model.predict(x))[0]
comp_dict = {}
for indx, iso in enumerate(self.iso_list):
# zero if model predicts negative
if y[indx] < 0:
y[indx] = 0
comp_dict[iso] = y[indx]
return comp_dict
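# Hedged illustration (hypothetical numbers, not from the source): for
# enr_bu = [[3.1, 40000]] the scaled input is run through the pickled model,
# the prediction is inverse-transformed, negative outputs are clamped to zero,
# and a dict keyed by iso_list (e.g. {'u-235': ..., 'u-238': ..., 'h-1': ...})
# is returned and later passed to transmute().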
def transmute_and_discharge(self, bu_list, enr_list):
# this should ideally be one batch,
t = self.context.time
if self.batch_gen < self.n_batch:
enr = enr_list[self.batch_gen]
else:
enr = enr_list[-1]
for indx, bu in enumerate(bu_list):
enr_bu = [[eval(enr_list[indx]),eval(bu)]]
print('Transmuting fuel with enrichment, burnup:')
print(enr_bu)
discharge_fuel = self.core.pop()
comp = self.predict(enr_bu)
discharge_fuel.transmute(comp)
self.waste.push(discharge_fuel)
def produce_power(self, produce=True):
if produce:
lib.record_time_series(lib.POWER, self, float(self.power_cap))
else:
lib.record_time_series(lib.POWER, self, 0)
def check_enr_bu_matrix(self):
# parse bu enr matrix
empty = np.zeros(len(self.enr_bu_matrix[0].split(' ')))
for i in self.enr_bu_matrix:
entry = np.array(i.split(' '))
if len(entry) != self.n_assem_batch:
raise ValueError('The length of entry has to match n_assem_batch')
try:
empty = np.vstack((empty, entry))
except ValueError:
print('Your entry lengths per batch are inconsistent!')
matrix = empty[1:]
# separate bu and enrichment
sep = np.char.split(matrix, '_')
bu_matrix = np.empty(np.shape(matrix), dtype=object)
enr_matrix = np.empty(np.shape(matrix), dtype=object)
for i in range(np.shape(sep)[0]):
for j in range(np.shape(sep)[1]):
enr_matrix[i,j] = sep[i,j][0]
bu_matrix[i,j] = sep[i,j][1]
return enr_matrix, bu_matrix
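# Hedged worked example (hypothetical input, not from the source): with
# n_assem_batch = 3 and
#   enr_bu_matrix = ["2.1_30000 3.1_40000 4.1_50000",
#                    "2.5_32000 3.5_42000 4.5_52000"]
# check_enr_bu_matrix() returns two 2x3 object arrays of strings,
#   enr_matrix ~ [["2.1", "3.1", "4.1"], ["2.5", "3.5", "4.5"]]
#   bu_matrix  ~ [["30000", "40000", "50000"], ["32000", "42000", "52000"]]
# the entries stay strings so they can still be eval'd later on.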
|
{"hexsha": "600aedb46e9d669d2c159108ac56f71e69934764", "size": 9971, "ext": "py", "lang": "Python", "max_stars_repo_path": "ann_lwr/ann_lwr.py", "max_stars_repo_name": "jbae11/ann_pwr", "max_stars_repo_head_hexsha": "c35ed75184aa7b06c3469a4bb6bed77ff2465c1c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ann_lwr/ann_lwr.py", "max_issues_repo_name": "jbae11/ann_pwr", "max_issues_repo_head_hexsha": "c35ed75184aa7b06c3469a4bb6bed77ff2465c1c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-10T22:52:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-10T22:52:05.000Z", "max_forks_repo_path": "ann_lwr/ann_lwr.py", "max_forks_repo_name": "jbae11/ann_pwr", "max_forks_repo_head_hexsha": "c35ed75184aa7b06c3469a4bb6bed77ff2465c1c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1262458472, "max_line_length": 100, "alphanum_fraction": 0.565640357, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2283}
|
[STATEMENT]
lemma vars_term_ctxt_apply [simp]:
"vars_term C\<langle>t\<rangle> = vars_ctxt C \<union> vars_term t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. vars_term C\<langle>t\<rangle> = vars_ctxt C \<union> vars_term t
[PROOF STEP]
by (induct C arbitrary: t) auto
|
{"llama_tokens": 112, "file": "Regular_Tree_Relations_Util_Term_Context", "length": 1}
|
"""
Trainer class.
"""
import json
import logging
import os
import sys
import time
from collections import OrderedDict
import torch
import numpy as np
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from galaxy.args import str2bool
from galaxy.data.data_loader import DataLoader
from galaxy.metrics.metrics_tracker import MetricsTracker
def get_logger(log_path, name="default"):
logger = logging.getLogger(name)
logger.propagate = False
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
fh = logging.FileHandler(log_path, mode="w")
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
class Trainer(object):
@classmethod
def add_cmdline_argument(cls, parser):
""" Add the cmdline arguments of trainer. """
group = parser.add_argument_group("Trainer")
group.add_argument("--seed", type=int, default=11,
help="The number of seed to fix random operations.")
group.add_argument("--gpu", type=int, default=0,
help="Whether to use gpu for running, default using cpu.")
group.add_argument("--valid_metric_name", type=str, default="-loss",
help="The validation metric determining which checkpoint is the best.")
group.add_argument("--num_epochs", type=int, default=10,
help="Total number of training epochs to perform.")
group.add_argument("--save_dir", type=str, required=True,
help="The output directory where the model will be saved.")
group.add_argument("--token_loss", type=str2bool, default=True,
help="Whether to update token loss or sentence loss.")
group.add_argument("--batch_size", type=int, default=8,
help="Total batch size for training/evaluation/inference.")
group.add_argument("--log_steps", type=int, default=100,
help="The number of training steps to output current metrics "
"on past training dataset.")
group.add_argument("--valid_steps", type=int, default=2000,
help="The number of training steps to perform a evaluation "
"on validation datasets.")
group.add_argument("--save_checkpoint", type=str2bool, default=True,
help="Whether to save one checkpoints for each training epoch.")
DataLoader.add_cmdline_argument(group)
return group
def __init__(self, model, to_tensor, hparams, logger=None, lr_scheduler=None, optimizer=None,
reader=None, evaluator=None):
self.to_tensor = to_tensor
self.hparams = hparams
self.do_train = hparams.do_train
self.do_infer = hparams.do_infer
self.data_name = hparams.data_name
self.is_decreased_valid_metric = hparams.valid_metric_name[0] == "-"
self.valid_metric_name = hparams.valid_metric_name[1:]
self.num_epochs = hparams.num_epochs
self.save_dir = hparams.save_dir
self.log_steps = hparams.log_steps
self.valid_steps = hparams.valid_steps
self.save_checkpoint = hparams.save_checkpoint
self.gradient_accumulation_steps = hparams.gradient_accumulation_steps
self.weight_decay = hparams.weight_decay
self.batch_size = hparams.batch_size
self.warmup_steps = hparams.warmup_steps
self.gpu = hparams.gpu
self.lr = hparams.lr
self.lr_scheduler = lr_scheduler
self.optimizer = optimizer
self.model = model
self.func_model = self.model.module if self.gpu > 1 else self.model
self.reader = reader
self.evaluator = evaluator
self.tokenizer = reader.tokenizer
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
self.logger = logger or get_logger(os.path.join(self.save_dir, "trainer.log"), "trainer")
self.batch_metrics_tracker = MetricsTracker()
self.token_metrics_tracker = MetricsTracker()
self.best_valid_metric = float("inf" if self.is_decreased_valid_metric else "-inf")
self.epoch = 0
def decode_generated_bspn_resp(self, generated):
"""
Decode generated token ids.
Return a dict with keys 'bspn' and 'resp'.
"""
decoded = {}
eos_r_id = self.reader.eos_r_id
eos_b_id = self.reader.eos_b_id
# eos_r may not exist if galaxy generated repetitive words.
if eos_r_id in generated:
eos_r_idx = generated.index(eos_r_id)
else:
eos_r_idx = len(generated) - 1
# self.logger.info('eos_r not in generated: ' + self.tokenizer.decode(generated))
# predicted bspn, resp
eos_b_idx = generated.index(eos_b_id)
decoded['bspn'] = generated[:eos_b_idx + 1]
decoded['resp'] = generated[eos_b_idx + 1: eos_r_idx + 1]
return decoded
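# Hedged illustration with made-up token ids (not from the source): if eos_b_id = 3
# and eos_r_id = 7, then generated = [5, 6, 3, 8, 9, 7] decodes to
# decoded['bspn'] = [5, 6, 3] and decoded['resp'] = [8, 9, 7].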
def decode_generated_act_resp(self, generated):
"""
Decode generated token ids.
Return a dict with 'resp' (and 'aspn' when it is predicted here).
"""
decoded = {}
eos_a_id = self.reader.eos_a_id
eos_r_id = self.reader.eos_r_id
eos_b_id = self.reader.eos_b_id
# eos_r may not exist if galaxy generated repetitive words.
if eos_r_id in generated:
eos_r_idx = generated.index(eos_r_id)
else:
eos_r_idx = len(generated) - 1
self.logger.info('eos_r not in generated: ' + self.tokenizer.decode(generated))
if self.reader.use_true_curr_aspn: # only predict resp
decoded['resp'] = generated[: eos_r_idx + 1]
else: # predicted aspn, resp
eos_a_idx = generated.index(eos_a_id)
decoded['aspn'] = generated[: eos_a_idx + 1]
decoded['resp'] = generated[eos_a_idx + 1: eos_r_idx + 1]
return decoded
def decode_generated_bspn(self, generated):
eos_b_id = self.reader.eos_b_id
if eos_b_id in generated:
eos_b_idx = generated.index(eos_b_id)
else:
eos_b_idx = len(generated) - 1
return generated[: eos_b_idx + 1]
def set_optimizers(self):
"""
Set up the optimizer and the learning rate scheduler.
Adapted from transformers.Trainer.
Parameters taken from the config: lr (1e-3) and warmup_steps.
"""
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": self.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=self.lr)
num_training_steps = self.reader.set_stats['train']['num_training_steps_per_epoch'] * \
self.num_epochs // self.gradient_accumulation_steps
num_warmup_steps = self.warmup_steps if self.warmup_steps >= 0 else int(num_training_steps * 0.1)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps,
num_training_steps=num_training_steps
)
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
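# Hedged arithmetic illustration (hypothetical numbers): with 1000 training steps per
# epoch, num_epochs = 10 and gradient_accumulation_steps = 2, the scheduler is built
# for num_training_steps = 1000 * 10 // 2 = 5000; with warmup_steps = -1 the warmup
# defaults to int(5000 * 0.1) = 500 optimizer steps.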
def train(self, train_data, dev_data):
# log info
set_stats = self.reader.set_stats['train']
self.logger.info("***** Running training *****")
self.logger.info(" Num Training steps(one turn in a batch of dialogs) per epoch = %d",
set_stats['num_training_steps_per_epoch'])
self.logger.info(" Num Turns = %d", set_stats['num_turns'])
self.logger.info(" Num Dialogs = %d", set_stats['num_dials'])
self.logger.info(" Num Epochs = %d", self.num_epochs)
self.logger.info(" Batch size = %d", self.batch_size)
self.logger.info(" Gradient Accumulation steps = %d", self.gradient_accumulation_steps)
self.logger.info(" Total optimization steps = %d", set_stats['num_training_steps_per_epoch'] *
self.num_epochs // self.gradient_accumulation_steps)
# begin training
num_epochs = self.num_epochs - self.epoch
for epoch in range(num_epochs):
self.train_epoch(train_data=train_data, dev_data=dev_data)
def train_epoch(self, train_data, dev_data):
"""
Train an epoch.
"""
raise NotImplementedError
def infer(self, data_type):
"""
Inference interface.
"""
raise NotImplementedError
def save(self, is_best=False):
""" save """
train_state = {"epoch": self.epoch,
"best_valid_metric": self.best_valid_metric,
"optimizer": self.optimizer.state_dict()}
if self.lr_scheduler is not None:
train_state["lr_scheduler"] = self.lr_scheduler.state_dict()
# Save checkpoint
if self.save_checkpoint:
model_file = os.path.join(self.save_dir, f"state_epoch_{self.epoch}.model")
torch.save(self.model.state_dict(), model_file)
self.logger.info(f"Saved model state to '{model_file}'")
train_file = os.path.join(self.save_dir, f"state_epoch_{self.epoch}.train")
torch.save(train_state, train_file)
self.logger.info(f"Saved train state to '{train_file}'")
# Save current best model
if is_best:
best_model_file = os.path.join(self.save_dir, "best.model")
torch.save(self.model.state_dict(), best_model_file)
best_train_file = os.path.join(self.save_dir, "best.train")
torch.save(train_state, best_train_file)
self.logger.info(
f"Saved best model state to '{best_model_file}' with new best valid metric "
f"{self.valid_metric_name.upper()}={self.best_valid_metric:.3f}")
def load(self):
""" load """
def _load_model_state():
model_state_dict = torch.load(f'{self.func_model.init_checkpoint}.model',
map_location=lambda storage, loc: storage)
if 'module.' in list(model_state_dict.keys())[0]:
new_model_state_dict = OrderedDict()
for k, v in model_state_dict.items():
assert k[:7] == 'module.'
new_model_state_dict[k[7:]] = v
model_state_dict = new_model_state_dict
new_model_state_dict = OrderedDict()
parameters = {name: param for name, param in self.func_model.named_parameters()}
for name, param in model_state_dict.items():
if name in parameters:
if param.shape != parameters[name].shape:
assert hasattr(param, "numpy")
arr = param.numpy()
z = np.random.normal(scale=self.func_model.initializer_range,
size=parameters[name].shape).astype("float32")
if name == 'embedder.token_embedding.weight':
z[-param.shape[0]:] = arr
print(f"part of parameter({name}) random normlize initialize")
else:
if z.shape[0] < param.shape[0]:
z = arr[:z.shape[0]]
print(f"part of parameter({name}) are dropped")
else:
z[:param.shape[0]] = arr
print(f"part of parameter({name}) random normlize initialize")
dtype, device = param.dtype, param.device
z = torch.tensor(z, dtype=dtype, device=device)
new_model_state_dict[name] = z
else:
new_model_state_dict[name] = param
else:
print(f"parameter({name}) are dropped")
model_state_dict = new_model_state_dict
for name in parameters:
if name not in model_state_dict:
if parameters[name].requires_grad:
print(f"parameter({name}) random normlize initialize")
z = np.random.normal(scale=self.func_model.initializer_range,
size=parameters[name].shape).astype("float32")
dtype, device = parameters[name].dtype, parameters[name].device
model_state_dict[name] = torch.tensor(z, dtype=dtype, device=device)
else:
model_state_dict[name] = parameters[name]
self.func_model.load_state_dict(model_state_dict)
self.logger.info(f"Loaded model state from '{self.func_model.init_checkpoint}.model'")
def _load_train_state():
train_file = f"{self.func_model.init_checkpoint}.train"
if os.path.exists(train_file):
train_state_dict = torch.load(train_file, map_location=lambda storage, loc: storage)
self.epoch = train_state_dict["epoch"]
self.best_valid_metric = train_state_dict["best_valid_metric"]
if self.optimizer is not None and "optimizer" in train_state_dict:
self.optimizer.load_state_dict(train_state_dict["optimizer"])
if self.lr_scheduler is not None and "lr_scheduler" in train_state_dict:
self.lr_scheduler.load_state_dict(train_state_dict["lr_scheduler"])
self.logger.info(
f"Loaded train state from '{train_file}' with (epoch-{self.epoch} "
f"best_valid_metric={self.best_valid_metric:.3f})")
else:
self.logger.info(f"Loaded no train state")
if self.func_model.init_checkpoint is None:
self.logger.info(f"Loaded no model !!!")
return
_load_model_state()
_load_train_state()
class MultiWOZTrainer(Trainer):
def __init__(self, model, to_tensor, hparams, logger=None, lr_scheduler=None, optimizer=None,
reader=None, evaluator=None):
super(MultiWOZTrainer, self).__init__(model, to_tensor, hparams, logger, lr_scheduler, optimizer,
reader, evaluator)
def train_epoch(self, train_data, dev_data):
"""
Train an epoch.
"""
times = []
epoch_step = 0
global_step = 0
self.epoch += 1
self.batch_metrics_tracker.clear()
self.token_metrics_tracker.clear()
num_training_steps = self.reader.set_stats['train']['num_training_steps_per_epoch'] // \
self.gradient_accumulation_steps # similar to the original num_batches
self.model.zero_grad()
data_iterator = self.reader.get_data_iterator(all_batches=train_data)
for batch_idx, dial_batch in enumerate(data_iterator):
pv_batch = []
for turn_num, turn_batch in enumerate(dial_batch):
first_turn = (turn_num == 0)
samples, pv_batch = self.reader.convert_batch_turn(turn_batch, pv_batch, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=samples)
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
# Do a training iteration
start_time = time.time()
metrics = self.model(batch, is_training=True)
if self.gpu > 1: # balance metrics for multi-gpu setting
for metric in metrics:
if metric is not None:
assert len(metric) == self.gpu
nll, token_nll, token_num, bce, bce_f1 = metrics
metrics = {}
token_num = torch.sum(token_num)
token_nll = torch.sum(nll) * (batch_size / self.gpu) / token_num
nll = torch.mean(nll)
metrics['token_num'] = token_num
metrics['token_nll'] = token_nll
metrics['nll'] = nll
loss = token_nll if self.func_model.token_loss else nll
if bce is not None:
bce = torch.mean(bce)
bce_f1 = torch.mean(bce_f1)
metrics['bce'] = bce
metrics['bce_f1'] = bce_f1
loss = loss + bce * self.func_model.bce_ratio
metrics['loss'] = loss
else:
loss = metrics["loss"]
self.func_model._optimize(loss, do_update=False, optimizer=self.optimizer)
metrics = {k: v.cpu().detach().numpy() if isinstance(v, torch.Tensor) else v
for k, v in metrics.items()}
token_num = metrics.pop("token_num", None)
elapsed = time.time() - start_time
times.append(elapsed)
epoch_step += 1
batch_metrics = {k: v for k, v in metrics.items() if "token" not in k}
token_metrics = {k: v for k, v in metrics.items() if "token" in k}
self.batch_metrics_tracker.update(batch_metrics, batch_size)
self.token_metrics_tracker.update(token_metrics, token_num)
if (epoch_step % self.gradient_accumulation_steps == 0) or \
(epoch_step == self.reader.set_stats['train']['num_training_steps_per_epoch']):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
global_step += 1
if self.log_steps > 0 and global_step % self.log_steps == 0:
batch_metrics_message = self.batch_metrics_tracker.value()
token_metrics_message = self.token_metrics_tracker.value()
message_prefix = f"[Train][{self.epoch}][{global_step}/{num_training_steps}]"
avg_time = f"AVG_Time-{sum(times[-self.log_steps:]) / self.log_steps:.3f}"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message,
avg_time])
self.logger.info(message)
self.logger.info("-" * 150)
batch_metrics_message = self.batch_metrics_tracker.summary()
token_metrics_message = self.token_metrics_tracker.summary()
message_prefix = f"[Valid][{self.epoch}]"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message])
self.logger.info(message)
cur_valid_metric = self.batch_metrics_tracker.get(self.valid_metric_name)
if self.is_decreased_valid_metric:
is_best = cur_valid_metric < self.best_valid_metric
else:
is_best = cur_valid_metric > self.best_valid_metric
if is_best:
self.best_valid_metric = cur_valid_metric
self.save(is_best)
self.logger.info("-" * 150)
return
def infer(self, data_type='test'):
"""
Inference interface.
"""
self.logger.info("Generation starts ...")
infer_save_file = os.path.join(self.save_dir, f"infer_{self.epoch}.result.json")
infer_samples_save_file = os.path.join(self.save_dir, f"infer_samples_{self.epoch}.result.json")
# Inference
result_collection = {}
begin_time = time.time()
eval_data = self.reader.get_eval_data(data_type)
set_stats = self.reader.set_stats[data_type]
self.logger.info("***** Running Evaluation *****")
self.logger.info(" Num Turns = %d", set_stats['num_turns'])
with torch.no_grad():
pbar = tqdm(eval_data)
for dial_idx, dialog in enumerate(pbar):
pv_turn = {}
for turn_idx, turn in enumerate(dialog):
first_turn = (turn_idx == 0)
inputs, prompt_id = self.reader.convert_turn_eval(turn, pv_turn, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=[inputs])
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
if self.reader.use_true_curr_bspn: # generate act, response
max_len = 60
if not self.reader.use_true_curr_aspn:
max_len = 80
outputs = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_r_id, max_gen_len=max_len)
# resp_gen, need to trim previous context
generated = outputs[0].cpu().numpy().tolist()
try:
decoded = self.decode_generated_act_resp(generated)
except ValueError as exception:
self.logger.info(str(exception))
self.logger.info(self.tokenizer.decode(generated))
decoded = {'resp': [], 'bspn': [], 'aspn': []}
else: # predict bspn, access db, then generate act and resp
outputs = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_b_id, max_gen_len=60)
generated_bs = outputs[0].cpu().numpy().tolist()
bspn_gen = self.decode_generated_bspn(generated_bs)
# check DB result
if self.reader.use_true_db_pointer:
db = turn['db']
else:
db_result = self.reader.bspan_to_DBpointer(self.tokenizer.decode(bspn_gen),
turn['turn_domain'])
assert len(turn['db']) == 4
book_result = turn['db'][2]
assert isinstance(db_result, str)
db = [self.reader.sos_db_id] + \
self.tokenizer.convert_tokens_to_ids([db_result]) + \
[book_result] + \
[self.reader.eos_db_id]
prompt_id = self.reader.sos_a_id
prev_input = torch.tensor(bspn_gen + db)
if self.func_model.use_gpu:
prev_input = prev_input.cuda()
outputs_db = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_r_id, max_gen_len=80,
prev_input=prev_input)
generated_ar = outputs_db[0].cpu().numpy().tolist()
try:
decoded = self.decode_generated_act_resp(generated_ar)
decoded['bspn'] = bspn_gen
except ValueError as exception:
self.logger.info(str(exception))
self.logger.info(self.tokenizer.decode(generated_ar))
decoded = {'resp': [], 'bspn': [], 'aspn': []}
turn['resp_gen'] = decoded['resp']
turn['bspn_gen'] = turn['bspn'] if self.reader.use_true_curr_bspn else decoded['bspn']
turn['aspn_gen'] = turn['aspn'] if self.reader.use_true_curr_aspn else decoded['aspn']
turn['dspn_gen'] = turn['dspn']
pv_turn['labels'] = inputs['labels'] # all true previous context
pv_turn['resp'] = turn['resp'] if self.reader.use_true_prev_resp else decoded['resp']
if not self.reader.use_true_curr_bspn:
pv_turn['bspn'] = turn['bspn'] if self.reader.use_true_prev_bspn else decoded['bspn']
pv_turn['db'] = turn['db'] if self.reader.use_true_prev_bspn else db
pv_turn['aspn'] = turn['aspn'] if self.reader.use_true_prev_aspn else decoded['aspn']
tmp_dialog_result = self.reader.inverse_transpose_turn(dialog)
result_collection.update(tmp_dialog_result)
# compute tmp scores
results, _ = self.reader.wrap_result_lm(tmp_dialog_result)
bleu, success, match = self.evaluator.validation_metric(results)
score = 0.5 * (success + match) + bleu
pbar.set_description('match: %2.2f success: %2.2f bleu: %2.2f score: %.2f' %
(match, success, bleu, score))
# compute scores
results, _ = self.reader.wrap_result_lm(result_collection)
bleu, success, match = self.evaluator.validation_metric(results)
score = 0.5 * (success + match) + bleu
# log results
metrics_message = 'match: %2.2f success: %2.2f bleu: %2.2f score: %.2f' %\
(match, success, bleu, score)
message_prefix = f"[Infer][{self.epoch}]"
time_cost = f"TIME-{time.time() - begin_time:.3f}"
message = " ".join([message_prefix, metrics_message, time_cost])
self.logger.info(message)
# save results
eval_results = {
'bleu': bleu,
'success': success,
'match': match,
'score': score,
'result': message
}
with open(infer_save_file, "w") as fp:
json.dump(eval_results, fp, indent=2)
self.logger.info(f"Saved inference results to {infer_save_file}")
with open(infer_samples_save_file, "w") as fp:
for sample in results:
line = json.dumps(sample)
fp.write(line)
fp.write('\n')
self.logger.info(f"Saved inference samples to {infer_samples_save_file}")
return
class CamRestTrainer(Trainer):
def __init__(self, model, to_tensor, hparams, logger=None, lr_scheduler=None, optimizer=None,
reader=None, evaluator=None):
super(CamRestTrainer, self).__init__(model, to_tensor, hparams, logger, lr_scheduler, optimizer,
reader, evaluator)
def train_epoch(self, train_data, dev_data):
"""
Train an epoch.
"""
times = []
epoch_step = 0
global_step = 0
self.epoch += 1
self.batch_metrics_tracker.clear()
self.token_metrics_tracker.clear()
num_training_steps = self.reader.set_stats['train']['num_training_steps_per_epoch'] // \
self.gradient_accumulation_steps # similar to the original num_batches
self.model.zero_grad()
data_iterator = self.reader.get_data_iterator(all_batches=train_data)
for batch_idx, dial_batch in enumerate(data_iterator):
pv_batch = []
for turn_num, turn_batch in enumerate(dial_batch):
first_turn = (turn_num == 0)
samples, pv_batch = self.reader.convert_batch_turn(turn_batch, pv_batch, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=samples)
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
# Do a training iteration
start_time = time.time()
metrics = self.model(batch, is_training=True)
loss = metrics["loss"]
self.func_model._optimize(loss, do_update=False, optimizer=self.optimizer)
metrics = {k: v.cpu().detach().numpy() if isinstance(v, torch.Tensor) else v
for k, v in metrics.items()}
token_num = metrics.pop("token_num", None)
elapsed = time.time() - start_time
times.append(elapsed)
epoch_step += 1
batch_metrics = {k: v for k, v in metrics.items() if "token" not in k}
token_metrics = {k: v for k, v in metrics.items() if "token" in k}
self.batch_metrics_tracker.update(batch_metrics, batch_size)
self.token_metrics_tracker.update(token_metrics, token_num)
if (epoch_step % self.gradient_accumulation_steps == 0) or \
(epoch_step == self.reader.set_stats['train']['num_training_steps_per_epoch']):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
global_step += 1
if self.log_steps > 0 and global_step % self.log_steps == 0:
batch_metrics_message = self.batch_metrics_tracker.value()
token_metrics_message = self.token_metrics_tracker.value()
message_prefix = f"[Train][{self.epoch}][{global_step}/{num_training_steps}]"
avg_time = f"AVG_Time-{sum(times[-self.log_steps:]) / self.log_steps:.3f}"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message,
avg_time])
self.logger.info(message)
self.logger.info("-" * 150)
batch_metrics_message = self.batch_metrics_tracker.summary()
token_metrics_message = self.token_metrics_tracker.summary()
message_prefix = f"[Train][{self.epoch}]"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message])
self.logger.info(message)
cur_valid_metric = self.batch_metrics_tracker.get(self.valid_metric_name)
if self.is_decreased_valid_metric:
is_best = cur_valid_metric < self.best_valid_metric
else:
is_best = cur_valid_metric > self.best_valid_metric
if is_best:
self.best_valid_metric = cur_valid_metric
self.save(is_best)
self.logger.info("-" * 150)
return
def infer(self, data_type='test'):
"""
Inference interface.
"""
self.logger.info("Generation starts ...")
infer_save_file = os.path.join(self.save_dir, f"infer_{self.epoch}.result.json")
infer_samples_save_file = os.path.join(self.save_dir, f"infer_samples_{self.epoch}.result.json")
# Inference
result_collection = {}
begin_time = time.time()
eval_data = self.reader.get_eval_data(data_type)
set_stats = self.reader.set_stats[data_type]
self.logger.info("***** Running Evaluation *****")
self.logger.info(" Num Turns = %d", set_stats['num_turns'])
with torch.no_grad():
pbar = tqdm(eval_data)
for dial_idx, dialog in enumerate(pbar):
pv_turn = {}
for turn_idx, turn in enumerate(dialog):
first_turn = (turn_idx == 0)
inputs, prompt_id = self.reader.convert_turn_eval(turn, pv_turn, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=[inputs])
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
if self.reader.use_true_curr_bspn: # generate act, response
max_len = 60
if not self.reader.use_true_curr_aspn:
max_len = 80
outputs = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_r_id, max_gen_len=max_len)
# resp_gen, need to trim previous context
generated = outputs[0].cpu().numpy().tolist()
try:
decoded = self.decode_generated_act_resp(generated)
except ValueError as exception:
self.logger.info(str(exception))
self.logger.info(self.tokenizer.decode(generated))
decoded = {'resp': [], 'bspn': [], 'aspn': []}
else: # predict bspn, access db, then generate act and resp
outputs = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_b_id, max_gen_len=20) # max=14
generated_bs = outputs[0].cpu().numpy().tolist()
bspn_gen = self.decode_generated_bspn(generated_bs)
# check DB result
if self.reader.use_true_db_pointer:
db = turn['db']
else:
db_result = self.reader.bspan_to_DBpointer(self.tokenizer.decode(bspn_gen))
assert isinstance(db_result, str)
db = [self.reader.sos_db_id] + \
self.tokenizer.convert_tokens_to_ids([db_result]) + \
[self.reader.eos_db_id]
prompt_id = self.reader.sos_a_id
prev_input = torch.tensor(bspn_gen + db)
if self.func_model.use_gpu:
prev_input = prev_input.cuda()
outputs_db = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_r_id, max_gen_len=60, # max=48
prev_input=prev_input)
generated_ar = outputs_db[0].cpu().numpy().tolist()
try:
decoded = self.decode_generated_act_resp(generated_ar)
decoded['bspn'] = bspn_gen
except ValueError as exception:
self.logger.info(str(exception))
self.logger.info(self.tokenizer.decode(generated_ar))
decoded = {'resp': [], 'bspn': [], 'aspn': []}
turn['resp_gen'] = decoded['resp']
turn['bspn_gen'] = turn['bspn'] if self.reader.use_true_curr_bspn else decoded['bspn']
turn['aspn_gen'] = turn['aspn'] if self.reader.use_true_curr_aspn else decoded['aspn']
pv_turn['labels'] = inputs['labels'] # all true previous context
pv_turn['resp'] = turn['resp'] if self.reader.use_true_prev_resp else decoded['resp']
if not self.reader.use_true_curr_bspn:
pv_turn['bspn'] = turn['bspn'] if self.reader.use_true_prev_bspn else decoded['bspn']
pv_turn['db'] = turn['db'] if self.reader.use_true_prev_bspn else db
pv_turn['aspn'] = turn['aspn'] if self.reader.use_true_prev_aspn else decoded['aspn']
tmp_dialog_result = self.reader.inverse_transpose_turn(dialog)
result_collection.update(tmp_dialog_result)
# compute tmp scores
# results, _ = self.reader.wrap_result_lm(tmp_dialog_result)
# metrics = self.evaluator.run_metrics(results)
# bleu, match, req_f1, joint_goal = metrics['bleu'], metrics['match'] * 100, \
# metrics['req_f1'] * 100, metrics['joint_goal']
# score = 0.5 * (req_f1 + match) + bleu
# compute scores
results, _ = self.reader.wrap_result_lm(result_collection)
metrics = self.evaluator.run_metrics(results)
bleu, match, req_f1, joint_goal = metrics['bleu'], metrics['match'] * 100, metrics['req_f1'] * 100, \
metrics['joint_goal']
score = 0.5 * (req_f1 + match) + bleu
# log results
metrics_message = 'match: %2.2f req_f1: %2.2f bleu: %2.2f score: %.2f joint_goal: %2.3f' % \
(match, req_f1, bleu, score, joint_goal)
message_prefix = f"[Infer][{self.epoch}]"
time_cost = f"TIME-{time.time() - begin_time:.3f}"
message = " ".join([message_prefix, metrics_message, time_cost])
self.logger.info(message)
# save results
eval_results = {
'bleu': bleu,
'req_f1': req_f1,
'match': match,
'score': score,
'joint_goal': joint_goal,
'result': message
}
with open(infer_save_file, "w") as fp:
json.dump(eval_results, fp, indent=2)
self.logger.info(f"Saved inference results to {infer_save_file}")
with open(infer_samples_save_file, "w") as fp:
for sample in results:
line = json.dumps(sample)
fp.write(line)
fp.write('\n')
self.logger.info(f"Saved inference samples to {infer_samples_save_file}")
return
class KvretTrainer(Trainer):
def __init__(self, model, to_tensor, hparams, logger=None, lr_scheduler=None, optimizer=None,
reader=None, evaluator=None):
super(KvretTrainer, self).__init__(model, to_tensor, hparams, logger, lr_scheduler, optimizer,
reader, evaluator)
def train_epoch(self, train_data, dev_data):
"""
Train an epoch.
"""
times = []
epoch_step = 0
global_step = 0
self.epoch += 1
self.batch_metrics_tracker.clear()
self.token_metrics_tracker.clear()
num_training_steps = self.reader.set_stats['train']['num_training_steps_per_epoch'] // \
self.gradient_accumulation_steps # similar to the original num_batches
self.model.zero_grad()
data_iterator = self.reader.get_data_iterator(all_batches=train_data)
for batch_idx, dial_batch in enumerate(data_iterator):
pv_batch = []
for turn_num, turn_batch in enumerate(dial_batch):
first_turn = (turn_num == 0)
samples, pv_batch = self.reader.convert_batch_turn(turn_batch, pv_batch, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=samples)
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
# Do a training iteration
start_time = time.time()
metrics = self.model(batch, is_training=True)
loss = metrics["loss"]
self.func_model._optimize(loss, do_update=False, optimizer=self.optimizer)
metrics = {k: v.cpu().detach().numpy() if isinstance(v, torch.Tensor) else v
for k, v in metrics.items()}
token_num = metrics.pop("token_num", None)
elapsed = time.time() - start_time
times.append(elapsed)
epoch_step += 1
batch_metrics = {k: v for k, v in metrics.items() if "token" not in k}
token_metrics = {k: v for k, v in metrics.items() if "token" in k}
self.batch_metrics_tracker.update(batch_metrics, batch_size)
self.token_metrics_tracker.update(token_metrics, token_num)
if (epoch_step % self.gradient_accumulation_steps == 0) or \
(epoch_step == self.reader.set_stats['train']['num_training_steps_per_epoch']):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
global_step += 1
if self.log_steps > 0 and global_step % self.log_steps == 0:
batch_metrics_message = self.batch_metrics_tracker.value()
token_metrics_message = self.token_metrics_tracker.value()
message_prefix = f"[Train][{self.epoch}][{global_step}/{num_training_steps}]"
avg_time = f"AVG_Time-{sum(times[-self.log_steps:]) / self.log_steps:.3f}"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message,
avg_time])
self.logger.info(message)
self.logger.info("-" * 150)
batch_metrics_message = self.batch_metrics_tracker.summary()
token_metrics_message = self.token_metrics_tracker.summary()
message_prefix = f"[Train][{self.epoch}]"
message = " ".join([message_prefix, batch_metrics_message, token_metrics_message])
self.logger.info(message)
combined_score = self.infer(data_type='dev')
cur_valid_metric = -combined_score
if self.is_decreased_valid_metric:
is_best = cur_valid_metric < self.best_valid_metric
else:
is_best = cur_valid_metric > self.best_valid_metric
if is_best:
self.best_valid_metric = cur_valid_metric
self.save(is_best)
self.logger.info("-" * 150)
return
def infer(self, data_type='test'):
"""
Inference interface.
"""
self.logger.info("Generation starts ...")
infer_save_file = os.path.join(self.save_dir, f"infer_{self.epoch}.result.json")
infer_samples_save_file = os.path.join(self.save_dir, f"infer_samples_{self.epoch}.result.json")
# Inference
result_collection = {}
begin_time = time.time()
eval_data = self.reader.get_batches(data_type)
data_iterator = self.reader.get_data_iterator(all_batches=eval_data)
set_stats = self.reader.set_stats[data_type]
self.logger.info("***** Running Evaluation *****")
self.logger.info(" Num Turns = %d", set_stats['num_turns'])
with torch.no_grad():
pbar = tqdm(data_iterator)
for dial_idx, dialog in enumerate(pbar):
# pv_turn = {}
pv_turn = []
for turn_idx, turn in enumerate(dialog):
first_turn = (turn_idx == 0)
inputs, prompt_id = self.reader.convert_turn_eval(turn, pv_turn, first_turn)
batch, batch_size = self.reader.collate_fn_multi_turn(samples=inputs)
batch = type(batch)(map(lambda kv: (kv[0], self.to_tensor(kv[1])), batch.items()))
# predict bspn, access db, then generate act and resp
                    max_len = 60 # max(len(turn['resp'] + turn['bspn'])) = 48
outputs = self.func_model.infer(inputs=batch, start_id=prompt_id,
eos_id=self.reader.eos_r_id, max_gen_len=max_len)
generated_br = outputs.cpu().numpy().tolist()
decodeds = []
for s in generated_br:
try:
decoded = self.decode_generated_bspn_resp(s)
except ValueError as exception:
self.logger.info(str(exception))
self.logger.info(self.tokenizer.decode(s))
decoded = {'resp': [], 'bspn': []}
decodeds.append(decoded)
turn['resp_gen'] = [decoded['resp'] for decoded in decodeds]
turn['bspn_gen'] = [decoded['bspn'] for decoded in decodeds]
for i in range(batch_size):
pv_turn.append({
'labels': inputs[i]['labels'],
'resp': turn['resp'][i] if self.reader.use_true_prev_resp else decodeds[i]['resp'],
'bspn': turn['bspn'][i] if self.reader.use_true_prev_bspn else decodeds[i]['bspn']
})
tmp_dialog_result = self.reader.inverse_transpose_batch(dialog)
result_collection.update(tmp_dialog_result)
# compute tmp scores
# results, _ = self.reader.wrap_result_lm(tmp_dialog_result)
# metrics = self.evaluator.run_metrics(results)
# bleu, match, req_f1, joint_goal = metrics['bleu'], metrics['match'] * 100, \
# metrics['req_f1'] * 100, metrics['joint_goal']
# score = 0.5 * (req_f1 + match) + bleu
# compute scores
results, _ = self.reader.wrap_result_lm(result_collection)
metrics = self.evaluator.run_metrics(results)
bleu, match, req_f1, joint_goal = metrics['bleu'], metrics['match'] * 100, metrics['req_f1'] * 100, \
metrics['joint_goal']
score = 0.5 * (req_f1 + match) + bleu
# log results
metrics_message = 'match: %2.2f req_f1: %2.2f bleu: %2.2f score: %.2f joint_goal: %2.3f' % \
(match, req_f1, bleu, score, joint_goal)
message_prefix = f"[Infer][{self.epoch}]"
time_cost = f"TIME-{time.time() - begin_time:.3f}"
message = " ".join([message_prefix, metrics_message, time_cost])
self.logger.info(message)
# save results
eval_results = {
'bleu': bleu,
'req_f1': req_f1,
'match': match,
'score': score,
'joint_goal': joint_goal,
'result': message
}
with open(infer_save_file, "w") as fp:
json.dump(eval_results, fp, indent=2)
self.logger.info(f"Saved inference results to {infer_save_file}")
with open(infer_samples_save_file, "w") as fp:
for sample in results:
line = json.dumps(sample)
fp.write(line)
fp.write('\n')
self.logger.info(f"Saved inference samples to {infer_samples_save_file}")
return score
|
{"hexsha": "15602c3b1b8f0a8b04acdec5d8f0567474cc6506", "size": 47085, "ext": "py", "lang": "Python", "max_stars_repo_path": "galaxy/trainer.py", "max_stars_repo_name": "siat-nlp/GALAXY", "max_stars_repo_head_hexsha": "b1c23f588a37a588b0de7e16f5bcdbeb8a517fd5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2021-12-09T13:43:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T07:11:52.000Z", "max_issues_repo_path": "galaxy/trainer.py", "max_issues_repo_name": "siat-nlp/GALAXY", "max_issues_repo_head_hexsha": "b1c23f588a37a588b0de7e16f5bcdbeb8a517fd5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-14T06:08:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-17T02:25:36.000Z", "max_forks_repo_path": "galaxy/trainer.py", "max_forks_repo_name": "siat-nlp/GALAXY", "max_forks_repo_head_hexsha": "b1c23f588a37a588b0de7e16f5bcdbeb8a517fd5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-15T09:05:12.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T06:24:29.000Z", "avg_line_length": 48.3915724563, "max_line_length": 113, "alphanum_fraction": 0.5563980036, "include": true, "reason": "import numpy", "num_tokens": 9541}
|
[STATEMENT]
lemma obs_consistent_med_a0m1a_is [iff]:
"obs_consistent R_a0m1a_is med_a0m1a_is a0i m1a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. obs_consistent R_a0m1a_is med_a0m1a_is a0i m1a
[PROOF STEP]
by (auto simp add: obs_consistent_def R_a0m1a_is_def med_a0m1a_is_def
a0i_def m1a_def)
|
{"llama_tokens": 166, "file": "Security_Protocol_Refinement_Key_establish_m1_keydist_iirn", "length": 1}
|
import os
import numpy as np
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
# Make the local utility modules importable when the script is run from elsewhere
# sys.path.append(BASE_DIR)
# sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import data_prep_util
import indoor3d_util
# Constants
data_dir = os.path.join(ROOT_DIR, 'data')
indoor3d_data_dir = os.path.join(data_dir, 'stanford_indoor3d')
NUM_POINT = 4096
H5_BATCH_SIZE = 1000
data_dim = [NUM_POINT, 9]
label_dim = [NUM_POINT]
data_dtype = 'float32'
label_dtype = 'uint8'
# Set paths
filelist = os.path.join(BASE_DIR, 'meta/all_data_label.txt')
data_label_files = [os.path.join(indoor3d_data_dir, line.rstrip()) for line in open(filelist)]
output_dir = os.path.join(data_dir, 'indoor3d_sem_seg_hdf5_data')
if not os.path.exists(output_dir):
os.mkdir(output_dir)
output_filename_prefix = os.path.join(output_dir, 'ply_data_all')
output_room_filelist = os.path.join(output_dir, 'room_filelist.txt')
fout_room = open(output_room_filelist, 'w')
# --------------------------------------
# ----- BATCH WRITE TO HDF5 -----
# --------------------------------------
batch_data_dim = [H5_BATCH_SIZE] + data_dim
batch_label_dim = [H5_BATCH_SIZE] + label_dim
h5_batch_data = np.zeros(batch_data_dim, dtype=np.float32)
h5_batch_label = np.zeros(batch_label_dim, dtype=np.uint8)
buffer_size = 0 # state: record how many samples are currently in buffer
h5_index = 0 # state: the next h5 file to save
def insert_batch(data, label, last_batch=False):
global h5_batch_data, h5_batch_label
global buffer_size, h5_index
data_size = data.shape[0]
# If there is enough space, just insert
if buffer_size + data_size <= h5_batch_data.shape[0]:
h5_batch_data[buffer_size:buffer_size+data_size, ...] = data
h5_batch_label[buffer_size:buffer_size+data_size] = label
buffer_size += data_size
else: # not enough space
capacity = h5_batch_data.shape[0] - buffer_size
        assert capacity >= 0
if capacity > 0:
h5_batch_data[buffer_size:buffer_size+capacity, ...] = data[0:capacity, ...]
h5_batch_label[buffer_size:buffer_size+capacity, ...] = label[0:capacity, ...]
# Save batch data and label to h5 file, reset buffer_size
h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
data_prep_util.save_h5(h5_filename, h5_batch_data, h5_batch_label, data_dtype, label_dtype)
print('Stored {0} with size {1}'.format(h5_filename, h5_batch_data.shape[0]))
h5_index += 1
buffer_size = 0
# recursive call
insert_batch(data[capacity:, ...], label[capacity:, ...], last_batch)
if last_batch and buffer_size > 0:
h5_filename = output_filename_prefix + '_' + str(h5_index) + '.h5'
data_prep_util.save_h5(h5_filename, h5_batch_data[0:buffer_size, ...], h5_batch_label[0:buffer_size, ...], data_dtype, label_dtype)
print('Stored {0} with size {1}'.format(h5_filename, buffer_size))
h5_index += 1
buffer_size = 0
return
sample_cnt = 0
for i, data_label_filename in enumerate(data_label_files):
print(data_label_filename)
data, label = indoor3d_util.room2blocks_wrapper_normalized(
data_label_filename, NUM_POINT, block_size=1.0, stride=0.5,
random_sample=False, sample_num=None)
print('{0}, {1}'.format(data.shape, label.shape))
for _ in range(data.shape[0]):
fout_room.write(os.path.basename(data_label_filename)[0:-4]+'\n')
sample_cnt += data.shape[0]
insert_batch(data, label, i == len(data_label_files)-1)
fout_room.close()
print("Total samples: {0}".format(sample_cnt))
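# --- Illustrative sketch (not part of the original script) -----------------
# `data_prep_util.save_h5` is an external helper that is not shown here; a
# plausible h5py-based equivalent, assuming it simply writes `data` and
# `label` datasets with the requested dtypes, could look like this:
def save_h5_sketch(h5_filename, data, label, data_dtype='float32', label_dtype='uint8'):
    import h5py  # assumed dependency of the real helper as well
    with h5py.File(h5_filename, 'w') as f:
        f.create_dataset('data', data=data, dtype=data_dtype, compression='gzip')
        f.create_dataset('label', data=label, dtype=label_dtype, compression='gzip')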
|
{"hexsha": "1807a33fe8d1136f1ffc9e74f2771754ab95747a", "size": 3641, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/s3dis/third_party/gen_indoor3d_h5.py", "max_stars_repo_name": "corochann/chainer-pointnet", "max_stars_repo_head_hexsha": "4b0350122c6a704ebea9bf206896a6f18e1ab4d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2018-06-01T21:10:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-14T15:42:33.000Z", "max_issues_repo_path": "experiments/s3dis/third_party/gen_indoor3d_h5.py", "max_issues_repo_name": "KosukeArase/chainer-pointnet", "max_issues_repo_head_hexsha": "4b0350122c6a704ebea9bf206896a6f18e1ab4d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-07-20T10:16:07.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-01T01:36:52.000Z", "max_forks_repo_path": "experiments/s3dis/third_party/gen_indoor3d_h5.py", "max_forks_repo_name": "KosukeArase/chainer-pointnet", "max_forks_repo_head_hexsha": "4b0350122c6a704ebea9bf206896a6f18e1ab4d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-08-01T07:05:41.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T06:07:00.000Z", "avg_line_length": 40.9101123596, "max_line_length": 139, "alphanum_fraction": 0.6945893985, "include": true, "reason": "import numpy", "num_tokens": 976}
|
import pandas as pd
import numpy as np
def main(args):
dates = pd.date_range('20130101', periods=2)
df = pd.DataFrame(np.random.randn(2,2), index=dates, columns=list('AB'))
print(df)
return df.to_dict('split')
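# Hypothetical illustration of the 'split' orientation returned above: it is a
# dict with 'index', 'columns' and 'data' keys, e.g.
#   {'index': [Timestamp('2013-01-01'), Timestamp('2013-01-02')],
#    'columns': ['A', 'B'],
#    'data': [[0.47, -1.23], [0.88, 0.05]]}
if __name__ == '__main__':
    print(main({}))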
|
{"hexsha": "2286ff4d2004051539a4cf15970b63321f3a4c5b", "size": 227, "ext": "py", "lang": "Python", "max_stars_repo_path": "python3IBM/pandas.py", "max_stars_repo_name": "csantanapr/openwhisk-demos", "max_stars_repo_head_hexsha": "d5613658678363619f7af345f49ba0bf8727d9ed", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python3IBM/pandas.py", "max_issues_repo_name": "csantanapr/openwhisk-demos", "max_issues_repo_head_hexsha": "d5613658678363619f7af345f49ba0bf8727d9ed", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-10-10T02:35:25.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-10T02:39:03.000Z", "max_forks_repo_path": "python3IBM/pandas.py", "max_forks_repo_name": "csantanapr/openwhisk-demos", "max_forks_repo_head_hexsha": "d5613658678363619f7af345f49ba0bf8727d9ed", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-09-18T20:15:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-03T07:37:19.000Z", "avg_line_length": 25.2222222222, "max_line_length": 76, "alphanum_fraction": 0.6740088106, "include": true, "reason": "import numpy", "num_tokens": 63}
|
/* CirKit: A circuit toolkit
* Copyright (C) 2009-2015 University of Bremen
* Copyright (C) 2015-2017 EPFL
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* @file logic.hpp
*
* @brief Logic operations
*
* @author Mathias Soeken
* @since 2.2
*/
#ifndef LOGIC_HPP
#define LOGIC_HPP
#include <boost/assign/std/vector.hpp>
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <classical/sat/sat_solver.hpp>
using namespace boost::assign;
namespace cirkit
{
template<class S>
inline void blocking_and( S& solver, int sel, int a, int b, int c )
{
add_clause( solver )( {-sel, a, -c} );
add_clause( solver )( {-sel, b, -c} );
add_clause( solver )( {-sel, -a, -b, c} );
}
template<class S>
void blocking_and( S& solver, int sel, const clause_t& x, int c )
{
using boost::adaptors::transformed;
for ( auto l : x )
{
add_clause( solver )( {-sel, l, -c} );
}
clause_t clause = { -sel };
boost::push_back( clause, x | transformed( []( int l ) { return -l; } ) );
clause += c;
add_clause( solver )( clause );
}
template<class S>
inline void blocking_or( S& solver, int sel, int a, int b, int c )
{
add_clause( solver )( {-sel, -a, c} );
add_clause( solver )( {-sel, -b, c} );
add_clause( solver )( {-sel, a, b, -c} );
}
template<class S>
void blocking_or( S& solver, int sel, const clause_t& x, int c )
{
for ( auto l : x )
{
add_clause( solver )( {-sel, -l, c} );
}
clause_t clause( x.begin(), x.end() );
clause += -sel;
clause += -c;
add_clause( solver )( clause );
}
template<class S>
inline void blocking_xor( S& solver, int sel, int a, int b, int c )
{
add_clause( solver )( {-sel, -a, b, c} );
add_clause( solver )( {-sel, a, -b, c} );
add_clause( solver )( {-sel, a, b, -c} );
add_clause( solver )( {-sel, -a, -b, -c} );
}
template<class S>
inline void blocking_xnor( S& solver, int sel, int a, int b, int c )
{
add_clause( solver )( {-sel, -a, b, -c} );
add_clause( solver )( {-sel, a, -b, -c} );
add_clause( solver )( {-sel, a, b, c} );
add_clause( solver )( {-sel, -a, -b, c} );
}
template<class S>
inline void logic_and( S& solver, int a, int b, int c )
{
add_clause( solver )( {a, -c} );
add_clause( solver )( {b, -c} );
add_clause( solver )( {-a, -b, c} );
}
template<class S>
void logic_and( S& solver, const clause_t& x, int c )
{
using boost::adaptors::transformed;
for ( auto l : x )
{
add_clause( solver )( {l, -c} );
}
clause_t clause;
boost::push_back( clause, x | transformed( []( int l ) { return -l; } ) );
clause += c;
add_clause( solver )( clause );
}
template<class S>
inline void logic_or( S& solver, int a, int b, int c )
{
add_clause( solver )( {-a, c} );
add_clause( solver )( {-b, c} );
add_clause( solver )( {a, b, -c} );
}
template<class S>
void logic_or( S& solver, const clause_t& x, int c )
{
for ( auto l : x )
{
add_clause( solver )( {-l, c} );
}
clause_t clause( x.begin(), x.end() );
clause += -c;
add_clause( solver )( clause );
}
template<class S>
inline void logic_xor( S& solver, int a, int b, int c )
{
add_clause( solver )( {-a, b, c} );
add_clause( solver )( {a, -b, c} );
add_clause( solver )( {a, b, -c} );
add_clause( solver )( {-a, -b, -c} );
}
template<class S>
inline void logic_xnor( S& solver, int a, int b, int c )
{
add_clause( solver )( {-a, b, -c} );
add_clause( solver )( {a, -b, -c} );
add_clause( solver )( {a, b, c} );
add_clause( solver )( {-a, -b, c} );
}
}
#endif
// Local Variables:
// c-basic-offset: 2
// eval: (c-set-offset 'substatement-open 0)
// eval: (c-set-offset 'innamespace 0)
// End:
|
{"hexsha": "1d8ffc862a5ecca776a5cbcb689e824ed3682767", "size": 4702, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/classical/sat/operations/logic.hpp", "max_stars_repo_name": "eletesta/cirkit", "max_stars_repo_head_hexsha": "6d0939798ea25cecf92306ce796be154139b94f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/classical/sat/operations/logic.hpp", "max_issues_repo_name": "eletesta/cirkit", "max_issues_repo_head_hexsha": "6d0939798ea25cecf92306ce796be154139b94f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/classical/sat/operations/logic.hpp", "max_forks_repo_name": "eletesta/cirkit", "max_forks_repo_head_hexsha": "6d0939798ea25cecf92306ce796be154139b94f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8351648352, "max_line_length": 76, "alphanum_fraction": 0.6297320289, "num_tokens": 1415}
|
\chapter{\label{chapter3} The Abstract Syntax Tree (AST)}
The abstract class \texttt{ASTNode.cs} is the building block of the data structure used as the Intermediate Representation (IR) for the \fwap language. The Abstract Syntax Tree (AST), assembled with the methods provided by the \texttt{ASTGenerator.cs} class, is the input for both the interpreter (see Chapter~\ref{chapter4}) and the \fsharp code generator (see Chapter~\ref{chapter5}). As mentioned before, it is up to the parser, guided by the semantic annotations of Coco/R, to build the AST corresponding to a given \fwap source file.
\section{\texttt{Node}'s and \texttt{Term}'s}
The subclasses of \texttt{ASTNode.cs} act as a single node of the AST of a program and can be:
\begin{itemize}
\item a \texttt{Node}, meaning an internal node of the tree
\item a \texttt{Term}, meaning a leaf of the tree.
\end{itemize}
Every \texttt{Node} is identified by a label that describes its function and may hold a list of child nodes (\texttt{List<ASTNode> children}). All the allowed labels are listed below.\\
\begin{lstlisting}[caption=Labels for \texttt{Node}s.]
public enum Labels {Program, Main, Afun, FunDecl, For,
If, While, Block, Assig, Decl, AssigDecl, Return,
Async, Print, Read, Dsync, Plus, Mul, Minus, Div, Gt,
Gte, Lt, Lte, Eq, NotEq, And, Or, Negativ, Bracket,
FunCall};
\end{lstlisting}
A \texttt{Term} contains a generic \texttt{object} that can be an integer, a boolean, a string, or an \texttt{Obj} acting as a variable (see Section~\ref{typecheck}).
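For illustration only, the essence of the two node kinds can be sketched as follows (a minimal Python sketch; the names and fields are illustrative and do not come from the actual C\# implementation):
\begin{lstlisting}[caption=Illustrative sketch of the two node kinds.]
class Node:                       # internal node of the AST
    def __init__(self, label, children=None):
        self.label = label        # one of the Labels values
        self.children = children or []

class Term:                       # leaf of the AST
    def __init__(self, value):
        self.value = value        # int, bool, string or Obj (variable)
\end{lstlisting}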
\section{Building the AST}
The following figures illustrate how some labelled \texttt{Node}'s are built by the parser. A fixed left-to-right order of the children was necessary for implementing the interpreter and the \fsharp compiler.
\newpage
\begin{figure}
\centering
\includegraphics[width=0.90\textwidth]{C:/Users/Stefano/Documents/GitHub/APproject/docs/nodesAST/Program.jpg}
\caption{Order of the children of some labeled \texttt{Node}'s; curly braces indicate zero or more copies of a certain node, round braces a choice among a few options.}
\label{fig:Program}
\end{figure}
|
{"hexsha": "dd1b2edb89f8adf72c085285384309e8f858ac0f", "size": 2204, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/chapters/ast.tex", "max_stars_repo_name": "MCSN-project2014/APproject", "max_stars_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-01-06T21:30:55.000Z", "max_stars_repo_stars_event_max_datetime": "2015-01-06T21:30:55.000Z", "max_issues_repo_path": "docs/chapters/ast.tex", "max_issues_repo_name": "MCSN-project2014/APproject", "max_issues_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 25, "max_issues_repo_issues_event_min_datetime": "2015-01-01T18:07:39.000Z", "max_issues_repo_issues_event_max_datetime": "2015-01-14T15:11:28.000Z", "max_forks_repo_path": "docs/chapters/ast.tex", "max_forks_repo_name": "MCSN-project2014/APproject", "max_forks_repo_head_hexsha": "6bdfbedfa0dc8fec7e25b81665624c6aedc93e3d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 58.0, "max_line_length": 562, "alphanum_fraction": 0.7631578947, "num_tokens": 596}
|
import numpy as np
from PIL import Image
import numbers
from collections.abc import Sequence
from typing import Tuple, List, Optional
import random
import torch
from torchvision import transforms as T
from torchvision.transforms import functional as F
def _check_sequence_input(x, name, req_sizes):
msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes])
if not isinstance(x, Sequence):
raise TypeError("{} should be a sequence of length {}.".format(name, msg))
if len(x) not in req_sizes:
raise ValueError("{} should be sequence of length {}.".format(name, msg))
def _setup_angle(x, name, req_sizes=(2, )):
if isinstance(x, numbers.Number):
if x < 0:
raise ValueError("If {} is a single number, it must be positive.".format(name))
x = [-x, x]
else:
_check_sequence_input(x, name, req_sizes)
return [float(d) for d in x]
def pad_if_smaller(img, size, fill=0):
min_size = min(img.size)
if min_size < size:
ow, oh = img.size
padh = size - oh if oh < size else 0
padw = size - ow if ow < size else 0
img = F.pad(img, (0, 0, padw, padh), fill=fill)
return img
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomResize(object):
def __init__(self, min_size, max_size=None):
self.min_size = min_size
if max_size is None:
max_size = min_size
self.max_size = max_size
def __call__(self, image, target):
size = random.randint(self.min_size, self.max_size)
image = F.resize(image, size)
target = F.resize(target, size, interpolation=Image.NEAREST)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, flip_prob):
self.flip_prob = flip_prob
def __call__(self, image, target):
if random.random() < self.flip_prob:
image = F.hflip(image)
target = F.hflip(target)
return image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = pad_if_smaller(image, self.size)
target = pad_if_smaller(target, self.size, fill=255)
crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
image = F.crop(image, *crop_params)
target = F.crop(target, *crop_params)
return image, target
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, image, target):
image = F.center_crop(image, self.size)
target = F.center_crop(target, self.size)
return image, target
class ToTensor(object):
def __call__(self, image, target):
image = F.to_tensor(image)
target = torch.as_tensor(np.array(target), dtype=torch.int64)
return image, target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, target
class RandomRotation(object):
def __init__(self, degrees, interpolation=Image.NEAREST, expand=False, center=None, fill=0, resample=None):
self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, ))
self.center = center
self.resample = self.interpolation = interpolation
self.expand = expand
if fill is None:
fill = 0
elif not isinstance(fill, (Sequence, numbers.Number)):
raise TypeError("Fill should be either a sequence or a number.")
self.fill = fill
@staticmethod
def get_params(degrees: List[float]) -> float:
angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item())
return angle
def __call__(self, image, target):
fill = self.fill
if torch.is_tensor(image):
if isinstance(fill, (int, float)):
fill = [float(fill)] * 3
else:
fill = [float(f) for f in fill]
angle = self.get_params(self.degrees)
image = F.rotate(image, angle, self.resample, self.expand, self.center, fill)
return image, target
class ColorJitter(object):
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
self.brightness = self._check_input(brightness, 'brightness')
self.contrast = self._check_input(contrast, 'contrast')
self.saturation = self._check_input(saturation, 'saturation')
self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), clip_first_on_zero=False)
@staticmethod
def get_params(brightness: Optional[List[float]],
contrast: Optional[List[float]],
saturation: Optional[List[float]],
hue: Optional[List[float]]
) -> Tuple[torch.Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]:
fn_idx = torch.randperm(4)
b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1]))
c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1]))
s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1]))
h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1]))
return fn_idx, b, c, s, h
def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
if isinstance(value, numbers.Number):
if value < 0:
raise ValueError("If {} is a single number, it must be non negative.".format(name))
value = [center - float(value), center + float(value)]
if clip_first_on_zero:
value[0] = max(value[0], 0.0)
elif isinstance(value, (tuple, list)) and len(value) == 2:
if not bound[0] <= value[0] <= value[1] <= bound[1]:
raise ValueError("{} values should be between {}".format(name, bound))
else:
raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))
# if value is 0 or (1., 1.) for brightness/contrast/saturation
# or (0., 0.) for hue, do nothing
if value[0] == value[1] == center:
value = None
return value
def __call__(self, image, target):
fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)
for fn_id in fn_idx:
if fn_id == 0 and brightness_factor is not None:
image = F.adjust_brightness(image, brightness_factor)
elif fn_id == 1 and contrast_factor is not None:
image = F.adjust_contrast(image, contrast_factor)
elif fn_id == 2 and saturation_factor is not None:
image = F.adjust_saturation(image, saturation_factor)
elif fn_id == 3 and hue_factor is not None:
image = F.adjust_hue(image, hue_factor)
return image, target
class ColorJitterGrid(object):
def __init__(self, brightness=0, contrast=0, saturation=0, hue=0, grid_size=20):
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
self.grid_size = grid_size
def __call__(self, image, target):
img_tensor, _ = ToTensor()(image, target)
img_size = (img_tensor.shape[1], img_tensor.shape[2])
portion_size = (self.grid_size, self.grid_size)
x1 = random.randint(0, img_size[0]-portion_size[0]-1)
y1 = random.randint(0, img_size[1]-portion_size[1]-1)
x2, y2 = x1+portion_size[0], y1+portion_size[1]
grid = torch.clone(img_tensor[:, x1:x2, y1:y2])
jitter = ColorJitter(contrast=(self.contrast, self.contrast))
grid, _ = jitter(grid, target)
img_tensor[:, x1:x2, y1:y2] = grid
        image = T.ToPILImage()(img_tensor).convert("RGB")
return image, target
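# --- Illustrative usage (not part of the original module) ------------------
# A minimal sketch of how these paired transforms are typically composed for a
# segmentation sample; the image/target below are synthetic placeholders.
def _example_pipeline():
    paired_transforms = Compose([
        RandomResize(256, 512),
        RandomHorizontalFlip(0.5),
        RandomCrop(224),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    image = Image.new("RGB", (400, 300))
    target = Image.new("L", (400, 300))
    image_tensor, target_tensor = paired_transforms(image, target)
    return image_tensor.shape, target_tensor.shape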
|
{"hexsha": "3a4e30504b9edbb1dcbafa5a599b8663d30fca2d", "size": 8420, "ext": "py", "lang": "Python", "max_stars_repo_path": "semseg/transforms.py", "max_stars_repo_name": "rainarit/segmentation-benchmark", "max_stars_repo_head_hexsha": "bbdadf56ed2ff1049e7dd5925f61f524d0440401", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-01-12T21:01:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T16:40:09.000Z", "max_issues_repo_path": "semseg/transforms.py", "max_issues_repo_name": "rainarit/segmentation-benchmark", "max_issues_repo_head_hexsha": "bbdadf56ed2ff1049e7dd5925f61f524d0440401", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "semseg/transforms.py", "max_forks_repo_name": "rainarit/segmentation-benchmark", "max_forks_repo_head_hexsha": "bbdadf56ed2ff1049e7dd5925f61f524d0440401", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2566371681, "max_line_length": 158, "alphanum_fraction": 0.6263657957, "include": true, "reason": "import numpy", "num_tokens": 2058}
|
"""
Module containing tasks for morphological operations
Credits:
Copyright (c) 2017-2019 Matej Aleksandrov, Matej Batič, Andrej Burja, Eva Erzin (Sinergise)
Copyright (c) 2017-2019 Grega Milčinski, Matic Lubej, Devis Peresutti, Jernej Puc, Tomislav Slijepčević (Sinergise)
Copyright (c) 2017-2019 Blaž Sovdat, Nejc Vesel, Jovan Višnjić, Anže Zupanc, Lojze Žust (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import skimage.morphology
import numpy as np
from eolearn.core import EOTask
class ErosionTask(EOTask):
"""
The task performs an erosion to the provided mask
:param mask_feature: The mask which is to be eroded
:type mask_feature: (FeatureType, str)
:param disk_radius: Radius of the erosion disk (in pixels). Default is set to `1`
:type disk_radius: int
:param erode_labels: List of labels to erode. If `None`, all unique labels are eroded. Default is `None`
:type erode_labels: list(int)
:param no_data_label: Value used to replace eroded pixels. Default is set to `0`
:type no_data_label: int
"""
def __init__(self, mask_feature, disk_radius=1, erode_labels=None, no_data_label=0):
if not isinstance(disk_radius, int) or disk_radius is None or disk_radius < 1:
raise ValueError('Disk radius should be an integer larger than 0!')
self.mask_type, self.mask_name, self.new_mask_name = next(self._parse_features(mask_feature, new_names=True)())
self.disk = skimage.morphology.disk(disk_radius)
self.erode_labels = erode_labels
self.no_data_label = no_data_label
def execute(self, eopatch):
feature_array = eopatch[(self.mask_type, self.mask_name)].squeeze().copy()
all_labels = np.unique(feature_array)
erode_labels = self.erode_labels if self.erode_labels else all_labels
erode_labels = set(erode_labels) - {self.no_data_label}
other_labels = set(all_labels) - set(erode_labels) - {self.no_data_label}
eroded_masks = [skimage.morphology.binary_erosion(feature_array == label, self.disk) for label in erode_labels]
other_masks = [feature_array == label for label in other_labels]
merged_mask = np.logical_or.reduce(eroded_masks + other_masks, axis=0)
feature_array[~merged_mask] = self.no_data_label
eopatch[(self.mask_type, self.new_mask_name)] = np.expand_dims(feature_array, axis=-1)
return eopatch
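# --- Illustrative sketch (not part of eolearn) ------------------------------
# The label-wise erosion implemented above can be reproduced with plain
# numpy/skimage, independent of EOPatch; labels that are eroded away fall
# back to `no_data_label`.
def erode_labels_sketch(label_map, disk_radius=1, erode_labels=None, no_data_label=0):
    disk = skimage.morphology.disk(disk_radius)
    labels = np.unique(label_map)
    erode = set(erode_labels if erode_labels is not None else labels) - {no_data_label}
    keep = set(labels) - erode - {no_data_label}
    eroded = [skimage.morphology.binary_erosion(label_map == lbl, disk) for lbl in erode]
    kept = [label_map == lbl for lbl in keep]
    merged = np.logical_or.reduce(eroded + kept, axis=0)
    result = label_map.copy()
    result[~merged] = no_data_label
    return result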
|
{"hexsha": "5e6f993e371bab3494d27cab64088feaa07448e2", "size": 2506, "ext": "py", "lang": "Python", "max_stars_repo_path": "geometry/eolearn/geometry/morphology.py", "max_stars_repo_name": "rpitonak/eo-learn", "max_stars_repo_head_hexsha": "246616903c600f9d85d8d7bfcaef3785356b9fd1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-30T12:40:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-30T12:40:50.000Z", "max_issues_repo_path": "geometry/eolearn/geometry/morphology.py", "max_issues_repo_name": "rpitonak/eo-learn", "max_issues_repo_head_hexsha": "246616903c600f9d85d8d7bfcaef3785356b9fd1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "geometry/eolearn/geometry/morphology.py", "max_forks_repo_name": "rpitonak/eo-learn", "max_forks_repo_head_hexsha": "246616903c600f9d85d8d7bfcaef3785356b9fd1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7666666667, "max_line_length": 119, "alphanum_fraction": 0.7262569832, "include": true, "reason": "import numpy", "num_tokens": 664}
|
C++---------------------------------------------------------------------
C Set of routines to determine automatic center of galaxy
C and automatic sky level for Fitelli and other programs
C Contains: AUTO_CENTER and AUTO_SKY
C------------------------------------------------------------------------
C Subroutine AUTO_CENTER :
C Assumes that the center of the galaxy is close to the center of the image
C
C------------------------------------------------------------------------
SUBROUTINE AUTO_CENTER(IMAGE,NX,NY,IDIM,OX,OY)
REAL*4 IMAGE(IDIM,*),OX,OY,VALMAX,SUMX,SUMY,SUMI
INTEGER*4 IXMIN,IXMAX,IYMIN,IYMAX,IOX,IOY
C First determines working window:
IXMIN=NX/4
IXMAX=3*NX/4
IYMIN=NY/4
IYMAX=3*NY/4
C Scan window to look for maximum:
IOX=IXMIN
IOY=IYMIN
VALMAX=IMAGE(IOX,IOY)
DO IY=IYMIN,IYMAX
DO IX=IXMIN,IXMAX
IF(IMAGE(IX,IY).GT.VALMAX)THEN
VALMAX=IMAGE(IX,IY)
IOX=IX
IOY=IY
ENDIF
END DO
END DO
C More acurate determination with barycenter:
SUMX=0.
SUMY=0.
SUMI=0.
DO IY=IOY-1,IOY+1
DO IX=IOX-1,IOX+1
SUMX = SUMX + FLOAT(IX)*IMAGE(IX,IY)
SUMY = SUMY + FLOAT(IY)*IMAGE(IX,IY)
SUMI = SUMI + IMAGE(IX,IY)
END DO
END DO
C
IF(SUMI.EQ.0.)THEN
OX=IOX
OY=IOY
ELSE
OX = SUMX/SUMI
OY = SUMY/SUMI
ENDIF
RETURN
END
C------------------------------------------------------------------------
C Subroutine AUTO_SKY :
C Works out the sky level and the noise of a CCD image
C
C Calculates the sky level of the image in 4 edge zones :
C
C Zone 1 : bottom left
C I=NX/30+1,NX/6+1 J=NY/30+1,NY/6+1
C Zone 2 : bottom right
C I=NX-NX/6,NX-NX/30 J=NY/30+1,NY/6+1
C Zone 3 : top left
C I=NX/30+1,NX/6+1 J=NY-NY/6,NY-NY/30
C Zone 4 : top right
C I=NX-NX/6,NX-NX/30 J=NY-NY/6,NY-NY/30
C
C INPUT :
C IMAGE(NX,NY) REAL*4 declared as (IDIM,*)
C
C OUTPUT :
C SKY, SIGMA
C
C JLP
C Version 04-11-93
C------------------------------------------------------------------------
SUBROUTINE AUTO_SKY(IMAGE,NX,NY,IDIM,SKY,SIGMA)
REAL*4 IMAGE(IDIM,*),SKY,SIGMA,STATUS
REAL*8 SUM(4),SUMSQ(4)
REAL*4 SIG(4),XMEAN(4),VALMIN(4),VALMAX(4),COEF(4)
REAL*4 XNUMB(4)
INTEGER*4 IMIN(4),IMAX(4),JMIN(4),JMAX(4)
C Setting up the boundaries of the 4 areas:
IF(NX.LT.6) THEN
I1=0
I2=1
ELSE
I1=NX/30
I2=NX/6
ENDIF
IMIN(1)=I1+1
IMIN(2)=NX-I2+1
IMIN(3)=I1+1
IMIN(4)=NX-I2+1
IMAX(1)=I2
IMAX(2)=NX-I1
IMAX(3)=I2
IMAX(4)=NX-I1
IF(NY.LT.6) THEN
J1=0
J2=1
ELSE
J1=NY/30
J2=NY/6
ENDIF
JMIN(1)=J1+1
JMIN(2)=J1+1
JMIN(3)=NY-J2+1
JMIN(4)=NY-J2+1
JMAX(1)=J2
JMAX(2)=J2
JMAX(3)=NY-J1
JMAX(4)=NY-J1
C First loop on the four areas to determine mean and standard deviation :
DO KK=1,4
SUM(KK)=0.
SUMSQ(KK)=0.
XNUMB(KK)=0.
DO J=JMIN(KK),JMAX(KK)
DO I=IMIN(KK),IMAX(KK)
SUM(KK)=IMAGE(I,J)+SUM(KK)
SUMSQ(KK)=IMAGE(I,J)*IMAGE(I,J)+SUMSQ(KK)
XNUMB(KK)=XNUMB(KK)+1.
END DO
END DO
IF(XNUMB(KK).NE.0) THEN
XMEAN(KK)=SUM(KK)/XNUMB(KK)
WORK=(SUMSQ(KK)/XNUMB(KK))-(XMEAN(KK)*XMEAN(KK))
SIG(KK)=SQRT(WORK)
VALMIN(KK)=XMEAN(KK)-2.*SIG(KK)
VALMAX(KK)=XMEAN(KK)+2.*SIG(KK)
ELSE
WRITE(6,76)
76 FORMAT('AUTO_SKY: Unable to compute the sky level',/,
1 ' Empty area, because image is too small')
SKY=0.
SIGMA=0.
RETURN
ENDIF
END DO
C WRITE(6,*) ' FIRST LOOP'
C WRITE(6,75) (XMEAN(K),K=1,4),(SIG(K),K=1,4)
C Second loop on the four zones to discard the points over 2-sigma
DO KK=1,4
SUM(KK)=0.
SUMSQ(KK)=0.
XNUMB(KK)=0
DO J=JMIN(KK),JMAX(KK)
DO I=IMIN(KK),IMAX(KK)
WORK=IMAGE(I,J)
IF(WORK.GT.VALMIN(KK).AND.WORK.LT.VALMAX(KK))THEN
XNUMB(KK)=XNUMB(KK)+1.
SUM(KK)=WORK+SUM(KK)
SUMSQ(KK)=WORK*WORK+SUMSQ(KK)
ENDIF
END DO
END DO
END DO
C Test if bad area:
ISTATUS=0
DO KK=1,4
IF(XNUMB(KK).NE.0.)THEN
XMEAN(KK)=SUM(KK)/XNUMB(KK)
COEF(KK)=1.
WORK=(SUMSQ(KK)/XNUMB(KK))-(XMEAN(KK)*XMEAN(KK))
SIG(KK)=SQRT(WORK)
ELSE
COEF(KK)=0.
ISTATUS=ISTATUS+1
ENDIF
END DO
WRITE(6,74) IMIN(1),IMIN(2),IMAX(1),IMAX(2),
1 JMIN(1),JMIN(3),JMAX(1),JMAX(3)
74 FORMAT(' AUTO_SKY/X1,2,3,4',4(1X,I5),' Y1,2,3,4',4(1X,I5))
C
WRITE(6,75) (XMEAN(K),K=1,4),(SIG(K),K=1,4)
75 FORMAT(' 3-sigma mean in bottom left (X12Y12) bottom right',
1 ' (X34Y12) top left (X12Y34)',/,' & top right (X34Y34):',
1 4(1PG11.4,1X),/,' Corresponding sigma: ',
1 4(1PG11.4,1X))
C Initialize SKY and SIGMA, first (since it exists at least one good value):
DO I=1,4
IF(COEF(I).NE.0.OR.ISTATUS.EQ.4)THEN
SKY=XMEAN(I)
SIGMA=SIG(I)
ENDIF
END DO
C Then finds the minima:
DO I=1,4
IF(COEF(I).NE.0.OR.ISTATUS.EQ.4)THEN
SKY=AMIN1(XMEAN(I),SKY)
SIGMA=AMIN1(SIG(I),SIGMA)
ENDIF
END DO
C If status=4, no good points on second loop, so I keep first loop values:
IF(ISTATUS.EQ.4) THEN
WRITE(6,77) SKY,SIGMA
77 FORMAT('AUTO_SKY/ Sky level : ',1PG11.4,' sigma : ',
1 1PG11.4,' (one loop)')
ELSE
WRITE(6,78) SKY,SIGMA
78 FORMAT('AUTO_SKY/ Sky level : ',1PG11.4,' sigma : ',
1 1PG11.4)
ENDIF
RETURN
END
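# --- Illustrative sketch (Python/numpy, not part of the FORTRAN source) ----
# AUTO_SKY estimates the sky level from four edge zones and discards pixels
# more than 2 sigma from each zone's mean; a simplified version of the same
# idea (plain corner windows instead of the 1/30-1/6 edge zones):
import numpy as np
def auto_sky_sketch(image, frac=6):
    ny, nx = image.shape
    wy, wx = max(ny // frac, 1), max(nx // frac, 1)
    zones = [image[:wy, :wx], image[:wy, -wx:], image[-wy:, :wx], image[-wy:, -wx:]]
    means, sigmas = [], []
    for zone in zones:
        m, s = zone.mean(), zone.std()
        clipped = zone[np.abs(zone - m) < 2.0 * s]   # 2-sigma clipping
        if clipped.size:
            means.append(clipped.mean())
            sigmas.append(clipped.std())
    if not means:                                    # fall back to the first-pass values
        return float(image.mean()), float(image.std())
    return min(means), min(sigmas)                   # keep the lowest estimates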
|
{"hexsha": "0337536027c91b495f235c14ee028170f5efa76f", "size": 5510, "ext": "for", "lang": "FORTRAN", "max_stars_repo_path": "jlpsub/auto_sky.for", "max_stars_repo_name": "jlprieur/shell_galaxies", "max_stars_repo_head_hexsha": "1dde87ef33b3c33b3a892e9ad0d642ae02ac6d9e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jlpsub/auto_sky.for", "max_issues_repo_name": "jlprieur/shell_galaxies", "max_issues_repo_head_hexsha": "1dde87ef33b3c33b3a892e9ad0d642ae02ac6d9e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jlpsub/auto_sky.for", "max_forks_repo_name": "jlprieur/shell_galaxies", "max_forks_repo_head_hexsha": "1dde87ef33b3c33b3a892e9ad0d642ae02ac6d9e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.3474576271, "max_line_length": 76, "alphanum_fraction": 0.5408348457, "num_tokens": 2207}
|
/-
Copyright (c) 2021 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import analysis.special_functions.integrals
import analysis.calculus.fderiv_measurable
/-!
# Non integrable functions
In this file we prove that the derivative of a function that tends to infinity is not interval
integrable, see `interval_integral.not_integrable_has_deriv_at_of_tendsto_norm_at_top_filter` and
`interval_integral.not_integrable_has_deriv_at_of_tendsto_norm_at_top_punctured`. Then we apply the
latter lemma to prove that the function `λ x, x⁻¹` is integrable on `a..b` if and only if `a = b` or
`0 ∉ [a, b]`.
## Main results
* `not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_punctured`: if `f` tends to infinity
along `𝓝[≠] c` and `f' = O(g)` along the same filter, then `g` is not interval integrable on any
nontrivial integral `a..b`, `c ∈ [a, b]`.
* `not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_filter`: a version of
`not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_punctured` that works for one-sided
neighborhoods;
* `not_interval_integrable_of_sub_inv_is_O_punctured`: if `1 / (x - c) = O(f)` as `x → c`, `x ≠ c`,
then `f` is not interval integrable on any nontrivial interval `a..b`, `c ∈ [a, b]`;
* `interval_integrable_sub_inv_iff`, `interval_integrable_inv_iff`: integrability conditions for
`(x - c)⁻¹` and `x⁻¹`.
## Tags
integrable function
-/
open_locale measure_theory topological_space interval nnreal ennreal
open measure_theory topological_space set filter asymptotics interval_integral
variables {E F : Type*} [normed_group E] [normed_space ℝ E] [second_countable_topology E]
[complete_space E] [normed_group F]
/-- If `f` is eventually differentiable along a nontrivial filter `l : filter ℝ` that is generated
by convex sets, the norm of `f` tends to infinity along `l`, and `f' = O(g)` along `l`, where `f'`
is the derivative of `f`, then `g` is not integrable on any interval `a..b` such that
`[a, b] ∈ l`. -/
lemma not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_filter {f : ℝ → E} {g : ℝ → F}
{a b : ℝ} (l : filter ℝ) [ne_bot l] [tendsto_Ixx_class Icc l l] (hl : [a, b] ∈ l)
(hd : ∀ᶠ x in l, differentiable_at ℝ f x) (hf : tendsto (λ x, ∥f x∥) l at_top)
(hfg : is_O (deriv f) g l) :
¬interval_integrable g volume a b :=
begin
intro hgi,
obtain ⟨C, hC₀, s, hsl, hsub, hfd, hg⟩ : ∃ (C : ℝ) (hC₀ : 0 ≤ C) (s ∈ l),
(∀ (x ∈ s) (y ∈ s), [x, y] ⊆ [a, b]) ∧
(∀ (x ∈ s) (y ∈ s) (z ∈ [x, y]), differentiable_at ℝ f z) ∧
(∀ (x ∈ s) (y ∈ s) (z ∈ [x, y]), ∥deriv f z∥ ≤ C * ∥g z∥),
{ rcases hfg.exists_nonneg with ⟨C, C₀, hC⟩,
have h : ∀ᶠ x : ℝ × ℝ in l.prod l, ∀ y ∈ [x.1, x.2], (differentiable_at ℝ f y ∧
∥deriv f y∥ ≤ C * ∥g y∥) ∧ y ∈ [a, b],
from (tendsto_fst.interval tendsto_snd).eventually ((hd.and hC.bound).and hl).lift'_powerset,
rcases mem_prod_self_iff.1 h with ⟨s, hsl, hs⟩,
simp only [prod_subset_iff, mem_set_of_eq] at hs,
exact ⟨C, C₀, s, hsl, λ x hx y hy z hz, (hs x hx y hy z hz).2,
λ x hx y hy z hz, (hs x hx y hy z hz).1.1, λ x hx y hy z hz, (hs x hx y hy z hz).1.2⟩ },
replace hgi : interval_integrable (λ x, C * ∥g x∥) volume a b, by convert hgi.norm.smul C,
obtain ⟨c, hc, d, hd, hlt⟩ : ∃ (c ∈ s) (d ∈ s), ∥f c∥ + ∫ y in Ι a b, C * ∥g y∥ < ∥f d∥,
{ rcases filter.nonempty_of_mem hsl with ⟨c, hc⟩,
have : ∀ᶠ x in l, ∥f c∥ + ∫ y in Ι a b, C * ∥g y∥ < ∥f x∥,
from hf.eventually (eventually_gt_at_top _),
exact ⟨c, hc, (this.and hsl).exists.imp (λ d hd, ⟨hd.2, hd.1⟩)⟩ },
specialize hsub c hc d hd, specialize hfd c hc d hd,
replace hg : ∀ x ∈ Ι c d, ∥deriv f x∥ ≤ C * ∥g x∥, from λ z hz, hg c hc d hd z ⟨hz.1.le, hz.2⟩,
have hg_ae : ∀ᵐ x ∂(volume.restrict (Ι c d)), ∥deriv f x∥ ≤ C * ∥g x∥,
from (ae_restrict_mem measurable_set_interval_oc).mono hg,
have hsub' : Ι c d ⊆ Ι a b,
from interval_oc_subset_interval_oc_of_interval_subset_interval hsub,
have hfi : interval_integrable (deriv f) volume c d,
from (hgi.mono_set hsub).mono_fun' (ae_strongly_measurable_deriv _ _) hg_ae,
refine hlt.not_le (sub_le_iff_le_add'.1 _),
calc ∥f d∥ - ∥f c∥ ≤ ∥f d - f c∥ : norm_sub_norm_le _ _
... = ∥∫ x in c..d, deriv f x∥ : congr_arg _ (integral_deriv_eq_sub hfd hfi).symm
... = ∥∫ x in Ι c d, deriv f x∥ : norm_integral_eq_norm_integral_Ioc _
... ≤ ∫ x in Ι c d, ∥deriv f x∥ : norm_integral_le_integral_norm _
... ≤ ∫ x in Ι c d, C * ∥g x∥ :
set_integral_mono_on hfi.norm.def (hgi.def.mono_set hsub') measurable_set_interval_oc hg
... ≤ ∫ x in Ι a b, C * ∥g x∥ :
set_integral_mono_set hgi.def (ae_of_all _ $ λ x, mul_nonneg hC₀ (norm_nonneg _))
hsub'.eventually_le
end
/-- If `a ≠ b`, `c ∈ [a, b]`, `f` is differentiable in the neighborhood of `c` within
`[a, b] \ {c}`, `∥f x∥ → ∞` as `x → c` within `[a, b] \ {c}`, and `f' = O(g)` along
`𝓝[[a, b] \ {c}] c`, where `f'` is the derivative of `f`, then `g` is not interval integrable on
`a..b`. -/
lemma not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_within_diff_singleton
{f : ℝ → E} {g : ℝ → F} {a b c : ℝ} (hne : a ≠ b) (hc : c ∈ [a, b])
(h_deriv : ∀ᶠ x in 𝓝[[a, b] \ {c}] c, differentiable_at ℝ f x)
(h_infty : tendsto (λ x, ∥f x∥) (𝓝[[a, b] \ {c}] c) at_top)
(hg : is_O (deriv f) g (𝓝[[a, b] \ {c}] c)) :
¬interval_integrable g volume a b :=
begin
obtain ⟨l, hl, hl', hle, hmem⟩ : ∃ l : filter ℝ, tendsto_Ixx_class Icc l l ∧ l.ne_bot ∧
l ≤ 𝓝 c ∧ [a, b] \ {c} ∈ l,
{ cases (min_lt_max.2 hne).lt_or_lt c with hlt hlt,
{ refine ⟨𝓝[<] c, infer_instance, infer_instance, inf_le_left, _⟩,
rw ← Iic_diff_right,
exact diff_mem_nhds_within_diff (Icc_mem_nhds_within_Iic ⟨hlt, hc.2⟩) _ },
{ refine ⟨𝓝[>] c, infer_instance, infer_instance, inf_le_left, _⟩,
rw ← Ici_diff_left,
exact diff_mem_nhds_within_diff (Icc_mem_nhds_within_Ici ⟨hc.1, hlt⟩) _ } },
resetI,
have : l ≤ 𝓝[[a, b] \ {c}] c, from le_inf hle (le_principal_iff.2 hmem),
exact not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_filter l
(mem_of_superset hmem (diff_subset _ _))
(h_deriv.filter_mono this) (h_infty.mono_left this) (hg.mono this),
end
/-- If `f` is differentiable in a punctured neighborhood of `c`, `∥f x∥ → ∞` as `x → c` (more
formally, along the filter `𝓝[≠] c`), and `f' = O(g)` along `𝓝[≠] c`, where `f'` is the derivative
of `f`, then `g` is not interval integrable on any nontrivial interval `a..b` such that
`c ∈ [a, b]`. -/
lemma not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_punctured {f : ℝ → E} {g : ℝ → F}
{a b c : ℝ} (h_deriv : ∀ᶠ x in 𝓝[≠] c, differentiable_at ℝ f x)
(h_infty : tendsto (λ x, ∥f x∥) (𝓝[≠] c) at_top) (hg : is_O (deriv f) g (𝓝[≠] c))
(hne : a ≠ b) (hc : c ∈ [a, b]) :
¬interval_integrable g volume a b :=
have 𝓝[[a, b] \ {c}] c ≤ 𝓝[≠] c, from nhds_within_mono _ (inter_subset_right _ _),
not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_within_diff_singleton hne hc
(h_deriv.filter_mono this) (h_infty.mono_left this) (hg.mono this)
/-- If `f` grows in the punctured neighborhood of `c : ℝ` at least as fast as `1 / (x - c)`,
then it is not interval integrable on any nontrivial interval `a..b`, `c ∈ [a, b]`. -/
lemma not_interval_integrable_of_sub_inv_is_O_punctured {f : ℝ → F} {a b c : ℝ}
(hf : is_O (λ x, (x - c)⁻¹) f (𝓝[≠] c)) (hne : a ≠ b) (hc : c ∈ [a, b]) :
¬interval_integrable f volume a b :=
begin
have A : ∀ᶠ x in 𝓝[≠] c, has_deriv_at (λ x, real.log (x - c)) (x - c)⁻¹ x,
{ filter_upwards [self_mem_nhds_within] with x hx,
simpa using ((has_deriv_at_id x).sub_const c).log (sub_ne_zero.2 hx) },
have B : tendsto (λ x, ∥real.log (x - c)∥) (𝓝[≠] c) at_top,
{ refine tendsto_abs_at_bot_at_top.comp (real.tendsto_log_nhds_within_zero.comp _),
rw ← sub_self c,
exact ((has_deriv_at_id c).sub_const c).tendsto_punctured_nhds one_ne_zero },
exact not_interval_integrable_of_tendsto_norm_at_top_of_deriv_is_O_punctured
(A.mono (λ x hx, hx.differentiable_at)) B
(hf.congr' (A.mono $ λ x hx, hx.deriv.symm) eventually_eq.rfl) hne hc
end
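/- Informal sketch (illustrative only, not used in the formal proofs above): the divergence behind
`not_interval_integrable_of_sub_inv_is_O_punctured` is the elementary computation, for `c < b`,
$$ \int_{c+\varepsilon}^{b} \frac{dx}{x - c} = \log(b - c) - \log \varepsilon \to +\infty
\quad (\varepsilon \to 0^{+}), $$
and this is precisely the role played by the antiderivative `real.log (x - c)` together with the
`tendsto (λ x, ∥real.log (x - c)∥) (𝓝[≠] c) at_top` fact established in the proof. -/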
/-- The function `λ x, (x - c)⁻¹` is integrable on `a..b` if and only if `a = b` or `c ∉ [a, b]`. -/
@[simp] lemma interval_integrable_sub_inv_iff {a b c : ℝ} :
interval_integrable (λ x, (x - c)⁻¹) volume a b ↔ a = b ∨ c ∉ [a, b] :=
begin
split,
{ refine λ h, or_iff_not_imp_left.2 (λ hne hc, _),
exact not_interval_integrable_of_sub_inv_is_O_punctured (is_O_refl _ _) hne hc h },
{ rintro (rfl|h₀),
exacts [interval_integrable.refl,
interval_integrable_inv (λ x hx, sub_ne_zero.2 $ ne_of_mem_of_not_mem hx h₀)
(continuous_on_id.sub continuous_on_const)] }
end
/-- The function `λ x, x⁻¹` is integrable on `a..b` if and only if `a = b` or `0 ∉ [a, b]`. -/
@[simp] lemma interval_integrable_inv_iff {a b : ℝ} :
interval_integrable (λ x, x⁻¹) volume a b ↔ a = b ∨ (0 : ℝ) ∉ [a, b] :=
by simp only [← interval_integrable_sub_inv_iff, sub_zero]
|
{"author": "saisurbehera", "repo": "mathProof", "sha": "57c6bfe75652e9d3312d8904441a32aff7d6a75e", "save_path": "github-repos/lean/saisurbehera-mathProof", "path": "github-repos/lean/saisurbehera-mathProof/mathProof-57c6bfe75652e9d3312d8904441a32aff7d6a75e/src/tertiary_packages/mathlib/src/analysis/special_functions/non_integrable.lean"}
|
from tensorflow.keras.losses import CategoricalCrossentropy
import tensorflow as tf
import numpy as np
from utils.dataset import get_train_dataset
from utils.utils import UtilityFunction
from utils.config import Config as Cfg
from utils.model import get_model
# Build model
model, input_size = get_model(classes_numbers=Cfg.CIFAR_10_CLASS_NUMBERS)
# Compile model
model.compile(loss=CategoricalCrossentropy(), optimizer='adam', metrics=["accuracy"])
# Get training dataset
data_gen_args = dict(rescale=1./255,
rotation_range=0.2,
vertical_flip=True,
horizontal_flip=True,
validation_split=0.2)
train_dataset, validation_dataset = get_train_dataset(
directory='./dataset/cifar-10/images/train',
aug_dict=data_gen_args,
classes=Cfg.CIFAR_10_CLASS_NAMES,
image_size=input_size,
batch_size=Cfg.BATCH_SIZE,
class_mode='categorical',
color_mode='rgb',
shuffle=True,
seed=0)
# Use callbacks
model_path = f'./models/cifar-10/{Cfg.MODEL_TYPE}'
# Use ModelCheckpoint to control validation loss for saving model.
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=model_path,
monitor='val_loss',
verbose=1,
save_best_only=True)
# Use LearningRateScheduler to decrease the learning rate during training.
learning_rate = tf.keras.callbacks.LearningRateScheduler(UtilityFunction.step_decay_classification)
callbacks = [model_checkpoint_callback, learning_rate]
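# For reference, a minimal step-decay schedule of the kind LearningRateScheduler expects is
# sketched below. The schedule actually used is `UtilityFunction.step_decay_classification`
# (defined in utils/utils.py, not shown here); the constants in this sketch are illustrative
# assumptions, not this project's values. Keras calls the schedule as `schedule(epoch, lr)`.
#
#   def step_decay(epoch, lr=None, initial_lr=1e-3, drop=0.5, epochs_per_drop=10):
#       return initial_lr * (drop ** (epoch // epochs_per_drop))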
# Train network
history = model.fit(train_dataset, validation_data=validation_dataset,
epochs=Cfg.EPOCHS, callbacks=callbacks)
# Save history
np.save(f'{model_path}/history.npy', history.history)
|
{"hexsha": "5dcdf2af8d5904d096ee5a6cf241f4a36efaf9a1", "size": 1906, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "MrRiahi/Convolutional-Neural-Networks", "max_stars_repo_head_hexsha": "e15b93376da83af89df672982f81475bf541c8cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-11-21T17:24:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T13:07:02.000Z", "max_issues_repo_path": "train.py", "max_issues_repo_name": "MrRiahi/Convolutional-Neural-Networks", "max_issues_repo_head_hexsha": "e15b93376da83af89df672982f81475bf541c8cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "MrRiahi/Convolutional-Neural-Networks", "max_forks_repo_head_hexsha": "e15b93376da83af89df672982f81475bf541c8cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0357142857, "max_line_length": 99, "alphanum_fraction": 0.6799580273, "include": true, "reason": "import numpy", "num_tokens": 365}
|
### A Pluto.jl notebook ###
# v0.19.4
#> [frontmatter]
#> title = "ExoFinder.jl"
#> description = "Let's find some worlds!"
using Markdown
using InteractiveUtils
# ╔═╡ f19b358c-8506-11ec-252c-c39dcd644d06
begin
import Pkg
Pkg.activate(Base.current_project())
using AstroImages, PlutoUI, Plots
using MarkdownLiteral: @mdx
end
# ╔═╡ 2d5365c9-c7d9-4d14-b074-f77b39baec02
using AstroImages.AstroAngles
# ╔═╡ cc2aaba1-4dfe-414f-8ace-71075e2f3bf5
using HTTP
# ╔═╡ ad40234c-474c-489e-b328-73fa5ecfb6e2
using HTTP.URIs
# ╔═╡ acdf51db-09e0-4e4c-b529-2db8030ea57c
using CSV, DataFramesMeta
# ╔═╡ 3845b39a-a637-4d2b-b2b9-f4ac0294f0e9
@mdx """
# ExoFinder 🪐
In this notebook, we will build a simple map showing the locations of known exoplanets in the night sky. The final product will look like this:
"""
# ╔═╡ e58691c1-98ed-4e75-a5bb-e03102e62def
md"""
This demo uses the following packages:
"""
# ╔═╡ 3ea72aeb-356c-4fd1-b228-f0cf0cba2df7
with_terminal() do
Pkg.status()
end
# ╔═╡ 6207daa3-fde6-4535-9ef3-d1e4a762a14d
@mdx """
## 🗺️ Sky map
First, let's set up a way to create a coordinate-aware plot of a patch of the sky. We will accomplish this with the handy [AstroImages.jl](https://github.com/JuliaAstro/AstroImages.jl) package. To start, we download a sample image (stored in the [FITS](https://en.wikipedia.org/wiki/FITS) file format):
!!! warning "TODO"
Update link with Will's [PR](https://github.com/JuliaAstro/AstroImages.jl/pull/30)
"""
# ╔═╡ bb2425be-3e6a-456d-bad2-e665dc7408aa
fname = download("http://data.astropy.org/tutorials/FITS-images/HorseHead.fits")
# ╔═╡ a5d80eed-fb12-4980-82db-800a1c4dba49
img = load(fname);
# ╔═╡ 5fcff0be-3d80-4423-a239-2a00aa376db3
let
implot(img; wcsticks=true, framestyle=:grid)
X, Y = world_to_pix(img, [hms"5 41 00"deg, dms"-2 28"deg])
scatter!([X], [Y]; ms=10, msw=0, color=:cyan)
end
# ╔═╡ 8830d13c-04e7-4333-babc-10bb267993fe
@mdx """
And then just plot it!
"""
# ╔═╡ 8762e1a5-505b-4eed-8863-35171e8cc8e1
implot(img)
# ╔═╡ 50666f3e-b5a7-4fab-86a6-979c8da62693
@mdx """
!!! Question
What was the reasoning for separating `plot` from `implot` again?
"""
# ╔═╡ 67e9e89e-7442-4d96-b8e2-fadb900a8cc3
@mdx """
In the background, AstroImages.jl calls the generic plotting package [Plots.jl](https://docs.juliaplots.org/stable/), and automatically adds a colorbar, tick marks, labels, and grid lines for us. The relevant transformations from pixel space to world coordinates (in this case RA and Dec) are also automatically applied based on the [WCS](https://www.atnf.csiro.au/people/mcalabre/WCS/) information stored in the header of our FITS file.
For more information about this package and its different capabilities, be sure to check out the [demo AstroImages.jl notebook]()!
!!! warning "TODO"
Add a demo notebook from Will?
"""
# ╔═╡ d4552736-159b-4c6a-9a97-ef57024333f7
@mdx """
### Coordinate conversion aside
Now that we have a nice way to visualize astronomical data with coordinate-aware transformations, let's compose this with regular plot commands from Plots.jl. If we turn off the axes transformations, we recover the typical pixel space information stored in our FITS file:
"""
# ╔═╡ 6fd6108a-dc97-42ed-a722-bc74c19a0bde
implot(img; wcsticks=false, framestyle=:grid)
# ╔═╡ b8e06c27-5504-41d1-b07c-c5259ff2681f
@mdx """
By default, the origin is located in the bottom-left corner and indices increase to the right and up. Since `implot` returns a standard Plots.jl figure object, we can plot a test point on top of it [in the usual way](https://docs.juliaplots.org/latest/basics/):
"""
# ╔═╡ 12b9a88f-2f9a-4744-ba69-496545a0dac0
let
p = implot(img; wcsticks=false, framestyle=:grid)
scatter!(p, [500], [400]; ms=10, msw=0, color=:cyan)
end
# ╔═╡ e0a44c34-69f8-4037-82a8-60b196b87382
@mdx """
!!! Question
	`wcsticks = false` seems to override `grid`. Is there a way to plot in pixel space while still having the grid lines enabled?
"""
# ╔═╡ aa60557e-faf0-4058-96fd-128ca093c773
@mdx """
We can go the other direction (from world coordinate space to pixel space) in the following two stages:
1. First, we use [AstroAngles.jl](https://github.com/JuliaAstro/AstroAngles.jl) to conveniently convert RA and Dec formatted input to their equivalent values in degrees for us. This package supports a wide range of formats, which are shown [here](https://github.com/JuliaAstro/AstroAngles.jl#usage).
1. Next, we pass these values to the `world_to_pix` function exported by `AstroImages.jl` to make the WCS transformations in our final plot.
!!! Question
Are degrees used internally for all cases?
"""
# ╔═╡ ba4e4e4e-7f0a-4590-981c-619b53fd0bec
let
implot(img; wcsticks=true, framestyle=:grid)
X, Y = world_to_pix(img, [hms"5 41 00"deg, dms"-2 28 00"deg])
scatter!([X], [Y]; ms=10, msw=0, color=:cyan)
end
# ╔═╡ f62bae29-e31a-416e-b844-9720a5ef57f2
@mdx """
!!! Question
Is it better to do `using AstroAngles` explicitly here?
"""
# ╔═╡ a6368cc9-b7c6-4ffd-a9ba-5ec33be3cb2b
@mdx """
Now that we have a way to plot a single arbitrary point, let's extend this to a whole collection of known exoplanet coordinates.
"""
# ╔═╡ d03ffae4-5a15-448f-a47b-e850049efe80
@mdx """
## 🎯 Exoplanet locations
The [NASA Exoplanet Archive](https://exoplanetarchive.ipac.caltech.edu/) stores an updated list of known exoplanets, along with additional information about their host stars and orbital parameters. As of this writing, there are 5,000+ confirmed detections, and fortunately there is an API to query all of this information!
The archive provides a Table Access Protocol [(TAP)](https://exoplanetarchive.ipac.caltech.edu/docs/TAP/usingTAP.html) service to query the data using an astronomy-specific extension of SQL known as Astronomical Data Query Language [(ADQL)](https://www.ivoa.net/documents/ADQL/). This essentially boils down to pasting a query into a URL, and then pulling it down with a `GET` request, which we accomplish with [HTTP.jl](https://juliaweb.github.io/HTTP.jl/stable/):
"""
# ╔═╡ 1319c8bf-ea90-469a-8433-5c3b66b1af07
q = """
select top 10 hostname, pl_name, tic_id
from pscomppars
"""
# ╔═╡ ef38432c-0ec4-46b7-9444-9321180729d9
query = "query=" * escapeuri(q) * "&format=csv"
# ╔═╡ 4d073b62-c25b-4cd9-be89-d87144f2bfdb
@mdx """
!!! note "Note"
We use the `escapeuri` function exported by [URIs.jl](https://docs.juliahub.com/URIs/eec2u/1.3.0/#Tutorial) to convert the spaces and other special characters into valid [URI](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier) characters.
"""
# ╔═╡ 4581038a-fb53-49c7-a85f-60eb153b6f25
df = CSV.read(
HTTP.get("https://exoplanetarchive.ipac.caltech.edu/TAP/sync"; query).body,
DataFrame,
)
# ╔═╡ 403c435d-4d54-49d6-a50f-9f5362ae96d9
@mdx """
We now have the data in a convenient table format (provided by [DataFrames.jl](https://dataframes.juliadata.org/stable/)) for our queried exoplanets.
"""
# ╔═╡ 6db3d93a-0c35-4c27-ace7-1fb44966d864
@mdx """
### Data extraction
Whooo
"""
# ╔═╡ 127338cb-b917-4e2d-8ba1-3ed045c799a4
@mdx """
# Notebook setup 📦
"""
# ╔═╡ fcceea3e-db8f-4853-af49-240d66d54377
TableOfContents()
# ╔═╡ Cell order:
# ╟─3845b39a-a637-4d2b-b2b9-f4ac0294f0e9
# ╟─5fcff0be-3d80-4423-a239-2a00aa376db3
# ╟─e58691c1-98ed-4e75-a5bb-e03102e62def
# ╟─3ea72aeb-356c-4fd1-b228-f0cf0cba2df7
# ╟─6207daa3-fde6-4535-9ef3-d1e4a762a14d
# ╠═bb2425be-3e6a-456d-bad2-e665dc7408aa
# ╠═a5d80eed-fb12-4980-82db-800a1c4dba49
# ╟─8830d13c-04e7-4333-babc-10bb267993fe
# ╠═8762e1a5-505b-4eed-8863-35171e8cc8e1
# ╟─50666f3e-b5a7-4fab-86a6-979c8da62693
# ╟─67e9e89e-7442-4d96-b8e2-fadb900a8cc3
# ╟─d4552736-159b-4c6a-9a97-ef57024333f7
# ╠═6fd6108a-dc97-42ed-a722-bc74c19a0bde
# ╟─b8e06c27-5504-41d1-b07c-c5259ff2681f
# ╠═12b9a88f-2f9a-4744-ba69-496545a0dac0
# ╟─e0a44c34-69f8-4037-82a8-60b196b87382
# ╟─aa60557e-faf0-4058-96fd-128ca093c773
# ╠═ba4e4e4e-7f0a-4590-981c-619b53fd0bec
# ╠═2d5365c9-c7d9-4d14-b074-f77b39baec02
# ╟─f62bae29-e31a-416e-b844-9720a5ef57f2
# ╟─a6368cc9-b7c6-4ffd-a9ba-5ec33be3cb2b
# ╟─d03ffae4-5a15-448f-a47b-e850049efe80
# ╠═cc2aaba1-4dfe-414f-8ace-71075e2f3bf5
# ╠═ad40234c-474c-489e-b328-73fa5ecfb6e2
# ╠═1319c8bf-ea90-469a-8433-5c3b66b1af07
# ╠═ef38432c-0ec4-46b7-9444-9321180729d9
# ╟─4d073b62-c25b-4cd9-be89-d87144f2bfdb
# ╠═4581038a-fb53-49c7-a85f-60eb153b6f25
# ╟─403c435d-4d54-49d6-a50f-9f5362ae96d9
# ╟─6db3d93a-0c35-4c27-ace7-1fb44966d864
# ╠═acdf51db-09e0-4e4c-b529-2db8030ea57c
# ╟─127338cb-b917-4e2d-8ba1-3ed045c799a4
# ╠═fcceea3e-db8f-4853-af49-240d66d54377
# ╠═f19b358c-8506-11ec-252c-c39dcd644d06
|
{"hexsha": "e5e5b53405f8d2e9f669628797bbaf3f3a7be44c", "size": 8496, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "astroimages.jl", "max_stars_repo_name": "icweaver/Pluto_sample_notebooks", "max_stars_repo_head_hexsha": "37d5869eecd748ddcf2e7e5600cc730a689af721", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "astroimages.jl", "max_issues_repo_name": "icweaver/Pluto_sample_notebooks", "max_issues_repo_head_hexsha": "37d5869eecd748ddcf2e7e5600cc730a689af721", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "astroimages.jl", "max_forks_repo_name": "icweaver/Pluto_sample_notebooks", "max_forks_repo_head_hexsha": "37d5869eecd748ddcf2e7e5600cc730a689af721", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8481012658, "max_line_length": 465, "alphanum_fraction": 0.7436440678, "num_tokens": 3473}
|
import os
import os.path as osp
import json
import torch
import numpy as np
from torch_sparse import coalesce
from torch_geometric.data import (InMemoryDataset, Data, download_url,
extract_zip)
class PPI(InMemoryDataset):
r"""Protein-protein interaction networks from the `"Predicting
Multicellular Function through Multi-layer Tissue Networks"
<https://arxiv.org/abs/1707.04638>`_ paper, containing positional gene
sets, motif gene sets and immunological signatures as features (50 in
total) and gene ontology sets as labels (121 in total).
Training, validation and test splits are given by binary masks.
Args:
root (string): Root directory where the dataset should be saved.
transform (callable, optional): A function/transform that takes in an
:obj:`torch_geometric.data.Data` object and returns a transformed
version. The data object will be transformed before every access.
(default: :obj:`None`)
pre_transform (callable, optional): A function/transform that takes in
an :obj:`torch_geometric.data.Data` object and returns a
transformed version. The data object will be transformed before
being saved to disk. (default: :obj:`None`)
"""
url = 'http://snap.stanford.edu/graphsage/ppi.zip'
def __init__(self, root, transform=None, pre_transform=None):
super(PPI, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
prefix = self.__class__.__name__.lower()
suffix = ['G.json', 'feats.npy', 'class_map.json']
return ['{}-{}'.format(prefix, s) for s in suffix]
@property
def processed_file_names(self):
return 'data.pt'
def download(self):
path = download_url(self.url, self.root)
extract_zip(path, self.root)
os.unlink(path)
name = self.__class__.__name__.lower()
os.rename(osp.join(self.root, name), self.raw_dir)
def process(self):
with open(self.raw_paths[0], 'r') as f:
graph_data = json.load(f)
mask = torch.zeros(len(graph_data['nodes']), dtype=torch.uint8)
for i in graph_data['nodes']:
mask[i['id']] = 1 if i['val'] else (2 if i['test'] else 0)
train_mask, val_mask, test_mask = mask == 0, mask == 1, mask == 2
row, col = [], []
for i in graph_data['links']:
row.append(i['source'])
col.append(i['target'])
edge_index = torch.stack([torch.tensor(row), torch.tensor(col)], dim=0)
edge_index, _ = coalesce(edge_index, None, mask.size(0), mask.size(0))
x = torch.from_numpy(np.load(self.raw_paths[1])).float()
with open(self.raw_paths[2], 'r') as f:
y_data = json.load(f)
y = []
for i in range(len(y_data)):
y.append(y_data[str(i)])
y = torch.tensor(y, dtype=torch.float)
data = Data(x=x, edge_index=edge_index, y=y)
data.train_mask = train_mask
data.val_mask = val_mask
data.test_mask = test_mask
data = data if self.pre_transform is None else self.pre_transform(data)
data, slices = self.collate([data])
torch.save((data, slices), self.processed_paths[0])
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
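if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the library): download/process the dataset
    # and inspect the masks. The root path is an arbitrary example, not a location this
    # module normally uses.
    dataset = PPI(root='/tmp/PPI')
    data = dataset[0]
    print(data.x.shape, data.y.shape, int(data.train_mask.sum()))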
|
{"hexsha": "e8d7bf89e92bd949bba68e690d305e388829e8ba", "size": 3466, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_geometric/datasets/ppi.py", "max_stars_repo_name": "n-kats/pytorch_geometric", "max_stars_repo_head_hexsha": "9ef6ad5501d4f2439ae608ad0d197500a8acc2d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-16T19:51:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-16T19:51:08.000Z", "max_issues_repo_path": "torch_geometric/datasets/ppi.py", "max_issues_repo_name": "n-kats/pytorch_geometric", "max_issues_repo_head_hexsha": "9ef6ad5501d4f2439ae608ad0d197500a8acc2d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "torch_geometric/datasets/ppi.py", "max_forks_repo_name": "n-kats/pytorch_geometric", "max_forks_repo_head_hexsha": "9ef6ad5501d4f2439ae608ad0d197500a8acc2d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.6739130435, "max_line_length": 79, "alphanum_fraction": 0.6321407963, "include": true, "reason": "import numpy", "num_tokens": 818}
|
"""
Implements the ArraysInterface object and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.tools import sharedmemtools as _smt
class ArraysInterface(object):
"""
An interface between pyGSTi's optimization methods and data storage arrays.
This class provides an abstract interface to algorithms (particularly the Levenberg-Marquardt
nonlinear least-squares algorithm) for creating an manipulating potentially distributed data
arrays with types such as "jtj" (Jacobian^T * Jacobian), "jtf" (Jacobian^T * objectivefn_vector),
and "x" (model parameter vector). The class encapsulates all the operations on these arrays so
that the algorithm doesn't need to worry about how the arrays are actually stored in memory,
e.g. whether shared memory is used or not.
"""
    pass  # just a base class - maybe make an abc abstract class in FUTURE?
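# A minimal sketch (illustrative only, not used by pyGSTi itself) of how an optimizer might
# drive this interface. `fill_local_jacobian` and `local_objective` are stand-ins for whatever
# computes the local Jacobian block and local objective vector in a real solver.
def _example_gauss_newton_quantities(ari, fill_local_jacobian, local_objective):
    """Compute J^T J, J^T f and ||f||^2 through an ArraysInterface (sketch only)."""
    jac = ari.allocate_jac()    # local 'ep'-type Jacobian block
    jtj = ari.allocate_jtj()    # local 'jtj'-type approximate Hessian
    jtf = ari.allocate_jtf()    # local 'jtf'-type J^T f vector
    fill_local_jacobian(jac)    # caller fills this processor's Jacobian portion in place
    f = local_objective()       # caller returns this processor's objective-function elements
    ari.fill_jtj(jac, jtj)      # jtj <- J^T J (any distribution is handled internally)
    ari.fill_jtf(jac, f, jtf)   # jtf <- J^T f
    norm2_f = ari.norm2_f(f)    # global ||f||^2 (reduced across processors when distributed)
    ari.deallocate_jac(jac)
    return jtj, jtf, norm2_f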
class UndistributedArraysInterface(ArraysInterface):
"""
An arrays interface for the case when the arrays are not actually distributed.
Parameters
----------
num_global_elements : int
The total number of objective function "elements", i.e. the size of the
objective function array `f`.
num_global_params : int
The total number of (model) parameters, i.e. the size of the `x` array.
"""
def __init__(self, num_global_elements, num_global_params):
self.num_global_elements = num_global_elements
self.num_global_params = num_global_params
def allocate_jtf(self):
"""
Allocate an array for holding a `'jtf'`-type value.
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty(self.num_global_params, 'd')
def allocate_jtj(self):
"""
Allocate an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty((self.num_global_params, self.num_global_params), 'd')
def allocate_jac(self):
"""
Allocate an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return _np.empty((self.num_global_elements, self.num_global_params), 'd')
def deallocate_jtf(self, jtf):
"""
Free an array for holding an objective function value (type `'jtf'`).
Returns
-------
None
"""
pass
def deallocate_jtj(self, jtj):
"""
Free an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
None
"""
pass
def deallocate_jac(self, jac):
"""
Free an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
None
"""
pass
def global_num_elements(self):
"""
The total number of objective function "elements".
This is the size/length of the objective function `f` vector.
Returns
-------
int
"""
return self.num_global_elements
def jac_param_slice(self, only_if_leader=False):
"""
The slice into a Jacobian's columns that belong to this processor.
Parameters
----------
only_if_leader : bool, optional
            If `True`, the current processor's parameter slice is only returned if
the processor is the "leader" (i.e. the first) of the processors that
calculate the same parameter slice. All non-leader processors return
the zero-slice `slice(0,0)`.
Returns
-------
slice
"""
return slice(0, self.num_global_params)
def jtf_param_slice(self):
"""
        The slice into a `'jtf'` vector giving the rows owned by this processor.
Returns
-------
slice
"""
return slice(0, self.num_global_params)
def param_fine_info(self):
"""
Returns information regarding how model parameters are distributed among hosts and processors.
This information relates to the "fine" distribution used in distributed layouts,
and is needed by some algorithms which utilize shared-memory communication between
processors on the same host.
Returns
-------
param_fine_slices_by_host : list
A list with one entry per host. Each entry is itself a list of
`(rank, (global_param_slice, host_param_slice))` elements where `rank` is the top-level
overall rank of a processor, `global_param_slice` is the parameter slice that processor owns
and `host_param_slice` is the same slice relative to the parameters owned by the host.
owner_host_and_rank_of_global_fine_param_index : dict
A mapping between parameter indices (keys) and the owning processor rank and host index.
Values are `(host_index, processor_rank)` tuples.
"""
all_params = slice(0, self.num_global_params)
ranks_and_pslices_for_host0 = [(0, (all_params, all_params))]
param_fine_slices_by_host = [ranks_and_pslices_for_host0]
owner_host_and_rank_of_global_fine_param_index = {i: (0, 0) for i in range(self.num_global_params)}
return param_fine_slices_by_host, \
owner_host_and_rank_of_global_fine_param_index
def allgather_x(self, x, global_x):
"""
Gather a parameter (`x`) vector onto all the processors.
Parameters
----------
x : numpy.array or LocalNumpyArray
The input vector.
global_x : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
global_x[:] = x
def allscatter_x(self, global_x, x):
"""
Pare down an already-scattered global parameter (`x`) vector to be just a local `x` vector.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector. This global vector is already present on all the processors,
so there's no need to do any MPI communication.
x : numpy.array or LocalNumpyArray
            The output vector, typically a slice of `global_x`.
Returns
-------
None
"""
x[:] = global_x
def scatter_x(self, global_x, x):
"""
Scatter a global parameter (`x`) vector onto all the processors.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector.
x : numpy.array or LocalNumpyArray
The output (scattered) vector.
Returns
-------
None
"""
x[:] = global_x
def allgather_f(self, f, global_f):
"""
        Gather an objective function (`f`) vector onto all the processors.
Parameters
----------
f : numpy.array or LocalNumpyArray
The input vector.
global_f : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
global_f[:] = f
def gather_jtj(self, jtj, return_shared=False):
"""
Gather a Hessian (`jtj`) matrix onto the root processor.
Parameters
----------
jtj : numpy.array or LocalNumpyArray
The (local) input matrix to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
            the caller assumes responsibility for freeing the memory via
:function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
return (jtj, None) if return_shared else jtj # gathers just onto the root proc
def scatter_jtj(self, global_jtj, jtj):
"""
Scatter a Hessian (`jtj`) matrix onto all the processors.
Parameters
----------
global_jtj : numpy.ndarray
The global Hessian matrix to scatter.
jtj : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
jtj[:, :] = global_jtj
def gather_jtf(self, jtf, return_shared=False):
"""
Gather a `jtf` vector onto the root processor.
Parameters
----------
jtf : numpy.array or LocalNumpyArray
The local input vector to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
            the caller assumes responsibility for freeing the memory via
:function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
return (jtf, None) if return_shared else jtf
def scatter_jtf(self, global_jtf, jtf):
"""
Scatter a `jtf` vector onto all the processors.
Parameters
----------
global_jtf : numpy.ndarray
The global vector to scatter.
jtf : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
jtf[:] = global_jtf
def global_svd_dot(self, jac_v, minus_jtf):
"""
Gathers the dot product between a `jtj`-type matrix and a `jtf`-type vector into a global result array.
This is typically used within SVD-defined basis calculations, where `jac_v` is the "V"
matrix of the SVD of a jacobian, and `minus_jtf` is the negative dot product between the Jacobian
matrix and objective function vector.
Parameters
----------
jac_v : numpy.ndarray or LocalNumpyArray
An array of `jtj`-type.
minus_jtf : numpy.ndarray or LocalNumpyArray
An array of `jtf`-type.
Returns
-------
numpy.ndarray
The global (gathered) parameter vector `dot(jac_v.T, minus_jtf)`.
"""
return _np.dot(jac_v.T, minus_jtf)
def fill_dx_svd(self, jac_v, global_vec, dx):
"""
Computes the dot product of a `jtj`-type array with a global parameter array.
The result (`dx`) is a `jtf`-type array. This is typically used for
computing the x-update vector in the LM method when using a SVD-defined basis.
Parameters
----------
jac_v : numpy.ndarray or LocalNumpyArray
An array of `jtj`-type.
global_vec : numpy.ndarray
A global parameter vector.
dx : numpy.ndarray or LocalNumpyArray
An array of `jtf`-type. Filled with `dot(jac_v, global_vec)`
values.
Returns
-------
None
"""
dx[:] = _np.dot(jac_v, global_vec)
def dot_x(self, x1, x2):
"""
Take the dot product of two `x`-type vectors.
Parameters
----------
x1, x2 : numpy.ndarray or LocalNumpyArray
The vectors to operate on.
Returns
-------
float
"""
return _np.dot(x1, x2)
def norm2_x(self, x):
"""
Compute the Frobenius norm squared of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
return _np.dot(x, x)
def infnorm_x(self, x):
"""
Compute the infinity-norm of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
return _np.linalg.norm(x, ord=_np.inf) # (max(sum(abs(x), axis=1))) = max(abs(x))
def max_x(self, x):
"""
Compute the maximum of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
return _np.max(x)
def norm2_f(self, f):
"""
Compute the Frobenius norm squared of an `f`-type vector.
Parameters
----------
f : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
return _np.dot(f, f)
def norm2_jtj(self, jtj):
"""
Compute the Frobenius norm squared of an `jtj`-type matrix.
Parameters
----------
jtj : numpy.ndarray or LocalNumpyArray
The array to operate on.
Returns
-------
float
"""
return _np.linalg.norm(jtj)**2
def norm2_jac(self, j):
"""
Compute the Frobenius norm squared of an Jacobian matrix (`ep`-type).
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
The Jacobian to operate on.
Returns
-------
float
"""
return _np.linalg.norm(j)
def fill_jtf(self, j, f, jtf):
"""
Compute dot(Jacobian.T, f) in supplied memory.
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
Jacobian matrix (type `ep`).
f : numpy.ndarray or LocalNumpyArray
Objective function vector (type `e`).
jtf : numpy.ndarray or LocalNumpyArray
Output array, type `jtf`. Filled with `dot(j.T, f)` values.
Returns
-------
None
"""
jtf[:] = _np.dot(j.T, f)
def fill_jtj(self, j, jtj, shared_mem_buf=None):
"""
Compute dot(Jacobian.T, Jacobian) in supplied memory.
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
Jacobian matrix (type `ep`).
        jtj : numpy.ndarray or LocalNumpyArray
Output array, type `jtj`. Filled with `dot(j.T, j)` values.
shared_mem_buf : tuple or None
Scratch space of shared memory used to speed up repeated calls to `fill_jtj`.
If not none, the value returned from :method:`allocate_jtj_shared_mem_buf`.
Returns
-------
None
"""
jtj[:, :] = _np.dot(j.T, j)
def allocate_jtj_shared_mem_buf(self):
"""
Allocate scratch space to be used for repeated calls to :method:`fill_jtj`.
Returns
-------
scratch : numpy.ndarray or None
The scratch array.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
The shared memory handle associated with `scratch`, which is needed to
free the memory.
"""
return None, None
def deallocate_jtj_shared_mem_buf(self, jtj_buf):
"""
Frees the scratch memory allocated by :method:`allocate_jtj_shared_mem_buf`.
Parameters
----------
jtj_buf : tuple or None
The value returned from :method:`allocate_jtj_shared_mem_buf`
"""
pass
def jtj_diag_indices(self, jtj):
"""
The indices into a `jtj`-type array that correspond to diagonal elements of the global matrix.
If `jtj` were a global quantity, then this would just be `numpy.diag_indices_from(jtj)`,
however, it may be more complicated in actuality when different processors hold different
sections of the global matrix.
Parameters
----------
jtj : numpy.ndarray or None
The `jtj`-type array to get the indices with respect to.
Returns
-------
tuple
A tuple of 1D arrays that can be used to index the elements of `jtj` that
correspond to diagonal elements of the global jtj matrix.
"""
return _np.diag_indices_from(jtj)
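# Another illustrative sketch (not pyGSTi's actual Levenberg-Marquardt update) showing how the
# SVD-basis helpers compose. Here `jac_v` is the V factor of an SVD of the Jacobian (a
# 'jtj'-type array), `s` its singular values and `mu` a damping parameter; all of these are
# assumptions of the example, not quantities produced by this module.
def _example_svd_damped_step(ari, jac_v, s, minus_jtf, mu, dx):
    coeffs = ari.global_svd_dot(jac_v, minus_jtf)  # global vector: V^T (-J^T f)
    damped = coeffs / (s**2 + mu)                  # damp each singular direction
    ari.fill_dx_svd(jac_v, damped, dx)             # dx <- V @ damped (a 'jtf'-type array)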
class DistributedArraysInterface(ArraysInterface):
"""
An arrays interface where the arrays are distributed according to a distributed layout.
Parameters
----------
dist_layout : DistributableCOPALayout
The layout giving the distribution of the arrays.
extra_elements : int, optional
The number of additional objective function "elements" beyond those
specified by `dist_layout`. These are often used for penalty terms.
"""
def __init__(self, dist_layout, lsvec_mode, extra_elements=0):
from ..layouts.distlayout import DistributableCOPALayout as _DL
assert(isinstance(dist_layout, _DL))
self.layout = dist_layout
self.resource_alloc = self.layout.resource_alloc()
self.extra_elements = extra_elements
        self.lsvec_mode = lsvec_mode  # e.g. 'normal' or 'percircuit'
def allocate_jtf(self):
"""
Allocate an array for holding a `'jtf'`-type value.
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return self.layout.allocate_local_array('jtf', 'd', extra_elements=self.extra_elements)
def allocate_jtj(self):
"""
Allocate an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
return self.layout.allocate_local_array('jtj', 'd', extra_elements=self.extra_elements)
def allocate_jac(self):
"""
Allocate an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
numpy.ndarray or LocalNumpyArray
"""
if self.lsvec_mode == 'normal':
return self.layout.allocate_local_array('ep', 'd', extra_elements=self.extra_elements)
elif self.lsvec_mode == 'percircuit':
return self.layout.allocate_local_array('cp', 'd', extra_elements=self.extra_elements)
else:
raise ValueError("Invlid lsvec_mode: %s" % str(self.lsvec_mode))
def deallocate_jtf(self, jtf):
"""
Free an array for holding an objective function value (type `'jtf'`).
Returns
-------
None
"""
        self.layout.free_local_array(jtf)  # cleanup shared memory, if it was used
def deallocate_jtj(self, jtj):
"""
Free an array for holding an approximated Hessian (type `'jtj'`).
Returns
-------
None
"""
        self.layout.free_local_array(jtj)  # cleanup shared memory, if it was used
def deallocate_jac(self, jac):
"""
Free an array for holding a Jacobian matrix (type `'ep'`).
Returns
-------
None
"""
        self.layout.free_local_array(jac)  # cleanup shared memory, if it was used
def global_num_elements(self):
"""
The total number of objective function "elements".
This is the size/length of the objective function `f` vector.
Returns
-------
int
"""
if self.lsvec_mode == "normal":
return self.layout.global_num_elements + self.extra_elements
elif self.lsvec_mode == "percircuit":
return self.layout.global_num_circuits + self.extra_elements
else:
raise ValueError("Invalid lsvec_mode: %s" % str(self.lsvec_mode))
def jac_param_slice(self, only_if_leader=False):
"""
The slice into a Jacobian's columns that belong to this processor.
Parameters
----------
only_if_leader : bool, optional
            If `True`, the current processor's parameter slice is only returned if
the processor is the "leader" (i.e. the first) of the processors that
calculate the same parameter slice. All non-leader processors return
the zero-slice `slice(0,0)`.
Returns
-------
slice
"""
if only_if_leader and not self.layout.resource_alloc('param-processing').is_host_leader:
return slice(0, 0) # not the leader of the group of procs computing this same jac portion
return self.layout.global_param_slice
def jtf_param_slice(self):
"""
        The slice into a `'jtf'` vector giving the rows owned by this processor.
Returns
-------
slice
"""
return self.layout.global_param_fine_slice
def param_fine_info(self):
"""
Returns information regarding how model parameters are distributed among hosts and processors.
This information relates to the "fine" distribution used in distributed layouts,
and is needed by some algorithms which utilize shared-memory communication between
processors on the same host.
Returns
-------
param_fine_slices_by_host : list
A list with one entry per host. Each entry is itself a list of
`(rank, (global_param_slice, host_param_slice))` elements where `rank` is the top-level
overall rank of a processor, `global_param_slice` is the parameter slice that processor owns
and `host_param_slice` is the same slice relative to the parameters owned by the host.
owner_host_and_rank_of_global_fine_param_index : dict
A mapping between parameter indices (keys) and the owning processor rank and host index.
Values are `(host_index, processor_rank)` tuples.
"""
return self.layout.param_fine_slices_by_host, \
self.layout.owner_host_and_rank_of_global_fine_param_index
def allgather_x(self, x, global_x):
"""
Gather a parameter (`x`) vector onto all the processors.
Parameters
----------
x : numpy.array or LocalNumpyArray
The input vector.
global_x : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
#TODO: do this more efficiently in future:
global_x_on_root = self.layout.gather_local_array('jtf', x)
if self.resource_alloc.comm is not None:
global_x[:] = self.resource_alloc.comm.bcast(
global_x_on_root if self.resource_alloc.comm.rank == 0 else None, root=0)
else:
global_x[:] = global_x_on_root
def allscatter_x(self, global_x, x):
"""
Pare down an already-scattered global parameter (`x`) vector to be just a local `x` vector.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector. This global vector is already present on all the processors,
so there's no need to do any MPI communication.
x : numpy.array or LocalNumpyArray
            The output vector, typically a slice of `global_x`.
Returns
-------
None
"""
x[:] = global_x[self.layout.global_param_fine_slice]
def scatter_x(self, global_x, x):
"""
Scatter a global parameter (`x`) vector onto all the processors.
Parameters
----------
global_x : numpy.array or LocalNumpyArray
The input vector.
x : numpy.array or LocalNumpyArray
The output (scattered) vector.
Returns
-------
None
"""
self.scatter_jtf(global_x, x)
def allgather_f(self, f, global_f):
"""
        Gather an objective function (`f`) vector onto all the processors.
Parameters
----------
f : numpy.array or LocalNumpyArray
The input vector.
global_f : numpy.array or LocalNumpyArray
The output (gathered) vector.
Returns
-------
None
"""
#TODO: do this more efficiently in future:
artype = 'c' if self.lsvec_mode == 'percircuit' else 'e'
global_f_on_root = self.layout.gather_local_array(artype, f, extra_elements=self.extra_elements)
if self.resource_alloc.comm is not None:
global_f[:] = self.resource_alloc.comm.bcast(
global_f_on_root if self.resource_alloc.comm.rank == 0 else None, root=0)
else:
global_f[:] = global_f_on_root
def gather_jtj(self, jtj, return_shared=False):
"""
Gather a Hessian (`jtj`) matrix onto the root processor.
Parameters
----------
jtj : numpy.array or LocalNumpyArray
The (local) input matrix to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
            the caller assumes responsibility for freeing the memory via
:function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
# gathers just onto the root proc
return self.layout.gather_local_array('jtj', jtj, return_shared=return_shared)
def scatter_jtj(self, global_jtj, jtj):
"""
Scatter a Hessian (`jtj`) matrix onto all the processors.
Parameters
----------
global_jtj : numpy.ndarray
The global Hessian matrix to scatter.
jtj : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
# Don't bother trying to be fancy with shared mem here - we need to send the
# entire global_jtj from the (single) root proc anyway.
comm = self.resource_alloc.comm
if comm is not None:
jtj[:, :] = comm.scatter([global_jtj[pslc, :] for pslc in self.layout.param_fine_slices_by_rank]
if comm.rank == 0 else None, root=0)
else:
jtj[:, :] = global_jtj
def gather_jtf(self, jtf, return_shared=False):
"""
Gather a `jtf` vector onto the root processor.
Parameters
----------
jtf : numpy.array or LocalNumpyArray
The local input vector to gather.
return_shared : bool, optional
Whether the returned array is allowed to be a shared-memory array, which results
in a small performance gain because the array used internally to gather the results
can be returned directly. When `True` a shared memory handle is also returned, and
            the caller assumes responsibility for freeing the memory via
:function:`pygsti.tools.sharedmemtools.cleanup_shared_ndarray`.
Returns
-------
gathered_array : numpy.ndarray or None
The full (global) output array on the root (rank=0) processor and
`None` on all other processors.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
Returned only when `return_shared == True`. The shared memory handle
associated with `gathered_array`, which is needed to free the memory.
"""
# gathers just onto the root proc
return self.layout.gather_local_array('jtf', jtf, return_shared=return_shared)
def scatter_jtf(self, global_jtf, jtf):
"""
Scatter a `jtf` vector onto all the processors.
Parameters
----------
global_jtf : numpy.ndarray
The global vector to scatter.
jtf : numpy.ndarray or LocalNumpyArray
The local destination array.
Returns
-------
None
"""
# Don't bother trying to be fancy with shared mem here - we need to send the
        # entire global_jtf from the (single) root proc anyway.
comm = self.resource_alloc.comm
if comm is not None:
to_scatter = [global_jtf[pslc] for pslc in self.layout.param_fine_slices_by_rank] \
if (comm.rank == 0) else None
jtf[:] = comm.scatter(to_scatter, root=0)
else:
jtf[:] = global_jtf
def global_svd_dot(self, jac_v, minus_jtf):
"""
Gathers the dot product between a `jtj`-type matrix and a `jtf`-type vector into a global result array.
This is typically used within SVD-defined basis calculations, where `jac_v` is the "V"
matrix of the SVD of a jacobian, and `minus_jtf` is the negative dot product between the Jacobian
matrix and objective function vector.
Parameters
----------
jac_v : numpy.ndarray or LocalNumpyArray
An array of `jtj`-type.
minus_jtf : numpy.ndarray or LocalNumpyArray
An array of `jtf`-type.
Returns
-------
numpy.ndarray
The global (gathered) parameter vector `dot(jac_v.T, minus_jtf)`.
"""
# Assumes jac_v is 'jtj' type and minus_jtf is 'jtf' type.
# Returns a *global* parameter array that is dot(jac_v.T, minus_jtf)
local_dot = _np.dot(jac_v.T, minus_jtf) # (nP, nP_fine) * (nP_fine) = (nP,)
#Note: Could make this more efficient by being given a shared array like this as the destination
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (jac_v.shape[1],), 'd')
self.resource_alloc.allreduce_sum(result, local_dot,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result.copy()
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def fill_dx_svd(self, jac_v, global_vec, dx):
"""
Computes the dot product of a `jtj`-type array with a global parameter array.
The result (`dx`) is a `jtf`-type array. This is typically used for
computing the x-update vector in the LM method when using a SVD-defined basis.
Parameters
----------
jac_v : numpy.ndarray or LocalNumpyArray
An array of `jtj`-type.
global_vec : numpy.ndarray
A global parameter vector.
dx : numpy.ndarray or LocalNumpyArray
An array of `jtf`-type. Filled with `dot(jac_v, global_vec)`
values.
Returns
-------
None
"""
# Assumes dx is of type 'jtf' (only locally holds fine param slice)
# Assumes jac_v is of type 'jtj' (locally hosts fine param slice rows)
# Assumes global_vec is a global parameter vector
        # fills dx = dot(jac_v, global_vec)
dx[:] = _np.dot(jac_v, global_vec) # everything is local in this case!
def dot_x(self, x1, x2):
"""
Take the dot product of two `x`-type vectors.
Parameters
----------
x1, x2 : numpy.ndarray or LocalNumpyArray
The vectors to operate on.
Returns
-------
float
"""
# assumes x's are in "fine" mode
local_dot = _np.array(_np.dot(x1, x2))
local_dot.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_sum(result, local_dot,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def norm2_x(self, x):
"""
Compute the Frobenius norm squared of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
return self.dot_x(x, x)
def infnorm_x(self, x): # (max(sum(abs(x), axis=1))) = max(abs(x))
"""
Compute the infinity-norm of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
# assumes x's are in "fine" mode
local_infnorm = _np.array(_np.linalg.norm(x, ord=_np.inf))
local_infnorm.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_max(result, local_infnorm,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def min_x(self, x):
"""
Compute the minimum of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
# assumes x's are in "fine" mode
local_min = _np.array(_np.min(x))
local_min.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_min(result, local_min,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def max_x(self, x):
"""
Compute the maximum of an `x`-type vector.
Parameters
----------
x : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
# assumes x's are in "fine" mode
local_max = _np.array(_np.max(x))
local_max.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_max(result, local_max,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def norm2_f(self, f):
"""
Compute the Frobenius norm squared of an `f`-type vector.
Parameters
----------
f : numpy.ndarray or LocalNumpyArray
The vector to operate on.
Returns
-------
float
"""
local_dot = _np.array(_np.dot(f, f))
local_dot.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_sum(result, local_dot,
unit_ralloc=self.layout.resource_alloc('atom-processing'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def norm2_jac(self, j):
"""
Compute the Frobenius norm squared of an Jacobian matrix (`ep`-type).
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
The Jacobian to operate on.
Returns
-------
float
"""
local_norm2 = _np.array(_np.linalg.norm(j)**2)
local_norm2.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_sum(result, local_norm2,
unit_ralloc=self.layout.resource_alloc('param-processing'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def norm2_jtj(self, jtj):
"""
Compute the Frobenius norm squared of an `jtj`-type matrix.
Parameters
----------
jtj : numpy.ndarray or LocalNumpyArray
The array to operate on.
Returns
-------
float
"""
local_norm2 = _np.array(_np.linalg.norm(jtj)**2)
local_norm2.shape = (1,) # for compatibility with allreduce_sum
result, result_shm = _smt.create_shared_ndarray(self.resource_alloc, (1,), 'd')
self.resource_alloc.allreduce_sum(result, local_norm2,
unit_ralloc=self.layout.resource_alloc('param-fine'))
ret = result[0] # "copies" the single returned element
self.resource_alloc.host_comm_barrier() # make sure we don't cleanup too quickly
_smt.cleanup_shared_ndarray(result_shm)
return ret
def fill_jtf(self, j, f, jtf):
"""
Compute dot(Jacobian.T, f) in supplied memory.
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
Jacobian matrix (type `ep`).
f : numpy.ndarray or LocalNumpyArray
Objective function vector (type `e`).
jtf : numpy.ndarray or LocalNumpyArray
Output array, type `jtf`. Filled with `dot(j.T, f)` values.
Returns
-------
None
"""
self.layout.fill_jtf(j, f, jtf)
def fill_jtj(self, j, jtj, shared_mem_buf=None):
"""
Compute dot(Jacobian.T, Jacobian) in supplied memory.
Parameters
----------
j : numpy.ndarray or LocalNumpyArray
Jacobian matrix (type `ep`).
        jtj : numpy.ndarray or LocalNumpyArray
Output array, type `jtj`. Filled with `dot(j.T, j)` values.
shared_mem_buf : tuple or None
Scratch space of shared memory used to speed up repeated calls to `fill_jtj`.
If not none, the value returned from :method:`allocate_jtj_shared_mem_buf`.
Returns
-------
None
"""
self.layout.fill_jtj(j, jtj, shared_mem_buf)
def allocate_jtj_shared_mem_buf(self):
"""
Allocate scratch space to be used for repeated calls to :method:`fill_jtj`.
Returns
-------
scratch : numpy.ndarray or None
The scratch array.
shared_memory_handle : multiprocessing.shared_memory.SharedMemory or None
The shared memory handle associated with `scratch`, which is needed to
free the memory.
"""
return self.layout._allocate_jtj_shared_mem_buf()
def deallocate_jtj_shared_mem_buf(self, jtj_buf):
"""
Frees the scratch memory allocated by :method:`allocate_jtj_shared_mem_buf`.
Parameters
----------
jtj_buf : tuple or None
The value returned from :method:`allocate_jtj_shared_mem_buf`
"""
buf, buf_shm = jtj_buf
_smt.cleanup_shared_ndarray(buf_shm)
def jtj_diag_indices(self, jtj):
"""
The indices into a `jtj`-type array that correspond to diagonal elements of the global matrix.
If `jtj` were a global quantity, then this would just be `numpy.diag_indices_from(jtj)`,
however, it may be more complicated in actuality when different processors hold different
sections of the global matrix.
Parameters
----------
jtj : numpy.ndarray or None
The `jtj`-type array to get the indices with respect to.
Returns
-------
tuple
A tuple of 1D arrays that can be used to index the elements of `jtj` that
correspond to diagonal elements of the global jtj matrix.
"""
global_param_indices = self.layout.global_param_fine_slice
row_indices = _np.arange(jtj.shape[0]) # row dimension is always smaller
col_indices = _np.arange(global_param_indices.start, global_param_indices.stop)
assert(len(row_indices) == len(col_indices)) # checks that global_param_indices is good
return row_indices, col_indices # ~ _np.diag_indices_from(jtj)
|
{"hexsha": "8c97d0b1f6b4218becbef090736450e4bd3dce9e", "size": 43114, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygsti/optimize/arraysinterface.py", "max_stars_repo_name": "pyGSTi-Developers/pyGSTi", "max_stars_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pygsti/optimize/arraysinterface.py", "max_issues_repo_name": "pyGSTi-Developers/pyGSTi", "max_issues_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygsti/optimize/arraysinterface.py", "max_forks_repo_name": "pyGSTi-Developers/pyGSTi", "max_forks_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9747832939, "max_line_length": 111, "alphanum_fraction": 0.5936586724, "include": true, "reason": "import numpy", "num_tokens": 9489}
|
#!/bin/python3
# Copyright (©) 2015-2016 Lucas Maugère, Thomas Mijieux
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class Levenshtein:
# with lists
def __init__(self, l1, l2):
self.l1 = [None]
self.l2 = [None]
self.l1.extend(l1)
self.l2.extend(l2)
self.m = np.zeros((len(self.l1), len(self.l2)))
self.compute()
#########################################################
def compute(self):
self.m[0][0] = 0
        for i in range(1, len(self.l1)):
            self.m[i][0] = i  # cost of deleting the first i elements of l1
        for j in range(1, len(self.l2)):
            self.m[0][j] = j  # cost of inserting the first j elements of l2
#
for i in range(1, len(self.l1)):
for j in range(1, len(self.l2)):
if self.l1[i] == self.l2[j]:
val = self.m[i-1][j-1] + 0
else:
val = self.m[i-1][j-1] + 1
self.m[i][j] = min(self.m[i-1][j] + 1,
self.m[i][j-1] + 1,
val)
#########################################################
def dist(self):
return self.m[len(self.l1)-1][len(self.l2)-1]
#########################################################
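if __name__ == "__main__":
    # Small self-check (illustrative): the classic pair below has edit distance 3.
    print(Levenshtein(list("kitten"), list("sitting")).dist())  # expected: 3.0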
|
{"hexsha": "61905875057adfe961f056cfebeb257f3a2cd515", "size": 1738, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/taikorank/test/levenshtein.py", "max_stars_repo_name": "tomtix/osux", "max_stars_repo_head_hexsha": "cf87171ffca9513c3a05e2156618b20cea4aef98", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2016-09-19T08:06:48.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-02T17:12:50.000Z", "max_issues_repo_path": "src/taikorank/test/levenshtein.py", "max_issues_repo_name": "tomtix/osux", "max_issues_repo_head_hexsha": "cf87171ffca9513c3a05e2156618b20cea4aef98", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/taikorank/test/levenshtein.py", "max_forks_repo_name": "tomtix/osux", "max_forks_repo_head_hexsha": "cf87171ffca9513c3a05e2156618b20cea4aef98", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7826086957, "max_line_length": 74, "alphanum_fraction": 0.4930955121, "include": true, "reason": "import numpy", "num_tokens": 438}
|
import random
import argparse
import os
import numpy as np
import timm
import torch
from torch.optim import Adam, AdamW, RMSprop, SGD
from torch.utils.data import DataLoader
from torchvision.datasets import *
import torchvision.transforms as transforms
import torchdata as td
from adamp import AdamP
from radam import RAdam
ICTH_PATH = "\\".join(os.path.abspath(__file__).split('\\')[:-2])
print(ICTH_PATH)
DATA_PATH = os.path.join(ICTH_PATH, 'data')
if "data" not in os.listdir(ICTH_PATH):
os.mkdir(DATA_PATH)
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-ds", "--dataset", type=str, default='cifar10', choices=[
                        'cifar10', 'cifar100'], help="dataset name in torchvision classification dataset")
parser.add_argument("-ep", "--epochs", type=int, default=10)
parser.add_argument("-bs", "--batch_size", type=int, default=32)
parser.add_argument("-lr", "--learning_rate", type=float, default=3e-4)
parser.add_argument("-wd", "--weight_decay", type=float, default=0)
parser.add_argument("-sd", "--scheduler", type=str,
default="", choices=["", "step", "cosine"])
parser.add_argument("-seed", "--seed", type=int, default=2020)
parser.add_argument("-md", "--model", type=str, default="efficientnet_b0",
choices=timm.list_models(), help="model name in timm models list")
parser.add_argument("-wt", "--warmup_type", type=str,
default="", choices=["", "linear", "exponential", 'radam'])
parser.add_argument("-ws", "--warmup_step", type=float, default=0.0)
parser.add_argument("-opt", "--optimizer", type=str,
default='adam', choices=['adam', 'adamw', 'rmsprop', 'sgd', 'radam', 'adamp'])
parser.add_argument("-rt", "--repeat_times", type=int, default=1,
help="how many times of training and testing will be repeated given a group of hyperparameters")
args = parser.parse_args()
dataset_dict = {
'cifar10': CIFAR10,
'cifar100': CIFAR100
}
opt_dict = {
'adam': Adam,
'adamw': AdamW,
'rmsprop': RMSprop,
'sgd': SGD,
'radam': RAdam,
'adamp': AdamP
}
args.dataset = dataset_dict[args.dataset]
args.optimizer = opt_dict[args.optimizer]
return args
def get_classes_num(args):
if args.dataset is CIFAR10:
return 10
elif args.dataset is CIFAR100:
return 100
def get_model(args):
num_classes = get_classes_num(args)
model = timm.create_model(args.model, num_classes=num_classes)
return model
def get_dataloader(args, train=True):
batch_size = args.batch_size
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset = td.datasets.WrapDataset(args.dataset(root=DATA_PATH, train=train, download=True,transform=transform)).cache(td.cachers.Memory())
    # pin_memory=True when RAM is sufficient
dataloader = DataLoader(
dataset, batch_size=batch_size, shuffle=train, pin_memory=True)
return dataloader
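if __name__ == "__main__":
    # Minimal wiring sketch (illustrative; the project's training script does the
    # real loop): parse hyperparameters, fix the seeds, then build the model, the
    # optimizer and the data loaders from the helpers above.
    args = get_args()
    seed_everything(args.seed)
    model = get_model(args)
    optimizer = args.optimizer(
        model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
    train_loader = get_dataloader(args, train=True)
    test_loader = get_dataloader(args, train=False)
    print(f"{args.model}: {get_classes_num(args)}-way classification, "
          f"optimizer={type(optimizer).__name__}, batch_size={args.batch_size}")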
|
{"hexsha": "6ae6e3aa9722fb3830b4e97d6947f48b9f0f43e9", "size": 3441, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "prismleong/ICTH", "max_stars_repo_head_hexsha": "297dfa829e878151d4c057438ff5dce9cde97d27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "prismleong/ICTH", "max_issues_repo_head_hexsha": "297dfa829e878151d4c057438ff5dce9cde97d27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "prismleong/ICTH", "max_forks_repo_head_hexsha": "297dfa829e878151d4c057438ff5dce9cde97d27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.41, "max_line_length": 142, "alphanum_fraction": 0.6535890729, "include": true, "reason": "import numpy", "num_tokens": 847}
|
# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail
import numpy as np
from npcomp.compiler import test_config
import_global = test_config.create_import_dump_decorator()
global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0],
(2, 1)))
# CHECK-LABEL: func @global_array_to_const
@import_global
def global_array_to_const():
# CHECK: %[[CST:.*]] = constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [2.000000e+00, 4.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
# CHECK: numpy.create_array_from_tensor %[[CST]] : (tensor<2x3xf64>) -> !numpy.ndarray<[2,3]:f64>
local_data = global_data
return local_data
|
{"hexsha": "1a6ad850a6dde72fca64f52a68f1beb786486daf", "size": 741, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/Python/NumpyCompiler/array_basics.py", "max_stars_repo_name": "marbre/mlir-npcomp", "max_stars_repo_head_hexsha": "30adf9e6b0c1e94db38050a9e143f20a5a461d17", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-03-30T23:31:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T01:17:58.000Z", "max_issues_repo_path": "test/Python/NumpyCompiler/array_basics.py", "max_issues_repo_name": "marbre/mlir-npcomp", "max_issues_repo_head_hexsha": "30adf9e6b0c1e94db38050a9e143f20a5a461d17", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-04-26T21:19:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-29T18:22:59.000Z", "max_forks_repo_path": "test/Python/NumpyCompiler/array_basics.py", "max_forks_repo_name": "marbre/mlir-npcomp", "max_forks_repo_head_hexsha": "30adf9e6b0c1e94db38050a9e143f20a5a461d17", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0, "max_line_length": 154, "alphanum_fraction": 0.632928475, "include": true, "reason": "import numpy", "num_tokens": 251}
|
#!/usr/bin/env python
''' Difference of Gaussians.
Usage: python dog.py [<video source>]
'''
import numpy as np
import cv2
import video
from common import nothing, getsize
n=0;
if __name__ == '__main__':
import sys
print __doc__
try:
fn = sys.argv[1]
except:
fn = 0
cap = video.create_capture(fn)
cv2.namedWindow("dog")
cv2.namedWindow("gaussian1")
cv2.namedWindow("gaussian2")
cv2.createTrackbar('Gaussian1','dog',1,21,nothing)
while True:
d1=cv2.getTrackbarPos('Gaussian1','dog')
if (d1==0):
# if the trackbar is set to 0 bang it back up to 1 as there's no such
# thing as a zero width gaussian
d1=1
cv2.setTrackbarPos('Gaussian1','dog',1)
d2=d1+2
# get an image
ret, frame = cap.read()
# greyscale it
grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
height,width=grey.shape
# blur it
grey_blur1 = cv2.GaussianBlur(grey, (2*d1+1, 2*d1+1), -1)[d1:-d1,d1:-d1]
grey_blur2 = cv2.GaussianBlur(grey, (2*d2+1, 2*d2+1), -1)[d2:-d2,d2:-d2]
#make sure they're the same size
grey_blur2= cv2.resize(grey_blur2, (width, height))
grey_blur1= cv2.resize(grey_blur1, (width, height))
#rescale
grey_blur1=cv2.convertScaleAbs(grey_blur1);
grey_blur2=cv2.convertScaleAbs(grey_blur2);
        #actually do the Difference of Gaussians - take one blurred image away
#from the other.
out=grey_blur1-grey_blur2
        #uncomment the next few lines and edit a bit if you want to save any of the
# images
#fn="out/dog"+str(n).rjust(4,'0')+".png"
#cv2.imwrite(fn,out);
#fn="out/gone"+str(n).rjust(4,'0')+".png"
#cv2.imwrite(fn,grey_blur1);
#fn="out/gtwo"+str(n).rjust(4,'0')+".png"
#cv2.imwrite(fn,grey_blur2);
#n+=1
#image show stuff
cv2.imshow('dog', out)
cv2.imshow('gaussian1', grey_blur1)
cv2.imshow('gaussian2', grey_blur2)
if cv2.waitKey(1) == 27:
break
|
{"hexsha": "773bcf2b011afff1ed7d321c39130b723c39ab8a", "size": 2039, "ext": "py", "lang": "Python", "max_stars_repo_path": "dog.py", "max_stars_repo_name": "elliots41/python-vision", "max_stars_repo_head_hexsha": "5c29f1196f9ce8ce81a1e09f08ad42cb1364bb7a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dog.py", "max_issues_repo_name": "elliots41/python-vision", "max_issues_repo_head_hexsha": "5c29f1196f9ce8ce81a1e09f08ad42cb1364bb7a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dog.py", "max_forks_repo_name": "elliots41/python-vision", "max_forks_repo_head_hexsha": "5c29f1196f9ce8ce81a1e09f08ad42cb1364bb7a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.141025641, "max_line_length": 80, "alphanum_fraction": 0.6105934282, "include": true, "reason": "import numpy", "num_tokens": 664}
|
'''
This module fits a parameterized function for a SNIa light-curve with one
or two peaks. Taken from M. Stritzinger's PhD thesis, which was adapted
from Contardo, G., Leibundgut, B., & Vacca, W. D. 2000, A&A, 359, 876.
'''
from scipy.optimize import leastsq,brentq
from numpy import *
def Ialcn(t, par, n):
m0,gamma,tau,theta,t0 = par[0:5]
num = m0 + gamma*(t - t0)
denom = 1 - exp((tau-t)/theta**2)
for i in range(n):
ti,gi,sigi = par[4+i*3:7+i*3]
num -= gi**2*exp(-0.5*power(t-ti,2)/sigi**2)
return(num/denom)
def d_Ialcn_dt(t, par, n):
m0,gamma,tau,theta = par[0:4]
num = gamma
ex = exp((tau - t)/theta**2)
for i in range(n):
ti,gi,sigi = par[4+i*3:7+i*3]
num -= gi*exp(-0.5*power(t-ti,2)/sigi**2)/sigi**2*(t-ti)
num -= ex/theta**2*Ialcn(t, par,n)
return (num/(1-ex))
def d_Ialcn_dp(par, t, y, dy, n):
m0,gamma,tau,theta,t0 = par[0:5]
ex = exp((tau - t)/theta**2)
jac = zeros((len(par), len(t)), dtype=float32)
jac[0,:] = 1.0/(1 - ex)
jac[1,:] = jac[0,:]*(t - t0)
jac[2,:] = Ialcn(t, par, n)/theta**2/(1-ex)*ex
jac[3,:] = -2*jac[2,:]*(tau-t)/theta
for i in range(n):
ti,gi,sigi = par[4+i*3:7+i*3]
Gi = gi*exp(-0.5*power(t-ti,2)/sigi**2)
jac[4+i*3,:] = jac[0,:]*(Gi/sigi**2*(t - ti) - gamma)
jac[5+i*3,:] = jac[0,:]*Gi/gi
jac[6+i*3,:] = jac[0,:]*Gi/sigi**3*power(t-ti,2)
return jac
def wrap_Ialcn(p, x, y, dy, n):
return((Ialcn(x, p, n)-y)/dy)
def guess_parsn(t, n, mag, Tmax=None):
p = [0]*(7+3*(n-1))
if Tmax is None:
id = argmin(mag)
p[0] = mag[id]
p[4] = t[id]
else:
p[0] = mag[argmin(absolute(t-Tmax))]
p[4] = Tmax
p[1] = (p[0] - mag[-1])/(p[4] - t[-1])
p[2] = p[4] - 100
p[3] = 3.0
p[5] = -1.0
p[6] = 10.0
dt = (t[-1] - p[4])/n
for i in range(n-1):
p[7+i*3] = p[4]+(i+1)*dt
p[8+i*3] = -1.0
p[9+i*3] = 10.0
return p
def fit_lc(t, mag, e_mag, ngauss=1, maxiter=10000, p0=None, Tmax=None):
'''Fit a light-curve to the parameterized model. t = time, mag = magnitudes,
e_mag = error in magnitudes. If ngauss=1, fit a single-peaked LC, if ngauss=2,
fit a 2-peaked one.'''
if p0 is None:
p0 = guess_parsn(t, ngauss, mag, Tmax)
par,cov,info,mesg,ier = leastsq(wrap_Ialcn, p0,
args=(t, mag, e_mag, ngauss), full_output=1, maxfev=maxiter,
Dfun=d_Ialcn_dp, col_deriv=1)
return(par,cov,info,mesg,ier)
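if __name__ == '__main__':
    # Minimal self-test sketch (illustrative only, not part of the package):
    # generate a noisy single-peaked light-curve from the model itself and fit
    # it back.  The parameter values are arbitrary but keep the denominator of
    # Ialcn well away from zero over the chosen epoch range.
    t = arange(-10., 80., 1.)
    true_par = [18., 0.03, -80., 3., 0., -2., 10.]
    mag = Ialcn(t, true_par, 1) + random.normal(0., 0.02, len(t))
    e_mag = 0.02 * ones(len(t))
    par, cov, info, mesg, ier = fit_lc(t, mag, e_mag, ngauss=1, Tmax=0.)
    print("ier (1-4 means success):", ier)
    print("fitted parameters:", par)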
|
{"hexsha": "d9c80b140ee1cec29fc364de4e3caea7f0322e77", "size": 2451, "ext": "py", "lang": "Python", "max_stars_repo_path": "snpy/utils/fit_lc.py", "max_stars_repo_name": "emirkmo/snpy", "max_stars_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-01-14T19:40:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-05T12:19:39.000Z", "max_issues_repo_path": "snpy/utils/fit_lc.py", "max_issues_repo_name": "emirkmo/snpy", "max_issues_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-04-25T20:06:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-09T20:46:41.000Z", "max_forks_repo_path": "snpy/utils/fit_lc.py", "max_forks_repo_name": "emirkmo/snpy", "max_forks_repo_head_hexsha": "2a0153c84477ba8a30310d7dbca3d5a8f24de3c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2017-04-25T19:57:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T11:54:19.000Z", "avg_line_length": 28.5, "max_line_length": 82, "alphanum_fraction": 0.5467156263, "include": true, "reason": "from numpy,from scipy", "num_tokens": 1045}
|
using Revise
using CSV
using Geodesy
using Dates
using Plots
using StatsPlots
using MinimalRides
using MinimalRides: Pos, load_animal_data
filename = "/media/win/Data/Arctic fox Bylot - GPS tracking.csv"
tracks = load_animal_data(filename);
@assert length(tracks) == 20
length(tracks[1].route)
x = tracks[1].route[3].ts - tracks[1].route[2].ts
arr = map(2:length(tracks[1].route)) do i
ts2 = tracks[1].route[i].ts
ts1 = tracks[1].route[i - 1].ts
p2 = tracks[1].route[i].ecef
p1 = tracks[1].route[i-1].ecef
return (seconds = Second(ts2 - ts1).value, dist = distance(p2, p1))
end
sum(x -> x.dist == 0., arr)
findall(x -> x.dist == 0., arr)
findall(x -> x.seconds > 1000., arr)
arr[5590:5610]
x = getfield.(tracks[1].route[2000:4000], :ts)
y = getfield.(arr[2000:4000], :dist)
flt = findall(x -> x > 20.0, y)
plot(x[flt], y[flt])
density(getfield.(arr[2000:4000], :dist))
histogram(getfield.(arr[2000:4000], :dist), bins = 1000)
filter(x -> Date(x.ts) == Date("2019-06-11"), tracks[1].route) |> length
minimum(getfield.(arr[1:349], :dist))
maximum(getfield.(arr[1:349], :seconds))
minimum(getfield.(arr[350:350+357], :dist))
maximum(getfield.(arr[350:350+357], :seconds))
plot(map(x -> (x.ll.lat, x.ll.lon), tracks[1].route))
plot!(map(x -> (x.ll.lat, x.ll.lon), tracks[2].route))
plot!(map(x -> (x.ll.lat, x.ll.lon), tracks[3].route))
plot!(map(x -> (x.ll.lat, x.ll.lon), tracks[4].route))
plot!(map(x -> (x.ll.lat, x.ll.lon), tracks[5].route))
plot!(map(x -> (x.ll.lat, x.ll.lon), tracks[6].route))
names = unique(map(x -> x.info.name_id, tracks))
for name in names
println(name, ": ", filter(x -> x.info.name_id == name, tracks) |> length)
end
filter(x -> x.info.name_id == "BVOB", tracks)
data = CSV.File(filename);
data[1][Symbol("location-long")]
data[1][Symbol("location-lat")]
DateTime(data[1].timestamp[1:19], "yyyy-mm-dd HH:MM:SS")
data[1]
sort(data, by = x -> (x[Symbol("individual-local-identifier")], x.timestamp))
data[1]
Pos(data[1])
Pos(data[1]).ecef
|
{"hexsha": "bf7150bb65bf6f217ca0c9b8dff694edb7d02f8a", "size": 2001, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "notebooks/AnimalExperiments.jl", "max_stars_repo_name": "Arkoniak/MinimalRides.jl", "max_stars_repo_head_hexsha": "e842094242dffd8f9a97ad2b5a835f37105116bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-31T21:32:09.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-31T21:32:09.000Z", "max_issues_repo_path": "notebooks/AnimalExperiments.jl", "max_issues_repo_name": "Arkoniak/MinimalRides.jl", "max_issues_repo_head_hexsha": "e842094242dffd8f9a97ad2b5a835f37105116bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-11-01T00:05:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-28T00:06:35.000Z", "max_forks_repo_path": "notebooks/AnimalExperiments.jl", "max_forks_repo_name": "Arkoniak/MinimalRides.jl", "max_forks_repo_head_hexsha": "e842094242dffd8f9a97ad2b5a835f37105116bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-31T21:42:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-31T21:42:57.000Z", "avg_line_length": 29.0, "max_line_length": 78, "alphanum_fraction": 0.6511744128, "num_tokens": 691}
|
import tensorflow as tf
import numpy as np
import re
from baselines.acktr.kfac_utils import *
from functools import reduce
KFAC_OPS = ['MatMul', 'Conv2D', 'BiasAdd']
KFAC_DEBUG = False
class KfacOptimizer():
def __init__(self, learning_rate=0.01, momentum=0.9, clip_kl=0.01, kfac_update=2, stats_accum_iter=60,
full_stats_init=False, cold_iter=100, cold_lr=None, async_=False, async_stats=False, epsilon=1e-2,
stats_decay=0.95, blockdiag_bias=False, channel_fac=False, factored_damping=False, approxT2=False,
use_float64=False, weight_decay_dict={}, max_grad_norm=0.5):
self.max_grad_norm = max_grad_norm
self._lr = learning_rate
self._momentum = momentum
self._clip_kl = clip_kl
self._channel_fac = channel_fac
self._kfac_update = kfac_update
self._async = async_
self._async_stats = async_stats
self._epsilon = epsilon
self._stats_decay = stats_decay
self._blockdiag_bias = blockdiag_bias
self._approxT2 = approxT2
self._use_float64 = use_float64
self._factored_damping = factored_damping
self._cold_iter = cold_iter
if cold_lr == None:
# good heuristics
self._cold_lr = self._lr # * 3.
else:
self._cold_lr = cold_lr
self._stats_accum_iter = stats_accum_iter
self._weight_decay_dict = weight_decay_dict
self._diag_init_coeff = 0.
self._full_stats_init = full_stats_init
if not self._full_stats_init:
self._stats_accum_iter = self._cold_iter
self.sgd_step = tf.Variable(0, name='KFAC/sgd_step', trainable=False)
self.global_step = tf.Variable(
0, name='KFAC/global_step', trainable=False)
self.cold_step = tf.Variable(0, name='KFAC/cold_step', trainable=False)
self.factor_step = tf.Variable(
0, name='KFAC/factor_step', trainable=False)
self.stats_step = tf.Variable(
0, name='KFAC/stats_step', trainable=False)
self.vFv = tf.Variable(0., name='KFAC/vFv', trainable=False)
self.factors = {}
self.param_vars = []
self.stats = {}
self.stats_eigen = {}
def getFactors(self, g, varlist):
graph = tf.get_default_graph()
factorTensors = {}
fpropTensors = []
bpropTensors = []
opTypes = []
fops = []
def searchFactors(gradient, graph):
            # hard-coded search strategy
bpropOp = gradient.op
bpropOp_name = bpropOp.name
bTensors = []
fTensors = []
            # combining additive gradients; assume they are the same op type and
            # independent
if 'AddN' in bpropOp_name:
factors = []
for g in gradient.op.inputs:
factors.append(searchFactors(g, graph))
op_names = [item['opName'] for item in factors]
# TO-DO: need to check all the attribute of the ops as well
print(gradient.name)
print(op_names)
print(len(np.unique(op_names)))
assert len(np.unique(op_names)) == 1, gradient.name + \
' is shared among different computation OPs'
bTensors = reduce(lambda x, y: x + y,
[item['bpropFactors'] for item in factors])
if len(factors[0]['fpropFactors']) > 0:
fTensors = reduce(
lambda x, y: x + y, [item['fpropFactors'] for item in factors])
fpropOp_name = op_names[0]
fpropOp = factors[0]['op']
else:
fpropOp_name = re.search(
'gradientsSampled(_[0-9]+|)/(.+?)_grad', bpropOp_name).group(2)
fpropOp = graph.get_operation_by_name(fpropOp_name)
if fpropOp.op_def.name in KFAC_OPS:
# Known OPs
###
bTensor = [
i for i in bpropOp.inputs if 'gradientsSampled' in i.name][-1]
bTensorShape = fpropOp.outputs[0].get_shape()
if bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
###
if fpropOp.op_def.name == 'BiasAdd':
fTensors = []
else:
fTensors.append(
[i for i in fpropOp.inputs if param.op.name not in i.name][0])
fpropOp_name = fpropOp.op_def.name
else:
# unknown OPs, block approximation used
bInputsList = [i for i in bpropOp.inputs[
0].op.inputs if 'gradientsSampled' in i.name if 'Shape' not in i.name]
if len(bInputsList) > 0:
bTensor = bInputsList[0]
bTensorShape = fpropOp.outputs[0].get_shape()
if len(bTensor.get_shape()) > 0 and bTensor.get_shape()[0].value == None:
bTensor.set_shape(bTensorShape)
bTensors.append(bTensor)
fpropOp_name = opTypes.append('UNK-' + fpropOp.op_def.name)
return {'opName': fpropOp_name, 'op': fpropOp, 'fpropFactors': fTensors, 'bpropFactors': bTensors}
for t, param in zip(g, varlist):
if KFAC_DEBUG:
print(('get factor for ' + param.name))
factors = searchFactors(t, graph)
factorTensors[param] = factors
########
# check associated weights and bias for homogeneous coordinate representation
# and check redundent factors
# TO-DO: there may be a bug to detect associate bias and weights for
# forking layer, e.g. in inception models.
for param in varlist:
factorTensors[param]['assnWeights'] = None
factorTensors[param]['assnBias'] = None
for param in varlist:
if factorTensors[param]['opName'] == 'BiasAdd':
factorTensors[param]['assnWeights'] = None
for item in varlist:
if len(factorTensors[item]['bpropFactors']) > 0:
if (set(factorTensors[item]['bpropFactors']) == set(factorTensors[param]['bpropFactors'])) and (
len(factorTensors[item]['fpropFactors']) > 0):
factorTensors[param]['assnWeights'] = item
factorTensors[item]['assnBias'] = param
factorTensors[param]['bpropFactors'] = factorTensors[
item]['bpropFactors']
########
########
# concatenate the additive gradients along the batch dimension, i.e.
# assuming independence structure
for key in ['fpropFactors', 'bpropFactors']:
for i, param in enumerate(varlist):
if len(factorTensors[param][key]) > 0:
if (key + '_concat') not in factorTensors[param]:
name_scope = factorTensors[param][key][0].name.split(':')[
0]
with tf.name_scope(name_scope):
factorTensors[param][
key + '_concat'] = tf.concat(factorTensors[param][key], 0)
else:
factorTensors[param][key + '_concat'] = None
for j, param2 in enumerate(varlist[(i + 1):]):
if (len(factorTensors[param][key]) > 0) and (
set(factorTensors[param2][key]) == set(factorTensors[param][key])):
factorTensors[param2][key] = factorTensors[param][key]
factorTensors[param2][
key + '_concat'] = factorTensors[param][key + '_concat']
########
if KFAC_DEBUG:
for items in zip(varlist, fpropTensors, bpropTensors, opTypes):
                print((items[0].name, factorTensors[items[0]]))
self.factors = factorTensors
return factorTensors
def getStats(self, factors, varlist):
if len(self.stats) == 0:
# initialize stats variables on CPU because eigen decomp is
# computed on CPU
with tf.device('/cpu'):
tmpStatsCache = {}
# search for tensor factors and
# use block diag approx for the bias units
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels do not support
# homogeneous coordinate
var_assnBias = factors[var]['assnBias']
if var_assnBias:
factors[var]['assnBias'] = None
factors[var_assnBias]['assnWeights'] = None
##
for var in varlist:
fpropFactor = factors[var]['fpropFactors_concat']
bpropFactor = factors[var]['bpropFactors_concat']
opType = factors[var]['opName']
self.stats[var] = {'opName': opType,
'fprop_concat_stats': [],
'bprop_concat_stats': [],
'assnWeights': factors[var]['assnWeights'],
'assnBias': factors[var]['assnBias'],
}
if fpropFactor is not None:
if fpropFactor not in tmpStatsCache:
if opType == 'Conv2D':
Kh = var.get_shape()[0]
Kw = var.get_shape()[1]
C = fpropFactor.get_shape()[-1]
Oh = bpropFactor.get_shape()[1]
Ow = bpropFactor.get_shape()[2]
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence between input channels and spatial
# 2K-1 x 2K-1 covariance matrix and C x C covariance matrix
# factorization along the channels do not
# support homogeneous coordinate, assnBias
# is always None
fpropFactor2_size = Kh * Kw
slot_fpropFactor_stats2 = tf.Variable(tf.diag(tf.ones(
[fpropFactor2_size])) * self._diag_init_coeff,
name='KFAC_STATS/' + fpropFactor.op.name,
trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats2)
fpropFactor_size = C
else:
# 2K-1 x 2K-1 x C x C covariance matrix
# assume BHWC
fpropFactor_size = Kh * Kw * C
else:
# D x D covariance matrix
fpropFactor_size = fpropFactor.get_shape()[-1]
# use homogeneous coordinate
if not self._blockdiag_bias and self.stats[var]['assnBias']:
fpropFactor_size += 1
slot_fpropFactor_stats = tf.Variable(tf.diag(tf.ones(
[fpropFactor_size])) * self._diag_init_coeff, name='KFAC_STATS/' + fpropFactor.op.name,
trainable=False)
self.stats[var]['fprop_concat_stats'].append(
slot_fpropFactor_stats)
if opType != 'Conv2D':
tmpStatsCache[fpropFactor] = self.stats[
var]['fprop_concat_stats']
else:
self.stats[var][
'fprop_concat_stats'] = tmpStatsCache[fpropFactor]
if bpropFactor is not None:
# no need to collect backward stats for bias vectors if
# using homogeneous coordinates
if not ((not self._blockdiag_bias) and self.stats[var]['assnWeights']):
if bpropFactor not in tmpStatsCache:
slot_bpropFactor_stats = tf.Variable(tf.diag(tf.ones([bpropFactor.get_shape(
)[-1]])) * self._diag_init_coeff, name='KFAC_STATS/' + bpropFactor.op.name,
trainable=False)
self.stats[var]['bprop_concat_stats'].append(
slot_bpropFactor_stats)
tmpStatsCache[bpropFactor] = self.stats[
var]['bprop_concat_stats']
else:
self.stats[var][
'bprop_concat_stats'] = tmpStatsCache[bpropFactor]
return self.stats
def compute_and_apply_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
stats = self.compute_stats(loss_sampled, var_list=varlist)
return self.apply_stats(stats)
def compute_stats(self, loss_sampled, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
gs = tf.gradients(loss_sampled, varlist, name='gradientsSampled')
self.gs = gs
factors = self.getFactors(gs, varlist)
stats = self.getStats(factors, varlist)
updateOps = []
statsUpdates = {}
statsUpdates_cache = {}
for var in varlist:
opType = factors[var]['opName']
fops = factors[var]['op']
fpropFactor = factors[var]['fpropFactors_concat']
fpropStats_vars = stats[var]['fprop_concat_stats']
bpropFactor = factors[var]['bpropFactors_concat']
bpropStats_vars = stats[var]['bprop_concat_stats']
SVD_factors = {}
for stats_var in fpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_fpropFactor = fpropFactor
B = (tf.shape(fpropFactor)[0]) # batch size
if opType == 'Conv2D':
strides = fops.get_attr("strides")
padding = fops.get_attr("padding")
convkernel_size = var.get_shape()[0:3]
KH = int(convkernel_size[0])
KW = int(convkernel_size[1])
C = int(convkernel_size[2])
flatten_size = int(KH * KW * C)
Oh = int(bpropFactor.get_shape()[1])
Ow = int(bpropFactor.get_shape()[2])
if Oh == 1 and Ow == 1 and self._channel_fac:
# factorization along the channels
# assume independence among input channels
# factor = B x 1 x 1 x (KH xKW x C)
# patches = B x Oh x Ow x (KH xKW x C)
if len(SVD_factors) == 0:
if KFAC_DEBUG:
print(('approx %s act factor with rank-1 SVD factors' % (var.name)))
# find closest rank-1 approx to the feature map
S, U, V = tf.batch_svd(tf.reshape(
fpropFactor, [-1, KH * KW, C]))
# get rank-1 approx slides
sqrtS1 = tf.expand_dims(tf.sqrt(S[:, 0, 0]), 1)
patches_k = U[:, :, 0] * sqrtS1 # B x KH*KW
full_factor_shape = fpropFactor.get_shape()
patches_k.set_shape(
[full_factor_shape[0], KH * KW])
patches_c = V[:, :, 0] * sqrtS1 # B x C
patches_c.set_shape([full_factor_shape[0], C])
SVD_factors[C] = patches_c
SVD_factors[KH * KW] = patches_k
fpropFactor = SVD_factors[stats_var_dim]
else:
# poor mem usage implementation
patches = tf.extract_image_patches(fpropFactor, ksizes=[1, convkernel_size[
0], convkernel_size[1], 1], strides=strides, rates=[1, 1, 1, 1], padding=padding)
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 act fisher for %s' % (var.name)))
# T^2 terms * 1/T^2, size: B x C
fpropFactor = tf.reduce_mean(patches, [1, 2])
else:
# size: (B x Oh x Ow) x C
fpropFactor = tf.reshape(
patches, [-1, flatten_size]) / Oh / Ow
fpropFactor_size = int(fpropFactor.get_shape()[-1])
if stats_var_dim == (fpropFactor_size + 1) and not self._blockdiag_bias:
if opType == 'Conv2D' and not self._approxT2:
# correct padding for numerical stability (we
# divided out OhxOw from activations for T1 approx)
fpropFactor = tf.concat([fpropFactor, tf.ones(
[tf.shape(fpropFactor)[0], 1]) / Oh / Ow], 1)
else:
# use homogeneous coordinates
fpropFactor = tf.concat(
[fpropFactor, tf.ones([tf.shape(fpropFactor)[0], 1])], 1)
# average over the number of data points in a batch
# divided by B
cov = tf.matmul(fpropFactor, fpropFactor,
transpose_a=True) / tf.cast(B, tf.float32)
updateOps.append(cov)
statsUpdates[stats_var] = cov
if opType != 'Conv2D':
# HACK: for convolution we recompute fprop stats for
# every layer including forking layers
statsUpdates_cache[stats_var] = cov
for stats_var in bpropStats_vars:
stats_var_dim = int(stats_var.get_shape()[0])
if stats_var not in statsUpdates_cache:
old_bpropFactor = bpropFactor
bpropFactor_shape = bpropFactor.get_shape()
B = tf.shape(bpropFactor)[0] # batch size
C = int(bpropFactor_shape[-1]) # num channels
if opType == 'Conv2D' or len(bpropFactor_shape) == 4:
if fpropFactor is not None:
if self._approxT2:
if KFAC_DEBUG:
print(('approxT2 grad fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(
bpropFactor, [1, 2]) # T^2 terms * 1/T^2
else:
bpropFactor = tf.reshape(
bpropFactor, [-1, C]) * Oh * Ow # T * 1/T terms
else:
# just doing block diag approx. spatial independent
# structure does not apply here. summing over
# spatial locations
if KFAC_DEBUG:
print(('block diag approx fisher for %s' % (var.name)))
bpropFactor = tf.reduce_sum(bpropFactor, [1, 2])
# assume sampled loss is averaged. TO-DO:figure out better
# way to handle this
bpropFactor *= tf.to_float(B)
##
cov_b = tf.matmul(
bpropFactor, bpropFactor, transpose_a=True) / tf.to_float(tf.shape(bpropFactor)[0])
updateOps.append(cov_b)
statsUpdates[stats_var] = cov_b
statsUpdates_cache[stats_var] = cov_b
if KFAC_DEBUG:
aKey = list(statsUpdates.keys())[0]
statsUpdates[aKey] = tf.Print(statsUpdates[aKey],
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor(
'computing stats'),
])
self.statsUpdates = statsUpdates
return statsUpdates
def apply_stats(self, statsUpdates):
""" compute stats and update/apply the new stats to the running average
"""
def updateAccumStats():
if self._full_stats_init:
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), lambda: tf.group(
*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter)),
tf.no_op)
else:
return tf.group(
*self._apply_stats(statsUpdates, accumulate=True, accumulateCoeff=1. / self._stats_accum_iter))
def updateRunningAvgStats(statsUpdates, fac_iter=1):
# return tf.cond(tf.greater_equal(self.factor_step,
# tf.convert_to_tensor(fac_iter)), lambda:
# tf.group(*self._apply_stats(stats_list, varlist)), tf.no_op)
return tf.group(*self._apply_stats(statsUpdates))
if self._async_stats:
# asynchronous stats update
update_stats = self._apply_stats(statsUpdates)
queue = tf.FIFOQueue(1, [item.dtype for item in update_stats], shapes=[
item.get_shape() for item in update_stats])
enqueue_op = queue.enqueue(update_stats)
def dequeue_stats_op():
return queue.dequeue()
self.qr_stats = tf.train.QueueRunner(queue, [enqueue_op])
update_stats_op = tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(
0)), tf.no_op, lambda: tf.group(*[dequeue_stats_op(), ]))
else:
# synchronous stats update
update_stats_op = tf.cond(tf.greater_equal(
self.stats_step, self._stats_accum_iter), lambda: updateRunningAvgStats(statsUpdates), updateAccumStats)
self._update_stats_op = update_stats_op
return update_stats_op
def _apply_stats(self, statsUpdates, accumulate=False, accumulateCoeff=0.):
updateOps = []
# obtain the stats var list
for stats_var in statsUpdates:
stats_new = statsUpdates[stats_var]
if accumulate:
# simple superbatch averaging
update_op = tf.assign_add(
stats_var, accumulateCoeff * stats_new, use_locking=True)
else:
# exponential running averaging
update_op = tf.assign(
stats_var, stats_var * self._stats_decay, use_locking=True)
update_op = tf.assign_add(
update_op, (1. - self._stats_decay) * stats_new, use_locking=True)
updateOps.append(update_op)
with tf.control_dependencies(updateOps):
stats_step_op = tf.assign_add(self.stats_step, 1)
if KFAC_DEBUG:
stats_step_op = (tf.Print(stats_step_op,
[tf.convert_to_tensor('step:'),
self.global_step,
tf.convert_to_tensor('fac step:'),
self.factor_step,
tf.convert_to_tensor('sgd step:'),
self.sgd_step,
tf.convert_to_tensor('Accum:'),
tf.convert_to_tensor(accumulate),
tf.convert_to_tensor('Accum coeff:'),
tf.convert_to_tensor(accumulateCoeff),
tf.convert_to_tensor('stat step:'),
self.stats_step, updateOps[0], updateOps[1]]))
return [stats_step_op, ]
def getStatsEigen(self, stats=None):
if len(self.stats_eigen) == 0:
stats_eigen = {}
if stats is None:
stats = self.stats
tmpEigenCache = {}
with tf.device('/cpu:0'):
for var in stats:
for key in ['fprop_concat_stats', 'bprop_concat_stats']:
for stats_var in stats[var][key]:
if stats_var not in tmpEigenCache:
stats_dim = stats_var.get_shape()[1].value
e = tf.Variable(tf.ones(
[stats_dim]), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/e',
trainable=False)
Q = tf.Variable(tf.diag(tf.ones(
[stats_dim])), name='KFAC_FAC/' + stats_var.name.split(':')[0] + '/Q',
trainable=False)
stats_eigen[stats_var] = {'e': e, 'Q': Q}
tmpEigenCache[
stats_var] = stats_eigen[stats_var]
else:
stats_eigen[stats_var] = tmpEigenCache[
stats_var]
self.stats_eigen = stats_eigen
return self.stats_eigen
def computeStatsEigen(self):
""" compute the eigen decomp using copied var stats to avoid concurrent read/write from other queue """
# TO-DO: figure out why this op has delays (possibly moving
# eigenvectors around?)
with tf.device('/cpu:0'):
def removeNone(tensor_list):
local_list = []
for item in tensor_list:
if item is not None:
local_list.append(item)
return local_list
def copyStats(var_list):
print("copying stats to buffer tensors before eigen decomp")
redundant_stats = {}
copied_list = []
for item in var_list:
if item is not None:
if item not in redundant_stats:
if self._use_float64:
redundant_stats[item] = tf.cast(
tf.identity(item), tf.float64)
else:
redundant_stats[item] = tf.identity(item)
copied_list.append(redundant_stats[item])
else:
copied_list.append(None)
return copied_list
# stats = [copyStats(self.fStats), copyStats(self.bStats)]
# stats = [self.fStats, self.bStats]
stats_eigen = self.stats_eigen
computedEigen = {}
eigen_reverse_lookup = {}
updateOps = []
# sync copied stats
# with tf.control_dependencies(removeNone(stats[0]) +
# removeNone(stats[1])):
with tf.control_dependencies([]):
for stats_var in stats_eigen:
if stats_var not in computedEigen:
eigens = tf.self_adjoint_eig(stats_var)
e = eigens[0]
Q = eigens[1]
if self._use_float64:
e = tf.cast(e, tf.float32)
Q = tf.cast(Q, tf.float32)
updateOps.append(e)
updateOps.append(Q)
computedEigen[stats_var] = {'e': e, 'Q': Q}
eigen_reverse_lookup[e] = stats_eigen[stats_var]['e']
eigen_reverse_lookup[Q] = stats_eigen[stats_var]['Q']
self.eigen_reverse_lookup = eigen_reverse_lookup
self.eigen_update_list = updateOps
if KFAC_DEBUG:
self.eigen_update_list = [item for item in updateOps]
with tf.control_dependencies(updateOps):
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('computed factor eigen')]))
return updateOps
def applyStatsEigen(self, eigen_list):
updateOps = []
print(('updating %d eigenvalue/vectors' % len(eigen_list)))
for i, (tensor, mark) in enumerate(zip(eigen_list, self.eigen_update_list)):
stats_eigen_var = self.eigen_reverse_lookup[mark]
updateOps.append(
tf.assign(stats_eigen_var, tensor, use_locking=True))
with tf.control_dependencies(updateOps):
factor_step_op = tf.assign_add(self.factor_step, 1)
updateOps.append(factor_step_op)
if KFAC_DEBUG:
updateOps.append(tf.Print(tf.constant(
0.), [tf.convert_to_tensor('updated kfac factors')]))
return updateOps
def getKfacPrecondUpdates(self, gradlist, varlist):
updatelist = []
vg = 0.
assert len(self.stats) > 0
assert len(self.stats_eigen) > 0
assert len(self.factors) > 0
counter = 0
grad_dict = {var: grad for grad, var in zip(gradlist, varlist)}
for grad, var in zip(gradlist, varlist):
GRAD_RESHAPE = False
GRAD_TRANSPOSE = False
fpropFactoredFishers = self.stats[var]['fprop_concat_stats']
bpropFactoredFishers = self.stats[var]['bprop_concat_stats']
if (len(fpropFactoredFishers) + len(bpropFactoredFishers)) > 0:
counter += 1
GRAD_SHAPE = grad.get_shape()
if len(grad.get_shape()) > 2:
# reshape conv kernel parameters
KW = int(grad.get_shape()[0])
KH = int(grad.get_shape()[1])
C = int(grad.get_shape()[2])
D = int(grad.get_shape()[3])
if len(fpropFactoredFishers) > 1 and self._channel_fac:
# reshape conv kernel parameters into tensor
grad = tf.reshape(grad, [KW * KH, C, D])
else:
# reshape conv kernel parameters into 2D grad
grad = tf.reshape(grad, [-1, D])
GRAD_RESHAPE = True
elif len(grad.get_shape()) == 1:
# reshape bias or 1D parameters
D = int(grad.get_shape()[0])
grad = tf.expand_dims(grad, 0)
GRAD_RESHAPE = True
else:
# 2D parameters
C = int(grad.get_shape()[0])
D = int(grad.get_shape()[1])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# stack bias grad
var_assnBias = self.stats[var]['assnBias']
grad = tf.concat(
[grad, tf.expand_dims(grad_dict[var_assnBias], 0)], 0)
# project gradient to eigen space and reshape the eigenvalues
# for broadcasting
eigVals = []
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='act', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='act')
eigVals.append(e)
grad = gmatmul(Q, grad, transpose_a=True, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
e = detectMinVal(self.stats_eigen[stats][
'e'], var, name='grad', debug=KFAC_DEBUG)
Q, e = factorReshape(Q, e, grad, facIndx=idx, ftype='grad')
eigVals.append(e)
grad = gmatmul(grad, Q, transpose_b=False, reduce_dim=idx)
##
#####
# whiten using eigenvalues
weightDecayCoeff = 0.
if var in self._weight_decay_dict:
weightDecayCoeff = self._weight_decay_dict[var]
if KFAC_DEBUG:
print(('weight decay coeff for %s is %f' % (var.name, weightDecayCoeff)))
if self._factored_damping:
if KFAC_DEBUG:
print(('use factored damping for %s' % (var.name)))
coeffs = 1.
num_factors = len(eigVals)
# compute the ratio of two trace norm of the left and right
# KFac matrices, and their generalization
if len(eigVals) == 1:
damping = self._epsilon + weightDecayCoeff
else:
damping = tf.pow(
self._epsilon + weightDecayCoeff, 1. / num_factors)
eigVals_tnorm_avg = [tf.reduce_mean(
tf.abs(e)) for e in eigVals]
for e, e_tnorm in zip(eigVals, eigVals_tnorm_avg):
eig_tnorm_negList = [
item for item in eigVals_tnorm_avg if item != e_tnorm]
if len(eigVals) == 1:
adjustment = 1.
elif len(eigVals) == 2:
adjustment = tf.sqrt(
e_tnorm / eig_tnorm_negList[0])
else:
eig_tnorm_negList_prod = reduce(
lambda x, y: x * y, eig_tnorm_negList)
adjustment = tf.pow(
tf.pow(e_tnorm, num_factors - 1.) / eig_tnorm_negList_prod, 1. / num_factors)
coeffs *= (e + adjustment * damping)
else:
coeffs = 1.
damping = (self._epsilon + weightDecayCoeff)
for e in eigVals:
coeffs *= e
coeffs += damping
# grad = tf.Print(grad, [tf.convert_to_tensor('1'), tf.convert_to_tensor(var.name), grad.get_shape()])
grad /= coeffs
# grad = tf.Print(grad, [tf.convert_to_tensor('2'), tf.convert_to_tensor(var.name), grad.get_shape()])
#####
# project gradient back to euclidean space
for idx, stats in enumerate(self.stats[var]['fprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(Q, grad, transpose_a=False, reduce_dim=idx)
for idx, stats in enumerate(self.stats[var]['bprop_concat_stats']):
Q = self.stats_eigen[stats]['Q']
grad = gmatmul(grad, Q, transpose_b=True, reduce_dim=idx)
##
# grad = tf.Print(grad, [tf.convert_to_tensor('3'), tf.convert_to_tensor(var.name), grad.get_shape()])
if (self.stats[var]['assnBias'] is not None) and not self._blockdiag_bias:
# use homogeneous coordinates only works for 2D grad.
# TO-DO: figure out how to factorize bias grad
# un-stack bias grad
var_assnBias = self.stats[var]['assnBias']
C_plus_one = int(grad.get_shape()[0])
grad_assnBias = tf.reshape(tf.slice(grad,
begin=[
C_plus_one - 1, 0],
size=[1, -1]), var_assnBias.get_shape())
grad_assnWeights = tf.slice(grad,
begin=[0, 0],
size=[C_plus_one - 1, -1])
grad_dict[var_assnBias] = grad_assnBias
grad = grad_assnWeights
# grad = tf.Print(grad, [tf.convert_to_tensor('4'), tf.convert_to_tensor(var.name), grad.get_shape()])
if GRAD_RESHAPE:
grad = tf.reshape(grad, GRAD_SHAPE)
grad_dict[var] = grad
print(('projecting %d gradient matrices' % counter))
for g, var in zip(gradlist, varlist):
grad = grad_dict[var]
### clipping ###
if KFAC_DEBUG:
print(('apply clipping to %s' % (var.name)))
tf.Print(grad, [tf.sqrt(tf.reduce_sum(tf.pow(grad, 2)))], "Euclidean norm of new grad")
local_vg = tf.reduce_sum(grad * g * (self._lr * self._lr))
vg += local_vg
        # rescale everything
if KFAC_DEBUG:
print('apply vFv clipping')
scaling = tf.minimum(1., tf.sqrt(self._clip_kl / vg))
if KFAC_DEBUG:
scaling = tf.Print(scaling, [tf.convert_to_tensor(
'clip: '), scaling, tf.convert_to_tensor(' vFv: '), vg])
with tf.control_dependencies([tf.assign(self.vFv, vg)]):
updatelist = [grad_dict[var] for var in varlist]
for i, item in enumerate(updatelist):
updatelist[i] = scaling * item
return updatelist
def compute_gradients(self, loss, var_list=None):
varlist = var_list
if varlist is None:
varlist = tf.trainable_variables()
g = tf.gradients(loss, varlist)
return [(a, b) for a, b in zip(g, varlist)]
def apply_gradients_kfac(self, grads):
g, varlist = list(zip(*grads))
if len(self.stats_eigen) == 0:
self.getStatsEigen()
qr = None
# launch eigen-decomp on a queue thread
if self._async:
print('Use async eigen decomp')
# get a list of factor loading tensors
factorOps_dummy = self.computeStatsEigen()
# define a queue for the list of factor loading tensors
queue = tf.FIFOQueue(1, [item.dtype for item in factorOps_dummy], shapes=[
item.get_shape() for item in factorOps_dummy])
enqueue_op = tf.cond(
tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update), tf.convert_to_tensor(
0)), tf.greater_equal(self.stats_step, self._stats_accum_iter)),
lambda: queue.enqueue(self.computeStatsEigen()), tf.no_op)
def dequeue_op():
return queue.dequeue()
qr = tf.train.QueueRunner(queue, [enqueue_op])
updateOps = []
global_step_op = tf.assign_add(self.global_step, 1)
updateOps.append(global_step_op)
with tf.control_dependencies([global_step_op]):
# compute updates
assert self._update_stats_op != None
updateOps.append(self._update_stats_op)
dependency_list = []
if not self._async:
dependency_list.append(self._update_stats_op)
with tf.control_dependencies(dependency_list):
def no_op_wrapper():
return tf.group(*[tf.assign_add(self.cold_step, 1)])
if not self._async:
# synchronous eigen-decomp updates
updateFactorOps = tf.cond(tf.logical_and(tf.equal(tf.mod(self.stats_step, self._kfac_update),
tf.convert_to_tensor(0)),
tf.greater_equal(self.stats_step, self._stats_accum_iter)),
lambda: tf.group(*self.applyStatsEigen(self.computeStatsEigen())),
no_op_wrapper)
else:
# asynchronous eigen-decomp updates using queue
updateFactorOps = tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
lambda: tf.cond(tf.equal(queue.size(), tf.convert_to_tensor(0)),
tf.no_op,
lambda: tf.group(
*self.applyStatsEigen(dequeue_op())),
),
no_op_wrapper)
updateOps.append(updateFactorOps)
with tf.control_dependencies([updateFactorOps]):
def gradOp():
return list(g)
def getKfacGradOp():
return self.getKfacPrecondUpdates(g, varlist)
u = tf.cond(tf.greater(self.factor_step,
tf.convert_to_tensor(0)), getKfacGradOp, gradOp)
optim = tf.train.MomentumOptimizer(
self._lr * (1. - self._momentum), self._momentum)
# optim = tf.train.AdamOptimizer(self._lr, epsilon=0.01)
def optimOp():
def updateOptimOp():
if self._full_stats_init:
return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)),
lambda: optim.apply_gradients(list(zip(u, varlist))), tf.no_op)
else:
return optim.apply_gradients(list(zip(u, varlist)))
if self._full_stats_init:
return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter), updateOptimOp,
tf.no_op)
else:
return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter), updateOptimOp, tf.no_op)
updateOps.append(optimOp())
return tf.group(*updateOps), qr
def apply_gradients(self, grads):
coldOptim = tf.train.MomentumOptimizer(
self._cold_lr, self._momentum)
def coldSGDstart():
sgd_grads, sgd_var = zip(*grads)
if self.max_grad_norm != None:
sgd_grads, sgd_grad_norm = tf.clip_by_global_norm(sgd_grads, self.max_grad_norm)
sgd_grads = list(zip(sgd_grads, sgd_var))
sgd_step_op = tf.assign_add(self.sgd_step, 1)
coldOptim_op = coldOptim.apply_gradients(sgd_grads)
if KFAC_DEBUG:
with tf.control_dependencies([sgd_step_op, coldOptim_op]):
sgd_step_op = tf.Print(
sgd_step_op, [self.sgd_step, tf.convert_to_tensor('doing cold sgd step')])
return tf.group(*[sgd_step_op, coldOptim_op])
kfacOptim_op, qr = self.apply_gradients_kfac(grads)
def warmKFACstart():
return kfacOptim_op
return tf.cond(tf.greater(self.sgd_step, self._cold_iter), warmKFACstart, coldSGDstart), qr
def minimize(self, loss, loss_sampled, var_list=None):
grads = self.compute_gradients(loss, var_list=var_list)
update_stats_op = self.compute_and_apply_stats(
loss_sampled, var_list=var_list)
return self.apply_gradients(grads)
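# Minimal wiring sketch (illustrative; baselines' ACKTR trainer does the real
# setup).  `loss` is the ordinary objective, while `loss_sampled` is the same
# network scored against labels drawn from the model's own predictive
# distribution, which is what the Fisher-block statistics are estimated from.
#
#     optim = KfacOptimizer(learning_rate=0.03, momentum=0.9,
#                           cold_iter=10, kfac_update=2)
#     update_op, q_runner = optim.minimize(loss, loss_sampled, var_list=params)
#     # in the session loop: sess.run(update_op, feed_dict=...)
#     # with async_=True, also start q_runner, e.g.
#     # q_runner.create_threads(sess, coord=coord, start=True)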
|
{"hexsha": "0aec823d6162c78e0852e4a6ff214864daaf6de9", "size": 46263, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/acktr/kfac.py", "max_stars_repo_name": "speedcell4/baselines", "max_stars_repo_head_hexsha": "c4be964fad7d015d1aa2f76a946c7c8c1025ce61", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "baselines/acktr/kfac.py", "max_issues_repo_name": "speedcell4/baselines", "max_issues_repo_head_hexsha": "c4be964fad7d015d1aa2f76a946c7c8c1025ce61", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "baselines/acktr/kfac.py", "max_forks_repo_name": "speedcell4/baselines", "max_forks_repo_head_hexsha": "c4be964fad7d015d1aa2f76a946c7c8c1025ce61", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.5445960126, "max_line_length": 120, "alphanum_fraction": 0.484339537, "include": true, "reason": "import numpy", "num_tokens": 9185}
|
"""
Created on Wednesday 7 March 2018
Last update: Wednesday 25 April 2018
@author: Michiel Stock
michielfmstock@gmail.com
Make a city for the project of discrete optimization
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import BallTree
import json
blue = '#264653'
green = '#2a9d8f'
yellow = '#e9c46a'
orange = '#f4a261'
red = '#e76f51'
black = '#50514F'
np.random.seed(2)
xmax = 250
ymax = 150
neighbors = 5
n_uniform = 1000
n_norm = 750
# make coordinates of the vertices
coordinates = []
# uniform spread
coordinates += (np.random.rand(n_uniform,2) * np.array([[xmax, ymax]])).tolist()
# MVN in the centre
coor_norm = []
mu = np.array([xmax, ymax]) / 2
cov = np.diag([1000, 500])
while len(coordinates) < n_uniform + n_norm:
coor = np.random.multivariate_normal(mu, cov)
if coor[0] > 0 and coor[0] < xmax:
if coor[1] > 0 and coor[1] < ymax:
coordinates.append(coor.tolist())
# make connections
balltree = BallTree(coordinates)
d, ind = balltree.query(coordinates, neighbors+1)
d = d[:,1:] # first neighbor is itself
ind = ind[:,1:] # first neighbor is itself
edges = set([])
for i, (dists, neighbors) in enumerate(zip(d, ind)):
edges.update([(d, i, j) for d, j in zip(dists.tolist(), neighbors.tolist())])
# duplicate edges
edges.update([(d, j, i) for d, i, j in edges])
# make parks
park_A = set([i for i, (x, y) in enumerate(coordinates) if x < 50 and y > 75])
park_B = set([i for i, (x, y) in enumerate(coordinates)
if x > 190 and x < 210])
park_C = set(balltree.query_radius([[xmax/2, ymax/2]], 10)[0].tolist())
parks = park_A | park_B | park_C
fig, ax = plt.subplots(figsize=(20, 15))
for id, (x, y) in enumerate(coordinates):
if id in parks:
ax.scatter(x, y, color=green, s=20, zorder=2)
else:
ax.scatter(x, y, color=orange, s=20, zorder=2)
# add parks to plot
park_A_plot = plt.Rectangle((0, ymax - 75), 50, 75, alpha=0.3,
color=green)
park_B_plot = plt.Rectangle((190, 0), 210 - 190, ymax, alpha=0.3,
color=green)
park_C_plot = plt.Circle((xmax/2, ymax/2), 10, color=green, alpha=0.3)
ax.add_artist(park_A_plot)
ax.add_artist(park_B_plot)
ax.add_artist(park_C_plot)
# plot edges
for d, i, j in edges:
xi, yi = coordinates[i]
xj, yj = coordinates[j]
ax.plot([xi, xj], [yi, yj], color='grey', alpha=0.7, lw=2, zorder=1)
# plot park letters
ax.text(25, 100, 'A', fontsize=42, color=red)
ax.text(200, 75, 'B', fontsize=42, color=red)
ax.text(xmax / 2, ymax / 2, 'C', fontsize=42, color=red)
ax.set_xticks([])
ax.set_yticks([])
ax.set_title('Map of the city')
fig.patch.set_visible(False)
fig.savefig('Figures/city_map.png')
data = {
'coordinates' : coordinates,
'edges' : list(edges),
'vertices' : list(range(n_uniform + n_norm)),
'parks' : {
'A' : list(park_A),
'B' : list(park_B),
'C' : list(park_C)
}
}
with open('Data/city.json', 'w') as fh:
json.dump(data, fh)
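# Quick sanity check (illustrative): the JSON written above round-trips into
# plain lists, so a downstream solver can rebuild the graph directly.
# with open('Data/city.json', 'r') as fh:
#     city = json.load(fh)
# assert len(city['vertices']) == n_uniform + n_norm
# assert all(len(edge) == 3 for edge in city['edges'])  # (distance, i, j)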
|
{"hexsha": "23f70565cf3de7f84bdbe85220f1fcb37221f151", "size": 3039, "ext": "py", "lang": "Python", "max_stars_repo_path": "Chapters/08.ProjectDiscrete/make_city.py", "max_stars_repo_name": "ntienvu/SelectedTopicsOptimization", "max_stars_repo_head_hexsha": "069659ca9754cc7fd884b654a06157cc7da6f963", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-01T13:01:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-01T13:01:38.000Z", "max_issues_repo_path": "Chapters/08.ProjectDiscrete/make_city.py", "max_issues_repo_name": "ntienvu/SelectedTopicsOptimization", "max_issues_repo_head_hexsha": "069659ca9754cc7fd884b654a06157cc7da6f963", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapters/08.ProjectDiscrete/make_city.py", "max_forks_repo_name": "ntienvu/SelectedTopicsOptimization", "max_forks_repo_head_hexsha": "069659ca9754cc7fd884b654a06157cc7da6f963", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1982758621, "max_line_length": 81, "alphanum_fraction": 0.6324448832, "include": true, "reason": "import numpy", "num_tokens": 959}
|
import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from cogment_verse_torch_agents.third_party.hive.agent import Agent
from cogment_verse_torch_agents.third_party.td3.td3_mlp import ActorMLP, CriticMLP
class DDPGAgent(Agent):
def __init__(
self,
obs_dim,
act_dim,
high_action=[1, 1],
low_action=[-1, -1],
start_timesteps=2000,
expl_noise=0.1,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
optimizer_fn=None,
id=0,
replay_buffer=None,
discount_rate=0.99,
target_net_update_fraction=1e-3,
target_net_update_schedule=None,
epsilon_schedule=None,
learn_schedule=None,
lr_schedule=None,
seed=42,
device="cpu",
logger=None,
log_frequency=100,
max_replay_buffer_size=50000,
):
super().__init__(
obs_dim=obs_dim,
act_dim=act_dim,
id=id,
seed=seed,
learn_schedule=learn_schedule,
epsilon_schedule=epsilon_schedule,
lr_schedule=lr_schedule,
max_replay_buffer_size=max_replay_buffer_size,
)
self._params["obs_dim"] = obs_dim
self._params["act_dim"] = act_dim
self._params["device"] = device
self._params["policy_noise"] = policy_noise
self._params["noise_clip"] = noise_clip
self._params["policy_freq"] = policy_freq
self._params["discount_rate"] = discount_rate
self._params["tau"] = target_net_update_fraction
self._device = torch.device(device) # pylint: disable=no-member
self._params["min_action"] = torch.tensor(low_action).to(self._device)
self._params["max_action"] = torch.tensor(high_action).to(self._device)
LR_ACTOR = 1e-4
self._actor_local = ActorMLP(obs_dim, act_dim).to(device)
self._actor_target = ActorMLP(obs_dim, act_dim).to(self._device)
self._actor_optimizer = optim.Adam(self._actor_local.parameters(), lr=LR_ACTOR)
LR_CRITIC = 3e-4
WEIGHT_DECAY = 0.0001
self.critic_local = CriticMLP(obs_dim, act_dim).to(self._device)
self._critic_target = CriticMLP(obs_dim, act_dim).to(self._device)
self._critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
self._params["total_it"] = 0
self._params["start_timesteps"] = start_timesteps
self._params["expl_noise"] = expl_noise
def train(self):
"""Changes the agent to training mode."""
super().train()
self._actor_local.train()
self._actor_target.train()
self.critic_local.train()
self._critic_target.train()
def eval(self):
"""Changes the agent to evaluation mode."""
super().eval()
self._actor_local.eval()
self._actor_target.eval()
self.critic_local.eval()
self._critic_target.eval()
def act(self, state, legal_moves_as_int=None, update_schedule=True):
state = torch.from_numpy(np.array(state, copy=True)).float().to(self._device)
assert len(state.shape) <= 2
if len(state.shape) == 2:
assert state.shape[0] == 1
assert state.shape[1] == self._params["obs_dim"]
else:
state = state.unsqueeze(0)
self.eval()
epsilon = self.get_epsilon_schedule(update_schedule)
if self._params["total_it"] < self._params["start_timesteps"] or self._rng.random() < epsilon:
uniform_action = torch.rand(self._params["act_dim"])
span = self._params["max_action"] - self._params["min_action"]
action = uniform_action * span + self._params["min_action"]
else:
with torch.no_grad():
action = (
self._actor_local(state, self._params["min_action"], self._params["max_action"]).cpu().data.numpy()
)
span = self._params["max_action"] - self._params["min_action"]
center = span * 0.5
loc = self._params["min_action"] + center
scale = center * self._params["expl_noise"]
noise = np.random.normal(loc, scale, size=self._params["act_dim"])
action += noise
action = np.clip(action, self._params["min_action"], self._params["max_action"])
return action
def learn(self, experiences, update_schedule=True):
self._params["total_it"] += 1
batch = {key: torch.tensor(val).to(self._device) for key, val in experiences.items()}
states = batch["observations"]
actions = batch["actions"]
rewards = batch["rewards"]
next_states = batch["next_observations"]
dones = batch["done"]
actions_next = self._actor_target(next_states, self._params["min_action"], self._params["max_action"])
action_shape = actions_next.shape
actions = actions.reshape(action_shape)
Q_targets_next = self._critic_target.Q1(next_states, actions_next)
Q_targets = rewards + (self._params["discount_rate"] * Q_targets_next * (1 - dones))
Q_expected = self.critic_local.Q1(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
self._critic_optimizer.zero_grad()
critic_loss.backward()
self._critic_optimizer.step()
actions_pred = self._actor_local(states, self._params["min_action"], self._params["max_action"])
actor_loss = -self.critic_local.Q1(states, actions_pred).mean()
self._actor_optimizer.zero_grad()
actor_loss.backward()
self._actor_optimizer.step()
self.soft_update(self.critic_local, self._critic_target)
self.soft_update(self._actor_local, self._actor_target)
if update_schedule:
self.get_epsilon_schedule(update_schedule)
info = {}
info["critic_loss"] = critic_loss.item()
info["actor_loss"] = actor_loss.item()
info["exploration"] = int(self._params["total_it"] < self._params["start_timesteps"])
return info
def soft_update(self, local_model, target_model):
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(
self._params["tau"] * local_param.data + (1.0 - self._params["tau"]) * target_param.data
)
def save(self, f):
torch.save(
{
"id": self._id,
"params": self._params,
"learn_schedule": self._learn_schedule,
"epsilon_schedule": self._epsilon_schedule,
"rng": self._rng,
"actor": self._actor_local.state_dict(),
"actor_target": self._actor_target.state_dict(),
"actor_optimizer": self._actor_optimizer.state_dict(),
"critic": self.critic_local.state_dict(),
"critic_target": self._critic_target.state_dict(),
"critic_optimizer": self._critic_optimizer.state_dict(),
"lr_schedule": self._lr_schedule,
},
f,
)
def load(self, f):
super().load(f)
device_name = self._params["device"]
checkpoint = torch.load(f, map_location=self._device)
self._id = checkpoint["id"]
self._params = checkpoint["params"]
self._params["device"] = device_name
self._learn_schedule = checkpoint["learn_schedule"]
self._epsilon_schedule = checkpoint["epsilon_schedule"]
self._rng = checkpoint["rng"]
for key in ["min_action", "max_action"]:
self._params[key] = checkpoint["params"][key].to(self._device)
self._actor_local.load_state_dict(checkpoint["actor"])
self._actor_target.load_state_dict(checkpoint["actor_target"])
self._actor_optimizer.load_state_dict(checkpoint["actor_optimizer"])
self.critic_local.load_state_dict(checkpoint["critic"])
self._critic_target.load_state_dict(checkpoint["critic_target"])
self._critic_optimizer.load_state_dict(checkpoint["critic_optimizer"])
self._lr_schedule = checkpoint["lr_schedule"]
|
{"hexsha": "89d1ed11f01878ecf6f4d75ddba228dd5990b137", "size": 8327, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_agents/cogment_verse_torch_agents/third_party/hive/ddpg.py", "max_stars_repo_name": "kharyal/cogment-verse", "max_stars_repo_head_hexsha": "12bcb855bc742e3ec4ed11c40a1b475e95a32515", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2021-10-01T01:33:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T18:18:50.000Z", "max_issues_repo_path": "torch_agents/cogment_verse_torch_agents/third_party/hive/ddpg.py", "max_issues_repo_name": "kharyal/cogment-verse", "max_issues_repo_head_hexsha": "12bcb855bc742e3ec4ed11c40a1b475e95a32515", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2021-11-06T04:37:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T18:05:28.000Z", "max_forks_repo_path": "torch_agents/cogment_verse_torch_agents/third_party/hive/ddpg.py", "max_forks_repo_name": "kharyal/cogment-verse", "max_forks_repo_head_hexsha": "12bcb855bc742e3ec4ed11c40a1b475e95a32515", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-12-14T15:24:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T11:06:34.000Z", "avg_line_length": 37.6787330317, "max_line_length": 119, "alphanum_fraction": 0.6266362435, "include": true, "reason": "import numpy", "num_tokens": 1893}
|
/-
Copyright (c) 2020 Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Bhavik Mehta, Andrew Yang
-/
import category_theory.limits.shapes.terminal
import category_theory.limits.shapes.pullbacks
import category_theory.limits.shapes.binary_products
/-!
# Constructing binary product from pullbacks and terminal object.
The product is the pullback over the terminal objects. In particular, if a category
has pullbacks and a terminal object, then it has binary products.
We also provide the dual.
-/
universes v u
open category_theory category_theory.category category_theory.limits
variables {C : Type u} [category.{v} C]
/-- The pullback over the terminal object is the product -/
def is_product_of_is_terminal_is_pullback {W X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) (h : W ⟶ X)
(k : W ⟶ Y) (H₁ : is_terminal Z)
(H₂ : is_limit (pullback_cone.mk _ _ (show h ≫ f = k ≫ g, from H₁.hom_ext _ _))) :
is_limit (binary_fan.mk h k) :=
{ lift := λ c, H₂.lift (pullback_cone.mk
(c.π.app ⟨walking_pair.left⟩) (c.π.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)),
fac' := λ c j,
begin
cases j,
convert H₂.fac (pullback_cone.mk (c.π.app ⟨walking_pair.left⟩)
(c.π.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)) (some j) using 1,
rcases j; refl,
end,
uniq' := λ c m hm,
begin
apply pullback_cone.is_limit.hom_ext H₂,
{ exact (hm ⟨walking_pair.left⟩).trans (H₂.fac (pullback_cone.mk (c.π.app ⟨walking_pair.left⟩)
(c.π.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)) walking_cospan.left).symm },
{ exact (hm ⟨walking_pair.right⟩).trans (H₂.fac (pullback_cone.mk (c.π.app ⟨walking_pair.left⟩)
(c.π.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)) walking_cospan.right).symm },
end }
/-- The product is the pullback over the terminal object. -/
def is_pullback_of_is_terminal_is_product {W X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) (h : W ⟶ X)
(k : W ⟶ Y) (H₁ : is_terminal Z)
(H₂ : is_limit (binary_fan.mk h k)) :
is_limit (pullback_cone.mk _ _ (show h ≫ f = k ≫ g, from H₁.hom_ext _ _)) :=
begin
apply pullback_cone.is_limit_aux',
intro s,
use H₂.lift (binary_fan.mk s.fst s.snd),
use H₂.fac (binary_fan.mk s.fst s.snd) ⟨walking_pair.left⟩,
use H₂.fac (binary_fan.mk s.fst s.snd) ⟨walking_pair.right⟩,
intros m h₁ h₂,
apply H₂.hom_ext,
rintro ⟨⟨⟩⟩,
{ exact h₁.trans (H₂.fac (binary_fan.mk s.fst s.snd) ⟨walking_pair.left⟩).symm },
{ exact h₂.trans (H₂.fac (binary_fan.mk s.fst s.snd) ⟨walking_pair.right⟩).symm }
end
/-- Any category with pullbacks and a terminal object has a limit cone for each walking pair. -/
noncomputable def limit_cone_of_terminal_and_pullbacks [has_terminal C] [has_pullbacks C]
(F : discrete walking_pair ⥤ C) : limit_cone F :=
{ cone :=
{ X := pullback (terminal.from (F.obj ⟨walking_pair.left⟩))
(terminal.from (F.obj ⟨walking_pair.right⟩)),
π := discrete.nat_trans (λ x, discrete.cases_on x
(λ x, walking_pair.cases_on x pullback.fst pullback.snd)) },
is_limit :=
{ lift := λ c, pullback.lift ((c.π).app ⟨walking_pair.left⟩)
((c.π).app ⟨walking_pair.right⟩)
(subsingleton.elim _ _),
fac' := λ s c, discrete.cases_on c
(λ c, walking_pair.cases_on c (limit.lift_π _ _) (limit.lift_π _ _)),
uniq' := λ s m J,
begin
rw [←J, ←J],
ext;
rw limit.lift_π;
refl
end } }
variable (C)
/-- Any category with pullbacks and terminal object has binary products. -/
-- This is not an instance, as it is not always how one wants to construct binary products!
lemma has_binary_products_of_terminal_and_pullbacks
[has_terminal C] [has_pullbacks C] :
has_binary_products C :=
{ has_limit := λ F, has_limit.mk (limit_cone_of_terminal_and_pullbacks F) }
/-- In a category with a terminal object and pullbacks,
a product of objects `X` and `Y` is isomorphic to a pullback. -/
noncomputable
def prod_iso_pullback [has_terminal C] [has_pullbacks C] (X Y : C) [has_binary_product X Y] :
X ⨯ Y ≅ pullback (terminal.from X) (terminal.from Y) :=
limit.iso_limit_cone (limit_cone_of_terminal_and_pullbacks _)
variable {C}
/-- The pushout under the initial object is the coproduct -/
def is_coproduct_of_is_initial_is_pushout {W X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) (h : W ⟶ X)
(k : W ⟶ Y) (H₁ : is_initial W)
(H₂ : is_colimit (pushout_cocone.mk _ _ (show h ≫ f = k ≫ g, from H₁.hom_ext _ _))) :
is_colimit (binary_cofan.mk f g) :=
{ desc := λ c, H₂.desc (pushout_cocone.mk
(c.ι.app ⟨walking_pair.left⟩) (c.ι.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)),
fac' := λ c j,
begin
cases j,
convert H₂.fac (pushout_cocone.mk (c.ι.app ⟨walking_pair.left⟩) (c.ι.app ⟨walking_pair.right⟩)
(H₁.hom_ext _ _)) (some j) using 1,
cases j; refl
end,
uniq' := λ c m hm,
begin
apply pushout_cocone.is_colimit.hom_ext H₂,
{ exact (hm ⟨walking_pair.left⟩).trans (H₂.fac (pushout_cocone.mk (c.ι.app ⟨walking_pair.left⟩)
(c.ι.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)) walking_cospan.left).symm },
{ exact (hm ⟨walking_pair.right⟩).trans (H₂.fac (pushout_cocone.mk (c.ι.app ⟨walking_pair.left⟩)
(c.ι.app ⟨walking_pair.right⟩) (H₁.hom_ext _ _)) walking_cospan.right).symm },
end }
/-- The coproduct is the pushout under the initial object. -/
def is_pushout_of_is_initial_is_coproduct {W X Y Z : C} (f : X ⟶ Z) (g : Y ⟶ Z) (h : W ⟶ X)
(k : W ⟶ Y) (H₁ : is_initial W)
(H₂ : is_colimit (binary_cofan.mk f g)) :
is_colimit (pushout_cocone.mk _ _ (show h ≫ f = k ≫ g, from H₁.hom_ext _ _)) :=
begin
apply pushout_cocone.is_colimit_aux',
intro s,
use H₂.desc (binary_cofan.mk s.inl s.inr),
use H₂.fac (binary_cofan.mk s.inl s.inr) ⟨walking_pair.left⟩,
use H₂.fac (binary_cofan.mk s.inl s.inr) ⟨walking_pair.right⟩,
intros m h₁ h₂,
apply H₂.hom_ext,
rintro ⟨⟨⟩⟩,
{ exact h₁.trans (H₂.fac (binary_cofan.mk s.inl s.inr) ⟨walking_pair.left⟩).symm },
{ exact h₂.trans (H₂.fac (binary_cofan.mk s.inl s.inr) ⟨walking_pair.right⟩).symm }
end
/-- Any category with pushouts and an initial object has a colimit cocone for each walking pair. -/
noncomputable def colimit_cocone_of_initial_and_pushouts [has_initial C] [has_pushouts C]
(F : discrete walking_pair ⥤ C) : colimit_cocone F :=
{ cocone :=
{ X := pushout (initial.to (F.obj ⟨walking_pair.left⟩))
(initial.to (F.obj ⟨walking_pair.right⟩)),
ι := discrete.nat_trans (λ x, discrete.cases_on x
(λ x, walking_pair.cases_on x pushout.inl pushout.inr)) },
is_colimit :=
{ desc := λ c, pushout.desc (c.ι.app ⟨walking_pair.left⟩)
(c.ι.app ⟨walking_pair.right⟩)
(subsingleton.elim _ _),
fac' := λ s c, discrete.cases_on c
(λ c, walking_pair.cases_on c (colimit.ι_desc _ _) (colimit.ι_desc _ _)),
uniq' := λ s m J,
begin
rw [←J, ←J],
ext;
rw colimit.ι_desc;
refl
end } }
variable (C)
/-- Any category with pushouts and initial object has binary coproducts. -/
-- This is not an instance, as it is not always how one wants to construct binary coproducts!
lemma has_binary_coproducts_of_initial_and_pushouts
[has_initial C] [has_pushouts C] :
has_binary_coproducts C :=
{ has_colimit := λ F, has_colimit.mk (colimit_cocone_of_initial_and_pushouts F) }
/-- In a category with an initial object and pushouts,
a coproduct of objects `X` and `Y` is isomorphic to a pushout. -/
noncomputable
def coprod_iso_pushout [has_initial C] [has_pushouts C] (X Y : C) [has_binary_coproduct X Y] :
X ⨿ Y ≅ pushout (initial.to X) (initial.to Y) :=
colimit.iso_colimit_cocone (colimit_cocone_of_initial_and_pushouts _)
|
{"author": "nick-kuhn", "repo": "leantools", "sha": "567a98c031fffe3f270b7b8dea48389bc70d7abb", "save_path": "github-repos/lean/nick-kuhn-leantools", "path": "github-repos/lean/nick-kuhn-leantools/leantools-567a98c031fffe3f270b7b8dea48389bc70d7abb/src/category_theory/limits/constructions/binary_products.lean"}
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
from PIL import Image
# In[2]:
def stitch(stack, numpix_threshold=0):
'''
Combine multiple instance segmentations based on overlapping patches into a single
segmentation
Args
----
stack : np.ndarray
first two dimensions of stack should be the dimensions of the input image,
and the third dimension be the number of overlapping patches
numpix_threshold : int
a label will be retained in the output only if it has at least
numpix_threshold pixels
Returns
-------
result : numpy.ndarray
        a 2-D array of labels
'''
from scipy.sparse.csgraph import csgraph_from_dense, connected_components
# find foreground labels
nonzero_idx = np.any(stack,axis=2)
# get unique label combinations across patches in stack
labels_to_combine = np.unique(stack[nonzero_idx],axis=0)
# compute a "connectivity matrix" that indicates which labels overlap across patches
conn_mat = np.zeros((labels_to_combine.max()+1,labels_to_combine.max()+1), dtype='bool')
for row, label_combo in enumerate(labels_to_combine):
group = label_combo[np.nonzero(label_combo)]
for i in range(len(group)-1):
for j in range(i+1,len(group)):
conn_mat[group[i], group[j]] = True
conn_mat[group[j], group[i]] = True
#np.fill_diagonal(conn_mat, True)
# find connected components using this connectivity matrix
# each connected component will be a different label in the result (as long as it
# contains the minimum required number of pixels)
graph = csgraph_from_dense(conn_mat)
n_conncomp, graph_complabels = connected_components(graph, directed=False)
result = np.zeros_like(stack[:,:,0])
# reassign labels to the ids of the connected components
for label in np.unique(stack):
# get 2-D mask of voxels with a given label
mask = np.any(stack==label,axis=2)
        # make sure that there are enough pixels
if mask.sum() > numpix_threshold:
# if so, reassign this label to its corresponding connected component id
result[np.any(stack==label,axis=2)] = graph_complabels[label]
return result
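# --- Minimal usage sketch for `stitch` (illustrative only; the toy stack below is an assumption,
# --- not part of the original pipeline). Two overlapping patch labels get merged into one object.
_demo_stack = np.zeros((4, 4, 2), dtype=int)
_demo_stack[:2, :2, 0] = 1          # object labelled 1 in patch 0
_demo_stack[1:3, 1:3, 1] = 2        # overlapping object labelled 2 in patch 1
_demo_result = stitch(_demo_stack)  # labels 1 and 2 end up in the same connected component
# print(np.unique(_demo_result))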
def stitch_sparse(stack, numpix_threshold=0):
'''
Combine multiple instance segmentations based on overlapping patches into a single
segmentation. This implementation uses a sparse instead of a dense connectivity matrix,
so could be helpful if there is a large number of objects being segmented.
Args
----
stack : numpy.ndarray
first two dimensions of stack should be the dimensions of the input image,
and the third dimension be the number of overlapping patches
numpix_threshold : int
a label will be retained in the output only if it has at least
numpix_threshold pixels
Returns
-------
result : numpy.ndarray
        a 2-D array of labels
'''
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
# get label combinations across stacks
nonzero_idx = np.any(stack,axis=2)
labels_to_combine = stack[nonzero_idx]
# keep track of the number of times a label combination occurs
combo_dict = {}
for row, label_combo in enumerate(labels_to_combine):
group = label_combo[np.nonzero(label_combo)]
for i in range(len(group)):
for j in range(i+1,len(group)):
if (group[i], group[j]) in combo_dict:
combo_dict[(group[i], group[j])] += 1
else:
combo_dict[(group[i], group[j])] = 1
conn_mat = csr_matrix((np.ones(len(combo_dict), dtype='bool'),
([key[0] for key in combo_dict.keys()],
[key[1] for key in combo_dict.keys()])),
shape=(labels_to_combine.max()+1,labels_to_combine.max()+1))
n_conncomp, graph_complabels = connected_components(conn_mat, directed=False)
result = np.zeros_like(stack[:,:,0])
for label in np.unique(stack):
mask = np.any(stack==label,axis=2)
if mask.sum() > numpix_threshold:
result[np.any(stack==label,axis=2)] = graph_complabels[label]
return result
def fix_euler_numbers(result, max_hole_size=999):
'''
Fix labels whose Euler numbers are not 1 (i.e., labels with holes or handles)
Args
----
result : numpy.ndarray
2-D integer array of labeled objects
max_hole_size : int
max number of pixels to fill
Returns
-------
result : numpy.ndarray
'''
from skimage.measure import regionprops
from skimage.morphology import remove_small_holes
# use skimage's regionprops to get the Euler number for each object
props = regionprops(result)
labels = np.array([roi['label'] for roi in props])
euler_numbers = np.array([roi['euler_number'] for roi in props])
for bad_label in labels[euler_numbers!=1]:
mask = remove_small_holes(result==bad_label, area_threshold=max_hole_size)
# if there are other labels that intersect with the holes that were filled,
# then those other labels will be removed from the image
result[np.isin(result,np.setdiff1d(np.unique(result[mask]),[0, bad_label]))] = 0
result[mask] = bad_label
return result
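# --- Minimal usage sketch for `fix_euler_numbers` (toy data is an assumption for illustration):
# --- a single labelled square with a one-pixel hole has Euler number 0, so the hole gets filled.
_demo_labels = np.zeros((5, 5), dtype=int)
_demo_labels[1:4, 1:4] = 1
_demo_labels[2, 2] = 0              # punch a hole -> Euler number of label 1 becomes 0
_demo_fixed = fix_euler_numbers(_demo_labels.copy())
# print((_demo_fixed[1:4, 1:4] == 1).all())  # True: the hole has been filled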
def split_large_objects(result, stack):
'''
Detect objects that are too large
Determine whether these are actually single or multiple objects
If multiple objects, manipulate labels accordingly
Args
----
result : numpy.ndarray
2-D integer array of labeled objects
stack : np.ndarray
stack that was used to generate result
first two dimensions of stack should be the dimensions of the input image,
and the third dimension be the number of overlapping patches
Returns
-------
result : numpy.ndarray
'''
pass
# # inference-sparce-512
# In[3]:
stack = np.load('../inference-sparce-512/inference-bigger-cell.npy')
stack = stack[:,:,1:]
npix = 10
# In[4]:
result = stitch(stack, numpix_threshold=npix)
result = fix_euler_numbers(result)
# In[5]:
result_sparse = stitch_sparse(stack, numpix_threshold=npix)
result_sparse = fix_euler_numbers(result_sparse)
# In[6]:
Image.fromarray(result).save('../inference-sparce-512/result_hackathon.tif')
Image.fromarray(result_sparse).save('../inference-sparce-512/result_sparse_hackathon.tif')
# # inference-dense-512
# In[7]:
stack = np.load('../inference-dense-512/inference-stack.npy')
stack = stack[:,:,1:]
npix = 10
# In[8]:
result = stitch(stack, numpix_threshold=npix)
result = fix_euler_numbers(result)
# In[9]:
Image.fromarray(result).save('../inference-dense-512/result_hackathon.tif')
# # inference-dense-512 using flipped
# In[10]:
stack = np.load('../inference-dense-512/inference-bigger-cell-after-flipping.npy')
stack = stack[:,:,1:]
npix = 10
# In[11]:
result = stitch(stack, numpix_threshold=npix)
result = fix_euler_numbers(result)
# In[12]:
Image.fromarray(result).save('../inference-dense-512/result_flipped_hackathon.tif')
# # inference-dense-512 using both
# In[13]:
stack = np.load('../inference-dense-512/inference-stack.npy')
stack = stack[:,:,1:]
stack_flipped = np.load('../inference-dense-512/inference-bigger-cell-after-flipping.npy')
stack_flipped = stack_flipped[:,:,1:]
stack_flipped[stack_flipped>0] += stack.max() # ensure that IDs are different
stack = np.concatenate((stack,stack_flipped), axis=2)
npix = 10
# In[14]:
result = stitch(stack, numpix_threshold=npix)
result = fix_euler_numbers(result)
# In[15]:
Image.fromarray(result).save('../inference-dense-512/result_both_hackathon.tif')
# In[ ]:
|
{"hexsha": "c78574613d35aa229128b47bea3937c527ae3543", "size": 8154, "ext": "py", "lang": "Python", "max_stars_repo_path": "postprocess/stitching.py", "max_stars_repo_name": "patrickfletcher/Biological-structure-segmentation-in-microscopy-images-using-deep-learning", "max_stars_repo_head_hexsha": "6ce3b008dbe0374cd4c502f658f4dbd7ca21c87a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-09-09T16:54:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T03:50:27.000Z", "max_issues_repo_path": "postprocess/stitching.py", "max_issues_repo_name": "patrickfletcher/Biological-structure-segmentation-in-microscopy-images-using-deep-learning", "max_issues_repo_head_hexsha": "6ce3b008dbe0374cd4c502f658f4dbd7ca21c87a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "postprocess/stitching.py", "max_forks_repo_name": "patrickfletcher/Biological-structure-segmentation-in-microscopy-images-using-deep-learning", "max_forks_repo_head_hexsha": "6ce3b008dbe0374cd4c502f658f4dbd7ca21c87a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2018-09-10T14:02:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T13:52:07.000Z", "avg_line_length": 28.6105263158, "max_line_length": 92, "alphanum_fraction": 0.6545253863, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1879}
|
import gym
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO1
from scipy.special import softmax
from gym.spaces import Box, Discrete
from typing import NamedTuple, Callable, List, Union
from stable_baselines3 import PPO
from torch import nn
from tests.util import np_relu
import numpy as np
import cv2
class Mlp(NamedTuple):
layer_weights: List
layer_biases: List
layer_fns: List[Callable]
activations: List[Callable]
class Actor(NamedTuple):
sample: Callable
log_prob_from_distribution: Callable
actor_mlp: Mlp
step: Callable
class Critic(NamedTuple):
value_fn: Callable
critic_mlp: Mlp
class ActorCritic(NamedTuple):
actor: Actor
critic: Callable
step: Callable
def run_mlp(x, mlp: Mlp):
for i in range(len(mlp.layer_fns)):
w = mlp.layer_weights[i]
b = mlp.layer_biases[i]
act = mlp.activations[i]
fn = mlp.layer_fns[i]
x = act(fn(x, w, b))
return x
def np_identity(x):
return x
def np_linear(x, weight, bias=None):
if bias is not None:
return x.dot(weight.T) + bias
else:
return x.dot(weight.T)
def create_mlp(sizes, activation, output_activation=np_identity, pytorch_layers: Union[None, nn.Sequential]=None):
layer_weights = []
layer_fns = []
layer_biases = []
activations = []
for j in range(len(sizes)-1):
act = activation if j < len(sizes)-2 else output_activation
if pytorch_layers:
lidx = j*2
lw = pytorch_layers[lidx].weight.detach().numpy()
if hasattr(pytorch_layers[lidx], 'bias'):
lb = pytorch_layers[lidx].bias.detach().numpy()
else:
lb = None
else:
torch_layer = nn.Linear(sizes[j], sizes[j+1])
lw = torch_layer.weight.detach().numpy()
lb = torch_layer.bias.detach().numpy()
layer_weights.append(lw)
layer_biases.append(lb)
activations.append(act)
layer_fns.append(np_linear)
mlp = Mlp(layer_weights, layer_biases, layer_fns, activations)
return mlp
def lognormalize(x):
a = np.logaddexp.reduce(x)
return np.exp(x - a)
def probs_to_logits(probs):
return np.log(probs)
def logits_to_probs(logits):
return softmax(logits)
def np_categorical_sample(obs, mlp_impl: Mlp):
unnorm_probs = run_mlp(obs, mlp_impl)
probs = lognormalize(unnorm_probs)
return np.random.choice(probs.shape[-1], size=1, replace=True, p=probs)
def np_log_prob(a, log_probs):
mg = np.meshgrid(
*(range(i) for i in log_probs.shape[:-1]),
indexing='ij')
return log_probs[mg + [a.astype(np.int32)]]
def np_categorical_step(obs, mlp_impl):
unnorm_probs = run_mlp(obs, mlp_impl)
probs = lognormalize(unnorm_probs)
log_probs = probs_to_logits(probs)
sample = np.random.choice(probs.shape[-1], size=1, replace=True, p=probs)
return sample, np_log_prob(sample, log_probs)
def mlp_categorical_dist(obs_dim, act_dim, hidden_sizes, activation, pytorch_mod=None, env_sample=None):
pytorch_actor_layers = pytorch_mod.pi.logits_net
pytorch_critic_layers = pytorch_mod.v.v_net
layer_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]
actor_mlp_impl = create_mlp(layer_sizes, activation,
pytorch_layers=pytorch_actor_layers)
actor_mlp = lambda x: run_mlp(x, actor_mlp_impl)
step = lambda x: np_categorical_step(x, actor_mlp_impl)
log_prob = lambda x, p: np_log_prob(x, p)
actor = Actor(sample=lambda x: np_categorical_sample(x, actor_mlp_impl),
log_prob_from_distribution=log_prob,
actor_mlp=actor_mlp,
step=step)
critic_mlp_impl = create_mlp([obs_dim] + list(hidden_sizes) + [1], activation,
pytorch_layers=pytorch_critic_layers)
v_fnc = lambda x: run_mlp(x, critic_mlp_impl)
step_fnc = lambda x: actor.step(x) + (v_fnc(x),)
ac = ActorCritic(actor=actor, critic=v_fnc, step=step_fnc)
return ac
def update(actor_critic, data):
def compute_loss_pi(x):
pass
def compute_loss_v(x):
pass
pi_l_old = compute_loss_pi(data)
v_l_old = compute_loss_v(data)
def mlp_gaussian(obs_dim, act_dim, hidden_sizes, activation, pytorch_layers=None, env_sample=None):
layer_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]
mlp_impl = create_mlp(layer_sizes, activation,
pytorch_layers=pytorch_layers)
dist = lambda x: np.random.choice(run_mlp(x, mlp_impl), replace=False)
log_prob = lambda pi, lprob: pi * lprob
def create_actor_critic(obs_space, action_space, hidden_sizes, activation, pytorch_mod=None, env_sample=None):
obs_dim = obs_space.shape[0]
assert isinstance(hidden_sizes, tuple)
if isinstance(action_space, Box):
act_dim = action_space.shape[0]
pi = mlp_gaussian(obs_dim, act_dim, hidden_sizes, activation, pytorch_mod, env_sample)
else:
assert isinstance(action_space, Discrete)
act_dim = action_space.n
pi = mlp_categorical_dist(obs_dim, act_dim, hidden_sizes, activation, pytorch_mod, env_sample)
return pi
def ppo1_impl():
env = gym.make('CartPole-v1')
model = PPO('MlpPolicy', env, verbose=1)
return model, env
def value_func(obs):
pass
def entropy(x):
logp = np.log(x)
return np.sum(-x * logp)
def compute_value_loss(data, value_func):
obs, ret = data['obs'], data['ret']
return np.mean((value_func(obs) - ret) ** 2)
def compute_loss_pi(data, clip_ratio, ac: ActorCritic):
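    # PPO clipped surrogate: maximize E[min(r_t * A_t, clip(r_t, 1-eps, 1+eps) * A_t)],
    # where r_t = pi(a_t|s_t) / pi_old(a_t|s_t); the negation below turns it into a loss.
    # NOTE: `mlp_cat_actor` is assumed to be provided elsewhere and to return the action
    # probabilities and the log-probability of `act` under the current policy.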
obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']
pi, logp = mlp_cat_actor(obs, act)
ratio = np.exp(logp - logp_old)
clip_adv = np.clip(ratio, 1 - clip_ratio, 1 + clip_ratio) * adv
    loss_pi = -np.mean(np.minimum(ratio * adv, clip_adv))
# Useful extra info
approx_kl = np.mean(logp_old - logp)
ent = np.mean(entropy(pi))
clipped = (ratio > (1 + clip_ratio)) | (ratio < (1 - clip_ratio))
clipfrac = np.mean(clipped)
pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)
return loss_pi, pi_info
TORCH_NP_FN_MAP = {
"ReLU": np_relu,
"Linear": np_linear,
"Identity": np_identity
}
|
{"hexsha": "582e57d834ed27e48de92bd1fee8bb4d5a5939a1", "size": 6358, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/reference_implementations/rl_algorithms.py", "max_stars_repo_name": "lite-david/polymath", "max_stars_repo_head_hexsha": "cf1addc75e203fa606ebc6d32bc552fb3975ea99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2021-05-09T05:46:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T20:46:32.000Z", "max_issues_repo_path": "tests/reference_implementations/rl_algorithms.py", "max_issues_repo_name": "lite-david/polymath", "max_issues_repo_head_hexsha": "cf1addc75e203fa606ebc6d32bc552fb3975ea99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/reference_implementations/rl_algorithms.py", "max_forks_repo_name": "lite-david/polymath", "max_forks_repo_head_hexsha": "cf1addc75e203fa606ebc6d32bc552fb3975ea99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-08-24T07:46:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-05T18:23:07.000Z", "avg_line_length": 31.631840796, "max_line_length": 114, "alphanum_fraction": 0.6701793017, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1702}
|
subroutine sortst ( c10a , c10b , val , nr )
c Subroutine to sort (part of) tables R6-R7-R8
integer nr, ir, jndex
character*10 c10a(nr), c10b(nr), evea, eveb, sortar(nr)
real val(nr), eveval
logical flag
integer sortnr(nr), evenr, nrarr
c Set index
nrarr = 0
do 50 ir = 1,nr
call zoek ( c10a(ir), nrarr, sortar, 10, jndex )
if ( jndex .le. 0 ) then
nrarr = nrarr + 1
sortar(nrarr) = c10a(ir)
jndex = nrarr
endif
sortnr(ir) = jndex
50 continue
c Perform sort
100 continue
flag = .false.
do 200 ir = 1,nr-1
if ( sortnr(ir+1) .lt. sortnr(ir) ) then
flag = .true.
evenr = sortnr(ir+1)
evea = c10a (ir+1)
eveb = c10b (ir+1)
eveval = val (ir+1)
sortnr(ir+1) = sortnr(ir)
c10a (ir+1) = c10a (ir)
c10b (ir+1) = c10b (ir)
val (ir+1) = val (ir)
sortnr(ir) = evenr
c10a (ir) = evea
c10b (ir) = eveb
val (ir) = eveval
endif
200 continue
c back for next sweep
if ( flag ) goto 100
return
end
subroutine sorts2 ( pr , it , nm , de , do ,
j sx , nr , do_de , do_sx )
c Subroutine to sort tables R3-R4-R5
integer nr, nm(nr), sx(nr), ir, evenm, evesx
character*10 pr(nr), it(nr), sortar(nr), evepr, eveit
character*1 de(nr), do(nr), evede, evedo
logical flag , do_de, do_sx
integer sortnr(nr), evenr, nrarr, irarr, noffse,
j nrsub, jndex
c Set index for process
c write (*,*) ' SORTS2 '
nrarr = 0
do ir = 1,nr
call zoek ( pr(ir), nrarr, sortar, 10, jndex )
if ( jndex .le. 0 ) then
nrarr = nrarr + 1
sortar(nrarr) = pr(ir)
jndex = nrarr
endif
sortnr(ir) = jndex
enddo
c write (*,*) ' Processes indexed '
c Perform sort on process
100 continue
flag = .false.
do ir = 1,nr-1
if ( sortnr(ir+1) .lt. sortnr(ir) ) then
flag = .true.
evenr = sortnr(ir+1)
evepr = pr (ir+1)
eveit = it (ir+1)
evenm = nm (ir+1)
if ( do_de )
j evede = de (ir+1)
evedo = do (ir+1)
if ( do_sx )
j evesx = sx (ir+1)
sortnr(ir+1) = sortnr(ir)
pr (ir+1) = pr (ir)
it (ir+1) = it (ir)
nm (ir+1) = nm (ir)
if ( do_de )
j de (ir+1) = de (ir)
do (ir+1) = do (ir)
if ( do_sx )
j sx (ir+1) = sx (ir)
sortnr(ir) = evenr
pr (ir) = evepr
it (ir) = eveit
nm (ir) = evenm
if ( do_de )
j de (ir) = evede
do (ir) = evedo
if ( do_sx )
j sx (ir) = evesx
endif
enddo
c back for next sweep
if ( flag ) goto 100
c write (*,*) ' Processes sorted '
c do ir = 1,nr
c write (11,*) ir,sortnr(ir),pr(ir),it(ir)
c enddo
c Sort on number!!!
noffse = 0
do irarr = 1,nrarr
c Find items within current process
do ir = noffse + 1, nr
c write (11,*) ' ir ',ir
if ( sortnr(ir) .ne. sortnr(noffse+1) ) then
nrsub = ir-1-noffse
goto 200
endif
if ( ir .eq. nr ) then
nrsub = ir-noffse
goto 200
endif
enddo
200 continue
c write (11,*) ' Cycle ',irarr, ' from ',noffse+1,' to ',
c j noffse+nrsub
c Sort
300 continue
flag = .false.
do ir = noffse+1,noffse+nrsub-1
c write (*,*) ' ir2 ',ir
if ( nm(ir+1) .lt. nm(ir) ) then
flag = .true.
evepr = pr (ir+1)
eveit = it (ir+1)
evenm = nm (ir+1)
if ( do_de )
j evede = de (ir+1)
evedo = do (ir+1)
if ( do_sx )
j evesx = sx (ir+1)
pr (ir+1) = pr (ir)
it (ir+1) = it (ir)
nm (ir+1) = nm (ir)
if ( do_de )
j de (ir+1) = de (ir)
do (ir+1) = do (ir)
if ( do_sx )
j sx (ir+1) = sx (ir)
pr (ir) = evepr
it (ir) = eveit
nm (ir) = evenm
if ( do_de )
j de (ir) = evede
do (ir) = evedo
if ( do_sx )
j sx (ir) = evesx
endif
enddo
c back for next sweep
if ( flag ) goto 300
noffse = noffse + nrsub
enddo
c write (*,*) ' Items sorted '
if ( noffse .ne. nr ) stop 'BUG SortNR'
return
end
|
{"hexsha": "0a6f9c73c9eac6c8226e313d0c87176107aa9c20", "size": 5438, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "docker/water/delft3d/tags/v6686/src/tools_gpl/waqpb/packages/waqpb_lib/src/sortst.f", "max_stars_repo_name": "liujiamingustc/phd", "max_stars_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-06T03:01:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:02:55.000Z", "max_issues_repo_path": "docker/water/delft3d/tags/v6686/src/tools_gpl/waqpb/packages/waqpb_lib/src/sortst.f", "max_issues_repo_name": "liujiamingustc/phd", "max_issues_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docker/water/delft3d/tags/v6686/src/tools_gpl/waqpb/packages/waqpb_lib/src/sortst.f", "max_forks_repo_name": "liujiamingustc/phd", "max_forks_repo_head_hexsha": "4f815a738abad43531d02ac66f5bd0d9a1def52a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1761658031, "max_line_length": 66, "alphanum_fraction": 0.3824935638, "num_tokens": 1730}
|
/*
Copyright (C) 2017 Sascha Meiers
Distributed under the MIT software license, see the accompanying
file LICENSE.md or http://www.opensource.org/licenses/mit-license.php.
*/
#include <iostream>
#include <fstream>
#include <vector>
#include <unordered_map>
#include <tuple>
#include <boost/program_options/cmdline.hpp>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/parsers.hpp>
#include <boost/program_options/variables_map.hpp>
#include <boost/tokenizer.hpp>
#include <boost/filesystem.hpp>
#include <boost/progress.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/stream_buffer.hpp>
#include <boost/iostreams/device/file.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <htslib/sam.h>
#include "version.hpp"
#include "intervals.hpp"
#include "counter.hpp"
#include "distribution.hpp"
#include "hmm.hpp"
#include "iocounts.hpp"
/**
* @file
* @defgroup count Bin, count and classify W/C reads.
*
* Summary of how Strand-seq data is binned, counted and classified.
*
* ## Strand-seq read counting
*
* @todo write documentation about counting.
*/
using interval::Interval;
using count::TGenomeCounts;
using count::Counter;
struct Conf {
std::vector<boost::filesystem::path> f_in;
boost::filesystem::path f_out;
boost::filesystem::path f_bins;
boost::filesystem::path f_excl;
boost::filesystem::path f_info;
boost::filesystem::path f_sample_info;
boost::filesystem::path f_removed_bins;
boost::filesystem::path f_segments;
int minMapQual;
unsigned int window;
std::string mode;
};
/**
*
*/
void run_standard_HMM(std::vector<TGenomeCounts> & counts,
std::vector<unsigned> const & good_cells,
std::vector<CellInfo> & cells,
std::vector<unsigned> const & good_bins,
std::vector<int32_t> const & good_map,
std::unordered_map<std::string, SampleInfo> const & samples,
float p_trans)
{
// Set up and run HMM:
hmm::HMM<unsigned, hmm::MultiVariate<hmm::NegativeBinomial> > hmm({"CC", "WC", "WW"});
hmm.set_initials({0.3333, 0.3333, 0.3333});
hmm.set_transitions({1-2*p_trans, p_trans, p_trans, \
p_trans, 1-2*p_trans, p_trans, \
p_trans, p_trans, 1-2*p_trans});
for (auto i = good_cells.begin(); i != good_cells.end(); ++i)
{
// set NB(n,p) parameters according to `p` of sample and mean of cell.
float p = samples.at(cells[*i].sample_name).p;
float n = (float)cells[*i].mean_bin_count * p / (1-p);
float a = 0.1;
cells[*i].nb_p = p;
cells[*i].nb_r = n;
cells[*i].nb_a = a;
//std::cout << "NB parameters for cell <?>" << ": p=" << p << "\tn=" << n << "\tz=" << z << std::endl;
hmm.set_emissions( {\
hmm::MultiVariate<hmm::NegativeBinomial>({hmm::NegativeBinomial(p, (1-a)*n), hmm::NegativeBinomial(p, a*n)}), // CC
hmm::MultiVariate<hmm::NegativeBinomial>({hmm::NegativeBinomial(p, n/2), hmm::NegativeBinomial(p, n/2)}), // WC
hmm::MultiVariate<hmm::NegativeBinomial>({hmm::NegativeBinomial(p, a*n), hmm::NegativeBinomial(p, (1-a)*n)}) // WW
});
run_HMM(hmm, counts[*i], good_bins, good_map);
}
}
int main_count(int argc, char **argv)
{
// Command line options
Conf conf;
boost::program_options::options_description generic("Generic options");
generic.add_options()
("help,?", "show help message")
("verbose,v", "Be more verbose in the output")
("mapq,q", boost::program_options::value<int>(&conf.minMapQual)->default_value(10), "min mapping quality")
("window,w", boost::program_options::value<unsigned int>(&conf.window)->default_value(500000), "window size of fixed windows")
("out,o", boost::program_options::value<boost::filesystem::path>(&conf.f_out)->default_value("out.txt.gz"), "output file for counts + strand state (gz)")
("bins,b", boost::program_options::value<boost::filesystem::path>(&conf.f_bins), "BED file with manual bins (disables -w). See also 'makebins'")
("exclude,x", boost::program_options::value<boost::filesystem::path>(&conf.f_excl), "Exclude chromosomes and regions")
("info,i", boost::program_options::value<boost::filesystem::path>(&conf.f_info), "Write info about samples")
("do-not-filter-by-WC", "When black-listing bins, only consider coverage and not WC/WW/CC states")
("do-not-blacklist-hmm", "Do not output a blacklist (None bins). Bins will be blacklisted for parameter estimation, but not during HMM")
;
// Note: Currently the blacklisting is done after counting via R/norm.R. A better way would be to
// input the blacklist + normalization into MosaiCatcher during the counting, then the HMM could be run
    // only on non-blacklisted bins. Now it is run through many bad bins that potentially affect the quality of the results.
boost::program_options::options_description hidden("Hidden options");
hidden.add_options()
("input-file", boost::program_options::value<std::vector<boost::filesystem::path> >(&conf.f_in), "input bam file(s)")
("sample_info,S", boost::program_options::value<boost::filesystem::path>(&conf.f_sample_info), "write info per sample")
("removed_bins,R", boost::program_options::value<boost::filesystem::path>(&conf.f_removed_bins), "bins that were removed (bed file)")
;
boost::program_options::positional_options_description pos_args;
pos_args.add("input-file", -1);
boost::program_options::options_description cmdline_options;
cmdline_options.add(generic).add(hidden);
boost::program_options::options_description visible_options;
visible_options.add(generic);
boost::program_options::variables_map vm;
boost::program_options::store(boost::program_options::command_line_parser(argc, argv).options(cmdline_options).positional(pos_args).run(), vm);
boost::program_options::notify(vm);
// Check command line arguments
if (!vm["window"].defaulted() && vm.count("bins")) {
std::cerr << "[Error] -w and -b cannot be specified together" << std::endl << std::endl;
goto print_usage_and_exit;
}
if (vm.count("bins") && vm.count("exclude")) {
std::cerr << "[Error] Exclude chromosomes (-x) have no effect when -b is specified. Stop" << std::endl << std::endl;
goto print_usage_and_exit;
}
if (vm.count("help") || !vm.count("input-file"))
{
print_usage_and_exit:
std::cout << std::endl;
std::cout << "Mosaicatcher " << STRINGIFYMACRO(MOSAIC_VERSION_MAJOR);
std::cout << "." << STRINGIFYMACRO(MOSAIC_VERSION_MINOR) << std::endl;
std::cout << "> Count reads from Strand-seq BAM files." << std::endl;
std::cout << std::endl;
std::cout << "Usage: " << argv[0] << " [OPTIONS] <cell1.bam> <cell2.bam> ..." << std::endl << std::endl;
std::cout << visible_options << std::endl;
std::cout << "Notes:" << std::endl;
std::cout << " * writes a table of bin counts and state classifcation as a gzip file (default: out.txt.gz)" << std::endl;
std::cout << " * Reads are counted by start position" << std::endl;
std::cout << " * One cell per BAM file, including SM tag in header" << std::endl;
std::cout << " * For paired-end data, only read 1 is counted" << std::endl;
return vm.count("help") ? 0 : 1;
}
/////////////////////////////////////////////////////////// global variables
/* leave one BAM header open to get chrom names & lengths */
bam_hdr_t* hdr = NULL;
/* regarding each cell */
std::vector<CellInfo> cells(conf.f_in.size());
std::vector<TGenomeCounts> counts(conf.f_in.size());
std::vector<unsigned> good_cells;
/* regarding each sample */
std::unordered_map<std::string, SampleInfo> samples;
/* regarding bins */
std::vector<Interval> bins;
std::vector<int32_t> chrom_map;
std::vector<unsigned> good_bins;
std::vector<int32_t> good_map;
////////////////////////////////////////////////////////////////////////////
//
// Chapter: Binning & counting
// ===========================
//
// Read sample names from headers.
// Keep one header throughout the program.
if (vm.count("verbose")) std::cout << "[Info] Exploring SAM headers..." << std::endl;
for(unsigned i = 0; i < conf.f_in.size(); ++i)
{
cells[i].id = (int32_t)i;
cells[i].bam_file = conf.f_in[i].string();
samFile* samfile = sam_open(conf.f_in[i].string().c_str(), "r");
if (samfile == NULL) {
std::cerr << "[Error] Fail to open file " << conf.f_in[i].string() << std::endl;
return 1;
}
hdr = sam_hdr_read(samfile);
if (!get_RG_tag("SM", hdr->text, cells[i].sample_name)) {
std::cerr << "[Error] Each BAM file has to have exactly one RG tag. Group cells " << std::endl;
std::cerr << " belonging to the same sample by the SM tag." << std::endl;
std::cerr << " Problematic file: " << conf.f_in[i].string() << std::endl << std::endl;
goto print_usage_and_exit;
}
if (!get_RG_tag("ID", hdr->text, cells[i].cell_name, /*allow_multiple_matches = */ true)) {
std::cerr << "[Error] Each BAM file has to have exactly one RG tag." << std::endl;
std::cerr << " Problematic file: " << conf.f_in[i].string() << std::endl;
goto print_usage_and_exit;
}
sam_close(samfile);
}
// Bin the genome
unsigned median_binsize;
chrom_map = std::vector<int32_t>(hdr->n_targets, -1);
if (vm.count("bins"))
{
if (!read_dynamic_bins(bins,
chrom_map,
conf.f_bins.string().c_str(),
hdr))
return 1;
TMedianAccumulator<unsigned> med_acc;
for (Interval const & b : bins)
med_acc(b.end - b.start);
median_binsize = boost::accumulators::median(med_acc);
std::cout << "[Info] Reading " << bins.size() << " variable-width bins with median bin size of " << round(median_binsize/1000) << "kb" << std::endl;
}
else
{
std::vector<Interval> exclude;
if (vm.count("exclude")) {
read_exclude_file(conf.f_excl.string(), hdr, exclude, vm.count("verbose"));
sort(exclude.begin(), exclude.end(), interval::invt_less);
}
std::cout << "[Info] Creating " << round(conf.window/1000) << "kb bins with " << exclude.size() << " excluded regions" << std::endl;
create_fixed_bins(bins,
chrom_map,
conf.window,
exclude,
hdr->n_targets,
hdr->target_len);
median_binsize = conf.window;
}
// add last element for easy calculation of number of bins
chrom_map.push_back((int32_t)bins.size());
// Count in bins. If A bam file cannot be read, the cell is ignored and
// the respective entry in `counts` and `cells` will be erased.
std::cout << "[Info] Reading " << conf.f_in.size() << " BAM files...";
boost::progress_display show_progress1(conf.f_in.size());
for (unsigned i = 0, i_f = 0; i_f < conf.f_in.size(); ++i, ++i_f)
{
if (!count_sorted_reads(conf.f_in[i_f].string(),
bins,
chrom_map,
hdr,
conf.minMapQual,
counts[i],
cells[i]))
{
std::cerr << "[Warning] Ignoring cell " << conf.f_in[i_f].string() << std::endl;
counts.erase(counts.begin()+i);
cells.erase(cells.begin()+i);
--i;
}
++show_progress1;
}
//
// Chapter: Filter cells and bins and estimate NB parameter p
// ==========================================================
//
// median per cell
count::set_median_per_cell(counts, cells);
// filter cells with low counts
good_cells = count::get_good_cells(counts, cells);
// filter bins with abnormal counts
if (good_cells.size() < 5) {
std::cerr << "[Warning] Only few cells with sufficient coverage. I will not filter bad bins" << std::endl;
good_bins.resize(bins.size());
std::iota(good_bins.begin(), good_bins.end(), 0); // fill with 0,1,2,...
} else {
good_bins = count::get_good_bins(counts,
cells,
good_cells,
vm.count("verbose"),
!vm.count("do-not-filter-by-WC"));
if (vm.count("verbose")) std::cout << "[Info] Filtered out " << bins.size() - good_bins.size() << " bad bins from " << bins.size() << std::endl;
}
// build chrom_map for good bins
good_map = std::vector<int32_t>(chrom_map.size() - 1, -1);
int32_t pos = 0;
for (int32_t chr = 0; chr < static_cast<int32_t>(good_map.size()); ++chr) {
while (pos < good_bins.size() && bins[good_bins[pos]].chr < chr)
++pos;
        // now pos is either at the first occurrence of chr, or at the end.
if (pos >= good_bins.size()) good_map[chr] = (int32_t)good_bins.size();
else good_map[chr] = pos;
}
// add last element for easy calculation of number of bins
good_map.push_back((int32_t)good_bins.size());
// calculate cell means and cell variances, grouped by sample (not cell)
calculate_new_cell_mean(samples, cells, counts, good_cells, good_bins);
// Estimation of parameter p per sample (should work even with one cell only)
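    // (Pooled moment estimate: with var = mean / p for this NB parameterization,
    //  p is estimated as sum(mean_i^2) / sum(mean_i * var_i) across cells of the sample.)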
for (auto it = samples.begin(); it != samples.end(); ++it) {
SampleInfo & s = it->second;
s.p = std::inner_product(s.means.begin(), s.means.end(), s.means.begin(), 0.0f) \
/ std::inner_product(s.means.begin(), s.means.end(), s.vars.begin(), 0.0f);
}
// Write sample information to file
if (vm.count("sample_info")) {
if (vm.count("verbose")) std::cout << "[Write] sample information: " << conf.f_sample_info.string() << std::endl;
std::ofstream out(conf.f_sample_info.string());
if (out.is_open()) {
out << "sample\tcells\tp\tmeans\tvars" << std::endl;
for (auto it = samples.begin(); it != samples.end(); ++it) {
SampleInfo const & s = it->second;
out << it->first << "\t" << s.means.size() << "\t" << s.p << "\t" << s.means[0];
for (unsigned k=1; k<s.means.size(); ++k) out << "," << s.means[k];
out << "\t" << s.vars[0];
for (unsigned k=1; k<s.vars.size(); ++k) out << "," << s.vars[k];
out << std::endl;
}
} else {
std::cerr << "[Warning] Cannot write to " << conf.f_sample_info.string() << std::endl;
}
}
//
// Chapter: Run HMM
// ================
//
if(vm.count("do-not-blacklist-hmm")) {
if (vm.count("verbose")) std::cout << "[Info] Previous filters are not used during HMM phase" << std::endl;
std::vector<unsigned> normal_bins(bins.size());
std::iota(normal_bins.begin(), normal_bins.end(), 0);
run_standard_HMM(counts,
good_cells,
cells,
normal_bins,
chrom_map,
samples,
10.0f / bins.size());
} else {
run_standard_HMM(counts,
good_cells,
cells,
good_bins,
good_map,
samples,
10.0f / good_bins.size());
}
// Print cell information:
if (vm.count("info")) {
if (vm.count("verbose")) std::cout << "[Write] Cell summary: " << conf.f_info.string() << std::endl;
write_cell_info(conf.f_info.string(), cells);
}
// Write final counts + classification
std::cout << "[Write] count table: " << conf.f_out.string() << std::endl;
{
// TODO: why do I pass vector<pair>? I could make it two separate vectors. Just check where else the io function is called.
struct sample_cell_name_wrapper {
std::vector<CellInfo> const & cells;
sample_cell_name_wrapper(std::vector<CellInfo> const & cells) : cells(cells)
{}
std::pair<std::string,std::string> operator[](size_t i) const {
return std::make_pair(cells[i].sample_name, cells[i].cell_name);
}
};
if (!io::write_counts_gzip(conf.f_out.string(),
counts,
bins,
hdr->target_name,
sample_cell_name_wrapper(cells)) )
return 1;
}
return 0;
}
|
{"hexsha": "1bbb1268f5eb559749c276cb7b474e883f2b55fd", "size": 17428, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/count.hpp", "max_stars_repo_name": "tobiasmarschall/mosaicatcher", "max_stars_repo_head_hexsha": "42b078ec0964f3711f0f4871065be5157e63eb37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-12-26T01:36:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-30T00:28:01.000Z", "max_issues_repo_path": "src/count.hpp", "max_issues_repo_name": "tobiasmarschall/mosaicatcher", "max_issues_repo_head_hexsha": "42b078ec0964f3711f0f4871065be5157e63eb37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2018-01-12T11:56:43.000Z", "max_issues_repo_issues_event_max_datetime": "2019-01-29T16:09:34.000Z", "max_forks_repo_path": "src/count.hpp", "max_forks_repo_name": "tobiasmarschall/mosaicatcher", "max_forks_repo_head_hexsha": "42b078ec0964f3711f0f4871065be5157e63eb37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2018-05-24T09:12:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-02T11:33:28.000Z", "avg_line_length": 40.8149882904, "max_line_length": 157, "alphanum_fraction": 0.568510443, "num_tokens": 4205}
|
[STATEMENT]
lemma ipp_cond2_minus:"\<lbrakk>ipp_cond1 {a} i; ipp_cond2 z {a} i f\<rbrakk> \<Longrightarrow>
ipp_cond2 z {\<^sub>i- a} i f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>ipp_cond1 {a} i; ipp_cond2 z {a} i f\<rbrakk> \<Longrightarrow> ipp_cond2 z {\<^sub>i- a} i f
[PROOF STEP]
by (simp add:ipp_cond2_def, simp add:aug_pm_aug_pm_minus)
|
{"llama_tokens": 178, "file": "Group-Ring-Module_Algebra9", "length": 1}
|
clear all; close all; clc
n=200; L=8;
x=linspace(0,L,n);
x1=x(1:100); % train
x2=x(101:200); % test
n1=length(x1);
n2=length(x2);
ftrain=(x1.^2).'; % train parabola x=[0,4]
ftest=(x2.^2).'; % test parbola x=[4,5]
figure(1), subplot(3,1,1),
plot(x1,ftrain,'r',x2,ftest,'b','Linewidth',[2])
legend('','','Location','Northwest')
legend boxoff
M=30; % number of model terms
Eni=zeros(100,M); Ene=zeros(100,M);
for jj=1:M
for j=1:jj
phi_i(:,j)=(x1.').^(j-1); % interpolation key
phi_e(:,j)=(x2.').^(j-1); % extrapolation key
end
f=(x.^2).';
for j=1:100
fni=(x1.^2+0.1*randn(1,n1)).'; % interpolation
fne=(x2.^2+0.1*randn(1,n2)).'; % extrapolation
ani=pinv(phi_i)*fni; fnai=phi_i*ani;
Eni(j,jj)=norm(ftrain-fnai)/norm(ftrain);
fnae=phi_e*ani; % use loadings from x in [0,4]
Ene(j,jj)=norm(ftest-fnae)/norm(ftest);
end
end
subplot(3,2,3), boxplot(Eni), axis([0.5 30.5 0 0.7]), set(gca,'Xlim',[0.5 30.5],'Xtick',1:30,'Xticklabel',{'1','','','','5','','','','','10','','','','','15','','','','','20','','','','','25','','','','','30'})
subplot(3,2,4), boxplot(Eni), axis([0.5 30.5 0 0.02]), set(gca,'Xlim',[0.5 30.5],'Xtick',1:30,'Xticklabel',{'1','','','','5','','','','','10','','','','','15','','','','','20','','','','','25','','','','','30'})
subplot(3,2,5), boxplot(Ene), set(gca,'Xlim',[0.5 30.5],'Xtick',1:30,'Xticklabel',{'1','','','','5','','','','','10','','','','','15','','','','','20','','','','','25','','','','','30'})
subplot(3,2,6), boxplot(log(Ene+1)), axis([0.5 30.5 0 30]), set(gca,'Xtick',1:30,'Xticklabel',{'1','','','','5','','','','','10','','','','','15','','','','','20','','','','','25','','','','','30'})
|
{"author": "dynamicslab", "repo": "databook_matlab", "sha": "d390d39d18489a4804ee87a143ae8db8a1f3010b", "save_path": "github-repos/MATLAB/dynamicslab-databook_matlab", "path": "github-repos/MATLAB/dynamicslab-databook_matlab/databook_matlab-d390d39d18489a4804ee87a143ae8db8a1f3010b/CH04/CH04_SEC05_1_CrossValidate.m"}
|
"""Inputs for MNIST dataset"""
import math
import numpy as np
import glob
import CSGM.dcgan.dcgan_utils as dcgan_utils
import CSGM.mnist.mnist_model_def as mnist_model_def
import tensorflow.compat.v1 as tf
from tensorflow.examples.tutorials.mnist import input_data
NUM_TEST_IMAGES = 10000
def get_random_test_subset(mnist, sample_size):
"""Get a small random subset of test images"""
idxs = np.random.choice(NUM_TEST_IMAGES, sample_size)
images = [mnist.test.images[idx] for idx in idxs]
images = {i: image for (i, image) in enumerate(images)}
return images
def sample_generator_images(hparams):
"""Sample random images from the generator"""
# Create the generator
_, x_hat, restore_path, restore_dict = mnist_model_def.vae_gen(hparams)
# Get a session
sess = tf.Session()
# Intialize and restore model parameters
init_op = tf.global_variables_initializer()
sess.run(init_op)
restorer = tf.train.Saver(var_list=restore_dict)
restorer.restore(sess, restore_path)
images = {}
counter = 0
rounds = int(math.ceil(hparams.num_input_images/hparams.batch_size))
for _ in range(rounds):
images_mat = sess.run(x_hat)
for (_, image) in enumerate(images_mat):
if counter < hparams.num_input_images:
images[counter] = image
counter += 1
# Reset TensorFlow graph
sess.close()
tf.reset_default_graph()
return images
def model_input(hparams):
"""Create input tensors"""
mnist = input_data.read_data_sets('./data/mnist', one_hot=True)
if hparams.input_type == 'full-input':
image_paths = glob.glob(hparams.input_path_pattern)
image_paths= image_paths[:hparams.num_input_images]
image_paths.sort()
        images = [dcgan_utils.get_image(image_path, hparams.input_size, True, hparams.input_size, True) for image_path in image_paths]
        # images = [dcgan_utils.get_image(image_path, image_size) for image_path in image_paths]
images = {i: image.reshape([hparams.input_size*hparams.input_size*1])/2+0.5 for (i, image) in enumerate(images)}
elif hparams.input_type == 'random-test':
images = get_random_test_subset(mnist, hparams.num_input_images)
elif hparams.input_type == 'gen-span':
images = sample_generator_images(hparams)
else:
raise NotImplementedError
return images
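# --- Hypothetical usage sketch (the Namespace below and its attribute values are assumptions,
# --- not part of the original project; they only mirror how `model_input` reads `hparams`).
if __name__ == "__main__":
    from argparse import Namespace
    _hparams = Namespace(input_type='random-test', num_input_images=4,
                         input_size=28, input_path_pattern='')
    _images = model_input(_hparams)
    print(len(_images), 'images loaded, each of shape', _images[0].shape)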
|
{"hexsha": "e06d17bedcdd132e45464dc75257584e375ac846", "size": 2408, "ext": "py", "lang": "Python", "max_stars_repo_path": "CSGM/mnist/mnist_input.py", "max_stars_repo_name": "PSCLab-ASU/OpenICS", "max_stars_repo_head_hexsha": "e8f639f9278ce88c98f14daf026a56395cb64ca9", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-03-03T13:13:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T12:02:59.000Z", "max_issues_repo_path": "CSGM/mnist/mnist_input.py", "max_issues_repo_name": "PSCLab-ASU/OpenICS", "max_issues_repo_head_hexsha": "e8f639f9278ce88c98f14daf026a56395cb64ca9", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CSGM/mnist/mnist_input.py", "max_forks_repo_name": "PSCLab-ASU/OpenICS", "max_forks_repo_head_hexsha": "e8f639f9278ce88c98f14daf026a56395cb64ca9", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-04T12:16:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-09T03:07:44.000Z", "avg_line_length": 31.2727272727, "max_line_length": 217, "alphanum_fraction": 0.7059800664, "include": true, "reason": "import numpy", "num_tokens": 563}
|
# Plots product 10
# By Jose Ignacio Hernandez
# Load packages
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Load data
inputfile = "../output/producto10/FallecidosEtario_T.csv"
dat = pd.read_csv(inputfile)
# Create variables
date = dat["Grupo de edad"]
cases = dat[["<=39","40-49","50-59","60-69","70-79","80-89",">=90"]].diff()
colsums = cases.sum(axis=1)
group1 = cases["<=39"]#/colsums*100
group2 = cases["40-49"]#/colsums*100
group3 = cases["50-59"]#/colsums*100
group4 = cases["60-69"]#/colsums*100
group5 = cases["70-79"]#/colsums*100
group6 = cases["80-89"]#/colsums*100
group7 = cases[">=90"]#/colsums*100
group1 = group1.fillna(0)
group2 = group2.fillna(0)
group3 = group3.fillna(0)
group4 = group4.fillna(0)
group5 = group5.fillna(0)
group6 = group6.fillna(0)
group7 = group7.fillna(0)
# colsums = colsums.fillna(0)
p1 = plt.bar(date, group1)
p2 = plt.bar(date, group2,bottom=group1)
p3 = plt.bar(date, group3,bottom=group1+group2)
p4 = plt.bar(date, group4,bottom=group1+group2+group3)
p5 = plt.bar(date, group5,bottom=group1+group2+group3+group4)
p6 = plt.bar(date, group6,bottom=group1+group2+group3+group4+group5)
p7 = plt.bar(date, group7,bottom=group1+group2+group3+group4+group5+group6)
plt.ylabel('Casos')
plt.title('Fallecidos diarios por grupo de edad')
plt.xticks(np.arange(0,len(date),len(date)-1))
plt.legend((p1[0], p2[0], p3[0], p4[0], p5[0], p6[0], p7[0]), ('<=39', '40-49','50-59','60-69','70-79','80-89','>=90'))
plt.savefig('fallecidos_perc.png')
plt.show()
|
{"hexsha": "56895188ff3ded0abf8679975a0380b89e89b4bc", "size": 1521, "ext": "py", "lang": "Python", "max_stars_repo_path": "plots/plots_product_10.py", "max_stars_repo_name": "ighdez/Datos-COVID19", "max_stars_repo_head_hexsha": "9b11b59f3a63d743681916b55c33440f5f18e541", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plots/plots_product_10.py", "max_issues_repo_name": "ighdez/Datos-COVID19", "max_issues_repo_head_hexsha": "9b11b59f3a63d743681916b55c33440f5f18e541", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plots/plots_product_10.py", "max_forks_repo_name": "ighdez/Datos-COVID19", "max_forks_repo_head_hexsha": "9b11b59f3a63d743681916b55c33440f5f18e541", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0408163265, "max_line_length": 119, "alphanum_fraction": 0.6962524655, "include": true, "reason": "import numpy", "num_tokens": 572}
|
import re
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
import attr
import numpy as np
import pandas as pd
import talib
from sklearn.preprocessing import FunctionTransformer
@attr.s
class TAFactory:
"""
Factory that creates sklearn transformers for function available in the TA-lib package.
"""
ta_func_dict = talib.__dict__
_real = "real"
@staticmethod
    def get_function_groups() -> Dict[str, List[str]]:
"""
Returns a dictionary with TA-lib function names specified per group.
        :return: keys are group names, values are lists of function names in that group
        :rtype: Dict[str, List[str]]
"""
return talib.get_function_groups()
@staticmethod
def get_functions() -> List[str]:
"""
Returns all the available function names in TA-lib package.
:return: list with function names in the TA-lib package.
:rtype: List[str]
"""
return talib.get_functions()
    def get_args_kwargs(self, func_name: str) -> Tuple[List[str], Union[List[str], None]]:
"""
Returns a tuple with a list of valid argument names and keyword argument names for the TA function.
:param func_name: function name in the TA-lib package
:type func_name: str
        :return: the first element is the list of valid argument names; the second element is the list of
            valid keyword argument names, or None if the function takes no keyword arguments.
        :rtype: Tuple[List[str], Union[List[str], None]]
"""
ta_func = self.get_ta_func(func_name)
signature = re.search(r"\((.+)\)", ta_func.__doc__).group(1)
args_kwargs_names = signature.split("[, ")
if len(args_kwargs_names) == 1:
args_names, kwargs_names = args_kwargs_names[0], None
else:
args_names, kwargs_names = args_kwargs_names
args_names_list = args_names.split(", ")
if kwargs_names is not None:
kwargs_names_list = kwargs_names.strip("=?]").split("=?, ")
else:
kwargs_names_list = None
return args_names_list, kwargs_names_list
def get_ta_func(self, func_name: str) -> callable:
"""
Returns the TA function callable.
:param func_name: name of the function in TA-lib package
:type func_name: str
:raises ValueError: if provided name does not exist as a function in the TA-lib package an error is raised
:return: the TA function which creates TA features
:rtype: callable
"""
if func_name not in self.ta_func_dict.keys():
raise ValueError(
f"The func_name {func_name} is not available. Please check if the name is spelled correctly."
)
ta_func = self.ta_func_dict.get(func_name)
return ta_func
def create_transformer(
self,
func_name: str,
col_to_transform: Union[List[str], str] = None,
col_to_transform_mapping: Dict[str, str] = None,
kw_args: dict = None,
) -> FunctionTransformer:
"""
Creates a transformer for a given function available in the TA-lib package.
:param func_name: name of function in TA-lib package
:type func_name: str
:param col_to_transform: name of the column in dataframe to transform, defaults to None
:type col_to_transform: Union[List[str], str], optional
:param col_to_transform_mapping: remapping of names in dataframe to valid arg names in the TA function,
defaults to None
:type col_to_transform_mapping: Dict[str, str], optional
:param kw_args: kwargs for the TA function, defaults to None
:type kw_args: dict, optional
:raises TypeError: transformer only works on pandas DataFrames
:raises ValueError: transformer expect certain columns to be available in the dataframe to transform
:return: scikit-learn FunctionTransformer version of the TA function
:rtype: FunctionTransformer
"""
ta_func = self.get_ta_func(func_name)
args_names_list, kwargs_names_list = self.get_args_kwargs(func_name)
args_names_list = self._check_and_set_args(
func_name, args_names_list, col_to_transform, col_to_transform_mapping
)
kw_args = self._check_and_set_kwargs(func_name, kw_args, kwargs_names_list)
def ta_func_transformed(X: pd.DataFrame) -> np.ndarray:
if not isinstance(X, pd.DataFrame):
raise TypeError(
f"X must be a pandas DataFrame, but got type {type(X)}."
)
if not set(X.columns).issuperset(set(args_names_list)):
raise ValueError(
f"Expected the following columns in the dataframe: {set(args_names_list) - set(X.columns)}"
)
args = [X[i] for i in args_names_list]
output = ta_func(*args, **kw_args)
# parse output to 2-d arrays to enable concatenating
if isinstance(output, tuple):
output = pd.concat(output, axis=1).values
if output.ndim != 2:
if isinstance(output, pd.Series):
output = output.values
output = output.reshape(-1, 1)
return output
return FunctionTransformer(ta_func_transformed)
def _check_and_set_args(
self,
func_name: str,
args_names_list: List[str],
        col_to_transform: Union[List[str], str, None],
        col_to_transform_mapping: Union[Dict[str, str], None],
) -> List[str]:
"""
Checks and sets the args for the TA function.
:param func_name: name of the TA function
:type func_name: str
:param args_names_list: list with valid argument names
:type args_names_list: List[str]
:param col_to_transform: if the TA function takes in an array this will specify which column to transform in
the dataframe
        :type col_to_transform: Union[List[str], str, None]
:param col_to_transform_mapping: maps valid argument names to column names in the dataframe to transform
        :type col_to_transform_mapping: Union[Dict[str, str], None]
:raises ValueError: column name to transform is not provided, i.e. transformer does not know which column
in the dataframe to transform
        :raises ValueError: transformer takes in two arrays, so both columns need to be specified
:raises TypeError: the column mapping is not a dictionary
:raises ValueError: the mapping contains names that are not valid argument names
:return: list with valid column names to extract from the dataframe to transform
:rtype: List[str]
"""
# ta func takes in an array and transforms it directly
# since X is a DataFrame with multiple columns, it must be
# specified which column is being transformed
if self._real in args_names_list:
if not isinstance(col_to_transform, str):
raise ValueError(
f"The function {func_name} requires col_to_transform to be specified."
)
idx_real = args_names_list.index(self._real)
args_names_list[idx_real] = col_to_transform
# functions that take in two arrays need to be specified as a list with two column names
if all(f"real{i}" in args_names_list for i in range(2)):
if not (
isinstance(col_to_transform, list)
and all(isinstance(i, str) for i in col_to_transform)
):
raise ValueError(
f"Selected function {func_name} requires two columns to be "
"specified to apply the transformation on, please provide a list with column names. "
)
for i in range(2):
idx_real = args_names_list.index(f"real{i}")
args_names_list[idx_real] = col_to_transform[i]
# some expected columns can have different names in the dataframe
# this part will map the expected name in the ta func to the name in the dataframe
if col_to_transform_mapping is not None:
if not isinstance(col_to_transform_mapping, dict):
raise TypeError(
f"Expected a dictionary for col_to_transform_mapping, but got {type(col_to_transform_mapping)}."
)
if set(col_to_transform_mapping.keys()).issubset(set(args_names_list)):
for i, arg_name_i in enumerate(args_names_list):
if arg_name_i in col_to_transform_mapping.keys():
args_names_list[i] = col_to_transform_mapping[arg_name_i]
else:
raise ValueError(
f"The function {func_name} does not accept "
f"{set(col_to_transform_mapping.keys()) - set(args_names_list)} as arguments. "
f"The only valid arguments are {args_names_list}."
)
return args_names_list
@staticmethod
def _check_and_set_kwargs(
func_name: str,
        kw_args: Union[None, Dict[str, Any]],
kwargs_names_list: List[str],
) -> Dict[str, Any]:
"""
Checks if the provided kwargs for the TA function are valid.
:param func_name: name of the TA function
:type func_name: str
:param kw_args: dictionary with kwargs that are being set for the TA function
        :type kw_args: Union[None, Dict[str, Any]]
:param kwargs_names_list: list with valid kwarg names
:type kwargs_names_list: List[str]
        :raises ValueError: the TA function has no kwargs, but kwargs are being set.
        :raises ValueError: the TA function does not accept all the kwargs that are being set.
:return: Dictionary with valid kwargs for the TA function.
:rtype: Dict[str, Any]
"""
# raise error if kwargs are being set, while there are no kwargs or some do not exist
if kwargs_names_list is None and kw_args is not None:
raise ValueError(
f"Selected function {func_name} has no key word arguments, but got {kw_args}."
)
if kwargs_names_list is not None and kw_args is not None:
if not set(kw_args.keys()).issubset(set(kwargs_names_list)):
raise ValueError(
f"The kwargs {set(kw_args.keys()) - set(kwargs_names_list)} are not valid kwargs "
f"for the function {func_name}. Valid kwargs are: {kwargs_names_list}."
)
if kw_args is None:
kw_args = dict()
return kw_args
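# Minimal usage sketch (not part of the original module). The column name "close",
# the toy price series and the kwarg value are illustrative assumptions.
if __name__ == "__main__":
    factory = TAFactory()
    # Build a transformer for a simple moving average over the "close" column.
    sma_transformer = factory.create_transformer(
        "SMA", col_to_transform="close", kw_args={"timeperiod": 5}
    )
    prices = pd.DataFrame({"close": [float(i) for i in range(1, 21)]})
    features = sma_transformer.fit_transform(prices)
    print(features.shape)  # expected: (20, 1), with NaN during the warm-up period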
|
{"hexsha": "6082354cdd5f033faaefc2b0c6ef90e3cb46dbed", "size": 10875, "ext": "py", "lang": "Python", "max_stars_repo_path": "mizarlabs/transformers/technical/factory.py", "max_stars_repo_name": "MizarAI/mizar-labs", "max_stars_repo_head_hexsha": "c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2021-03-19T15:41:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T14:23:07.000Z", "max_issues_repo_path": "mizarlabs/transformers/technical/factory.py", "max_issues_repo_name": "MizarAI/mizar-labs", "max_issues_repo_head_hexsha": "c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2021-03-17T14:16:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-31T16:51:12.000Z", "max_forks_repo_path": "mizarlabs/transformers/technical/factory.py", "max_forks_repo_name": "MizarAI/mizar-labs", "max_forks_repo_head_hexsha": "c6ec17bc3d9a91ec3f6ee2e7b20017499115fc37", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-07-02T21:38:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-10T09:56:18.000Z", "avg_line_length": 42.48046875, "max_line_length": 116, "alphanum_fraction": 0.6271264368, "include": true, "reason": "import numpy", "num_tokens": 2242}
|
# Open3D: www.open3d.org
# The MIT License (MIT)
# See license file or visit www.open3d.org for details
import numpy as np
import argparse
import math
import sys
sys.path.append("../..")
sys.path.append("../Utility")
from py3d import *
from common import *
def scalable_integrate_rgb_frames(path_dataset, intrinsic):
[color_files, depth_files] = get_rgbd_file_lists(path_dataset)
n_files = len(color_files)
n_frames_per_fragment = 100
n_fragments = int(math.ceil(float(n_files) / n_frames_per_fragment))
volume = ScalableTSDFVolume(voxel_length = 3.0 / 512.0,
sdf_trunc = 0.04, with_color = True)
pose_graph_fragment = read_pose_graph(
path_dataset + template_global_posegraph_optimized)
for fragment_id in range(len(pose_graph_fragment.nodes)):
pose_graph_rgbd = read_pose_graph(path_dataset +
template_fragment_posegraph_optimized % fragment_id)
for frame_id in range(len(pose_graph_rgbd.nodes)):
frame_id_abs = fragment_id * n_frames_per_fragment + frame_id
print("Fragment %03d / %03d :: integrate rgbd frame %d (%d of %d)."
% (fragment_id, n_fragments-1, frame_id_abs, frame_id+1,
len(pose_graph_rgbd.nodes)))
color = read_image(color_files[frame_id_abs])
depth = read_image(depth_files[frame_id_abs])
rgbd = create_rgbd_image_from_color_and_depth(color, depth,
depth_trunc = 3.0, convert_rgb_to_intensity = False)
pose = np.dot(pose_graph_fragment.nodes[fragment_id].pose,
pose_graph_rgbd.nodes[frame_id].pose)
volume.integrate(rgbd, intrinsic, np.linalg.inv(pose))
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
draw_geometries([mesh])
mesh_name = path_dataset + template_global_mesh
write_triangle_mesh(mesh_name, mesh, False, True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=
"integrate the whole RGBD sequence using estimated camera pose")
parser.add_argument("path_dataset", help="path to the dataset")
parser.add_argument("-path_intrinsic",
help="path to the RGBD camera intrinsic")
args = parser.parse_args()
if args.path_intrinsic:
intrinsic = read_pinhole_camera_intrinsic(args.path_intrinsic)
else:
intrinsic = PinholeCameraIntrinsic.prime_sense_default
scalable_integrate_rgb_frames(args.path_dataset, intrinsic)
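# Usage note (not in the original script); the paths below are placeholders:
#   python integrate_scene.py <path_to_dataset> [-path_intrinsic <camera_intrinsic.json>]
# When -path_intrinsic is omitted, the PrimeSense default intrinsics are used.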
|
{"hexsha": "60f3c600c1174c0147c1eec7d6ff8f8d60d47376", "size": 2274, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pipeline/py3d/Tutorial/ReconstructionSystem/integrate_scene.py", "max_stars_repo_name": "riccardomarin/FARM-ZOSR", "max_stars_repo_head_hexsha": "7d29469d7e1c08b4a1e5d13084435001f509bec3", "max_stars_repo_licenses": ["Python-2.0", "OLDAP-2.7"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-09-16T19:46:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-11T14:51:37.000Z", "max_issues_repo_path": "src/Python/Tutorial/ReconstructionSystem/integrate_scene.py", "max_issues_repo_name": "3DReconstruction/Open3D", "max_issues_repo_head_hexsha": "a3673d753091daf36fe0555a164b3967bf16546c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Python/Tutorial/ReconstructionSystem/integrate_scene.py", "max_forks_repo_name": "3DReconstruction/Open3D", "max_forks_repo_head_hexsha": "a3673d753091daf36fe0555a164b3967bf16546c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0952380952, "max_line_length": 70, "alphanum_fraction": 0.7748460862, "include": true, "reason": "import numpy", "num_tokens": 573}
|
##############################################################################
import sys
import numpy as np
from IOModule import IOProcessor
from HandlerModule import Handler
from EncoderModule import Encoder
from ExperimentationModule import Experimentation
from VariablesModule import N_FOLDS, MODEL_DICT, HEADERS_TRAIN_FILENAME
from VariablesModule import ROWS_REMOVABLE_ALL, ROWS_REMOVABLE_ANY
from VariablesModule import HEADERS_REMOVALBLE, HEADERS_MEAN, HEADERS_MODE
from VariablesModule import HEADERS_MEDIAN, HEADERS_PREVIOUS
from VariablesModule import HEADERS_BOOLEAN, HEADERS_CATEGORICAL
if __name__ == '__main__':
    """This script evaluates and generates an ML model trained on the
    training set.
    """
    try:
        train_filepath = sys.argv[1]
        model_filepath = sys.argv[2]
        model_initials = sys.argv[3]
    except IndexError:
        raise Exception("Missing one or multiple sys args.")
    try:
        model = MODEL_DICT[model_initials]
    except KeyError:
        raise Exception("Model not found in MODEL_DICT.")
print "Reading training set"
df = IOProcessor.read_dataset(train_filepath)
print "Removing unninformative rows and columns"
Handler.remove_rows(df, ROWS_REMOVABLE_ALL, 'all')
Handler.remove_rows(df, ROWS_REMOVABLE_ANY, 'any')
Handler.remove_columns(df, HEADERS_REMOVALBLE)
print "Imputing missing data"
Handler.impute_missing_values(df, HEADERS_MEAN, 'mean')
Handler.impute_missing_values(df, HEADERS_MEDIAN, 'median')
HEADERS_MODE.append('default')
Handler.impute_missing_values(df, HEADERS_MODE, 'mode')
Handler.impute_missing_values(df, HEADERS_PREVIOUS)
print "Encoding features and labels"
Encoder.encode_booleans(df, HEADERS_BOOLEAN)
encoded_df = Encoder.encode_categoricals(df,
HEADERS_CATEGORICAL)
X, y, ids_frame = Encoder.transform_and_del_dataframe(encoded_df,
'default', 'ids')
print "(rows, features) = " + str(X.shape)
print "Evaluating model"
exp = Experimentation(model, N_FOLDS)
f1_folds, avg, dev = exp.experiment_model(X, y, 'f1_micro')
print "\n################################"
print "F1 scores: " + str(f1_folds)
print "F1 mean: " + str(avg)
print "F1 deviation: " + str(dev)
print "################################\n"
f1_folds, avg, dev = exp.experiment_model(X, y, 'neg_log_loss')
print "\n################################"
print "Neg Log Loss scores: " + str(f1_folds)
print "Neg Log Loss mean: " + str(avg)
print "Neg Log Loss deviation: " + str(dev)
print "################################\n"
print "Training model"
exp.train_model(X, y)
print exp.predict_probs(X)
print "Storing model"
trained_model = exp.get_model()
IOProcessor.store_model(trained_model, model_filepath)
encoded_headers = np.array(encoded_df.columns)
encoded_headers = np.insert(encoded_headers, 0, 'ids', 0)
IOProcessor.store_encoded_headers(encoded_headers,
HEADERS_TRAIN_FILENAME)
print "Done"
|
{"hexsha": "4973787724d8588d1545388cec585ffcc7e5f2db", "size": 3161, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiment.py", "max_stars_repo_name": "jpedrocm/DSChallenge", "max_stars_repo_head_hexsha": "cb865d3e8bab66b718c3a2a6943827b0285f534b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "experiment.py", "max_issues_repo_name": "jpedrocm/DSChallenge", "max_issues_repo_head_hexsha": "cb865d3e8bab66b718c3a2a6943827b0285f534b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiment.py", "max_forks_repo_name": "jpedrocm/DSChallenge", "max_forks_repo_head_hexsha": "cb865d3e8bab66b718c3a2a6943827b0285f534b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5168539326, "max_line_length": 78, "alphanum_fraction": 0.6444163239, "include": true, "reason": "import numpy", "num_tokens": 718}
|
# encoding: utf-8
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def list_all_files(rootdir, key):
    # Recursively collect all files under rootdir whose path contains key.
    _files = []
    entries = os.listdir(rootdir)  # list all directories and files in the folder
    for i in range(0, len(entries)):
        path = os.path.join(rootdir, entries[i])
        if os.path.isdir(path):
            _files.extend(list_all_files(path, key))
        if os.path.isfile(path) and key in path:
            _files.append(path)
    return _files
def load_data():
    # Yield one preprocessed signal array per matching data file.
    root = '.'
    key = '10_'
    files = list_all_files(root, key)
    for f in files:
        yield extract_signal(f)
def extract_signal(f):
data = pd.read_table(f, header=None, skiprows=1)
rawdata = np.array(data.iloc[:, 19:20])
force_flag = np.array(data.iloc[:, 2])
tds = np.where(np.diff(force_flag) == 1)[0]
# print(len(tds))
x_data = np.array([np.diff(rawdata[i - 3:i + 13, :], axis=0).T.flatten() for i in tds])
x_data = x_data[np.max(x_data, axis=1) > 20]
x_data = x_data.T
x_data = np.apply_along_axis(
lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)), 0, x_data
)
x_data = (x_data - 0.5) * 2
# print(x_data.max(), x_data.min())
return x_data
def make_data():
sample = np.random.choice(datas.shape[0], batch_size, False)
return datas[sample]
def make_noise():
return np.random.uniform(-1, 1, (batch_size, generator_len))
def train():
d_optim = torch.optim.Adam(D.parameters(), d_lr, betas=(0.5, 0.9))
g_optim = torch.optim.Adam(G.parameters(), g_lr, betas=(0.5, 0.9))
plt.ion()
wd = []
for epoch in range(epochs):
D.train(), G.train()
for ci in range(critic_iters):
data_batch = make_data()
gen_batch = make_noise()
data_batch, gen_batch = Variable(torch.FloatTensor(data_batch)), \
Variable(torch.FloatTensor(gen_batch))
d_loss = -torch.mean(D(data_batch)) + torch.mean(D(G(gen_batch))) + calc_gradient_penalty(data_batch,
G(gen_batch))
wasserstein_distance = -torch.mean(D(G(gen_batch))) + torch.mean(D(data_batch))
print(wasserstein_distance.item())
# d_loss = -torch.mean(torch.log(D(data_batch)) + torch.log(1 - D(G(gen_batch))))
# g_loss = torch.mean(torch.log(1 - D(G(gen_batch))))
d_optim.zero_grad()
d_loss.backward(retain_graph=True)
d_optim.step()
data_batch = make_data()
gen_batch = make_noise()
data_batch, gen_batch = Variable(torch.FloatTensor(data_batch)), \
Variable(torch.FloatTensor(gen_batch))
g_loss = -torch.mean(D(G(gen_batch)))
g_optim.zero_grad()
g_loss.backward()
g_optim.step()
if epoch % 50 == 0:
D.eval(), G.eval()
plt.clf()
plt.suptitle('epoch=%d, w-dist=%.6f' % (epoch, wasserstein_distance.item()))
wd.append(wasserstein_distance.item())
for i in range(16):
plt.subplot(4, 4, i + 1)
gen_diff = G(gen_batch).detach().numpy()
gen_raw = np.hstack((np.cumsum(gen_diff[:, :int(data_len / 2)], axis=1),
np.cumsum(gen_diff[:, int(data_len / 2):], axis=1)))
plt.plot(gen_raw[i])
plt.xlim((0, data_len))
plt.ylim((-1, 1))
plt.pause(0.01)
plt.ioff()
plt.figure()
plt.plot(wd)
plt.show()
def calc_gradient_penalty(x_real, x_gen):
alpha = torch.rand(batch_size, 1)
alpha = alpha.expand(x_real.size())
x_hat = alpha * x_real + (1 - alpha) * x_gen
D_x = D(x_hat)
gradients = torch.autograd.grad(
outputs=D_x,
inputs=x_hat,
grad_outputs=torch.ones(D_x.size()),
create_graph=True,
retain_graph=True,
only_inputs=True
)[0]
# print(gradients)
gradient_penalty = gp_lambda * ((gradients.norm(2, dim=1) - 1) ** 2).mean()
print(gradient_penalty)
return gradient_penalty
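# Clarifying note (not in the original file): calc_gradient_penalty implements the
# WGAN-GP penalty of Gulrajani et al. (2017),
#   gp = lambda * E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2],
# where x_hat is a random interpolation between a real batch and a generated batch.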
if __name__ == '__main__':
datas = load_data()
datas = np.hstack([d for d in datas]).T
batch_size = 32
generator_len = 20
data_len = 15
epochs = 200000
d_lr = 0.000001
g_lr = 0.000001
gp_lambda = 0.1
critic_iters = 5
D = nn.Sequential(
nn.Linear(data_len, 32),
# nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(32, 16),
# nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(16, 4),
# nn.Dropout(0.5),
nn.ReLU(),
nn.Linear(4, 1)
)
G = nn.Sequential(
nn.Linear(generator_len, 30),
nn.ReLU(),
nn.Linear(30, 30),
nn.ReLU(),
nn.Linear(30, data_len),
nn.Tanh()
)
x = np.tile(np.linspace(-1, 1, data_len), [batch_size, 1])
# make_data()
train()
torch.save(D, 'D.model')
torch.save(G, 'G.model')
# D_ = torch.load('D.model')
# G_ = torch.load('G.model')
# print(D_, G_)
# batch_size = 1000
# gen_data = make_noise()
# print(gen_data.shape)
# gen_data = G_(Variable(torch.FloatTensor(gen_data))).detach().numpy()
# plt.ion()
# for i in range(gen_data.shape[0]):
# plt.cla()
# plt.plot(np.cumsum(gen_data[i, :int(data_len / 2)]))
# plt.plot(np.cumsum(gen_data[i, int(data_len / 2):]))
# # gen_raw = np.hstack((np.cumsum(gen_data[:, :int(data_len / 2)], axis=1),
# # np.cumsum(gen_data[:, int(data_len / 2):], axis=1)))
# # plt.plot(gen_raw[i])
# plt.pause(0.2)
|
{"hexsha": "2b8e54d9ba07085ebbc7eda5e53fdf3cf8fed6cb", "size": 5869, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch/exercises/wgan/main_improved_wgan.py", "max_stars_repo_name": "wangyendt/deeplearning_models", "max_stars_repo_head_hexsha": "47883b6c65b8d05a0d1c5737f1552df6476ded34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-04T11:10:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-04T11:10:27.000Z", "max_issues_repo_path": "pytorch/exercises/wgan/main_improved_wgan.py", "max_issues_repo_name": "wangyendt/deeplearning_models", "max_issues_repo_head_hexsha": "47883b6c65b8d05a0d1c5737f1552df6476ded34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch/exercises/wgan/main_improved_wgan.py", "max_forks_repo_name": "wangyendt/deeplearning_models", "max_forks_repo_head_hexsha": "47883b6c65b8d05a0d1c5737f1552df6476ded34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5677083333, "max_line_length": 114, "alphanum_fraction": 0.5529050946, "include": true, "reason": "import numpy", "num_tokens": 1578}
|
Fuzio is an Italianfusion Restaurants restaurant Universal pasta. They have mostly Italian pastas, with a few Asian noodle bowls and steak/tuna/salmon entrees. (If youre looking for something closer to true Italian rather than fusion, then try Caffe Italia, Strings Italian Cafe Strings, Pasta, or Osteria Fasulo.) In May 2009, Fuzio reopened in a new 3,000 sq. ft. location on G Street after moving from its original location in the Davis Commons. The new restaurant is at the site of the former Cantina del Cabo. The patio is a bit smaller but the inside is much more spacious.
From their opening, there has never been much of a wait, and there are always plenty of servers and bussers. The place has recently grown in popularity, due to the promotions theyve been implementing to bring in more customers. Every Wednesday night in February from 49pm, all the pasta dishes are half price, making them an average of about $5. If you go before 7pm, its still happy hour, and the discount is unreal.
Fuzios happy hour is MondayFriday from 3pm7pm. All the appetizers are $3, draft beers are $3, and martinis are discounted as well. Its totally worth it to go with a few friends; you can get about 6 appetizers for under $20. Their appetizers are all pretty good; a few popular ones include the Calamari, Vietnamese Spring Rolls, Firecracker Lettuce Wraps, Crab Cakes, and Firecracker Slyders. Two appetizers is easily a full meal if one of them is the pork slyders or the lettuce wraps. They also have halfpriced bottles of featured wines on Thursdays.
Pastas are mostly under $10 and entrees are generally under $15, and the menu includes items like Shanghai Noodles with lemon grass and red curry, Blue Cheese Chicken Salad with Fuji apples, Mediterranean Grilled Salmon with spinach and Pesto Penne, Flatbread Pizza with Roasted Portobello Mushrooms, Pesto, Tomatoes, and Cheese, and Chicken Parmesan with Provolone. Other favorite dishes include the chicken curry noodles and the house specialty, the Firecracker Fusili. The portions are a bit small, but it could be a positive movement to help with the prevalence of obesity in America (or it could be just small).
The new restaurant has a full bar and wine list, and Fuzio has good martinis and margaritas, too. They have excellent lemonade (raspberry, strawberry, or mango).
There is an http://www.fuzio.com/ official Fuzio site with a menu and other items.
Fuzio is owned by Calmex, Inc., a Modestobased company that also owns a few Chevys restaurants as a franchisee. Previously the Fuzio restaurants were even listed in the phone book as Chevys rather than Fuzio. There are 12 other Fuzios in California, one in Missouri and one in Nevada. The first Fuzio opened in San Franciscos Castro district in 1996.
One of the Users/KevinWan owners of Sophias is an exFuzio manager and took several of the staff (especially bartenders) with him when he opened Sophias bar.
On Sunday, November 14th, 2010, after 11 years in town, Fuzio closed its doors for business.
In late summer 2011, popular Sacramento restaurant http://www.paesanos.biz/ Paesanos will open a location in this space. Its parent company also owns Uncle Vitos.
Overall, a pretty good, energized place to go if youre out with friends. 11 of my friends and I went there for dinner before Junior Prom. It was rather disappointing that we had to wait for 30 minutes past our reservation time, but once we were seated, service was quite good and efficient. Users/CynthiaHe
I went there with my boyfriend for lunch one day. The service was pretty decent, but the food was highly dissapointing in quality and the prices were ridiculous. I paid $8 for a calamari pasta dish that was the size of a bowl of ice cream at Baskin Robbins that was so saturated with lemon I could barely eat it, and my boyfriend got what he later called a dish of vinegar with a lettuce leaf on the side salad. I wouldnt go here again unless someone else were paying and I was out of other options for food. Users/MargieHalloran
This place is crap. Very expensive compared to most restauraunts in Davis and the portions are tiny. I am biased as I worked at The Macaroni Grill for two years and fuzios is our rival. however our food is much better, our portions are huge, and you get to color on the tablecloth. plus our foccacia is the shit. poo on fuzios. Users/AllanRae
Okay I have to go there you worked at the MACARONI GRILL, but Fuzio is supposed to be expensive??? And yeah ur foccacia is the shit. Ima let you know I agree on that one LOL.. :) joeistheish
20050813 11:03:26 nbsp I have been here twice. Both times, the food was mediocre at best (but the mango lemonade was excellent). The food is also overpriced. I have no idea why this place is so popular when its both mediocre and expensive. Users/AlexPomeranz
20050823 17:50:12 nbsp I have not been to this Fuzio. I have been to the Fuzio in Dublin (west of Livermore). My father and I recently hit Fuzios/Dublin and had a great meal. I dont want to compare it to food from Italy, but instead say that the menu items we had were tasty and novel. And cheap! Each entree was less than $10. I liked it so much that next time we were on the 580 going by it, we called ahead for takeout. Users/TonyZuccarino
20050826 11:29:57 nbsp Their foccacia topped with parmesan and roma tomatoes is pretty yummy, and their salads are gigantic, as well as tasty. Users/AnnaJones
20051003 12:37:58 nbsp People always tell me that the portions arent large enough at Fuzio for the price but Ive never noticed. The staff is usually friendly and apologetic if anything goes wrong (only one bad server but great food regardless). I recommend the thai mussels/clams, firecracker fusili, barbwire gemelli, and the caramelized mushroom linguine. Perhaps the portions depend on what you get? Users/LiRic
20051013 17:32:46 nbsp Some of the food is okay. As for its popularity, take a look at the waiters: it will explain the appeal for gay men and straight women. Users/MisterProfessor
20051107 15:28:29 nbsp On Saturday 11/5 we had a 20 minute wait. Our service was excellent. The waiter didnt hover and ask constantly how the food was. He came at well spaced intervals to refill drinks. The food was good too, I really liked the Chicken Marsala. Users/MyaBrn
20051207 20:35:04 nbsp Basically, overpriced. Seafood dishes were good, the rest were just OK. Users/JohnNapier
20051221 04:23:41 nbsp It always surprises me when it seems I missed a comment on a restaurant Ive been to. I read about the bad things people said about Fuzio on the Wiki, but decided to try it out anyway because its kind of prettylooking inside. I was very, very disappointed. It seems as though the only thing good about this place is the way it looks. The service was probably the worst service I have ever, ever had. And the food! Usually when I go to Italian places I order a pasta dish, because I want to see just how cheap they are (cheap pasta places will bring out smaller dishes). Well, this was cheap, and it cost me a lot. I dont really plan on returning, ever. SS
20060205 03:31:34 nbsp After reading these comments I thought that these people were critizing fuzio like they were some expert on restaurants, after visiting this place for myself the other night, I thought that the food was excellent which I could see be made in front of me (I knew the ingrediants had to been fresh), and the service was just as good. The atmosphere is like no other in Davis, and their bartenders make the best martinis. Instead of reading all these ridiculous comments, you should give Fuzio a chance. Ask the cute asian bartenders for a rasberry lemon drop, theyre the best! Users/KellySmith
20060205 19:02:28 nbsp From 46 on weeknights they have their happy hour in which all apps are $2 and fuziotinis are a $1 off. Pretty cheap way to get filled up Users/JonAugst
20060310 18:35:28 nbsp amazing food. i have been there with my family several times and the dishes are delicious every time. i would recommend the chicken marsala and the calamari appetizer. i dont know why everyone is claiming they are expensive, the chicken marsala dish (which i can barely finish) is only 12 bucks. i guess it could be called expensive, compared to FAST FOOD. my boyfriend and i go there everytime we go out. Users/CorinneEpley
20061009 16:21:44 nbsp I like the setting. But the food was just alrite. The amount is too small considering how big the plate is and the expensive price. The seafood is good tho. Users/KiwiSelina
20070222 08:21:07 nbsp OK. I actually think the food here is pretty good. I ordered the firecracker pork pasta, which was excellent, although hardly spicey at all. My brother ordered the marinara with meatballs. The meatballs were great the marinara was better than the other italian places in davis (by default) but the pasta was certainly overcooked. I think the portions are small for the price. I think part of the problem is that this place has an identity crisis, the food and atmosphere kind of say nuevo but the crowd and service says this is just another davis restaurant. Decent but i wont be popping in the door anytime in the very near future Users/MattHh
20070312 14:56:25 nbsp I went there twice. Both times, the services are really good. I like the food!!!!!!!!!! (Although I found the table a little bit too narrow) Users/CarlieYang
20070327 09:16:13 nbsp They dont have any beer on tap! My steak was fine, after I sent it back to get it cooked the way I had ordered it. Users/JoFeuerstein
20070502 12:44:21 nbsp the taste of the food was not correctly seasoned and portions are quite small for the ridiculous price. Users/trambajuice
20070502 20:30:24 nbsp This will sound like an odd comment, but they make a great tuna melt. A great tuna melt is hard to find. Most are soggy or greasy or use dark tuna or put pickles in the tuna. Users/CovertProfessor
20070520 15:41:03 nbsp I just have to say this Im a new student in Davis, and EIGHT DOLLARS FOR A PASTA DISH IS NOT EXPENSIVE. I used to work full time, and went to plenty of restaurants in that timeframe (4 years) before transferring to the University and 8.00 is actually pretty reasonable. That being said, I LOVE this place. The pork fusilli dish is UHMAZING if you like REAL spicy food. I especially have a close tie to this place because that have one in the heart of the Castro District in SF (the motherland). Users/joeistheish
20070520 15:55:38 nbsp OH! I will say that it did take an act of CONGRESS one time to get some extra sour cream. I think our sever was new, though bless her heart. Users/joeistheish
Just to note, would that be oral congress? or normative congress? and was the cream good? ~Davepoole
20070801 13:41:11 nbsp great food ,my favorite is happy hour with being able to order the 2 dollar appetizers. My friends and I always get several plates of food appetizers for under 10 bucks, the mussels are excellent, the romano tomatoe bread and above all the calamari. I have eaten here on several occasions and enjoy their fire cracker pork, and seafood medley pasta. UNIQUE FLAVORS! FRIENDLY staff ,but best of all cheap appetizers Mfri 36 i believe with a dish normally costing 8 dollars a plate who could go wrong with being able to try all the appetizers on the menu for under 10 bucks(very filling) Users/ElizabethBarthel
20071012 20:09:26 nbsp I went here for a late weekday lunch, alone (with a book.) I felt comfortable dining alone here, and I dine alone often, so I value that. I sat inside because it was a little chilly and rain was threatening, but I love restaurants that have outside seating, and this outside seating area is nice enough, especially for peoplewatching. I had the pork fusilli. The pork (gingerbraised) was tender and delicious. The habanero pesto was quite spicy, but the sour cream cools it off and I LOVE sour cream. The pasta was perfectly cooked and an unusual type, which I appreciated. My waitress was friendly, attentive (but not overly so,) quick and happy to bring me some extra sour cream, and kept my water refilled. I thought that the portion was perfect. I suspect that most people are used to stuffing themselves and/or bringing home leftovers for less than $10, but as a recent San Francisco denizen, I do not have such expectations. I did not have an appetizer, just the pasta. I was starving and I left pleasantly full after eating the entire bowl of pasta. It was happy hour, so I enjoyed a $3 Hefeweizen, which I didnt think complimented the pasta, but that was my own mistake (I didnt ask my server for advice, which I usually do.) Id eaten at the Fuzio in downtown SF before, and remember not being impressed with the food, but I dont remember what I ordered. I had a couple of martinis there, though, and they were great. I would definitely return to this restaurant for the pork fusilli. In fact, I think Ill go do that now... Users/EvoDiva
20071019 18:13:24 nbsp The food was not bad: Not great but not that bad either. The lettuce wrap appetizer was the best part of the meal. I went with my boyfriend on a week night and we sat outside, which was nice. The service was very good. Users/LolaTorney
20071119 16:28:02 nbsp We went here over the summer for happy hour. We were seated and given menus but no water or silverware. We sat there for ten minutes, trying to make eye contact with the servers walking past us but no one stopped to give us water or talk to us. We left without anyone having stopped by our table. There were only a few tables there as well; we were the only table inside, and there were only three tables outside. Ridiculous. Users/ElleWeber
20071218 16:43:18 nbsp No matter how you feel about their food, you HAVE to love their $0 corkage fee! Users/Red
20080215 17:52:37 nbsp Love Fuzio! The food is unique and tasty! The best part is that it isnt too expensive for the college student! Users/jglovicz
20080220 19:16:04 nbsp If you are a minority dont go here! they are racist!we decided to go to Fuzio to try. We didnt go at a high peak time so we didnt ahve to wait. however when we went the waiter set us at an area where no one was and then put down the menus and left. That was it! he didnt even come back to ask whether we wanted something to drink or not. There wasnt even water available to us. NONE! no one even asked. however when white people came in they were served immediately, set at the place with all the people and given drinks. We waited over 15 minutes and nothing happened. That waiter never even came back to ask us for anything we just left. two of the waitresses were shocked there was people even in that secluded area when we left. But even when we left no one said thank you come again or something. DONT GO HERE! THE SERVICE SUCKS! Users/JGG
20080224 12:56:57 nbsp I have to agree with the above post. I dont know if we qualify as minorities, but regardless, the service here was terrible. We ordered a couple of drinks, waited 15 minutes for those, then over an hour for our meal. No water was offered, bread, nothing. When our food finally arrived, I got something I didnt order. To make things right, the manager offered to only charge us for the drinks, not the unrecognizable food that we didnt order. We decided to take off and find a meal elsewhere. No complaints about the bartenders though, the drinks were good. I guess I cant actually comment on the food since we were never served any. Users/joel
20080515 19:42:25 nbsp The patio during nice weather makes the variable service tolerable. I go to Fuzio a lot during the warm months; its my favorite outdoor dining spot in Davis. The Chinese elms provide a lovely filtered shade that makes even hot days pleasant. My favorite dish the one I order whenever Im there is the Greek salad.
There was an ownership change recently, and while the change introduced some new items to the menu, it also brought an unwelcome change to the house bread: a mediocre focaccia in place of a decent sourdough. Still, for a relaxed place to dine on a warm day or evening, with friends, colleagues or just a good newspaper, Fuzios patio is hard to beat. Users/JimFrame
20080622 17:39:11 nbsp Ate at Fuzio today for the first time. Higher prices have often lead me astray to Habit Burger or Plutos instead for a quicker, cheaper bite. I would agree that the service was somewhat slow, but they were very attentive to refilling our waters on such a hot day. The bread arrived with our meals. I had the mediterranean salmon, substituting a house salad for the pasta and it was DELICIOUS! Didnt mind waiting a little longer for the food since the outdoor seating was so nice. I will definatley return. Users/MissL
20080622 19:34:48 nbsp The food is tasty. Portions are a bit small. Ok. More than a bit small. The bread pudding is seriously good. Service ranges from slow to glacial, although much of the service delay appears to be related to cooking time. (Dont go here if you are pressed for time. Seriously.) Prices are high for what you get. Note that the kitchen appears to close well before the restaurant does. The patio is pleasant to dine on, so long as the weather is good. Users/IDoNotExist
20080712 12:15:08 nbsp Two words: Firecracker rocks!!! Users/SunjeetBaadkar
20080802 22:14:07 nbsp Fuzio is really cutting back their hours. This is the second time, on different parts of the week, that Ive found them closed a good hour before listed... chairs upside down and everything. Users/ChristopherPrice
20080822 11:56:34 nbsp A few times when Ive been there, the staff was not sure when they were closing. There was usually some debate over whether they were open or not. On one occasion, they let me stay. On the next (earlier in the evening), they said they were already closed. Users/IDoNotExist
20080823 12:13:29 nbsp I wish I knew how they make their firecracker dishes. It seems like this place is closing down or something with their whole are we open? issues. Itd be a shame if they go away before I could figure out how to make it myself (not to say that I wont still go here if they stick around). Users/SunjeetBaadkar
20081025 18:07:21 nbsp Great Happy Hour specials. The food is tasty and the atmosphere is fun. I dont tend to eat dinner here only because it is expensive (for what you get). Nothing upsets me more than to throw down a lot of money only to walk out still hungry. Its not that expensive, but it isnt a bargain, either. Why pay more for less? Users/CurlyGirl26
20081028 10:19:53 nbsp Couldnt make it for Happy hour so we ended up there during normal dinner hours. I want to like Fuzio, I typically have a decent meal when I go there but something is always off during my experience. Last night the front was almost completely empty and yet they sit us all the way in the back with 2 other couples. We were much too close for how busy it was and the ambiance of the bathroom really isnt all that wonderful. The World Series and Monday night football were on TV and yet they dont even bother to think about asking if you would like to view either of them. We always have to ask for bread and at these prices it seems like it should be there automatically or at least offered. Upon asking they always provide it and Im shocked they dont charge you for it. Some items are well sized and others are simply way too small. Fuzio has good flavors and a nice variety of choices but I just never leave feeling like it was a great value or wonderful experience. Happy hour seems like the time the only smart time to visit this place. Users/loneshark
20081106 22:32:47 nbsp I love Fuzio; the portions are pretty small, but I like it its just enough, so you have room for dessert. The tiramisu is AMAZING!! And HAPPY HOUR is the best Monday thru Friday from 36, all the appetizers are $3, so you can get a bunch of em and stuff yourself for cheap! The servers are pretty hot too. :) Users/mday24
20081106 22:33:57 nbsp For the record, theyre not closing; theyre moving to G Street, next to Froggys, supposedly on Jan. 1 theyll open in the new location. Users/mday24
20090130 01:07:52 nbsp This place is pretty good... I wasnt turned off by anything. The atmosphere was nice, the waitress was friendly and attentive, and the food was tasty. The portions were just right I was able to finish my meal and feel satisfied, but not overly stuffed. I recommend their mango sangria... delicious! I had no clue that theyre moving until I read it on here... but if and when they do, Id eat there again for sure. Users/MichellePalmer
20090202 13:54:49 nbsp If your dinner party cant decide on a cuisine, Fuzio has a few different types of food (Asianinspired, Italianinspired). However, this means that you cant really count on getting original or authentic flair. I had the pad thai, and though it wasnt bad, any other thai place in town could have done a better job. The wait staff was a bit slow, and my party spent about 20 minutes waiting for a check. Its a clean place and not badly priced, but if you want authentic flavors, go elsewhere. If you want food that is designed to suit a generalized palette, then Fuzio is the place. Users/AmanpreetSingh
20090531 09:13:14 nbsp Im going to miss this place and the carrot ginger soup... Users/KBathory
20090601 15:01:38 nbsp Fuzio has reopened to a new location on G street. I cant say Im thrilled to see it back as I always had horrible dining and drinking experiences at its prior location. Users/nkristis
20090710 22:00:43 nbsp I think its pretty good, but the portion size is kind of lacking for the price. I ordered calamari as an appetizer and for $10+, it was skimpy and I ordered some sort of Camboloza? Rigatoni and it turned out really plain for pasta with mushroom alfredo & chicken. I dont think Id come back. Users/MissAmyQ
20090907 23:27:59 nbsp terrible food. almost as bad as cafe italia, but with pretense. Users/JustinMehlhaff
20100119 11:50:15 nbsp Fuzio had been a mediocre restaurant in my opinion, but recently they have been making a lot of changes to bring in more customers. Theyve got a new menu with some flatbread pizzas that are actually pretty good! HalfPriced pastas on Mon/Tues/Wed nights are super cheap, and actually really tasty. They have a huge variety of flavors; all my friends found something they liked. The service was good too our waitress was really friendly and fun. We are definitely going back, especially during happy hour all 7 of us had appetizers and a meal for about $40! I would totally recommend it to anyone. :) Users/mday24
20100128 12:48:09 nbsp Ate here with a group of friends on two occasions and am still middle of the road about it.
Their prices are a bit high for the portions, while still being cheaper than many other similar establishments. Their drinks are pricy but flavorful.
I had steak on my two visits, while my friends ate Meatballs (which I was told were amazing), Salads (made large, vibrant and flavorful but you will need to practically beg for any kind of dressing if you want it) and Halibut (which was rubbery and not entirely tasty). As for my steaks, the first time it was well prepared and full of flavor while the second visit was much more bland and dry. The sides are another thing entirely, limp, dull and barely edible spinach; Crisp, minuscule fries...
I like the atmosphere, it has a certain appeal to it (warm lighting blended with auburn and bronze highlights and well structured seating arrangements) and also has two Large LCD screens for those interested in watching television or sports more than paying attention to their company.
The staff, once again a fiftyfifty for me. First visit the wait staff were timely, nonintrusive and polite while also being very knowledgeable of the menu items and ingredients. Second visit, had to ask for water to be served initially and then ask for refills, had to beg for dressing for the salad, and she wasnt entirely sure about the menu items. Perhaps a newer hire, not sure.
Overall I would rate it 2/5 stars but am willing to give it one more chance and see how things turn out. Users/WesOne
20100204 21:35:10 nbsp The MTW half price pasta were promotions for the month of January. I found that out the hard way. This month of February the promotion is 1/2 off pasta on Weds only. I was there on a Tuesday. So sad. Appetizers were really good for the happy hour price though. Users/KimN.
I, too, found out the hard way. Users/SunjeetBaadkar
20100220 16:11:32 nbsp I came with a group of 10 (including 2 preschoolers). The staff treated us very nicely, brought the boys juice with small plastic lids, and were attentive. I had the spinach salad, which was the best spinach salad Ive ever had. It was very large (compared to some of the other dishes). Two recommendations I would make: this seems like the type of restaurant that could provide each table with a nice basket of bread or rolls. Another would be to put a chicken and fries dish on the kids menu. I did order that for my son, and they were quite willing, but the kids menu has just 4 items which are variations on the same theme. Users/NoelBruening
20100308 16:00:31 nbsp First time at new location. I liked the larger tables and it isnt as noisy. The service was perfect. Attentative but not intrusive. My son had the chicken sausage pasta and really liked it. I had the asian tuna with the mango salsa. Cooked perfectly and very flavorful. My partner had the mediterranean salmon which was also well prepared. I will go back again.
Users/LokiAbbi
20100314 11:44:06 nbsp New location, new settings, but HORRIBLE service. We were seated in the back, which was not a problem; but the problem was, the waitress attended to people around us AFTER we arrived. We were done with the menus, and were ready to order but the waitress did not even stop by our table to ask how we were doing. The only time she came was when ANOTHER waitress asked her if she had attended to us (I could see them pointed and looking at us from the other side); the waitress then DID NOT come to us; we had to ASK for service. HORRIBLE, I mean HORRIBLE service Ive ever received in Davis...and it was not even busy, there were only 5 other groups! Users/kkloveablebaby
> Sometimes when it gets slower, the sections change, and the servers arent aware of their new tables. It happens occasionally in any restaurant. Im sure life will go on if you have to wait an extra 10 minutes to order your food. I understand your irritation (I get grumpy when Im hungry too) but, alas, patience is a virtue. ;)
20100526 17:46:52 nbsp We used to frequent the old location weekly, but once they changed the bread and I was diagnosed with Celiac, we stopped. Tried this new location a few times, and while the food is okay, the prices are pretty high now. The service is friendly, but really slow. We usually dont get our drinks we ordered before dinner until after were through eating. Weve cancelled a few of our drink orders due to this. Id say 3/5 stars; we love to sit outside and watch the trains pass by as we enjoy our meal. Users/JuanaBNDavis
20100613 12:14:26 nbsp Our server was great, but that was about it... Service was very slow even though the restaurant was pretty empty. We ordered the firecracker lettuce wraps, and for $7.50, I would expect A LOT more food. It was only enough for about 2 legit wraps. I ordered the pad thai, and it was too spicy to taste any of the flavors. My dad ordered the ahi tuna, which is supposed to be seared on the outside but rare inside. It was cooked fully through and almost as hard as beef jerky. They brought him another filet that was cooked right. My mom ordered the halibut without the garlic aioli, which was also very overcooked and drizzled in garlic aioli. Grandma said her chicken marsala was good. Wont be going back here. Users/StephanieRobinson
20100624 13:13:17 nbsp I love the happy hour. $12 will get you a beer and 3 any 3 appetizers, which is a nice, big dinner. They also have $5 minipizzas during happy hour (IIRC theyre around $6.50 or $7.00 normally) which are apparently new. And tasty. I dont recall exactly what the beer options were, although the only IPAs IPA they had was Racer 5, which seems to be a local staple (see, e.g., Burgers and Brew, which rarely carries other IPAs on tap). I think I got some garlic fries, the crab cakes, and some spicy chicken wraps, which were enough to easily fill me up. The fries were pretty good, although nothing special. The crab cakes and the wraps, though, were excellent. Highly recommended. Users/TomGarberson
20100726 22:27:41 nbsp If youre a portions type of person, you might not want to go here. The pasta I ordered did taste good, though. Users/AndrewTran
20100804 23:10:45 nbsp The main thing I remember about the old location is that it smelled like bleach. The new spot is comfortable (bad acoustics, lots of asking server to repeat) with good light and more than adequate taps: Racer 5 and Black Butte Porter among them. Happy hours a deal: $3 appetizers and $3 pints, and the spring rolls were fresh and flavorful, if a tad dry. The lingering question: how is it possible that their Firecracker Pork Fusilli gets so much love? Tastes like too much salt was added to the gingerflavored hamburger helper. Users/Swilltopower
20100820 09:21:42 nbsp This is my new favorite happy hour. The prices on the appetizers cant be beat, they also have a very nice outdoor patio to enjoy your food and beverages. I personally like the lettuce wraps and pork sliders. The portions on all of the appetizers (except for crab cakes and spring rolls) are quite generous. Users/DagonJones
20101127 15:40:50 nbsp Another good Davis restaurant closes its doors. :( Users/IDoNotExist
20101203 18:15:27 nbsp Noooooooooooooooooooooooooooooooooooooooooooooooooo! I LOVED THEIR PORK FUSILI :( Users/PeterAnselmo
20101207 18:18:29 nbsp I regret so much it has closed.
Users/AlessandraRachid
|
{"hexsha": "06c0336320dfd54421d9ec84bdf2b6acf0e404ed", "size": 30079, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Fuzio.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Fuzio.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Fuzio.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 176.9352941176, "max_line_length": 1568, "alphanum_fraction": 0.7856311712, "num_tokens": 7444}
|
"""
Created on 22 Apr 2015
@author: Anna
"""
from .Globals import G
from .Allocation_3 import Allocation2
from copy import deepcopy
from numpy import mean, array, absolute, std
from operator import itemgetter
from .UtilisationCalculation import utilisationCalc2, utilisationCalc1, utilisationCalc3
def AllocationRoutine_Final(initialWeek, itemList, itemType, ant):
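    # Overview (clarifying comment, not in the original source): for each item the routine
    # first tries to allocate in initialWeek (step 1), then in earlier weeks within
    # G.maxEarliness (step 2), then in later weeks within G.maxLateness (step 3). When an
    # ACO ant is passed (ant != 0) and its MA choice is feasible, that MA is preferred.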
excess = []
builtAnt = {}
ACOearliness = 0
ACOlateness = 0
ACOexcess = 0
# repeat allocation procedure for all items in the list
for item in itemList:
# ================================================
# Allocation step 1...allocation at current Week
# ================================================
Results = {}
step = 1
ind = G.WeekList.index(initialWeek)
weekList = [initialWeek]
weekListUtCalc = [initialWeek]
capacity = deepcopy(G.CurrentCapacityDict)
qty = item["Qty"]
Allocation = []
earliness = 0
lateness = 0
# ma = ant[item['orderID']]
chosenMA = None
possibleSolutions = []
lateForecast = 0
earlyForecast = 0
while step <= 3:
if step == 2:
weekList = [
G.WeekList[i]
for i in range(ind - 1, max(-1, ind - G.maxEarliness - 1), -1)
]
weekListUtCalc += weekList
if step == 3:
weekList = [
G.WeekList[i]
for i in range(
ind + 1, min(G.planningHorizon, ind + G.maxLateness + 1)
)
]
if len(weekList) == 0:
step += 1
continue
# check different MAs
for ma in item["MAlist"]:
if ma not in possibleSolutions:
if step > 1:
capacity = deepcopy(Results[ma]["remainingCap"])
qty = deepcopy(Results[ma]["remainingUnits"])
Allocation = deepcopy(Results[ma]["Allocation"])
earliness = deepcopy(Results[ma]["earliness"])
lateness = deepcopy(Results[ma]["lateness"])
else:
capacity = deepcopy(G.CurrentCapacityDict)
qty = item["Qty"]
Allocation = []
earliness = 0
lateness = 0
# try allocation to ma
Results[ma] = Allocation2(
ma,
qty,
weekList,
capacity,
G.incompleteBatches,
earliness,
lateness,
Allocation,
initialWeek,
)
if (
Results[ma]["remainingUnits"] == 0
                    ):  # if all units could be allocated, record this MA as a feasible solution
possibleSolutions.append(ma)
if len(possibleSolutions) == len(item["MAlist"]):
break
step += 1
# choose best MA
if G.minDeltaUt:
chosenMA2, orderedMAlist = choseMA2(
Results, possibleSolutions, item["MAlist"], weekListUtCalc
)
else:
chosenMA2, orderedMAlist = choseMA(
Results, possibleSolutions, item["MAlist"], weekListUtCalc
)
oMAlist2 = [ix[0] for ix in orderedMAlist]
if ant == 0:
chosenMA = chosenMA2
else:
if ant[item["orderID"]] in possibleSolutions:
chosenMA = ant[item["orderID"]]
                # reorder the ordered MA list so that chosenMA (from the ACO ant) comes first
if oMAlist2[0] != chosenMA:
tempMAlist = [chosenMA]
for oMA in oMAlist2:
if oMA != chosenMA:
tempMAlist.append(oMA)
oMAlist2 = tempMAlist
builtAnt[item["orderID"]] = chosenMA
# confirm the solution
if chosenMA != None:
G.CurrentCapacityDict = Results[chosenMA]["remainingCap"]
G.incompleteBatches = Results[chosenMA]["remUnits"]
G.Earliness[initialWeek][chosenMA]["qty"].append(item["Qty"])
G.Earliness[initialWeek][chosenMA]["earliness"].append(
float(Results[chosenMA]["earliness"]) / item["Qty"]
)
G.Lateness[initialWeek][chosenMA]["qty"].append(item["Qty"])
G.Lateness[initialWeek][chosenMA]["lateness"].append(
float(Results[chosenMA]["lateness"]) / item["Qty"]
)
G.orders[item["orderID"]]["Allocation"] = Results[chosenMA]["Allocation"]
G.orders[item["orderID"]]["Excess"] = False
G.orders[item["orderID"]]["chosenMA"] = chosenMA
G.orders[item["orderID"]]["orderedList"] = oMAlist2
ACOearliness += float(Results[chosenMA]["earliness"]) / item["Qty"]
ACOlateness += float(Results[chosenMA]["lateness"]) / item["Qty"]
if Results[chosenMA]["lateness"]:
G.LateMeasures["noLateOrders"] += 1
G.LateMeasures["lateness"].append(
float(Results[chosenMA]["lateness"]) / item["Qty"]
)
if Results[chosenMA]["earliness"]:
G.LateMeasures["noEarlyOrders"] += 1
G.LateMeasures["earliness"].append(
float(Results[chosenMA]["earliness"]) / item["Qty"]
)
for allRep in Results[chosenMA]["Allocation"]:
G.globalMAAllocation[chosenMA][allRep["week"]][itemType][
item["priority"]
] += allRep["units"]
G.globalMAAllocationIW[chosenMA][initialWeek][itemType][
item["priority"]
] += allRep["units"]
else:
excess.append(item)
G.Excess[item["sp"]][initialWeek] += item["Qty"]
G.orders[item["orderID"]]["Allocation"] = []
G.orders[item["orderID"]]["Excess"] = True
G.orders[item["orderID"]]["chosenMA"] = None
G.orders[item["orderID"]]["orderedList"] = oMAlist2
G.LateMeasures["noExcess"] += 1
G.LateMeasures["exUnits"] += item["Qty"]
G.globalMAAllocationIW[item["sp"]][initialWeek][itemType][
item["priority"]
] += item["Qty"]
ACOexcess += item["Qty"]
# for orders add allocation information
if itemType == "order":
        if chosenMA is None:
G.OrderResults.append(
(
item["orderID"],
item["sp"],
item["MAlist"],
item["Week"],
item["Qty"],
item["priority"],
chosenMA,
oMAlist2,
"NaN",
"NaN",
"None",
)
)
mList = ""
for i in range(len(oMAlist2)):
if i > 0:
mList += ", "
mList += oMAlist2[i]
G.OrderResultsShort.append((item["orderID"], mList))
else:
G.OrderResults.append(
(
item["orderID"],
item["sp"],
item["MAlist"],
item["Week"],
item["Qty"],
item["priority"],
chosenMA,
oMAlist2,
Results[chosenMA]["lateness"],
Results[chosenMA]["earliness"],
Results[chosenMA]["Allocation"],
)
)
mList = ""
for i in range(len(oMAlist2)):
if i > 0:
mList += ", "
mList += oMAlist2[i]
G.OrderResultsShort.append((item["orderID"], mList))
if itemType == "forecast":
        if chosenMA is None:
G.forecastResults.append(
(
item["ppos"],
item["sp"],
item["MAlist"],
item["Week"],
item["Qty"],
item["priority"],
chosenMA,
orderedMAlist,
"NaN",
"NaN",
"None",
)
)
else:
G.forecastResults.append(
(
item["ppos"],
item["sp"],
item["MAlist"],
item["Week"],
item["Qty"],
item["priority"],
chosenMA,
orderedMAlist,
Results[chosenMA]["lateness"],
Results[chosenMA]["earliness"] / item["Qty"],
Results[chosenMA]["Allocation"],
)
)
if G.minDeltaUt:
ACOtargetUtil, ACOminUtil = utilisationCalc1(
G.CurrentCapacityDict, initialWeek, ind
)
else:
ACOtargetUtil, ACOminUtil = utilisationCalc2(
G.CurrentCapacityDict, initialWeek, ind
)
return {
"ant": builtAnt,
"excess": ACOexcess,
"earliness": ACOearliness,
"lateness": ACOlateness,
"targetUtil": ACOtargetUtil,
"minUtil": ACOminUtil,
}
def choseMA(allResults, possibleSolutions, MAlist, weeklist):
chosenMA = None
if len(MAlist) > 1:
res = []
for ma in MAlist:
minUtil = []
targetUtil = []
for week in weeklist:
for bn in allResults[ma]["utilisation"]:
if week in allResults[ma]["utilisation"][bn]:
if G.Capacity[bn][week]["minUtilisation"]:
minUtil.append(
max(
0,
(
G.Capacity[bn][week]["minUtilisation"]
- allResults[ma]["utilisation"][bn][week]
)
/ G.Capacity[bn][week]["minUtilisation"],
)
)
else:
minUtil.append(
max(
0,
(
G.Capacity[bn][week]["minUtilisation"]
- allResults[ma]["utilisation"][bn][week]
),
)
)
if G.Capacity[bn][week]["targetUtilisation"]:
targetUtil.append(
(
G.Capacity[bn][week]["targetUtilisation"]
- allResults[ma]["utilisation"][bn][week]
)
/ G.Capacity[bn][week]["targetUtilisation"]
)
else:
targetUtil.append(
G.Capacity[bn][week]["targetUtilisation"]
- allResults[ma]["utilisation"][bn][week]
)
res.append(
[
ma,
allResults[ma]["remainingUnits"],
allResults[ma]["lateness"],
std(array(targetUtil)),
std(array(minUtil)),
allResults[ma]["earliness"],
]
)
        # order results: fewest remaining units first, then lateness, then target utilisation spread (stdDev), then min utilisation spread (stdDev), then earliness
sortedMA = sorted(res, key=itemgetter(1, 2, 3, 4, 5))
else:
sortedMA = [MAlist]
    # if there is only one solution, choose the only solution available
if len(possibleSolutions) == 1:
chosenMA = possibleSolutions[0]
    # if there is more than one successful allocation, choose between them
if len(possibleSolutions) > 1:
chosenMA = sortedMA[0][0]
assert chosenMA in possibleSolutions
return chosenMA, sortedMA
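# Illustrative sketch (hypothetical values, not part of the original routine) of the
# multi-criteria ordering used by choseMA/choseMA2: each entry of res is
# [MA, remainingUnits, lateness, targetUtil spread, minUtil spread, earliness], and
# sorted(res, key=itemgetter(1, 2, 3, 4, 5)) prefers smaller values from left to right,
# so fewer unallocated units always wins before lateness is even compared:
#   sorted([["MA1", 0, 2, 0.1, 0.0, 1], ["MA2", 0, 1, 0.3, 0.0, 0]],
#          key=itemgetter(1, 2, 3, 4, 5))[0][0]  ->  "MA2"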
def choseMA2(
allResults, possibleSolutions, MAlist, weeklist
): # more similar to ACO selection criteria
chosenMA = None
if len(MAlist) > 1:
res = []
for ma in MAlist:
minUtil = []
targetUtil = []
for bottleneck in G.Bottlenecks:
minU = []
targetU = []
for week in weeklist:
utilisation = (
float(
G.Capacity[bottleneck][week]["OriginalCapacity"]
- allResults[ma]["remainingCap"][bottleneck][week]
)
/ G.Capacity[bottleneck][week]["OriginalCapacity"]
)
if G.Capacity[bottleneck][week]["minUtilisation"]:
minU.append(
max(
0,
(
G.Capacity[bottleneck][week]["minUtilisation"]
- utilisation
)
/ G.Capacity[bottleneck][week]["minUtilisation"],
)
)
else:
minU.append(
max(
0,
(
G.Capacity[bottleneck][week]["minUtilisation"]
- utilisation
),
)
)
if G.Capacity[bottleneck][week]["targetUtilisation"]:
targetU.append(
(
utilisation
- G.Capacity[bottleneck][week]["targetUtilisation"]
)
/ G.Capacity[bottleneck][week]["targetUtilisation"]
)
else:
targetU.append(
(
utilisation
- G.Capacity[bottleneck][week]["targetUtilisation"]
)
)
minUtil.append(mean(array(minU)))
targetUtil.append(mean(absolute(targetU)))
res.append(
[
ma,
allResults[ma]["remainingUnits"],
allResults[ma]["lateness"],
mean(array(targetUtil)),
mean(array(minUtil)),
allResults[ma]["earliness"],
]
)
        # order results: fewest remaining units first, then lateness, then mean target utilisation deviation, then mean min utilisation shortfall, then earliness
sortedMA = sorted(res, key=itemgetter(1, 2, 3, 4, 5))
else:
sortedMA = [MAlist]
    # if there is only one solution, choose the only solution available
if len(possibleSolutions) == 1:
chosenMA = possibleSolutions[0]
    # if there is more than one successful allocation, choose between them
if len(possibleSolutions) > 1:
chosenMA = sortedMA[0][0]
assert chosenMA in possibleSolutions
return chosenMA, sortedMA
|
{"hexsha": "5611f12e70a534ec575b90d2e3b5d4ecb180deeb", "size": 16498, "ext": "py", "lang": "Python", "max_stars_repo_path": "manpy/simulation/applications/DemandPlanning/AllocationRoutine_Final2.py", "max_stars_repo_name": "datarevenue-berlin/manpy", "max_stars_repo_head_hexsha": "0056eb6e93cba3bf2a1061f9170aa2a1edf248f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-08T07:52:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-10T07:52:14.000Z", "max_issues_repo_path": "manpy/simulation/applications/DemandPlanning/AllocationRoutine_Final2.py", "max_issues_repo_name": "datarevenue-berlin/manpy", "max_issues_repo_head_hexsha": "0056eb6e93cba3bf2a1061f9170aa2a1edf248f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-12-13T23:50:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-14T09:58:42.000Z", "max_forks_repo_path": "manpy/simulation/applications/DemandPlanning/AllocationRoutine_Final2.py", "max_forks_repo_name": "datarevenue-berlin/manpy", "max_forks_repo_head_hexsha": "0056eb6e93cba3bf2a1061f9170aa2a1edf248f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-21T03:08:45.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-21T20:21:03.000Z", "avg_line_length": 36.3392070485, "max_line_length": 108, "alphanum_fraction": 0.4132622136, "include": true, "reason": "from numpy", "num_tokens": 3159}
|
function table2 = i4mat_border_cut ( m, n, table )
%*****************************************************************************80
%
%% I4MAT_BORDER_CUT cuts the "border" of an I4MAT.
%
% Discussion:
%
% We suppose the input data gives values of a quantity on nodes
% on a 2D grid, and we wish to create a new table corresponding only
% to those nodes in the interior of the 2D grid.
%
% 0 0 0 0 0 0
% 0 * * * * 0 * * * *
% 0 * * * * 0 -> * * * *
% 0 * * * * 0 * * * *
% 0 0 0 0 0 0
%
% The illustration suggests the situation in which a 5 by 6 array
% is input, and a 3 by 4 array is to be output.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 25 January 2005
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer M, the spatial dimension.
%
% Input, integer N, the number of points.
%
% Input, integer TABLE(M,N), the table data.
%
% Output, integer TABLE2(M-2,N-2), the new table data.
%
if ( m <= 2 || n <= 2 )
table2 = [];
return
end
table2(1:m-2,1:n-2) = round ( table(2:m-1,2:n-1) );
return
end
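%
%  Usage sketch (illustrative only, mirroring the 5 by 6 example in the discussion):
%
%    table = reshape ( 1 : 30, 5, 6 );
%    table2 = i4mat_border_cut ( 5, 6, table );
%
%  table2 is then the 3 by 4 interior block round ( table(2:4,2:5) ).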
|
{"author": "johannesgerer", "repo": "jburkardt-m", "sha": "1726deb4a34dd08a49c26359d44ef47253f006c1", "save_path": "github-repos/MATLAB/johannesgerer-jburkardt-m", "path": "github-repos/MATLAB/johannesgerer-jburkardt-m/jburkardt-m-1726deb4a34dd08a49c26359d44ef47253f006c1/i4lib/i4mat_border_cut.m"}
|
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nums.core.application_manager import instance as _instance
from nums.core.array.application import ArrayApplication
from nums.core.array.blockarray import BlockArray
def from_modin(df):
# pylint: disable = import-outside-toplevel, protected-access, unidiomatic-typecheck
try:
from modin.pandas.dataframe import DataFrame
from modin.engines.ray.pandas_on_ray.frame.data import PandasOnRayFrame
from modin.engines.ray.pandas_on_ray.frame.partition import (
PandasOnRayFramePartition,
)
except Exception as e:
raise Exception(
"Unable to import modin. Install modin with command 'pip install modin'"
) from e
assert isinstance(df, DataFrame), "Unexpected dataframe type %s" % str(type(df))
assert isinstance(
df._query_compiler._modin_frame, PandasOnRayFrame
), "Unexpected dataframe type %s" % str(type(df._query_compiler._modin_frame))
frame: PandasOnRayFrame = df._query_compiler._modin_frame
app: ArrayApplication = _instance()
system = app.cm
# Make sure the partitions are numeric.
dtype = frame.dtypes[0]
    # float/int also cover the old np.float/np.int aliases, which newer NumPy removed.
    assert dtype in (
        float,
        np.float32,
        np.float64,
        int,
        np.int32,
        np.int64,
    )
    # Make sure all column dtypes are equal (i.e. match the first column's dtype).
    for dt in frame.dtypes:
        assert dt == dtype
dtype = np.__getattribute__(str(dtype))
# Convert from Pandas to NumPy.
pd_parts = frame._partition_mgr_cls.map_partitions(
frame._partitions, lambda df: np.array(df)
)
grid_shape = len(frame._row_lengths), len(frame._column_widths)
shape = (np.sum(frame._row_lengths), np.sum(frame._column_widths))
block_shape = app.get_block_shape(shape, dtype)
rows = []
for i in range(grid_shape[0]):
cols = []
for j in range(grid_shape[1]):
curr_block_shape = (frame._row_lengths[i], frame._column_widths[j])
part: PandasOnRayFramePartition = pd_parts[(i, j)]
part.drain_call_queue()
ba: BlockArray = BlockArray.from_oid(
part.oid, curr_block_shape, dtype, system
)
cols.append(ba)
if grid_shape[1] == 1:
row_ba: BlockArray = cols[0]
else:
row_ba: BlockArray = app.concatenate(
cols, axis=1, axis_block_size=block_shape[1]
)
rows.append(row_ba)
result = app.concatenate(rows, axis=0, axis_block_size=block_shape[0])
return result
if __name__ == "__main__":
from nums.core import settings
import modin.pandas as mpd
filename = settings.pj(
settings.project_root, "tests", "core", "storage", "test.csv"
)
df = mpd.read_csv(filename)
ba: BlockArray = from_modin(df)
print(ba.get())
df = mpd.read_csv(
"https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz"
)
ba: BlockArray = from_modin(df)
print(ba.get())
|
{"hexsha": "e091aef6a1906865860162a8e79aedc8f6fd86c7", "size": 3716, "ext": "py", "lang": "Python", "max_stars_repo_path": "nums/experimental/nums_modin.py", "max_stars_repo_name": "gohar94/nums", "max_stars_repo_head_hexsha": "2d8b0d7dd7b48c5b56641d4f03279b5ce2185db5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 111, "max_stars_repo_stars_event_min_datetime": "2020-06-16T02:52:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:24:19.000Z", "max_issues_repo_path": "nums/experimental/nums_modin.py", "max_issues_repo_name": "gohar94/nums", "max_issues_repo_head_hexsha": "2d8b0d7dd7b48c5b56641d4f03279b5ce2185db5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 160, "max_issues_repo_issues_event_min_datetime": "2020-10-07T21:49:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T03:06:23.000Z", "max_forks_repo_path": "nums/experimental/nums_modin.py", "max_forks_repo_name": "gohar94/nums", "max_forks_repo_head_hexsha": "2d8b0d7dd7b48c5b56641d4f03279b5ce2185db5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2020-11-11T17:10:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T23:17:16.000Z", "avg_line_length": 33.4774774775, "max_line_length": 88, "alphanum_fraction": 0.6614639397, "include": true, "reason": "import numpy", "num_tokens": 903}
|
# coding:utf-8
import re
import numpy as np
class AddManualFeature(object):
def __init__(self, *, train_feature, test_feature):
self.__train_feature = train_feature.copy()
self.__test_feature = test_feature.copy()
self.__income_by_occupation = None
def add_manual_feature(self):
for df in [self.__train_feature, self.__test_feature]:
# AMT_CREDIT AMT_CREDIT AMT_GOODS_PRICE AMT_INCOME_TOTAL
df["NEW_CREDIT_TO_ANNUITY_RATIO"] = (
# Credit amount of the loan / Loan annuity
df["AMT_CREDIT"] / df["AMT_ANNUITY"]
)
df["NEW_CREDIT_TO_GOODS_RATIO"] = (
# Credit amount of the loan /
# For consumer loans it is the price of the goods for which the loan is given
                # Total loan amount / price of the goods purchased with the loan
df["AMT_CREDIT"] / df["AMT_GOODS_PRICE"]
)
df["NEW_CREDIT_TO_INCOME_RATIO"] = (
df["AMT_CREDIT"] / df["AMT_INCOME_TOTAL"]
)
df["NEW_ANNUITY_TO_INCOME_RATIO"] = (
df["AMT_ANNUITY"] / (1 + df["AMT_INCOME_TOTAL"])
)
# FLAG_DOCUMENT
# FLAG_OWN_CAR, FLAG_OWN_REALTY
# FLAG_MOBIL, FLAG_EMP_PHONE, FLAG_WORK_PHONE, FLAG_CONT_MOBILE, FLAG_PHONE, FLAG_EMAIL
df["NEW_DOC_IND_SUM"] = (
df[
[col for col in df.columns if re.search(r"FLAG_DOCUMENT", col)]].sum(axis=1)
)
df["NEW_DOC_IND_KURT"] = (
df[
[col for col in df.columns if re.search(r"FLAG_DOCUMENT", col)]].kurtosis(axis=1)
)
df["NEW_LIVE_IND_SUM"] = (
df[
["FLAG_OWN_CAR", "FLAG_OWN_REALTY"]].sum(axis=1)
)
df["NEW_LIVE_IND_KURT"] = (
df[
["FLAG_OWN_CAR", "FLAG_OWN_REALTY"]].kurtosis(axis=1)
)
df["NEW_CONTACT_IND_SUM"] = (
df[
["FLAG_MOBIL", "FLAG_EMP_PHONE", "FLAG_WORK_PHONE", "FLAG_CONT_MOBILE", "FLAG_PHONE", "FLAG_EMAIL"]].sum(axis=1)
)
df["NEW_CONTACT_IND_KURT"] = (
df[
["FLAG_MOBIL", "FLAG_EMP_PHONE", "FLAG_WORK_PHONE", "FLAG_CONT_MOBILE", "FLAG_PHONE", "FLAG_EMAIL"]].kurtosis(axis=1)
)
# CNT_CHILDREN
df["NEW_INC_PER_CHLD"] = (
                # Add 1 to the denominator to avoid division by zero
df["AMT_INCOME_TOTAL"] / (1 + df["CNT_CHILDREN"])
)
# ORGANIZATION_TYPE
df["ORGANIZATION_TYPE"] = df["ORGANIZATION_TYPE"].fillna("missing")
self.__income_by_occupation = (
df[["AMT_INCOME_TOTAL", "ORGANIZATION_TYPE"]].groupby("ORGANIZATION_TYPE")["AMT_INCOME_TOTAL"].median()
)
df["NEW_INC_BY_ORG"] = df["ORGANIZATION_TYPE"].map(self.__income_by_occupation)
# EXT_SOURCE_1, EXT_SOURCE_2, EXT_SOURCE_3
df["NEW_SOURCES_PROD"] = (
df["EXT_SOURCE_1"] * df["EXT_SOURCE_2"] * df["EXT_SOURCE_3"]
)
df["NEW_SOURCES_MEAN"] = (
df[["EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3"]].mean(axis=1)
)
df["NEW_SOURCES_STD"] = df[["EXT_SOURCE_1", "EXT_SOURCE_2", "EXT_SOURCE_3"]].std(axis=1)
df["NEW_SOURCES_STD"] = df["NEW_SOURCES_STD"].fillna(df["NEW_SOURCES_STD"].mean())
df["NEW_SOURCES_NA_SUM"] = (
df["EXT_SOURCE_1"].isna().astype(np.float64) +
df["EXT_SOURCE_2"].isna().astype(np.float64) +
df["EXT_SOURCE_3"].isna().astype(np.float64)
)
# DAYS_EMPLOYED, DAYS_BIRTH, OWN_CAR_AGE DAYS_LAST_PHONE_CHANGE
df["NEW_EMPLOY_TO_BIRTH_RATIO"] = (
# How many days before the application the person started current employment /
# Client's age in days at the time of application
                # Length of employment (days) / age (days)
df["DAYS_EMPLOYED"] / df["DAYS_BIRTH"]
)
df["NEW_CAR_TO_BIRTH_RATIO"] = (
df["OWN_CAR_AGE"] / df["DAYS_BIRTH"]
)
df["NEW_CAR_TO_EMPLOY_RATIO"] = (
df["OWN_CAR_AGE"] / df["DAYS_EMPLOYED"]
)
df["NEW_PHONE_TO_BIRTH_RATIO"] = (
df["DAYS_LAST_PHONE_CHANGE"] / df["DAYS_BIRTH"]
)
df["NEW_PHONE_TO_EMPLOY_RATIO"] = (
df["DAYS_LAST_PHONE_CHANGE"] / df["DAYS_EMPLOYED"]
)
return self.__train_feature, self.__test_feature
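# Minimal usage sketch (illustrative; assumes train_df/test_df are Home Credit
# application DataFrames containing the columns referenced above, e.g. AMT_CREDIT,
# AMT_ANNUITY, EXT_SOURCE_1..3, DAYS_EMPLOYED, ORGANIZATION_TYPE):
#   adder = AddManualFeature(train_feature=train_df, test_feature=test_df)
#   train_fe, test_fe = adder.add_manual_feature()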
|
{"hexsha": "0b4b32779027aeaa24ffedbb64a7b358e0ca1ed3", "size": 4649, "ext": "py", "lang": "Python", "max_stars_repo_path": "20180617/JamesShepherd/AddManualFeature.py", "max_stars_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_stars_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2018-06-13T07:34:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-07T16:38:25.000Z", "max_issues_repo_path": "20180617/JamesShepherd/AddManualFeature.py", "max_issues_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_issues_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-02T12:48:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-25T09:06:22.000Z", "max_forks_repo_path": "20180617/JamesShepherd/AddManualFeature.py", "max_forks_repo_name": "fengjiaxin/Home_Credit_Default_Risk", "max_forks_repo_head_hexsha": "3407e76b4e5cfb8dd6056d24675b80fe0e82c123", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-08-02T11:03:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-09T10:42:11.000Z", "avg_line_length": 42.6513761468, "max_line_length": 137, "alphanum_fraction": 0.5338782534, "include": true, "reason": "import numpy", "num_tokens": 1204}
|
using RungeKutta.Tableaus: get_radau_1_nodes, get_radau_1_weights, get_radau_1_coefficients,
get_radau_2_nodes, get_radau_2_weights, get_radau_2_coefficients
@testset "$(rpad("Radau Tableaus",80))" begin
@test_throws ErrorException get_radau_1_nodes(1)
@test_throws ErrorException get_radau_1_weights(1)
@test_throws ErrorException get_radau_1_coefficients(1)
@test_throws ErrorException get_radau_2_nodes(1)
@test_throws ErrorException get_radau_2_weights(1)
@test_throws ErrorException get_radau_2_coefficients(1)
function _TableauRadauIA2(T=Float64)
a = [[1//4 -1//4 ]
[1//4 5//12 ]]
b = [1//4, 3//4]
c = [0, 2//3]
Tableau{T}(:RadauIA2, 3, a, b, c)
end
function _TableauRadauIA3(T=Float64)
a = [
[ 1//9 (- 1 - √6)/18 (- 1 + √6)/18 ]
[ 1//9 ( 88 + 7*√6)/360 ( 88 - 43*√6)/360 ]
[ 1//9 ( 88 + 43*√6)/360 ( 88 - 7*√6)/360 ]
]
b = [1/9, (16+√6)/36, (16-√6)/36 ]
c = [0, ( 6-√6)/10, ( 6+√6)/10 ]
Tableau{T}(:RadauIA3, 5, a, b, c)
end
function _TableauRadauIB2(T=Float64)
a = [[1//8 -1//8 ]
[7//24 3//8 ]]
b = [1//4, 3//4]
c = [0, 2//3]
Tableau{T}(:RadauIB2, 3, a, b, c)
end
function _TableauRadauIB3(T=Float64)
a = [
[ 1//18 (- 1 - √6)/36 (- 1 + √6)/36 ]
[ ( 52 + 3*√6)/450 ( 16 + √6)/72 (472 - 217*√6)/1800 ]
[ ( 52 - 3*√6)/450 (472 + 217*√6)/1800 ( 16 - √6)/72 ]
]
b = [1/9, (16+√6)/36, (16-√6)/36 ]
c = [0, ( 6-√6)/10, ( 6+√6)/10 ]
Tableau{T}(:RadauIB3, 5, a, b, c)
end
function _TableauRadauIIA2(T=Float64)
a = [[5//12 -1//12]
[3//4 1//4 ]]
b = [3//4, 1//4]
c = [1//3, 1//1]
Tableau{T}(:RadauIIA2, 3, a, b, c)
end
function _TableauRadauIIA3(T=Float64)
a = [
[ 11/45 - 7*√6/360 37/225-169*√6/1800 -2/225+√6/75 ]
[ 37/225+169*√6/1800 11/45 + 7*√6/360 -2/225-√6/75 ]
[ 4/9 - √6/36 4/9 + √6/36 1/9 ]
]
b = [4/9-√6/36, 4/9+√6/36, 1/9 ]
c = [2/5-√6/10, 2/5+√6/10, 1 ]
Tableau{T}(:RadauIIA3, 5, a, b, c)
end
function _TableauRadauIIB2(T=Float64)
a = [[3//8 -1//24]
[7//8 1//8 ]]
b = [3//4, 1//4]
c = [1//3, 1//1]
Tableau{T}(:RadauIIB2, 3, a, b, c)
end
function _TableauRadauIIB3(T=Float64)
a = [
[( 16 - √6)/72 (328 - 167*√6)/1800 (-2 + 3*√6)/450 ]
[(328 + 167*√6)/1800 ( 16 + √6)/72 (-2 - 3*√6)/450 ]
[( 85 - 10*√6)/180 ( 85 + 10*√6)/180 1/18 ]
]
b = [4/9-√6/36, 4/9+√6/36, 1/9 ]
c = [2/5-√6/10, 2/5+√6/10, 1 ]
Tableau{T}(:RadauIIB3, 5, a, b, c)
end
@test_throws ErrorException TableauRadauIA(1)
@test_throws ErrorException TableauRadauIIA(1)
@test TableauRadauIA(2) ≈ _TableauRadauIA2()
@test TableauRadauIA(3) ≈ _TableauRadauIA3()
@test TableauRadauIB(2) ≈ _TableauRadauIB2()
@test TableauRadauIB(3) ≈ _TableauRadauIB3()
@test TableauRadauIIA(2) ≈ _TableauRadauIIA2()
@test TableauRadauIIA(3) ≈ _TableauRadauIIA3()
@test TableauRadauIIB(2) ≈ _TableauRadauIIB2()
@test TableauRadauIIB(3) ≈ _TableauRadauIIB3()
end
|
{"hexsha": "b15675674f7cf65ceedca2b8760c3d65c772d0b9", "size": 3677, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_radau.jl", "max_stars_repo_name": "JuliaGNI/RungeKutta.jl", "max_stars_repo_head_hexsha": "b6933446c0f76525a2e36f4d94bf7ff9694c7f5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-13T13:08:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-02T11:55:34.000Z", "max_issues_repo_path": "test/test_radau.jl", "max_issues_repo_name": "JuliaGNI/RungeKutta.jl", "max_issues_repo_head_hexsha": "b6933446c0f76525a2e36f4d94bf7ff9694c7f5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-11-28T20:00:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T17:28:29.000Z", "max_forks_repo_path": "test/test_radau.jl", "max_forks_repo_name": "JuliaGNI/RungeKutta.jl", "max_forks_repo_head_hexsha": "b6933446c0f76525a2e36f4d94bf7ff9694c7f5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9739130435, "max_line_length": 92, "alphanum_fraction": 0.4577100897, "num_tokens": 1633}
|
import numpy as np
import matplotlib.pyplot as plt
from random import random
from numba import njit
import random as rand
class Toric_code():
nbr_eq_classes = 16
def __init__(self, size):
self.system_size = size
self.qubit_matrix = np.zeros((2, self.system_size, self.system_size), dtype=np.uint8)
self.defect_matrix = np.zeros((2, self.system_size, self.system_size), dtype=np.uint8)
def generate_random_error(self, p_x, p_y, p_z):
size = self.system_size
for i in range(2):
for j in range(size):
for k in range(size):
q = 0
r = rand.random()
if r < p_z:
q = 3
elif p_z < r < (p_z + p_x):
q = 1
elif (p_z + p_x) < r < (p_z + p_x + p_y):
q = 2
self.qubit_matrix[i, j, k] = q
self.qubit_matrix[1, -1, :] = 0
self.qubit_matrix[1, :, -1] = 0
self.syndrome()
def generate_n_random_errors(self, n):
errors = np.random.randint(3, size=n) + 1
qubit_matrix_error = np.zeros(2*self.system_size**2)
qubit_matrix_error[:n] = errors
np.random.shuffle(qubit_matrix_error)
self.qubit_matrix[:, :, :] = qubit_matrix_error.reshape(2, self.system_size, self.system_size)
self.syndrome()
def count_errors(self):
return _count_errors(self.qubit_matrix)
def chain_lengths(self):
nx = np.count_nonzero(self.qubit_matrix[:, :, :] == 1)
ny = np.count_nonzero(self.qubit_matrix[:, :, :] == 2)
nz = np.count_nonzero(self.qubit_matrix[:, :, :] == 3)
return nx, ny, nz
def apply_logical(self, operator: int, layer: int, X_pos=0, Z_pos=0):
        return _apply_logical(self.qubit_matrix, operator, layer, X_pos, Z_pos)
def apply_stabilizer(self, row: int, col: int, operator: int):
return _apply_stabilizer(self.qubit_matrix, row, col, operator)
def apply_random_logical(self):
return _apply_random_logical(self.qubit_matrix)
def apply_random_stabilizer(self):
return _apply_random_stabilizer(self.qubit_matrix)
def apply_stabilizers_uniform(self, p=0.5):
return _apply_stabilizers_uniform(self.qubit_matrix, p)
def define_equivalence_class(self):
return _define_equivalence_class(self.qubit_matrix)
def to_class(self, eq: int):
return _to_class(eq, self.qubit_matrix)
def syndrome(self):
# generate vertex excitations (charge)
# can be generated by z and y errors
qubit0 = self.qubit_matrix[0, :, :]
y_errors = (qubit0 == 2).astype(int) # separate y and z errors from x
z_errors = (qubit0 == 3).astype(int)
charge = y_errors + z_errors # vertex_excitation
charge_shift = np.roll(charge, 1, axis=0)
charge = charge + charge_shift
        charge0 = (charge == 1).astype(int)  # annihilate two syndromes at the same place in the grid
qubit1 = self.qubit_matrix[1, :, :]
y_errors = (qubit1 == 2).astype(int)
z_errors = (qubit1 == 3).astype(int)
charge = y_errors + z_errors
charge_shift = np.roll(charge, 1, axis=1)
charge1 = charge + charge_shift
charge1 = (charge1 == 1).astype(int)
charge = charge0 + charge1
vertex_matrix = (charge == 1).astype(int)
# generate plaquette excitation (flux)
# can be generated by x and y errors
qubit0 = self.qubit_matrix[0, :, :]
x_errors = (qubit0 == 1).astype(int)
y_errors = (qubit0 == 2).astype(int)
flux = x_errors + y_errors # plaquette_excitation
flux_shift = np.roll(flux, -1, axis=1)
flux = flux + flux_shift
flux0 = (flux == 1).astype(int)
qubit1 = self.qubit_matrix[1, :, :]
x_errors = (qubit1 == 1).astype(int)
y_errors = (qubit1 == 2).astype(int)
flux = x_errors + y_errors
flux_shift = np.roll(flux, -1, axis=0)
flux1 = flux + flux_shift
flux1 = (flux1 == 1).astype(int)
flux = flux0 + flux1
plaquette_matrix = (flux == 1).astype(int)
self.defect_matrix = np.stack((vertex_matrix, plaquette_matrix), axis=0)
def plot(self, state, title, show_eq_class=None):
x_error_qubits1 = np.where(self.qubit_matrix[0, :, :] == 1)
y_error_qubits1 = np.where(self.qubit_matrix[0, :, :] == 2)
z_error_qubits1 = np.where(self.qubit_matrix[0, :, :] == 3)
x_error_qubits2 = np.where(self.qubit_matrix[1, :, :] == 1)
y_error_qubits2 = np.where(self.qubit_matrix[1, :, :] == 2)
z_error_qubits2 = np.where(self.qubit_matrix[1, :, :] == 3)
vertex_matrix = state[0, :, :]
plaquette_matrix = state[1, :, :]
vertex_defect_coordinates = np.where(vertex_matrix)
plaquette_defect_coordinates = np.where(plaquette_matrix)
xLine = np.linspace(0, self.system_size, self.system_size)
x = range(self.system_size)
X, Y = np.meshgrid(x, x)
XLine, YLine = np.meshgrid(x, xLine)
markersize_qubit = 15
markersize_excitation = 7
markersize_symbols = 7
linewidth = 2
ax = plt.subplot(111)
ax.plot(XLine, -YLine, 'black', linewidth=linewidth)
ax.plot(YLine, -XLine, 'black', linewidth=linewidth)
# add the last two black lines
ax.plot(XLine[:,-1] + 1.0, -YLine[:,-1], 'black', linewidth=linewidth)
ax.plot(YLine[:,-1], -YLine[-1,:], 'black', linewidth=linewidth)
ax.plot(X + 0.5, -Y, 'o', color = 'black', markerfacecolor = 'white', markersize=markersize_qubit+1)
ax.plot(X, -Y -0.5, 'o', color = 'black', markerfacecolor = 'white', markersize=markersize_qubit+1)
# add grey qubits
ax.plot(X[-1,:] + 0.5, -Y[-1,:] - 1.0, 'o', color = 'black', markerfacecolor = 'grey', markersize=markersize_qubit+1)
ax.plot(X[:,-1] + 1.0, -Y[:,-1] - 0.5, 'o', color = 'black', markerfacecolor = 'grey', markersize=markersize_qubit+1)
# all x errors
ax.plot(x_error_qubits1[1], -x_error_qubits1[0] - 0.5, 'o', color = 'r', label="x error", markersize=markersize_qubit)
ax.plot(x_error_qubits2[1] + 0.5, -x_error_qubits2[0], 'o', color = 'r', markersize=markersize_qubit)
ax.plot(x_error_qubits1[1], -x_error_qubits1[0] - 0.5, 'o', color = 'black', markersize=markersize_symbols, marker=r'$X$')
ax.plot(x_error_qubits2[1] + 0.5, -x_error_qubits2[0], 'o', color = 'black', markersize=markersize_symbols, marker=r'$X$')
# all y errors
ax.plot(y_error_qubits1[1], -y_error_qubits1[0] - 0.5, 'o', color = 'blueviolet', label="y error", markersize=markersize_qubit)
ax.plot(y_error_qubits2[1] + 0.5, -y_error_qubits2[0], 'o', color = 'blueviolet', markersize=markersize_qubit)
ax.plot(y_error_qubits1[1], -y_error_qubits1[0] - 0.5, 'o', color = 'black', markersize=markersize_symbols, marker=r'$Y$')
ax.plot(y_error_qubits2[1] + 0.5, -y_error_qubits2[0], 'o', color = 'black', markersize=markersize_symbols, marker=r'$Y$')
# all z errors
ax.plot(z_error_qubits1[1], -z_error_qubits1[0] - 0.5, 'o', color = 'b', label="z error", markersize=markersize_qubit)
ax.plot(z_error_qubits2[1] + 0.5, -z_error_qubits2[0], 'o', color = 'b', markersize=markersize_qubit)
ax.plot(z_error_qubits1[1], -z_error_qubits1[0] - 0.5, 'o', color = 'black', markersize=markersize_symbols, marker=r'$Z$')
ax.plot(z_error_qubits2[1] + 0.5, -z_error_qubits2[0], 'o', color = 'black', markersize=markersize_symbols , marker=r'$Z$')
#ax.plot(vertex_defect_coordinates[1], -vertex_defect_coordinates[0], 'x', color = 'blue', label="charge", markersize=markersize_excitation)
ax.plot(vertex_defect_coordinates[1], -vertex_defect_coordinates[0], 'o', color = 'blue', label="charge", markersize=markersize_excitation)
ax.plot(plaquette_defect_coordinates[1] + 0.5, -plaquette_defect_coordinates[0] - 0.5, 'o', color = 'red', label="flux", markersize=markersize_excitation)
ax.axis('off')
if show_eq_class:
ax.set_title('Equivalence class: ' + str(show_eq_class))
#plt.title(title)
plt.axis('equal')
plt.savefig('plots/graph_'+str(title)+'.png')
plt.close()
@njit('(uint8[:,:,:],)')
def _count_errors(qubit_matrix):
return np.count_nonzero(qubit_matrix)
@njit('(uint8[:,:,:], int64, int64, int64, int64)')
def _apply_logical(qubit_matrix, operator: int, layer: int, X_pos=0, Z_pos=0):
# Have to make copy, else original matrix is changed
result_qubit_matrix = np.copy(qubit_matrix)
# Operator is zero means identity, no need to keep going
if operator == 0:
return result_qubit_matrix, (0, 0, 0)
size = qubit_matrix.shape[1]
    # List to store how errors redistribute when logical is applied
n_eq = [0, 0, 0, 0]
# layer 0 is qubits on vertical grid lines
# layer 1 is qubits on horizontal grid lines
# logical X works orthogonal to grid lines
# logical Z works parallel to grid lines
# Transpose copied matrix if layer is 1. Makes next step more straightforward
    # Editing orient_result changes result_qubit_matrix whether transposed or not
if layer == 0:
orient_result = result_qubit_matrix
elif layer == 1:
orient_result = result_qubit_matrix.transpose(0, 2, 1)
do_X = (operator == 1 or operator == 2)
do_Z = (operator == 3 or operator == 2)
# Helper function
def qubit_update(row, col, op):
old_qubit = orient_result[layer, row, col]
new_qubit = old_qubit ^ op
orient_result[layer, row, col] = new_qubit
n_eq[old_qubit] -= 1
n_eq[new_qubit] += 1
for index in range(size):
if do_X:
qubit_update(X_pos, index, 1)
if do_Z:
qubit_update(index, Z_pos, 3)
return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:,:],)')
def _apply_random_logical(qubit_matrix):
size = qubit_matrix.shape[1]
# operator to use, 2 (Y) will make both X and Z on the same layer. 0 is identity
# one operator for each layer
operators = [int(random() * 4), int(random() * 4)]
    # ok to not copy, since apply_logical doesn't change input
result_qubit_matrix = qubit_matrix
result_error_change = 0
for layer, op in enumerate(operators):
if op == 1 or op == 2:
X_pos = int(random() * size)
else:
X_pos = 0
if op == 3 or op == 2:
Z_pos = int(random() * size)
else:
Z_pos = 0
result_qubit_matrix, error_change = _apply_logical(result_qubit_matrix, op, layer, X_pos, Z_pos)
result_error_change += error_change[0] + error_change[1] + error_change[2]
return result_qubit_matrix, result_error_change
@njit('(uint8[:,:,:], int64, int64, int64)')
def _apply_stabilizer(qubit_matrix, row: int, col: int, operator: int):
# gives the resulting qubit error matrix from applying (row, col, operator) stabilizer
# doesn't update input qubit_matrix
size = qubit_matrix.shape[1]
if operator == 1:
qubit_matrix_layers = np.array([1, 1, 0, 0])
rows = np.array([row, row, row, (row - 1) % size])
cols = np.array([col, (col - 1) % size, col, col])
elif operator == 3:
qubit_matrix_layers = np.array([1, 0, 0, 1])
rows = np.array([row, row, row, (row + 1) % size])
cols = np.array([col, col, (col + 1) % size, col])
# Have to make copy, else original matrix is changed
result_qubit_matrix = np.copy(qubit_matrix)
    # List to store how errors redistribute when stabilizer is applied
n_eq = [0, 0, 0, 0]
for i in range(4):
old_qubit = qubit_matrix[qubit_matrix_layers[i], rows[i], cols[i]]
new_qubit = old_qubit ^ operator
result_qubit_matrix[qubit_matrix_layers[i], rows[i], cols[i]] = new_qubit
n_eq[old_qubit] -= 1
n_eq[new_qubit] += 1
return result_qubit_matrix, (n_eq[1], n_eq[2], n_eq[3])
@njit('(uint8[:,:,:],)')
def _apply_random_stabilizer(qubit_matrix):
# select random coordinates where to apply operator
size = qubit_matrix.shape[1]
row = int(random() * size)
col = int(random() * size)
operator = int(random() * 2) # we only care about X and Z, and Y is represented by 2. Therefore:
if operator == 0:
operator = 3
return _apply_stabilizer(qubit_matrix, row, col, operator)
def _apply_stabilizers_uniform(qubit_matrix, p=0.5):
size = qubit_matrix.shape[1]
result_qubit_matrix = np.copy(qubit_matrix)
random_stabilizers = np.random.rand(2, size, size)
random_stabilizers = np.less(random_stabilizers, p)
# Numpy magic for iterating through matrix
it = np.nditer(random_stabilizers, flags=['multi_index'])
while not it.finished:
if it[0]:
op, row, col = it.multi_index
if op == 0:
op = 3
result_qubit_matrix, _ = _apply_stabilizer(result_qubit_matrix, row, col, op)
it.iternext()
return result_qubit_matrix
@njit('(uint8[:,:,:],)')
def _define_equivalence_class(qubit_matrix):
# checks odd and even errors in each layer
# gives a combination of four numbers corresponding to an equivalence class
# checks odd or even x-errors in first layer
x1prov = np.count_nonzero(qubit_matrix[0] == 1)
# checks odd or even z-errors in first layer
z1prov = np.count_nonzero(qubit_matrix[0] == 3)
    # checks odd or even y-errors in first layer and adds them to total number of x/z errors in first layer
y1prov = np.count_nonzero(qubit_matrix[0] == 2)
x1 = x1prov + y1prov
z1 = z1prov + y1prov
# checks odd or even x-errors in second layer
x2prov = np.count_nonzero(qubit_matrix[1] == 1)
# checks odd or even z-errors in second layer
z2prov = np.count_nonzero(qubit_matrix[1] == 3)
    # checks odd or even y-errors in second layer and adds them to total number of x/z errors in second layer
y2prov = np.count_nonzero(qubit_matrix[1] == 2)
x2 = x2prov + y2prov
z2 = z2prov + y2prov
# stores whether there was an odd or even number of errors
x1 = x1 % 2
z1 = z1 % 2
x2 = x2 % 2
z2 = z2 % 2
return x1 + z1 * 2 + x2 * 4 + z2 * 8
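# Worked example (illustrative): a chain with a single X error in layer 0 and a single
# Z error in layer 1 has parities (x1, z1, x2, z2) = (1, 0, 0, 1), so
# _define_equivalence_class returns 1 + 0*2 + 0*4 + 1*8 = 9.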
@njit('(int64, uint8[:,:,:],)')
def _to_class(eq, qubit_matrix):
# Returns an error chain with same syndrom as qubit_matrix, but in the class eq
# eq is interpreted as a 4-digit binary number (z2 x2 z1 x1)
# xor target class with current class, to calculate what operators "connect" them
diff = eq ^ _define_equivalence_class(qubit_matrix)
# These lines flip x2 if z2==1 and flip x1 if z1==1.
# This converts a 4-bit eq-class into two 2-bit operators
mask = 0b1010
xor = (mask & diff) >> 1
ops = diff ^ xor
# The leftmost two bits represent the operator to apply in layer 1
ops2 = ops >> 2
# The rightmost two bits represent the operator to apply in layer 0
ops1 = 0b0011 & ops
# Apply the operators
for layer, op in enumerate((ops1, ops2)):
qubit_matrix, _ = _apply_logical(qubit_matrix, operator=op, layer=layer, X_pos=0, Z_pos=0)
return qubit_matrix
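if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; assumed workflow): build a
    # small lattice, sample a random error chain and inspect its equivalence class.
    toric = Toric_code(5)
    toric.generate_random_error(p_x=0.05, p_y=0.05, p_z=0.05)
    print("number of errors:", toric.count_errors())
    print("equivalence class:", toric.define_equivalence_class())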
|
{"hexsha": "106d3fd79468689f6c75514a3f749efb0414ccd3", "size": 15417, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/toric_model.py", "max_stars_repo_name": "mats-granath/EWD-QEC", "max_stars_repo_head_hexsha": "6ce9ac0940c18e2a63ec244cdd7b80e40b0c7073", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-06-21T15:45:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-11T13:30:24.000Z", "max_issues_repo_path": "src/toric_model.py", "max_issues_repo_name": "mats-granath/EWD-QEC", "max_issues_repo_head_hexsha": "6ce9ac0940c18e2a63ec244cdd7b80e40b0c7073", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/toric_model.py", "max_forks_repo_name": "mats-granath/EWD-QEC", "max_forks_repo_head_hexsha": "6ce9ac0940c18e2a63ec244cdd7b80e40b0c7073", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-13T15:12:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-20T19:20:38.000Z", "avg_line_length": 39.4296675192, "max_line_length": 162, "alphanum_fraction": 0.6268405007, "include": true, "reason": "import numpy,from numba", "num_tokens": 4534}
|
import logging
import warnings
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
from models import BetaBernoulli, ClasswiseEce
logger = logging.getLogger(__name__)
np.random.seed(0)
############################################################################
"""
Update DATA_DIR, RESULTS_DIR, FIGURE_DIR
"""
# INTPUT FILES
DATA_DIR = '/Users/disiji/Dropbox/current/bayesian-blackbox/data/'
DATAFILE_LIST = {
'cifar100': DATA_DIR + 'cifar100/cifar100_predictions_dropout.txt',
'imagenet': DATA_DIR + 'imagenet/resnet152_imagenet_outputs.txt',
'imagenet2_topimages': DATA_DIR + 'imagenet/resnet152_imagenetv2_topimages_outputs.txt',
'20newsgroup': DATA_DIR + '20newsgroup/bert_20_newsgroups_outputs.txt',
'svhn': DATA_DIR + 'svhn/svhn_predictions.txt',
'dbpedia': DATA_DIR + 'dbpedia/bert_dbpedia_outputs.txt',
}
LOGITSFILE_DICT = {
'cifar100': DATA_DIR + 'cifar100/resnet110_cifar100_logits.txt',
'imagenet': DATA_DIR + 'imagenet/resnet152_imagenet_logits.txt',
}
COST_MATRIX_FILE_DICT = {
'human': DATA_DIR + 'cost/cifar100_people_full/costs.npy',
'superclass': DATA_DIR + 'cost/cifar100_superclass_full/costs.npy'
}
COST_INFORMED_PRIOR_FILE = DATA_DIR + 'cost/cifar100_superclass_full/informed_prior.npy'
# OUTPUT FILES
RESULTS_DIR = '/Volumes/deepdata/bayesian_blackbox/output_from_datalab_20200204/output/'
FIGURE_DIR = '../../figures/'
############################################################################
# DATA INFO
DATASET_LIST = ['imagenet', 'dbpedia', 'cifar100', '20newsgroup', 'svhn', 'imagenet2_topimages']
DATASIZE_DICT = {
'cifar100': 10000,
'imagenet': 50000,
'imagenet2_topimages': 10000,
'20newsgroup': 7532,
'svhn': 26032,
'dbpedia': 70000,
}
NUM_CLASSES_DICT = {
'cifar100': 100,
'imagenet': 1000,
'imagenet2_topimages': 1000,
'20newsgroup': 20,
'svhn': 10,
'dbpedia': 14,
}
# PLOT
DATASET_NAMES = {
'cifar100': 'CIFAR-100',
'imagenet': 'ImageNet',
'svhn': 'SVHN',
'20newsgroup': '20 Newsgroups',
'dbpedia': 'DBpedia',
}
TOPK_DICT = {'cifar100': 10,
'imagenet': 10,
'svhn': 3,
'20newsgroup': 3,
'dbpedia': 3}
EVAL_METRIC_NAMES = {
'avg_num_agreement': '#agreements',
'mrr': 'MRR'
}
############################################################################
# CIFAR100 meta data needed to map classes to superclasses and vice versa.
CIFAR100_CLASSES = [
"apple", "aquarium_fish", "baby", "bear", "beaver", "bed", "bee", "beetle", "bicycle",
"bottle", "bowl", "boy", "bridge", "bus", "butterfly", "camel", "can", "castle",
"caterpillar", "cattle", "chair", "chimpanzee", "clock", "cloud", "cockroach", "couch",
"crab", "crocodile", "cup", "dinosaur", "dolphin", "elephant", "flatfish", "forest", "fox",
"girl", "hamster", "house", "kangaroo", "keyboard", "lamp", "lawn_mower", "leopard", "lion",
"lizard", "lobster", "man", "maple_tree", "motorcycle", "mountain", "mouse", "mushroom",
"oak_tree", "orange", "orchid", "otter", "palm_tree", "pear", "pickup_truck", "pine_tree",
"plain", "plate", "poppy", "porcupine", "possum", "rabbit", "raccoon", "ray", "road",
"rocket", "rose", "sea", "seal", "shark", "shrew", "skunk", "skyscraper", "snail", "snake",
"spider", "squirrel", "streetcar", "sunflower", "sweet_pepper", "table", "tank", "telephone",
"television", "tiger", "tractor", "train", "trout", "tulip", "turtle", "wardrobe", "whale",
"willow_tree", "wolf", "woman", "worm"
]
CIFAR100_SUPERCLASSES = [
"aquatic_mammals", "fish", "flowers", "food_containers", "fruit_and_vegetables",
"household_electrical_devices", "household_furniture", "insects", "large_carnivores",
"large_man-made_outdoor_things", "large_natural_outdoor_scenes",
"large_omnivores_and_herbivores", "medium_mammals", "non-insect_invertebrates", "people",
"reptiles", "small_mammals", "trees", "vehicles_1", "vehicles_2"
]
CIFAR100_REVERSE_SUPERCLASS_LOOKUP = {
"aquatic_mammals": ["beaver", "dolphin", "otter", "seal", "whale"],
"fish": ["aquarium_fish", "flatfish", "ray", "shark", "trout"],
"flowers": ["orchid", "poppy", "rose", "sunflower", "tulip"],
"food_containers": ["bottle", "bowl", "can", "cup", "plate"],
"fruit_and_vegetables": ["apple", "mushroom", "orange", "pear", "sweet_pepper"],
"household_electrical_devices": ["clock", "keyboard", "lamp", "telephone", "television"],
"household_furniture": ["bed", "chair", "couch", "table", "wardrobe"],
"insects": ["bee", "beetle", "butterfly", "caterpillar", "cockroach"],
"large_carnivores": ["bear", "leopard", "lion", "tiger", "wolf"],
"large_man-made_outdoor_things": ["bridge", "castle", "house", "road", "skyscraper"],
"large_natural_outdoor_scenes": ["cloud", "forest", "mountain", "plain", "sea"],
"large_omnivores_and_herbivores": ["camel", "cattle", "chimpanzee", "elephant", "kangaroo"],
"medium_mammals": ["fox", "porcupine", "possum", "raccoon", "skunk"],
"non-insect_invertebrates": ["crab", "lobster", "snail", "spider", "worm"],
"people": ["baby", "boy", "girl", "man", "woman"],
"reptiles": ["crocodile", "dinosaur", "lizard", "snake", "turtle"],
"small_mammals": ["hamster", "mouse", "rabbit", "shrew", "squirrel"],
"trees": ["maple_tree", "oak_tree", "palm_tree", "pine_tree", "willow_tree"],
"vehicles_1": ["bicycle", "bus", "motorcycle", "pickup_truck", "train"],
"vehicles_2": ["lawn_mower", "rocket", "streetcar", "tank", "tractor"]
}
CIFAR100_SUPERCLASS_LOOKUP = {class_: superclass for superclass, class_list in
CIFAR100_REVERSE_SUPERCLASS_LOOKUP.items() for class_ in
class_list}
############################################################################
def prepare_data(filename, four_column=False) -> Tuple[
List[int], List[bool], List[float], Dict[int, str], Dict[int, int], List[int]]:
"""
Load predictions.
:param filename: str
    :param four_column: indicates whether the data format is "index, correct class, predicted class, confidence"
or true label followed by a vector of scores for each class
:return:
categories: List[int], predicted class
observations: List[bool], whether predicted class is the same as truth class
confidence: List[float]
idx2category: Dict[int, str] or None
category2idx: Dict[str, int] or None
labels: List[int], true label of samples.
"""
if four_column:
# when file is in 4 column format: index, correct class, predicted class, confidence
with open(filename, 'r') as f:
category2idx = dict()
idx2category = []
categories = []
observations = []
confidences = []
labels = []
next(f)
for line in f:
_, correct, predicted, confidence = line.split()
if predicted not in category2idx:
category2idx[predicted] = len(category2idx)
idx2category.append(predicted)
idx = category2idx[predicted]
categories.append(idx)
observations.append(correct == predicted)
confidences.append(float(confidence))
labels.append(correct)
else:
data = np.genfromtxt(filename)
categories = np.argmax(data[:, 1:], axis=1).astype(int)
confidences = list(np.max(data[:, 1:], axis=1).astype(float))
observations = list((categories == data[:, 0]))
categories = list(categories)
labels = list(data[:, 0])
idx2category = None
category2idx = None
logger.debug("Dataset Accuracy: %.3f" % (len([_ for _ in observations if _ == True]) * 1.0 / len(observations)))
return categories, observations, confidences, idx2category, category2idx, labels
def train_holdout_split(categories: List[int],
observations: List[bool],
confidences: List[float],
labels: List[int],
indices: List[int],
holdout_ratio: float = 0.2) -> Tuple[
List[int], List[bool], List[float], List[int], List[int], List[int], List[bool], List[float], List[int], List[int]]:
"""
    Split categories, observations, confidences, labels and indices into train and holdout partitions with holdout_ratio.
    :param categories: List[int], predicted class
    :param observations: List[bool], whether predicted class is the same as truth class
    :param confidences: List[float], list of scores
    :param labels: List[int], true label of samples.
    :param indices: List[int], index of data in the raw file
    :param holdout_ratio: float between 0 and 1. Default: 0.2.
    :return: train and holdout partitions of the inputs.
"""
num_samples = len(categories)
permutation = np.random.permutation(num_samples)
mask = np.zeros(num_samples)
mask[permutation[:int(len(categories) * holdout_ratio)]] = 1
train_categories = [categories[idx] for idx in range(num_samples) if mask[idx] == 0]
train_observations = [observations[idx] for idx in range(num_samples) if mask[idx] == 0]
train_confidences = [confidences[idx] for idx in range(num_samples) if mask[idx] == 0]
train_labels = [labels[idx] for idx in range(num_samples) if mask[idx] == 0]
train_indices = [indices[idx] for idx in range(num_samples) if mask[idx] == 0]
holdout_categories = [categories[idx] for idx in range(num_samples) if mask[idx] == 1]
holdout_observations = [observations[idx] for idx in range(num_samples) if mask[idx] == 1]
holdout_confidences = [confidences[idx] for idx in range(num_samples) if mask[idx] == 1]
holdout_labels = [labels[idx] for idx in range(num_samples) if mask[idx] == 1]
holdout_indices = [indices[idx] for idx in range(num_samples) if mask[idx] == 1]
return train_categories, train_observations, train_confidences, train_labels, train_indices, holdout_categories, \
holdout_observations, holdout_confidences, holdout_labels, holdout_indices
def eval_ece(confidences: List[float], observations: List[bool], num_bins=10):
"""
Evaluate ECE given a list of samples with equal-width binning.
:param confidences: List[float]
A list of prediction scores.
:param observations: List[bool]
A list of boolean observations.
:param num_bins: int
The number of bins used to estimate ECE. Default: 10
:return: float
"""
confidences = np.array(confidences)
observations = np.array(observations) * 1.0
bins = np.linspace(0, 1, num_bins + 1)
digitized = np.digitize(confidences, bins[1:-1])
w = np.array([(digitized == i).sum() for i in range(num_bins)])
w = w / sum(w)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
confidence_bins = np.array([confidences[digitized == i].mean() for i in range(num_bins)])
accuracy_bins = np.array([observations[digitized == i].mean() for i in range(num_bins)])
confidence_bins[np.isnan(confidence_bins)] = 0
accuracy_bins[np.isnan(accuracy_bins)] = 0
diff = np.absolute(confidence_bins - accuracy_bins)
ece = np.inner(diff, w)
return ece
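# Worked example (illustrative numbers): with num_bins=2, confidences [0.3, 0.9, 0.8]
# and observations [True, True, False] fall into bins [0, 0.5) and [0.5, 1]; the bin
# weights are [1/3, 2/3], the per-bin |confidence - accuracy| gaps are |0.3 - 1.0| = 0.7
# and |0.85 - 0.5| = 0.35, so ECE = (1/3)*0.7 + (2/3)*0.35 ≈ 0.467.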
def get_confidence_k(categories: List[int], confidences: List[float], num_classes: int) -> np.ndarray:
"""
Get average confidence of each predicted class, given a list of samples.
:param categories: List[int]
A list of predicted classes.
:param confidences: List[float]
A list of prediction scores.
:param num_classes: int
:return: confidence_k: (num_classes, )
Average score of predicted class.
"""
df = pd.DataFrame(list(zip(categories, confidences)), columns=['Predicted', 'Confidence'])
confidence_k = np.array([df[(df['Predicted'] == id)]['Confidence'].mean()
for id in range(num_classes)])
return confidence_k
def get_accuracy_k(categories: List[int], observations: List[bool], num_classes: int) -> np.ndarray:
"""
Get accuracy of each predicted class given a list of samples.
:param categories: List[int]
A list of predicted classes.
:param observations: List[bool]
A list of boolean observations.
:param num_classes: int
:return: accuracy_k: (num_classes, )
Accuracy of each predicted class.
"""
observations = np.array(observations) * 1.0
df = pd.DataFrame(list(zip(categories, observations)), columns=['Predicted', 'Observations'])
accuracy_k = np.array([df[(df['Predicted'] == class_idx)]['Observations'].mean()
for class_idx in range(num_classes)])
return accuracy_k
def get_ece_k(categories: List[int], observations: List[bool], confidences: List[float], num_classes: int,
num_bins=10) -> np.ndarray:
"""
Get ECE of each predicted class, given a list of samples. ECE of each predicted class is estimated with equal-width binning.
:param categories: List[int]
A list of predicted classes.
:param observations: List[bool]
A list of boolean observations.
:param confidences: List[float]
A list of prediction scores.
:param num_classes: int
:param num_bins: int
The number of bins used to estimate ECE. Default: 10.
:return: ece_k: (num_classes, )
ECE of each predicted class.
"""
ece_k = np.zeros((num_classes,))
for class_idx in range(num_classes):
mask_idx = [i for i in range(len(observations)) if categories[i] == class_idx]
observations_sublist = [observations[i] for i in mask_idx]
confidences_sublist = [confidences[i] for i in mask_idx]
ece_k[class_idx] = eval_ece(confidences_sublist, observations_sublist, num_bins)
return ece_k
def get_ground_truth(categories: List[int], observations: List[bool], confidences: List[float], num_classes: int,
metric: str, mode: str, topk: int = 1) -> np.ndarray:
"""
Compute ground truth given metric and mode with all data points.
:param categories: List[int]
A list of predicted classes.
:param observations: List[bool]
A list of boolean observations.
:param confidences: List[float]
A list of prediction scores.
:param num_classes: int
The number of classes.
:param metric: str
'accuracy' or 'calibration_error'
:param mode: str
'min' or max'
:param topk: int
The number of top classes to return. Default: 1.
:return: binary np.ndarray of shape (num_classes, ) indicating each class in top k or not.
"""
if metric == 'accuracy':
metric_val = get_accuracy_k(categories, observations, num_classes)
elif metric == 'calibration_error':
metric_val = get_ece_k(categories, observations, confidences, num_classes, num_bins=10)
output = np.zeros((num_classes,), dtype=np.bool_)
if mode == 'max':
indices = metric_val.argsort()[-topk:]
else:
indices = metric_val.argsort()[:topk]
output[indices] = 1
return output
def get_bayesian_ground_truth(categories: List[int], observations: List[bool], confidences: List[float],
num_classes: int,
metric: str, mode: str, topk: int = 1, pseudocount: int = 1, prior=None) -> np.ndarray:
"""
Compute ground truth given metric and mode with all data points.
:param categories: List[int]
A list of predicted classes.
:param observations: List[bool]
A list of boolean observations.
:param confidences: List[float]
A list of prediction scores.
:param num_classes: int
The number of classes.
:param metric: str
'accuracy' or 'calibration_error'
:param mode: str
'min' or max'
:param topk: int
The number of top classes to return. Default: 1.
:param pseudocount: int
Strength of prior for ClasswiseEce model. Default: 1.
:param prior: np.ndarray
Prior for BetaBernoulli model. Default: None.
:return: binary np.ndarray of shape (num_classes, ) indicating each class in top k or not.
"""
if metric == 'accuracy':
model = BetaBernoulli(num_classes, prior=prior)
model.update_batch(confidences, observations)
elif metric == 'calibration_error':
model = ClasswiseEce(num_classes, num_bins=10, pseudocount=pseudocount)
model.update_batch(categories, observations, confidences)
metric_val = model.eval
output = np.zeros((num_classes,), dtype=np.bool_)
if mode == 'max':
indices = metric_val.argsort()[-topk:]
else:
indices = metric_val.argsort()[:topk]
output[indices] = 1
return output
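if __name__ == '__main__':
    # Minimal sanity-check sketch (synthetic toy data, not one of the datasets above):
    # estimate ECE and per-class accuracy for four predictions over two classes.
    toy_categories = [0, 0, 1, 1]
    toy_observations = [True, False, True, True]
    toy_confidences = [0.9, 0.6, 0.8, 0.7]
    print('ECE:', eval_ece(toy_confidences, toy_observations, num_bins=5))
    print('per-class accuracy:', get_accuracy_k(toy_categories, toy_observations, num_classes=2))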
|
{"hexsha": "bcdd5873eff3a2544a8b91eaf68e69cbbcd54b44", "size": 16936, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data_utils.py", "max_stars_repo_name": "rloganiv/bayesian-blackbox", "max_stars_repo_head_hexsha": "6a111553200b6aa755149e08174abe1a61d37198", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-12-23T13:27:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-01T13:33:34.000Z", "max_issues_repo_path": "src/data_utils.py", "max_issues_repo_name": "rloganiv/bayesian-blackbox", "max_issues_repo_head_hexsha": "6a111553200b6aa755149e08174abe1a61d37198", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-31T11:06:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:39:33.000Z", "max_forks_repo_path": "src/data_utils.py", "max_forks_repo_name": "disiji/bayesian-blackbox", "max_forks_repo_head_hexsha": "6a111553200b6aa755149e08174abe1a61d37198", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-01-24T10:21:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-22T04:41:14.000Z", "avg_line_length": 42.8759493671, "max_line_length": 128, "alphanum_fraction": 0.6350968351, "include": true, "reason": "import numpy", "num_tokens": 4425}
|
/-
Copyright (c) 2021 Justus Springer. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Justus Springer
-/
import algebra.category.Group.filtered_colimits
import algebra.category.Module.basic
/-!
# The forgetful functor from `R`-modules preserves filtered colimits.
Forgetful functors from algebraic categories usually don't preserve colimits. However, they tend
to preserve _filtered_ colimits.
In this file, we start with a ring `R`, a small filtered category `J` and a functor
`F : J ⥤ Module R`. We show that the colimit of `F ⋙ forget₂ (Module R) AddCommGroup`
(in `AddCommGroup`) carries the structure of an `R`-module, thereby showing that the forgetful
functor `forget₂ (Module R) AddCommGroup` preserves filtered colimits. In particular, this implies
that `forget (Module R)` preserves filtered colimits.
-/
universes u v
noncomputable theory
open_locale classical
open category_theory
open category_theory.limits
open category_theory.is_filtered (renaming max → max') -- avoid name collision with `_root_.max`.
open AddMon.filtered_colimits (colimit_zero_eq colimit_add_mk_eq)
namespace Module.filtered_colimits
section
-- We use parameters here, mainly so we can have the abbreviations `M` and `M.mk` below, without
-- passing around `F` all the time.
parameters {R : Type u} [ring R] {J : Type v} [small_category J] [is_filtered J]
parameters (F : J ⥤ Module.{v} R)
/--
The colimit of `F ⋙ forget₂ (Module R) AddCommGroup` in the category `AddCommGroup`.
In the following, we will show that this has the structure of an `R`-module.
-/
abbreviation M : AddCommGroup :=
AddCommGroup.filtered_colimits.colimit (F ⋙ forget₂ (Module R) AddCommGroup)
/-- The canonical projection into the colimit, as a quotient type. -/
abbreviation M.mk : (Σ j, F.obj j) → M := quot.mk (types.quot.rel (F ⋙ forget (Module R)))
lemma M.mk_eq (x y : Σ j, F.obj j)
(h : ∃ (k : J) (f : x.1 ⟶ k) (g : y.1 ⟶ k), F.map f x.2 = F.map g y.2) :
M.mk x = M.mk y :=
quot.eqv_gen_sound (types.filtered_colimit.eqv_gen_quot_rel_of_rel (F ⋙ forget (Module R)) x y h)
/-- The "unlifted" version of scalar multiplication in the colimit. -/
def colimit_smul_aux (r : R) (x : Σ j, F.obj j) : M :=
M.mk ⟨x.1, r • x.2⟩
lemma colimit_smul_aux_eq_of_rel (r : R) (x y : Σ j, F.obj j)
(h : types.filtered_colimit.rel (F ⋙ forget (Module R)) x y) :
colimit_smul_aux r x = colimit_smul_aux r y :=
begin
apply M.mk_eq,
obtain ⟨k, f, g, hfg⟩ := h,
use [k, f, g],
simp only [category_theory.functor.comp_map, forget_map_eq_coe] at hfg,
rw [linear_map.map_smul, linear_map.map_smul, hfg],
end
/-- Scalar multiplication in the colimit. See also `colimit_smul_aux`. -/
instance colimit_has_scalar : has_scalar R M :=
{ smul := λ r x, begin
refine quot.lift (colimit_smul_aux F r) _ x,
intros x y h,
apply colimit_smul_aux_eq_of_rel,
apply types.filtered_colimit.rel_of_quot_rel,
exact h,
end }
@[simp]
lemma colimit_smul_mk_eq (r : R) (x : Σ j, F.obj j) : r • M.mk x = M.mk ⟨x.1, r • x.2⟩ := rfl
instance colimit_module : module R M :=
{ one_smul := λ x, begin
apply quot.induction_on x, clear x, intro x, cases x with j x,
erw [colimit_smul_mk_eq F 1 ⟨j, x⟩, one_smul],
refl,
end,
mul_smul := λ r s x, begin
apply quot.induction_on x, clear x, intro x, cases x with j x,
erw [colimit_smul_mk_eq F (r * s) ⟨j, x⟩, colimit_smul_mk_eq F s ⟨j, x⟩,
colimit_smul_mk_eq F r ⟨j, _⟩, mul_smul],
end,
smul_add := λ r x y, begin
apply quot.induction_on₂ x y, clear x y, intros x y, cases x with i x, cases y with j y,
erw [colimit_add_mk_eq _ ⟨i, x⟩ ⟨j, y⟩ (max' i j) (left_to_max i j) (right_to_max i j),
colimit_smul_mk_eq, smul_add, colimit_smul_mk_eq, colimit_smul_mk_eq,
colimit_add_mk_eq _ ⟨i, _⟩ ⟨j, _⟩ (max' i j) (left_to_max i j) (right_to_max i j),
linear_map.map_smul, linear_map.map_smul],
refl,
end,
smul_zero := λ r, begin
erw [colimit_zero_eq _ (is_filtered.nonempty.some : J), colimit_smul_mk_eq, smul_zero],
refl,
end,
zero_smul := λ x, begin
apply quot.induction_on x, clear x, intro x, cases x with j x,
erw [colimit_smul_mk_eq, zero_smul, colimit_zero_eq _ j],
refl,
end,
add_smul := λ r s x, begin
apply quot.induction_on x, clear x, intro x, cases x with j x,
erw [colimit_smul_mk_eq, add_smul, colimit_smul_mk_eq, colimit_smul_mk_eq,
colimit_add_mk_eq _ ⟨j, _⟩ ⟨j, _⟩ j (𝟙 j) (𝟙 j), category_theory.functor.map_id,
id_apply, id_apply],
refl,
end }
/-- The bundled `R`-module giving the filtered colimit of a diagram. -/
def colimit : Module R := Module.of R M
/-- The linear map from a given `R`-module in the diagram to the colimit module. -/
def cocone_morphism (j : J) : F.obj j ⟶ colimit :=
{ map_smul' := λ r x, begin erw colimit_smul_mk_eq F r ⟨j, x⟩, refl, end,
.. (AddCommGroup.filtered_colimits.colimit_cocone (F ⋙ forget₂ (Module R) AddCommGroup)).ι.app j }
/-- The cocone over the proposed colimit module. -/
def colimit_cocone : cocone F :=
{ X := colimit,
ι :=
{ app := cocone_morphism,
naturality' := λ j j' f,
linear_map.coe_injective ((types.colimit_cocone (F ⋙ forget (Module R))).ι.naturality f) } }
/--
Given a cocone `t` of `F`, the induced linear map from the colimit to the cocone point.
We already know that this is a morphism between additive groups. The only thing left to see is that
it is a linear map, i.e. preserves scalar multiplication.
-/
def colimit_desc (t : cocone F) : colimit ⟶ t.X :=
{ map_smul' := λ r x, begin
apply quot.induction_on x, clear x, intro x, cases x with j x,
erw colimit_smul_mk_eq,
exact linear_map.map_smul (t.ι.app j) r x,
end,
.. (AddCommGroup.filtered_colimits.colimit_cocone_is_colimit
(F ⋙ forget₂ (Module R) AddCommGroup)).desc
((forget₂ (Module R) AddCommGroup.{v}).map_cocone t) }
/-- The proposed colimit cocone is a colimit in `Module R`. -/
def colimit_cocone_is_colimit : is_colimit colimit_cocone :=
{ desc := colimit_desc,
fac' := λ t j, linear_map.coe_injective $
(types.colimit_cocone_is_colimit (F ⋙ forget (Module R))).fac
((forget (Module R)).map_cocone t) j,
uniq' := λ t m h, linear_map.coe_injective $
(types.colimit_cocone_is_colimit (F ⋙ forget (Module R))).uniq
((forget (Module R)).map_cocone t) m ((λ j, funext $ λ x, linear_map.congr_fun (h j) x)) }
instance forget₂_AddCommGroup_preserves_filtered_colimits :
preserves_filtered_colimits (forget₂ (Module R) AddCommGroup.{v}) :=
{ preserves_filtered_colimits := λ J _ _, by exactI
{ preserves_colimit := λ F, preserves_colimit_of_preserves_colimit_cocone
(colimit_cocone_is_colimit F)
(AddCommGroup.filtered_colimits.colimit_cocone_is_colimit
(F ⋙ forget₂ (Module R) AddCommGroup.{v})) } }
instance forget_preserves_filtered_colimits : preserves_filtered_colimits (forget (Module R)) :=
limits.comp_preserves_filtered_colimits (forget₂ (Module R) AddCommGroup) (forget AddCommGroup)
end
end Module.filtered_colimits
|
{"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/algebra/category/Module/filtered_colimits.lean"}
|
import os
import argparse
import numpy as np
from itertools import cycle
import torch
import random
import pickle
from torchvision import datasets
from torch.autograd import Variable
from torch.distributions import Normal
import math
from alternate_data_loader import MNIST_Paired
from alternate_data_loader import DoubleUniNormal
import torch.optim as optim
from utils import accumulate_group_evidence, group_wise_reparameterize, reparameterize, mse_loss
import matplotlib.pyplot as plt
from utils import transform_config
from networks import Encoder, Decoder
from torch.utils.data import DataLoader
from mpl_toolkits.axes_grid1 import ImageGrid
parser = argparse.ArgumentParser()
torch.autograd.set_detect_anomaly(True)
# add arguments
parser.add_argument('--cuda', type=bool, default=True, help="run the following code on a GPU")
parser.add_argument('--reference_data', type=str, default='fixed', help="generate output using random digits or fixed reference")
parser.add_argument('--accumulate_evidence', type=str, default=False, help="accumulate class evidence before producing swapped images")
parser.add_argument('--batch_size', type=int, default=1, help="batch size for training")
parser.add_argument('--image_size', type=int, default=28, help="height and width of the image")
parser.add_argument('--num_channels', type=int, default=1, help="number of channels in the images")
parser.add_argument('--num_classes', type=int, default=10, help="number of classes in the dataset")
parser.add_argument('--style_dim', type=int, default=1, help="dimension of varying factor latent space")
parser.add_argument('--class_dim', type=int, default=1, help="dimension of common factor latent space")
# paths to save models
parser.add_argument('--encoder_save', type=str, default='encoder', help="model save for encoder")
parser.add_argument('--decoder_save', type=str, default='decoder', help="model save for decoder")
torch.set_printoptions(precision=8)
def run_through_network(X, labels_batch):
style_mu, style_logvar, class_mu, class_logvar = encoder(Variable(X))
grouped_mu, grouped_logvar = accumulate_group_evidence(
class_mu.data, class_logvar.data, labels_batch, FLAGS.cuda
)
importance_sampling(X[0], style_mu[0], style_logvar[0], class_mu[0], class_logvar[0])
# reconstruct samples
style_latent_embeddings = reparameterize(training=True, mu=style_mu, logvar=style_logvar)
class_latent_embeddings = group_wise_reparameterize(
training=True, mu=grouped_mu, logvar=grouped_logvar, labels_batch=labels_batch, cuda=FLAGS.cuda
)
reconstructed_images = decoder(style_latent_embeddings, class_latent_embeddings)
return reconstructed_images
def importance_sampling(sample, style_mu, style_logvar, grouped_mu, grouped_logvar):
print(style_mu)
print(style_logvar.exp())
print(grouped_mu)
print(grouped_logvar.exp())
#std = logvar.mul(0.5).exp_()
eps_s = torch.FloatTensor(128, 1)
eps_s.normal_()
eps_g = torch.FloatTensor(128, 1)
eps_g.normal_()
q_s = Normal(0, 1).log_prob(eps_s).exp()
print(q_s)
q_g = Normal(0, 1).log_prob(eps_g).exp()
reparam_s = eps_s.mul(style_logvar.mul(0.5).exp()).add(style_mu)
reparam_g = eps_g.mul(grouped_logvar.mul(0.5).exp()).add(grouped_mu)
p_s = Normal(0, 1).log_prob(reparam_s).exp()
p_g = Normal(0, 1).log_prob(reparam_g).exp()
reconstructed = decoder(reparam_s, reparam_g)
px_cond = Normal(0, 1).log_prob(sample - reconstructed).exp()
# importance weights p(x | z) * p(z) / q(z); their average estimates p(x)
summand = px_cond * p_s * p_g / q_s / q_g
print(summand)
likelihood = torch.sum(summand) / summand.size(0)
return likelihood
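# Illustrative sketch (not part of the original pipeline): the same importance-sampling
# estimate as `importance_sampling` above, written as a standalone helper. Assumptions:
# `decoder_fn` stands in for the global `decoder`, the latents are 1-dimensional (as with
# style_dim = class_dim = 1 above), and prior, proposal and observation model are all
# unit-variance Gaussians.
def importance_sampling_sketch(sample, style_mu, style_logvar, grouped_mu, grouped_logvar,
                               decoder_fn, num_samples=128):
    device = style_mu.device
    # noise for the style and group (content) latents
    eps_s = torch.randn(num_samples, 1, device=device)
    eps_g = torch.randn(num_samples, 1, device=device)
    # proposal density q(eps) = N(0, 1), evaluated at the raw noise
    q_s = Normal(0, 1).log_prob(eps_s).exp()
    q_g = Normal(0, 1).log_prob(eps_g).exp()
    # reparameterized draws from the encoder posteriors
    z_s = eps_s * style_logvar.mul(0.5).exp() + style_mu
    z_g = eps_g * grouped_logvar.mul(0.5).exp() + grouped_mu
    # prior density p(z) = N(0, 1), evaluated at the draws
    p_s = Normal(0, 1).log_prob(z_s).exp()
    p_g = Normal(0, 1).log_prob(z_g).exp()
    # unit-variance Gaussian observation model p(x | z)
    reconstructed = decoder_fn(z_s, z_g)
    px_cond = Normal(0, 1).log_prob(sample - reconstructed).exp()
    # p(x) is approximated by the mean importance weight p(x | z) p(z) / q(z)
    weights = px_cond * p_s * p_g / (q_s * q_g)
    return weights.mean()
# Hypothetical usage: importance_sampling_sketch(X[0], style_mu[0], style_logvar[0],
#                                                grouped_mu[0], grouped_logvar[0], decoder)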
FLAGS = parser.parse_args()
if __name__ == '__main__':
"""
model definitions
"""
encoder = Encoder(style_dim=FLAGS.style_dim, class_dim=FLAGS.class_dim)
decoder = Decoder(style_dim=FLAGS.style_dim, class_dim=FLAGS.class_dim)
encoder.load_state_dict(
torch.load(os.path.join('checkpoints', FLAGS.encoder_save), map_location=lambda storage, loc: storage))
decoder.load_state_dict(
torch.load(os.path.join('checkpoints', FLAGS.decoder_save), map_location=lambda storage, loc: storage))
encoder.cuda()
decoder.cuda()
if not os.path.exists('reconstructed_images'):
os.makedirs('reconstructed_images')
# load data set and create data loader instance
'''
print('Loading MNIST paired dataset...')
paired_mnist = MNIST_Paired(root='mnist', download=True, train=False, transform=transform_config)
loader = cycle(DataLoader(paired_mnist, batch_size=FLAGS.batch_size, shuffle=True, num_workers=0, drop_last=True))
image_array = []
for i in range(0, 11):
image_array.append([])
# add a blank image in the top row
image_array[0].append(np.zeros((28, 28, 3), dtype=np.float32))
sampled_classes = list(paired_mnist.data_dict.keys())
sampled_classes.sort()
if FLAGS.reference_data == 'random':
count = 1
# fill the top row and first column of the grid with reference images
for class_sample in sampled_classes:
class_image = random.SystemRandom().choice(paired_mnist.data_dict[class_sample])
class_image = np.transpose(class_image.numpy(), (1, 2, 0))
class_image = np.concatenate((class_image, class_image, class_image), axis=2)
# add image in the top row
image_array[0].append(class_image)
# add image in the first column
image_array[count].append(class_image)
count += 1
elif FLAGS.reference_data == 'fixed':
with open("reference_images.pkl", "rb") as fp:
reference_images = pickle.load(fp)
count = 1
# fill the top row and first column of the grid with reference images
for i in range(0, 10):
# add image in the top row
image_array[0].append(reference_images[i])
# add image in the first column
image_array[count].append(reference_images[i])
count += 1
# get class specified label for entire top row at once
specified_factor_images = []
for i in range(1, 11):
specified_factor_images.append(image_array[0][i][:, :, 0])
specified_factor_images = np.asarray(specified_factor_images)
specified_factor_images = np.expand_dims(specified_factor_images, axis=3)
specified_factor_images = np.transpose(specified_factor_images, (0, 3, 1, 2))
specified_factor_images = torch.FloatTensor(specified_factor_images)
specified_factor_images = specified_factor_images.contiguous()
if FLAGS.accumulate_evidence:
# sample a big batch, accumulate evidence and use that for class embeddings
image_batch, _, labels_batch = next(loader)
_, __, class_mu, class_logvar = encoder(Variable(image_batch))
grouped_mu, grouped_logvar = accumulate_group_evidence(
class_mu.data, class_logvar.data, labels_batch, FLAGS.cuda
)
accumulated_class_latent_embeddings = group_wise_reparameterize(
training=False, mu=grouped_mu, logvar=grouped_logvar, labels_batch=labels_batch, cuda=FLAGS.cuda
)
# select class latent embeddings for 10 digits sorted by class labels
class_latent_embeddings = []
for i in range(0, 10):
index = np.where(labels_batch.data.numpy() == i)[0][0]
class_latent_embeddings.append(accumulated_class_latent_embeddings[index])
class_latent_embeddings = torch.stack(class_latent_embeddings)
else:
# simply use 10 images selected from grid to produce class embeddings (no evidence accumulation)
_, __, class_mu, class_logvar = encoder(Variable(specified_factor_images))
labels_batch = torch.LongTensor(range(0, 10))
grouped_mu, grouped_logvar = accumulate_group_evidence(
class_mu.data, class_logvar.data, labels_batch, FLAGS.cuda
)
class_latent_embeddings = group_wise_reparameterize(
training=False, mu=grouped_mu, logvar=grouped_logvar, labels_batch=labels_batch, cuda=FLAGS.cuda
)
# generate all possible combinations using the encoder and decoder architecture in the grid
for row in range(1, 11):
style_image = image_array[row][0]
style_image = np.transpose(style_image, (2, 0, 1))
style_image = torch.FloatTensor(style_image)
style_image = style_image.contiguous()
style_image = style_image[0, :, :]
style_image = style_image.view(1, 1, 28, 28)
style_mu, style_logvar, _, __ = encoder(Variable(style_image))
style_latent_embeddings = reparameterize(training=False, mu=style_mu, logvar=style_logvar)
for col in range(1, 11):
specified_factor_temp = class_latent_embeddings[col - 1]
specified_factor_temp = specified_factor_temp.view(1, FLAGS.class_dim)
reconstructed_x = decoder(style_latent_embeddings, specified_factor_temp)
reconstructed_x = np.transpose(reconstructed_x.data.numpy(), (0, 2, 3, 1))[0]
reconstructed_x = np.concatenate((reconstructed_x, reconstructed_x, reconstructed_x), axis=2)
image_array[row].append(reconstructed_x)
# plot
image_array = np.asarray(image_array)
print(image_array.shape)
fig = plt.figure(1)
grid = ImageGrid(fig, 111, nrows_ncols=[11, 11], axes_pad=0.)
temp_list = []
for i in range(0, 11):
for j in range(0, 11):
temp_list.append(image_array[i][j])
for i in range(121):
grid[i].axis('off')
grid[i].imshow(temp_list[i])
plt.savefig('reconstructed_images/inference.png', bbox_inches='tight', pad_inches=0, transparent=True)
plt.clf()
'''
print('Loading double uninormal dataset...')
paired_mnist = DoubleUniNormal('DoubleUniNormal_theta=1_n=1500')
loader = cycle(DataLoader(paired_mnist, batch_size=FLAGS.batch_size, shuffle=True, num_workers=0, drop_last=True))
X = torch.FloatTensor(FLAGS.batch_size, 1)
# test data
test_data = torch.from_numpy(paired_mnist.x_test).float()
for i in range(len(test_data)):
if i == 1: # only evaluate the test example at index 1
print('Running X_'+str(i))
print(paired_mnist.y_test[i])
print()
X_i = test_data[i] # 1-d vector
l = X_i.size(0)
errors = []
for eta in range(10, 90):
print('\tRunning eta =', eta)
g1 = X_i[0:eta].view(eta, -1).cuda()
g2 = X_i[eta:l].view(l-eta, -1).cuda()
total_error = 0
for g in [g1, g2]:
style_mu, _, class_mu, class_logvar = encoder(g)
grouped_mu, _ = accumulate_group_evidence(
class_mu.data, class_logvar.data, torch.zeros(g.size(0), 1)
)
decoder_style_input = torch.tensor(style_mu, requires_grad = True, device='cuda')
decoder_content_input = torch.tensor(grouped_mu[0], requires_grad = True, device='cuda')
content = decoder_content_input.expand(g.size(0), 1)
optimizer = optim.Adam(
[decoder_style_input, decoder_content_input],
lr = 0.01 # this may be an important parameter
)
for iterations in range(500):
optimizer.zero_grad()
reconstructed = decoder(decoder_style_input, content)
reconstruction_error = torch.sum((reconstructed - g).pow(2))
# print(reconstruction_error)
reconstruction_error.backward(retain_graph = True)
optimizer.step()
# print(reconstruction_error)
total_error += reconstruction_error
errors.append(total_error)
with open('errors.txt', 'w') as f:
for e in errors:
f.write("%s\n" % e.item())
'''
image_batch, labels_batch = next(loader)
X.copy_(image_batch)
style_mu, style_logvar, class_mu, class_logvar = encoder(Variable(X))
grouped_mu, grouped_logvar = accumulate_group_evidence(
class_mu.data, class_logvar.data, labels_batch, FLAGS.cuda
)
# reconstruct samples
style_latent_embeddings = reparameterize(training=True, mu=style_mu, logvar=style_logvar)
class_latent_embeddings = group_wise_reparameterize(
training=True, mu=grouped_mu, logvar=grouped_logvar, labels_batch=labels_batch, cuda=FLAGS.cuda
)
reconstructed_images = decoder(style_latent_embeddings, class_latent_embeddings)
print(X.view(1, -1))
print(reconstructed_images.view(1, -1))
'''
|
{"hexsha": "aa73a6a2290ed25a39fba8bd5bd82ef97e54ec2d", "size": 12930, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "vicissitude1999/multi-level-vae", "max_stars_repo_head_hexsha": "83bc98fbe5046c61941298d4fd49b08fd868ee89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference.py", "max_issues_repo_name": "vicissitude1999/multi-level-vae", "max_issues_repo_head_hexsha": "83bc98fbe5046c61941298d4fd49b08fd868ee89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference.py", "max_forks_repo_name": "vicissitude1999/multi-level-vae", "max_forks_repo_head_hexsha": "83bc98fbe5046c61941298d4fd49b08fd868ee89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-13T19:16:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-13T19:16:27.000Z", "avg_line_length": 38.7125748503, "max_line_length": 135, "alphanum_fraction": 0.6719257541, "include": true, "reason": "import numpy", "num_tokens": 2969}
|
import matplotlib.pyplot as plt
import rosbag
import argparse
import numpy as np
def make_llc_plot(bagfile):
b = rosbag.Bag(bagfile)
state_est_topic_name = '/vehicle/state_est'
mpc_path_topic_name = '/vehicle/mpc_path'
if '/vehicle/state_est_dyn' in b.get_type_and_topic_info()[1].keys():
state_est_topic_name = '/vehicle/state_est_dyn'
mpc_path_topic_name = '/vehicle/mpc_path_dyn'
t_se = []; df_se = []; a_se = []
t_a_long = []; a_long = []
t_a_cmd = []; a_cmd = [];
t_df_cmd = []; df_cmd = [];
# Measured Acceleration and Steering
for topic, msg, _ in b.read_messages(topics=state_est_topic_name):
t_se.append(msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs)
a_se.append(msg.a) # as of 7/20/18, filtered with alpha = 0.01, may be too aggressive
df_se.append(msg.df) # this one should be fine, no filtering involved
# Measured Longitudinal Acceleration (Vehicle IMU)
for topic, msg, _ in b.read_messages(topics='/vehicle/imu'):
t_a_long.append(msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs)
a_long.append(msg.long_accel)
# MPC Commands (Desired Values)
for topic, msg, t in b.read_messages(topics='/control/accel'):
t_a_cmd.append(t.secs + 1e-9 * t.nsecs)
a_cmd.append(msg.data)
for topic, msg, t in b.read_messages(topics='/control/steer_angle'):
t_df_cmd.append(t.secs + 1e-9 * t.nsecs)
df_cmd.append(msg.data)
# Estimate controller enable time by using the first optimal solution
# from the MPC module, based on the MPC command/path message.
# TODO: alternatively use ada_stat/acc_mode status information to see
# if the controller is enabled or not.
t_enable = None
for topic, msg, _ in b.read_messages(topics=mpc_path_topic_name):
if msg.solv_status == 'Optimal':
t_enable = msg.header.stamp.secs + 1e-9 * msg.header.stamp.nsecs
break
if t_enable is None:
t_enable = max(t_a_cmd[0], t_df_cmd[0])
t_se = np.array(t_se) - t_enable
t_a_long = np.array(t_a_long) - t_enable
t_a_cmd = np.array(t_a_cmd) - t_enable
t_df_cmd = np.array(t_df_cmd) - t_enable
# TODO: Do we want to allow simulated, path following data too? If so, need to switch a_long with a_se.
t_lims = [0.0, max(arr[-1] for arr in (t_se, t_a_long, t_a_cmd, t_df_cmd) if len(arr) > 0)]
plt.figure()
plt.subplot(211)
plt.plot(t_a_cmd, a_cmd, 'r', label='MPC')
if len(a_long) == 0:
plt.plot(t_se, a_se, 'k', label='ACT') # Use filtered acceleration for simulated data.
print("Note: Using filtered acceleration for simulated bag file.")
else:
plt.plot(t_a_long, a_long, 'k', label='ACT') # Use longitudinal acceleration measured by vehicle for real data.
plt.xlim(t_lims)
plt.xlabel('t (s)')
plt.ylabel('Acceleration (m/s^2)')
plt.legend()
plt.subplot(212)
plt.plot(t_df_cmd, df_cmd, 'r', label='MPC')
plt.plot(t_se, df_se, 'k', label='ACT')
plt.xlim(t_lims)
plt.xlabel('t (s)')
plt.ylabel('Steer Angle (rad)')
plt.suptitle('Low Level Tracking Response')
plt.show()
if __name__=='__main__':
parser = argparse.ArgumentParser('Plots MPC command vs. actual low level acceleration + steering controller behavior. Meant for real, path following data.')
parser.add_argument('--bf', type=str, required=True, help='Bag file for path followed.')
args = parser.parse_args()
make_llc_plot(args.bf)
|
{"hexsha": "bc9f0e330eb97d93f3e0bf9657dd22944938bb64", "size": 3282, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/analysis/plot_low_level_control.py", "max_stars_repo_name": "yluthu/genesis_path_follower", "max_stars_repo_head_hexsha": "55d6e88a5e928cc214f33b07dc2624e6d13e51df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 101, "max_stars_repo_stars_event_min_datetime": "2018-10-18T23:42:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T21:54:22.000Z", "max_issues_repo_path": "scripts/analysis/plot_low_level_control.py", "max_issues_repo_name": "yluthu/genesis_path_follower", "max_issues_repo_head_hexsha": "55d6e88a5e928cc214f33b07dc2624e6d13e51df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-19T01:06:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-05T19:18:41.000Z", "max_forks_repo_path": "scripts/analysis/plot_low_level_control.py", "max_forks_repo_name": "yluthu/genesis_path_follower", "max_forks_repo_head_hexsha": "55d6e88a5e928cc214f33b07dc2624e6d13e51df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 58, "max_forks_repo_forks_event_min_datetime": "2018-10-18T23:45:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-17T15:20:13.000Z", "avg_line_length": 36.8764044944, "max_line_length": 158, "alphanum_fraction": 0.7093235832, "include": true, "reason": "import numpy", "num_tokens": 944}
|
Require Export Fiat.Common.Coq__8_4__8_5__Compat.
(** * Definition of a parse-tree-returning CFG parser-recognizer *)
Require Import Coq.Lists.List.
Require Import Coq.Arith.EqNat.
Require Import Coq.Arith.Compare_dec Coq.Arith.Wf_nat.
Require Import Coq.ZArith.ZArith.
Require Import Fiat.Common.List.Operations.
Require Import Fiat.Parsers.ContextFreeGrammar.Core.
Require Import Fiat.Parsers.BaseTypes.
Require Import Fiat.Parsers.GenericBaseTypes.
Require Import Fiat.Parsers.GenericCorrectnessBaseTypes.
Require Import Fiat.Parsers.StringLike.Properties.
Require Import Fiat.Parsers.MinimalParse.
Require Import Fiat.Parsers.CorrectnessBaseTypes.
Require Import Fiat.Parsers.BaseTypesLemmas.
Require Import Fiat.Parsers.ContextFreeGrammar.Properties Fiat.Parsers.WellFoundedParse.
Require Import Fiat.Parsers.MinimalParseOfParse.
Require Import Fiat.Parsers.GenericRecognizer.
Require Import Fiat.Parsers.GenericRecognizerExt.
Require Import Fiat.Common.Wf Fiat.Common.Wf1.
Require Import Fiat.Common.List.ListFacts.
Require Import Fiat.Common.NatFacts.
Require Import Fiat.Common.UIP.
Require Import Fiat.Common.
Import ListNotations.
Import NPeano.
Set Implicit Arguments.
Local Open Scope string_like_scope.
Local Arguments dec_stabalize : simpl never.
Local Ltac R_etransitivity_eq :=
idtac;
let x' := fresh in
match goal with
| [ |- ?R ?x _ ]
=> let T := type of x in
evar (x' : T);
replace x with x'; subst x'
end.
Local Ltac subst_le_proof :=
idtac;
match goal with
| [ H : ?x <= ?y, H' : ?x <= ?y |- _ ]
=> assert (H = H') by apply Le.le_proof_irrelevance; subst
end.
Local Ltac subst_nat_eq_proof :=
idtac;
match goal with
| [ H : ?x = ?y :> nat, H' : ?x = ?y |- _ ]
=> assert (H = H') by apply UIP_nat; subst
| [ H : ?x = ?x :> nat |- _ ]
=> assert (eq_refl = H) by apply UIP_nat; subst
end.
Local Ltac subst_bool_eq_proof :=
idtac;
match goal with
| [ H : ?x = ?y :> bool, H' : ?x = ?y |- _ ]
=> assert (H = H') by apply UIP_bool; subst
| [ H : is_true ?x, H' : ?x = true |- _ ]
=> assert (H = H') by apply UIP_bool; subst
| [ H : is_true ?x, H' : is_true ?x |- _ ]
=> assert (H = H') by apply UIP_bool; subst
end.
Local Ltac prove_nonterminals_t' :=
idtac;
match goal with
| _ => assumption
| [ H : is_true (is_valid_nonterminal initial_nonterminals_data (of_nonterminal _)) |- _ ]
=> apply initial_nonterminals_correct in H
| [ H : In (to_nonterminal _) (Valid_nonterminals ?G) |- _ ]
=> apply initial_nonterminals_correct' in H
end.
Local Ltac prove_nonterminals_t := repeat prove_nonterminals_t'.
Local Ltac solve_nonterminals_t' :=
idtac;
match goal with
| _ => prove_nonterminals_t'
| [ H : context[of_nonterminal (to_nonterminal _)] |- _ ]
=> rewrite of_to_nonterminal in H by prove_nonterminals_t
end.
Local Ltac solve_nonterminals_t := repeat solve_nonterminals_t'.
Section recursive_descent_parser.
Context {Char} {HSLM : StringLikeMin Char} {HSL : StringLike Char} {HSLP : StringLikeProperties Char} (G : grammar Char).
Context {data : @boolean_parser_dataT Char _}
{cdata : @boolean_parser_completeness_dataT' Char _ _ G data}
{rdata : @parser_removal_dataT' _ G _}
{gendata : @generic_parser_dataT Char}.
Context {gcdata : generic_parser_correctness_dataT}.
Context (str : String).
Local Notation dec T := (T + (T -> False))%type (only parsing).
Local Notation iffT x y := ((x -> y) * (y -> x))%type (only parsing).
Lemma dec_prod {A B} (HA : dec A) (HB : dec B) : dec (A * B).
Proof.
destruct HA; [ destruct HB; [ left; split; assumption | right ] | right ];
intros [? ?]; eauto with nocore.
Defined.
Lemma bool_of_sum_dec_prod {A B HA HB}
: (@dec_prod A B HA HB) = (andb HA HB) :> bool.
Proof. destruct HA, HB; reflexivity. Qed.
Lemma dec_In {A} {P : A -> Type} (HA : forall a, dec (P a)) ls
: dec { a : _ & (In a ls * P a) }.
Proof.
induction ls as [|x xs IHxs]; simpl.
{ right; intros [? [? ?]]; assumption. }
{ destruct (HA x); [ left; exists x; split; eauto | destruct IHxs; [ left | right ] ];
intros;
destruct_head sigT;
destruct_head prod;
destruct_head or;
subst;
eauto. }
Defined.
Lemma parse_complete_stabalize' {len0 valid str' it its}
(n m : nat)
(Hn : n >= length str')
(Hm : m >= length str')
: (minimal_parse_of_item (G := G) len0 valid (take n str') it
* minimal_parse_of_production (G := G) len0 valid (drop n str') its)
-> (minimal_parse_of_item (G := G) len0 valid (take m str') it
* minimal_parse_of_production (G := G) len0 valid (drop m str') its).
Proof.
intros [pi pp]; split;
[ eapply expand_minimal_parse_of_item; [ .. | eassumption ]
| eapply expand_minimal_parse_of_production; [ .. | eassumption ] ];
try reflexivity; eauto.
{ clear -Hn Hm HSLP.
abstract (rewrite !take_long by assumption; reflexivity). }
{ clear -Hn Hm HSLP.
abstract (apply bool_eq_empty; rewrite drop_length; omega). }
Defined.
Definition parse_complete_stabalize'' {len0 valid str' it its}
(n m : nat)
(Hn : n >= length str')
(Hm : m >= length str')
:= (@parse_complete_stabalize' len0 valid str' it its n m Hn Hm,
@parse_complete_stabalize' len0 valid str' it its m n Hm Hn).
Definition parse_complete_stabalize {len0 valid str' it its}
(n : nat)
(Hn : n >= length str')
:= @parse_complete_stabalize'' len0 valid str' it its n (S n) Hn (le_S _ _ Hn).
Global Arguments parse_complete_stabalize : simpl never.
Section min.
Section parts.
Local Ltac expand_onceL :=
idtac;
match goal with
| [ |- ?R (bool_of_sum ?x) ?y ]
=> let x' := head x in
unfold x'
end.
Local Ltac expand_onceR :=
idtac;
match goal with
| [ |- ?R (bool_of_sum ?x) ?y ]
=> let y' := head y in
unfold y'
end.
Local Ltac expand_once := try expand_onceL; try expand_onceR.
Local Ltac expand_both_once :=
idtac;
match goal with
| [ |- ?R ?x ?y ]
=> let x' := head x in
let y' := head y in
try unfold x'; try unfold y'
end.
Local Hint Resolve beq_nat_true : generic_parser_correctness.
Local Ltac eq_t' :=
first [ progress subst_le_proof
| progress subst_nat_eq_proof
| progress subst_bool_eq_proof
| solve [ eauto with generic_parser_correctness nocore ]
| rewrite sub_twice, Min.min_r by assumption
| rewrite !@min_max_sub
| rewrite Nat.sub_max_distr_l
| rewrite <- Nat.sub_add_distr
| rewrite (proj2 (Nat.ltb_lt _ _)) by assumption
| idtac;
match goal with
| [ |- ?x = ?x ] => reflexivity
| [ H : ?x = true, H' : ?x = false |- _ ] => exfalso; clear -H H'; congruence
| [ |- ?R ?v _ ]
=> match v with
| bool_of_sum (match ?x with
| inl H => inl (@?L H)
| inr H' => inr (@?R H')
end)
=> replace v with (bool_of_sum x) by (case x; reflexivity)
| bool_of_sum (match ?x with
| inl H => inl (@?L H)
| inr H' => match ?x' with
| inl H'0 => inl (@?RL H' H'0)
| inr H'0' => inr (@?RR H' H'0')
end
end)
=> replace v with (orb (bool_of_sum x) (bool_of_sum x'))
by (case x; case x'; reflexivity)
| bool_of_sum (match ?x with
| left H => @?L H
| right H' => @?R H'
end)
=> replace v with (match x with
| left H => bool_of_sum (L H)
| right H' => bool_of_sum (R H')
end)
by (case x; reflexivity)
end
| _ => solve [ eauto with nocore ]
| [ |- ?R (bool_of_sum (sumbool_rect _ _ _ ?sb)) (option_rect _ _ _ (sumbool_rect _ _ _ ?sb)) ]
=> destruct sb; simpl
| [ |- context[?e] ]
=> not is_var e;
not is_evar e;
match type of e with
| _ <= _ => idtac
| ?x = _ :> nat => not constr_eq e (eq_refl x)
end;
generalize e; intro
| [ H : ?x = cons _ _ |- context[match ?x with _ => _ end] ] => rewrite H
end
| rewrite fold_left_orb_true
| rewrite bool_of_sum_dec_prod
| idtac;
let R := match goal with |- ?R ?LHS ?RHS => R end in
let LHS := match goal with |- ?R ?LHS ?RHS => LHS end in
let RHS := match goal with |- ?R ?LHS ?RHS => RHS end in
match RHS with
| context Rc[bool_of_sum ?f0]
=> match f0 with
| ?f ?ae ?be ?ce ?de ?ee ?ge ?he
=> match LHS with
| context Lc[f ?a ?b ?c ?d ?e ?g ?h]
=> unify a ae; unify b be; unify c ce; unify d de; unify e ee; unify g ge; unify h he;
let v := fresh in
set (v := f a b c d e g h);
let L' := context Lc[v] in
let R' := context Rc[bool_of_sum v] in
change (R L' R');
clearbody v; destruct v
end
end
end
| idtac;
let R := match goal with |- ?R ?LHS ?RHS => R end in
let LHS := match goal with |- ?R ?LHS ?RHS => LHS end in
let RHS := match goal with |- ?R ?LHS ?RHS => RHS end in
match RHS with
| context Rc[bool_of_sum ?f0]
=> match f0 with
| ?f ?ae ?be ?ce ?de ?ee ?ge
=> match LHS with
| context Lc[f ?a ?b ?c ?d ?e ?g]
=> unify a ae; unify b be; unify c ce; unify d de; unify e ee; unify g ge;
let v := fresh in
set (v := f a b c d e g);
let L' := context Lc[v] in
let R' := context Rc[bool_of_sum v] in
change (R L' R');
clearbody v; destruct v
end
end
end
| idtac;
let R := match goal with |- ?R ?LHS ?RHS => R end in
let LHS := match goal with |- ?R ?LHS ?RHS => LHS end in
let RHS := match goal with |- ?R ?LHS ?RHS => RHS end in
match RHS with
| context Rc[bool_of_sum ?f0]
=> match f0 with
| ?f ?ae ?be ?ce ?de ?ee
=> match LHS with
| context Lc[f ?a ?b ?c ?d ?e]
=> unify a ae; unify b be; unify c ce; unify d de; unify e ee;
let v := fresh in
set (v := f a b c d e);
let L' := context Lc[v] in
let R' := context Rc[bool_of_sum v] in
change (R L' R');
clearbody v; destruct v
end
end
end
| idtac;
let RHS := match goal with |- ?R _ ?RHS => RHS end in
match RHS with
| context[match ?it with Terminal _ => _ | _ => _ end]
=> destruct it eqn:?
| _ => progress subst
| _ => progress simpl @bool_of_sum
| context G[is_char ?x ?y]
=> let H := fresh in
destruct (Utils.dec (is_char x y)) as [H|H];
[ let G' := context G[true] in
transitivity G'; [ | symmetry; exact H ]
| let G' := context G[false] in
transitivity G'; [ | symmetry; exact H ] ]
| context G[beq_nat ?x ?y]
=> let H := fresh in
destruct (Utils.dec (beq_nat x y)) as [H|H];
[ let G' := context G[true] in
transitivity G'; [ | symmetry; exact H ]
| let G' := context G[false] in
transitivity G'; [ | symmetry; exact H ] ]
| context[match ?x with _ => _ end]
=> let H := match goal with
| [ H : ?x = cons _ _ |- _ ] => H
end in
etransitivity; [ | rewrite H; reflexivity ]
end
| idtac;
let LHS := match goal with |- ?R ?LHS ?RHS => LHS end in
let RHS := match goal with |- ?R ?LHS ?RHS => RHS end in
match LHS with
| match Utils.dec ?x with _ => _ end
=> match RHS with
| context[x]
=> destruct (Utils.dec x)
end
end
| idtac;
match goal with
| [ H : ?x = true |- context[?x] ] => rewrite H
| [ H : ?x = false |- context[?x] ] => rewrite H
| [ H : (_ <? _)%nat = true |- _ ]
=> apply Nat.ltb_lt in H
| [ H : ?T, H' : ~?T |- _ ] => specialize (H' H)
| [ H : False |- _ ] => destruct H
end ].
Local Ltac eq_t := expand_once; repeat eq_t'.
(** Here are some general tactics to do variadic list_rect reasoning. Unfortunately, they're really slow (~ 20 s), so we don't use them. *)
Local Ltac curry_do_change HS :=
idtac;
match HS with
| context HS'[list_rect ?P ?N ?C]
=> (let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
(*set (P0 := P);*)
set (N0 := N);
set (C0 := C);
let HS'' := context HS'[list_rect P(*0*) N0 C0] in
change HS with HS'')
end.
Local Ltac pre_pre_curry_func :=
idtac;
let LHS := match goal with |- bool_of_sum ?LHS = ?RHS => LHS end in
let RHS := match goal with |- bool_of_sum ?LHS = ?RHS => RHS end in
curry_do_change LHS;
curry_do_change RHS.
Local Ltac pre_curry_func cont :=
idtac;
let LHS := match goal with |- bool_of_sum ?LHS = ?RHS => LHS end in
let RHS := match goal with |- bool_of_sum ?LHS = ?RHS => RHS end in
let ls := match LHS with
| context[list_rect ?P ?N ?C ?ls] => ls
end in
let LRL := match LHS with
| context[list_rect ?P ?N ?C] => constr:(list_rect P N C)
end in
let LRR := match RHS with
| context[list_rect ?P ?N ?C] => constr:(list_rect P N C)
end in
let F := fresh "F" in
let G := fresh "G" in
let F' := fresh "F'" in
let G' := fresh "G'" in
set (F := LRL);
set (G := LRR);
set (F' := fun ls (_ : unit) => F ls);
set (G' := fun ls (_ : unit) => G ls);
change (F ls) with (F' ls tt);
change (G ls) with (G' ls tt);
subst F G;
cont F' G'.
Local Ltac curry_func' F G n :=
idtac;
let LHS := match goal with |- bool_of_sum ?LHS = ?RHS => LHS end in
let RHS := match goal with |- bool_of_sum ?LHS = ?RHS => RHS end in
let ls := match LHS with
| context[F ?ls ?x0 ?x] => ls
end in
let x0 := match LHS with
| context[F ?ls ?x0 ?x] => x0
end in
let al := match LHS with
| context[F ?ls ?x0 ?x] => x
end in
let ar := match RHS with
| context[G ?ls ?x0 ?x] => x
end in
let T := (type of F) in
let P := match (eval cbv beta in T) with
| forall (ls : ?lsT) (x0 : @?T ls) (y0 : @?T' ls x0), _ => T'
end in
let F' := fresh "F'" in
let G' := fresh "G'" in
first [ constr_eq al ar;
first [ set (F' := fun ls v => F ls (fst v) (snd v));
set (G' := fun ls (v : sigT (P ls)) => G ls (fst v) (snd v));
progress change (F ls x0 al) with (F' ls (x0, al));
progress change (G ls x0 ar) with (G' ls (x0, ar))
| set (F' := fun ls (v : sigT (P ls)) => F ls (projT1 v) (projT2 v));
set (G' := fun ls (v : sigT (P ls)) => G ls (projT1 v) (projT2 v));
progress change (F ls x0 al) with (F' ls (existT (P ls) x0 al));
progress change (G ls x0 ar) with (G' ls (existT (P ls) x0 ar)) ];
try subst F;
try subst G;
idtac n
| not constr_eq al ar;
first [ set (F' := fun ls v => F ls (fst v) (snd v));
set (G' := fun ls v => G ls (fst v));
progress change (F ls x0 al) with (F' ls (x0, al));
progress change (G ls x0) with (G' ls (x0, al))
| set (F' := fun ls (v : sigT (P ls)) => F ls (projT1 v) (projT2 v));
set (G' := fun ls (v : sigT (P ls)) => G ls (projT1 v));
progress change (F ls x0 al) with (F' ls (existT (P ls) x0 al));
progress change (G ls x0) with (G' ls (existT (P ls) x0 al)) ];
try subst F;
try subst G ];
cbv beta in *;
try curry_func' F' G' (S n).
Local Ltac curry_list_rect := pre_pre_curry_func; pre_curry_func ltac:(fun F G => curry_func' F G 0).
Local Ltac post_resolve_list_rect :=
idtac;
(lazymatch goal with
| [ |- bool_of_sum (?F ?ls ?x) = ?G ?ls ?x ]
=> (let y := fresh in
let IH := fresh in
refine (list_rect
(fun ls' => forall x', bool_of_sum (F ls' x') = G ls' x')
_
_
ls x);
subst F G;
cbv beta;
[ intro y;
let LHS := match goal with |- bool_of_sum ?LHS = ?RHS => LHS end in
let RHS := match goal with |- bool_of_sum ?LHS = ?RHS => RHS end in
match LHS with
| context[list_rect _ ?N ?C]
=> subst N
end;
match RHS with
| context[list_rect _ ?N ?C]
=> subst N
end;
simpl @list_rect;
revert y
| intros ?? IH y;
let LHS := match goal with |- bool_of_sum ?LHS = ?RHS => LHS end in
let RHS := match goal with |- bool_of_sum ?LHS = ?RHS => RHS end in
let C := match LHS with | context[list_rect _ ?N ?C] => C end in
let C' := match RHS with | context[list_rect _ ?N ?C] => C end in
simpl @list_rect;
unfold C at 1, C' at 1;
revert y ];
repeat match goal with
| [ |- forall (x : sigT ?P), _ ] => intros_destruct; simpl
| [ |- forall (x : _ * _), _ ] => intros_destruct; simpl
end;
[
| repeat match type of IH with
| forall (x : sigT ?P), _
=> specialize (fun a b => IH (existT P a b)); simpl in IH
| forall (x : _ * _), _
=> specialize (fun a b => IH (a, b)); simpl in IH
end ];
intros
)
end).
Local Ltac eq_list_rect_slow :=
curry_list_rect; post_resolve_list_rect.
(** And here's the really fast specialized version *)
Local Ltac eq_list_rect
:= (idtac;
lazymatch goal with
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f ?g)) (list_rect ?P' ?N' ?C' ?ls ?a ?e ?f) ]
=> idtac;
(let R' := match (eval pattern a, e, f in R) with ?R' _ _ _ => R' end in
let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' g',
R' a' e' f'
(bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f' g'))
(list_rect P1 N1 C1 ls' a' e' f'))
_
_
ls a b c d e f g);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f ?g ?h)) (list_rect ?P' ?N' ?C' ?ls ?a ?e (?len0 - ?f)) ]
=> (let R' := match (eval pattern a, e, f, h in R) with ?R' _ _ _ _ => R' end in
let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
(*replace (len0 - f) with (len0 - f + 0) by omega;*)
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' g' h' h''(* z'*),
R' a' e' f' h'
(bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f' g' h'))
(list_rect P1 N1 C1 ls' a' e' (len0 - f'(* + z'*))))
_
_
ls a b c d e f g h h (*0*));
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f ?g ?h)) (list_rect ?P' ?N' ?C' ?ls ?a ?e ?f ?h) ]
=> (let R' := match eval pattern a, e, f, h in R with ?R' _ _ _ _ => R' end in
let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' g' h' h'',
R' a' e' f' h'
(bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f' g' h'))
(list_rect P1 N1 C1 ls' a' e' f' h''))
_
_
ls a b c d e f g h h);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f)) (list_rect ?P' ?N' ?C' ?ls ?a ?c ?d ?f) ]
=> (let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' f'' ,
R (bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f'))
(list_rect P1 N1 C1 ls' a' c' d' f''))
_
_
ls a b c d e f f);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f)) (list_rect ?P' ?N' ?C' ?ls ?c ?d ?f) ]
=> (let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' f'' ,
R (bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f'))
(list_rect P1 N1 C1 ls' c' d' f''))
_
_
ls a b c d e f f);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e)) (list_rect ?P' ?N' ?C' ?ls ?b ?c ?e) ]
=> (let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' e'',
R (bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e'))
(list_rect P1 N1 C1 ls' b' c' e''))
_
_
ls a b c d e e);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
end).
Local Ltac eq_list_rect_prop1 PH Hv
:= (idtac;
lazymatch goal with
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?a ?b ?c ?d ?e ?f)) (list_rect ?P' ?N' ?C' ?ls ?a ?c ?d ?f) ]
=> (let P0 := fresh in
let N0 := fresh in
let C0 := fresh in
let P1 := fresh in
let N1 := fresh in
let C1 := fresh in
set (P0 := P);
set (P1 := P');
set (N0 := N);
set (N1 := N');
set (C0 := C);
set (C1 := C');
refine (list_rect
(fun ls' => forall a' b' c' d' e' f' f'',
PH a'
-> R (bool_of_sum (list_rect P0 N0 C0 ls' a' b' c' d' e' f'))
(list_rect P1 N1 C1 ls' a' c' d' f''))
_
_
ls a b c d e f f Hv);
simpl @list_rect;
[ subst N0 N1; simpl; intros
| intros; unfold C0 at 1, C1 at 1; simpl ])
end).
Local Ltac eq_list_rect_fold_left_orb :=
idtac;
match goal with
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls)) (fold_left ?orb (map ?f ?ls) ?false) ]
=> let P' := fresh in
let N' := fresh in
let N' := fresh in
let C' := fresh in
let f' := fresh in
set (P' := P);
set (N' := N);
set (C' := C);
set (f' := f);
refine (list_rect
(fun ls' => R (bool_of_sum (list_rect P' N' C' ls'))
(fold_left orb (map f' ls') false))
_
_
ls);
simpl @list_rect; simpl @fold_left; intros;
[ subst P' f' N'
| unfold C' at 1, f' at 2 ]
end.
Local Ltac eq_list_rect_fold_right_orb :=
(idtac;
lazymatch goal with
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls)) (fold_right ?orb ?false (map ?f ?ls)) ]
=> (let P' := fresh in
let N' := fresh in
let C' := fresh in
let f' := fresh in
set (P' := P);
set (N' := N);
set (C' := C);
set (f' := f);
refine (list_rect
(fun ls' =>
R (bool_of_sum (list_rect P' N' C' ls'))
(fold_right orb false (map f' ls')))
_
_
ls);
simpl @list_rect; simpl @fold_right; intros;
[ subst P' f' N'
| unfold C' at 1, f' at 1 ];
cbv beta)
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?k0 ?k1)) (fold_right ?orb ?false (map ?f ?ls)) ]
=> (let R' := match (eval pattern ls in R) with ?R' _ => R' end in
let P' := fresh in
let N' := fresh in
let C' := fresh in
let f' := fresh in
set (P' := P);
set (N' := N);
set (C' := C);
set (f' := f);
refine (list_rect
(fun ls' => forall k0' k1',
R' ls'
(bool_of_sum (list_rect P' N' C' ls' k0' k1'))
(fold_right orb false (map f' ls')))
_
_
ls k0 k1);
simpl @list_rect; simpl @fold_right; intros;
[ subst P' f' N'
| unfold C' at 1, f' at 1 ];
cbv beta)
| [ |- ?R (bool_of_sum (list_rect ?P ?N ?C ?ls ?k0)) (fold_right ?orb ?false (map ?f ?ls)) ]
=> (let P' := fresh in
let N' := fresh in
let C' := fresh in
let f' := fresh in
set (P' := P);
set (N' := N);
set (C' := C);
set (f' := f);
refine (list_rect
(fun ls' => forall k0',
R (bool_of_sum (list_rect P' N' C' ls' k0'))
(fold_right orb false (map f' ls')))
_
_
ls k0);
simpl @list_rect; simpl @fold_right; intros;
[ subst P' f' N'
| unfold C' at 1, f' at 1 ];
cbv beta)
end).
Local Ltac t_item str_matches_nonterminal' :=
repeat match goal with
| [ H : andb _ _ = true |- _ ] => apply char_at_matches_is_char_no_ex in H; [ | assumption ]
| [ H : and _ _ |- _ ] => let H0 := fresh in
let H1 := fresh in
destruct H as [H0 H1]; try clear H
| [ H : or _ _ |- _ ] => let H0 := fresh in destruct H as [H0|H0]; try clear H
| [ H : beq_nat _ _ = true |- _ ] => apply Nat.eqb_eq in H
| [ H : ?x = 0, H' : context[?x] |- _ ] => rewrite H in H'
| _ => progress subst
| _ => progress simpl in *
| _ => congruence
| [ H : context[match get ?n ?s with _ => _ end] |- _ ]
=> destruct (get n s) eqn:?
| _ => eassumption
| [ H : minimal_parse_of_item _ _ _ (NonTerminal ?nt) |- _ ]
=> assert (List.In nt (Valid_nonterminals G));
inversion H; clear H
| [ H : minimal_parse_of_item _ _ _ (Terminal _) |- _ ]
=> inversion H; clear H
| [ H : minimal_parse_of_nonterminal _ _ _ ?nt |- List.In ?nt (Valid_nonterminals G) ]
=> inversion H; clear H
| [ H : is_true (is_char (substring _ 0 _) _) |- _ ] =>
apply length_singleton in H
| [ H : context[length (substring _ 0 _)] |- _ ]
=> rewrite take_length in H
| [ H : beq_nat ?len 1 = false,
H' : ?offset + ?len <= length ?str,
H'' : is_true (is_char (substring ?offset ?len ?str) _)
|- _ ]
=> apply length_singleton in H''; rewrite substring_length in H''
| [ H : context[min] |- _ ] => rewrite Min.min_l in H by omega
| [ H : context[min] |- _ ] => rewrite Min.min_r in H by omega
| [ H : _ |- _ ] => rewrite Nat.add_sub in H
| [ H : andb (beq_nat _ 1) (char_at_matches _ _ _) = false |- False ] => contradict H
| [ |- _ <> false ] => apply Bool.not_false_iff_true
| [ |- andb (beq_nat _ 1) (char_at_matches _ _ _) = true ] => apply char_at_matches_is_char
| [ |- ex _ ] => eexists; split; eassumption
| [ H : context[to_nonterminal (of_nonterminal _)] |- _ ]
=> rewrite to_of_nonterminal in H by assumption
| [ H : minimal_parse_of_nonterminal _ _ _ (to_nonterminal (of_nonterminal ?nt)) |- _ ]
=> assert (List.In nt (Valid_nonterminals G));
[ inversion H; clear H
| rewrite to_of_nonterminal in H by assumption ]
| [ H : is_true (is_valid_nonterminal _ (of_nonterminal _)) |- _ ]
=> apply initial_nonterminals_correct in H
| [ H : List.In (to_nonterminal _) _ |- _ ]
=> apply initial_nonterminals_correct' in H
| [ H : is_valid_nonterminal initial_nonterminals_data (of_nonterminal ?nt) = false,
H' : List.In ?nt (Valid_nonterminals ?G) |- _ ]
=> apply initial_nonterminals_correct in H'; congruence
end.
Section item.
Context {len0 valid}
(offset : nat) (len0_minus_len : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(str_matches_nonterminal'
: nonterminal_carrierT -> parse_nt_T)
(str_matches_nonterminal
: forall nt : nonterminal_carrierT,
dec (minimal_parse_of_nonterminal (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_nonterminal nt))).
Section valid.
Context (Hmatches
: forall nt,
is_valid_nonterminal initial_nonterminals_data nt = true
-> parse_nt_is_correct (substring offset (len0 - len0_minus_len) str) nt (str_matches_nonterminal nt) (str_matches_nonterminal' nt))
(it : item Char).
Definition parse_item'
: dec (minimal_parse_of_item (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) it).
Proof.
refine (match it return dec (minimal_parse_of_item len0 valid (substring offset _ str) it) with
| Terminal P => if Sumbool.sumbool_of_bool (EqNat.beq_nat (len0 - len0_minus_len) 1 && char_at_matches offset str P)%bool
then inl (match get offset str as g return get offset str = g -> _ with
| Some ch => fun H => MinParseTerminal _ _ _ ch _ _ _
| None => fun _ => !
end eq_refl)
else inr (fun _ => !)
| NonTerminal nt => if Sumbool.sumbool_of_bool (is_valid_nonterminal initial_nonterminals_data (of_nonterminal nt))
then if str_matches_nonterminal (of_nonterminal nt)
then inl (MinParseNonTerminal _)
else inr (fun _ => !)
else inr (fun _ => !)
end);
clear str_matches_nonterminal Hmatches;
abstract (t_item str_matches_nonterminal').
Defined.
Definition parse_item'_correct
: parse_item_is_correct (substring offset (len0 - len0_minus_len) str) it parse_item' (GenericRecognizer.parse_item' str str_matches_nonterminal' offset (len0 - len0_minus_len) it).
Proof. eq_t. Qed.
End valid.
Section all.
Context (Hmatches
: forall nt,
parse_nt_is_correct (substring offset (len0 - len0_minus_len) str) nt (str_matches_nonterminal nt) (str_matches_nonterminal' nt))
(it : item Char).
Definition parse_item'_all_correct
: parse_item_is_correct (substring offset (len0 - len0_minus_len) str) it (parse_item' it) (GenericRecognizer.parse_item' str str_matches_nonterminal' offset (len0 - len0_minus_len) it).
Proof. eq_t. Qed.
End all.
End item.
Hint Resolve parse_item'_correct parse_item'_all_correct : generic_parser_correctness.
Definition parse_item'_ext
{len0 valid}
(offset len0_minus_len : nat)
(Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(str_matches_nonterminal str_matches_nonterminal'
: forall nt : nonterminal_carrierT,
dec (minimal_parse_of_nonterminal (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_nonterminal nt)))
(ext : forall nt,
str_matches_nonterminal nt = str_matches_nonterminal' nt)
(it : item Char)
: parse_item' offset len0_minus_len Hlen str_matches_nonterminal it
= parse_item' offset len0_minus_len Hlen str_matches_nonterminal' it.
Proof.
expand_both_once; destruct it; try reflexivity; [].
rewrite ext.
clear ext str_matches_nonterminal.
reflexivity.
Qed.
Section production.
Context {len0 valid}
(parse_nonterminal
: forall (offset : nat) (len0_minus_len : nat) (Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str) (nt : nonterminal_carrierT),
dec (minimal_parse_of_nonterminal (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_nonterminal nt))).
Lemma Hlen_helper {offset len} (Hlen : len = 0 \/ offset + len <= length str)
: length (substring offset len str) = len.
Proof.
destruct Hlen; subst; rewrite substring_length; simpl;
apply Min.min_case_strong; omega.
Qed.
Lemma dec_in_helper {ls it its offset len0_minus_len}
(Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str)
: iffT {n0 : nat &
(In (min (length (substring offset (len0 - len0_minus_len) str)) n0) (map (min (length (substring offset (len0 - len0_minus_len) str))) ls) *
minimal_parse_of_item (G := G) len0 valid (take n0 (substring offset (len0 - len0_minus_len) str)) it *
minimal_parse_of_production (G := G) len0 valid (drop n0 (substring offset (len0 - len0_minus_len) str)) its)%type}
{n0 : nat &
(In n0 ls *
(minimal_parse_of_item (G := G) len0 valid (substring offset (len0 - max (len0 - n0) len0_minus_len) str) it *
minimal_parse_of_production (G := G) len0 valid (substring (offset + n0) (len0 - (len0_minus_len + n0)) str) its))%type}.
Proof.
rewrite Hlen_helper by assumption.
split; first [ intros [n [[H0 H1] H2]]
| intros [n [H0 [H1 H2]]] ].
{ destruct (le_lt_dec (len0 - len0_minus_len) n) as [pf|pf].
{ rewrite Min.min_l in H0 by assumption.
clear -H0 H1 H2 rdata cdata pf HSLP.
induction ls as [|x xs IHxs]; destruct_head_hnf False.
destruct (le_lt_dec (len0 - len0_minus_len) x).
{ exists x.
repeat split.
{ left; reflexivity. }
{ eapply expand_minimal_parse_of_item_beq; [ .. | eassumption ].
rewrite take_take, <- Nat.sub_min_distr_l.
rewrite !Min.min_r by omega.
reflexivity. }
{ eapply expand_minimal_parse_of_production_beq; [ .. | eassumption ].
rewrite drop_take, StringLike.drop_drop.
rewrite Nat.sub_add_distr.
apply bool_eq_empty; rewrite substring_length; apply Min.min_case_strong; generalize dependent (len0 - len0_minus_len); intros; omega. } }
{ simpl in *.
rewrite Min.min_r in H0 by omega.
destruct IHxs as [n' [IH0 [IH1 IH2]]].
{ destruct H0; try omega; assumption. }
{ exists n'; repeat split; try assumption.
right; assumption. } } }
{ exists n; repeat split; try assumption.
{ apply in_map_iff in H0.
repeat match goal with
| _ => progress destruct_head ex
| _ => progress destruct_head and
| [ H : context[min ?x ?y] |- _ ]
=> rewrite (Min.min_r x y) in H by omega
| _ => progress subst
| [ H : min ?x ?y < ?x |- _ ] => revert H; apply (Min.min_case_strong x y)
| _ => intro
| _ => omega
| _ => assumption
end. }
{ eapply expand_minimal_parse_of_item_beq; [ .. | eassumption ].
rewrite take_take.
rewrite <- Nat.sub_min_distr_l, sub_twice.
rewrite (Min.min_r len0) by omega.
reflexivity. }
{ eapply expand_minimal_parse_of_production_beq; [ .. | eassumption ].
rewrite drop_take, StringLike.drop_drop.
rewrite (plus_comm offset), Nat.sub_add_distr; reflexivity. } } }
{ exists n; repeat split; try assumption.
{ apply in_map; assumption. }
{ eapply expand_minimal_parse_of_item_beq; [ .. | eassumption ].
rewrite take_take.
rewrite <- Nat.sub_min_distr_l, sub_twice.
rewrite (Min.min_comm len0), <- !Min.min_assoc, (Min.min_r len0) by omega.
reflexivity. }
{ eapply expand_minimal_parse_of_production_beq; [ .. | eassumption ].
rewrite drop_take, StringLike.drop_drop.
rewrite (plus_comm offset), Nat.sub_add_distr.
reflexivity. } }
Defined.
Local Opaque dec_in_helper.
Lemma parse_production'_helper {offset len0_minus_len it its} (pf : length (substring offset (len0 - len0_minus_len) str) <= len0)
: dec {n0 : nat &
(minimal_parse_of_item (G := G) len0 valid (take n0 (substring offset (len0 - len0_minus_len) str)) it *
minimal_parse_of_production (G := G) len0 valid (drop n0 (substring offset (len0 - len0_minus_len) str)) its)%type}
-> dec (minimal_parse_of_production (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (it :: its)).
Proof.
intros [H|H]; [ left; destruct H as [n [??]] | right; intro p; apply H; clear H ].
{ econstructor; eassumption. }
{ clear -p; abstract (inversion p; subst; eexists; split; eassumption). }
Defined.
Lemma minus_le {x y z} (H : x <= z) : x - y <= z.
Proof. omega. Qed.
Lemma eq_le_trans {x y z} (H : x = y) (H' : y <= z) : x <= z.
Proof. subst; assumption. Defined.
Lemma min_le_r {x y z} (H : y <= z) : min x y <= z.
Proof. apply Min.min_case_strong; omega. Qed.
Lemma lift_le {offset len n length_str} (H : len = 0 \/ offset + len <= length_str)
: len - n = 0 \/ offset + n + (len - n) <= length_str.
Proof.
destruct H;
[ left; subst
| destruct (le_lt_dec n len); [ right | left ] ];
omega.
Qed.
Lemma lift_le_min {offset n len length_str} (H : len = 0 \/ offset + len <= length_str)
: min n len = 0 \/ offset + min n len <= length_str.
Proof.
apply Min.min_case_strong; [ | intro; assumption ].
destruct H; subst; [ left | right ]; omega.
Qed.
Lemma lift_parse_prod {str' offset len0_minus_len a it its}
(H : (minimal_parse_of_item
(G := G)
len0 valid
(substring offset (len0 - max (len0 - a) len0_minus_len) str') it *
minimal_parse_of_production
(G := G)
len0 valid
(substring (offset + a) (len0 - (len0_minus_len + a)) str') its)%type)
: minimal_parse_of_item
(G := G)
len0 valid
(take a (substring offset (len0 - len0_minus_len) str')) it *
minimal_parse_of_production
(G := G)
len0 valid
(drop a (substring offset (len0 - len0_minus_len) str')) its.
Proof.
destruct H as [pi pp]; split.
{ eapply expand_minimal_parse_of_item_beq; [ | eassumption ].
rewrite take_take, <- Nat.sub_min_distr_l, sub_twice.
rewrite (Min.min_comm len0), <- !Min.min_assoc, min_minus_r.
reflexivity. }
{ eapply expand_minimal_parse_of_production_beq; [ | eassumption ].
rewrite drop_take, StringLike.drop_drop, (plus_comm a offset), Nat.sub_add_distr.
reflexivity. }
Defined.
Local Ltac parse_production'_for_t' :=
idtac;
match goal with
| [ H : (beq_nat _ _) = true |- _ ] => apply EqNat.beq_nat_true in H
| _ => progress subst
| _ => solve [ constructor; assumption
| constructor;
rewrite substring_length; apply Min.min_case_strong; omega ]
| [ H : minimal_parse_of_production _ _ _ nil |- _ ] => (inversion H; clear H)
| [ H : minimal_parse_of_production _ _ _ (_::_) |- _ ] => (inversion H; clear H)
| [ H : ?x = 0, H' : context[?x] |- _ ] => rewrite H in H'
| _ => progress simpl in *
| _ => discriminate
| [ H : forall x, (_ * _)%type -> _ |- _ ] => specialize (fun x y z => H x (y, z))
| _ => solve [ eauto with nocore ]
| _ => solve [ apply Min.min_case_strong; omega ]
| _ => omega
| [ H : or _ _ |- _ ] => let H0 := fresh in destruct H as [H0|H0]; try clear H
| [ H : length (substring _ _ _) = 0 |- _ ] => rewrite substring_length in H
| [ H : context[min] |- _ ] => rewrite Min.min_l in H by omega
| [ H : context[min] |- _ ] => rewrite Min.min_r in H by omega
| [ H : _ |- _ ] => rewrite Nat.add_sub in H
end.
Local Ltac parse_production'_for_t := repeat parse_production'_for_t'.
Definition full_production_carrierT_reachableT (prod_idx : production_carrierT)
:= { nt : _
& { prefix_count : _
& { pre_prod_idx : _
& (List.In nt (Valid_nonterminals G)
* (apply_n prefix_count production_tl pre_prod_idx = prod_idx)
* List.InT pre_prod_idx (nonterminal_to_production (of_nonterminal nt)))%type } } }.
Lemma production_reachable_convert idx p
(H : to_production idx = p)
(H' : full_production_carrierT_reachableT idx)
: production_is_reachable G p.
Proof.
subst.
destruct H' as [nt H']; exists nt.
destruct H' as [count [idx' [[Hvalid H0] H1]]]; subst.
erewrite <- nonterminal_to_production_correct by assumption.
induction (nonterminal_to_production (of_nonterminal nt)) as [|x xs IHxs]; simpl in *.
{ destruct_head False. }
{ destruct_head or; destruct_head sum; subst; specialize_by assumption.
{ clear IHxs.
induction count as [|count IHcount]; simpl.
{ eexists nil; simpl.
split; [ assumption | left; reflexivity ]. }
{ rewrite apply_n_commute, production_tl_correct.
destruct IHcount as [prefix IHcount].
match goal with
| [ |- context[_ ++ tl ?ls] ]
=> exists (match ls with
| nil => prefix
| x::_ => prefix ++ [x]
end);
destruct ls eqn:Heq; simpl in *
end;
rewrite ?app_nil_r, <- ?app_assoc in IHcount;
rewrite ?app_nil_r, <- ?app_assoc;
assumption. } }
{ destruct IHxs as [prefix [H0 H1]].
exists prefix.
split; [ assumption | right; assumption ]. } }
Qed.
Lemma full_production_carrierT_reachableT_tl {idx}
(H : full_production_carrierT_reachableT idx)
: full_production_carrierT_reachableT (production_tl idx).
Proof.
destruct H as [nt H]; exists nt.
destruct H as [count H]; exists (S count).
destruct H as [idx' H]; exists idx'.
destruct_head and; destruct_head Datatypes.prod; simpl; repeat split; try assumption.
rewrite apply_n_commute; apply f_equal; assumption.
Qed.
Lemma substring_length_le_helper {offset len0_minus_len}
: length (substring offset (len0 - len0_minus_len) str) <= len0.
Proof.
rewrite substring_length; apply Min.min_case_strong; omega.
Qed.
Lemma Hlen_sub_more {offset n len0_minus_len}
: len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str
-> len0 - max (len0 - n) len0_minus_len = 0 \/
offset + (len0 - max (len0 - n) len0_minus_len) <= length str.
Proof.
clear; intros [Hlen|Hlen]; [ left | right ]; apply Max.max_case_strong; omega.
Qed.
Lemma Hlen_sub_some {n len0_minus_len offset}
: len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str
-> len0 - max (len0 - n) len0_minus_len <= len0.
Proof.
apply Max.max_case_strong; omega.
Qed.
Lemma Hlen_sub_helper {offset n len0_minus_len}
: len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str
-> len0 - (len0_minus_len + n) = 0 \/
offset + n + (len0 - (len0_minus_len + n)) <= length str.
Proof.
rewrite Nat.sub_add_distr.
intros [Hlen|Hlen]; try solve [ left; omega | right; omega ].
destruct (Compare_dec.le_dec n (len0 - len0_minus_len));
solve [ left; omega | right; omega ].
Qed.
(** To match a [production], we must match all of its items.
But we may do so on any particular split. *)
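(** For instance, with a two-item production [it :: its] and a remaining substring of
          length 2, the candidate splits returned by [splits] are tried in turn: a split of
          [1] hands the first character to [parse_item'] for [it] and the second character to
          the recursive call for [its], while other splits are rejected by the corresponding
          sub-parses. *)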
Definition parse_production'_for
(splits : production_carrierT -> String -> nat -> nat -> list nat)
(Hsplits : forall offset len0_minus_len it its idx pf',
(len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str
-> full_production_carrierT_reachableT idx
-> production_carrier_valid idx
-> to_production idx = it::its
-> split_list_completeT_for (len0 := len0) (G := G) (valid := valid) it its (substring offset (len0 - len0_minus_len) str) pf' (splits idx str offset (len0 - len0_minus_len)))
(offset len0_minus_len : nat)
(Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prod_idx : production_carrierT)
(Hreachable : full_production_carrierT_reachableT prod_idx)
(Hvalid : production_carrier_valid prod_idx)
: dec (minimal_parse_of_production (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_production prod_idx)).
Proof.
revert offset len0_minus_len Hlen.
refine
(list_rect
(fun ps =>
forall (idx : production_carrierT)
(Hreachable : full_production_carrierT_reachableT idx)
(Hvalid : production_carrier_valid idx)
(Hidx : to_production idx = ps)
(offset len0_minus_len : nat)
(Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str),
dec (minimal_parse_of_production (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) ps))
((** 0-length production, only accept empty *)
fun idx Hidx Hreachable Hvalid offset len0_minus_len Hlen
=> match Utils.dec (beq_nat (len0 - len0_minus_len) 0) with
| left H => inl _
| right H => inr (fun p => _)
end)
(fun it its parse_production' idx Hreachable Hvalid Hidx offset len0_minus_len Hlen
=> parse_production'_helper
substring_length_le_helper
(let parse_item := (fun n => parse_item' offset (max (len0 - n) len0_minus_len) (Hlen_sub_more Hlen) (parse_nonterminal offset (max (len0 - n) len0_minus_len) (Hlen_sub_more Hlen)) it) in
let parse_production := (fun n : nat => parse_production' (production_tl idx) (full_production_carrierT_reachableT_tl Hreachable) (production_tl_valid _ Hvalid) (eq_trans (production_tl_correct _) (f_equal (@tl _) Hidx)) (offset + n) (len0_minus_len + n) (Hlen_sub_helper Hlen)) in
match dec_In
(fun n => dec_prod (parse_item n) (parse_production n))
(splits idx str offset (len0 - len0_minus_len))
with
| inl p => inl (existT _ (projT1 p) (lift_parse_prod (snd (projT2 p))))
| inr p
=> let H := (_ : split_list_completeT_for (G := G) (len0 := len0) (valid := valid) it its (substring offset (len0 - len0_minus_len) str) substring_length_le_helper (splits idx str offset (len0 - len0_minus_len))) in
inr (fun p' => p (fst (dec_in_helper Hlen) (H p')))
end))
(to_production prod_idx)
prod_idx
Hreachable
Hvalid
eq_refl);
[ clear parse_nonterminal Hsplits splits rdata cdata
| clear parse_nonterminal Hsplits splits rdata cdata
| clear parse_item parse_production ];
abstract parse_production'_for_t.
Defined.
Definition parse_production'_for_correct
(parse_nonterminal'
: forall (offset len0_minus_len : nat) (nt : nonterminal_carrierT),
parse_nt_T)
(parse_nonterminal_eq
: forall offset len0_minus_len Hlen nt,
is_valid_nonterminal initial_nonterminals_data nt = true
-> parse_nt_is_correct (substring offset (len0 - len0_minus_len) str) nt (@parse_nonterminal offset len0_minus_len Hlen nt) (parse_nonterminal' offset len0_minus_len nt))
(splits : production_carrierT -> String -> nat -> nat -> list nat)
(Hsplits : forall offset len0_minus_len it its idx pf',
len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str
-> full_production_carrierT_reachableT idx
-> production_carrier_valid idx
-> to_production idx = it::its
-> split_list_completeT_for (len0 := len0) (G := G) (valid := valid) it its (substring offset (len0 - len0_minus_len) str) pf' (splits idx str offset (len0 - len0_minus_len)))
(offset len0_minus_len z : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prod_idx : production_carrierT)
(Hreachable : full_production_carrierT_reachableT prod_idx)
(Hvalid : production_carrier_valid prod_idx)
: parse_production_is_correct (substring offset (len0 - len0_minus_len) str) prod_idx (parse_production'_for splits Hsplits offset len0_minus_len Hlen Hreachable Hvalid) (GenericRecognizer.parse_production'_for (len0 := len0) str parse_nonterminal' splits offset len0_minus_len prod_idx).
Proof.
eq_t; eq_list_rect; repeat eq_t'; [].
expand_onceL; repeat eq_t'; [].
expand_onceL; eq_list_rect_fold_right_orb; repeat eq_t'; [].
apply ret_orb_production_is_correct; repeat eq_t'; [].
eapply ret_production_cons_is_correct; repeat eq_t'.
Qed.
Lemma split_list_completeT_production_is_reachable
{it its offset len pf splits idx}
(Hlen : len = 0 \/ offset + len <= length str)
(H : split_list_completeT (G := G) splits)
(Hreachable : full_production_carrierT_reachableT idx)
(Hvalid : production_carrier_valid idx)
(Heq : to_production idx = it::its)
: split_list_completeT_for (G := G) (len0 := len0) (valid := valid) it its (substring offset len str) pf (splits idx str offset len).
Proof.
specialize (fun nt Hvalid => H len0 valid str offset len pf nt Hvalid Hlen).
hnf in Hreachable.
destruct Hreachable as [nt [count [idx' [[Hr0 Hr1] Hr2]]]].
specialize (H nt).
erewrite <- nonterminal_to_production_correct in H by assumption.
apply initial_nonterminals_correct in Hr0.
specialize_by assumption.
subst.
generalize dependent (nonterminal_to_production (of_nonterminal nt)).
intro p; induction p as [|x xs IHxs]; simpl.
{ intros ? []. }
{ intros H [H'|H']; subst;
destruct_head prod;
specialize_by assumption; trivial; [].
clear dependent xs.
generalize dependent idx'.
induction count as [|count IHcount]; simpl in *; intros.
{ repeat match goal with
| [ H : ?x = _::_, H' : context[match ?x with _ => _ end] |- _ ] => rewrite H in H'
| [ H : _ |- _ ] => apply Forall_tails_id in H
| _ => solve [ eauto with nocore ]
end. }
{ specialize (IHcount (production_tl idx')).
specialize_by assumption.
rewrite production_tl_correct in IHcount.
apply IHcount; clear IHcount.
destruct (to_production idx');
simpl in *; destruct_head prod; trivial. } }
Qed.
Definition parse_production'
(offset len0_minus_len : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prod_idx : production_carrierT)
(Hreachable : full_production_carrierT_reachableT prod_idx)
(Hvalid : production_carrier_valid prod_idx)
: dec (minimal_parse_of_production (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_production prod_idx)).
Proof.
refine (parse_production'_for _ _ _ _ Hlen Hreachable Hvalid).
intros; eapply split_list_completeT_production_is_reachable; try eassumption.
eapply split_string_for_production_complete.
Defined.
Definition parse_production'_correct
(parse_nonterminal'
: forall (offset len0_minus_len : nat) (nt : nonterminal_carrierT),
parse_nt_T)
(parse_nonterminal_eq
: forall offset len0_minus_len Hlen nt,
is_valid_nonterminal initial_nonterminals_data nt = true
-> parse_nt_is_correct (substring offset (len0 - len0_minus_len) str) nt (@parse_nonterminal offset len0_minus_len Hlen nt) (parse_nonterminal' offset len0_minus_len nt))
(offset len0_minus_len : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prod_idx : production_carrierT)
(Hreachable : full_production_carrierT_reachableT prod_idx)
(Hvalid : production_carrier_valid prod_idx)
: parse_production_is_correct (substring offset (len0 - len0_minus_len) str) prod_idx (parse_production' offset len0_minus_len Hlen Hreachable Hvalid) (GenericRecognizer.parse_production' (len0 := len0) str parse_nonterminal' offset len0_minus_len prod_idx).
Proof.
apply parse_production'_for_correct; try assumption.
Qed.
End production.
Hint Resolve parse_production'_correct : generic_parser_correctness.
Section productions.
Context {len0 valid}
(parse_nonterminal'
: forall (offset len0_minus_len : nat)
(nt : nonterminal_carrierT),
parse_nt_T)
(parse_nonterminal
: forall (offset len0_minus_len : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(nt : nonterminal_carrierT),
dec (minimal_parse_of_nonterminal (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (to_nonterminal nt)))
(Hmatches
: forall (offset len0_minus_len : nat)
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(nt : nonterminal_carrierT)
(Hvalid : is_valid_nonterminal initial_nonterminals_data nt = true),
parse_nt_is_correct (substring offset (len0 - len0_minus_len) str) nt (parse_nonterminal offset len0_minus_len Hlen nt) (parse_nonterminal' offset len0_minus_len nt))
(offset len0_minus_len : nat).
Definition productions_is_reachable (prods : productions Char)
:= { nt : _ & { prefix : _ | In nt (Valid_nonterminals G) /\ prefix ++ prods = Lookup G nt } }.
Lemma hd_productions_is_reachable (p : production Char) (ps : productions Char) (H : productions_is_reachable (p :: ps))
: production_is_reachable G p.
Proof.
destruct H as [nt H]; exists nt.
eexists nil; simpl.
destruct H as [prefix [? H]]; split; try assumption; [].
rewrite <- H; clear.
induction prefix as [|x xs IHxs]; simpl.
{ left; reflexivity. }
{ right; assumption. }
Qed.
Local Ltac t_prods_fin :=
try solve
[ eassumption
| idtac;
match goal with
| [ p : _ |- _ ] => clear -p; abstract inversion p
end
| repeat
match goal with
| [ Hreachable : productions_is_reachable (?p :: ?ps)
|- productions_is_reachable ?ps ]
=> exists (projT1 Hreachable); destruct Hreachable as [nt Hreachable]; simpl
| [ Hreachable : productions_is_reachable (?p :: ?ps)
|- full_production_carrierT_reachableT _ ]
=> exists (projT1 Hreachable); destruct Hreachable as [nt Hreachable]; simpl
| [ Hreachable : { prefix : _ | ?V /\ prefix ++ ?p::?ps = ?k }
|- { prefix : _ | ?V /\ prefix ++ ?ps = ?k } ]
=> exists (proj1_sig Hreachable ++ [p]); destruct Hreachable as [prefix [? Hreachable]]; split; [ assumption | simpl ]
| [ H : ?x ++ ?y::?z = ?k |- (?x ++ [?y]) ++ ?z = ?k ]
=> clear -H; abstract (rewrite <- app_assoc; assumption)
| [ |- { prefix : _ & (_ * _)%type } ]
=> eexists nil; simpl; split
| [ H : { x : _ | ?k /\ _ } |- ?k ] => destruct H as [? [? ?]]; assumption
| [ H : { prefix : _ | _ /\ prefix ++ ?p :: ?ps = ?k } |- InT ?p ?k ]
=> let prefix' := fresh "prefix" in
destruct H as [prefix' [? H]]; clear -prefix' H;
generalize dependent k; intros; subst;
induction prefix'; simpl in *
| [ |- ((?x = ?x) + _)%type ] => left; reflexivity
| [ |- (_ + ?k)%type ] => right; assumption
| [ H0 : minimal_parse_of_production _ _ _ ?p -> False,
H1 : minimal_parse_of _ _ _ ?ps -> False,
H2 : minimal_parse_of _ _ _ (?p :: ?ps)
|- False ]
=> clear -H0 H1 H2; abstract (inversion p'; subst; eauto with nocore)
| _ => assumption
| _ => progress simpl in *
end ].
Definition full_productions_carrierT_reachableT (prods_idx : list production_carrierT)
:= { nt : _
& { prefix : _
| List.In nt (Valid_nonterminals G)
/\ prefix ++ prods_idx = nonterminal_to_production (of_nonterminal nt) } }.
Lemma invert_full_productions_carrierT_reachableT p ps
(H : full_productions_carrierT_reachableT (p::ps))
: (full_production_carrierT_reachableT p * full_productions_carrierT_reachableT ps)%type.
Proof.
destruct H as [nt [prefix [H0 H1]]];
split; exists nt;
[ exists 0; exists p; simpl; repeat split; try assumption
| exists (prefix ++ [p]); rewrite <- app_assoc; simpl; split; assumption ].
rewrite <- H1.
clear.
induction prefix; simpl in *; [ left | right ]; trivial.
Qed.
Definition parse_productions'
(Hlen : len0 - len0_minus_len = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prods : list production_carrierT)
(Hreachable : full_productions_carrierT_reachableT prods)
(Hvalid : List.Forall production_carrier_valid prods)
: dec (minimal_parse_of (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (List.map to_production prods)).
Proof.
revert prods Hreachable Hvalid.
refine (list_rect
(fun prods
=> full_productions_carrierT_reachableT prods
-> List.Forall production_carrier_valid prods
-> dec (minimal_parse_of (G := G) len0 valid (substring offset (len0 - len0_minus_len) str) (List.map to_production prods)))
(fun _ _ => inr (fun p => _))
(fun p ps IHps Hreachable Hvalid
=> match parse_production' parse_nonterminal offset len0_minus_len Hlen _ _ with
| inl H => inl (MinParseHead _ _)
| inr H
=> match IHps _ _ with
| inl H' => inl (MinParseTail _ _)
| inr H' => inr (fun p' => _)
end
end));
t_prods_fin; t_prods_fin;
try solve [ eapply invert_full_productions_carrierT_reachableT; eassumption
| eapply (@Forall_inv_iff _ production_carrier_valid); eassumption ].
Defined.
Lemma parse_productions'_correct
(Hlen : (len0 - len0_minus_len) = 0 \/ offset + (len0 - len0_minus_len) <= length str)
(prods : list production_carrierT)
(Hreachable : full_productions_carrierT_reachableT prods)
(Hvalid : List.Forall production_carrier_valid prods)
: parse_productions_is_correct
(substring offset (len0 - len0_minus_len) str) prods
(@parse_productions' Hlen prods Hreachable Hvalid)
(GenericRecognizer.parse_productions' (len0 := len0) str parse_nonterminal' offset len0_minus_len prods).
Proof.
eq_t; eq_list_rect_fold_right_orb; repeat eq_t'.
Qed.
End productions.
Hint Resolve parse_productions'_correct : generic_parser_correctness.
Section nonterminals.
Section step.
Context {len0 valid_len}
(parse_nonterminal'
: forall (p : nat * nat),
prod_relation lt lt p (len0, valid_len)
-> forall (valid : nonterminals_listT)
(offset len : nat)
(pf : len <= fst p)
(nt : nonterminal_carrierT),
parse_nt_T)
(parse_nonterminal
: forall (p : nat * nat)
(pR : prod_relation lt lt p (len0, valid_len))
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= snd p)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= fst p)
(nt : nonterminal_carrierT),
dec (minimal_parse_of_nonterminal (G := G) (fst p) valid (substring offset len str) (to_nonterminal nt)))
(Hmatches
: forall (p : nat * nat)
(pR : prod_relation lt lt p (len0, valid_len))
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= snd p)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= fst p)
(nt : nonterminal_carrierT)
(Hvalid : is_valid_nonterminal initial_nonterminals_data nt = true),
parse_nt_is_correct
(substring offset len str) nt
(@parse_nonterminal p pR valid Hvalid_len offset len Hlen pf nt)
(@parse_nonterminal' p pR valid offset len pf nt)).
Let Hmatches'
: forall x y
(pR pR' : prod_relation lt lt (x, y) (len0, valid_len))
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= y)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= x)
(nt : nonterminal_carrierT)
(Hvalid : is_valid_nonterminal initial_nonterminals_data nt = true),
parse_nt_is_correct
(substring offset len str) nt
(@parse_nonterminal (x, y) pR valid Hvalid_len offset len Hlen pf nt)
(@parse_nonterminal' (x, y) pR' valid offset len pf nt).
Proof.
clear -Hmatches.
abstract (
unfold prod_relation, lt; simpl;
intros; destruct pR as [?|[? ?]], pR' as [?|[? ?]];
repeat first [ progress subst
| subst_le_proof
| subst_nat_eq_proof
| omega
| eapply (@Hmatches (_, _)); try eassumption ]
).
Qed.
Local Ltac p_step_t' :=
idtac;
match goal with
| _ => assumption
| _ => progress subst
| _ => progress specialize_by assumption
| _ => progress simpl in *
| [ |- pred ?x < ?x ] => is_var x; destruct x
| _ => omega
| _ => discriminate
| _ => congruence
| _ => progress destruct_head and
| [ H : andb _ _ = true |- _ ] => apply Bool.andb_true_iff in H
| [ H : is_true ?e, H' : context[?e] |- _ ] => rewrite H in H'
| [ H : context[andb _ true] |- _ ] => rewrite Bool.andb_true_r in H
| [ H : negb _ = false |- _ ] => apply Bool.negb_false_iff in H
| [ H : beq_nat _ _ = true |- _ ] => apply beq_nat_true in H
| [ H : context[beq_nat ?x 0] |- context[pred ?x] ] => is_var x; destruct x
| [ H : _ <= 0 |- _ ] => apply le_n_0_eq in H
| [ H : 0 = _ |- _ ] => symmetry in H
| [ H : nonterminals_length ?v = 0, H' : context[is_valid_nonterminal ?v ?nt] |- _ ]
=> rewrite nonterminals_length_zero in H' by assumption
| [ H : _ |- _ ] => rewrite of_to_nonterminal in H by assumption
| _ => rewrite of_to_nonterminal by assumption
| [ Hvalid : is_valid_nonterminal _ ?nt = true |- _ ]
=> is_var nt; unique pose proof (proj1 (initial_nonterminals_correct' _) Hvalid)
| [ |- context[Lookup ?G (to_nonterminal ?nt)] ]
=> is_var nt; rewrite <- nonterminal_to_production_correct by assumption
| [ H : context[Lookup ?G (to_nonterminal ?nt)] |- _ ]
=> is_var nt; rewrite <- nonterminal_to_production_correct in H by assumption
| [ H : is_valid_nonterminal ?valid ?nt = true
|- nonterminals_length (remove_nonterminal ?valid ?nt) <= _ ]
=> let H' := fresh in
assert (H' := remove_nonterminal_dec _ _ H);
hnf in H';
omega
| [ H : minimal_parse_of_nonterminal _ _ _ (to_nonterminal ?nt) |- _ ]
=> inversion H; clear H
| [ |- Forall _ _ ] => apply nonterminal_to_production_valid; assumption
| [ H : or _ _ |- _ ] => let H0 := fresh in destruct H as [H0|H0]; try clear H
| [ |- context[length (substring _ _ _)] ]
=> rewrite substring_length
| _ => apply Min.min_case_strong; omega
| [ H : ?x = 0 \/ ?T |- _ ]
=> destruct (Compare_dec.zerop x);
[ clear H | assert T by (destruct H; try assumption; omega); clear H ]
| [ |- context[min ?x ?y - ?y] ]
=> rewrite <- Nat.sub_min_distr_r, minus_diag, Min.min_0_r
| _ => rewrite Nat.add_sub
| _ => rewrite Min.min_r by omega
| _ => rewrite Min.min_l by omega
| [ H : context[length (substring _ 0 _)] |- _ ]
=> rewrite take_length in H
| [ H : context[length (substring _ _ _)] |- _ ]
=> rewrite substring_length, Min.min_r, Nat.add_sub in H by omega
| [ H : context[?x - (?x - _)] |- _ ] => rewrite sub_twice in H
| [ H : context[min ?x ?y] |- _ ] => rewrite (Min.min_r x y) in H by assumption
| [ H : context[min ?x ?y] |- _ ] => rewrite (Min.min_l x y) in H by assumption
| [ H : context[min ?x ?x] |- _ ] => rewrite Min.min_idempotent in H
| [ H : context[?x - ?x] |- _ ] => rewrite minus_diag in H
| [ H : context[?x - 0] |- _ ] => rewrite Nat.sub_0_r in H
end.
Local Ltac p_step := repeat p_step_t'.
Lemma Hlen_helper_sub_sub {len' len offset} (Hlen : len = 0 \/ offset + len <= length str)
: len' - (len' - len) = 0 \/ offset + (len' - (len' - len)) <= length str.
Proof.
clear -Hlen; omega.
Qed.
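    (** One step of the recognizer for a nonterminal: invalid nonterminals fail
        immediately; if the current substring is strictly shorter than [len0] we
        recurse with the full initial nonterminal set, and otherwise we may only
        recurse after removing the nonterminal from [valid] (failing if it has
        already been seen), so each recursive call decreases the lexicographic
        pair of the length bound and the size bound on [valid]. *)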
Definition parse_nonterminal_step
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= valid_len)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= len0)
(nt : nonterminal_carrierT)
: dec (minimal_parse_of_nonterminal (G := G) len0 valid (substring offset len str) (to_nonterminal nt)).
Proof.
destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data nt)) as [Hvalid|Hvalid];
[
| right; clear -rdata Hvalid Hlen; intro p;
abstract (
inversion p; subst; try omega;
solve_nonterminals_t;
congruence
) ].
refine (sumbool_rect (fun _ => _) (fun pf' => _) (fun pf' => _) (lt_dec len len0));
simpl;
[ (** [str] got smaller, so we reset the valid nonterminals list *)
destruct (@parse_productions'
len
initial_nonterminals_data
(fun offset len0_minus_len Hlen nt
=> @parse_nonterminal
(len, nonterminals_length initial_nonterminals_data)
(or_introl pf')
initial_nonterminals_data
(reflexivity _)
offset (len - len0_minus_len) Hlen (le_minus _ _) nt)
offset (len - len)
(Hlen_helper_sub_sub Hlen)
(nonterminal_to_production nt))
as [mp|nmp];
[ eexists _, nil; simpl; split;
[ apply initial_nonterminals_correct'; eassumption
| rewrite of_to_nonterminal by assumption; reflexivity ]
|
| left; apply MinParseNonTerminalStrLt
| right; intro mp ]
| ((** [str] didn't get smaller, so we cache the fact that we've hit this nonterminal already *)
refine (sumbool_rect
(fun _ => _)
(fun is_valid => _)
(fun is_valid => _)
(Sumbool.sumbool_of_bool (negb (EqNat.beq_nat valid_len 0) && is_valid_nonterminal valid nt)));
[ ((** It was valid, so we can remove it *)
edestruct (fun pf'' pf'''
=> @parse_productions'
len0
(remove_nonterminal valid nt)
(fun offset len0_minus_len Hlen
=> @parse_nonterminal
(len0, pred valid_len)
(or_intror (conj eq_refl pf''))
(remove_nonterminal valid nt)
pf''' offset (len0 - len0_minus_len)
Hlen (le_minus _ _))
offset (len0 - len)
(Hlen_helper_sub_sub Hlen)
(nonterminal_to_production nt))
as [mp|nmp];
[
|
| eexists _, nil; simpl; split;
[ apply initial_nonterminals_correct'; eassumption
| rewrite of_to_nonterminal by assumption; reflexivity ]
|
| left; apply MinParseNonTerminalStrEq
| right; intro mp ])
| ((** oops, we already saw this nonterminal in the past. ABORT! *)
simpl in *;
right; intro mp) ])
];
try first [ clear -is_valid; abstract p_step
| clear -Hlen pf'; abstract p_step
| clear -HSLP pf'; abstract p_step
| clear -HSLP Hlen pf pf'; abstract p_step
| clear -rdata Hvalid; abstract p_step
| clear -rdata Hvalid mp; abstract p_step
| clear -rdata Hvalid pf mp; abstract p_step
| clear -rdata Hvalid is_valid; abstract p_step
| clear -rdata Hvalid_len is_valid; abstract p_step
| clear -HSLP rdata Hvalid Hlen mp; abstract p_step
| clear -HSLP rdata Hvalid Hlen pf' mp nmp; abstract p_step
| clear -HSLP rdata Hvalid Hlen Hvalid_len is_valid pf' mp; abstract p_step ].
Defined.
Definition parse_nonterminal_step_correct
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= valid_len)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf pf' : len <= len0)
(nt : nonterminal_carrierT)
(Hvalid : is_valid_nonterminal initial_nonterminals_data nt = true)
: parse_nt_is_correct
(substring offset len str) nt
(@parse_nonterminal_step valid Hvalid_len offset len Hlen pf nt)
(GenericRecognizer.parse_nonterminal_step str parse_nonterminal' valid offset pf' nt).
Proof.
eq_t.
destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data nt)) as [Hvalid'|Hvalid']; simpl;
repeat eq_t'.
{ apply ret_nt_is_correct; try assumption; [].
replace len with (len - (len - len)) at 1 by omega.
eapply parse_productions'_correct;
repeat eq_t'. }
{ apply ret_nt_is_correct; try assumption; [].
replace len with (len0 - (len0 - len)) at 1 by omega.
match goal with
| [ |- context[?x <? ?y] ]
=> destruct (x <? y) eqn:?
end;
repeat eq_t'. }
Qed.
End step.
Section wf.
Definition parse_nonterminal_or_abort
: forall (p : nat * nat)
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= snd p)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= fst p)
(nt : nonterminal_carrierT),
dec (minimal_parse_of_nonterminal (G := G) (fst p) valid (substring offset len str) (to_nonterminal nt))
:= @Fix
(nat * nat)
_
(well_founded_prod_relation lt_wf lt_wf)
_
(fun sl => @parse_nonterminal_step (fst sl) (snd sl)).
Lemma parse_nonterminal_or_abort_correct
(p : nat * nat)
(valid : nonterminals_listT)
(Hvalid_len : nonterminals_length valid <= snd p)
(offset len : nat)
(Hlen : len = 0 \/ offset + len <= length str)
(pf : len <= fst p)
(nt : nonterminal_carrierT)
(Hvalid : is_valid_nonterminal initial_nonterminals_data nt)
: parse_nt_is_correct
(substring offset len str) nt
(@parse_nonterminal_or_abort p valid Hvalid_len offset len Hlen pf nt)
(GenericRecognizer.parse_nonterminal_or_abort str p valid offset pf nt).
Proof.
expand_once.
revert valid Hvalid_len offset len Hlen pf nt Hvalid.
match goal with
| [ |- context[Fix ?Wf _ _ ?p] ]
=> induction (Wf p) as [?? IH]; intros
end.
match goal with
| [ |- ?R ?x ?y ] => set (x' := x)
end.
rewrite Fix5_eq
by (intros; apply parse_nonterminal_step_ext; eauto with nocore);
subst x'.
destruct_head prod.
R_etransitivity_eq.
{ eapply parse_nonterminal_step_correct;
first [ intros; eapply IH; eassumption
| assumption ]. }
{ match goal with
| [ |- bool_of_sum ?x = bool_of_sum ?y ]
=> destruct x, y; try reflexivity; exfalso; eauto with nocore
end. }
Unshelve.
assumption.
assumption.
assumption.
assumption.
assumption.
Qed.
        Hint Resolve parse_nonterminal_or_abort_correct : generic_parser_correctness.
Definition parse_nonterminal'_substring
(nt : nonterminal_carrierT)
: dec (minimal_parse_of_nonterminal (G := G) (length str) initial_nonterminals_data (substring 0 (length str) str) (to_nonterminal nt)).
Proof.
destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data nt)) as [Hvalid|Hvalid].
{ eapply (@parse_nonterminal_or_abort (length str, nonterminals_length initial_nonterminals_data));
try first [ reflexivity | eassumption | right; reflexivity ]. }
{ right; intro p.
clear -Hvalid p rdata.
abstract (
inversion p; subst; try omega;
repeat match goal with
| [ H : is_true (is_valid_nonterminal initial_nonterminals_data (of_nonterminal _)) |- _ ]
=> apply initial_nonterminals_correct in H
| [ |- is_valid_nonterminal initial_nonterminals_data (of_nonterminal _) = true ]
=> apply initial_nonterminals_correct
| [ H : In (to_nonterminal _) (Valid_nonterminals ?G) |- _ ]
=> apply initial_nonterminals_correct' in H
| [ H : context[of_nonterminal (to_nonterminal _)] |- _ ]
=> rewrite of_to_nonterminal in H by assumption
| _ => congruence
| [ H : _ = false |- _ ] => apply Bool.not_true_iff_false in H; apply H; clear H
end
). }
Defined.
Definition parse_nonterminal'_substring_minus
(nt : nonterminal_carrierT)
: dec (minimal_parse_of_nonterminal (G := G) (length str) initial_nonterminals_data (substring 0 (length str - 0) str) (to_nonterminal nt)).
Proof.
destruct (parse_nonterminal'_substring nt) as [p|p]; [ left | right ];
rewrite <- minus_n_O;
exact p.
Defined.
Definition parse_nonterminal'
(nt : nonterminal_carrierT)
: dec (minimal_parse_of_nonterminal (G := G) (length str) initial_nonterminals_data str (to_nonterminal nt)).
Proof.
destruct (parse_nonterminal'_substring nt) as [p|np];
[ left | right; intro p; apply np; clear np ].
{ eapply expand_minimal_parse_of_nonterminal_beq; [ | eassumption ].
rewrite substring_correct3; reflexivity. }
{ eapply expand_minimal_parse_of_nonterminal_beq; [ | eassumption ].
rewrite substring_correct3; reflexivity. }
Defined.
Lemma parse_nonterminal'_substring_correct
(nt : nonterminal_carrierT)
: parse_nt_is_correct
str nt
(@parse_nonterminal'_substring nt)
(GenericRecognizer.parse_nonterminal' str nt).
Proof.
rewrite <- drop_0 at 1.
erewrite <- take_long at 1 by reflexivity.
rewrite drop_length, <- minus_n_O.
expand_once.
destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data nt)) as [H|H];
repeat eq_t'.
{ eapply (parse_nonterminal_or_abort_correct (_, _)); assumption. }
{ unfold GenericRecognizer.parse_nonterminal_or_abort.
rewrite Fix5_eq by (intros; apply parse_nonterminal_step_ext; assumption).
unfold GenericRecognizer.parse_nonterminal_step at 1.
simpl.
rewrite H, Bool.andb_false_r; simpl.
edestruct lt_dec; try omega; simpl.
repeat eq_t'. }
Qed.
Lemma parse_nonterminal'_substring_minus_correct
(nt : nonterminal_carrierT)
: parse_nt_is_correct
str nt
(@parse_nonterminal'_substring_minus nt)
(GenericRecognizer.parse_nonterminal' str nt).
Proof.
R_etransitivity_eq; [ eapply parse_nonterminal'_substring_correct | ].
unfold parse_nonterminal'_substring_minus.
edestruct parse_nonterminal'_substring;
destruct (minus_n_O (length str)); reflexivity.
Qed.
Lemma parse_nonterminal'_correct
(nt : nonterminal_carrierT)
: parse_nt_is_correct
str nt
(@parse_nonterminal' nt)
(GenericRecognizer.parse_nonterminal' str nt).
Proof.
R_etransitivity_eq.
{ eapply parse_nonterminal'_substring_correct. }
{ unfold parse_nonterminal'.
symmetry.
repeat eq_t'. }
Qed.
Definition parse_nonterminal
(nt : String.string)
: dec (minimal_parse_of_nonterminal (G := G) (length str) initial_nonterminals_data str nt).
Proof.
destruct (parse_nonterminal' (of_nonterminal nt)) as [p|p]; [ left | right ].
{ clear -p rdata.
abstract (
rewrite to_of_nonterminal in p; [ assumption | ];
inversion p; subst; try omega;
repeat match goal with
| _ => assumption
| [ H : is_true (is_valid_nonterminal initial_nonterminals_data (of_nonterminal _)) |- _ ]
=> apply initial_nonterminals_correct in H
| [ |- is_valid_nonterminal initial_nonterminals_data (of_nonterminal _) = true ]
=> apply initial_nonterminals_correct
| [ H : In (to_nonterminal _) (Valid_nonterminals ?G) |- _ ]
=> apply initial_nonterminals_correct' in H
| [ H : context[of_nonterminal (to_nonterminal _)] |- _ ]
=> rewrite of_to_nonterminal in H by assumption
end
). }
{ intro p'; apply p; clear p.
abstract (
rewrite to_of_nonterminal; [ assumption | ];
inversion p'; subst; try omega;
repeat match goal with
| _ => assumption
| [ H : is_true (is_valid_nonterminal initial_nonterminals_data (of_nonterminal _)) |- _ ]
=> apply initial_nonterminals_correct in H
| [ |- is_valid_nonterminal initial_nonterminals_data (of_nonterminal _) = true ]
=> apply initial_nonterminals_correct
| [ H : In (to_nonterminal _) (Valid_nonterminals ?G) |- _ ]
=> apply initial_nonterminals_correct' in H
| [ H : context[of_nonterminal (to_nonterminal _)] |- _ ]
=> rewrite of_to_nonterminal in H by assumption
end
). }
Defined.
Lemma parse_nonterminal_correct
(nt : String.string)
: parse_nt_is_correct
str (of_nonterminal nt)
(@parse_nonterminal nt)
(GenericRecognizer.parse_nonterminal str nt).
Proof.
expand_once.
repeat eq_t'.
eapply parse_nonterminal'_correct.
Qed.
Lemma parse_nonterminal_invalid_none
nt (H : is_valid_nonterminal initial_nonterminals_data (of_nonterminal nt) = false)
: @parse_nonterminal nt = false :> bool.
Proof.
unfold parse_nonterminal; repeat eq_t'.
unfold parse_nonterminal'; repeat eq_t'.
unfold parse_nonterminal'_substring; repeat eq_t'.
congruence.
Qed.
Lemma parse_nonterminal_invalid_none'
nt (H : ~List.In nt (Valid_nonterminals G))
: @parse_nonterminal nt = false :> bool.
Proof.
apply parse_nonterminal_invalid_none.
destruct (is_valid_nonterminal initial_nonterminals_data (of_nonterminal nt)) eqn:H'; trivial.
apply initial_nonterminals_correct in H'.
tauto.
Qed.
Lemma parse_nonterminal_correct'
(nt : nonterminal_carrierT)
: parse_nt_is_correct
str nt
(@parse_nonterminal (to_nonterminal nt))
(GenericRecognizer.parse_nonterminal str (to_nonterminal nt)).
Proof.
expand_once.
repeat eq_t'.
destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data nt)) as [H|H].
{ rewrite of_to_nonterminal by assumption.
apply parse_nonterminal'_correct. }
{ destruct (Utils.dec (is_valid_nonterminal initial_nonterminals_data (of_nonterminal (to_nonterminal nt)))) as [H'|H'].
{ apply initial_nonterminals_correct, initial_nonterminals_correct' in H'.
congruence. }
{ unfold GenericRecognizer.parse_nonterminal'.
unfold GenericRecognizer.parse_nonterminal_or_abort.
rewrite Fix5_eq by (intros; apply parse_nonterminal_step_ext; assumption).
unfold GenericRecognizer.parse_nonterminal_step at 1.
simpl.
rewrite H', Bool.andb_false_r; simpl.
edestruct lt_dec; try omega; simpl.
repeat eq_t'.
R_etransitivity_eq; [ eapply ret_nt_invalid_is_correct | ].
symmetry.
unfold parse_nonterminal'; repeat eq_t'.
unfold parse_nonterminal'_substring; repeat eq_t'.
congruence. } }
Qed.
End wf.
End nonterminals.
End parts.
Local Ltac str_to_substring :=
rewrite <- drop_0 at 1;
erewrite <- take_long at 1 by reflexivity;
rewrite drop_length(*, <- minus_n_O at 1*).
Local Ltac substring_to_str :=
repeat rewrite <- minus_n_O at 1; rewrite drop_0, take_long at 1 by reflexivity.
Lemma Hlen0 {lenstr} : lenstr - 0 = 0 \/ 0 + (lenstr - 0) <= lenstr.
Proof. omega. Qed.
Section item.
Context (it : item Char).
Definition parse_item_substring : dec _
:= parse_item' (len0 := length str) 0 0 Hlen0 (@parse_nonterminal'_substring_minus) it.
Definition parse_item
: dec (minimal_parse_of_item (G := G) (length str) initial_nonterminals_data str it).
Proof.
destruct parse_item_substring as [p|np];
[ left | right; intro p; apply np; clear np ];
(eapply expand_minimal_parse_of_item_beq; [ | eassumption ]);
clear -HSLP; abstract (rewrite <- minus_n_O, substring_correct3'; reflexivity).
Defined.
Lemma parse_item_substring_correct
: parse_item_is_correct
str it
parse_item_substring
(GenericRecognizer.parse_item str it).
Proof.
str_to_substring.
unfold GenericRecognizer.parse_item.
rewrite (minus_n_O (length str)) at 6;
apply parse_item'_all_correct; intro; substring_to_str.
apply parse_nonterminal'_substring_minus_correct.
Qed.
Lemma parse_item_correct
: parse_item_is_correct
str it
parse_item
(GenericRecognizer.parse_item str it).
Proof.
R_etransitivity_eq.
{ eapply parse_item_substring_correct. }
{ unfold parse_item;
destruct parse_item_substring; reflexivity. }
Qed.
End item.
Section production.
Context (p : production_carrierT)
(Hreachable : full_production_carrierT_reachableT p)
(Hvalid : production_carrier_valid p).
Definition parse_production_substring_minus
: dec (minimal_parse_of_production (G := G) (length str) initial_nonterminals_data (substring 0 (length str - 0) str) (to_production p)).
Proof.
eapply parse_production'; [ | right; clear; apply le_minus | reflexivity.. | assumption | assumption ].
intros.
eapply (@parse_nonterminal_or_abort (length str, _));
simpl; try reflexivity; subst; try assumption; apply le_minus.
Defined.
Definition parse_production_substring
: dec (minimal_parse_of_production (G := G) (length str) initial_nonterminals_data (substring 0 (length str) str) (to_production p)).
Proof.
destruct parse_production_substring_minus as [p'|p']; [ left | right ];
rewrite <- minus_n_O in p';
exact p'.
Defined.
Lemma parse_production_substring_minus_correct
: parse_production_is_correct
str p
parse_production_substring_minus
(GenericRecognizer.parse_production str p).
Proof.
str_to_substring.
unfold GenericRecognizer.parse_production, parse_production_substring.
apply parse_production'_correct.
simpl; intros.
eapply (parse_nonterminal_or_abort_correct (_, _)).
assumption.
Qed.
Definition parse_production
: dec (minimal_parse_of_production (G := G) (length str) initial_nonterminals_data str (to_production p)).
Proof.
destruct parse_production_substring as [p'|np];
[ left | right; intro p'; apply np; clear np ];
(eapply expand_minimal_parse_of_production_beq; [ | eassumption ]);
clear -HSLP; abstract (rewrite substring_correct3'; reflexivity).
Defined.
Lemma parse_production_substring_correct
: parse_production_is_correct
str p
parse_production_substring
(GenericRecognizer.parse_production str p).
Proof.
R_etransitivity_eq; [ eapply parse_production_substring_minus_correct | ].
unfold parse_production_substring.
destruct parse_production_substring_minus;
destruct (minus_n_O (length str)); reflexivity.
Qed.
Lemma parse_production_correct
: parse_production_is_correct
str p
parse_production
(GenericRecognizer.parse_production str p).
Proof.
R_etransitivity_eq.
{ eapply parse_production_substring_correct. }
{ unfold parse_production.
destruct parse_production_substring; reflexivity. }
Qed.
End production.
Section productions.
Context (ps : list production_carrierT)
(Hreachable : full_productions_carrierT_reachableT ps)
(Hvalid : List.Forall production_carrier_valid ps).
Definition parse_productions_substring_minus
: dec (minimal_parse_of (G := G) (length str) initial_nonterminals_data (substring 0 (length str - 0) str) (List.map to_production ps)).
Proof.
eapply parse_productions'; [ | right; apply le_minus | reflexivity.. | assumption | assumption ].
intros.
eapply (@parse_nonterminal_or_abort (length str, _));
simpl; try reflexivity; subst; try apply le_minus; assumption.
Defined.
Definition parse_productions_substring
: dec (minimal_parse_of (G := G) (length str) initial_nonterminals_data (substring 0 (length str) str) (List.map to_production ps)).
Proof.
destruct parse_productions_substring_minus as [p'|p']; [ left | right ];
rewrite <- minus_n_O in p';
exact p'.
Defined.
Definition parse_productions
: dec (minimal_parse_of (G := G) (length str) initial_nonterminals_data str (List.map to_production ps)).
Proof.
destruct parse_productions_substring as [p'|np];
[ left | right; intro p'; apply np; clear np ];
(eapply expand_minimal_parse_of_beq; [ | eassumption ]);
clear -HSLP; abstract (rewrite substring_correct3'; reflexivity).
Defined.
Lemma parse_productions_substring_minus_correct
: parse_productions_is_correct
str ps
parse_productions_substring_minus
(GenericRecognizer.parse_productions str ps).
Proof.
str_to_substring; apply parse_productions'_correct; simpl; intros.
eapply (parse_nonterminal_or_abort_correct (_, _)).
assumption.
Qed.
Lemma parse_productions_substring_correct
: parse_productions_is_correct
str ps
parse_productions_substring
(GenericRecognizer.parse_productions str ps).
Proof.
R_etransitivity_eq; [ eapply parse_productions_substring_minus_correct | ].
unfold parse_productions_substring.
destruct parse_productions_substring_minus;
destruct (minus_n_O (length str)); reflexivity.
Qed.
Lemma parse_productions_correct
: parse_productions_is_correct
str ps
parse_productions
(GenericRecognizer.parse_productions str ps).
Proof.
R_etransitivity_eq.
{ apply parse_productions_substring_correct. }
{ unfold parse_productions.
destruct parse_productions_substring; reflexivity. }
Qed.
End productions.
End min.
End recursive_descent_parser.
|
{"author": "mit-plv", "repo": "fiat", "sha": "4c78284c3a88db32051bdba79202f40c645ffb7f", "save_path": "github-repos/coq/mit-plv-fiat", "path": "github-repos/coq/mit-plv-fiat/fiat-4c78284c3a88db32051bdba79202f40c645ffb7f/src/Parsers/GenericRecognizerMin.v"}
|
# -*- coding: utf-8 -*-
# This file as well as the whole tsfresh package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), Blue Yonder Gmbh, 2016
from unittest import TestCase
import pandas as pd
import numpy as np
from tsfresh.transformers.feature_selector import FeatureSelector
class FeatureSelectorTestCase(TestCase):
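    """Tests for FeatureSelector: it must refuse to transform before fitting,
    keep only features statistically related to the target, and accept both
    pandas and plain numpy inputs."""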
def setUp(self):
np.random.seed(0)
def test_not_fitted(self):
selector = FeatureSelector()
X = pd.DataFrame()
self.assertRaises(RuntimeError, selector.transform, X)
def test_extract_relevant_features(self):
selector = FeatureSelector()
y = pd.Series(np.random.binomial(1, 0.5, 1000))
X = pd.DataFrame(index=range(1000))
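        # rel1..rel5 below are built from y (a noisy copy and linear, quadratic
        # and square-root transforms plus noise) while the irr* columns are
        # constant or independent noise; the assertions expect rel1, rel3, rel4
        # and rel5 to be selected.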
z = y - np.random.binomial(1, 0.1, 1000) + np.random.binomial(1, 0.1, 1000)
z[z == -1] = 0
z[z == 2] = 1
X["rel1"] = z
X["rel2"] = y * np.random.normal(0, 1, 1000) + np.random.normal(0, 1, 1000)
X["rel3"] = y + np.random.normal(0, 1, 1000)
X["rel4"] = y ** 2 + np.random.normal(0, 1, 1000)
X["rel5"] = np.sqrt(y) + np.random.binomial(2, 0.1, 1000)
X["irr_constant"] = 1.113344
X["irr1"] = np.random.normal(0, 1, 1000)
X["irr2"] = np.random.poisson(1, 1000)
X["irr3"] = np.random.binomial(1, 0.3, 1000)
X["irr4"] = np.random.normal(0, 1, 1000)
X["irr5"] = np.random.poisson(1, 1000)
X["irr6"] = np.random.binomial(1, 0.3, 1000)
X["irr7"] = np.random.normal(0, 1, 1000)
X["irr8"] = np.random.poisson(1, 1000)
X["irr9"] = np.random.binomial(1, 0.3, 1000)
        returned_selector = selector.fit(X, y)
        self.assertIs(returned_selector, selector)
self.assertEqual(sorted(list(selector.relevant_features.index)), ["rel1", "rel3", "rel4", "rel5"])
new_X = X.copy()
selected_X = selector.transform(new_X)
self.assertEqual(sorted(list(selector.relevant_features.index)), sorted(list(selected_X.columns)))
def test_nothing_relevant(self):
selector = FeatureSelector()
y = pd.Series(np.random.binomial(1, 0.5, 1000))
X = pd.DataFrame(index=range(1000))
X["irr1"] = np.random.normal(0, 1, 1000)
X["irr2"] = np.random.normal(2, 1, 1000)
selector.fit(X, y)
transformed_X = selector.transform(X.copy())
self.assertEqual(list(transformed_X.columns), [])
self.assertEqual(list(transformed_X.index), list(X.index))
def test_with_numpy_array(self):
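        # Fitting/transforming should give the same selection whether the data
        # is passed as pandas objects or as plain numpy arrays.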
selector = FeatureSelector()
y = pd.Series(np.random.binomial(1, 0.5, 1000))
X = pd.DataFrame(index=range(1000))
X["irr1"] = np.random.normal(0, 1, 1000)
X["rel1"] = y
y_numpy = y.values
        X_numpy = X.values  # ndarray input; DataFrame.as_matrix() is deprecated in favour of .values
selector.fit(X, y)
selected_X = selector.transform(X)
selector.fit(X_numpy, y_numpy)
selected_X_numpy = selector.transform(X_numpy)
self.assertTrue((selected_X_numpy == selected_X.values).all())
        self.assertEqual(selected_X_numpy.shape, (1000, 1))  # 1000 rows, one selected feature column
|
{"hexsha": "f748e51dcd1ea370eb197cbae823f26353bb34ee", "size": 3152, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/transformers/test_feature_selector.py", "max_stars_repo_name": "awesome-archive/tsfresh", "max_stars_repo_head_hexsha": "9419aa15bb26a3725291f39636354e67c9b04caa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/transformers/test_feature_selector.py", "max_issues_repo_name": "awesome-archive/tsfresh", "max_issues_repo_head_hexsha": "9419aa15bb26a3725291f39636354e67c9b04caa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/transformers/test_feature_selector.py", "max_forks_repo_name": "awesome-archive/tsfresh", "max_forks_repo_head_hexsha": "9419aa15bb26a3725291f39636354e67c9b04caa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-09T03:46:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-09T03:46:27.000Z", "avg_line_length": 30.6019417476, "max_line_length": 106, "alphanum_fraction": 0.6078680203, "include": true, "reason": "import numpy", "num_tokens": 935}
|