text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
/*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include <ecto/ecto.hpp>
#include <ecto/cell.hpp>
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/python/raw_function.hpp>
#include <boost/python/iterator.hpp>
#include <boost/python/slice.hpp>
#include <boost/python/stl_iterator.hpp>
#include <ecto/python/std_map_indexing_suite.hpp>
#include <ecto/python/raw_constructor.hpp>
#include <ecto/python/repr.hpp>
namespace ecto
{
namespace py
{
namespace bp = boost::python;
/// Names a single tendril (input, output, or parameter) on a cell by key.
/// mod_input and mod_output may be two distinct cells (two-cell constructor);
/// an empty key means "the whole cell" and is expanded later (see rshift_spec).
struct TendrilSpecification
{
  cell_ptr mod_input, mod_output;
  std::string key;

  TendrilSpecification() { }

  /// Returns true if key is empty or names an input, output, or parameter
  /// of mod; false otherwise.
  bool
  check(cell_ptr mod, const std::string& key)
  {
    if (key.empty())
      return true;
    if (mod->inputs.find(key) == mod->inputs.end() && mod->outputs.find(key) == mod->outputs.end()
        && mod->parameters.find(key) == mod->parameters.end())
    {
      return false;
    }
    return true;
  }

  /// Spec spanning two cells: key must resolve on both mod_in and mod_out.
  /// @throws except::EctoException when key resolves on neither side.
  TendrilSpecification(cell_ptr mod_in, cell_ptr mod_out, const std::string& key)
      :
        mod_input(mod_in),
        mod_output(mod_out),
        key(key)
  {
    if (!check(mod_in, key))
      BOOST_THROW_EXCEPTION(except::EctoException()
                            << except::diag_msg("no input or parameter found")
                            << except::tendril_key(key)
                            << except::cell_name(mod_in->name()));
    if (!check(mod_out, key))
      BOOST_THROW_EXCEPTION(except::EctoException()
                            << except::diag_msg("no output or parameter found")
                            << except::tendril_key(key)
                            // Bug fix: report the cell that actually failed the
                            // check (previously reported mod_in->name()).
                            << except::cell_name(mod_out->name()));
  }

  /// Spec on a single cell, used for both the input and output side.
  /// @throws except::EctoException when key does not resolve on mod.
  TendrilSpecification(cell_ptr mod, const std::string& key)
      :
        mod_input(mod),
        mod_output(mod),
        key(key)
  {
    if (!check(mod, key))
      BOOST_THROW_EXCEPTION(except::EctoException()
                            << except::diag_msg("no inputs or outputs found")
                            << except::tendril_key(key)
                            << except::cell_name(mod->name()));
  }

  /// Resolve this spec to the keyed tendril of the requested kind.
  /// Returns a null tendril_ptr for an unrecognized tendril_type.
  tendril_ptr
  toTendril(tendril_type t)
  {
    switch (t)
    {
      case OUTPUT:
        return mod_output->outputs[key];
      case INPUT:
        return mod_input->inputs[key];
      case PARAMETER:
        return mod_input->parameters[key];
      default:
        return tendril_ptr();
    }
  }

  /// Python repr helper: "<cell name>, <key>".
  bp::str
  __str__()
  {
    bp::str str = bp::str(mod_input->name());
    str += ", " + bp::str(key);
    return str;
  }
};
/// An ordered list of TendrilSpecification; the python-facing value
/// produced by indexing a cell (cell['a'], cell['a','b'], cell[:], ...).
struct TendrilSpecifications
{
  typedef std::vector<TendrilSpecification> Vector;
  TendrilSpecifications()
  {
  }
  TendrilSpecifications(Vector vts)
      :
        vts(vts)
  {
  }
  /// Build from a python list of TendrilSpecification objects.
  TendrilSpecifications(bp::list l)
  {
    bp::stl_input_iterator<const TendrilSpecification&> begin(l), end;
    std::copy(begin, end, std::back_inserter(vts));
  }
  /// Collapse to a single spec.
  /// @throws except::EctoException unless exactly one spec is held.
  TendrilSpecification
  toSpec()
  {
    if (vts.size() != 1)
    {
      BOOST_THROW_EXCEPTION(except::EctoException()
                            << except::diag_msg("This specification must be of length one. "
                                                "e.g. module['only_one_key']"));
    }
    return vts.front();
  }
  /// Convert a python dict {name: TendrilSpecifications} into a tendrils
  /// collection, resolving every spec to the tendril kind given by tt
  /// (an int holding a tendril_type value, as passed from python).
  static tendrils_ptr
  toTendrils(bp::dict d, int tt)
  {
    bp::list keys = d.keys();
    bp::stl_input_iterator<std::string> begin(keys), end;
    tendrils_ptr ts(new tendrils);
    while (begin != end)
    {
      std::string key = *begin;
      // Each dict value must extract to a single-element spec list.
      TendrilSpecifications spec = bp::extract<TendrilSpecifications>(d.get(bp::str(key)));
      tendril_ptr tp = spec.toSpec().toTendril(tendril_type(tt));
      // Declare the resolved tendril under the dict key in the new collection.
      ts->declare(key, tp);
      ++begin;
    }
    return ts;
  }
  Vector vts;
};
/// Build a one-element specification list for module['key'].
inline TendrilSpecifications
getitem_str(cell_ptr mod, const std::string& key)
{
  TendrilSpecifications::Vector single;
  single.push_back(TendrilSpecification(mod, key));
  return TendrilSpecifications(single);
}
/// Build a specification list for module['a', 'b', ...].
/// Every tuple element must be a python str naming a tendril on mod;
/// throws std::runtime_error on the first non-string element.
inline TendrilSpecifications
getitem_tuple(cell_ptr mod, bp::tuple keys)
{
  const int n_keys = bp::len(keys);
  TendrilSpecifications specs;
  specs.vts.reserve(n_keys);
  for (int idx = 0; idx < n_keys; ++idx)
  {
    bp::extract<std::string> as_string(keys[idx]);
    if (!as_string.check())
      throw std::runtime_error("All items must be str's");
    specs.vts.push_back(TendrilSpecification(mod, as_string()));
  }
  return specs;
}
/// Build a specification list for module[['a', 'b', ...]] by delegating
/// to the tuple overload after converting the list.
inline TendrilSpecifications
getitem_list(cell_ptr mod, bp::list keys)
{
  return getitem_tuple(mod, bp::tuple(keys));
}
/// Build a specification for module[:]: a single empty-key spec meaning
/// "all tendrils of this cell". Any slice other than [:] is rejected.
inline TendrilSpecifications
getitem_slice(cell_ptr mod, bp::slice s)
{
  if (!(s == bp::slice()))
    throw std::runtime_error("Slice is only valid if its the [:] form...");
  return TendrilSpecifications::Vector(1, TendrilSpecification(mod, ""));
}
/// Expand a tendrils collection into one TendrilSpecification per entry,
/// all referring to the given cell. Used to turn an empty-key spec into
/// an explicit per-tendril list.
inline TendrilSpecifications
expand(cell_ptr mod, const tendrils& t)
{
  TendrilSpecifications expanded;
  tendrils::const_iterator it = t.begin(), it_end = t.end();
  for (; it != it_end; ++it)
    expanded.vts.push_back(TendrilSpecification(mod, it->first));
  return expanded;
}
/// Implements the python '>>' connection operator: pairs lhs outputs with
/// rhs inputs element-wise and returns a python list of
/// (from_cell, from_key, to_cell, to_key) tuples describing connections.
/// NOTE: lhs and rhs are mutated in place when they are expanded below.
inline bp::list
rshift_spec(TendrilSpecifications& lhs, TendrilSpecifications& rhs)
{
  bp::list result;
  // A single empty-key spec means "the whole cell": expand it to one spec
  // per declared output (lhs side) / input (rhs side).
  if (lhs.vts.size() == 1 && lhs.vts.front().key.empty())
  {
    lhs = expand(lhs.vts.front().mod_output, lhs.vts.front().mod_output->outputs);
  }
  if (rhs.vts.size() == 1 && rhs.vts.front().key.empty())
  {
    rhs = expand(rhs.vts.front().mod_input, rhs.vts.front().mod_input->inputs);
  }
  //the spec must be the same size...
  if (lhs.vts.size() != rhs.vts.size())
  {
    std::string msg = boost::str(
        boost::format("Specification mismatch... len(lhs) != len(rhs) -> %d != %d") % lhs.vts.size()
        % rhs.vts.size());
    throw std::runtime_error(msg);
  }
  for (size_t i = 0, end = lhs.vts.size(); i < end; i++)
  {
    TendrilSpecification out = lhs.vts[i], in = rhs.vts[i];
    //check types, this will also assert on not found...
    out.mod_output->outputs[out.key]->compatible_type(*in.mod_input->inputs[in.key]);
    result.append(bp::make_tuple(out.mod_output, out.key, in.mod_input, in.key));
  }
  return result;
}
/// Implements 'lhs >> (rhs1, rhs2, ...)': connects lhs to each spec in the
/// tuple and concatenates the resulting connection tuples into one list.
/// Note rshift_spec may expand lhs in place on the first call; subsequent
/// iterations then reuse the already-expanded lhs.
inline bp::list
rshift_spec_tuples(TendrilSpecifications& lhs, bp::tuple& rhs)
{
  bp::list result;
  // Extract each tuple element by reference so rshift_spec can mutate it.
  bp::stl_input_iterator<TendrilSpecifications&> begin(rhs), end;
  while (begin != end)
  {
    result.extend(rshift_spec(lhs, *begin));
    ++begin;
  }
  return result;
}
}
}
|
{"hexsha": "94273add051cf18921c42c99d630bff9127d0b0e", "size": 8641, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/pybindings/tendril_spec.hpp", "max_stars_repo_name": "fujiehuang/ecto", "max_stars_repo_head_hexsha": "fea744337aa1fad1397c9a3ba5baa143993cb5eb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 77.0, "max_stars_repo_stars_event_min_datetime": "2015-01-30T15:45:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T02:29:37.000Z", "max_issues_repo_path": "src/pybindings/tendril_spec.hpp", "max_issues_repo_name": "fujiehuang/ecto", "max_issues_repo_head_hexsha": "fea744337aa1fad1397c9a3ba5baa143993cb5eb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 37.0, "max_issues_repo_issues_event_min_datetime": "2015-01-18T21:04:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-09T08:24:54.000Z", "max_forks_repo_path": "src/pybindings/tendril_spec.hpp", "max_forks_repo_name": "fujiehuang/ecto", "max_forks_repo_head_hexsha": "fea744337aa1fad1397c9a3ba5baa143993cb5eb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 29.0, "max_forks_repo_forks_event_min_datetime": "2015-02-17T14:37:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T07:46:26.000Z", "avg_line_length": 31.1949458484, "max_line_length": 104, "alphanum_fraction": 0.5807198241, "num_tokens": 1991}
|
from platform_inverse_kinematics.arduino_interface import arduino_interface
from platform_inverse_kinematics.MotionPath import MotionPath
from platform_inverse_kinematics.StewartPlatform import StewartPlatform
import numpy as np

if __name__ == "__main__":
    # 6-DOF demonstration: build the platform model, generate a motion path
    # exercising each rotation axis and vertical travel, then stream the
    # servo trajectories to the Arduino over serial.
    stu = StewartPlatform(
        base_radius=116,
        platform_radius=128 / 2,
        servo_arm_length=45,
        coupler_length=220,
        home_height=207,
        base_attatchment_point_angles=np.array([np.radians(x) for x in [60, 120, 180, 240, 300, 360]]),
        platform_angles=np.array([np.radians(x) for x in [47.72, 132.38, 167.72, 252.28, 287.7, 12.28]]),
        servo_pitch_angle=np.radians(np.arctan((100 - 128 / 2) / 204)),
        servo_odd_even=[1, -1, 1, -1, 1, -1],
        max_tilt=50,
        max_angular_velocity=np.radians(100),
        axis_offset=np.radians(125),
        offset_90=np.radians(0),
        offset_0=np.radians(0),
    )
    # Waypoints are (t, x, y, z, roll, pitch, yaw); path sampled at 30 Hz.
    waypoints = [
        [0, 0, 0, stu.home_height, np.radians(10), 0, 0],
        [1, 20, -20, stu.home_height, 0, np.radians(10), 0],
        [2, -20, 20, stu.home_height, 0, 0, np.radians(10)],
        [3, 0, 0, stu.home_height - 15, np.radians(10), 0, 0],
        [4, 0, 0, stu.home_height + 15, np.radians(-10), 0, 0],
    ]
    path = MotionPath.from_platform_positions(stu, waypoints, 30)
    path.csv_servo_trajcectories("6dof.csv")  # (sic) spelling matches MotionPath's API
    msg = path.string_servo_trajectories()

    # Handshake with the Arduino, wait for operator confirmation, stream each
    # trajectory chunk with per-chunk acknowledgement, then start the run.
    ard = arduino_interface('/dev/ttyACM0', 115200)
    ard.send_string("start")
    ard.poll_until_message("ok")
    input()
    ard.reset_serial_buff()
    for message in msg:
        ard.send_string(message)
        ard.poll_until_message("ok")
        ard.reset_serial_buff()
    ard.send_string("end")
    ard.send_string("go")
    ard.poll_until_message("dn")
|
{"hexsha": "7788b757d33edd9dd4028e893d4d8d30233cea5d", "size": 2202, "ext": "py", "lang": "Python", "max_stars_repo_path": "platform_inverse_kinematics/6dof.py", "max_stars_repo_name": "trevormccrt/OpenStew", "max_stars_repo_head_hexsha": "815f41208656bdcb871792d175088cda0a487d9d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-12T18:22:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T18:22:40.000Z", "max_issues_repo_path": "platform_inverse_kinematics/6dof.py", "max_issues_repo_name": "trevormccrt/OpenStew", "max_issues_repo_head_hexsha": "815f41208656bdcb871792d175088cda0a487d9d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "platform_inverse_kinematics/6dof.py", "max_forks_repo_name": "trevormccrt/OpenStew", "max_forks_repo_head_hexsha": "815f41208656bdcb871792d175088cda0a487d9d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8695652174, "max_line_length": 127, "alphanum_fraction": 0.5217983651, "include": true, "reason": "import numpy", "num_tokens": 507}
|
from pseas.new_instance_selection.new_instance_selection import NewInstanceSelection
from pseas.model import Model
import numpy as np
from typing import Callable, List
from scipy import optimize
def __compute_distance_matrix__(features: np.ndarray, distance: Callable[[np.ndarray, np.ndarray], float]) -> np.ndarray:
"""
Computes the distance matrix between the instances.
It assumes the distance function is symmetric that is d(x,y)=d(y,x) and it assumes d(x, x)=0.
Parameters:
-----------
- features (np.ndarray) - the features of the instances
- distance (Callable[[np.ndarray, np.ndarray], float]) - a function that given two features compute their distance
Return:
-----------
The distance_matrix (np.ndarray) the distance matrix.
"""
num_instances: int = features.shape[0]
distance_matrix: np.ndarray = np.zeros(
(num_instances, num_instances), dtype=np.float64)
for instance1_index in range(num_instances):
features1: np.ndarray = features[instance1_index]
for instance2_index in range(instance1_index + 1, num_instances):
d: float = distance(features1, features[instance2_index])
distance_matrix[instance2_index, instance1_index] = d
distance_matrix[instance1_index, instance2_index] = d
return distance_matrix
def __find_weights__(x: np.ndarray, y: np.ndarray, mask: np.ndarray) -> np.ndarray:
instances: int = x.shape[0]
features: int = x.shape[1]
removed_instances = np.sum(mask <= 0)
instances -= removed_instances
qty: int = int(instances * (instances - 1) / 2)
dx: np.ndarray = np.zeros((qty, features))
dy: np.ndarray = np.zeros((qty,))
# Compute dataset
index: int = 0
for i in range(instances):
if mask[i] <= 0:
continue
for j in range(i + 1, instances):
if mask[j] <= 0:
continue
dx[index] = x[i] - x[j]
dy[index] = y[i] - y[j]
index += 1
np.square(dx, out=dx)
np.abs(dy, out=dy)
# np.square(dy, out=dy)
# weights = argmin_w_i (norm [w_i (x_i -x'_i)]_i - |y - y'|)^2
weights, residual = optimize.nnls(dx, dy)
return np.sqrt(weights)
class UDD(NewInstanceSelection):
    """Uncertainty / Density / Diversity (UDD) instance selection.

    Each candidate instance is scored as
        uncertainty + alpha * density - beta * diversity
    with all three terms normalized, and the instance minimizing the score
    is returned. With alpha == beta == 0 this reduces to pure
    uncertainty sampling.
    """

    def __init__(self, alpha: float = 1, beta: float = 1, k: int = 5) -> None:
        super().__init__()
        self.alpha: float = alpha   # weight of the density term
        self.beta: float = beta    # weight of the diversity term
        self.k: int = k            # neighbourhood size for density

    def __uncertainty(self, perf_matrix: np.ndarray, selectables_instances, model: Model, challenger_configuration) -> List[int]:
        """
        Original UDD uses the max-vote margin for classification;
        here we use the variance of the model's predictions instead.
        """
        scores: np.ndarray = np.zeros(perf_matrix.shape[0])
        for inst in selectables_instances:
            _, variance = model.predict(challenger_configuration, inst)
            scores[inst] = variance
        return scores

    def __k_nearest_neighbours(self, instance, selectables_instances, distances: np.ndarray):
        # NOTE(review): the argsort is reversed, so candidates are visited
        # from FARTHEST to nearest despite the method name - confirm intended.
        ordered = np.argsort(distances[instance, :])[::-1]
        picked = []
        for candidate in ordered:
            if candidate != instance and candidate in selectables_instances:
                picked.append(candidate)
                if len(picked) == self.k:
                    break
        return picked

    def __density(self, selectables_instances, distances: np.ndarray):
        # Mean squared distance to the k selected neighbours of each instance.
        densities = np.zeros(distances.shape[0], float)
        for inst in selectables_instances:
            neighbours = self.__k_nearest_neighbours(inst, selectables_instances, distances)
            acc: float = 0
            for nb in neighbours:
                acc += distances[inst, nb] * distances[inst, nb]
            densities[inst] = acc / max(1, len(neighbours))
        return densities

    def __diversity(self, selectables_instances, distances: np.ndarray):
        # Distance of each instance to its closest already-run instance;
        # zero everywhere when nothing has been run yet.
        finished = np.array([i not in selectables_instances for i in range(distances.shape[0])])
        if not np.any(finished):
            return np.zeros((len(selectables_instances)))
        diversities = np.min(distances[:, finished], axis=1)
        diversities[finished] = 0
        return diversities

    def select(self, challenger_configuration: int, incumbent_configuration: int, perf_matrix: np.ndarray, perf_mask: np.ndarray, model: Model, predicted_perf_matrix: np.ndarray, instance_features: np.ndarray) -> int:
        mask = np.sum(perf_mask, axis=1)
        # Median observed runtime per instance is the target the learned
        # distance metric should explain.
        y = np.zeros((perf_matrix.shape[0]))
        for inst in range(y.shape[0]):
            if np.any(perf_mask[inst]):
                y[inst] = np.median(perf_matrix[inst, perf_mask[inst]])
        weights: np.ndarray = __find_weights__(instance_features, y, mask)
        distances = __compute_distance_matrix__(instance_features, lambda a, b: np.linalg.norm(weights * (a - b)))
        # Instances with no recorded run yet are the selectable pool.
        selectables_instances = [i for i in range(perf_matrix.shape[0]) if not np.any(perf_mask[i, :])]
        uncertainties = self.__uncertainty(perf_matrix, selectables_instances, model, challenger_configuration)
        # Normalize values in [0, 1]
        uncertainties -= np.min(uncertainties)
        uncertainties /= max(1e-3, np.max(uncertainties))
        if self.alpha == 0 and self.beta == 0:
            scores = uncertainties
        else:
            densities = self.__density(selectables_instances, distances)
            diversities = self.__diversity(selectables_instances, distances)
            # Normalize values in [0, 1]
            densities -= np.min(densities)
            diversities -= np.min(diversities)
            densities /= max(1e-3, np.max(densities))
            diversities /= max(1e-3, np.max(diversities))
            scores = uncertainties + self.alpha * densities - self.beta * diversities
        # Bar already-run instances from being selected again.
        for i in range(perf_matrix.shape[0]):
            if i not in selectables_instances:
                scores[i] = 1e30
        return np.argmin(scores)

    def name(self) -> str:
        return "uncertainty" if self.alpha == 0 and self.beta == 0 else f"udd-{self.alpha}-{self.beta}"
|
{"hexsha": "e7a1af395a4af1c86eb69c4f6addcb62baeeb550", "size": 6353, "ext": "py", "lang": "Python", "max_stars_repo_path": "pseas/new_instance_selection/udd.py", "max_stars_repo_name": "Theomat/MPSEAS", "max_stars_repo_head_hexsha": "91f9c991e2061a7d230e491210d2c93005fd2236", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pseas/new_instance_selection/udd.py", "max_issues_repo_name": "Theomat/MPSEAS", "max_issues_repo_head_hexsha": "91f9c991e2061a7d230e491210d2c93005fd2236", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pseas/new_instance_selection/udd.py", "max_forks_repo_name": "Theomat/MPSEAS", "max_forks_repo_head_hexsha": "91f9c991e2061a7d230e491210d2c93005fd2236", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9870967742, "max_line_length": 218, "alphanum_fraction": 0.6322997009, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1487}
|
\documentclass[twoside]{article}
\usepackage{epsfig}
\usepackage{amssymb}
\usepackage{amsmath}
\usepackage{subcaption}
\setlength{\oddsidemargin}{0.25 in}
\setlength{\evensidemargin}{-0.25 in}
\setlength{\topmargin}{-0.6 in}
\setlength{\textwidth}{6.5 in}
\setlength{\textheight}{8.5 in}
\setlength{\headsep}{0.75 in}
\setlength{\parindent}{0 in}
\setlength{\parskip}{0.1 in}
\newtheorem{thm}{Theorem}[section]
\newtheorem{Defn}{Definition}[section]
\newcommand{\lecture}[3]{
\pagestyle{myheadings}
\thispagestyle{plain}
\newpage
\setcounter{page}{1}
\noindent
\begin{center}
\framebox{
\vbox{\vspace{2mm}
\hbox to 6.28in { {\bf ~Probabilistic Graphical Models 10-708 Notes with Koller and Friedman Textbook\hfill} }
\vspace{6mm}
\hbox to 6.28in { {\Large \hfill #1 \hfill} }
\vspace{6mm}
\hbox to 6.28in { {\it Lecturer: #2 \hfill Scribes: #3} }
\vspace{2mm}}
}
\end{center}
\markboth{#1}{#1}
\vspace*{4mm}
}
\begin{document}
\lecture{3 : Representation of Undirected GMs}{Eric P. Xing}{Xing JunJie} % Lecture name, Lecturer, Scribes
\section{Review}
There are several important concepts and theorems introduced in last lecture about Directed Graphical Models.
\begin{itemize}
\item Local independence: for each variable \(X_i\): \((X_i \perp NonDescendant_{X_i} \mid Pa_{X_i})\). This indicates that in a directed graph, each variable is independent of its nondescendants given its parents.
\item Global independence: \(I(G) = \{(\mathbf{X}\perp{\mathbf{Y}}\ |\ \mathbf{Z})\ :\ d\textrm{-}sep_G(\mathbf{X} ; \mathbf{Y} \ |\ \mathbf{Z})\}.\)The \emph{global} independence is given by d-seperation. Note that there is no need to consider too much about \emph{global and local} things, you can call them whatever you want.
\item A fully connected DAG \(\mathcal{G}\) is an I-map of \emph{any} distribution, since \(I_{l}(\mathcal{G}) = \emptyset \subset I(P)\) for any \(P\).
\item Minimal I-map: A DAG \(\mathcal{G}\) is a minimal I-map of \(P\), if the removal of even a single edge from \(\mathcal{G}\) renders it not an I-map.
\item A distribution may have several I-maps.
\item P-map: A DAG \(\mathcal{G}\) is a perfect map (p-map) of a distribution \(P\) if \(I(P)=I(\mathcal{G})\)
\end{itemize}
Note that not every distribution has a perfect map as DAG. Here is an example:
\[A\perp C|\{B,D\}\quad B\perp D|\{A, C\}\]
\begin{figure}[!hb]
\centering
\includegraphics[width=.8\linewidth]{assets/dgm_unable.png}
\caption{\label{fig:dgm_unable} BN1 wrongly says \(B\perp D|A\), BN2 wrongly says \(B\perp{D}\)}
\end{figure}
It is impossible for a DAG to capture both of the two independences at same time. The main reason is that the directed model (sometimes) encodes more independences together with the one we want. Thus, there is a portion of the space of distribution that we cannot encode with a DGM. That motivates another type of graphical model: undirected graphical models, aka Markov Random Fields.
\section{Undirected Graphical Models}
UGMs are very similar to DGMs in structure; but the directed or undirected edges encode differently. The directed model encodes \emph{causal} relationship between nodes, while UGMs captures pairwise relationship which represents \emph{correlation} between nodes, rough affinity.
Many things can be modeled as a UGM, such as a photo---each pixel can be a node, a go game---the grid chessboard seems intuitive, or even social networks, as shown in figure \ref{fig:ugm_ex}.
\begin{figure}[!ht]
\centering
\begin{subfigure}{.3\textwidth}
\centering
\includegraphics[width=.9\linewidth]{assets/ugm_ex1.png}
\caption{}
\end{subfigure}
\begin{subfigure}{.3\textwidth}
\centering
\includegraphics[width=.9\linewidth]{assets/ugm_ex2.png}
\caption{}
\end{subfigure}
\begin{subfigure}{.3\textwidth}
\centering
\includegraphics[width=.9\linewidth]{assets/ugm_ex3.png}
\caption{}
\end{subfigure}
\caption{UGM examples}
\label{fig:ugm_ex}
\end{figure}
\section{Representation}
\begin{Defn}
an undirected graphical model represents a distribution \(P(X_1,\ldots,X_n)\) defined by an undirected graph \(H\), and a set of positive potential functions \(y_c\) associated with the cliques of \(H\), s.t.
\begin{equation}
P(X_1,\ldots,X_n) = \frac{1}{Z} \prod_{c\in C}{\psi_c(X_c)}
\label{equation:1}
\end{equation}
where \(Z\) is known as a partition function:
\begin{equation}
Z = \sum_{X_1, \ldots, X_n} \prod_{c\in C}(\psi_c(X_c))
\end{equation}
\end{Defn}
The potential function can be understood as an contingency function of its arguments assigning ``pre-probabilistic'' score of their joint configuration. We call this of distribution in equation \ref{equation:1} as \textbf{Gibbs distribution}, as \emph{Definition 4.3 in Koller textbook}. And the potential function is defined as \textbf{factor} in Koller textbook.
\begin{Defn}
For \(G=(V, E)\), a complete subgraph (clique) is a subgraph \(G'=(V'\subseteq {V},E'\subseteq{E})\) such that nodes in \(V'\) are fully interconnected. A (maximal) clique is a complete subgraph s.t. any superset \(V^{\prime\prime} \supset V'\) is not complete.
\end{Defn}
\subsection{Interpretation of Clique Potentials}
\begin{figure}[!ht]
\centering
\includegraphics[width=.4\linewidth]{assets/clique_potential.png}
\end{figure}
The model implies \(X\perp Z|Y\). This independence statement implies (by definition) that the joint must factorize as:\[p(x,y,z)=p(y)p(x|y)p(z|y)\]
We can write this as \[p(x,y,z)=p(x,y)p(z|y)\] or \[p(x,y,z)=p(x|y)p(z,y)\]
However, we cannot have all potentials be marginals and cannot have all potentials be conditionals.
The positive clique potentials can only be thought of as general ``compatibility", ``goodness" or ``happiness" functions over their variables, but not as probability distributions.
\subsubsection{Example UGM --- using max cliques}
Here we'll use an example to show an UGM.
\begin{figure}[!h]
\centering
\includegraphics[width=.8\linewidth]{assets/ugm_max_clique.png}
\end{figure}
We can factorize the graph into two max cliques:
\[P(x_1,x_2,x_3,x_4)=\frac{1}{Z}\psi_c(X_{123})\times \psi_c(X_{234})\]
\[Z=\sum_{x_1,x_2,x_3,x_4}\psi_c(X_{123})\times \psi_c(X_{234})\]
We can represent \(P(X_{1:4})\) as two 3D tables instead of one 4D table.
\subsubsection{Using subcliques}
In this example, the distribution factorized over the subcliques.
\begin{figure}[!h]
\centering
\includegraphics[width=.4\linewidth]{assets/ugm_sub_clique.png}
\end{figure}
\[
\begin{split}
P(x_1,x_2,x_3,x_4) & = \frac{1}{Z}\prod_{ij}\psi_{ij}(X_{ij}) \\
& = \frac{1}{Z}\psi_{12}(X_{12})\psi_{14}(X_{14})\psi_{23}(X_{23})\psi_{24}(X_{24})\psi_{34}(X_{34}) \\
Z & = \sum_{x_1,x_2,x_3,x_4}\prod_{ij}\psi_{ij}(X_{ij})
\end{split}
\]
\subsubsection{Example UGM --- canonical representation}
A canonical representation of such a graph can be expressed as:
\[
\begin{split}
P(x_1,x_2,x_3,x_4) & = \frac{1}{Z}\psi_c(X_{123})\times \psi_c(X_{234}) \\
& \times \frac{1}{Z}\psi_{12}(X_{12})\psi_{14}(X_{14})\psi_{23}(X_{23})\psi_{24}(X_{24})\psi_{34}(X_{34}) \\
& \times \psi_{x_1}(x_1)\psi_{x_2}(x_2)\psi_{x_3}(x_3)\psi_{x_4}(x_4) \\
Z & = \sum_{x_1,x_2,x_3,x_4} \ldots
\end{split}
\]
\subsection{Independence properties}
\subsubsection{Global independence}
\begin{Defn}
A set of nodes \(Z\) separates \(X\) and \(Y\) in \(H\), denoted \(sep_H(X : Y |Z)\), if there is no active path between any node \(X \in \mathbf{X}\) and \(Y \in \mathbf{Y}\) given \(\mathbf{Z}\). Global independences associated with \(H\) are defined as:
\begin{equation}
I(H)={(X\perp Y|Z) :sep_H( X :Y|Z)}
\end{equation}
\end{Defn}
\begin{figure}[!htb]
\centering
\includegraphics[width=.5\linewidth]{assets/ugm_separate.png}
\caption{In this, the set \(X_B\) separates \(X_A\) from \(X_C\) . All paths from \(X_A\) to \(X_C\) pass through \(X_B\)}
\label{fig:separate}
\end{figure}
In Figure \ref{fig:separate}, B separates A and C if every path from a node in A to a node in C passes through a node in B. It is written as sepH(A : C|B). A probability distribution satisfies the global Markov property if for any disjoint A,B,C such that B separates A and C, A is independent of C given B.
\subsubsection{Local independence}
\begin{figure}[!ht]
\centering
\includegraphics[width=0.4\linewidth]{assets/ugm_local.png}
\caption{Illustration of Markov Blanket in undirected graph}
\end{figure}
\begin{Defn}
For each node \(X_i \in V\), there is unique Markov blanket of \(X_i\) , denoted \(MB_{X_i}\) , which is the set of neighbors of \(X_i\) in the graph (those that share an edge with \(X_i\) )
\end{Defn}
\begin{Defn}
The local Markov independencies associated with H is:
\begin{equation}
I_l(H): \{X_i \perp V - \{X_i\} - MB_{x_i} | MB_{x_i} : \forall i\}
\end{equation}
\end{Defn}
In other words, \(X_i\) is independent of the rest of the nodes in the graph given its immediate neighbors.
Note that, based on the local independence:
\begin{equation}
P(X_i \mid X_{-i}) = P(X_i \mid MB_{x_i})
\end{equation}
\subsubsection{Soundness and completeness of global Markov property}
\begin{Defn}
An UG \(H\) is an I-map for a distribution \(P\) if \(I(H) \subseteq I(P)\), i.e., \(P\) entails \(I(H)\).
\end{Defn}
\begin{Defn}
P is a Gibbs distribution over H if it can be represented as
\begin{equation}
P(X_1, \ldots, X_n) = \frac{1}{Z}\prod_{c\in C}\psi_c(X_c)
\end{equation}
\end{Defn}
\begin{thm}
(soundness): If \(P\) is a Gibbs distribution over \(H\), then \(H\) is an I-map of \(P\).
\end{thm}
\begin{thm}
(Completeness): If X and Y are not separated given Z in H (\(\lnot sep_H (X ; Z |Y )\)), then X and Y are dependent given Z, in some distribution P represented as (\(X \not\perp_P Z|Y \)) that factorizes over H.
\end{thm}
The proof of the theorems are available on Koller textbook.
\subsubsection{Other Markov properties}
For directed graphs, we defined I-maps in terms of local Markov properties, and derived global independence. For undirected graphs, we defined I-maps in terms of global Markov properties, and will now derive local independence.
The pairwise Markov independencies associated with UG H = (V;E) are
\[I_p(H)=\{(X\perp Y|V-\{X,Y\}):{X,Y}\notin E\}\]
\begin{figure}
\centering
\includegraphics[width=.5\linewidth]{assets/ugn_pair_independence.png}
\caption{Pairwise independence in undirected graph. Red nodes are observed.}
\label{fig:pairwise_independence}
\end{figure}
For example, in figure \ref{fig:pairwise_independence}, we have the following independence
\[X_1\perp X_5 | \{X_2, X_3,X_4\}\]
\subsubsection{Relationship between local and global Markov properties}
\begin{itemize}
\item For any Markov Network H, and any distribution P, we have that if \(P \models I(H)\) then \(P \models I_l(H)\)
\item For any Markov Network H, and any distribution P, we have that if \(P \models I_l(H)\) then \(P \models I_p(H)\)
\item Let P be a positive distribution. If \(P \models I_p(H)\), then \(P \models I(H)\)
\end{itemize}
The following three statements are equivalent for a positive distribution P:
\begin{itemize}
\item \(P \models I_l(H)\)
\item \(P \models I_p(H)\)
\item \(P \models I(H)\)
\end{itemize}
Above equivalence relies on the positivity assumption of \(P\). For nonpositive distributions, there are examples of distributions \(P\), there are examples which satisfies one of these properties, but not the stronger property.
\subsubsection{Perfect maps}
\begin{Defn}
A Markov network H is a perfect map for P if for any X; Y;Z we have that
\begin{equation}
sep_H(X;Z|Y) \Leftrightarrow P \models (X\perp Z|Y)
\end{equation}
\end{Defn}
Note that, just like DMs, not every distribution has a perfect map as UGM.
\subsubsection{Exponential Form}
Constraining clique potentials to be positive could be inconvenient (e.g., the interactions between a pair of atoms can be either attractive or repulsive). We represent a clique potential \(\psi_c(X_c)\) in an unconstrained form using a real-valued ``energy'' function \(\phi_c(X_c)\):
\begin{equation}
\psi_c(X_c) = exp\{-\phi_c(X_c)\}
\end{equation}
Thus, this gives the joint distribution an additive structure:
\begin{equation}
P(X)=\frac{1}{Z}exp\{-\sum_{c\in C}\phi_c(X_c)\} = \frac{1}{Z}exp\{-H(X)\}
\end{equation}
where the \(H(X)\) is called the ``free energy''.
The exponential ensures that the distribution is positive. In physics, this is called the ``Boltzmann distribution''. In statistics, this is called a log-linear model (as Koller textbook introduces).
\end{document}
|
{"hexsha": "e980717f0c1399bd1d5ee6ce81c047166efa23cb", "size": 12528, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "assets/pgm/lecture3/lecture3.tex", "max_stars_repo_name": "GavinXing/blog", "max_stars_repo_head_hexsha": "59cf8ac227974a17178a28176469adbfc8bacbdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assets/pgm/lecture3/lecture3.tex", "max_issues_repo_name": "GavinXing/blog", "max_issues_repo_head_hexsha": "59cf8ac227974a17178a28176469adbfc8bacbdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-01-31T02:21:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T03:34:38.000Z", "max_forks_repo_path": "assets/pgm/lecture3/lecture3.tex", "max_forks_repo_name": "GavinXing/blog", "max_forks_repo_head_hexsha": "59cf8ac227974a17178a28176469adbfc8bacbdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.8078175896, "max_line_length": 385, "alphanum_fraction": 0.7132822478, "num_tokens": 3990}
|
#!/usr/bin/env python
from __future__ import print_function
from keras.models import Sequential
from keras.layers import TimeDistributed
from keras.layers.core import Dense, Activation, Dropout, RepeatVector, TimeDistributedDense
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
import numpy as np
import random,string
import sys
# Download the Nietzsche corpus and normalise it to lowercase text.
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
try:
    text = open(path).read().lower()
except UnicodeDecodeError:
    import codecs
    text = codecs.open(path, encoding='utf-8').read().lower()
print('corpus length:', len(text))
# Sort the vocabulary so the char<->index mappings are deterministic across
# runs: a bare set() iterates in a different order per interpreter invocation,
# which would make any saved weights incompatible with a later run.
chars = sorted(set(text))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
maxlen = 4  # sequence length fed to the LSTM; might be much easier with 3 or 2...
nbatch = 32  # batch size; baked into the model via batch_input_shape (stateful LSTM)
print('Vectorization...')
# One-hot encode the corpus: X[t, c] == 1 iff character index c occurs at position t.
# Use the builtin bool: np.bool is a deprecated alias removed in NumPy 1.24.
X = np.zeros((len(text), len(chars)), dtype=bool)
for t, char in enumerate(text):
    X[t, char_indices[char]] = 1
# build the model: 2 stacked LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(512, stateful=True, return_sequences=False, batch_input_shape=(nbatch, maxlen, len(chars))))
model.add(Dense(256, activation='relu'))
model.add(RepeatVector(maxlen))
model.add(LSTM(512, stateful=True, return_sequences=True))
model.add(TimeDistributed(Dense(len(chars))))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
def sample(a, temperature=1.0):
    """Sample an index from probability array `a`, reshaped by `temperature`.

    Low temperatures sharpen the distribution (approaching argmax); high
    temperatures flatten it toward uniform.
    """
    # Work in float64: model predictions arrive as float32, and after the
    # exp/renormalise step the probabilities can sum to slightly more than
    # 1.0, which makes np.random.multinomial raise ValueError.
    a = np.asarray(a, dtype=np.float64)
    a = np.log(a) / temperature
    a = np.exp(a) / np.sum(np.exp(a))
    return np.argmax(np.random.multinomial(1, a, 1))
# start with a small sample that increases each iteration
# (floor division: these feed the slicing arithmetic below and must be ints
# under Python 3, where "/" always yields a float)
numsamps = len(X) // 100
numsampinc = len(X) // 100
# train the model, output generated text after each iteration
for iteration in range(1, 100):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    # get consecutive sequences for each "lane" by breaking the dataset
    # into 'nbatch' regions
    # X[0] X[s] X[2*s] ... X[(nbatch-1)*s] X[1] X[s+1] X[2*s+1] ...
    numsamps = min(len(X), numsamps)
    numsamps += numsampinc
    stride = int((numsamps - maxlen) / nbatch)
    sampsperbatch = int(stride / maxlen)
    totalsamps = sampsperbatch * nbatch
    XXs = np.zeros((totalsamps, maxlen, len(chars)), dtype=bool)
    YYs = np.zeros((totalsamps, maxlen, len(chars)), dtype=bool)
    for i in range(0, sampsperbatch):
        for j in range(0, nbatch):
            # input window, and the window immediately after it as the target
            ofs = j * stride + i * maxlen
            XX = X[ofs:ofs + maxlen]
            YY = X[ofs + maxlen:ofs + maxlen * 2]
            XXs[i * nbatch + j] = XX
            YYs[i * nbatch + j] = YY
    model.reset_states()
    model.fit(XXs, YYs, batch_size=nbatch, nb_epoch=3, shuffle=False)
    start_index = random.randint(0, len(text) - maxlen - 1)
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print()
        print('----- diversity:', diversity)
        generated = ''
        sentence = text[start_index: start_index + maxlen]
        generated += sentence
        print('----- Generating with seed: "' + sentence + '"')
        sys.stdout.write(generated)
        model.reset_states()
        # floor division: range() rejects floats under Python 3
        for i in range(400 // maxlen):
            x = np.zeros((nbatch, maxlen, len(chars)))
            for t, char in enumerate(sentence):
                x[0, t, char_indices[char]] = 1.
            # just get prediction from 1st batch
            preds_seq = model.predict(x, verbose=0)[0]
            # don't know if this is correct since each successive sample
            # doesn't take into account the prior...
            next_indices = [sample(preds, diversity) for preds in preds_seq]
            # str.join works on both Python 2 and 3; string.join() was removed
            # from the string module in Python 3.
            next_chars = ''.join(indices_char[next_index] for next_index in next_indices)
            generated += next_chars
            sentence = next_chars
            sys.stdout.write(next_chars)
            sys.stdout.flush()
    print()
|
{"hexsha": "792a5ea109d63f77a9cd12b1444b2b265f198b04", "size": 3975, "ext": "py", "lang": "Python", "max_stars_repo_path": "hard-gists/3fdd80a08808bd275142d46863e92d68/snippet.py", "max_stars_repo_name": "jjhenkel/dockerizeme", "max_stars_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2019-07-08T08:26:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-24T23:53:25.000Z", "max_issues_repo_path": "hard-gists/3fdd80a08808bd275142d46863e92d68/snippet.py", "max_issues_repo_name": "jjhenkel/dockerizeme", "max_issues_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-06-15T14:47:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-26T05:02:56.000Z", "max_forks_repo_path": "hard-gists/3fdd80a08808bd275142d46863e92d68/snippet.py", "max_forks_repo_name": "jjhenkel/dockerizeme", "max_forks_repo_head_hexsha": "eaa4fe5366f6b9adf74399eab01c712cacaeb279", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-05-16T03:50:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-14T14:35:12.000Z", "avg_line_length": 33.4033613445, "max_line_length": 107, "alphanum_fraction": 0.6548427673, "include": true, "reason": "import numpy", "num_tokens": 1033}
|
import cv2
import numpy as np
# Current frame converted to HSV; set by main(), read by the mouse callback.
hsv_image = None
# Arbitrary default value [H,S,V]: H(Hue) = 0-179, S(Saturation) = 0-255, V(Brightness) = 0-255
pixel = (20,60,80) #Arbitary Value [H,S,V] H(Hue) = 0-179, S(Saturation) = 0-255, V(Brightness) = 0-255
def color_info(event, x, y, flag, param):
    """Mouse callback: on a left click, print the HSV value under the cursor
    and a derived threshold window, then display the resulting mask."""
    if event == cv2.EVENT_LBUTTONDOWN:  # When the left mouse button is clicked
        # Promote to Python ints before arithmetic: hsv_image pixels are
        # uint8, so e.g. pixel[2] + 40 would silently wrap around past 255.
        pixel = hsv_image[y, x].astype(int)
        # Clamp to the valid HSV ranges (H: 0-179, S/V: 0-255).
        upper_limit = np.minimum([pixel[0] + 10, pixel[1] + 10, pixel[2] + 40], [179, 255, 255])
        lower_limit = np.maximum([pixel[0] - 10, pixel[1] - 10, pixel[2] - 40], 0)
        print("HSV Pixel:", pixel)
        print("Lower_Limit: ", lower_limit)
        print("Upper_Limit: ", upper_limit)
        print('\n')
        # Display the masking result of the threshold value
        image_mask = cv2.inRange(hsv_image, lower_limit.astype(np.uint8), upper_limit.astype(np.uint8))
        cv2.imshow("Mask", image_mask)
def main():
    """Load the sample image, show it, and let the user probe HSV values by clicking."""
    global hsv_image, pixel
    # Raw image input
    raw_image = cv2.imread("/home/vincent/vincent_dev/gazebo_ws/src/robot_vision/src/buffer.jpg")
    if raw_image is None:
        # Bail out instead of passing None to imshow/cvtColor, which would raise.
        print("Image input failed!")
        return
    cv2.imshow("Raw Image", raw_image)
    # Convert image to HSV and create a new window to receive mouse clicks on the picture
    hsv_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2HSV)
    cv2.namedWindow('HSV_Picker')  # A new window called HSV_Picker
    cv2.imshow("HSV_Picker", hsv_image)  # Show the image on it
    cv2.setMouseCallback('HSV_Picker', color_info)  # Receive mouse clicks on HSV_Picker
    # Wait until any key is pressed
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
{"hexsha": "b58797d87304de0c2cce1fab616bd435e9f400b0", "size": 1546, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/robot_vision/src/HSV_Picker.py", "max_stars_repo_name": "vincent51689453/Gazebo_IC382_Simulation", "max_stars_repo_head_hexsha": "15e2e15463374d868e76d507f2b0de7d0a8069d2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/robot_vision/src/HSV_Picker.py", "max_issues_repo_name": "vincent51689453/Gazebo_IC382_Simulation", "max_issues_repo_head_hexsha": "15e2e15463374d868e76d507f2b0de7d0a8069d2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/robot_vision/src/HSV_Picker.py", "max_forks_repo_name": "vincent51689453/Gazebo_IC382_Simulation", "max_forks_repo_head_hexsha": "15e2e15463374d868e76d507f2b0de7d0a8069d2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9534883721, "max_line_length": 106, "alphanum_fraction": 0.6481241915, "include": true, "reason": "import numpy", "num_tokens": 448}
|
# bca4abm
# See full license in LICENSE.txt.
from builtins import range
import logging
import os.path
import numpy as np
import pandas as pd
from activitysim.core import inject
from activitysim.core import config
from bca4abm import bca4abm as bca
logger = logging.getLogger(__name__)
@inject.table()
def households(data_dir, settings):
    """Injected table: base and build households merged on household_id,
    with a sequential chunk_id column appended."""
    logger.debug("reading households table")

    table_settings = config.read_model_settings('tables.yaml')

    def read_table(name):
        # Both CSV reads share identical options apart from the table name.
        return bca.read_csv_table(table_name=name,
                                  index_col="household_id",
                                  data_dir=data_dir,
                                  settings=table_settings)

    merged = pd.merge(read_table("base_households"),
                      read_table("build_households"),
                      left_index=True, right_index=True)

    # - assign chunk_ids
    assert 'chunk_id' not in merged.columns
    merged['chunk_id'] = pd.Series(list(range(len(merged))), merged.index)

    return merged
# Broadcast household attributes onto the persons table — presumably each
# person row joins to its household via the household_id column; verify
# against activitysim's inject.broadcast semantics.
inject.broadcast(cast='households',
                 onto='persons',
                 cast_index=True,
                 onto_on='household_id')
|
{"hexsha": "a799977ab9f1d5d1ae07ff930118b90b57fea53e", "size": 1417, "ext": "py", "lang": "Python", "max_stars_repo_path": "bca4abm/tables/households.py", "max_stars_repo_name": "steventrev/in_midstates_bca", "max_stars_repo_head_hexsha": "f762637a0b8976fa835320cb3b31a5eb5c423dfa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bca4abm/tables/households.py", "max_issues_repo_name": "steventrev/in_midstates_bca", "max_issues_repo_head_hexsha": "f762637a0b8976fa835320cb3b31a5eb5c423dfa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bca4abm/tables/households.py", "max_forks_repo_name": "steventrev/in_midstates_bca", "max_forks_repo_head_hexsha": "f762637a0b8976fa835320cb3b31a5eb5c423dfa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.34, "max_line_length": 95, "alphanum_fraction": 0.6132674665, "include": true, "reason": "import numpy", "num_tokens": 268}
|
# This file is part of DagAmendment, the reference implementation of:
#
# Michel, Élie and Boubekeur, Tamy (2021).
# DAG Amendment for Inverse Control of Parametric Shapes
# ACM Transactions on Graphics (Proc. SIGGRAPH 2021), 173:1-173:14.
#
# Copyright (c) 2020-2021 -- Télécom Paris (Élie Michel <elie.michel@telecom-paris.fr>)
#
# The MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the “Software”), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided “as is”, without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and non-infringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the Software.
# no bpy here
import numpy as np
from numpy.linalg import norm, inv
# TODO: rename perspective_matrix into projection_matrix
class Projector:
    """Holds the transformation from world space to manipulation space"""

    def __init__(self, context=None, perspective_matrix=None, view_matrix=None, lens=None):
        """Build a projector from the current 3D view
        (weird API for backward compat: context is used only if matrices are None)"""
        if perspective_matrix is None or view_matrix is None:
            from .utils import get_viewport, get_viewport_area
            region, rv3d = get_viewport(context)
            space_3d = get_viewport_area(context).spaces[0]
            # Remap x/y from NDC [-1,1] to [0,1] on top of the window matrix.
            M = np.array(((.5,0,0,.5),(0,.5,0,.5),(0,0,1,0),(0,0,0,1)))
            perspective_matrix = M @ np.array(rv3d.window_matrix)
            view_matrix = np.array(rv3d.view_matrix)
            lens = space_3d.lens
        self.perspective_matrix = perspective_matrix
        self.view_matrix = view_matrix
        self.P = self.perspective_matrix @ self.view_matrix
        self.w = 3  # index of the homogeneous coordinate
        self.inv_view_matrix = inv(view_matrix)
        self.lens = lens
        #assert(np.isclose(self.P, np.array(M @ rv3d.perspective_matrix)).all())

    def eval(self, X):
        """Project world-space point X to screen space; output in [0,1]"""
        Y = self.P @ np.array((*X, 1))
        return Y[:2] / Y[self.w]

    def jacobian(self, X):
        """2x3 Jacobian of eval() with respect to the world-space coordinates of X."""
        Y = self.P @ np.array((*X, 1))
        projX = Y[:2] / Y[self.w]
        J = (self.P[:2] - np.outer(projX, self.P[self.w])) / Y[self.w]
        return J[:,:3]

    def unproject(self, uv):
        """Take a uv screen pos in range [0,1]² and return a world space
        direction"""
        u, v = uv
        proj = self.perspective_matrix
        screenspace = np.array((u, v, 0.0, 1.0))
        viewspace = inv(proj) @ screenspace
        viewspace[3] = 1
        worldspace = self.inv_view_matrix @ viewspace
        worldspace = worldspace[:3]
        direction = worldspace - self.position
        return direction / norm(direction)

    @property
    def position(self):
        """Camera position in world space (translation part of the inverse view matrix)."""
        return self.inv_view_matrix[:3,3]

    @property
    def yfov(self):
        """Vertical field of view, in radians."""
        return 2.0 * np.arctan(0.5 / self.perspective_matrix[1,1])# * 180.0 / np.pi

    @property
    def xfov(self):
        """Horizontal field of view, in radians."""
        return 2.0 * np.arctan(0.5 / self.perspective_matrix[0,0])# * 180.0 / np.pi

    def to_json(self):
        # NB: the 'presp'/'view' key spellings are historical; keep them so
        # previously written files stay readable.
        return {
            'presp': self.perspective_matrix.tolist(),
            'view': self.view_matrix.tolist(),
            'lens': self.lens
        }

    @classmethod
    def from_json(cls, data):
        # Bug fix: the original loaded data['presp'] into the *view* matrix
        # and data['view'] into the *perspective* matrix, so a
        # to_json/from_json round trip swapped the two transforms.
        presp = np.array(data['presp'])
        view = np.array(data['view'])
        lens = data['lens']
        return Projector(perspective_matrix=presp, view_matrix=view, lens=lens)
|
{"hexsha": "bc1377a9515c3520373a0b77efa291d6d243183e", "size": 4198, "ext": "py", "lang": "Python", "max_stars_repo_path": "addons/DagAmendment/Projector.py", "max_stars_repo_name": "V-Sekai/V-Sekai-Blender-tools", "max_stars_repo_head_hexsha": "3473ad4abb737756290a9007273519460742960d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-21T16:38:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T00:56:35.000Z", "max_issues_repo_path": "addons/DagAmendment/Projector.py", "max_issues_repo_name": "V-Sekai/V-Sekai-Blender-game-tools", "max_issues_repo_head_hexsha": "3473ad4abb737756290a9007273519460742960d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-29T05:46:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-29T05:46:50.000Z", "max_forks_repo_path": "addons/DagAmendment/Projector.py", "max_forks_repo_name": "V-Sekai/V-Sekai-Blender-game-tools", "max_forks_repo_head_hexsha": "3473ad4abb737756290a9007273519460742960d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-07T19:41:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-07T19:41:34.000Z", "avg_line_length": 38.8703703704, "max_line_length": 91, "alphanum_fraction": 0.656264888, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1082}
|
import serial
import time
import cv2
from picamera import PiCamera
import numpy as np
# from scipy import io
import multiprocessing
import math
import threading
# import matplotlib.pyplot as plt
class MySerial:
    """Thin wrapper around the Pi's UART carrying the scanner's command protocol."""

    def __init__(self):
        self._ser = serial.Serial('/dev/ttyAMA0', 9600)  # open the serial port
        # Command bytes understood by the motor/laser controller.
        self.STEP_CMD_ON = '<StepON>'.encode('ascii')
        self.STEP_CMD_OFF = '<StepOFF>'.encode('ascii')
        self.STEP_CMD_ONE_STEP = '<StepOne>'.encode('ascii')
        self.STEP_CMD_RUN_TEST = '<StepRUN>'.encode('ascii')
        self.STEP_CMD_DIR_RIGHT = '<directRight>'.encode('ascii')
        self.STEP_CMD_DIR_LEFT = '<directLeft>'.encode('ascii')
        self.LIGHT_CMD_ON = '<lightON>'.encode('ascii')
        self.LIGHT_CMD_OFF = '<lightOFF>'.encode('ascii')

    def write(self, cmd):
        '''
        Send a command over the serial port, reporting (not raising) failures.
        :param cmd: the command bytes to send
        :return:
        '''
        try:
            self._ser.write(cmd)
        # Bug fix: catch serial I/O failures.  The original caught
        # KeyboardInterrupt, which write() never raises — it swallowed
        # Ctrl-C instead of reporting port errors.
        except serial.SerialException as serialException:
            print(serialException)
    """
    def write1(self, cmd):
        '''
        :param cmd: 待发送的命令
        :return:
        '''
        if not self.ser.isOpen():
            self.ser.open()
        try:
            self.ser.write(cmd)
        except KeyboardInterrupt as serialException:
            print(serialException)
    """
    def close(self):
        """Close the serial port."""
        self._ser.close()

    def open(self):
        """Re-open the serial port."""
        self._ser.open()
class Stepper:
    """Drives the turntable stepper motor through the serial command protocol."""

    ROUND = 1600  # single steps per full revolution

    def __init__(self):
        self._ser = MySerial()
        self.closeStep()  # start with the driver disabled
        self.rightRun()   # default rotation direction

    def _send(self, cmd):
        # Forward a protocol command over the shared serial link.
        self._ser.write(cmd)

    def openStep(self):
        """Enable the stepper driver."""
        self._send(self._ser.STEP_CMD_ON)

    def closeStep(self):
        """Disable the stepper driver."""
        self._send(self._ser.STEP_CMD_OFF)

    def rightRun(self):
        """Select rightward rotation."""
        self._send(self._ser.STEP_CMD_DIR_RIGHT)

    def leftRun(self):
        """Select leftward rotation."""
        self._send(self._ser.STEP_CMD_DIR_LEFT)

    def stepOne(self):
        """Enable the driver and advance by a single step."""
        self.openStep()
        self._send(self._ser.STEP_CMD_ONE_STEP)

    def stepRound(self):
        """Enable the driver, trigger the built-in test run, then disable it."""
        self.openStep()
        self._send(self._ser.STEP_CMD_RUN_TEST)
        self.closeStep()

    def angleRun(self, angle=360):
        '''
        Rotate by the given angle, issuing one command per single step.
        :param angle: rotation angle in degrees
        :return:
        '''
        step_count = int((angle / 360) * self.ROUND)  # number of single steps
        for _ in range(step_count):
            self._send(self._ser.STEP_CMD_ONE_STEP)
class Light:
    """Controls the line laser through the serial command protocol."""

    def __init__(self):
        self._ser = MySerial()

    def openLight(self):
        """Switch the laser on."""
        cmd = self._ser.LIGHT_CMD_ON
        self._ser.write(cmd)

    def closeLight(self):
        """Switch the laser off."""
        cmd = self._ser.LIGHT_CMD_OFF
        self._ser.write(cmd)
class MyCamera:
    """Wraps the PiCamera and returns captured frames as BGR numpy arrays."""

    def __init__(self):
        self.camera = PiCamera()  # initialise the camera
        # Capture geometry: width x height, 3 channels (BGR).
        self.PHOTO_INFO = {'width': int(1920), 'height': int(1408), 'mode': 3}
        self.camera.resolution = (self.PHOTO_INFO['width'], self.PHOTO_INFO['height'])
        self.camera.start_preview()  # start the camera preview

    def openCamera(self):
        """(Re)start the camera preview."""
        self.camera.start_preview()

    def getImage(self):
        '''
        :return: one captured frame, shaped (height, width, channels)
        '''
        height = self.PHOTO_INFO['height']
        width = self.PHOTO_INFO['width']
        channels = self.PHOTO_INFO['mode']
        img = np.empty((height * width * channels,), dtype=np.uint8)
        self.camera.capture(img, 'bgr')
        return img.reshape((height, width, channels))

    def closeCamera(self):
        """Release the camera."""
        self.camera.close()
class ImageHandle:
    """Image-processing pipeline: isolate the laser stripe inside a fixed
    region of interest and convert its displacement from a pre-recorded
    reference line into 3D point coordinates."""

    def __init__(self):
        # Region of interest inside the full frame (pixel coordinates).
        self._InterestRange = {'left_col': 720, 'top_row': 840, 'right_col': 940, 'bottom_row': 1270}
        # Reference laser line from the calibration image, computed once.
        # Bug fix: the original computed this into a *local* that was thrown
        # away, and getPosition() re-read/re-processed standerd.png on every
        # call (its own comment noted the cost).
        self._std_line = self.findCenterLine(
            self.rangeByColors(self.getInterestRange(cv2.imread('standerd.png'))))

    def getImgSize(self):
        """Return (rows, cols) of the region of interest."""
        return self._InterestRange['bottom_row'] - self._InterestRange['top_row'], self._InterestRange['right_col'] - \
               self._InterestRange['left_col']

    def getGrayImg(self, img):
        """Convert a BGR image to grayscale."""
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return img

    def getOutLine(self, img):
        '''Return the Canny edge map of the image.'''
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(img, 30, 90)
        return edges

    def getGaussianBlur(self, img):
        '''Gaussian blur to suppress sensor noise before thresholding.'''
        kernel_size = (5, 5)
        sigma = 2.0
        return cv2.GaussianBlur(img, kernel_size, sigma)

    def getLines(self, img):
        """Detect straight segments via probabilistic Hough transform and draw them on img."""
        edges = cv2.Canny(img, 50, 200)
        minLineLength = 300
        maxLineGap = 15
        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 100, minLineLength, maxLineGap)
        for line in lines:
            for x1, y1, x2, y2 in line:
                cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
        return img

    def getInterestRange(self, img):
        """Crop the frame to the configured region of interest."""
        img = img[self._InterestRange['top_row']:self._InterestRange['bottom_row'],
                  self._InterestRange['left_col']:self._InterestRange['right_col']]
        return img

    def rangeByColors(self, img, *colors):
        '''
        Threshold the ROI image by a colour window.
        :param img: image cropped to the region of interest
        :type colors: np.array; any number of colours spanning the window
        '''
        # Build the colour window.
        if len(colors) == 0:
            # No colours given: fall back to the default window.
            lower_color = np.array([0, 0, 135])
            upper_color = np.array([136, 150, 255])
        else:
            lower_color = np.min(np.array(colors), axis=0)
            upper_color = np.max(np.array(colors), axis=0)
        mask = cv2.inRange(img, lower_color, upper_color)
        return mask

    def rangeByGray(self, img, *colors):
        '''
        Threshold the ROI image by grayscale intensity.
        :param img: image cropped to the region of interest
        :type colors: np.array; binary intensity bounds
        '''
        # Build the intensity window.
        if len(colors) == 0:
            # No bounds given: fall back to the default window.
            lower_color = 100
            upper_color = 255
        else:
            lower_color = np.min(np.array(colors), axis=0)
            upper_color = np.max(np.array(colors), axis=0)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        mask = cv2.inRange(gray, lower_color, upper_color)
        return mask

    def findCenterLine(self, img):
        '''
        :param img: binary image (stripe pixels == 255)
        :return: image with one centre pixel (255) per stripe row
        '''
        # Blank output the same shape as the input.
        lineImg = np.empty(img.shape, dtype=np.uint8)
        lineImg[:] = 0
        # Rows that contain at least one stripe pixel.
        line_rows = []
        rowP, colP = np.where(img == 255)
        [line_rows.append(i) for i in (rowP) if not i in line_rows]
        # For each such row, mark the mean column of its stripe pixels.
        for row in line_rows:
            cols_index = np.where(rowP == row)
            col = int(np.average(colP[cols_index]))
            lineImg[row][col] = 255
        return lineImg

    def getPosition(self, img, angle):
        '''
        Calibration notes (from the original author):
          1 cm = 24 px at 1920/2 resolution (turntable centre), 42 px at 1920.
          Focal length: F = (P x D) / W with P=41, D=42.1, W=1 => F=1880.
          Moving 1 cm forward shifts the laser by 3 px at 1920/2, 13 px at 1920.
          Camera-to-turntable-centre distance: 42.1 cm.
        :param img: full frame captured with the laser on
        :param angle: current turntable angle in radians
        :return: (len(reference rows), 3) array of (x, y, z) points
        '''
        sLine = self._std_line  # cached reference line (see __init__)
        interestImg = self.getInterestRange(img)
        blurImg = self.getGaussianBlur(interestImg)  # denoise before thresholding
        mask1 = self.rangeByGray(blurImg)    # intensity threshold
        mask2 = self.rangeByColors(blurImg)  # colour threshold
        laserLine = self.findCenterLine(mask1 | mask2)  # detected laser line
        sx, sy = np.where(sLine == 255)
        lx, ly = np.where(laserLine == 255)
        lx = lx.tolist()
        lineInfo = np.empty([len(sx), 3])
        lineInfo[:] = 0
        # NOTE(review): sp is a loop index but lx holds row coordinates; this
        # only lines up when stripe rows start at 0 inside the ROI — confirm.
        for sp in range(len(sx)):
            splot_y = sy[sp]
            if sp in lx:
                lplot_y = ly[lx.index(sp)]
                pixDistence = lplot_y - splot_y  # laser offset from reference, px
                deepth = round(pixDistence * (1 / 13), 3)  # px -> cm (13 px/cm)
                p_x = round(deepth * math.cos(angle), 3)
                p_y = round(deepth * math.sin(angle), 3)
                p_z = round((len(sx) - sp) * (1 / 15))
                lineInfo[sp, 0] = p_x
                lineInfo[sp, 1] = p_y
                lineInfo[sp, 2] = p_z
        return lineInfo
class Scanner:
    """Coordinates the stepper, laser and camera to build a 3D point cloud."""
    # PIX_LENGTH = 100 / 24  # pixel length, in millimetres

    def __init__(self):
        self._stepper = Stepper()
        self._light = Light()
        self._camera = MyCamera()
        self._imageHandle = ImageHandle()
        self._stepper.openStep()  # enable the motor
        self._light.closeLight()  # make sure the laser starts off
        time.sleep(1)  # give the hardware time to settle
        self.model = None  # accumulated (x, y, z) points; created lazily

    def scan(self, angle):
        """Rotate the turntable by `angle` degrees, capturing and integrating
        one laser profile per motor step into self.model."""
        times = int((angle / 360) * self._stepper.ROUND)  # number of single steps
        self._stepper.openStep()  # enable the motor
        self._stepper.rightRun()  # set the rotation direction
        for t in range(times):
            # color_img = self._camera.getImage()  # capture a colour image
            # cv2.imwrite('color.jpg', color_img)  # save the colour image
            self._light.openLight()
            laser_img = self._camera.getImage()  # capture the laser image
            self._light.closeLight()
            start = time.time()
            print(t)
            lineInfo = self._imageHandle.getPosition(img=laser_img, angle=(math.pi) * (t * angle / self._stepper.ROUND))
            for l in lineInfo:
                print(l)
            if self.model is None:
                self.model = lineInfo
            else:
                self.model = np.append(self.model, lineInfo, axis=0)
            print(self.model.shape)
            print(time.time() - start)
            # Persist incrementally.  Bug fix: the original referenced the
            # module-level `scanner` global here instead of self, which breaks
            # for any instance not bound to that exact global name.
            self.saveAsFile('model', self.model)
            print("-----------------Next Time-----------------")
            self._stepper.stepOne()  # advance one step

    def saveAsFile(self, filename, file):
        """Save the point array to <filename>.npy."""
        # io.savemat(filename + ".mat", {'array': file})
        np.save(filename, file)

    def closeScanner(self):
        """Disable the motor and switch the laser off."""
        self._stepper.closeStep()
        self._light.closeLight()
        # self.saveAsFile('model', self.model)
if __name__ == '__main__':
    scanner = Scanner()
    try:
        # Run the scan directly.  The original did
        # pool.apply_async(scanner.scan(360), (1,)), which executes scan(360)
        # synchronously and hands its *return value* to the pool — the worker
        # processes never ran anything.  A Scanner also holds camera/serial
        # handles and is not picklable, so a pool cannot run it anyway.
        scanner.scan(360)
    finally:
        scanner.closeScanner()
    # scanner.saveAsFile('model', scanner.model)
|
{"hexsha": "595922c6e3776352486b39103ce8f3979d2abc62", "size": 10975, "ext": "py", "lang": "Python", "max_stars_repo_path": "Raspberry pi Programes/scanner.py", "max_stars_repo_name": "ElvisKing/Design-and-Implementation-of-Three-dimensional-Scanning-System-for-Structured-Light-Based-on-Raspber", "max_stars_repo_head_hexsha": "f6059a655c5d2f03464ef865df4d2146714a6714", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2019-06-06T14:13:41.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-30T01:35:58.000Z", "max_issues_repo_path": "Raspberry pi Programes/scanner.py", "max_issues_repo_name": "ElvisKing/Design-and-Implementation-of-Three-dimensional-Scanning-System-for-Structured-Light-Based-on-Raspber", "max_issues_repo_head_hexsha": "f6059a655c5d2f03464ef865df4d2146714a6714", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Raspberry pi Programes/scanner.py", "max_forks_repo_name": "ElvisKing/Design-and-Implementation-of-Three-dimensional-Scanning-System-for-Structured-Light-Based-on-Raspber", "max_forks_repo_head_hexsha": "f6059a655c5d2f03464ef865df4d2146714a6714", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-21T06:58:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-09T13:12:48.000Z", "avg_line_length": 31.0906515581, "max_line_length": 120, "alphanum_fraction": 0.5513439636, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3318}
|
module Geometry
# Geometry module: coordinate types, axis tensors, local/global geometry
# and conversions between representations.
using ..RecursiveApply
import LinearAlgebra
import StaticArrays: SVector
export ⊗
# NOTE(review): include order presumably matters — later files appear to
# build on definitions from earlier ones; confirm before reordering.
include("coordinates.jl")
include("axistensors.jl")
include("localgeometry.jl")
include("conversions.jl")
include("globalgeometry.jl")
end # module
|
{"hexsha": "a61f2bc23dd1f2e2b5e7dbc63b91e3b99ae53f81", "size": 251, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Geometry/Geometry.jl", "max_stars_repo_name": "CliMA/ClimaCore.jl", "max_stars_repo_head_hexsha": "e28309249a4c0dea0e8bb897b4dc9ebc376fa94e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 32, "max_stars_repo_stars_event_min_datetime": "2021-07-19T20:14:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T00:18:43.000Z", "max_issues_repo_path": "src/Geometry/Geometry.jl", "max_issues_repo_name": "CliMA/ClimaCore.jl", "max_issues_repo_head_hexsha": "e28309249a4c0dea0e8bb897b4dc9ebc376fa94e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 543, "max_issues_repo_issues_event_min_datetime": "2021-07-06T18:21:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T20:39:02.000Z", "max_forks_repo_path": "src/Geometry/Geometry.jl", "max_forks_repo_name": "CliMA/ClimateMachineCore.jl", "max_forks_repo_head_hexsha": "73dff5e125aa641b1560f3ca5163db472c1ed07a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-27T16:54:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-27T16:54:21.000Z", "avg_line_length": 14.7647058824, "max_line_length": 28, "alphanum_fraction": 0.7848605578, "num_tokens": 63}
|
import numpy as np
import os
import logging
logger = logging.getLogger('telemanom')
class Channel:
    def __init__(self, config, chan_id):
        """
        Load and reshape channel values (predicted and actual).

        Args:
            config (obj): Config object containing parameters for processing
            chan_id (str): channel id

        Attributes:
            id (str): channel id
            config (obj): see Args
            X_train (arr): training inputs with dimensions
                [timesteps, l_s, input dimensions)
            X_test (arr): test inputs with dimensions
                [timesteps, l_s, input dimensions)
            y_train (arr): actual channel training values with dimensions
                [timesteps, n_predictions, 1)
            y_test (arr): actual channel test values with dimensions
                [timesteps, n_predictions, 1)
            y_hat (arr): predicted test values (populated elsewhere)
            train (arr): train data loaded from .npy file
            test (arr): test data loaded from .npy file
        """
        self.id = chan_id
        self.config = config
        self.X_train = None
        self.y_train = None
        self.X_test = None
        self.y_test = None
        self.y_hat = None
        self.train = None
        self.test = None

    def shape_data(self, arr, train=True):
        """Shape raw input streams for ingestion into LSTM. config.l_s specifies
        the sequence length of prior timesteps fed into the model at
        each timestep t.

        Args:
            arr (np array): array of input streams with
                dimensions [timesteps, 1, input dimensions]
            train (bool): If shaping training data, this indicates
                data can be shuffled
        """
        # Sliding windows of length l_s + n_predictions over the stream.
        data = []
        for i in range(len(arr) - self.config.l_s - self.config.n_predictions):
            data.append(arr[i:i + self.config.l_s + self.config.n_predictions])
        data = np.array(data)
        assert len(data.shape) == 3

        if train:
            np.random.seed(42)  # fixed seed for reproducibility
            np.random.shuffle(data)
            self.X_train = data[:, :-self.config.n_predictions, :]
            self.y_train = data[:, -self.config.n_predictions:, 0]  # telemetry value is at position 0
        else:
            self.X_test = data[:, :-self.config.n_predictions, :]
            self.y_test = data[:, -self.config.n_predictions:, 0]  # telemetry value is at position 0

    def load_data(self):
        """
        Load train and test data from local .npy files, then shape both.

        Raises:
            FileNotFoundError: if either file is missing (after logging).
        """
        try:
            self.train = np.load(os.path.join("data", "train", "{}.npy".format(self.id)))
            self.test = np.load(os.path.join("data", "test", "{}.npy".format(self.id)))
        except FileNotFoundError as e:
            logger.critical(e)
            logger.critical("Source data not found, may need to add data to repo: <link>")
            # Bug fix: the original fell through with self.train/self.test
            # still None, so shape_data() below crashed with a cryptic
            # TypeError instead of surfacing the real cause.
            raise

        self.shape_data(self.train)
        self.shape_data(self.test, train=False)
|
{"hexsha": "480633dc72b37fbc8ce3b84ff799140f6434f25a", "size": 2962, "ext": "py", "lang": "Python", "max_stars_repo_path": "telemanom/channel.py", "max_stars_repo_name": "cc-pine/telemanom", "max_stars_repo_head_hexsha": "67599ff8d34bf36bf5a4bef61693a598830e7363", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "telemanom/channel.py", "max_issues_repo_name": "cc-pine/telemanom", "max_issues_repo_head_hexsha": "67599ff8d34bf36bf5a4bef61693a598830e7363", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "telemanom/channel.py", "max_forks_repo_name": "cc-pine/telemanom", "max_forks_repo_head_hexsha": "67599ff8d34bf36bf5a4bef61693a598830e7363", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.686746988, "max_line_length": 102, "alphanum_fraction": 0.5806887238, "include": true, "reason": "import numpy", "num_tokens": 651}
|
[STATEMENT]
lemma joinable_components_eq:
"connected t \<and> t \<subseteq> s \<and> c1 \<in> components s \<and> c2 \<in> components s \<and> c1 \<inter> t \<noteq> {} \<and> c2 \<inter> t \<noteq> {} \<Longrightarrow> c1 = c2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. connected t \<and> t \<subseteq> s \<and> c1 \<in> components s \<and> c2 \<in> components s \<and> c1 \<inter> t \<noteq> {} \<and> c2 \<inter> t \<noteq> {} \<Longrightarrow> c1 = c2
[PROOF STEP]
by (metis (full_types) components_iff joinable_connected_component_eq)
|
{"llama_tokens": 203, "file": null, "length": 1}
|
(*
* Copyright (C) 2014, National ICT Australia Limited. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* * The name of National ICT Australia Limited nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*)
(*
Miscellaneous library definitions and lemmas.
*)
header "Distinct Proposition"
theory DistinctProp
imports
"../lib/Lib"
"~~/src/HOL/Library/Sublist"
begin
text {* distinct\_prop *}
(* distinct_prop P xs: P x y holds for every pair where x occurs before y in xs. *)
primrec
  distinct_prop :: "('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('a list \<Rightarrow> bool)"
where
  "distinct_prop P [] = True"
| "distinct_prop P (x # xs) = ((\<forall>y\<in>set xs. P x y) \<and> distinct_prop P xs)"

(* distinct_prop over a mapped list reduces to distinct_prop of the composed predicate. *)
lemma distinct_prop_map:
  "distinct_prop P (map f xs)
    = distinct_prop (\<lambda>x y. P (f x) (f y)) xs"
  apply (induct xs)
   apply simp
  apply simp
  done

(* Splitting over append: both halves satisfy distinct_prop, and P relates
   every element of the first half to every element of the second. *)
lemma distinct_prop_append:
  "distinct_prop P (xs @ ys) =
    (distinct_prop P xs \<and> distinct_prop P ys \<and> (\<forall>x \<in> set xs. \<forall>y \<in> set ys. P x y))"
  apply (induct xs arbitrary: ys)
   apply simp
  apply (simp add: conj_ac ball_Un)
  done

(* A distinct list satisfies distinct_prop P when P holds for all pairs of
   distinct elements. *)
lemma distinct_prop_distinct:
  "\<lbrakk> distinct xs; \<And>x y. \<lbrakk> x \<in> set xs; y \<in> set xs; x \<noteq> y \<rbrakk> \<Longrightarrow> P x y \<rbrakk>
    \<Longrightarrow> distinct_prop P xs"
  apply (induct xs)
   apply simp
  apply clarsimp
  apply blast
  done

(* The trivially-true relation always satisfies distinct_prop. *)
lemma distinct_prop_True [simp]:
  "distinct_prop (\<lambda>x y. True) xs"
  by (induct xs, auto)
end
|
{"author": "jcaesar", "repo": "fixed-topos-header-space-analysis", "sha": "2da808ab41e5924d616ad1af15e8f50cb986c803", "save_path": "github-repos/isabelle/jcaesar-fixed-topos-header-space-analysis", "path": "github-repos/isabelle/jcaesar-fixed-topos-header-space-analysis/fixed-topos-header-space-analysis-2da808ab41e5924d616ad1af15e8f50cb986c803/thy/autocorres-0.98/lib/DistinctProp.thy"}
|
import unittest
import pyapprox as pya
import numpy as np
import matplotlib.pyplot as plt
from pyapprox.configure_plots import *
from pyapprox.control_variate_monte_carlo import *
from scipy.stats import uniform,norm,lognorm
from functools import partial
# Decorator skipping torch-dependent tests when the optional torch backend
# is unavailable (``use_torch`` is exported by control_variate_monte_carlo).
skiptest = unittest.skipIf(
    not use_torch, reason="torch package missing")
class PolynomialModelEnsemble(object):
    """Ensemble of five monomial models m_k(x) = x**(5-k) of a single
    uniform variable on [0, 1], with exact means and covariance computed
    by Gauss-Legendre quadrature."""

    def __init__(self):
        self.nmodels=5
        self.nvars=1
        self.models = [self.m0,self.m1,self.m2,self.m3,self.m4]

        univariate_variables = [uniform(0,1)]
        self.variable=pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        self.generate_samples=partial(
            pya.generate_independent_random_samples,self.variable)

    def m0(self,samples):
        return samples.T**5

    def m1(self,samples):
        return samples.T**4

    def m2(self,samples):
        return samples.T**3

    def m3(self,samples):
        return samples.T**2

    def m4(self,samples):
        return samples.T**1

    def _evaluate_on_quadrature(self):
        """Shared helper: return (vals, w) where ``vals[i, k]`` is model k
        evaluated at the i-th Gauss-Legendre point and ``w`` the weights."""
        gauss_legendre = partial(
            pya.gauss_jacobi_pts_wts_1D,alpha_poly=0,beta_poly=0)
        x,w = gauss_legendre(10)
        # scale points from the canonical [-1, 1] to [0, 1]
        x = (x[np.newaxis,:]+1)/2
        nsamples = x.shape[1]
        nqoi = len(self.models)
        vals = np.empty((nsamples,nqoi))
        for ii in range(nqoi):
            vals[:,ii] = self.models[ii](x)[:,0]
        return vals,w

    def get_means(self):
        """Quadrature estimate of the mean of each model (shape (5,))."""
        vals,w = self._evaluate_on_quadrature()
        means = vals.T.dot(w)
        return means

    def get_covariance_matrix(self):
        """Quadrature estimate of the 5x5 model covariance matrix."""
        vals,w = self._evaluate_on_quadrature()
        cov = np.cov(vals,aweights=w,rowvar=False,ddof=0)
        return cov
class TunableModelEnsemble(object):
    def __init__(self,theta1,shifts=None):
        """
        Ensemble of three bivariate polynomial models whose pairwise
        correlations are controlled by rotation angles.

        Parameters
        ----------
        theta1 : float
            Angle (radians) of the middle-fidelity model m1. Must satisfy
            theta0 > theta1 > theta2 with the fixed theta0=pi/2 and
            theta2=pi/6.
        shifts : length-2 sequence, optional
            Constant shifts added to the outputs of m1 and m2
            (defaults to [0, 0]).

        Notes
        -----
        The choice of A0, A1, A2 here results in unit variance for each model
        """
        self.A0 = np.sqrt(11)
        self.A1 = np.sqrt(7)
        self.A2 = np.sqrt(3)
        self.nmodels=3
        self.theta0=np.pi/2
        self.theta1=theta1
        self.theta2=np.pi/6
        assert self.theta0>self.theta1 and self.theta1>self.theta2
        self.shifts=shifts
        if self.shifts is None:
            self.shifts = [0,0]
        assert len(self.shifts)==2

        self.models = [self.m0,self.m1,self.m2]

        univariate_variables = [uniform(-1,2),uniform(-1,2)]
        self.variable=pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        self.generate_samples=partial(
            pya.generate_independent_random_samples,self.variable)

    def m0(self,samples):
        # Highest fidelity: quintic in (x, y).
        assert samples.shape[0]==2
        x,y=samples[0,:],samples[1,:]
        return (self.A0*(np.cos(self.theta0) * x**5 + np.sin(self.theta0) *
                         y**5))[:,np.newaxis]

    def m1(self,samples):
        # Middle fidelity: cubic in (x, y), optionally shifted.
        assert samples.shape[0]==2
        x,y=samples[0,:],samples[1,:]
        return (self.A1*(np.cos(self.theta1) * x**3 + np.sin(self.theta1) *
                         y**3)+self.shifts[0])[:,np.newaxis]

    def m2(self,samples):
        # Lowest fidelity: linear in (x, y), optionally shifted.
        assert samples.shape[0]==2
        x,y=samples[0,:],samples[1,:]
        return (self.A2*(np.cos(self.theta2) * x + np.sin(self.theta2) *
                         y)+self.shifts[1])[:,np.newaxis]

    def get_covariance_matrix(self):
        # Analytical covariance of (m0, m1, m2); the shifts do not affect it.
        cov = np.eye(self.nmodels)
        cov[0, 1] = self.A0*self.A1/9*(np.sin(self.theta0)*np.sin(
            self.theta1)+np.cos(self.theta0)*np.cos(self.theta1))
        cov[1, 0] = cov[0,1]
        cov[0, 2] = self.A0*self.A2/7*(np.sin(self.theta0)*np.sin(
            self.theta2)+np.cos(self.theta0)*np.cos(self.theta2))
        cov[2, 0] = cov[0, 2]
        cov[1, 2] = self.A1*self.A2/5*(
            np.sin(self.theta1)*np.sin(self.theta2)+np.cos(
                self.theta1)*np.cos(self.theta2))
        cov[2, 1] = cov[1,2]
        return cov
class ShortColumnModelEnsemble(object):
    """Five fidelities of the short-column limit-state function of five
    random variables (width b, depth h, axial load P, moment M, yield
    stress Y), with quadrature-based means and covariance."""

    def __init__(self):
        self.nmodels=5
        self.nvars=5
        self.models = [self.m0,self.m1,self.m2,self.m3,self.m4]
        # When True the last variable is treated as log(Y) and
        # exponentiated before the models are evaluated.
        self.apply_lognormal=False

        univariate_variables = [
            uniform(5,10),uniform(15,10),norm(500,100),norm(2000,400),
            lognorm(s=0.5,scale=np.exp(5))]
        self.variable = pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        self.generate_samples=partial(
            pya.generate_independent_random_samples,self.variable)

    def extract_variables(self,samples):
        """Unpack the five random variables (b, h, P, M, Y) from the rows
        of ``samples``, applying the optional lognormal transform to Y."""
        assert samples.shape[0]==5
        b = samples[0,:]
        h = samples[1,:]
        P = samples[2,:]
        M = samples[3,:]
        Y = samples[4,:]
        if self.apply_lognormal:
            Y = np.exp(Y)
        return b,h,P,M,Y

    def m0(self,samples):
        # Highest fidelity limit state.
        b,h,P,M,Y = self.extract_variables(samples)
        return (1 - 4*M/(b*(h**2)*Y) - (P/(b*h*Y))**2)[:,np.newaxis]

    def m1(self,samples):
        b,h,P,M,Y = self.extract_variables(samples)
        return (1 - 3.8*M/(b*(h**2)*Y) - (
            (P*(1 + (M-2000)/4000))/(b*h*Y))**2)[:,np.newaxis]

    def m2(self,samples):
        b,h,P,M,Y = self.extract_variables(samples)
        return (1 - M/(b*(h**2)*Y) - (P/(b*h*Y))**2)[:,np.newaxis]

    def m3(self,samples):
        b,h,P,M,Y = self.extract_variables(samples)
        return (1 - M/(b*(h**2)*Y) - (P*(1 + M)/(b*h*Y))**2)[:,np.newaxis]

    def m4(self,samples):
        b,h,P,M,Y = self.extract_variables(samples)
        return (1 - M/(b*(h**2)*Y) - (P*(1 + M)/(h*Y))**2)[:,np.newaxis]

    def get_quadrature_rule(self):
        """Tensor-product Gauss rule matching ``self.variable``
        (Legendre for the two uniforms, Hermite for the three normals)."""
        nvars = self.variable.num_vars()
        degrees=[10]*nvars
        var_trans = pya.AffineRandomVariableTransformation(self.variable)
        gauss_legendre = partial(
            pya.gauss_jacobi_pts_wts_1D,alpha_poly=0,beta_poly=0)
        univariate_quadrature_rules = [
            gauss_legendre,gauss_legendre,pya.gauss_hermite_pts_wts_1D,
            pya.gauss_hermite_pts_wts_1D,pya.gauss_hermite_pts_wts_1D]
        x,w = pya.get_tensor_product_quadrature_rule(
            degrees,self.variable.num_vars(),univariate_quadrature_rules,
            var_trans.map_from_canonical_space)
        return x,w

    def _evaluate_models_on_quadrature(self):
        # Shared helper used by get_covariance_matrix and get_means:
        # evaluate every model at the quadrature points.
        x,w = self.get_quadrature_rule()
        nsamples = x.shape[1]
        nqoi = len(self.models)
        vals = np.empty((nsamples,nqoi))
        for ii in range(nqoi):
            vals[:,ii] = self.models[ii](x)[:,0]
        return vals,w

    def get_covariance_matrix(self,variable=None):
        """Quadrature estimate of the 5x5 model covariance.

        Parameters
        ----------
        variable : optional
            If given, temporarily replaces ``self.variable`` when building
            the quadrature rule (e.g. a Gaussian surrogate for the
            lognormal variable). Omitting it preserves the old behavior.
        """
        if variable is not None:
            saved_variable = self.variable
            self.variable = variable
        try:
            vals,w = self._evaluate_models_on_quadrature()
        finally:
            if variable is not None:
                self.variable = saved_variable
        cov = np.cov(vals,aweights=w,rowvar=False,ddof=0)
        return cov

    def get_means(self):
        """Quadrature estimate of each model's mean (shape (5,))."""
        vals,w = self._evaluate_models_on_quadrature()
        return vals.T.dot(w).squeeze()
def setup_check_variance_reduction_model_ensemble_short_column(
        nmodels=5,npilot_samples=None):
    """Return (model_ensemble, cov, generate_samples) for the short-column
    ensemble. If ``npilot_samples`` is given the covariance is estimated by
    Monte Carlo, otherwise by quadrature with a Gaussian surrogate for the
    lognormal variable."""
    example = ShortColumnModelEnsemble()
    model_ensemble = pya.ModelEnsemble(
        [example.models[ii] for ii in range(nmodels)])

    univariate_variables = [
        uniform(5,10),uniform(15,10),norm(500,100),norm(2000,400),
        lognorm(s=0.5,scale=np.exp(5))]
    variable=pya.IndependentMultivariateRandomVariable(univariate_variables)
    generate_samples=partial(
        pya.generate_independent_random_samples,variable)

    if npilot_samples is not None:
        # The number of pilot samples effects ability of numerical estimate
        # of variance reduction to match theoretical value
        cov, samples, weights = pya.estimate_model_ensemble_covariance(
            npilot_samples,generate_samples,model_ensemble)
    else:
        # it is difficult to create a quadrature rule for the lognormal
        # distribution so instead define the variable as normal and then
        # apply log transform
        univariate_variables = [
            uniform(5,10),uniform(15,10),norm(500,100),norm(2000,400),
            norm(loc=5,scale=0.5)]
        variable=pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        # Bug fix: get_covariance_matrix() takes no positional variable
        # argument, so install the Gaussian variable on the example before
        # building the quadrature-based covariance estimate.
        example.variable = variable
        example.apply_lognormal=True
        cov = example.get_covariance_matrix()[:nmodels,:nmodels]
        example.apply_lognormal=False
    return model_ensemble, cov, generate_samples
def setup_check_variance_reduction_model_ensemble_tunable():
    """Return (model_ensemble, cov, generate_samples) for the tunable
    three-model ensemble with theta1 = pi/4."""
    benchmark = TunableModelEnsemble(np.pi/4)
    ensemble = pya.ModelEnsemble(benchmark.models)
    covariance = benchmark.get_covariance_matrix()
    return ensemble, covariance, benchmark.generate_samples
def setup_check_variance_reduction_model_ensemble_polynomial():
    """Return (model_ensemble, cov, generate_samples) for the monomial
    ensemble with quadrature-based covariance."""
    benchmark = PolynomialModelEnsemble()
    ensemble = pya.ModelEnsemble(benchmark.models)
    covariance = benchmark.get_covariance_matrix()
    # Monte Carlo alternative kept for reference:
    #npilot_samples=int(1e6)
    #cov, samples, weights = pya.estimate_model_ensemble_covariance(
    #    npilot_samples,generate_samples,model_ensemble)
    return ensemble, covariance, benchmark.generate_samples
def check_variance_reduction(allocate_samples,generate_samples_and_values,
                             get_cv_weights,get_rsquared,setup_model,
                             rtol=1e-2,ntrials=1e3,max_eval_concurrency=1):
    """Empirically estimate the variance reduction of an ACV estimator and
    assert it matches the theoretical value to relative tolerance ``rtol``
    (skip the check when ``rtol`` is None)."""
    assert get_rsquared is not None
    model_ensemble, cov, generate_samples = setup_model()
    result = estimate_variance_reduction(
        model_ensemble, cov, generate_samples, allocate_samples,
        generate_samples_and_values, get_cv_weights, get_rsquared,
        ntrials, max_eval_concurrency)
    means, numerical_var_reduction, true_var_reduction = result
    #print('true',true_var_reduction,'numerical',numerical_var_reduction)
    #print(np.absolute(true_var_reduction-numerical_var_reduction),rtol*np.absolute(true_var_reduction))
    if rtol is not None:
        assert np.allclose(numerical_var_reduction, true_var_reduction,
                           rtol=rtol)
class TestCVMC(unittest.TestCase):
    # Tests for multi-fidelity / control-variate Monte Carlo sample
    # allocation and the resulting variance reduction.

    def setUp(self):
        # Seed the global RNG so the stochastic checks are reproducible.
        np.random.seed(1)

    def test_mlmc_sample_allocation(self):
        # The following will give mlmc with unit variance
        # and discrepancy variances 1,4,4
        target_cost = 81
        cov = np.asarray([[1.00,0.50,0.25],
                          [0.50,1.00,0.50],
                          [0.25,0.50,4.00]])
        # ensure cov is positive definite
        np.linalg.cholesky(cov)
        #print(np.linalg.inv(cov))
        costs = [6,3,1]
        nmodels = len(costs)
        nhf_samples,nsample_ratios, log10_var = pya.allocate_samples_mlmc(
            cov, costs, target_cost)
        assert np.allclose(10**log10_var,1)
        nsamples = np.concatenate([[1],nsample_ratios])*nhf_samples
        lamda = 9
        nsamples_discrepancy = 9*np.sqrt(np.asarray([1/(6+3),4/(3+1),4]))
        # Per-model sample counts implied by the per-discrepancy counts.
        nsamples_true = [
            nsamples_discrepancy[0],nsamples_discrepancy[:2].sum(),
            nsamples_discrepancy[1:3].sum()]
        assert np.allclose(nsamples,nsamples_true)

    def test_standardize_sample_ratios(self):
        # Standardization rounds the sample ratios to one decimal place.
        nhf_samples,nsample_ratios = 10,[2.19,3.32]
        std_nhf_samples, std_nsample_ratios = pya.standardize_sample_ratios(
            nhf_samples,nsample_ratios)
        assert np.allclose(std_nsample_ratios,[2.1,3.3])

    def test_generate_samples_and_values_mfmc(self):
        functions = ShortColumnModelEnsemble()
        model_ensemble = pya.ModelEnsemble(
            [functions.m0,functions.m1,functions.m2])
        univariate_variables = [
            uniform(5,10),uniform(15,10),norm(500,100),norm(2000,400),
            lognorm(s=0.5,scale=np.exp(5))]
        variable=pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        generate_samples=partial(
            pya.generate_independent_random_samples,variable)
        nhf_samples = 10
        nsample_ratios = [2,4]
        samples,values =\
            pya.generate_samples_and_values_mfmc(
                nhf_samples,nsample_ratios,model_ensemble,generate_samples)
        # Check the MFMC nesting structure: each model's first sample set
        # is reused from the previous model's samples.
        for jj in range(1,len(samples)):
            assert samples[jj][1].shape[1]==nsample_ratios[jj-1]*nhf_samples
            idx=1
            if jj==1:
                idx=0
            assert np.allclose(samples[jj][0],samples[jj-1][idx])

    def test_rsquared_mfmc(self):
        # Compare the closed-form MFMC variance with the value implied by
        # get_rsquared_mfmc using a pilot-sample covariance estimate.
        functions = ShortColumnModelEnsemble()
        model_ensemble = pya.ModelEnsemble(
            [functions.m0,functions.m3,functions.m4])
        univariate_variables = [
            uniform(5,10),uniform(15,10),norm(500,100),norm(2000,400),
            lognorm(s=0.5,scale=np.exp(5))]
        variable=pya.IndependentMultivariateRandomVariable(
            univariate_variables)
        generate_samples=partial(
            pya.generate_independent_random_samples,variable)
        npilot_samples = int(1e4)
        pilot_samples = generate_samples(npilot_samples)
        config_vars = np.arange(model_ensemble.nmodels)[np.newaxis,:]
        pilot_samples = pya.get_all_sample_combinations(
            pilot_samples,config_vars)
        pilot_values = model_ensemble(pilot_samples)
        pilot_values = np.reshape(
            pilot_values,(npilot_samples,model_ensemble.nmodels))
        cov = np.cov(pilot_values,rowvar=False)

        nhf_samples = 10
        nsample_ratios = np.asarray([2,4])
        nsamples_per_model = np.concatenate(
            [[nhf_samples],nsample_ratios*nhf_samples])

        eta = pya.get_mfmc_control_variate_weights(cov)
        cor = pya.get_correlation_from_covariance(cov)
        # Closed-form MFMC estimator variance (Peherstorfer et al.).
        var_mfmc = cov[0,0]/nsamples_per_model[0]
        for k in range(1,model_ensemble.nmodels):
            var_mfmc+=(1/nsamples_per_model[k-1]-1/nsamples_per_model[k])*(
                eta[k-1]**2*cov[k,k]+2*eta[k-1]*cor[0,k]*np.sqrt(
                    cov[0,0]*cov[k,k]))

        assert np.allclose(var_mfmc/cov[0,0]*nhf_samples,
                           1-pya.get_rsquared_mfmc(cov,nsample_ratios))

    def test_variance_reduction_acv_IS(self):
        # ACV with independent-sample discrepancies.
        setup_model = setup_check_variance_reduction_model_ensemble_tunable
        allocate_samples = pya.allocate_samples_mfmc
        generate_samples_and_values = generate_samples_and_values_acv_IS
        get_cv_weights = partial(
            get_approximate_control_variate_weights,
            get_discrepancy_covariances=get_discrepancy_covariances_IS)
        get_rsquared = partial(
            get_rsquared_acv,
            get_discrepancy_covariances=get_discrepancy_covariances_IS)
        check_variance_reduction(
            allocate_samples, generate_samples_and_values,
            get_cv_weights, get_rsquared, setup_model, rtol=1e-2, ntrials=4e3)

    def test_variance_reduction_acv_MF(self):
        # ACV with multi-fidelity (nested) discrepancies.
        setup_model = \
            setup_check_variance_reduction_model_ensemble_tunable
        allocate_samples = pya.allocate_samples_mfmc
        generate_samples_and_values = partial(
            generate_samples_and_values_mfmc, acv_modification=True)
        get_cv_weights = partial(
            get_approximate_control_variate_weights,
            get_discrepancy_covariances=get_discrepancy_covariances_MF)
        get_rsquared = partial(
            get_rsquared_acv,
            get_discrepancy_covariances=get_discrepancy_covariances_MF)
        check_variance_reduction(
            allocate_samples, generate_samples_and_values,
            get_cv_weights, get_rsquared, setup_model, ntrials=1e4,
            rtol=1e-2)

    def test_variance_reduction_acv_KL(self):
        KL_sets = [[4,1],[3,1],[3,2],[3,3],[2,1],[2,2]]
        # Note K,L=[nmodels-1,i], for all i<=nmodels-1, e.g. [4,0],
        # will give same result as acv_mf
        for K,L in KL_sets:
            #print(K,L)
            setup_model = \
                setup_check_variance_reduction_model_ensemble_polynomial
            allocate_samples = pya.allocate_samples_mfmc
            generate_samples_and_values = partial(
                generate_samples_and_values_acv_KL,K=K,L=L)
            get_discrepancy_covariances = partial(
                get_discrepancy_covariances_KL,K=K,L=L)
            get_cv_weights = partial(
                get_approximate_control_variate_weights,
                get_discrepancy_covariances=get_discrepancy_covariances)
            get_rsquared = partial(
                get_rsquared_acv,
                get_discrepancy_covariances=get_discrepancy_covariances)

            # Check sizes of samples allocated to each model are correct
            model_ensemble, cov, generate_samples = setup_model()
            nmodels = cov.shape[0]
            target_cost = int(1e4)
            costs = np.asarray([100//2**ii for ii in range(nmodels)])
            nhf_samples, nsample_ratios = allocate_samples(
                cov, costs, target_cost)[:2]
            samples,values = generate_samples_and_values(
                nhf_samples,nsample_ratios,model_ensemble,generate_samples)
            for ii in range(0,K+1):
                assert samples[ii][0].shape[1]==nhf_samples
                assert values[ii][0].shape[0]==nhf_samples
            for ii in range(K+1,nmodels):
                assert samples[ii][0].shape[1]==samples[L][1].shape[1]
                assert values[ii][0].shape[0]==samples[L][1].shape[1]
            for ii in range(1,K+1):
                assert samples[ii][1].shape[1]==\
                    nsample_ratios[ii-1]*nhf_samples
                assert values[ii][1].shape[0]==\
                    nsample_ratios[ii-1]*nhf_samples
            for ii in range(K+1,nmodels):
                assert samples[ii][1].shape[1]==\
                    nsample_ratios[ii-1]*nhf_samples
                assert values[ii][1].shape[0]==samples[ii][1].shape[1]

            check_variance_reduction(
                allocate_samples, generate_samples_and_values,
                get_cv_weights, get_rsquared, setup_model, ntrials=int(3e4),
                max_eval_concurrency=1)

    def test_variance_reduction_mfmc(self):
        setup_model = \
            setup_check_variance_reduction_model_ensemble_tunable
        allocate_samples = pya.allocate_samples_mfmc
        generate_samples_and_values = generate_samples_and_values_mfmc
        get_cv_weights = get_mfmc_control_variate_weights_pool_wrapper
        get_rsquared = get_rsquared_mfmc
        check_variance_reduction(
            allocate_samples, generate_samples_and_values,
            get_cv_weights, get_rsquared, setup_model, rtol=1e-2,
            ntrials=int(1e4),max_eval_concurrency=1)

    def test_variance_reduction_mlmc(self):
        setup_model = \
            setup_check_variance_reduction_model_ensemble_polynomial
        allocate_samples = pya.allocate_samples_mlmc
        generate_samples_and_values = generate_samples_and_values_mlmc
        get_cv_weights = get_mlmc_control_variate_weights_pool_wrapper
        get_rsquared = get_rsquared_mlmc
        check_variance_reduction(
            allocate_samples, generate_samples_and_values,
            get_cv_weights, get_rsquared, setup_model, ntrials=5e4,
            max_eval_concurrency=1)

    def test_CVMC(self):
        # Classical control variates with known low-fidelity means.
        nhf_samples = 10
        model_ensemble, cov, generate_samples = \
            setup_check_variance_reduction_model_ensemble_polynomial()
        lf_means = PolynomialModelEnsemble().get_means()[1:]
        true_gamma = 1-get_control_variate_rsquared(cov)
        eta = get_control_variate_weights(cov)
        ntrials = int(5e3)
        means = np.empty((ntrials,2))
        for ii in range(ntrials):
            samples = generate_samples(nhf_samples)
            values = [f(samples) for f in model_ensemble.functions]
            # compute mean using only hf data
            hf_mean = values[0].mean()
            # compute ACV mean
            acv_mean = compute_control_variate_mean_estimate(
                eta,values,lf_means)
            means[ii,:] = hf_mean,acv_mean
        numerical_gamma=means[:,1].var(axis=0)/means[:,0].var(axis=0)
        rtol=1e-2
        #print('true',true_gamma,'numerical',numerical_gamma)
        #print(np.absolute(true_gamma-numerical_gamma),
        #      rtol*np.absolute(true_gamma))
        assert np.allclose(true_gamma,numerical_gamma,rtol=4e-2)

    def test_allocate_samples_mlmc_lagrange_formulation(self):
        cov = np.asarray([[1.00,0.50,0.25],
                          [0.50,1.00,0.50],
                          [0.25,0.50,4.00]])
        costs = np.array([6, 3, 1])
        target_cost = 81

        estimator = MLMC(cov,costs)
        estimator.use_lagrange_formulation(True)

        nhf_samples_exact, nsample_ratios_exact = allocate_samples_mlmc(
            cov,costs,target_cost,standardize=False)[:2]

        # The exact allocation must spend exactly the target budget.
        estimator_cost = nhf_samples_exact*costs[0]+(
            nsample_ratios_exact*nhf_samples_exact).dot(costs[1:])
        assert np.allclose(estimator_cost,target_cost,rtol=1e-12)

        lagrange_mult = pya.get_lagrange_multiplier_mlmc(
            cov,costs,nhf_samples_exact)
        #print('lagrange_mult',lagrange_mult)

        x0 = np.concatenate([[nhf_samples_exact],nsample_ratios_exact,
                             [lagrange_mult]])
        if use_torch:
            jac = estimator.jacobian(x0)
            # objective does not have lagrangian shift so account for it
            # missing here
            mlmc_var = estimator.variance_reduction(
                nsample_ratios_exact).item()*cov[0,0]/nhf_samples_exact
            jac[-1]-=mlmc_var
        else:
            jac=None

        estimator.use_lagrange_formulation(False)

        optim_method='SLSQP'
        #optim_method='trust-constr'
        # Perturb the exact solution to form the optimizer's initial guess.
        factor=1-0.1
        initial_guess = np.concatenate([
            [x0[0]*np.random.uniform(factor,1/factor)],
            x0[1:-1]*np.random.uniform(factor,1/factor,x0.shape[0]-2)])

        nhf_samples,nsample_ratios,var=allocate_samples_acv(
            cov, costs, target_cost, estimator,
            standardize=False,initial_guess=initial_guess,
            optim_method=optim_method)

        #print(nhf_samples,nhf_samples_exact)
        #print(nsample_ratios_exact,nsample_ratios)
        assert np.allclose(nhf_samples_exact,nhf_samples)
        assert np.allclose(nsample_ratios_exact,nsample_ratios)

    def test_ACVMC_sample_allocation(self):
        np.random.seed(1)
        # Build a random positive-definite covariance via Cholesky sampling.
        matr = np.random.randn(3,3)
        cov_should = np.dot(matr, matr.T)
        L = np.linalg.cholesky(cov_should)
        samp = np.dot(np.random.randn(100000, 3),L.T)
        cov = np.cov(samp, rowvar=False)
        #model_ensemble, cov, generate_samples = \
        #    setup_check_variance_reduction_model_ensemble_polynomial()
        costs = [4, 2, 1]
        target_cost = 20
        nhf_samples_init, nsample_ratios_init = allocate_samples_mlmc(
            cov, costs, target_cost, standardize=True)[:2]
        initial_guess = np.concatenate(
            [[nhf_samples_init],nsample_ratios_init])
        nhf_samples,nsample_ratios,log10_var=allocate_samples_acv_best_kl(
            cov,costs,target_cost,standardize=True,
            initial_guess=initial_guess)
        print("opt = ", nhf_samples, nsample_ratios, log10_var)
        # this is a regression test to make sure optimization produces
        # answer consistently. It is hard to determine and exact solution
        regression_log10_var = np.asarray([
            0.5159013235987686,-0.2153434757601942,-0.2153434757601942])
        assert np.allclose(log10_var,regression_log10_var.min())
        gamma = 1-get_rsquared_acv_KL_best(cov,nsample_ratios)
        print(gamma)
        # To recover alexs answer use his standardization and initial guess
        # is mlmc with standardize=True')

    @skiptest
    def test_ACVMC_objective_jacobian(self):
        # Finite-difference check of the torch-based allocation jacobian.
        cov = np.asarray([[1.00,0.50,0.25],
                          [0.50,1.00,0.50],
                          [0.25,0.50,4.00]])
        costs = [4, 2, 1]
        target_cost = 20
        nhf_samples, nsample_ratios = pya.allocate_samples_mlmc(
            cov, costs, target_cost)[:2]
        estimator = ACVMF(cov,costs)
        errors = pya.check_gradients(
            partial(acv_sample_allocation_objective,estimator),
            partial(acv_sample_allocation_jacobian_torch,estimator),
            nsample_ratios[:,np.newaxis],disp=False)
        #print(errors.min())
        assert errors.min()<1e-8

    def test_bootstrap_monte_carlo_estimator(self):
        # Bootstrap variance of the plain MC mean should match the
        # analytical var(values)/nsamples.
        nsamples = int(1e4)
        nbootstraps=int(1e3)
        values = np.random.normal(1.,1.,(nsamples,1))
        est_variance = np.var(values)/nsamples
        bootstrap_mean,bootstrap_variance = \
            pya.bootstrap_monte_carlo_estimator(values,nbootstraps)
        print(abs(est_variance-bootstrap_variance)/est_variance)
        assert abs((est_variance-bootstrap_variance)/est_variance)<1e-2

    def test_bootstrap_control_variate_estimator(self):
        # Bootstrap variance of the MFMC estimator should match the
        # estimator's own variance prediction.
        example = TunableModelEnsemble(np.pi/2*0.95)
        model_ensemble = pya.ModelEnsemble(example.models)
        univariate_variables = [uniform(-1,2),uniform(-1,2)]
        variable=pya.IndependentMultivariateRandomVariable(univariate_variables)
        cov_matrix = example.get_covariance_matrix()
        model_costs = [1,0.5,0.4]
        est = ACVMF(cov_matrix,model_costs)
        target_cost = 1000
        nhf_samples,nsample_ratios = est.allocate_samples(target_cost)[:2]
        generate_samples = partial(
            pya.generate_independent_random_samples,variable)
        samples,values=est.generate_data(
            nhf_samples,nsample_ratios,generate_samples,model_ensemble)
        mc_cov_matrix = compute_covariance_from_control_variate_samples(values)
        #assert np.allclose(cov_matrix,mc_cov_matrix,atol=1e-2)
        est = ACVMF(mc_cov_matrix,model_costs)
        weights = get_mfmc_control_variate_weights(
            example.get_covariance_matrix())
        bootstrap_mean,bootstrap_variance = \
            pya.bootstrap_mfmc_estimator(values,weights,10000)
        est_mean = est(values)
        est_variance = est.get_variance(nhf_samples,nsample_ratios)
        print(abs((est_variance-bootstrap_variance)/est_variance))
        assert abs((est_variance-bootstrap_variance)/est_variance)<6e-2
if __name__== "__main__":
    # Run only the CVMC test case with verbose per-test output.
    cvmc_test_suite = unittest.TestLoader().loadTestsFromTestCase(
        TestCVMC)
    unittest.TextTestRunner(verbosity=2).run(cvmc_test_suite)
|
{"hexsha": "7c6ee7f51088b6ad3d5f51ad2d9168bfb9a3f25e", "size": 26911, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyapprox/tests/test_control_variate_monte_carlo.py", "max_stars_repo_name": "ConnectedSystems/pyapprox", "max_stars_repo_head_hexsha": "4f405654c707cba83d211f327c0f0fdbc95efa29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2019-12-16T02:21:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T09:59:18.000Z", "max_issues_repo_path": "pyapprox/tests/test_control_variate_monte_carlo.py", "max_issues_repo_name": "ConnectedSystems/pyapprox", "max_issues_repo_head_hexsha": "4f405654c707cba83d211f327c0f0fdbc95efa29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-03-03T03:04:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-19T22:50:42.000Z", "max_forks_repo_path": "pyapprox/tests/test_control_variate_monte_carlo.py", "max_forks_repo_name": "ConnectedSystems/pyapprox", "max_forks_repo_head_hexsha": "4f405654c707cba83d211f327c0f0fdbc95efa29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-03-02T03:49:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-17T02:07:53.000Z", "avg_line_length": 39.9272997033, "max_line_length": 104, "alphanum_fraction": 0.6339415109, "include": true, "reason": "import numpy,from scipy", "num_tokens": 6819}
|
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import math
import pandas
from StringIO import StringIO
from collections import Counter
from matplotlib.patches import Rectangle
import seaborn as sns
import pyBigWig
def computeMatrix(bwfile,boundarylist,chrn,winsize,res,chrs_l):
    # Build a per-boundary signal profile: one row per boundary, columns
    # covering [-winsize, +winsize] bins of width `res` bp centred on the
    # boundary. Boundaries too close to either chromosome end are skipped
    # (their rows stay zero). Negative bin averages are clipped to 0.
    profile = np.zeros((len(boundarylist), 2*winsize+1))
    kept = boundarylist[(boundarylist > winsize) &
                        (boundarylist < (chrs_l - winsize*res)/res)]
    for bi in range(0, len(kept)):
        for off in range(0, winsize+1):
            left = kept[bi] - off
            right = kept[bi] + off
            lsum = np.nansum(np.array(
                bwfile.values(chrn, int(left*res), int((left+1)*res))))
            rsum = np.nansum(np.array(
                bwfile.values(chrn, int(right*res), int((right+1)*res))))
            profile[bi, winsize-off] = max(0, lsum/res)
            profile[bi, winsize+off] = max(0, rsum/res)
    return profile
# hg19 chromosome lengths (chr1..chr22), indexed by chromosome number - 1;
# used to drop boundaries too close to a chromosome end.
chrs_length = [249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,59128983,63025520,48129895,51304566]
# Hi-C bin size in bp.
res = 10000
# ChIP-seq signal track profiled around TAD boundaries (Rad21 cohesin here;
# CTCF and SMC3 alternatives kept below for reference).
bw = pyBigWig.open("/storage/home/lua137/work/TADcalling/data/ENCODE-rad21.signal.bigWig")
#bw = pyBigWig.open("/storage/home/lua137/work/TADcalling/data/E116-Ctcf.fc.signal.bigwig")
#bw = pyBigWig.open("/storage/home/lua137/work/TADcalling/data/ENCODE-smc3.fc.signal.bigWig")
# Genome-wide profile accumulators, one per TAD caller / boundary class;
# 21 columns = 2*winsize+1 bins with winsize=10.
cDIall = np.empty((0,21))
cArrowall = np.empty((0,21))
crGMAPball = np.empty((0,21))
cdpTAD_rawball = np.empty((0,21))
cdpTAD_rawbl1 = np.empty((0,21))
cdpTAD_rawbl2 = np.empty((0,21))
cdpTAD_corball = np.empty((0,21))
cdpTAD_corbl1 = np.empty((0,21))
cdpTAD_corbl2 = np.empty((0,21))
cTADtreeTADball = np.empty((0,21))
cICTADball = np.empty((0,21))
cdpTAD_rawsoloball = np.empty((0,21))
cdpTAD_rawhierball = np.empty((0,21))
cdpTAD_rawoverball = np.empty((0,21))
cdpTAD_hollowball = np.empty((0,21))
cdpTAD_pknormball = np.empty((0,21))
def getlevel(tads):
    # Annotate each TAD (rows of [start, end]) with two nesting depths:
    # a "forward" level counting how many larger TADs contain it, and a
    # "reverse" level counting how deeply it nests smaller TADs. Returns
    # the TADs sorted largest-first with columns [start, end, flevel,
    # reversed rlevel].
    sizes = tads[:,1] - tads[:,0]
    ftads = tads[sizes.argsort()[::-1],:]   # largest first
    rtads = tads[sizes.argsort(),:]         # smallest first
    flevel = np.ones(len(tads))
    rlevel = np.ones(len(tads))
    for i in range(0, len(tads)):
        containers = []
        contents = []
        for j in range(0, i):
            if rtads[i,0] <= rtads[j,0] and rtads[i,1] >= rtads[j,1]:
                contents.append(rlevel[j])
            if ftads[i,0] >= ftads[j,0] and ftads[i,1] <= ftads[j,1]:
                containers.append(flevel[j])
        if len(contents) >= 1:
            rlevel[i] = max(contents) + 1
        if len(containers) >= 1:
            flevel[i] = max(containers) + 1
    return np.column_stack((ftads, flevel, rlevel[::-1]))
# Counters for hierarchical vs solitary TADs found by the DP caller.
nhier =0
nsolo = 0
# Profile every autosome except chr1 (excluded by the range) and chr9.
for chrnum in range(2,23):
    if chrnum in [9]:
        continue
    else:
        # DI caller: boundaries from columns 1-2 (bp), converted to bins.
        DItad = pandas.read_table('/storage/home/lua137/work/TADcalling/DI_TAD/hg19/GM12878/10kb/GM12878_10kb_chr'+str(chrnum)+'.add.DI.out.7col.final',sep='\t',header=None)
        DI=DItad.loc[:,1:2].values/res
        DIb=np.unique(DI.flatten())
        DIb = DIb[~np.isnan(DIb)]
        print len(DIb),len(DI)
        # Arrowhead (juicer) boundaries.
        Arrowhead = pandas.read_table('/storage/home/lua137/work/TADcalling/juicer/Arrowhead.Gm12878.10kb.KR.chr'+str(chrnum),sep='\t',header=None)
        Arrow=Arrowhead.loc[:,1:2].values/res
        Arrowb=np.unique(Arrow.flatten())
        print len(Arrowb),len(Arrow)
        # TADtree output: use the run with the largest N*.txt file.
        import glob, os
        flist=[]
        os.chdir('/storage/home/lua137/work/TADcalling/TADtree/final_alg/10kb/Gm12878/chr'+str(chrnum))
        for file in glob.glob("N*.txt"):
            flist.append(int(file.split('.')[0].split('N')[1]))
        TADtree = pandas.read_table('/storage/home/lua137/work/TADcalling/TADtree/final_alg/10kb/Gm12878/chr'+str(chrnum)+'/N'+str(max(flist))+'.txt',sep='\t',header=0)
        TADtreeTAD = TADtree[['start','end']].values-1
        TADtreeTADb = np.unique(TADtreeTAD.flatten())
        print len(TADtreeTADb),len(TADtreeTAD)
        # rGMAP boundaries.
        rG = pandas.read_table('/storage/home/lua137/work/TADcalling/rGMAP/GM12878_combined_10000_chr'+str(chrnum)+'.rGMAPTAD',sep='\t',header=None)
        rGMAP=rG.loc[:,0:1].values/res
        rGMAPb=np.unique(rGMAP.flatten())
        print len(rGMAPb), len(rGMAP)
        # IC-Finder disabled:
        # ICFinder = pandas.read_table('/storage/home/lua137/work/TADcalling/IC-Finder/IC-Finder/Gm12878/chr'+str(chrnum)+'.domain',sep='\t',header=None)
        # ICTAD=ICFinder.values-1
        # ICTADb=np.unique(ICTAD.flatten())
        # len(ICTADb),len(ICTAD)
        # DP caller on the raw matrix (penalty 0.1); classify boundaries as
        # solitary vs hierarchical using getlevel().
        dpTAD_raw = pandas.read_table('/storage/home/lua137/work/TADcalling/dpruns/Gm12878/10kb/'+'dp_raw_pen0.1_newest.chr'+str(chrnum),sep='\t',header=None)
        dpTAD_rawa1 = dpTAD_raw.loc[dpTAD_raw[5]>0,:].values[:,0:3]
        dpTAD_rawa = dpTAD_rawa1[dpTAD_rawa1[:,2]>0,0:2]-1
        dpTAD_rawfr = getlevel(dpTAD_rawa)
        dpTAD_rawsoloa = dpTAD_rawfr[(dpTAD_rawfr[:,3]==1)&(dpTAD_rawfr[:,2]==1),0:2]
        dpTAD_rawhiera = dpTAD_rawfr[np.logical_or((dpTAD_rawfr[:,3]!=1),(dpTAD_rawfr[:,2]!=1)),0:2]
        dpTAD_rawb = np.unique(dpTAD_rawa.flatten())
        dpTAD_rawsolob1 = np.unique(dpTAD_rawsoloa.flatten())
        dpTAD_rawhierb1 = np.unique(dpTAD_rawhiera.flatten())
        # Boundaries exclusive to solo TADs, exclusive to hierarchies, or
        # shared by both.
        dpTAD_rawsolob = np.setdiff1d(dpTAD_rawsolob1,dpTAD_rawhierb1)
        dpTAD_rawhierb = np.setdiff1d(dpTAD_rawhierb1,dpTAD_rawsolob1)
        dpTAD_rawoverb = np.intersect1d(dpTAD_rawsolob1,dpTAD_rawhierb1)
        print len(np.unique(dpTAD_rawb)),len(dpTAD_rawa)
        nhier += np.shape(dpTAD_rawhiera)[0]
        nsolo += np.shape(dpTAD_rawsoloa)[0]
        # DP caller on the "hollow" normalized matrix.
        dpTAD_hollow = pandas.read_table('/storage/home/lua137/work/TADcalling/dpruns/Gm12878/10kb/'+'dp_raw_pen0.1_hollow.chr'+str(chrnum),sep='\t',header=None)
        dpTAD_hollowa1 = dpTAD_hollow.loc[dpTAD_hollow[5]>0,:].values[:,0:3]
        dpTAD_hollowa = dpTAD_hollowa1[dpTAD_hollowa1[:,2]>0,0:2]-1
        dpTAD_hollowb = np.unique(dpTAD_hollowa.flatten())
        print len(np.unique(dpTAD_hollowb)),len(dpTAD_hollowa)
        # DP caller on the peak-normalized matrix.
        dpTAD_pknorm = pandas.read_table('/storage/home/lua137/work/TADcalling/dpruns/Gm12878/10kb/'+'dp_raw_pen0.1_pknorm.chr'+str(chrnum),sep='\t',header=None)
        dpTAD_pknorma1 = dpTAD_pknorm.loc[dpTAD_pknorm[5]>0,:].values[:,0:3]
        dpTAD_pknorma = dpTAD_pknorma1[dpTAD_pknorma1[:,2]>0,0:2]-1
        dpTAD_pknormb = np.unique(dpTAD_pknorma.flatten())
        print len(np.unique(dpTAD_pknormb)),len(dpTAD_pknorma)
        # Accumulate boundary-centred signal profiles (winsize=10 bins).
        cDIall = np.append(cDIall,computeMatrix(bw,DIb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        cArrowall = np.append(cArrowall,computeMatrix(bw,Arrowb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        crGMAPball = np.append(crGMAPball,computeMatrix(bw,rGMAPb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        cdpTAD_rawball = np.append(cdpTAD_rawball,computeMatrix(bw,dpTAD_rawb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawsoloball = np.append(cdpTAD_rawsoloball,computeMatrix(bw,dpTAD_rawsolob,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawhierball = np.append(cdpTAD_rawhierball,computeMatrix(bw,dpTAD_rawhierb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawoverball = np.append(cdpTAD_rawoverball,computeMatrix(bw,dpTAD_rawoverb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_corball = np.append(cdpTAD_corball,computeMatrix(bw,dpTAD_corb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_pknormball = np.append(cdpTAD_pknormball,computeMatrix(bw,dpTAD_pknormb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_hollowball = np.append(cdpTAD_hollowball,computeMatrix(bw,dpTAD_hollowb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawbl1 = np.append(cdpTAD_rawbl1,computeMatrix(bw,np.unique(dpTAD_rawa1[dpTAD_rawa1[:,2]==1,0:2].flatten())-1,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawbl2 = np.append(cdpTAD_rawbl2,computeMatrix(bw,np.unique(dpTAD_rawa1[dpTAD_rawa1[:,2]==2,0:2].flatten())-1,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawbl3 = np.append(cdpTAD_rawbl2,computeMatrix(bw,np.unique(dpTAD_rawa1[dpTAD_rawa1[:,2]==3,0:2].flatten())-1,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        #cdpTAD_rawbl4 = np.append(cdpTAD_rawbl2,computeMatrix(bw,np.unique(dpTAD_rawa1[dpTAD_rawa1[:,2]>3,0:2].flatten())-1,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        cTADtreeTADball = np.append(cTADtreeTADball,computeMatrix(bw,TADtreeTADb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
        # cICTADball = np.append(cICTADball,computeMatrix(bw,ICTADb,'chr'+str(chrnum),10,10000,chrs_length[chrnum-1]), axis=0)
print nhier,nsolo
# Save the per-caller mean profiles for downstream plotting.
out = np.array([np.mean(cdpTAD_rawball, axis=0),np.mean(cDIall, axis=0),np.mean(cArrowall, axis=0),np.mean(crGMAPball, axis=0),np.mean(cTADtreeTADball, axis=0)])
np.savetxt('/storage/home/lua137/work/TADcalling/Rad21enrichment.wholegenome.compare.txt',out)
# The block below is legacy plotting code, disabled by wrapping it in a
# module-level string literal; kept verbatim for reference.
'''
plt.figure(6)
fig,ax = plt.subplots(1)
ax.plot(np.mean(cdpTAD_rawball, axis=0),c='m',label='Our method (n = {1:d})'
''.format(0, np.shape(cdpTAD_rawball)[0]))
#ax.plot(np.mean(cdpTAD_corball, axis=0),c='m',label='Our method (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_corball)[0]))
#ax.plot(np.mean(cdpTAD_pknormball, axis=0),c='g',label='DP_pknorm (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_pknormball)[0]))
ax.plot(np.mean(cDIall, axis=0),label='DIcaller (n = {1:d})'
''.format(0, np.shape(cDIall)[0]))
ax.plot(np.mean(cArrowall, axis=0),c='r',label='Arrowhead (n = {1:d})'
''.format(0, np.shape(cArrowall)[0]))
#ax.plot(np.mean(cdpTAD_hollowball, axis=0),c='g',label='DP_rawhollowall (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_hollowball)[0]))
#ax.plot(np.mean(cdpTAD_rawhierball, axis=0),c='y',label='DP_rawhierall (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawhierball)[0]))
#ax.plot(np.mean(cdpTAD_rawsoloball, axis=0),c='g',label='DP_rawsoloall (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawsoloball)[0]))
#ax.plot(np.mean(cdpTAD_rawoverball, axis=0),c='b',label='DP_rawoverall (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawoverball)[0]))
#ax.plot(np.mean(cdpTAD_corbl1, axis=0),c='k',linestyle='dashed',label='DP_cor > 1 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_corbl1)[0]))
#ax.plot(np.mean(cdpTAD_rawbl1, axis=0),c='g',linestyle='dashed',label='DP_raw > 1 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawbl1)[0]))
#ax.plot(np.mean(cdpTAD_corbl2, axis=0),c='k',linestyle=':',label='DP_cor > 2 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_corbl2)[0]))
#ax.plot(np.mean(cdpTAD_rawbl2, axis=0),c='b',label='DP_raw = 2 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawbl2)[0]))
#ax.plot(np.mean(cdpTAD_rawbl3, axis=0),c='k',label='DP_raw = 3 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawbl3)[0]))
#ax.plot(np.mean(cdpTAD_rawbl4, axis=0),c='y',label='DP_raw >= 4 (n = {1:d})'
#        ''.format(0, np.shape(cdpTAD_rawbl4)[0]))
ax.plot(np.mean(crGMAPball, axis=0),c='c',label='rGMAP (n = {1:d})'
''.format(0, np.shape(crGMAPball)[0]))
ax.plot(np.mean(cTADtreeTADball, axis=0),c='g',label='TADtree (n = {1:d})'
''.format(0, np.shape(cTADtreeTADball)[0]))
#ax.plot(np.mean(cICTADball, axis=0),c='y',label='ICFinder (n = {1:d})'
#        ''.format(0, np.shape(cICTADball)[0]))
ax.set_ylabel('CTCF signal')
ax.legend(loc="upper right",prop={'size': 8})
ax.axes.get_xaxis().set_visible(False)
font = {'family' : 'normal',
'weight' : 'bold',
'size'   : 26}
matplotlib.rc('font', **font)
plt.savefig('/storage/home/lua137/work/TADcalling/CTCFenrichment.wholegenome.compare.png',dpi=400)
plt.close()
'''
|
{"hexsha": "ae46d0bf624ae9a4597864417cd07da9ab742cac", "size": 11362, "ext": "py", "lang": "Python", "max_stars_repo_path": "MainFig_CTCFenrich.py", "max_stars_repo_name": "anlin00007/OnTAD_figure", "max_stars_repo_head_hexsha": "a4848c6a5a1cc755befe910a1b3eaec400f85fb2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MainFig_CTCFenrich.py", "max_issues_repo_name": "anlin00007/OnTAD_figure", "max_issues_repo_head_hexsha": "a4848c6a5a1cc755befe910a1b3eaec400f85fb2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MainFig_CTCFenrich.py", "max_forks_repo_name": "anlin00007/OnTAD_figure", "max_forks_repo_head_hexsha": "a4848c6a5a1cc755befe910a1b3eaec400f85fb2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 59.1770833333, "max_line_length": 228, "alphanum_fraction": 0.6916916036, "include": true, "reason": "import numpy", "num_tokens": 4238}
|
# Explain the binary-BERT toxicity classifier with SHAP's DeepExplainer on a
# handful of examples from the civil_comments dataset.
import shap
import numpy as np
import torch
from datasets import load_dataset
from tqdm.contrib import tenumerate

from project.binary_bert.utils import load_binary_bert


def _tokenize_first(dataset, tokenizer, limit=11):
    """Tokenize the first `limit` examples of `dataset`.

    Returns a list with one entry per example; each entry is the nested-list
    form of the tokenizer's padded/truncated `input_ids` tensor.
    """
    token_ids = []
    # NOTE(review): the original loop broke on `id > 10`, i.e. it consumed 11
    # examples even though the progress bar said `total=10`; that behavior is
    # preserved here (limit=11, bar still shows 10).
    for idx, example in tenumerate(dataset, total=limit - 1):
        if idx >= limit:
            break
        inputs = tokenizer(
            example['text'], return_tensors="pt", truncation=True, padding=True
        )
        token_ids.append(inputs['input_ids'].tolist())
    return token_ids


train = load_dataset("civil_comments", split='train')
test = load_dataset("civil_comments", split='test')
model, tokenizer, class_names = load_binary_bert()

# Token ids for the examples to explain (test) and the background set (train).
txts = _tokenize_first(test, tokenizer)
train_txt = _tokenize_first(train, tokenizer)

model.eval()
with torch.no_grad():
    # First train batch as background distribution, first test batch as the
    # input whose prediction is attributed.
    e = shap.DeepExplainer(model, torch.tensor(train_txt[0]))
    shap_values = e.shap_values(torch.tensor(txts[0]))
print(shap_values)
|
{"hexsha": "aaacaadc54dc6d4e308488385e20847496c859a8", "size": 984, "ext": "py", "lang": "Python", "max_stars_repo_path": "project/binary_bert/explain.py", "max_stars_repo_name": "hancia/ToxicSpansDetection", "max_stars_repo_head_hexsha": "4a10600292af90a936767aee09559b39380e3d5e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-03-23T08:07:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-13T07:13:32.000Z", "max_issues_repo_path": "project/binary_bert/explain.py", "max_issues_repo_name": "hancia/ToxicSpansDetection", "max_issues_repo_head_hexsha": "4a10600292af90a936767aee09559b39380e3d5e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "project/binary_bert/explain.py", "max_forks_repo_name": "hancia/ToxicSpansDetection", "max_forks_repo_head_hexsha": "4a10600292af90a936767aee09559b39380e3d5e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5945945946, "max_line_length": 73, "alphanum_fraction": 0.7012195122, "include": true, "reason": "import numpy", "num_tokens": 246}
|
\filetitle{dbmerge}{Merge two or more databases}{dbase/dbmerge}
\paragraph{Syntax}\label{syntax}
\begin{verbatim}
D = dbmerge(D1,D2,...)
\end{verbatim}
\paragraph{Input arguments}\label{input-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{D1}, \texttt{D2}, \ldots{} {[} struct {]} - Input databases
whose entries will be combined in the output database.
\end{itemize}
\paragraph{Output arguments}\label{output-arguments}
\begin{itemize}
\itemsep1pt\parskip0pt\parsep0pt
\item
\texttt{D} {[} struct {]} - Output database that combines entries from
all input databases; if some entries are found in more than one input
database, the last occurrence is used.
\end{itemize}
\paragraph{Description}\label{description}
\paragraph{Example}\label{example}
\begin{verbatim}
d1 = struct('a',1,'b',2);
d2 = struct('a',10,'c',20);
d = dbmerge(d1,d2)
d =
a: 10
b: 2
c: 20
\end{verbatim}
|
{"hexsha": "a9546022c32d70376130fe4a80f373797b8eea93", "size": 935, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "-help/dbase/dbmerge.tex", "max_stars_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_stars_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-06T13:38:38.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-06T13:38:38.000Z", "max_issues_repo_path": "-help/dbase/dbmerge.tex", "max_issues_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_issues_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-03-28T08:13:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-02T10:40:25.000Z", "max_forks_repo_path": "-help/dbase/dbmerge.tex", "max_forks_repo_name": "OGResearch/IRIS-Toolbox-For-Octave", "max_forks_repo_head_hexsha": "682ea1960229dc701e446137623b120688953cef", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T07:06:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T07:06:39.000Z", "avg_line_length": 20.7777777778, "max_line_length": 72, "alphanum_fraction": 0.7101604278, "num_tokens": 305}
|
The http://en.wikipedia.org/wiki/Works_Progress_Administration Works Progress Administration, or WPA, was one of the largest New Deal programs instituted by President Franklin D. Roosevelt during the Great Depression. It provided jobs to unemployed workers, usually through civic improvement projects.
Davis still bears the stamp of these WPA projects—literally. Perhaps you've seen the WPA initials imprinted in a sidewalk while strolling through old North Davis. These sidewalks were laid in the late 1930s with federal funding through the Works Progress Administration. Look on the corner curbs at intersections for the most obvious stamps, especially along C Street.
Below is a gallery of WPA marks in Davis. Add any that you find, and be sure to mention where you found it!
Sidewalks
Other
|
{"hexsha": "15ad37f92b43c7a17818b572cdb3ab7a04893701", "size": 809, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/WPA.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/WPA.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/WPA.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 73.5454545455, "max_line_length": 369, "alphanum_fraction": 0.8096415328, "num_tokens": 168}
|
# This file is an example on how to use the BEL1D codes using a simple 2-layer DC experiment (with noise)
if __name__=="__main__": # To prevent recomputation when in parallel
    from pyBEL1D import BEL1D
    import cProfile # For debugging and timings measurements
    import time # For simple timing measurements
    import numpy as np # For the initialization of the parameters
    from matplotlib import pyplot # For graphics on post-processing
    from pyBEL1D.utilities import Tools # For further post-processing
    from os import listdir
    from os.path import isfile, join
    from pathos import multiprocessing as mp
    from pathos import pools as pp
    # Parameters for the tested model
    # modelTrue = np.asarray([5.0, 0.05, 0.25, 0.1, 0.2])
    # Prior bounds: one row per layer, two columns (min, max) per parameter.
    # NOTE(review): assumed column order is thickness/Vs/Vp/density pairs --
    # confirm against the BEL1D.MODELSET().DC documentation.
    priorDC = np.array([[0.005, 0.05, 0.1, 0.5, 0.2, 4.0, 1.5, 3.5], [0.045, 0.145, 0.1, 0.8, 0.2, 4.0, 1.5, 3.5], [0, 0, 0.3, 2.5, 0.2, 4.0, 1.5, 3.5]]) # MIRANDOLA prior test case
    nbParam = int(priorDC.size/2 - 1) # all parameters except the half-space thickness
    nLayer, nParam = priorDC.shape
    nParam = int(nParam/2) # two columns (min, max) per physical parameter
    stdPrior = [None]*nbParam
    meansPrior = [None]*nbParam
    stdUniform = lambda a,b: (b-a)/np.sqrt(12) # standard deviation of U[a, b]
    meansUniform = lambda a,b: (b-a)/2 # NOTE(review): the mean of U[a,b] is (a+b)/2; this computes the half-range -- confirm intent
    ident = 0
    for j in range(nParam):
        for i in range(nLayer):
            if not((i == nLayer-1) and (j == 0)):# Not the half-space thickness
                stdPrior[ident] = stdUniform(priorDC[i,j*2],priorDC[i,j*2+1])
                meansPrior[ident] = meansUniform(priorDC[i,j*2],priorDC[i,j*2+1])
                ident += 1
    # Field data: average dispersion curve from the Mirandola InterPACIFIC site
    Dataset = np.loadtxt("Data/DC/Mirandola_InterPACIFIC/Average/Average_interp60_cuttoff.txt")
    FreqMIR = Dataset[:,0]
    Dataset = np.divide(Dataset[:,1],1000)# Phase velocity in km/s for the forward model
    ErrorModel = [0.075, 20] # [relative error on velocity, frequency-dependent term]
    ErrorFreq = np.asarray(np.divide(ErrorModel[0]*Dataset*1000 + np.divide(ErrorModel[1],FreqMIR),1000))# Errors in km/s for each Frequency
    # Attention, the maximum number of periods is 60 for the forward model! Keep the number of points low!
    # Show the noise model for the dataset:
    pyplot.plot(FreqMIR, Dataset,'b')
    # 1-sigma and 2-sigma noise envelopes around the measured curve:
    dataNoisyU = Dataset + np.divide(ErrorModel[0]*Dataset*1000 + np.divide(ErrorModel[1],FreqMIR),1000)
    dataNoisyU2 = Dataset + 2*np.divide(ErrorModel[0]*Dataset*1000 + np.divide(ErrorModel[1],FreqMIR),1000)
    dataNoisyL = Dataset - np.divide(ErrorModel[0]*Dataset*1000 + np.divide(ErrorModel[1],FreqMIR),1000)
    dataNoisyL2 = Dataset - 2*np.divide(ErrorModel[0]*Dataset*1000 + np.divide(ErrorModel[1],FreqMIR),1000)
    # Overlay every individual dispersion curve found in the data folder:
    DataPath = "Data/DC/Mirandola_InterPACIFIC/"
    files = [f for f in listdir(DataPath) if isfile(join(DataPath, f))]
    for currFile in files:
        DatasetOther = np.loadtxt(DataPath+currFile)
        DatasetOther = np.divide(DatasetOther[:,1],1000) # Dataset for surf96 in km/s
        DatasetOther[DatasetOther==0] = np.nan # zeros mark missing picks; hide them in the plot
        pyplot.plot(FreqMIR, DatasetOther,'ko')
    pyplot.plot(FreqMIR, dataNoisyU,'r')
    pyplot.plot(FreqMIR, dataNoisyL,'r')
    pyplot.plot(FreqMIR, dataNoisyU2,'r--')
    pyplot.plot(FreqMIR, dataNoisyL2,'r--')
    pyplot.plot(FreqMIR, Dataset,'b')
    pyplot.xlabel("Frequency [Hz]")
    pyplot.ylabel("Phase velocity [km/s]")
    pyplot.xscale('log')
    pyplot.yscale('log')
    pyplot.show(block=False)
    ErrorModel = ErrorFreq # Error model for every frequency
    # Function to test the most direct approach:
    def test(nbModPre=1000):
        """Run a single (non-iterative) BEL1D workflow on the Mirandola dataset.

        nbModPre : number of models sampled from the prior (and drawn from the
        posterior). Returns the posterior means and standard deviations.
        """
        # To first declare the parameters, we call the constructor MODELSET().DC() with the right parameters
        print('Initializing . . .')
        TestCase = BEL1D.MODELSET().DC(prior=priorDC, Frequency=FreqMIR)
        # Then, we build the "pre-bel" operations using the PREBEL function
        Prebel = BEL1D.PREBEL(TestCase,nbModels=nbModPre)
        # We then run the prebel operations:
        print('Running PREBEL . . .')
        pool = pp.ProcessPool(mp.cpu_count())
        Prebel.run(Parallelization=[True,pool])
        # You can observe the relationship using:
        # Prebel.KDE.ShowKDE()
        # Then, since we know the dataset, we can initialize the "post-bel" operations:
        Postbel = BEL1D.POSTBEL(Prebel)
        # Run the operations:
        print('Sampling posterior . . .')
        Postbel.run(Dataset=Dataset, nbSamples=nbModPre, NoiseModel=ErrorModel)
        # All the operations are done, now, you just need to analyze the results (or run the iteration process - see next example)
        # Show the models parameters uncorrelated:
        Postbel.ShowPost()
        # Show the models parameters correlated with also the prior samples (Prebel.MODELS):
        Postbel.ShowPostCorr(OtherMethod=Prebel.MODELS)
        # Show the depth distributions of the parameters with the RMSE
        Postbel.ShowPostModels(RMSE=True,Parallelization=[True,pool])
        Postbel.ShowDataset(RMSE=True,Parallelization=[True,pool])
        # Project the observed dataset into the CCA space learned by PREBEL:
        Postbel.KDE.ShowKDE(Xvals=Postbel.CCA.transform(Postbel.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
        Prebel.ShowPreModels()
        Prebel.ShowPriorDataset()
        # Repeat the model/data displays keeping only the single best model:
        Postbel.ShowPostModels(RMSE=True,Best=1)
        Postbel.ShowDataset(RMSE=True,Best=1)
        # Get key statistics
        means, stds = Postbel.GetStats()
        pool.terminate()
        # TODO: debug save functions
        BEL1D.SavePREBEL(CurrentPrebel=Prebel,Filename="TestSave")
        BEL1D.SavePOSTBEL(CurrentPostbel=Postbel,Filename="TestSave")
        BEL1D.SaveSamples(CurrentPostbel=Postbel,Data=True,Filename="TestSave")
        return means, stds
# Now, let's see how to iterate:
def testIter(nbIter=5):
distance = np.zeros((nbIter))
nbModPre = 1000
means = np.zeros((nbIter,nbParam))
stds = np.zeros((nbIter,nbParam))
timings = np.zeros((nbIter,))
start = time.time()
parallel = False
pool = None#pp.ProcessPool(mp.cpu_count())
diverge = True
distancePrevious = 1e10
MixingUpper = 0
MixingLower = 1
for idxIter in range(nbIter):
if idxIter == 0: # Initialization
TestCase = BEL1D.MODELSET().DC(prior=priorDC, Frequency=FreqMIR)
PrebelIter = BEL1D.PREBEL(TestCase,nbModPre)
PrebelIter.run(Parallelization=[parallel,pool])
ModLastIter = PrebelIter.MODELS
print(idxIter+1)
PostbelTest = BEL1D.POSTBEL(PrebelIter)
PostbelTest.run(Dataset=Dataset,nbSamples=nbModPre,NoiseModel=ErrorModel)
# PostbelTest.KDE.ShowKDE(Xvals=PostbelTest.CCA.transform(PostbelTest.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
means[idxIter,:], stds[idxIter,:] = PostbelTest.GetStats()
end = time.time()
timings[idxIter] = end-start
else:
ModLastIter = PostbelTest.SAMPLES
# Here, we will use the POSTBEL2PREBEL function that adds the POSTBEL from previous iteration to the prior (Iterative prior resampling)
PrebelIter = BEL1D.PREBEL.POSTBEL2PREBEL(PREBEL=PrebelIter,POSTBEL=PostbelTest,Dataset=Dataset,NoiseModel=ErrorModel,Parallelization=[parallel,pool])
# Since when iterating, the dataset is known, we are not computing the full relationship but only the posterior distributions directly to gain computation timing
print(idxIter+1)
PostbelTest = BEL1D.POSTBEL(PrebelIter)
PostbelTest.run(Dataset,nbSamples=nbModPre,NoiseModel=ErrorModel)
means[idxIter,:], stds[idxIter,:] = PostbelTest.GetStats()
end = time.time()
timings[idxIter] = end-start
diverge, distance[idxIter] = Tools.ConvergeTest(SamplesA=ModLastIter,SamplesB=PostbelTest.SAMPLES, tol=1e-5)
print('KS distance: {}'.format(distance[idxIter]))
if not(diverge) or (abs((distancePrevious-distance[idxIter])/distancePrevious)*100<1):
# Convergence acheived if:
# 1) Distance below threshold
# 2) Distance does not vary significantly (less than 2.5%)
print('Model has converged at iter {}!'.format(idxIter+1))
break
distancePrevious = distance[idxIter]
start = time.time()
PostbelTest.ShowPostCorr(OtherMethod=PrebelIter.MODELS)
PostbelTest.ShowPostModels(RMSE=True, Parallelization=[True,pool])
PostbelTest.ShowDataset(RMSE=True,Prior=False, Parallelization=[True,pool])
timings = timings[:idxIter+1]
means = means[:idxIter+1,:]
stds = stds[:idxIter+1,:]
paramnames = PostbelTest.MODPARAM.paramNames["NamesS"] # For the legend of the futur graphs
if pool is not None:
pool.terminate()
return timings, means, stds, paramnames, distance
    IterTest = True # True: run the iterative example; False: run the single-pass example
    if IterTest:
        nbIter = 10
        timings, means, stds, names, distance = testIter(nbIter=nbIter)
        print('Total time: {} seconds'.format(np.sum(timings)))
        # Computation time per iteration:
        fig = pyplot.figure()
        pyplot.plot(np.arange(len(timings)),timings)
        pyplot.ylabel('Computation Time [sec]')
        pyplot.xlabel('Iteration nb.')
        pyplot.show(block=False)
        # Evolution of the posterior means/stds, normalized by the prior values:
        pyplot.plot(np.arange(len(timings)),np.divide(means,meansPrior))
        pyplot.ylabel('Normalized means [/]')
        pyplot.xlabel('Iteration nb.')
        pyplot.legend(names)
        pyplot.show(block=False)
        pyplot.plot(np.arange(len(timings)),np.divide(stds,stdPrior))
        pyplot.ylabel('Normalized standard deviations [/]')
        pyplot.xlabel('Iteration nb.')
        pyplot.legend(names)
        pyplot.show(block=False)
        # Convergence: KS distance between successive posteriors:
        pyplot.figure()
        pyplot.plot(np.arange(len(timings)),distance[0:len(timings)])
        pyplot.ylabel('Distance between successive posterior [/]')
        pyplot.xlabel('Iteration nb.')
        pyplot.legend(names)
        pyplot.show(block=False)
    if not(IterTest):
        test(nbModPre=10000)
        # nbModPre = 10000
        # print('Initializing . . .')
        # TestCase = BEL1D.MODELSET().DC(prior=priorDC, Frequency=FreqMIR)
        # # Then, we build the "pre-bel" operations using the PREBEL function
        # Prebel = BEL1D.PREBEL(TestCase,nbModels=nbModPre)
        # # We then run the prebel operations:
        # print('Running PREBEL . . .')
        # Prebel.run()
        # # You can observe the relationship using:
        # # Prebel.KDE.ShowKDE()
        # # Then, since we know the dataset, we can initialize the "post-bel" operations:
        # Postbel = BEL1D.POSTBEL(Prebel)
        # # Run the operations:
        # for idxUsed in range(len(files)):
        #     print('Current file: {}'.format(files[idxUsed]))
        #     DatasetOther = np.loadtxt(DataPath+files[idxUsed])
        #     DatasetOther = np.divide(DatasetOther[:,1],1000)
        #     Dataset = DatasetOther # Testing if works with other dataset than the mean (not the same span)
        #     Prebel.KDE.ShowKDE(Xvals=Postbel.CCA.transform(Postbel.PCA['Data'].transform(np.reshape(Dataset,(1,-1)))))
        #     try:
        #         print('Sampling posterior . . .')
        #         start = time.time()
        #         Postbel.run(Dataset=Dataset, nbSamples=10000, NoiseModel=ErrorModel)
        #         end = time.time()
        #         print('Dataset {} succeded in {} seconds!'.format(idxUsed+1,end-start))
        #     except:
        #         print('Dataset {} failed!'.format(idxUsed+1))
    # Keep all figures open until the user closes them:
    pyplot.show()
|
{"hexsha": "b15829f72f30d153379ec13505310b8c4620b8a6", "size": 11493, "ext": "py", "lang": "Python", "max_stars_repo_path": "exampleDC.py", "max_stars_repo_name": "hadrienmichel/pyBEL1D", "max_stars_repo_head_hexsha": "36b7adf8a0ad2abaccfd546f153c449392fc922e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-02-14T10:33:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-03T15:18:48.000Z", "max_issues_repo_path": "exampleDC.py", "max_issues_repo_name": "hadrienmichel/pyBEL1D", "max_issues_repo_head_hexsha": "36b7adf8a0ad2abaccfd546f153c449392fc922e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exampleDC.py", "max_forks_repo_name": "hadrienmichel/pyBEL1D", "max_forks_repo_head_hexsha": "36b7adf8a0ad2abaccfd546f153c449392fc922e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-14T09:00:56.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T09:00:56.000Z", "avg_line_length": 50.6299559471, "max_line_length": 181, "alphanum_fraction": 0.6436091534, "include": true, "reason": "import numpy", "num_tokens": 3037}
|
%% manage paths
% Resolve all paths relative to the sim_vehicle_dynamics module so the
% script works from any working directory inside the repository.
% NOTE(review): backslash separators assume Windows; fullfile would be
% portable -- confirm target platforms before changing.
path = strsplit(pwd,"sim_vehicle_dynamics");
path2module = strcat(path(1), "sim_vehicle_dynamics");
path2frictionmodel_folder = strcat(path2module, "\variable_friction\frictionmodel");
path2input = strcat(path2frictionmodel_folder, "\GUI\inputs");
path2output = strcat(path2frictionmodel_folder, "\GUI\outputs");
filepath2output_sldd=strcat(path2output, "\frictionmaps.sldd");
filepath2output_config=strcat(path2output, "\Gridmap_Config.txt");
%Handover Referenceline path and resolution from base workspace
Grid_mode=evalin('base', 'Grid_mode'); % number of gridmaps to load (1..4)
InterpTime=evalin('base','InterpTime');
%Set Variables
fid=fopen(filepath2output_config, 'wt'); % config file listing the map source filenames
Gridmap_Ident=zeros(3,3); % rows 1-2: copied gridmap header cells; row 3: map dimensions
%% generate data dictionaries containing friction maps
% Depending on Grid_mode (1..4), load that many gridmaps from the input
% folder (files are matched by their leading digit), fill the remaining
% slots with int8 zero maps of the same size, and log the source filenames
% to the config file. The map geometry is extracted into Gridmap_Ident
% after the switch, since it is identical for every mode.
switch Grid_mode
    case 1
        filename_1 = dir(strcat(path2input, '\1*.mat'));
        filename_1 = filename_1.name;
        Gridmap_1 = importdata(strcat(path2input, '\', filename_1));
        Gridmap_2 = int8(zeros(size(Gridmap_1,1),size(Gridmap_1,2)));
        Gridmap_3 = Gridmap_2;
        Gridmap_4 = Gridmap_2;
        fprintf(fid, ['Gridmap_1: ', filename_1, ...
            '\nGridmap_2: 0 \nGridmap_3: 0 \nGridmap_4: 0 ']);
        fclose(fid);
    case 2
        filename_1 = dir(strcat(path2input, '\1*.mat'));
        filename_2 = dir(strcat(path2input, '\2*.mat'));
        filename_1 = filename_1.name;
        filename_2 = filename_2.name;
        Gridmap_1 = importdata(strcat(path2input, '\', filename_1));
        Gridmap_2 = importdata(strcat(path2input, '\', filename_2));
        Gridmap_3 = int8(zeros(size(Gridmap_1,1),size(Gridmap_1,2)));
        Gridmap_4 = Gridmap_3;
        fprintf(fid, ['Gridmap_1: ', filename_1, ...
            '\nGridmap_2: ', filename_2, ...
            '\nGridmap_3: 0 \nGridmap_4: 0 ']);
        fclose(fid);
    case 3
        filename_1 = dir(strcat(path2input, '\1*.mat'));
        filename_2 = dir(strcat(path2input, '\2*.mat'));
        filename_3 = dir(strcat(path2input, '\3*.mat'));
        filename_1 = filename_1.name;
        filename_2 = filename_2.name;
        filename_3 = filename_3.name;
        Gridmap_1 = importdata(strcat(path2input, '\', filename_1));
        Gridmap_2 = importdata(strcat(path2input, '\', filename_2));
        Gridmap_3 = importdata(strcat(path2input, '\', filename_3));
        Gridmap_4 = int8(zeros(size(Gridmap_1,1),size(Gridmap_1,2)));
        fprintf(fid, ['Gridmap_1: ', filename_1, ...
            '\nGridmap_2: ', filename_2, ...
            '\nGridmap_3: ', filename_3, ...
            '\nGridmap_4: 0 ']);
        fclose(fid);
    case 4
        filename_1 = dir(strcat(path2input, '\1*.mat'));
        filename_2 = dir(strcat(path2input, '\2*.mat'));
        filename_3 = dir(strcat(path2input, '\3*.mat'));
        filename_4 = dir(strcat(path2input, '\4*.mat'));
        filename_1 = filename_1.name;
        filename_2 = filename_2.name;
        filename_3 = filename_3.name;
        filename_4 = filename_4.name;
        Gridmap_1 = importdata(strcat(path2input, '\', filename_1));
        Gridmap_2 = importdata(strcat(path2input, '\', filename_2));
        Gridmap_3 = importdata(strcat(path2input, '\', filename_3));
        Gridmap_4 = importdata(strcat(path2input, '\', filename_4));
        fprintf(fid, ['Gridmap_1: ', filename_1, ...
            '\nGridmap_2: ', filename_2, ...
            '\nGridmap_3: ', filename_3, ...
            '\nGridmap_4: ', filename_4]);
        % BUG FIX: the original code never closed the config file in this
        % case, leaking the handle and risking unflushed buffered output.
        fclose(fid);
end
% Map geometry identifier (identical for every Grid_mode, hence hoisted out
% of the switch): rows 1-2 are copied from the gridmap header cells, row 3
% holds the map dimensions.
Gridmap_Ident(1:2,1:2) = Gridmap_1(1:2,1:2);
Gridmap_Ident(1,3) = Gridmap_1(1,3);
Gridmap_Ident(3,1) = size(Gridmap_1,1);
Gridmap_Ident(3,2) = size(Gridmap_1,2);
%Create Datadict
% Write the four gridmaps plus metadata into a Simulink data dictionary:
% create it on the first run, otherwise update the existing entries in place.
if exist(filepath2output_sldd, 'file')==0
    myDictionaryObj = Simulink.data.dictionary.create(filepath2output_sldd);
    dDataSectObj = getSection(myDictionaryObj,'Design Data');
    addEntry(dDataSectObj,'Grid_map_1',Gridmap_1);
    addEntry(dDataSectObj,'Grid_map_2',Gridmap_2);
    addEntry(dDataSectObj,'Grid_map_3',Gridmap_3);
    addEntry(dDataSectObj,'Grid_map_4',Gridmap_4);
    addEntry(dDataSectObj,'InterpTime',InterpTime);
    addEntry(dDataSectObj,'Gridmap_Ident',Gridmap_Ident);
else
    myDictionaryObj = Simulink.data.dictionary.open(filepath2output_sldd);
    dDataSectObj = getSection(myDictionaryObj,'Design Data');
    assignin(dDataSectObj,'Grid_map_1',Gridmap_1);
    assignin(dDataSectObj,'Grid_map_2',Gridmap_2);
    assignin(dDataSectObj,'Grid_map_3',Gridmap_3);
    assignin(dDataSectObj,'Grid_map_4',Gridmap_4);
    assignin(dDataSectObj,'InterpTime',InterpTime);
    assignin(dDataSectObj,'Gridmap_Ident',Gridmap_Ident);
end
%Save Changes
saveChanges(myDictionaryObj);
%Finish
clc
"Datadict generated successfully! It is located in \frictionmodel\GUI\outputs" %#ok<NOPTS>
|
{"author": "TUMFTM", "repo": "sim_vehicle_dynamics", "sha": "df2ae95dbeb6f8e4591f31ee378acac8e812f358", "save_path": "github-repos/MATLAB/TUMFTM-sim_vehicle_dynamics", "path": "github-repos/MATLAB/TUMFTM-sim_vehicle_dynamics/sim_vehicle_dynamics-df2ae95dbeb6f8e4591f31ee378acac8e812f358/vehicle_environment/variable_friction/archive/2d_frictionmap/GUI/scripts/Datadict_Creation.m"}
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import range
import re
import warnings
import numpy as np
from skbio.util import cardinal_to_ordinal
# Precompiled patterns used when sanitizing sequence IDs and descriptions in
# _format_fasta_like_records below.
_whitespace_regex = re.compile(r'\s')
_newline_regex = re.compile(r'\n')
def _decode_qual_to_phred(qual_str, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to decode "
"quality scores.",
"Decoding Solexa quality scores is not currently supported, "
"as quality scores are always stored as Phred scores in "
"scikit-bio. Please see the following scikit-bio issue to "
"track progress on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual = np.fromstring(qual_str, dtype=np.uint8) - phred_offset
if np.any((qual > phred_range[1]) | (qual < phred_range[0])):
raise ValueError("Decoded Phred score is out of range [%d, %d]."
% (phred_range[0], phred_range[1]))
return qual
def _encode_phred_to_qual(phred, variant=None, phred_offset=None):
phred_offset, phred_range = _get_phred_offset_and_range(
variant, phred_offset,
["Must provide either `variant` or `phred_offset` in order to encode "
"Phred scores.",
"Encoding Solexa quality scores is not currently supported. "
"Please see the following scikit-bio issue to track progress "
"on this:\n\t"
"https://github.com/biocore/scikit-bio/issues/719"])
qual_chars = []
for score in phred:
if score < phred_range[0]:
raise ValueError("Phred score %d is out of range [%d, %d]."
% (score, phred_range[0], phred_range[1]))
if score > phred_range[1]:
warnings.warn(
"Phred score %d is out of targeted range [%d, %d]. Converting "
"to %d." % (score, phred_range[0], phred_range[1],
phred_range[1]), UserWarning)
score = phred_range[1]
qual_chars.append(chr(score + phred_offset))
return ''.join(qual_chars)
def _get_phred_offset_and_range(variant, phred_offset, errors):
if variant is None and phred_offset is None:
raise ValueError(errors[0])
if variant is not None and phred_offset is not None:
raise ValueError(
"Cannot provide both `variant` and `phred_offset`.")
if variant is not None:
if variant == 'sanger':
phred_offset = 33
phred_range = (0, 93)
elif variant == 'illumina1.3':
phred_offset = 64
phred_range = (0, 62)
elif variant == 'illumina1.8':
phred_offset = 33
phred_range = (0, 62)
elif variant == 'solexa':
phred_offset = 64
phred_range = (-5, 62)
raise NotImplementedError(errors[1])
else:
raise ValueError("Unrecognized variant %r." % variant)
else:
if not (33 <= phred_offset <= 126):
raise ValueError(
"`phred_offset` %d is out of printable ASCII character range."
% phred_offset)
phred_range = (0, 126 - phred_offset)
return phred_offset, phred_range
def _get_nth_sequence(generator, seq_num):
# i is set to None so that an empty generator will not result in an
# undefined variable when compared to seq_num.
i = None
if seq_num is None or seq_num < 1:
raise ValueError('Invalid sequence number (`seq_num`=%s). `seq_num`'
' must be between 1 and the number of sequences in'
' the file.' % str(seq_num))
try:
for i, seq in zip(range(1, seq_num + 1), generator):
pass
finally:
generator.close()
if i == seq_num:
return seq
raise ValueError('Reached end of file before finding the %s sequence.'
% cardinal_to_ordinal(seq_num))
def _parse_fasta_like_header(line):
id_ = ''
desc = ''
header = line[1:].rstrip()
if header:
if header[0].isspace():
# no id
desc = header.lstrip()
else:
header_tokens = header.split(None, 1)
if len(header_tokens) == 1:
# no description
id_ = header_tokens[0]
else:
id_, desc = header_tokens
return id_, desc
def _format_fasta_like_records(generator, id_whitespace_replacement,
                               description_newline_replacement, require_qual,
                               lowercase=None):
    """Yield ``(header, sequence_string, quality)`` triples for writing.

    Sanitizes IDs/descriptions with the given replacement strings, rejects
    empty sequences, and (when `require_qual` is true) requires quality
    scores on every record.
    """
    for replacement in (id_whitespace_replacement,
                        description_newline_replacement):
        if replacement is not None and '\n' in replacement:
            raise ValueError(
                "Newline character (\\n) cannot be used to replace whitespace "
                "in sequence IDs, nor to replace newlines in sequence "
                "descriptions.")

    for rec_num, rec in enumerate(generator, start=1):
        if len(rec) < 1:
            raise ValueError(
                "%s sequence does not contain any characters (i.e., it is an "
                "empty/blank sequence). Writing empty sequences is not "
                "supported." % cardinal_to_ordinal(rec_num))

        rec_id = '%s' % rec.metadata['id'] if 'id' in rec.metadata else ''
        if id_whitespace_replacement is not None:
            rec_id = _whitespace_regex.sub(id_whitespace_replacement, rec_id)

        rec_desc = (rec.metadata['description']
                    if 'description' in rec.metadata else '')
        if description_newline_replacement is not None:
            rec_desc = _newline_regex.sub(description_newline_replacement,
                                          rec_desc)

        header = '%s %s' % (rec_id, rec_desc) if rec_desc else rec_id

        if 'quality' in rec.positional_metadata:
            qual = rec.positional_metadata['quality'].values
        elif require_qual:
            raise ValueError(
                "Cannot write %s sequence because it does not have quality "
                "scores associated with it." % cardinal_to_ordinal(rec_num))
        else:
            qual = None

        if lowercase is None:
            seq_str = str(rec)
        elif hasattr(rec, 'lowercase'):
            seq_str = rec.lowercase(lowercase)
        else:
            raise AttributeError("lowercase specified but class %s does "
                                 "not support lowercase functionality" %
                                 rec.__class__.__name__)

        yield header, '%s' % seq_str, qual
def _line_generator(fh, skip_blanks=False):
for line in fh:
line = line.strip()
if line or not skip_blanks:
yield line
def _too_many_blanks(fh, max_blanks):
    """Return True if ``fh`` begins with more than ``max_blanks`` blank lines.

    The file handle is always rewound to the start before returning, so the
    caller can re-read it from the beginning.
    """
    blanks_seen = 0
    exceeded = False
    for line in _line_generator(fh, skip_blanks=False):
        if line:
            # First non-blank line ends the leading-blank run.
            break
        blanks_seen += 1
        if blanks_seen > max_blanks:
            exceeded = True
            break
    fh.seek(0)
    return exceeded
|
{"hexsha": "e031c9bd8b7454c7a12078814052e5f27a903d72", "size": 7758, "ext": "py", "lang": "Python", "max_stars_repo_path": "skbio/io/format/_base.py", "max_stars_repo_name": "wdwvt1/scikit-bio", "max_stars_repo_head_hexsha": "12853ade2b85b00ee02170c8dd8c9037bb21cc7f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "skbio/io/format/_base.py", "max_issues_repo_name": "wdwvt1/scikit-bio", "max_issues_repo_head_hexsha": "12853ade2b85b00ee02170c8dd8c9037bb21cc7f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "skbio/io/format/_base.py", "max_forks_repo_name": "wdwvt1/scikit-bio", "max_forks_repo_head_hexsha": "12853ade2b85b00ee02170c8dd8c9037bb21cc7f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2636363636, "max_line_length": 79, "alphanum_fraction": 0.5760505285, "include": true, "reason": "import numpy", "num_tokens": 1749}
|
\documentclass[10pt,letterpaper]{article}
\usepackage[margin=1in]{geometry}
\usepackage{setspace}
\usepackage{fancyhdr}
\usepackage{lastpage}
\pagestyle{fancyplain}
% Put watermark on
% \usepackage{draftwatermark}
% \SetWatermarkText{Draft}
% \SetWatermarkScale{7}
\lhead{}
\chead{Central Massachusetts Amateur Radio Association}
\rhead{}
\lfoot{\texttt{https://github.com/mide/cmara-meeting-minutes/}}
\cfoot{}
\rfoot{Page \thepage\ of \pageref{LastPage}}
\begin{document}
\begin{center}
{\huge October 2017 Board of Directors Meeting}\\
\emph{of the}\\
{\Large Central Massachusetts Amateur Radio Association}\\
\emph{Submitted by Mark Ide \texttt{W1IDE}, Secretary}
\end{center}
\section{Meeting Called to Order}
The CMARA October 2017 board meeting was called to order on October 19, 2017 at 8:26 PM by CMARA president Bob Peloquin (\texttt{KB1VUA}).
\section{Attendance}
\subsection{Officers Present}
\begin{tabular}{|l|l|l|c|}
\hline
\textbf{Position} & \textbf{Name} & \textbf{Callsign} & \textbf{Present} \\ \hline
President & Bob Peloquin & \texttt{KB1VUA} & Yes \\
Vice President & Brian Loverro & \texttt{K1BML} & Yes \\
Secretary & Mark Ide & \texttt{W1IDE} & Yes \\
Treasurer & Jim Singer & \texttt{N1EKO} & Yes \\
Webmaster & Lyn Glagowski & \texttt{WB1CCL} & No \\
\hline
\end{tabular}
\subsection{Board of Directors Present}
\begin{tabular}{|l|l|c|}
\hline
\textbf{Name} & \textbf{Callsign} & \textbf{Present} \\ \hline
Adrian Zeffert & \texttt{AB2IX} & Yes \\
Harold Carlson & \texttt{N1ZC} & No \\
Greg Algieri & \texttt{WA1JXR} & Yes \\
Terry Glagowski & \texttt{W1TR} & Yes \\
Randy Dore & \texttt{W4FEB} & Yes \\
Johnathan Sherman & \texttt{WW2JS} & No \\
\hline
\end{tabular}
\subsection{Members Present}
\texttt{KC1GIB}, \texttt{W1PA}
\subsection{Guests \& Visitors}
Christopher Wentworth
\section{Primary Discussion}
\subsection{Club QSL Cards}
\begin{enumerate}
\item We are looking for someone to help send out QSL cards when needed. Right now it's done on an ad-hoc basis and it can be difficult to keep track of the files. We send out approximately three per year.
\item Terry (\texttt{W1TR}) will send Mark (\texttt{W1IDE}) the QSL card images and Mark will price how much it would cost to print 100 of them. Printing them ahead of time (as opposed to by need) would reduce the work needed to fill them out.
\end{enumerate}
\subsection{Club Documents (Roster)}
\begin{enumerate}
\item A discussion was had about moving some of the club documents into something like Google Drive, so a select group of people (officers) could modify and everyone could read. Mark (\texttt{W1IDE}) and Jim (\texttt{N1EKO}) will communicate to accomplish this.
\end{enumerate}
\subsection{State Registration}
\begin{enumerate}
\item Mark (\texttt{W1IDE}) will check the status of the state registration and report back.
\end{enumerate}
\subsection{Swap Meet}
\begin{enumerate}
\item Adrian (\texttt{AB2IX}) proposed having another swap meet at a club meeting.
\item We'd be looking for the February / March time frame.
\item We need to establish much better rules, as last time was a bit chaotic.
\item We will likely have the meeting for club members only, with a specific purpose of helping other club members (lower prices).
\item A discussion was had about putting items for sale in the monthly email, but it was decided that this would be too much to maintain and could become too much like spam for some readers.
\end{enumerate}
\section{Next Month's Presentation}
\begin{enumerate}
\item \textbf{Primary Plan:} Greg (\texttt{WA1JXR}) will check with Bruce about the presentation on the new FCC entry-level licenses.
\item \textbf{Backup Plan:} Greg (\texttt{WA1JXR}) will be continuing with the soldering trainings and find a new video.
\end{enumerate}
\section{Meeting Adjourned}
The meeting was adjourned October 19, 2017 at 8:48 PM by CMARA president Bob Peloquin (\texttt{KB1VUA}).
\end{document}
|
{"hexsha": "e855608b25b884f6e204067bb496e3449727f1bb", "size": 4071, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "minutes/2017-10-19-board-meeting.tex", "max_stars_repo_name": "cmara/meeting-minutes", "max_stars_repo_head_hexsha": "e1f7e3debca5145a668321f75d12ce3db418eb5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-01-27T17:33:16.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-27T17:33:16.000Z", "max_issues_repo_path": "minutes/2017-10-19-board-meeting.tex", "max_issues_repo_name": "cmara/meeting-minutes", "max_issues_repo_head_hexsha": "e1f7e3debca5145a668321f75d12ce3db418eb5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "minutes/2017-10-19-board-meeting.tex", "max_forks_repo_name": "cmara/meeting-minutes", "max_forks_repo_head_hexsha": "e1f7e3debca5145a668321f75d12ce3db418eb5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-17T09:20:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-17T09:20:26.000Z", "avg_line_length": 39.9117647059, "max_line_length": 263, "alphanum_fraction": 0.7219356423, "num_tokens": 1193}
|
#include <fstream>
#include <functional>
#include <sstream>

#include <boost/test/unit_test.hpp>
#include <turtle/mock.hpp>

#include <common_defs.h>
#include <Describer.h>
BOOST_AUTO_TEST_SUITE(Describers)
BOOST_AUTO_TEST_SUITE(Dummy)
BOOST_AUTO_TEST_CASE(SimplyExtractsDoublesFromStream)
{
	// The dummy describer should parse a ';'-separated list of doubles.
	std::stringstream input;
	input << "1;2.3;6.66;5.44;";

	DummyDescriber describer;
	describer.setInputStream(&input);
	ObjectDescription description = describer.generateObjectDescription();

	const double expected[] = {1, 2.3, 6.66, 5.44};
	const std::size_t expectedCount = sizeof(expected) / sizeof(expected[0]);
	BOOST_REQUIRE_EQUAL(expectedCount, description.size());
	for (std::size_t i = 0; i < expectedCount; ++i)
		BOOST_CHECK_CLOSE(expected[i], description[i], 0.1);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE(MPEG7)
BOOST_AUTO_TEST_CASE(MPEG7DescriberExistsAndRequiresStream)
{
	// Compare the basic describer output against reference values taken
	// from the bassoon sample file.
	const double MAX_PERCENTAGE_DIFFERENCE = 0.01;

	std::ifstream input("../../../InstrumentRecognizerTester/test_files/mp7/bassoon/1953_bassoon_034_1_6_1.mp7");
	BOOST_REQUIRE(input.is_open());

	MPEG7Describer describer(&input);
	ObjectDescription actual = describer.generateObjectDescription();

	ObjectDescription expected(7);
	expected[0] = -1.2067;   // LogAttackTime
	expected[1] = 256.85;    // SpectralCentroid
	expected[2] = 0.074510;  // TemporalCentroid
	expected[3] = 619.47;    // HarmonicSpectralCentroid
	expected[4] = 0.18898;   // HarmonicSpectralDeviation
	expected[5] = 0.99624;   // HarmonicSpectralSpread
	expected[6] = 0.15982;   // HarmonicSpectralVariation

	BOOST_REQUIRE_EQUAL(actual.size(), expected.size());
	for (std::size_t i = 0; i < expected.size(); ++i)
		BOOST_CHECK_CLOSE(actual.at(i), expected.at(i), MAX_PERCENTAGE_DIFFERENCE);
}
BOOST_AUTO_TEST_CASE(MPEG7DetailedDescriberExistsAndRequiresStream)
{
	// The detailed describer adds six extra statistics on top of the
	// seven basic descriptors; reference values come from the bassoon sample.
	const double MAX_PERCENTAGE_DIFFERENCE = 0.1;

	std::ifstream input("../../../InstrumentRecognizerTester/test_files/mp7/bassoon/1953_bassoon_034_1_6_1.mp7");
	BOOST_REQUIRE(input.is_open());

	MPEG7DetailedDescriber describer(&input);
	ObjectDescription actual = describer.generateObjectDescription();

	ObjectDescription expected(13);
	expected[0] = -1.2067;    // LogAttackTime
	expected[1] = 256.85;     // SpectralCentroid
	expected[2] = 0.074510;   // TemporalCentroid
	expected[3] = 619.47;     // HarmonicSpectralCentroid
	expected[4] = 0.18898;    // HarmonicSpectralDeviation
	expected[5] = 0.99624;    // HarmonicSpectralSpread
	expected[6] = 0.15982;    // HarmonicSpectralVariation
	expected[7] = 86.6904;    // average FundamentalFrequency
	expected[8] = 54.7215;    // FundamentalFrequency variation
	expected[9] = 0.55203;    // average AudioHarmonicity
	expected[10] = 0.19118;   // AudioHarmonicity variation
	expected[11] = 5851.97;   // average UpperLimitOfHarmonicity
	expected[12] = 6967.366;  // UpperLimitOfHarmonicity variation

	BOOST_REQUIRE_EQUAL(actual.size(), expected.size());
	for (std::size_t i = 0; i < expected.size(); ++i)
		BOOST_CHECK_CLOSE(actual.at(i), expected.at(i), MAX_PERCENTAGE_DIFFERENCE);
}
BOOST_AUTO_TEST_CASE(ViolinHandledCorrectly)
{
	// Verifies the describer output against reference values for a viola sample.
	const double MAX_PERCENTAGE_DIFFERENCE = 0.01;
	std::ifstream is("../../../InstrumentRecognizerTester/test_files/mp7/viola/1002_viola_055_3_10_107.mp7");
	BOOST_REQUIRE(is.is_open());

	MPEG7Describer describer(&is);
	ObjectDescription description = describer.generateObjectDescription();

	ObjectDescription expected(7);
	expected[0] = -1.198057;
	expected[1] = 198.7499;
	expected[2] = 0.2377094;
	expected[3] = 1045.8944;
	expected[4] = 0.19090895;
	expected[5] = 0.6714315;
	expected[6] = 0.19529179;

	// Fixed: the size check was missing (the sibling tests have it). Without it
	// a too-short description surfaces as std::out_of_range from .at() instead
	// of a test failure, and a too-long one leaves extra values unchecked.
	BOOST_REQUIRE_EQUAL(description.size(), expected.size());
	for (std::size_t i = 0; i < expected.size(); i++)
	{
		BOOST_CHECK_CLOSE(description.at(i), expected.at(i), MAX_PERCENTAGE_DIFFERENCE);
	}
}
BOOST_AUTO_TEST_CASE(MPEG7DescriberThrowsOnTXTInput)
{
	// A plain-text file is not valid MPEG-7 input, so parsing must fail.
	std::ifstream inputFile("../../../InstrumentRecognizerTester/test_files/single_file/one.txt");
	BOOST_REQUIRE(inputFile.is_open());

	MPEG7Describer describer(&inputFile);
	BOOST_CHECK_THROW(describer.generateObjectDescription(), MPEG7Describer::InputParsingError);
}
BOOST_AUTO_TEST_CASE(MPEG7DescriberThrowsOnInputWithoutLogAttackTimeType)
{
	// The modified sample lacks the LogAttackTime descriptor; the describer
	// must report the missing mandatory descriptor.
	std::ifstream inputFile("../../../InstrumentRecognizerTester/test_files/mp7_modified/bassoon/1953_bassoon_034_1_6_1_modified.mp7");
	BOOST_REQUIRE(inputFile.is_open());

	MPEG7Describer describer(&inputFile);
	BOOST_CHECK_THROW(describer.generateObjectDescription(), MPEG7Describer::RequiredDescriptorNotFound);
}
BOOST_AUTO_TEST_SUITE_END()
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "5512fa3ac7bf7031e8e0bdcd75ed72fd18ef07ae", "size": 4929, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "InstrumentRecognizerTester/src/DescriberTester.cpp", "max_stars_repo_name": "Lewerow/InstrumentRecognizer", "max_stars_repo_head_hexsha": "ca6ad1c674854aa1cbd0dc006d25ace321f2213c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "InstrumentRecognizerTester/src/DescriberTester.cpp", "max_issues_repo_name": "Lewerow/InstrumentRecognizer", "max_issues_repo_head_hexsha": "ca6ad1c674854aa1cbd0dc006d25ace321f2213c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "InstrumentRecognizerTester/src/DescriberTester.cpp", "max_forks_repo_name": "Lewerow/InstrumentRecognizer", "max_forks_repo_head_hexsha": "ca6ad1c674854aa1cbd0dc006d25ace321f2213c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7173913043, "max_line_length": 125, "alphanum_fraction": 0.7599918848, "num_tokens": 1370}
|
"""
McsCMOSMEA
~~~~~~~~~~
Data classes to wrap and hide raw data handling of the CMOS-MEA HDF5 data files.
It is based on the MCS-CMOS-MEA Rawdata and ProcessedData definitions for HDF5
of the given compatible versions.
:copyright: (c) 2018 by Multi Channel Systems MCS GmbH
:license: see LICENSE for more details
"""
import h5py
from builtins import IndexError
import datetime
import math
import uuid
import collections
import numpy as np
import pandas as pd
from numpy import rec
import itertools
from numbers import Number
from inspect import signature
import re
from typing import Dict
from . import ureg, McsHdf5Types, McsHdf5Protocols
from .McsData import RawData
from pint import UndefinedUnitError
# Native MCS time tick: one microsecond.
MCS_TICK = 1 * ureg.us
# CLR (.NET DateTime) tick: 100 nanoseconds.
CLR_TICK = 100 * ureg.ns
# day -> number of clr ticks (100 ns)
DAY_TO_CLR_TIME_TICK = 24 * 60 * 60 * (10**7)
# Module-wide debug switch; when True, dprint() emits diagnostic output.
VERBOSE = False
def dprint(n, *args):
    """Debug helper: print ``n`` followed by ``args`` when VERBOSE is True.

    Fixed: the varargs are now unpacked so they print as separate,
    space-separated values instead of as a single tuple literal.
    """
    if VERBOSE:
        print(n, *args)
class DictProperty_for_Classes(object):
    """
    Descriptor that exposes item-style (``obj[key]``) access on an attribute,
    delegating reads/writes/deletes to user-supplied callables.
    """

    class _proxy(object):
        # Binds one owner instance to the accessor callables so that
        # ``instance.attr[key]`` dispatches to fget(instance, key), etc.
        def __init__(self, obj, fget, fset, fdel):
            self._obj = obj
            self._fget = fget
            self._fset = fset
            self._fdel = fdel

        def __getitem__(self, key):
            # Fixed: the guard previously tested self._fset, which made every
            # read fail with "Cannot read item." whenever no setter was given.
            if self._fget is None:
                raise TypeError("Cannot read item.")
            return self._fget(self._obj, key)

        def __setitem__(self, key, value):
            if self._fset is None:
                raise TypeError("Cannot set item.")
            self._fset(self._obj, key, value)

        def __delitem__(self, key):
            if self._fdel is None:
                raise TypeError("Cannot delete item.")
            self._fdel(self._obj, key)

    def __init__(self, fget=None, fset=None, fdel=None):
        self._fget = fget
        self._fset = fset
        self._fdel = fdel

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor, like property().
        if obj is None:
            return self
        return self._proxy(obj, self._fget, self._fset, self._fdel)
class _property(object):
class _proxy(object):
def __init__(self, obj, fget, fset, fdel):
self._obj = obj
self._fget = fget
self._fset = fset
self._fdel = fdel
def __getitem__(self,key):
if self._fget is None:
raise TypeError("Cannot read item.")#
return self._fget(self._obj, key)
def __setitem__(self,key,value):
if self._fset is None:
raise TypeError("Cannot set item.")
self._fset(self._obj, key, value)
def __delitem__(self, key):
if self._fdel is None:
raise TypeError("Cannot delete item.")
self._fdel(self._obj, key)
def __init__(self, fget=None, fset=None, fdel=None):
self._fget = fget
self._fset = fset
self._fdel = fdel
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self._proxy(obj, self._fget, self._fset, self._fdel)
class _list_property(object):
"""
Creates helper class which is a list subclass. It is used to hand lists of streams to the McsPy user.
:param list: list of streams
"""
class McsProxy(collections.UserList):
def __init__(self, initlist=None, obj=None, fget=None, fset=None, fdel=None):
"""
ATTENTION! The collections.UserList documentation requires the init method of collections.UserList subclasses to accept zero or one argument!
"""
super().__init__(initlist)
self._obj = obj
self._fget = fget
self._fset = fset
self._fdel = fdel
def __getitem__(self,key):
if self._fget is None:
raise TypeError("Cannot read item.")
if isinstance(key, int):
return self._fget([self.data[key][1]])
return self._fget([id_set.mcs_instanceid for id_set in selection])
def __setitem__(self,key,value):
if self._fset is None:
raise TypeError("Cannot set item.")
self._fset(self._obj, key, value)
def __delitem__(self, key):
if self._fdel is None:
raise TypeError("Cannot delete item.")
self._fdel(self._obj, key)
def __str__(self):
stream_types = dict()
column_width = 35
line = '-'*(column_width*3+4)+'\n'
bold_line = '='*(column_width*3+4)+'\n'
out = '|'+'{:^{}}'.format('Subtype', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|\n'
out += bold_line
for id_set in self.data:
type =self._obj[id_set.h5py].attrs['ID.Type'].decode('UTF-8')
subtype = self._obj[id_set.h5py].attrs['SubType'].decode('UTF-8')
if not type in stream_types:
stream_types[type] = list()
stream_types[type].append('|'+'{:^{}}'.format(subtype, column_width)+'|'+'{:^{}}'.format(id_set.mcspy, column_width)+'|'+'{:^{}}'.format(id_set.h5py, column_width)+'|\n')
for type in stream_types:
out += type +':\n'
out += ''.join(stream_types[type])
out += line
return out
def __init__(self, content, owner_instance, fget=None, fset=None, fdel=None):
self._content = content
self._owner_instance = owner_instance
self._fget = fget
self._fset = fset
self._fdel = fdel
def __get__(self, obj, objtype=None):
#if obj is None:
# return self
return self.McsProxy(self._content, obj=obj, fget=self._fget, fset=self._fset, fdel=self._fdel)
def __str__(self):
return self.McsProxy(self._content, obj=self._owner_instance).__str__()
class McsHDF5(object):
    """
    Container class that provides common structures for an Mcs HDF5 file
    """

    def __init__(self, hdf5_object):
        """
        Initializes the HDF5 container class from an HDF5 object.

        :param hdf5_object: h5py object (group or dataset); if it carries
            attributes, the four Mcs identifier attributes are cached.
        """
        self._hdf5_attributes = None
        self._h5py_object = hdf5_object
        if hasattr(self._h5py_object, 'attrs'):
            self._mcs_type = hdf5_object.attrs['ID.Type'].decode('UTF-8')
            self._mcs_typeid = hdf5_object.attrs['ID.TypeID'].decode('UTF-8')
            self._mcs_instance = hdf5_object.attrs['ID.Instance'].decode('UTF-8')
            self._mcs_instanceid = hdf5_object.attrs['ID.InstanceID'].decode('UTF-8')

    def _get_attributes(self):
        "Read and convert all attributes of the HDF5 group for easy access"
        if hasattr(self._h5py_object, 'attrs'):
            hdf5_attributes = self._h5py_object.attrs.items()
            self._hdf5_attributes = {}
            for (name, value) in hdf5_attributes:
                if hasattr(value, "decode"):
                    # Fixed: narrowed the bare except (which also swallowed
                    # KeyboardInterrupt/SystemExit) to Exception.
                    try:
                        self._hdf5_attributes[name] = value.decode('utf-8').rstrip()
                    except Exception:
                        # Keep the raw value if it is not valid UTF-8.
                        self._hdf5_attributes[name] = value
                else:
                    # Non-bytes attributes are indexed — presumably array-valued
                    # scalars; store their first element. TODO confirm for all files.
                    self._hdf5_attributes[name] = value[0]
        else:
            raise AttributeError('No Attributes')

    def _get_mcspy_instance(self, h5py_object, mcspy_parent=None):
        """
        takes a h5py object and returns an appropriate mcspy object

        :param h5py_object: h5py group or dataset to wrap
        :param mcspy_parent: optional McsGroup passed through to wrappers
            whose constructor accepts a ``mcspy_parent`` argument
        """
        typeID = h5py_object.attrs['ID.TypeID'].decode('utf-8').rstrip()
        cls = McsHdf5Types.get_mcs_class_name(typeID)
        if cls is None:
            # Unknown type id: hand back the raw h5py object unchanged.
            return h5py_object
        elif isinstance(h5py_object, h5py.Dataset):
            if isinstance(mcspy_parent, McsGroup) and 'mcspy_parent' in signature(cls.__init__).parameters and h5py_object.name.split('/')[-1] in mcspy_parent:
                return cls(h5py_object, mcspy_parent=mcspy_parent)
        return cls(h5py_object)

    @staticmethod
    def get_attributes(hdf5_object):
        "Read and convert all attributes of the HDF5 group for easy access"
        if hasattr(hdf5_object, 'attrs'):
            hdf5_attributes = hdf5_object.attrs.items()
            hdf5_attributes_decoded = {}
            for (name, value) in hdf5_attributes:
                if hasattr(value, "decode"):
                    hdf5_attributes_decoded[name] = value.decode('utf-8').rstrip()
                else:
                    hdf5_attributes_decoded[name] = value
            return hdf5_attributes_decoded
        else:
            raise AttributeError('No Attributes')

    def hdf5_to_mcspy(self, hdf5_names):
        """
        receives a hdf5_name as string in Mcs CMOS MEA file system style
        and converts it to python toolbox equivalent.

        :raises TypeError: if ``hdf5_names`` is not a str
        """
        #weird_mcf_hdf5_file_name = ["channeldata", "sensordata", "high-pass"]
        if isinstance(hdf5_names, str):
            return hdf5_names.strip().replace(":","").replace("(","").replace(")","").replace(" ","_").replace('@','at').replace('.','_').replace(',','_')
        else:
            raise TypeError("Pass a 'str' object")

    @property
    def attributes(self):
        # Lazily read and cache the attributes; returns None when the wrapped
        # object has no attrs (the AttributeError is reported, not raised).
        if self._hdf5_attributes is None:  # fixed: 'is None' instead of '== None'
            try:
                self._get_attributes()
            except AttributeError as err:
                print(err)
        return self._hdf5_attributes

    @property
    def h5py_object(self):
        return self._h5py_object
class McsGroup(h5py.Group, McsHDF5):
    """
    this class subclasses the h5py.Group object and extends it with McsPy toolbox functionality
    """

    # An "id set" pairs the identifiers under which a child can be addressed:
    # HDF5 name, Mcs instance id, McsPy name and Mcs type id.
    IDSetGroup = collections.namedtuple('IDSetGroup', ['h5py', 'mcs_instanceid', 'mcspy', 'mcs_typeid'])
    IDSetDataset = collections.namedtuple('IDSetDataset', ['h5py', 'mcs_instanceid', 'mcspy', 'mcs_typeid'])

    def __init__(self, h5py_group_object):
        """
        Wraps a h5py.Group and builds an inventory of its children.

        :param h5py_group_object: instance of h5py.Group
        :raises TypeError: if the argument is not a h5py.Group
        """
        if isinstance(h5py_group_object, h5py.Group):
            h5py.Group.__init__(self, h5py_group_object.id)
            McsHDF5.__init__(self, h5py_group_object)
            self._child_storage = dict()    # mcs_instanceid -> lazily built McsPy child
            self._child_inventory = list()  # id sets of all children of this group
            for child in h5py_group_object:
                try:
                    mcs_instanceid = h5py_group_object[child].attrs['ID.InstanceID'].decode('UTF-8')
                    mcs_typeid = h5py_group_object[child].attrs['ID.TypeID'].decode('UTF-8').rstrip()
                    mcspy_child_name = self.hdf5_to_mcspy(child)
                    if isinstance(self._h5py_object[child], h5py.Dataset):
                        self._child_inventory.append(McsGroup.IDSetDataset(h5py=child,
                                                               mcs_instanceid=mcs_instanceid,
                                                               mcspy=mcspy_child_name,
                                                               mcs_typeid=mcs_typeid))
                    if isinstance(self._h5py_object[child], h5py.Group):
                        self._child_inventory.append(McsGroup.IDSetGroup(h5py=child,
                                                               mcs_instanceid=mcs_instanceid,
                                                               mcspy=mcspy_child_name,
                                                               mcs_typeid=mcs_typeid))
                except Exception as e:
                    # Children without the Mcs identifier attributes are reported
                    # and skipped rather than aborting the whole group.
                    print("Error opening group " + child + ": " + str(e))
        else:
            raise TypeError('The h5py_group_object \'{}\' is not an instance of the h5py.Group class.'.format(h5py_group_object.name))

    def __repr__(self):
        return '<McsGroup object at '+str(hex(id(self)))+'>'

    def __str__(self):
        # Render the group's children as two tables: groups and datasets.
        column_width = 25
        bold_line = '='*(column_width*3+4)+'\n'
        line = '-'*(column_width*3+4)+'\n'
        out = line + 'Parent Group: <'+str(type(self)).strip('<>')+' object at '+str(hex(id(self)))+'>\n'
        header = '|'+'{:^{}}'.format('Mcs Type', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|\n'
        dataset = 'Datasets:\n'
        group = 'Groups:\n'
        for child in self._child_inventory:
            mcs_type = self._h5py_object[child.h5py].attrs['ID.Type'].decode('utf-8')
            if isinstance(child, McsGroup.IDSetGroup):
                group += '|'+'{:^{}}'.format(mcs_type, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
            if isinstance(child, McsGroup.IDSetDataset):
                dataset += '|'+'{:^{}}'.format(mcs_type, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
        if group.count('\n') == 1:
            group += ' '*4+'None\n'
        if dataset.count('\n') == 1:
            dataset += ' '*4+'None\n'
        out += line + '\n\n' + header + bold_line + group + line + dataset
        return out

    def __getattr__(self, name):
        # Allow attribute-style access to children by any of their identifiers.
        id_set = self.ischild(name)
        if not id_set:
            raise AttributeError('There is no instance with name {} within this group'.format(name))
        return self._children[id_set.mcs_instanceid]

    def __dir__(self):
        # Expose McsPy child names for tab completion.
        return super().__dir__() + [s.mcspy for s in self._child_inventory]

    def ischild(self, id):
        """
        Takes an identifier and checks if it is a valid identifier for a child of this group:

        :param id: mcs instanceid, h5py name , mcspy name as instance of 'str'
        :return: False if id is not valid, set of identifiers of the child
        """
        if not isinstance(id, str):
            return False
        # Match against h5py name, instance id or mcspy name (first three fields).
        return next((id_set for id_set in self._child_inventory if id in id_set[0:3]), False)

    def _get_child(self, key):
        """
        Retrieves a child from the dictionary self._child_storage:

        :param key: mcs_instanceid which indentifies a subgroup of self._h5py_object
        :raises KeyError: if key does not identify a child of this group
        """
        child_id_set = self.ischild(key)
        if not child_id_set:
            # Fixed: the placeholder was never filled in the original message.
            raise KeyError('key \'{}\' is not valid. Pass an instance of \'str\', which identifies a child of this group.'.format(key))
        if not child_id_set.mcs_instanceid in self._child_storage.keys():
            self._read_child(child_id_set)
        return self._child_storage[child_id_set.mcs_instanceid]

    def _get_children(self, key):
        """
        Retrieves a set of children from the dictionary self._child_storage:

        :param key: list or tuple with mcs_instanceid which indentify a subgroup of self._h5py_object respectively
        """
        if isinstance(key, (list, tuple)):
            if len(key) == 1:
                return self._get_child(key[0])
            out = list()
            for child_key in key:
                try:
                    out.append(self._get_child(child_key))
                except KeyError as err:
                    print(err)
            return out

    # Item-style read access to children (instance._children[key]).
    _children = _property(_get_child, None, None)

    def _set_child(self, key, value):
        pass

    def _del_child(self, key):
        pass

    def _read_children_of_type(self, child_typeid, store_parents=True):
        """
        reads all children with given typeID

        :param child_typeid: mcs type id for a specific mcs hdf5 structure
        :param store_parents: forwarded to _read_child
        """
        for id_set in self._child_inventory:
            if child_typeid == id_set[3] and id_set[1] not in self._child_storage.keys():
                # Fixed: the original called the nonexistent self._readf_child,
                # which raised AttributeError whenever this method was used.
                self._read_child(id_set, store_parents)

    def _read_child(self, id_set, store_parent=True):
        """
        read given child

        :param id_set: id_set must be a valid id_set identifiying a child of this group
        :param store_parent: if True, hand this group to the child as mcspy parent
        """
        if store_parent:
            self._child_storage[id_set.mcs_instanceid] = self._get_mcspy_instance(self._h5py_object[id_set.h5py], self)
        else:
            self._child_storage[id_set.mcs_instanceid] = self._get_mcspy_instance(self._h5py_object[id_set.h5py])

    def tree(self, name='mcspy', mcs_type=False, max_level=None):
        """
        builds the hdf5 hierarchy beginning with the current group then traversing all subentities depth first as a string

        :param name: cfg variable for the type of name that is to be printed for each entity in the
                        h5py group, default: 'h5py', options: 'mcspy'
        :param mcs_type: cfg variable to show mcs type in the tree, default: False
        :param max_level: cfg variable to limit the number of tree levels shown, default: None (show all)
        """
        # The rendered tree is cached; rebuild only when a cfg variable changed.
        if not hasattr(self, '_tree_string'):
            self._tree_string = ''
        if not hasattr(self, '_tree_mcs_type'):
            self._tree_mcs_type = ''
        if not hasattr(self, '_tree_names'):
            self._tree_names = ''
        if not hasattr(self, '_tree_level'):
            self._tree_level = None
        if self._tree_string == '' or mcs_type != self._tree_mcs_type or self._tree_names != name or self._tree_level != max_level:
            self._tree_string = ''
            self._tree_mcs_type = mcs_type
            self._tree_names = name
            self._tree_level = max_level
            if self.name == '/':
                print(self.name)
            else:
                print(self.name.split('/')[-1])
            name_width = 35
            base_level = self.name.count('/')
            if self._tree_names == 'mcspy':
                def _print_mcspy_tree(name):
                    level = name.count('/')+1
                    if max_level is None or level - base_level < max_level:
                        mcstype = ''
                        if 'ID.Type' in self[name].attrs and mcs_type:
                            mcstype += ' - '+self[name].attrs['ID.Type'].decode('UTF-8')
                        name = self.hdf5_to_mcspy(name.split('/')[-1])
                        self._tree_string +=' '*4*level+name.ljust(name_width)+mcstype+'\n'
                self.visit(_print_mcspy_tree)
            elif self._tree_names == 'h5py':
                def _print_h5py_tree(name):
                    level = name.count('/')+1
                    if max_level is None or level - base_level < max_level:
                        mcstype = ''
                        if 'ID.Type' in self[name].attrs and mcs_type:
                            mcstype += ' - '+self[name].attrs['ID.Type'].decode('UTF-8')
                        name = name.split('/')[-1]
                        self._tree_string +=' '*4*level+name.ljust(name_width)+mcstype+'\n'
                self.visit(_print_h5py_tree)
            else:
                raise ValueError('name \'{}\' is not a valid argument. Pass \'h5py\' or \'mcspy\''.format(name))
        return self._tree_string
class McsDataset(h5py.Dataset, McsHDF5):
    """
    This class subclasses the h5py.Dataset object and extends it with McsPy toolbox functionality
    """

    def __init__(self, h5py_dataset_object):
        """
        Wraps a h5py.Dataset object.

        :param h5py_dataset_object: instance of h5py.Dataset
        """
        h5py.Dataset.__init__(self, h5py_dataset_object.id)
        McsHDF5.__init__(self, h5py_dataset_object)
        # For compound datasets: field names translated to McsPy naming style.
        self._compound_dataset_names = None
        if self.dtype.names:
            self._compound_dataset_names = [self.hdf5_to_mcspy(name) for name in self.dtype.names]

    def __getattr__(self, name):
        # Allow attribute-style access to compound fields by their McsPy names.
        if self._compound_dataset_names:
            if name in self._compound_dataset_names:
                # Map the McsPy-style name back to the original field name.
                name = self.dtype.names[self._compound_dataset_names.index(name)]
            if name in list(self.dtype.names):
                if hasattr(self._h5py_object[name], "decode"):
                    return self._h5py_object[name].decode('utf-8').rstrip()
                else:
                    return self[name]
            else:
                raise AttributeError('\'{}\' is not a valid attribute for: {}!'.format(name, self.__repr__()))
        else:
            raise AttributeError('\'{}\' is not a valid attribute for: {}!'.format(name, self.__repr__()))

    def iscompound(self):
        """
        Determines whether Dataset is a Compound Dataset

        :return Boolean: True if Dataset object represents h5py Compound Dataset, False otherwise
        """
        if self._compound_dataset_names:
            return True
        return False

    def __repr__(self):
        if self.iscompound():
            return '<McsDataset object representing a compound dataset at '+str(hex(id(self)))+'>'
        return '<McsDataset object at '+str(hex(id(self)))+'>'

    def __str__(self):
        first_col_width = 25
        if self.iscompound():
            out = 'Compound McsDataset '+self.name.split("/")[-1]+'\n\n'
        else:
            out = 'McsDataset '+self.name.split("/")[-1].ljust(first_col_width)+'\n\n'
        out += 'location in hdf5 file:'.ljust(first_col_width)+self.name+'\n'
        # Fixed: the shape and dtype lines no longer repeat the dataset path,
        # which was pasted in by mistake (copied from the location line).
        out += 'shape:'.ljust(first_col_width)+'{}'.format(self.shape)+'\n'
        out += 'dtype:'.ljust(first_col_width)+'{}'.format(self.dtype)+'\n'
        return out

    def to_pdDataFrame(self):
        """
        Returns the data set as a pandas DataFrame
        """
        return pd.DataFrame(self[()])
class McsStreamList(collections.UserList):
    """
    Creates helper class which is a list subclass. It is used to hand lists of streams to the McsPy user.

    :param list: list of streams
    """
    def __str__(self):
        # Render a table of (HDF5 name, McsPy name, subtype) grouped by stream type.
        column_width = 35
        separator = '-' * (column_width * 3 + 4) + '\n'
        heavy_separator = '=' * (column_width * 3 + 4) + '\n'

        def _row(cells):
            return '|' + '|'.join('{:^{}}'.format(cell, column_width) for cell in cells) + '|\n'

        grouped = dict()
        for stream in self:
            stream_type = stream.attributes['ID.Type']
            hdf5_name = stream.name.rsplit('/', 1)[1]
            mcspy_name = stream.hdf5_to_mcspy(hdf5_name)
            subtype = stream.attributes['SubType'] if 'SubType' in stream.attributes else ''
            grouped.setdefault(stream_type, list()).append((hdf5_name, mcspy_name, subtype))

        out = _row(('HDF5 name', 'McsPy name', 'Stream Subtype'))
        out += heavy_separator
        for stream_type, entries in grouped.items():
            out += stream_type + ':\n'
            for entry in entries:
                out += _row(entry)
            out += separator
        return out
class McsData(object):
    """
    Dummy class provides access to all types of mcs files by returning an instance the class that corresponds to the file type
    """

    def __new__(cls, file_path):
        """
        Creates a Data object this includes checking the validity of the passed HDF5 file and the return of a
        an object that matches the MCS file type.

        :param file_path: path to a HDF5 file that contains data encoded in a supported MCS-HDF5 format version
        :raises IOError: if the file is not a supported MCS-HDF5 file
        """
        h5_file = h5py.File(file_path, 'r')
        try:
            mcs_hdf5_protocol_type, _ = McsData.validate_mcs_hdf5_version(h5_file)
        except IOError as err:
            print(err)
            h5_file.close()
            # Fixed: the original fell through after printing, then crashed with
            # UnboundLocalError on mcs_hdf5_protocol_type; re-raise instead.
            raise
        # The concrete reader classes reopen the file by path themselves, so
        # release the validation handle here to avoid leaking it.
        h5_file.close()
        if mcs_hdf5_protocol_type == 'CMOS_MEA':
            return McsCMOSMEAData(file_path)
        elif mcs_hdf5_protocol_type == 'RawData':
            return RawData(file_path)

    @staticmethod
    def validate_mcs_hdf5_version(mcs_h5_file_obj):
        "Check if the MCS-HDF5 protocol type and version of the file is supported by this class"
        root_grp = mcs_h5_file_obj['/']
        if 'McsHdf5ProtocolType' in root_grp.attrs:  # check for old file type
            mcs_hdf5_protocol_type = root_grp.attrs['McsHdf5ProtocolType'].decode('UTF-8')
            if mcs_hdf5_protocol_type == "RawData":
                mcs_hdf5_protocol_type_version = root_grp.attrs['McsHdf5ProtocolVersion']
                supported_versions = McsHdf5Protocols.SUPPORTED_PROTOCOLS[mcs_hdf5_protocol_type]
                if ((mcs_hdf5_protocol_type_version < supported_versions[0]) or
                        (supported_versions[1] < mcs_hdf5_protocol_type_version)):
                    raise IOError('Given HDF5 file has MCS-HDF5 RawData protocol version %s and supported are all versions from %s to %s' %
                                  (mcs_hdf5_protocol_type_version, supported_versions[0], supported_versions[1]))
            else:
                raise IOError("The root group of this HDF5 file has no 'McsHdf5ProtocolVersion' attribute -> so it could't be checked if the version is supported!")
        elif 'ID.Type' in root_grp.attrs:  # check for CMOS MEA file type
            mcs_hdf5_protocol_type = "CMOS_MEA"
            if 'FileVersion' in root_grp.attrs:
                mcs_hdf5_protocol_type_version = root_grp.attrs['FileVersion']
                supported_versions = McsHdf5Protocols.SUPPORTED_PROTOCOLS[mcs_hdf5_protocol_type]
                if ((mcs_hdf5_protocol_type_version[0] < supported_versions[0]) or
                        (supported_versions[1] < mcs_hdf5_protocol_type_version[0])):
                    raise IOError('Given HDF5 file has MCS-HDF5 CMOS-MEA version %s and supported are all versions from %s to %s' %
                                  (mcs_hdf5_protocol_type_version, supported_versions[0], supported_versions[1]))
            else:
                raise IOError("The root group of this HDF5 file has no 'FileID' attribute -> so it could't be checked if the version is supported!")
        else:
            raise IOError("The root group of this HDF5 file has no attribute that can be associated to a MCS HDF5 file type -> this file is not supported by McsPy!")
        return list((mcs_hdf5_protocol_type, mcs_hdf5_protocol_type_version))
class McsCMOSMEAData(McsGroup):
    """
    This class holds the information of a complete MCS CMOS-MEA data file system
    """
    sensorWidth: int = 65   # number of sensor columns on the CMOS chip
    sensorHeight: int = 65  # number of sensor rows on the CMOS chip
    def __init__(self, cmos_data_path):
        """
        Creates and initializes a McsCMOSMEAData object that provides access to the content of the given MCS-HDF5 file
        :param cmos_data_path: path to a HDF5 file that contains raw data encoded in a supported MCS-HDF5 format version
        """
        self.h5_file = h5py.File(cmos_data_path, 'r')
        super().__init__(self.h5_file)
        self.mcs_hdf5_protocol_type, self.mcs_hdf5_protocol_type_version = McsData.validate_mcs_hdf5_version(self.h5_file)
        #self._get_session_info()
        #self._acquisition = None
        #self._filter_tool = None
        #self._sta_explorer = None
        #self._spike_explorer = None
        #self._spike_sorter = None
    def __del__(self):
        # guard: __init__ may have failed before h5_file was assigned, which
        # previously raised a secondary AttributeError during teardown
        if hasattr(self, 'h5_file'):
            self.h5_file.close()
    def __repr__(self):
        return '<McsCMOSMEAData filename=' + self.attributes['ID.Instance'] + '>'
    def __str__(self):
        """Human-readable summary: file name, session info and group content."""
        out: str = '<McsCMOSMEAData instance at '+str(hex(id(self)))+'>\n\n'
        out += 'This object represents the Mcs CMOS MEA file:\n'
        #out += ''*4+'Path:'.ljust(12)+'\\'.join(self.attributes['ID.Instance'].split('\\')[:-1])+'\n'
        out += ''*4+'Filename:'.ljust(12)+self.attributes['ID.Instance'].split('\\')[-1]+'\n\n'
        out += 'Date'.ljust(21)+'Program'.ljust(28)+'Version'.ljust(12)+'\n'
        out += '-'*19+' '*2+'-'*26+' '*2+'-'*10+'\n'
        out += self.attributes['DateTime'].ljust(21) + self.attributes['ProgramName'].ljust(28)+self.attributes['ProgramVersion'].ljust(12)+'\n\n'
        # reuse the McsGroup content listing, dropping its first four lines
        mcs_group_string = super().__str__().split('\n')
        return out+'\nContent:\n'+'\n'.join(mcs_group_string[4:])
    #def _get_session_info(self):
    #    "Read all session metadata/root group atributes of the Cmos mea file"
    #    root_grp_attributes = self.h5_file['/'].attrs.items()
    #    self.session_info = {}
    #    for (name, value) in root_grp_attributes:
    #        #print(name, value)
    #        if hasattr(value, "decode"):
    #            self.session_info[name] = value.decode('utf-8').rstrip()
    #        else:
    #            self.session_info[name] = value
    def __read_acquisition(self):
        "Read the 'Acquisition' group and wrap it in an Acquisition object"
        if 'Acquisition' in list(self.h5_file.keys()):
            acquisition_folder = self.h5_file['Acquisition']
            #acquisition_attributes = self.h5_file['Acquisition'].attrs.items()
            if len(acquisition_folder)>0:
                self._acquisition = Acquisition(acquisition_folder)
                for (name, value) in acquisition_folder.items():
                    dprint(name, value)
        else:
            raise AttributeError("The HDF5 file does not contain a group 'Acquisition'.")
    def __read_sta_explorer(self):
        "Read the 'STA Explorer' or 'Network Explorer' group (older/newer file layout)"
        if 'STA Explorer' in list(self.h5_file.keys()):
            "Read sta explorer group"
            network_explorer_folder = self.h5_file['STA Explorer']
            #sta_explorer_attributes = self.h5_file['STA Explorer'].attrs.items()
            if len(network_explorer_folder)>0:
                self._sta_explorer = NetworkExplorer(network_explorer_folder)
                for (name, value) in network_explorer_folder.items():
                    dprint(name, value)
        elif 'Network Explorer' in list(self.h5_file.keys()):
            "Read network explorer group"
            network_explorer_folder = self.h5_file['Network Explorer']
            #sta_explorer_attributes = self.h5_file['STA Explorer'].attrs.items()
            if len(network_explorer_folder)>0:
                self._sta_explorer = NetworkExplorer(network_explorer_folder)
                for (name, value) in network_explorer_folder.items():
                    dprint(name, value)
        else:
            raise AttributeError("The HDF5 file does not contain a group 'STA Explorer' or 'Network Explorer'.")
    def __read_filter_tool(self):
        # placeholder: reading the 'Filter Tool' group is not implemented yet
        if 'Filter Tool' in list(self.h5_file.keys()):
            pass
        else:
            raise AttributeError("The HDF5 file does not contain a group 'Filter Tool'.")
    def __read_spike_explorer(self):
        # placeholder: reading the 'Spike Explorer' group is not implemented yet
        if 'Spike Explorer' in list(self.h5_file.keys()):
            pass
        else:
            raise AttributeError("The HDF5 file does not contain a group 'Spike Explorer'.")
    def __read_spike_sorter(self):
        # placeholder: reading the 'Spike Sorter' group is not implemented yet
        if 'Spike Sorter' in list(self.h5_file.keys()):
            pass
        else:
            raise AttributeError("The HDF5 file does not contain a group 'Spike Sorter'.")
    @classmethod
    def sensorID_to_coordinates(cls, sensorID):
        # 'cls' instead of the misleading 'self': this is a classmethod
        "Computes the [x,y] chip coordinates of a sensor. Note: both, sensor IDs and coordinates are base 1"
        if 0<sensorID and sensorID<=cls.sensorWidth*cls.sensorHeight:
            sensorID -= 1
            return np.array([(sensorID % cls.sensorHeight)+1,(sensorID // cls.sensorHeight)+1])
        else:
            raise KeyError('Sensor ID out of range!')
    @classmethod
    def coordinates_to_sensorID(cls, row: int, col: int) -> int:
        "Computes the sensor ID for row and column coordinates. Note: sensor IDs and rows and columns are base 1"
        if 0<row and row<=cls.sensorHeight and 0<col and col<=cls.sensorWidth:
            return cls.sensorHeight*(col-1)+row
        else:
            raise KeyError('Coordinates out of range!')
class Acquisition(McsGroup):
    """
    Container class for acquisition data.
    Acquisition Group can hold different types of streams: Analog Streams, Event Streams, Timestamp Streams, Segment Streams, Spike Streams
    """
    # allowed stream types as Type:TypeID pairs
    _stream_types = {"AnalogStream" : "9217aeb4-59a0-4d7f-bdcd-0371c9fd66eb",
                     "FrameStream" : "15e5a1fe-df2f-421b-8b60-23eeb2213c45",
                     "SegmentStream" : "35f15fa5-8427-4d07-8460-b77a7e9b7f8d",
                     "TimeStampStream" : "425ce2e0-f1d6-4604-8ab4-6a2facbb2c3e",
                     "SpikeStream" : "26efe891-c075-409b-94f8-eb3a7dd68c94",
                     "EventStream" : "09f288a5-6286-4bed-a05c-02859baea8e3"}
    def __init__(self, acquisition_group):
        """
        :param acquisition_group: h5py group of the 'Acquisition' folder
        """
        super().__init__(acquisition_group)
        # expose one list property per stream type, filtered by the child's mcs type id
        for property_name, type_name in (('ChannelStreams', 'AnalogStream'),
                                         ('SensorStreams', 'FrameStream'),
                                         ('SegmentStreams', 'SegmentStream'),
                                         ('SpikeStreams', 'SpikeStream'),
                                         ('EventStreams', 'EventStream')):
            typeid = Acquisition._stream_types[type_name]
            setattr(Acquisition, property_name, _list_property([id_set for id_set in self._child_inventory if id_set.mcs_typeid == typeid], self, fget=self._get_children, fset=None, fdel=None))
    def __str__(self) -> str:
        """Tabular overview of all streams in the acquisition group, grouped by stream type."""
        if self._child_inventory:
            column_width: int = 25
            bold_line: str = '='*(column_width*3+4)+'\n'
            line: str = '-'*(column_width*3+4)+'\n'
            out: str = line + 'Parent Group: <'+str(type(self)).strip('<>')+' object at '+str(hex(id(self)))+'>\n\n'
            header: str = '|'+'{:^{}}'.format('Subtype', column_width)+'|'+'{:^{}}'.format('HDF5 name', column_width)+'|'+'{:^{}}'.format('McsPy name', column_width)+'|\n'
            stream_types: Dict[str, str] = dict()
            for child in self._child_inventory:
                attrs = self._h5py_object[child.h5py].attrs
                stream_type = attrs['ID.Type'].decode('utf-8')
                # 'SubType' is optional for some stream types -> default to ''
                # (previously an unconditional read raised KeyError)
                subtype_attr = attrs.get('SubType')
                stream_subtype = subtype_attr.decode('utf-8') if subtype_attr is not None else ''
                if not stream_type in stream_types:
                    stream_types[stream_type] = ""
                stream_types[stream_type] += '|'+'{:^{}}'.format(stream_subtype, column_width)+'|'+'{:^{}}'.format(child.h5py, column_width)+'|'+'{:^{}}'.format(child.mcspy, column_width)+'|\n'
            # emit the table header exactly once (previously appended once per child)
            out += line + '\n\n' + header + bold_line
            for stream_type in stream_types:
                out += stream_type+'\n'+stream_types[stream_type] + line
        else:
            out = "No streams found"
        return out
    def __repr__(self):
        return '<Acquisition object at '+str(hex(id(self)))+', ChannelStreams='+str(len(self.ChannelStreams))+', SensorStreams='+str(len(self.SensorStreams))+', SegmentStreams='+str(len(self.SegmentStreams))+', SpikeStreams='+str(len(self.SpikeStreams))+', EventStreams='+str(len(self.EventStreams))+'>'
class McsInfo(McsDataset):
    """
    Container class for Stream Meta Data
    """
    def __init__(self, meta_data_set):
        """
        Initializes a Meta object from a provided HDF5 dataset
        :param meta_data_set: h5py dataset holding the stream's meta data
        """
        super().__init__(meta_data_set)
class McsStream(McsGroup):
    """
    Base class for all stream types
    """
    def __init__(self, stream_grp, data_typeid, meta_typeid, *args):
        """
        Initializes a stream object with its associated h5py group object
        :param stream_grp: group object correponding to a folder in the HDF5 file. It contains the data of this stream
        :param data_typeid: mcs type id of the data stored in the stream
        :param meta_typeid: mcs type id of the meta data stored in the stream
        """
        super().__init__(stream_grp)
        self._data_typeid = data_typeid
        self._meta_typeid = meta_typeid
        self._entities = None  # lazily filled by subclasses
    def _get_data_headers(self):
        """
        retrieves all headers present in a dataset
        :return: all headers native to the data datasets in a certain stream instance (list, possibly empty)
        """
        try:
            data_name = next(child for child in self._h5py_object if self._h5py_object[child].attrs['ID.TypeID'].decode('UTF-8') == self._data_typeid)
        except StopIteration:
            # stream contains no data dataset
            return list()
        headers = list()
        if hasattr(self._h5py_object[data_name].dtype, 'names'):
            headers = list(self._h5py_object[data_name].dtype.names)
        return headers
    def _get_meta_headers(self):
        """
        retrieves all headers of the meta data
        :return: all headers native to the meta datasets in a certain stream instance (list, possibly empty)
        """
        try:
            meta_name = next(child for child in self._h5py_object if self._h5py_object[child].attrs['ID.TypeID'].decode('UTF-8') == self._meta_typeid)
        except StopIteration:
            # fix: previously fell through with 'pass' and crashed on the
            # unbound 'meta_name' below (NameError); mirror _get_data_headers
            return list()
        headers = list()
        if hasattr(self._h5py_object[meta_name].dtype, 'names'):
            # list(...) for consistency with _get_data_headers (was a tuple)
            headers = list(self._h5py_object[meta_name].dtype.names)
        return headers
    @property
    def Data(self):
        "Access all datasets - collection of McsDataset objects"
        return McsStreamList([self._children[id_set.mcs_instanceid] for id_set in self._child_inventory if id_set.mcs_typeid == self._data_typeid])
    @property
    def Meta(self):
        "Access meta data"
        return McsStreamList([self._children[id_set.mcs_instanceid] for id_set in self._child_inventory if id_set.mcs_typeid == self._meta_typeid])
# Human-readable names of the stream types a McsStream subclass may represent
Stream_Types = ["Analog Stream", "Event Stream", "Segment Stream", "TimeStamp Stream", "Frame Stream", "Spike Stream"]
class McsStreamEntity(object):
    """
    Base class for stream entities: keeps a reference to the owning stream and
    the entity's identifier within that stream.
    """
    def __init__(self, parent, id):
        """
        :param parent: the McsStream instance this entity belongs to
        :param id: identifier of the entity within its parent stream
        """
        self._entity_id = id
        self.mcspy_parent = parent
class McsChannelStream(McsStream):
    """
    Container class for one analog stream of several channels.
    """
    channel_data_typeid = "5efe7932-dcfe-49ff-ba53-25accff5d622"
    channel_meta_typeid = "9e8ac9cd-5571-4ee5-bbfa-8e9d9c436daa"
    def __init__(self, channel_stream_grp):
        """
        Initializes a channel stream object containing several sweeps of channels over time
        :param channel_stream_grp: folder of the HDF5 file that contains the data of this analog stream
        """
        super().__init__(channel_stream_grp, McsChannelStream.channel_data_typeid, McsChannelStream.channel_meta_typeid)
    def __repr__(self):
        return '<McsChannelStream object at '+str(hex(id(self)))+'>'
    def _get_channel_sweeps_by_number(self, key):
        """
        retrieves all dataset that belong to sweep number 'key'
        :param key: key as int that identifies a sweep in the channel stream
        :return: list of id set that correlates with sweeps with number 'key' in a channel stream
        :raises KeyError: if key is not an int
        """
        if isinstance(key, int):
            out = list()
            for child in self._h5py_object.keys():
                # digits embedded in the child's HDF5 name identify the sweep
                sweep_number = [int(s) for s in child if s.isdigit()]
                try:
                    # fix: 'self.h5py_object' did not exist (AttributeError) -> '_h5py_object'
                    if sweep_number[0] == key and self._data_typeid == self._h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
                        out.append(next(id_set for id_set in self._child_inventory if child in id_set))
                except IndexError:
                    pass  # child name contains no digits -> not a sweep dataset
            return out
        raise KeyError('{} must be an instance of int!'.format(key))
    @property
    def DataChunk(self):
        """
        The continuous data segments in the stream
        """
        sweep_numbers = np.unique(self.ChannelMeta.GroupID).tolist()
        out = {}
        for sweep_number in sweep_numbers:
            out[sweep_number] = _list_property.McsProxy(self._get_channel_sweeps_by_number(sweep_number), obj=self, fget=self._get_children, fset=None, fdel=None)
        return out
class McsChannelEntity(McsDataset, McsStreamEntity):
    """
    Container class for one ChannelStream Entity.
    """
    def __init__(self, channel_stream_entity_dataset, mcspy_parent):
        """
        initializes a new McsChannelEntity from a h5py_dataset of a hdf5 ChannelData entity
        :param channel_stream_entity_dataset: h5py_dataset of a channel
        :param mcspy_parent: the McsChannelStream this entity belongs to
        """
        # last whitespace-separated token of the HDF5 name is parsed as the
        # group id (presumably names look like '.../ChannelData 1' - TODO confirm);
        # note: local 'id' shadows the builtin within this method only
        id = int(channel_stream_entity_dataset.name.split()[-1]) #_entity_id is Group ID
        McsDataset.__init__(self, channel_stream_entity_dataset)
        McsStreamEntity.__init__(self, mcspy_parent, id)
        self.dimensions = '[ \'number of channels\' x \'samples\' ]'
    def __repr__(self):
        return '<McsChannelEntity object at '+str(hex(id(self)))+', channels='+str(self.shape[0])+', samples='+str(self.shape[1])+'>'
    @property
    def Meta(self):
        """
        reads the subset of Meta data that belongs to the channels
        """
        # select the meta rows whose GroupID matches this entity's group id
        index = tuple(np.where(self.mcspy_parent.Meta[0].GroupID == self._entity_id)[0])
        return self.mcspy_parent.Meta[0][index,]
class McsEventStream(McsStream):
    """
    Container class for one Event Stream.
    """
    event_data_typeid = "abca7b0c-b6ce-49fa-ad74-a20c352fe4a7"
    event_meta_typeid = "8f58017a-1279-4d0f-80b0-78f2d80402b4"
    def __init__(self, event_stream_grp):
        """
        Initializes an event stream object
        :param event_stream_grp: folder of the HDF5 file that contains the data of this event stream
        """
        super().__init__(event_stream_grp, McsEventStream.event_data_typeid, McsEventStream.event_meta_typeid)
    def __repr__(self):
        return '<McsEventStream object at '+str(hex(id(self)))+', EventEntities='+str(len(self.EventEntity))+'>'
    def _read_entities(self, entity_class_name):
        """
        reads event stream entities into entity type associated objects
        :param entity_class_name: class name of the associated stream entity
        :raises KeyError: if entity_class_name does not name a known class
        """
        try:
            cls = globals()[entity_class_name] #getattr(__name__, entity_class_name)
        except KeyError as err:
            print(err)
            # re-raise: previously execution continued and crashed on the unbound 'cls'
            raise
        self._entities = list()
        # one entity object per distinct event id in the data
        for entity_type in np.unique(self.EventData.EventID):
            self._entities.append(cls(self, entity_type))
    @property
    def EventData(self):
        """
        All events of all event entities in the stream
        """
        return self.Data[0]
    @property
    def EventMeta(self):
        """
        The meta data for all event entities
        """
        return self.Meta[0]
    @property
    def EventEntity(self):
        """
        All event entities in the stream
        """
        if self._entities is None:  # 'is None' instead of '== None' (identity check)
            self._read_entities('McsEventEntity')
        return self._entities
class McsEventEntity(McsStreamEntity):
    """
    Container class for Event Entity object
    """
    def __init__(self, parent, event_id):
        """
        Initializes an Mcs EventEntity Object
        :param parent: parent McsEventStream instances
        :param event_id: identifier of the event entity (the type of event)
        """
        super().__init__(parent, event_id)
    def _get_data_by_header(self, header):
        # NOTE(review): 'mcspy_parent.data' (lowercase) is not a property defined
        # on McsEventStream (which has 'Data') - presumably resolved by a parent
        # class __getattr__; TODO confirm, otherwise this should be 'Data'
        index = list(np.where(self.mcspy_parent.data[0]['EventID'] == self._entity_id)[0])
        return self.mcspy_parent.data[0][index,header]
    def _get_meta_by_header(self, header):
        # NOTE(review): same concern as above for 'meta' vs 'Meta'
        index = list(np.where(self.mcspy_parent.meta[0]['EventID'] == self._entity_id)[0])
        return self.mcspy_parent.meta[0][index,header]
    def __getattr__(self, name):
        # expose the parent's data/meta column names as attributes of the entity
        if name in self.mcspy_parent._get_data_headers():
            return self._get_data_by_header(name)
        if name in self.mcspy_parent._get_meta_headers():
            return self._get_meta_by_header(name)
        raise AttributeError('{} is not a valid event attribute'.format(name))
    def __str__(self):
        return 'Event Entity \"' + self.meta['Label'][0].decode('UTF-8') + '\" Headers:\n'+'Event Data Headers: '+', '.join(self.mcspy_parent._get_data_headers())+'\nEvent Meta Headers: '+', '.join(self.mcspy_parent._get_meta_headers())
    def __repr__(self):
        # NOTE(review): 'self.data' resolves via __getattr__ only if 'data' is a
        # column header - TODO confirm this works, otherwise use 'self.events'
        return '<McsEventEntity object at '+str(hex(id(self)))+', Label='+ self.meta['Label'][0].decode('UTF-8') +', events='+str(len(self.data))+'>'
    @property
    def events(self):
        """
        The ids, timestamps and durations of the occurences of the event entity
        """
        index = list(np.where(self.mcspy_parent.EventData['EventID'] == self._entity_id)[0])
        return self.mcspy_parent.EventData[index]
    @property
    def meta(self):
        """
        The meta data for an event entity
        """
        index = list(np.where(self.mcspy_parent.EventMeta['EventID'] == self._entity_id)[0])
        return self.mcspy_parent.EventMeta[index]
class McsSensorStream(McsStream):
    """
    Container class for one Sensor Stream.
    """
    sensor_data_typeid = "49da47df-f397-4121-b5da-35317a93e705"
    sensor_meta_typeid = "ab2aa189-2e72-4148-a2ef-978119223412"
    def __init__(self, sensor_stream_grp):
        """
        Initializes an sensor stream object
        :param sensor_stream_grp: folder of the HDF5 file that contains the data of this sensor stream
        """
        super().__init__(sensor_stream_grp, McsSensorStream.sensor_data_typeid, McsSensorStream.sensor_meta_typeid)
    def __repr__(self):
        return '<McsSensorStream object at '+str(hex(id(self)))+'>'
    def _read_entities(self, entity_class_name):
        """
        reads sensor stream entities into entity type associated objects
        :param entity_class_name: class name of the associated stream entity
        """
        # FIXME(review): copied from McsEventStream - McsSensorStream has no
        # 'EventData' property, so calling this would raise AttributeError.
        # Left unchanged because the correct entity/ID source is not visible here.
        try:
            cls = globals()[entity_class_name] #getattr(__name__, entity_class_name)
        except KeyError as err:
            print(err)
        self._entities = list()
        for entity_type in np.unique(self.EventData.EventID):
            self._entities.append(cls(self, entity_type))
    def _get_sensor_sweeps_by_number(self, key):
        """
        retrieves all dataset that belong to sweep number 'key' in a sensor stream
        :param key: key as int that identifies a sweep in the sensor stream
        :return: list of id set that correlates with sweeps with number 'key'
        :raises KeyError: if key is not an int
        """
        if isinstance(key, int):
            out = list()
            for child in self._h5py_object.keys():
                # digits in the child name: index [0] is the roi number, [1] the sweep number
                sweep_number = [int(s) for s in child if s.isdigit()]
                try:
                    # fix: 'self.h5py_object' did not exist (AttributeError) -> '_h5py_object'
                    if sweep_number[1] == key and self._data_typeid == self._h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
                        out.append(next(id_set for id_set in self._child_inventory if child in id_set))
                except IndexError:
                    pass  # child name has fewer than two digits -> not a sweep dataset
            return out
        raise KeyError('{} must be an instance of int!'.format(key))
    def _get_sensor_rois_by_number(self, key):
        """
        retrieves all dataset that belong to roi number 'key' in a sensor stream
        :param key: key as int that identifies a roi in the sensor stream
        :return: list of id set that correlates with roi with number 'key'
        :raises KeyError: if key is not an int
        """
        if isinstance(key, int):
            out = list()
            for child in self._h5py_object.keys():
                roi_number = [int(s) for s in child if s.isdigit()]
                try:
                    # fix: 'self.h5py_object' did not exist (AttributeError) -> '_h5py_object'
                    if roi_number[0] == key and self._data_typeid == self._h5py_object[child].attrs["ID.TypeID"].decode('UTF-8'):
                        out.append(next(id_set for id_set in self._child_inventory if child in id_set))
                except IndexError:
                    pass  # child name has no digits -> not a roi dataset
            return out
        raise KeyError('{} must be an instance of int!'.format(key))
    @property
    def DataChunk(self):
        """
        The groups of data that have been acquired. Intended for acquisition of multiple time windows
        """
        sweep_numbers = np.unique(self.SensorMeta.GroupID).tolist()
        out = dict()
        for sweep_number in sweep_numbers:
            out[sweep_number] = _list_property.McsProxy(self._get_sensor_sweeps_by_number(sweep_number), obj=self, fget=self._get_children, fset=None, fdel=None)
        return out
    @property
    def Regions(self):
        """
        The regions of interest (ROI) on the sensor for which data has been acquired, usually from a rectangular subset of the sensors
        """
        roi_numbers = np.unique(self.SensorMeta.RegionID).tolist()
        out = dict()
        for roi_number in roi_numbers:
            out[roi_number] = _list_property.McsProxy(self._get_sensor_rois_by_number(roi_number), obj=self, fget=self._get_children, fset=None, fdel=None)
        return out
    @property
    def SensorData(self):
        """
        The sensor data datasets (collection of McsDataset objects, one per region/sweep)
        """
        return self.Data
    @property
    def SensorMeta(self):
        """
        The meta data for the acquired sensor data
        """
        return self.Meta[0]
class McsSensorEntity(McsDataset, McsStreamEntity):
    """
    Container class for one McsSensorEntity - a sensor stream entity.
    """
    def __init__(self, sensor_stream_entity_dataset, mcspy_parent):
        """
        Builds a new McsSensorEntity from an h5py dataset of a HDF5 SensorData entity.
        :param sensor_stream_entity_dataset: h5py dataset holding the sensor data
        :param mcspy_parent: the stream that owns this entity
        """
        # the entity id is the tuple of all numbers embedded in the dataset's name
        entity_name = sensor_stream_entity_dataset.name.split('/')[-1]
        entity_id = tuple(int(number) for number in re.findall(r'\d+', entity_name))
        McsDataset.__init__(self, sensor_stream_entity_dataset)
        McsStreamEntity.__init__(self, mcspy_parent, entity_id)
        self.dimensions = "[ 'frames' x 'region height' x 'region width' ]"
    def __repr__(self):
        frames, height, width = self.shape
        return '<McsSensorEntity object at {}, frames={}, height={}, width={}>'.format(hex(id(self)), frames, height, width)
class McsSpikeStream(McsStream):
    """
    Container class for one Spike Stream.
    """
    spike_data_typeid = "3e8aaacc-268b-4057-b0bb-45d7dc9ec73b"
    spike_meta_typeid = "e1d7616f-621c-4a26-8f60-a7e63a9030b7"
    def __init__(self, spike_stream_grp, spike_data_typeid="3e8aaacc-268b-4057-b0bb-45d7dc9ec73b"):
        """
        Initializes a spike stream object
        :param spike_stream_grp: folder of the HDF5 file that contains the data of this spike stream
        :param spike_data_typeid: mcs type id of the spike data (defaults to the standard spike data type)
        """
        super().__init__(spike_stream_grp, spike_data_typeid, McsSpikeStream.spike_meta_typeid)
    def __repr__(self):
        return '<McsSpikeStream object at '+str(hex(id(self)))+'>'
    def get_spikes_at_sensor(self, sensor_id):
        """
        retrieves all spikes that occured at the sensor with id sensor_id
        :param sensor_id: valid identifier for a sensor on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
        :return: numpy structured array of all spikes that have been detected on the sensor with id sensor_id
        :raises TypeError: if sensor_id is not an int
        :raises ValueError: if sensor_id is out of range
        """
        if not isinstance(sensor_id, int):
            raise TypeError('The given sensor id \'{}\' must be of type \'int\'.'.format(sensor_id))
        if not sensor_id in range(1,65**2+1):
            raise ValueError('The given sensor id \'{}\' must satify 1 <= sensor_id <= 65*65'.format(sensor_id))
        row_numbers = np.where(self.SpikeData['SensorID'] == sensor_id)[0]
        return self.SpikeData[tuple(row_numbers),]
    def get_spikes_in_interval(self, interval):
        """
        Retrieves all spikes that occured in a given time interval. Intervals exceeding the time range of the dataset will throw a warning,
        and retrieval of maximally sized subset of the interval is attempted.
        :param interval: interval in s as instance of
                            - list(start,stop) of length 2
                            - tuple(start,stop) of length 2
                         start must be a number, stop must be a number or the keyword 'end', start and stop must satisfy start < stop
        :result: numpy structured array which includes all spikes occuring in the given interval
        """
        if not isinstance(interval, (list,tuple)):
            raise TypeError('The given interval \'{}\' must be an instance of list(start,stop) or tuple(start,stop)'.format(interval))
        if not len(interval) == 2:
            raise ValueError('The given interval \'{}\' must provide a start and a stop value'.format(interval))
        if not isinstance(interval[0], Number):
            raise TypeError('start \'{}\' must be a number'.format(interval[0]))
        if not (isinstance(interval[1], Number) or interval[1]=='end'):
            # message fixed: report the offending stop value, not the start value
            raise TypeError('stop \'{}\' must be a number or the keyword \'end\''.format(interval[1]))
        # work on local copies: the original mutated the caller's list in place
        # and crashed with a TypeError when a tuple was passed (item assignment)
        start, stop = interval[0], interval[1]
        if stop=='end':
            # timestamps are in microseconds -> convert the last one to seconds
            stop = self.SpikeData.TimeStamp[-1]*(10**-6)
        if start>=stop:
            raise ValueError('start={} and stop={} do not satisfy start < stop'.format(start, stop))
        # convert the requested interval from seconds to microseconds
        start *= (10**6)
        stop *= (10**6)
        row_numbers = np.logical_and(start <= self.SpikeData['TimeStamp'], self.SpikeData['TimeStamp'] <= stop)
        return self.SpikeData[row_numbers,]
    def get_spike_timestamps_at_sensors(self, sensor_ids):
        """
        Retrieves all spike timestamps for all given sensors as a dictionary
        :param sensor_ids: valid identifiers for sensors on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
        :return: dictionary of all spike timestamps that have been detected on the given sensors. Key: sensor_id, value: spike timestamps
        """
        if isinstance(sensor_ids, Number):
            sensor_ids = [sensor_ids]  # allow a single id as convenience
        spike_dict = {}
        for sensor in sensor_ids:
            spikes = self.get_spikes_at_sensor(sensor)
            timestamps = [t[1] for t in spikes]  # field 1 is the timestamp
            spike_dict[sensor] = timestamps
        return spike_dict
    def get_spike_cutouts_at_sensor(self, sensor_id):
        """
        Retrieves the spike cutouts for all spikes for the given sensor_id
        :param sensor_id: valid identifier for a sensor on the MCS CMOS chip as int: 1 <= sensor_id <= 65*65
        :return: Numpy array spikes x samples of the spike cutouts
        """
        spikes = self.get_spikes_at_sensor(sensor_id)
        cutouts = [list(s)[2:] for s in spikes]  # fields after SensorID and TimeStamp
        return np.array(cutouts)
    @property
    def SpikeStreamEntity(self):
        "Access all spike datasets of the stream"
        return self.Data
    @property
    def SpikeData(self):
        """
        The detected spikes, each with a sensor ID, a timestamp and (optionally) with a cutout
        """
        return self.Data[0]
    @property
    def SpikeMeta(self):
        """
        The meta data for spike detection, e.g. pre- and post interval
        """
        return self.Meta[0]
class McsSpikeEntity(McsDataset, McsStreamEntity):
    """
    Container class for one SpikeStream Entity.
    """
    def __init__(self, spike_stream_entity_dataset, mcspy_parent):
        """
        initializes a new McsSpikeEntity from a h5py_dataset of a hdf5 SpikeData entity
        :param spike_stream_entity_dataset: h5py_dataset of the spike data
        :param mcspy_parent: the McsSpikeStream this entity belongs to
        """
        McsDataset.__init__(self, spike_stream_entity_dataset)
        # the spike stream has a single data entity -> fixed entity id 0
        McsStreamEntity.__init__(self, mcspy_parent, 0)
        self.dimensions = '[ \'# of spikes\' x \'SensorID + Timestamp + n cutout values\' ]'
    def __repr__(self):
        return '<McsSpikeEntity object at '+str(hex(id(self)))+', spikes='+str(self.shape[0])+'>'
class McsSegmentStream(McsStream):
    """
    Container class for one segment stream of different segment entities
    """
    def __init__(self, segment_stream_grp):
        """
        :param segment_stream_grp: folder of the HDF5 file that contains the data of this segment stream
        """
        # FIXME(review): the original called super().__init__(self, segment_stream_grp),
        # which passed 'self' as the stream group and omitted the required type ids,
        # raising a TypeError on every instantiation. Segment data/meta type ids are
        # not defined in this file -> pass None until they are known.
        super().__init__(segment_stream_grp, None, None)
    def __repr__(self):
        return '<McsSegmentStream object at '+str(hex(id(self)))+'>'
class McsSegmentStreamEntity(object):
    """
    Segment entity class - placeholder, not yet implemented.
    """
    pass
class McsTimeStampStream(McsStream):
    """
    Container class for one TimeStamp stream
    """
    def __init__(self, timestamp_stream_grp):
        """
        :param timestamp_stream_grp: folder of the HDF5 file that contains the data of this timestamp stream
        """
        # FIXME(review): the original called super().__init__(self, timestamp_stream_grp),
        # which passed 'self' as the stream group and omitted the required type ids,
        # raising a TypeError on every instantiation. TimeStamp data/meta type ids are
        # not defined in this file -> pass None until they are known.
        super().__init__(timestamp_stream_grp, None, None)
    def __repr__(self):
        return '<McsTimeStampStream object at '+str(hex(id(self)))+'>'
class McsTimeStampStreamEntity(object):
    """
    TimeStamp stream entity class - placeholder, not yet implemented.
    """
    pass
class NetworkExplorer(McsGroup):
"""
Container class for a NetworkExplorer object
"""
    def __init__(self, network_explorer_group):
        """
        Initializes a NetworkExplorer object from its h5py group.
        :param network_explorer_group: h5py group of the 'Network Explorer' (or legacy 'STA Explorer') folder
        """
        self.__network_explorer_group = network_explorer_group
        # default access mode: STA entities are retrieved by source ID
        self._sta_key_type = self.get_sta_entity_by_sourceID
        self._map_sensorID_to_sourceID = {}
        self._sta_entity = None  # lazily filled by _read_sta_entities()
        super().__init__(network_explorer_group)
def __str__(self):
"""
provides a string method that prepares the object attributes for printing
"""
if(self.__network_explorer_group):
out = 'The NetworkExplorer objects hold the following information:\n'
out += 'Attributes:\n'
for (name, value) in self.__network_explorer_group.attrs.items():
if hasattr(value, "decode"):
out += ("\t"+name.ljust(20)+"\t"+value.decode('UTF-8')+"\n")
else:
out += ("\t"+name.ljust(20)+"\t"+str(value).strip('[]')+"\n")
out += '------------------------------------------\nSubgroups\n'
out += '------------------------------------------\nDatasets\n'
for (name, value) in self.__network_explorer_group.items():
if hasattr(value, "decode"):
out += ("\t"+name.ljust(20)+"\t"+value.decode('UTF-8')+"\n")
else:
out += ("\t"+name.ljust(20)+"\t"+str(value).strip('[]')+"\n")
return out
def __repr__(self):
if self._sta_entity is None:
return '<NetworkExplorer object at '+str(hex(id(self)))+'>'
else:
return '<NetworkExplorer object at '+str(hex(id(self)))+', entities='+str(len(self._sta_entity))+'>'
    def _read_sta_entities(self):
        """
        Retrieves all stored sta_entities and saves them in a dictionary with special access methods
        """
        self._sta_entity = {}
        self._neural_network = {}
        entity_dict = {}
        # HDF5 type ids (bytes) of the dataset kinds that make up an STA entity
        sta_type = b'442b7514-fe3a-4c66-8ae9-4f249ef48f2f'
        spikes_type = b'1b4e0b8b-6af1-4b55-a685-a6d28a922eb3'
        stddev_type = b'a056832a-013d-4215-b8a6-cb1debeb1c56'
        network_type = b'235c3c9c-1e94-40ca-8d4b-c5db5b079f16'
        # first pass: group dataset names by their SourceID and type id;
        # the neural network dataset is parsed directly
        for (name, _) in self.__network_explorer_group.items():
            type_id = self.__network_explorer_group[name].attrs['ID.TypeID']
            if type_id in [sta_type, spikes_type, stddev_type]:
                source_id = int(self.__network_explorer_group[name].attrs['SourceID'])
                if not source_id in entity_dict.keys():
                    entity_dict[source_id] = {}
                entity_dict[source_id][type_id] = name
            elif type_id == network_type:
                self._read_neural_network(self.__network_explorer_group[name])
        # second pass: build one STAEntity per source id (spikes/stddev datasets are optional)
        for source_id in entity_dict.keys():
            new_sta_entity = STAEntity(self.__network_explorer_group,entity_dict[source_id][sta_type],
                                entity_dict[source_id].get(spikes_type, None), entity_dict[source_id].get(stddev_type, None),
                                self.get_axon_for_entity_by_sourceID(source_id))
            self._sta_entity[new_sta_entity._sta_entity_sourceID] = new_sta_entity
            # remember the sensor -> source mapping for sensor-ID based lookups
            self._map_sensorID_to_sourceID[new_sta_entity._sta_entity_sensorID] = new_sta_entity._sta_entity_sourceID
    def _read_neural_network(self, group):
        """
        Parses the neural network dataset into the nested dictionary
        self._neural_network[unit_id][axon_id][segment_id] -> list of (PosX, PosY) points.
        :param group: dataset whose entries carry UnitID, AxonID, SegmentID, PosX and PosY fields
        """
        for entry in group:
            unit_id = int(entry['UnitID'])
            axon_id = int(entry['AxonID'])
            segment_id = int(entry['SegmentID'])
            if not unit_id in self._neural_network.keys():
                self._neural_network[unit_id] = {}
            # AxonID/SegmentID of -1 mark entries without axon/segment data
            if axon_id != -1 and not axon_id in self._neural_network[unit_id].keys():
                self._neural_network[unit_id][axon_id] = {}
            if segment_id != -1 and not segment_id in self._neural_network[unit_id][axon_id].keys():
                self._neural_network[unit_id][axon_id][segment_id] = []
            if axon_id != -1 and segment_id != -1:
                self._neural_network[unit_id][axon_id][segment_id].append((entry['PosX'], entry['PosY']))
    def get_sta_entity_by_sourceID(self, key):
        """
        Retrieve the STA Entity for the given source ID.
        :param key: A valid source ID. See the sourceIDs attribute for a list of valid source IDs
        :return: The STA Entity for the given source ID, or None (with a message printed) on an invalid key
        """
        if self._sta_entity is None:
            self._read_sta_entities()  # lazy load on first access
        try:
            return self._sta_entity[key]
        except KeyError:
            print("Oops! That was not a valid sourceID. For a list of all available sourceIDs use My_sta_explorer_object.sourceIDs ")
        except TypeError as err:
            print(err)
    def get_sta_entity_by_sensorID(self, key):
        """
        Retrieve the STA Entity for the given sensor ID.
        :param key: A valid sensor ID. See the sensorIDs attribute for a list of valid sensor IDs
        :return: The STA Entity for the given sensor ID, or None (with a message printed) on an invalid key
        """
        if self._sta_entity is None:
            self._read_sta_entities()  # lazy load on first access
        try:
            # translate the sensor ID to its source ID before the lookup
            return self._sta_entity[self._map_sensorID_to_sourceID[key]]
        except KeyError:
            print("Oops! That was not a valid sensorID. For a list of all available sensorIDs use My_sta_explorer_object.sensorIDs ")
        except TypeError as err:
            print(err)
def get_sta_entity(self, key):
    """
    Retrieve the STA Entity for the given key.

    :param key: A valid key, either a sensor or a source ID, depending on the sta_key_type attribute
    :return: The STA Entity for the given key
    """
    if self._sta_entity is None:
        self._read_sta_entities()
    # Delegate to the currently selected accessor
    # (get_sta_entity_by_sourceID or get_sta_entity_by_sensorID).
    return self._sta_key_type(key)
def set_sta_entity(self, key, value):
    """
    Sets an entity to a value

    :param key: key of the entity to overwrite
    :param value: the new entity value
    """
    dprint("Setting _sta_entity[", key, "] to ", value)
    self._sta_entity[key] = value
def del_sta_entity(self, key):
    """
    Deletes an entity

    :param key: key of the entity to remove
    """
    dprint("Deleting _sta_entity[", key, "]")
    del self._sta_entity[key]
def get_axon_for_entity_by_sourceID(self, key, axon=1, segment=1):
    """
    Retrieve the path of the axon for a given sensor or source ID.

    :param key: A valid key, either a sensor or a source ID, depending on the sta_key_type attribute
    :param axon: A valid axon ID, in case multiple axons have been found for a unit. Default: 1
    :param segment: A valid axon ID, in case multiple segments have been found for an axon. Default: 1
    :return: The axon path as a list of (X,Y) tuples in sensor coordinates. Returns None if no axon is found
    """
    if self._sta_entity is None:
        # Entities (and the neural network table) are parsed lazily.
        self._read_sta_entities()
    unit = self._neural_network.get(key)
    if unit is None or axon not in unit:
        return None
    # dict.get returns None when the segment is missing, matching the contract.
    return unit[axon].get(segment)
# Dictionary-style class property: exposes the STA entities through the
# getter/setter/deleter methods defined above.
sta_entity = DictProperty_for_Classes(get_sta_entity, set_sta_entity, del_sta_entity)
@property
def sta_key_type(self):
    """
    The type of key used in the access functions. Either 'sourceID' or 'sensorID'
    """
    # Compare the stored accessor against the two known entity getters.
    accessor = self._sta_key_type
    if accessor == self.get_sta_entity_by_sourceID:
        return 'sourceID'
    if accessor == self.get_sta_entity_by_sensorID:
        return 'sensorID'
    return None
@sta_key_type.setter
def sta_key_type(self, value):
    """
    Select whether STA entity retrievals interpret keys as source or sensor IDs.

    :param value: either 'sourceID' or 'sensorID'; anything else prints an error
    """
    if value == 'sourceID':
        print("All STA entity retrievals are now by " + value)
        # Bug fix: previously this assigned a local variable `_sta_key_type`,
        # so the selection never took effect on the instance.
        self._sta_key_type = self.get_sta_entity_by_sourceID
    elif value == 'sensorID':
        print("All STA entity retrievals are now by " + value)
        # Bug fix: this branch previously selected the sourceID accessor,
        # making 'sensorID' selection silently wrong.
        self._sta_key_type = self.get_sta_entity_by_sensorID
    else:
        print("Oops! That is not a valid way of selecting STA entities. Try 'sourceID' or 'sensorID'")
@property
def sourceIDs(self):
    """
    A list of valid source IDs
    """
    if self._sta_entity is None:
        # Entities are parsed lazily on first access.
        self._read_sta_entities()
    return [source_id for source_id in self._map_sensorID_to_sourceID.values()]
@property
def sensorIDs(self):
    """
    A list of valid sensor IDs
    """
    if self._sta_entity is None:
        # Entities are parsed lazily on first access.
        self._read_sta_entities()
    return [sensor_id for sensor_id in self._map_sensorID_to_sourceID.keys()]
@property
def attributes(self):
    # HDF5 attributes of the network explorer group.
    # NOTE(review): this reads self.__network_expl_group while other code in
    # this class uses self.__network_explorer_group -- confirm both (name-mangled)
    # attributes are actually set on instances.
    return self.__network_expl_group.attrs.items()
class STAEntity(object):
    """
    Container Class for a STAEntity object
    """

    def __init__(self, sta_explorer, sta_entity, spikes_entity=None, stastddev_entity=None, axon=None):
        """
        :param sta_explorer: group-like object used to resolve entity names to datasets
        :param sta_entity: name of the STA dataset inside sta_explorer
        :param spikes_entity: optional name of the detected-spikes dataset
        :param stastddev_entity: optional name of the STA standard-deviation dataset
        :param axon: optional axon path as a list of (X, Y) tuples
        """
        self._sta_explorer = sta_explorer
        self._sta_entity_string = sta_entity
        attrs = sta_explorer[sta_entity].attrs
        self._sta_entity_sourceID = int(attrs['SourceID'])
        self._sta_entity_sensorID = int(attrs['SensorID'])
        # Convert the sensor ID to 1-based (X, Y) chip coordinates.
        x, y = McsCMOSMEAData.sensorID_to_coordinates(self._sta_entity_sensorID)
        self._sta_entity_coordinates = np.array([int(x), int(y)])
        self._spikes_entity = spikes_entity
        self._stastddev_entity = stastddev_entity
        self._axon = axon

    def __repr__(self):
        return '<STAEntity object at ' + hex(id(self)) + '>'

    @property
    def data(self):
        """
        The STA data as a numpy array of shape (frames x sensors_Y x sensor_X)
        """
        return self._sta_explorer[self._sta_entity_string]

    @property
    def spikes(self):
        """
        Detected spikes in the STA
        """
        return None if self._spikes_entity is None else self._sta_explorer[self._spikes_entity]

    @property
    def sta_stddev(self):
        """
        Returns the standard deviation for each channel in the STA. Used for spike detection on the STA
        """
        return None if self._stastddev_entity is None else self._sta_explorer[self._stastddev_entity]

    @property
    def sensor_coordinates(self):
        """
        Returns the STA source coordinates on the chip as [X,Y]. Note: X and Y are 1-based
        """
        return self._sta_entity_coordinates

    @property
    def axon(self):
        """
        Returns the axon path as a list of (X,Y) tuples in sensor coordinates. None if no axon has been found
        """
        return self._axon
class SpikeExplorer(McsSpikeStream):
    """
    Container Class for an SpikeExplorer object
    """

    def __init__(self, spike_explorer_group):
        """
        :param spike_explorer_group: HDF5 group holding the spike explorer data
        """
        self._spike_explorer_group = spike_explorer_group
        # The fixed TypeID identifies the spike data set inside the group.
        super().__init__(spike_explorer_group,
                         spike_data_typeid='1b4e0b8b-6af1-4b55-a685-a6d28a922eb3')

    def __repr__(self):
        return '<SpikeExplorer object at ' + hex(id(self)) + '>'
class SpikeSorter(McsGroup):
    """
    Container for SpikeSorter object
    """
    def __init__(self, spike_sorter_group):
        # Index all spike-sorter "unit" sub-groups of the given group by UnitID.
        self._spike_sorter_group = spike_sorter_group
        self._units = {}
        super().__init__(spike_sorter_group)
        # TypeID (bytes, as stored in the HDF5 attrs) that marks a unit sub-group.
        unit_type = b'0e5a97df-9de0-4a22-ab8c-54845c1ff3b9'
        for (name, _) in self._spike_sorter_group.items():
            type_id = self._spike_sorter_group[name].attrs['ID.TypeID']
            if type_id == unit_type:
                unit_id = int(self._spike_sorter_group[name].attrs['UnitID'])
                # Map the unit ID to the wrapped child object exposed on self.
                child = self.ischild(name)
                self._units[unit_id] = getattr(self, child.mcspy)
    def __repr__(self):
        return '<SpikeSorter object at '+str(hex(id(self)))+'>'
    def get_unit(self, unit_id):
        """
        Retrieves a single unit by its UnitID

        :param unit_id: A valid unit ID.
        """
        return self._units[unit_id]
    def get_units_by_id(self):
        """
        Returns a list of units sorted by unit ID
        """
        unit_ids = list(self._units.keys())
        unit_ids.sort()
        return [self._units[i] for i in unit_ids]
    def get_units_by_measure(self, measure, descending=True):
        """
        Returns a list of units ordered by the given quality measure.

        :param measure: The name of a quality measure. See get_unit_measures() for a list of valid quality measure names.
        :param descending: The ordering of the list. Default: True (=descending order)
        """
        if not measure in self.get_unit_measures():
            raise ValueError(measure + " is not a valid measure. See get_unit_measures() for valid parameters")
        # Sort the unit IDs by the selected measure column (ascending), then
        # reverse for descending order if requested.
        m = self.Units[measure]
        idx = np.argsort(m)
        ids = self.Units['UnitID'][idx]
        if descending:
            ids = ids[::-1]
        return [self._units[i] for i in ids]
    def get_unit_measures(self):
        """
        Returns a list of the available unit quality measure names
        """
        # The first 5 fields of the Units table are bookkeeping columns; the
        # remaining field names are the quality measures.
        lf = list(self.Units.dtype.fields)
        return lf[5:]
class SpikeSorterUnitEntity(McsGroup):
    """
    Container for Spike Sorter Units
    """
    def __init__(self, unit_group):
        # Wrap a single spike-sorter unit sub-group and cache its IDs.
        self._unit_group = unit_group
        self._unit_entity_unitID = int(unit_group.attrs['UnitID'])
        self._unit_entity_sensorID = int(unit_group.attrs['SensorID'])
        # Convert the sensor ID to chip (X, Y) coordinates.
        x,y = McsCMOSMEAData.sensorID_to_coordinates(self._unit_entity_sensorID)
        self._unit_entity_coordinates = np.array([int(x),int(y)])
        # Lazily computed boolean mask over the Peaks table (see get_peaks).
        self._included_peaks = None
        super().__init__(unit_group)
    def __repr__(self):
        return '<SpikeSorterUnitEntity object at '+str(hex(id(self)))+', id='+str(self._unit_entity_unitID)+', sensor='+str(self._unit_entity_coordinates)+'>'
    def get_peaks(self):
        """
        Retrieves all peaks in the source signal where the 'IncludePeak' flag is set.
        """
        if self._included_peaks is None:
            # Cache the mask so repeated calls do not recompute it.
            self._included_peaks = self.Peaks['IncludePeak'] == 1
        return self.Peaks[self._included_peaks]
    def get_peaks_timestamps(self):
        """
        Retrieves the timestamps for all peaks in the source signal where the 'IncludePeak' flag is set.
        """
        return self.get_peaks()['Timestamp']
    def get_peaks_amplitudes(self):
        """
        Retrieves the peak amplitudes for all peaks in the source signal where the 'IncludePeak' flag is set.
        """
        return self.get_peaks()['PeakAmplitude']
    def get_peaks_cutouts(self):
        """
        Retrieves the cutouts for all peaks in the source signal where the 'IncludePeak' flag is set.
        """
        peaks = self.get_peaks()
        # Fields after the first three of each row form the cutout samples.
        cutouts = [list(p)[3:] for p in peaks]
        return np.stack(cutouts)
    def get_measures(self):
        """
        Gets a list of valid unit quality measures names
        """
        # The first 5 fields of Unit_Info are bookkeeping; the rest are measures.
        lf = list(self.Unit_Info.dtype.fields)
        return lf[5:]
    def get_measure(self, measure):
        """
        Gets a quality measure for this unit

        :param measure: The name of a quality measure. See get_measures() for a list of valid quality measure names.
        """
        if not measure in self.get_measures():
            raise ValueError(measure + " is not a valid measure. See get_measures() for valid parameters")
        return self.Unit_Info[measure][0]
class FilterTool(McsGroup):
    """
    Container for FilterTool object
    """

    def __init__(self, filter_tool):
        """
        :param filter_tool: HDF5 group holding the filter tool data
        """
        self._filter_tool = filter_tool
        super().__init__(filter_tool)

    def __repr__(self):
        # Bug fix: previously reported itself as '<SpikeSorter object ...>'
        # (copy-paste from the SpikeSorter class).
        return '<FilterTool object at ' + str(hex(id(self))) + '>'
class ActivitySummary(McsGroup):
    """
    Container for ActivitySummary object
    """

    def __init__(self, activity_summary):
        """
        :param activity_summary: HDF5 group holding the activity summary data
        """
        self._activity_summary = activity_summary
        super().__init__(activity_summary)

    def __repr__(self):
        return '<ActivitySummary object at ' + hex(id(self)) + '>'
|
{"hexsha": "5d55a524489e3d19ef31315af54788bac01e1054", "size": 75192, "ext": "py", "lang": "Python", "max_stars_repo_path": "McsPyDataTools/McsPy/McsCMOSMEA.py", "max_stars_repo_name": "multichannelsystems/McsPyDataTools", "max_stars_repo_head_hexsha": "45777d5955043cc6849ea2f01ea442aa19141edd", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-01-02T09:02:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T16:49:38.000Z", "max_issues_repo_path": "McsPyDataTools/McsPy/McsCMOSMEA.py", "max_issues_repo_name": "multichannelsystems/McsPyDataTools", "max_issues_repo_head_hexsha": "45777d5955043cc6849ea2f01ea442aa19141edd", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-06-25T12:34:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T16:22:09.000Z", "max_forks_repo_path": "McsPyDataTools/McsPy/McsCMOSMEA.py", "max_forks_repo_name": "multichannelsystems/McsPyDataTools", "max_forks_repo_head_hexsha": "45777d5955043cc6849ea2f01ea442aa19141edd", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2019-09-27T13:37:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T07:37:25.000Z", "avg_line_length": 43.1889718553, "max_line_length": 304, "alphanum_fraction": 0.5984014257, "include": true, "reason": "import numpy,from numpy", "num_tokens": 17674}
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 4 17:10:31 2021
Script to visualise the results in a dataset folder.
@author: Rebecca Gedda
"""
##############################################################################
# VISUALISE PREDICTIONS
##############################################################################
# IMPORTS
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
### FUNCTIONS ################################################################
def plot_shifting_backgorund(data, cps):
    """
    Plot a series with alternating grey background bands between change points.

    :param data: the series to plot
    :param cps: iterable of change-point indices marking band boundaries
    """
    plt.figure(figsize=(16, 4))
    plt.plot(data, label='Data')
    start = 0
    colour = 0.3
    for end in cps:
        plt.axvspan(start, end, facecolor='grey', alpha=colour)
        start = end
        # Alternate the band opacity between 0.3 and 0.2.
        colour = 0.2 if colour == 0.3 else 0.3
def read_dataset_folder(name_tag):
    """
    Load a dataset folder containing a data file and its true change points.

    :param name_tag: name of the folder under 'Datasets/'
    :return: (data, CPs) -- the data as a DataFrame and the true change points
    """
    folder = 'Datasets/' + name_tag
    for entry in os.listdir(folder):
        path = os.path.join(folder, entry)
        if entry == 'CPs.dat':
            # True change points are stored as a single indexed column named '0'.
            CPs = pd.read_csv(path, delimiter=",", index_col=0)['0'].values
        elif entry != 'Results':
            # Any other file (except the Results folder) is the data itself.
            data = pd.read_csv(path, delimiter=",", index_col=0)
    return data, CPs
def print_ruptures_results(row_df):
    """
    Pretty-print the evaluation metrics of a single ruptures result row.

    :param row_df: one-row DataFrame (index 0) with columns Cost_function,
        Search_direction, CPs, K, AE, MeanTime, Percision, Recall, F1 and RI
    """
    header = ('----- Results for optimisation approach \n using ' +
              row_df['Cost_function'][0] + ' with ' + row_df['Search_direction'][0])
    # Note: the data files use the misspelled column name 'Percision'.
    metrics = (' \n \n' + str(row_df['CPs'][0]) +
               '\n\n K: ' + str(row_df['K'][0]) +
               '\n AE: ' + str(row_df['AE'][0]) +
               '\n Meantime: ' + str(row_df['MeanTime'][0]) +
               '\n Precision: ' + str(row_df['Percision'][0]) +
               '\n Recall: ' + str(row_df['Recall'][0]) +
               '\n F1: ' + str(row_df['F1'][0]) +
               '\n Randindex: ' + str(row_df['RI'][0]) +
               '\n ------------------------------------------------------ \n\n')
    print(header + metrics)
def print_bayes_results(CPs_true, CPs, data=None):
    """
    Compute and pretty-print evaluation metrics for a Bayesian change-point result.

    :param CPs_true: true change-point indices
    :param CPs: predicted change-point indices
    :param data: optional series used to size the precision/recall margin
        (1% of its length). Defaults to the module-level ``data`` for backward
        compatibility with existing callers, which relied on that global.
    """
    import ruptures.metrics as rpt_met
    if data is None:
        # Backward compatible: previously this function silently read the
        # global `data` defined by the main script.
        data = globals()['data']
    # Metrics
    K = len(CPs)
    # Annotation error: difference in the number of change points
    AE = np.abs(K - len(CPs_true))
    # Meantime between matched true/predicted change points
    MT = rpt_met.meantime(CPs_true, CPs)
    # RandIndex of the induced segmentations
    RI = rpt_met.randindex(CPs, CPs_true)
    # Precision/Recall (Note margin can be changed)
    margin = np.ceil(data.shape[0]*0.01)
    #margin = 10
    Precision = rpt_met.precision_recall(CPs_true, CPs, margin = margin)
    # F1-score, guarding against division by zero
    if Precision[0] + Precision[1] != 0:
        F1 = (2*Precision[0]*Precision[1])/(Precision[0] + Precision[1])
    else:
        F1 = 0
    print('----- Results for Bayesian approach'
          '\n \n' + str(CPs) +
          '\n \n K: ' + str(K) +
          '\n AE: '+ str(AE) +
          '\n Meantime: ' + str(MT) +
          '\n Precision: '+ str(Precision[0])+
          '\n Recall: ' +str(Precision[1])+
          '\n F1: ' + str(F1) +
          '\n Randindex: ' + str( RI) +
          '\n ------------------------------------------- \n \n')
### MAIN SCRIPT ##############################################################
# Dataset selection
data_tag = 'P2' # P1, P2, P3, P4, P5, P6, PRONTO
# Settings # SELECTIONS
search_direction = 'WIN' # WIN or PELT
cost_functions = ['ridge', 'lasso', 'ar', 'l1', 'l2'] # l1, l2, ar, normal, LinReg, ridge, lasso
selection_metric = 'F1' #
maximise = True # If false, minimise
bayesian = True
# Esthetic settings
# Starting y position and per-series vertical spacing of the prediction markers
ruptures_y = -0.1
ruptures_space = -0.1
bayes_y = -0.8
bayes_space = -0.1
print_results = True # True or False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Plot the data and true change points
directory = 'Datasets/'+data_tag+'/Results'
data, CPs_true = read_dataset_folder(data_tag)
# Reduce multivariate data to its first column before plotting
if data.shape[1]!=1:
    data = data[data.columns[0]]
plot_shifting_backgorund(data, CPs_true)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Ruptures results
print('Dataset '+ data_tag +'\n - - - - - - - - - - - - - - - -')
for cost in cost_functions:
    for file in os.listdir(directory):
        if file.startswith(cost+'_'+search_direction):
            # Read file and filter search direction
            df = pd.read_csv(directory+'/'+file)
            # Maximise or minimise
            if maximise:
                row = df[selection_metric].argmax()
            else:
                row = df[selection_metric].argmin()
            if print_results:
                print_ruptures_results(df.iloc[[row]])
            # Parse the stored change-point list string (e.g. "[10 20 30]")
            cps = df['CPs'][row].split()[1:-1]
            # Plot the predictions and update new y position
            plt.plot(np.array(cps).astype(int), np.ones(len(cps))*ruptures_y , 'o', label = cost)
            ruptures_y += ruptures_space
del cost, file, row, cps, df
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Bayesian results
if bayesian:
    for file in os.listdir(directory):
        if file.startswith('Bayes_CPs'):
            df = pd.read_csv(directory+'/'+file)
            cps = df['0'].values
            # Plot the predictions and update new y position
            lab = 'Bayes '+file[-12:-4]
            plt.plot(cps, np.ones(len(cps))*bayes_y , 'X', label = lab )
            bayes_y += bayes_space
            if print_results:
                print_bayes_results(CPs_true, cps)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Add title etc
plt.title('Predictions on ' + data_tag, fontsize = 14)
plt.legend(loc = 'upper left', fontsize = 14)
# Read Bayes
|
{"hexsha": "9a264f0269b0e7b447c9dc9688c471f8891b829e", "size": 6227, "ext": "py", "lang": "Python", "max_stars_repo_path": "visualise_results.py", "max_stars_repo_name": "geddar/Interactive-change-point-detection", "max_stars_repo_head_hexsha": "6b7e86b847586542d96e84df1aba6bc5095f2539", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-20T03:48:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T03:48:19.000Z", "max_issues_repo_path": "visualise_results.py", "max_issues_repo_name": "geddar/Interactive-change-point-detection", "max_issues_repo_head_hexsha": "6b7e86b847586542d96e84df1aba6bc5095f2539", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "visualise_results.py", "max_forks_repo_name": "geddar/Interactive-change-point-detection", "max_forks_repo_head_hexsha": "6b7e86b847586542d96e84df1aba6bc5095f2539", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-04T17:54:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-04T17:54:35.000Z", "avg_line_length": 33.8423913043, "max_line_length": 99, "alphanum_fraction": 0.4642685081, "include": true, "reason": "import numpy", "num_tokens": 1630}
|
import argparse
from typing import List
from datetime import datetime, timedelta
import numpy as np
from sklearn import linear_model
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from gamestonk_terminal.stocks.government import quiverquant_model
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
check_positive,
plot_autoscale,
)
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal import feature_flags as gtff
# pylint: disable=C0302
def last_government(other_args: List[str], gov_type: str):
    """Last government trading

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    gov_type: str
        Type of government data between: congress, senate and house
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="last_" + gov_type,
        description=f"Last {gov_type} trading. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_days",
        action="store",
        dest="past_transactions_days",
        type=check_positive,
        default=5,
        help="Past transaction days",
    )
    parser.add_argument(
        "-r",
        "--representative",
        action="store",
        dest="representative",
        type=str,
        default="",
        help="Representative",
    )
    try:
        if other_args:
            # Treat a leading bare value as the -p argument for convenience.
            if "-" not in other_args[0]:
                other_args.insert(0, "-p")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_gov = quiverquant_model.get_government_trading(gov_type)
        if df_gov.empty:
            print(f"No {gov_type} trading data found\n")
            return
        df_gov = df_gov.sort_values("TransactionDate", ascending=False)
        # Keep only the N most recent distinct transaction dates.
        df_gov = df_gov[
            df_gov["TransactionDate"].isin(
                df_gov["TransactionDate"].unique()[: ns_parser.past_transactions_days]
            )
        ]
        if gov_type == "congress":
            # Congress data carries extra House/ReportDate columns.
            df_gov = df_gov[
                [
                    "TransactionDate",
                    "Ticker",
                    "Representative",
                    "Transaction",
                    "Range",
                    "House",
                    "ReportDate",
                ]
            ].rename(
                columns={
                    "TransactionDate": "Transaction Date",
                    "ReportDate": "Report Date",
                }
            )
        else:
            df_gov = df_gov[
                [
                    "TransactionDate",
                    "Ticker",
                    "Representative",
                    "Transaction",
                    "Range",
                ]
            ].rename(columns={"TransactionDate": "Transaction Date"})
        if ns_parser.representative:
            # Filter by the representative's first name token.
            df_gov_rep = df_gov[
                df_gov["Representative"].str.split().str[0] == ns_parser.representative
            ]
            if df_gov_rep.empty:
                print(
                    f"No representative {ns_parser.representative} found in the past {ns_parser.past_transactions_days}"
                    f" days. The following are available: "
                    f"{', '.join(df_gov['Representative'].str.split().str[0].unique())}"
                )
            else:
                print(df_gov_rep.to_string(index=False))
        else:
            print(df_gov.to_string(index=False))
        print("")
    except Exception as e:
        print(e, "\n")
def buy_government(other_args: List[str], gov_type: str):
    """Top buy government trading

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    gov_type: str
        Type of government data between: congress, senate and house
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="buy_" + gov_type,
        description=f"Top buy {gov_type} trading. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_months",
        action="store",
        dest="past_transactions_months",
        type=check_positive,
        default=6,
        help="Past transaction months",
    )
    parser.add_argument(
        "-t",
        "--top",
        action="store",
        dest="top_num",
        type=check_positive,
        default=10,
        help="Number of top tickers",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_gov = quiverquant_model.get_government_trading(gov_type)
        if df_gov.empty:
            print(f"No {gov_type} trading data found\n")
            return
        df_gov = df_gov.sort_values("TransactionDate", ascending=False)
        # Restrict to the requested look-back window (approximated as 30-day months).
        start_date = datetime.now() - timedelta(
            days=ns_parser.past_transactions_months * 30
        )
        df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
        df_gov = df_gov[df_gov["TransactionDate"] > start_date].dropna()
        # Parse the dollar "Range" string (e.g. "$1,001 - $15,000") into numeric bounds.
        df_gov["min"] = df_gov["Range"].apply(
            lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
        )
        df_gov["max"] = df_gov["Range"].apply(
            lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
            if "-" in x
            else x.strip("$").replace(",", "")
        )
        # Signed bounds per transaction: purchases positive, sales negative.
        df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: float(x["min"])
            if x["Transaction"] == "Purchase"
            else -float(x["max"]),
            axis=1,
        )
        df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: float(x["max"])
            if x["Transaction"] == "Purchase"
            else -float(x["min"]),
            axis=1,
        )
        df_gov = df_gov.sort_values("TransactionDate", ascending=True)
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        # Sum the signed upper bounds per ticker; largest totals = most bought.
        df_gov.groupby("Ticker")["upper"].sum().div(1000).sort_values(
            ascending=False
        ).head(n=ns_parser.top_num).plot(kind="bar", rot=0)
        plt.ylabel("Amount [1k $]")
        plt.title(
            f"Top {ns_parser.top_num} most bought stocks since last {ns_parser.past_transactions_months} "
            "months (upper bound)"
        )
        plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def sell_government(other_args: List[str], gov_type: str):
    """Top sell government trading

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    gov_type: str
        Type of government data between: congress, senate and house
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="sell_" + gov_type,
        description=f"Top sell {gov_type} trading. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_months",
        action="store",
        dest="past_transactions_months",
        type=check_positive,
        default=6,
        help="Past transaction months",
    )
    parser.add_argument(
        "-t",
        "--top",
        action="store",
        dest="top_num",
        type=check_positive,
        default=10,
        help="Number of top tickers",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_gov = quiverquant_model.get_government_trading(gov_type)
        if df_gov.empty:
            print(f"No {gov_type} trading data found\n")
            return
        df_gov = df_gov.sort_values("TransactionDate", ascending=False)
        # Restrict to the requested look-back window (approximated as 30-day months).
        start_date = datetime.now() - timedelta(
            days=ns_parser.past_transactions_months * 30
        )
        df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
        df_gov = df_gov[df_gov["TransactionDate"] > start_date].dropna()
        # Parse the dollar "Range" string (e.g. "$1,001 - $15,000") into numeric bounds.
        df_gov["min"] = df_gov["Range"].apply(
            lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
        )
        df_gov["max"] = df_gov["Range"].apply(
            lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
            if "-" in x
            else x.strip("$").replace(",", "")
        )
        # Signed bounds per transaction: purchases positive, sales negative.
        df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: float(x["min"])
            if x["Transaction"] == "Purchase"
            else -float(x["max"]),
            axis=1,
        )
        df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: float(x["max"])
            if x["Transaction"] == "Purchase"
            else -float(x["min"]),
            axis=1,
        )
        df_gov = df_gov.sort_values("TransactionDate", ascending=True)
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        # Sum the signed lower bounds per ticker; most negative totals = most sold
        # (abs() is taken for display).
        df_gov.groupby("Ticker")["lower"].sum().div(1000).sort_values().abs().head(
            n=ns_parser.top_num
        ).plot(kind="bar", rot=0)
        plt.ylabel("Amount [1k $]")
        plt.title(
            f"Top {ns_parser.top_num} most sold stocks since last {ns_parser.past_transactions_months} months"
            " (upper bound)"
        )
        plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def plot_government(government: pd.DataFrame, ticker: str, gov_type: str):
    """Plot government trading

    Parameters
    ----------
    government : pd.DataFrame
        Government trading data with 'TransactionDate', 'lower' and 'upper' columns
    ticker: str
        Ticker to plot government trading
    gov_type: str
        Type of government data between: congress, senate and house
    """
    plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
    # Shade the band between the summed lower and upper bounds per date, in $1k.
    plt.gca().fill_between(
        government["TransactionDate"].unique(),
        government.groupby("TransactionDate")["lower"].sum().values / 1000,
        government.groupby("TransactionDate")["upper"].sum().values / 1000,
    )
    plt.xlim(
        [
            government["TransactionDate"].values[0],
            government["TransactionDate"].values[-1],
        ]
    )
    plt.grid()
    plt.title(f"{gov_type.capitalize()} trading on {ticker}")
    plt.xlabel("Date")
    plt.ylabel("Amount [1k $]")
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y/%m/%d"))
    plt.gcf().autofmt_xdate()
    if gtff.USE_ION:
        plt.ion()
    plt.show()
def government_trading(other_args: List[str], ticker: str, gov_type: str):
    """Government trading

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get congress trading data from
    gov_type: str
        Type of government data between: congress, senate and house
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog=gov_type,
        description=f"{gov_type} trading. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_months",
        action="store",
        dest="past_transactions_months",
        type=check_positive,
        default=6,
        help="Past transaction months",
    )
    try:
        if other_args:
            # Treat a leading bare value as the -p argument for convenience.
            if "-" not in other_args[0]:
                other_args.insert(0, "-p")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_gov = quiverquant_model.get_government_trading(gov_type, ticker)
        if df_gov.empty:
            print(f"No {gov_type} trading data found\n")
            return
        df_gov = df_gov.sort_values("TransactionDate", ascending=False)
        # Restrict to the requested look-back window (approximated as 30-day months).
        start_date = datetime.now() - timedelta(
            days=ns_parser.past_transactions_months * 30
        )
        df_gov["TransactionDate"] = pd.to_datetime(df_gov["TransactionDate"])
        df_gov = df_gov[df_gov["TransactionDate"] > start_date]
        if df_gov.empty:
            print(f"No recent {gov_type} trading data found\n")
            return
        # Parse the dollar "Range" string (e.g. "$1,001 - $15,000") into numeric bounds.
        df_gov["min"] = df_gov["Range"].apply(
            lambda x: x.split("-")[0].strip("$").replace(",", "").strip()
        )
        df_gov["max"] = df_gov["Range"].apply(
            lambda x: x.split("-")[1].replace(",", "").strip().strip("$")
            if "-" in x
            else x.strip("$").replace(",", "")
        )
        # Signed bounds per transaction: purchases positive, sales negative.
        # NOTE(review): int() is used here while buy_/sell_government use float()
        # for the same parsing -- confirm range bounds are always whole-dollar strings.
        df_gov["lower"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: int(x["min"])
            if x["Transaction"] == "Purchase"
            else -int(x["max"]),
            axis=1,
        )
        df_gov["upper"] = df_gov[["min", "max", "Transaction"]].apply(
            lambda x: int(x["max"])
            if x["Transaction"] == "Purchase"
            else -int(x["min"]),
            axis=1,
        )
        df_gov = df_gov.sort_values("TransactionDate", ascending=True)
        plot_government(df_gov, ticker, gov_type)
        print("")
    except Exception as e:
        print(e, "\n")
def raw_government(other_args: List[str], ticker: str, gov_type: str):
    """Raw government trading

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get congress trading data from
    gov_type: str
        Type of government data between: congress, senate and house
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog=gov_type,
        description=f"Raw {gov_type} trading. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_days",
        action="store",
        dest="past_transactions_days",
        type=check_positive,
        default=10,
        help="Past transaction days",
    )
    try:
        if other_args:
            # Treat a leading bare value as the -p argument for convenience.
            if "-" not in other_args[0]:
                other_args.insert(0, "-p")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_gov = quiverquant_model.get_government_trading(gov_type, ticker)
        if df_gov.empty:
            print(f"No {gov_type} trading data found\n")
            return
        df_gov = df_gov.sort_values("TransactionDate", ascending=False)
        # Congress data carries the extra House column.
        if gov_type == "congress":
            df_gov = df_gov[
                ["TransactionDate", "Representative", "House", "Transaction", "Range"]
            ]
        else:
            df_gov = df_gov[
                ["TransactionDate", "Representative", "Transaction", "Range"]
            ]
        # Keep only the N most recent distinct transaction dates.
        df_gov = df_gov[
            df_gov["TransactionDate"].isin(
                df_gov["TransactionDate"].unique()[: ns_parser.past_transactions_days]
            )
        ].rename(
            columns={
                "TransactionDate": "Transaction Date",
            }
        )
        print(df_gov.to_string(index=False))
        print("")
    except Exception as e:
        print(e, "\n")
def last_contracts(other_args: List[str]):
    """Last contracts

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="last_contracts",
        description="Last contracts. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_days",
        action="store",
        dest="past_transactions_days",
        type=check_positive,
        default=2,
        help="Past transaction days",
    )
    parser.add_argument(
        "-l",
        "--limit",
        action="store",
        dest="limit_contracts",
        type=check_positive,
        default=20,
        help="Limit of contracts to display",
    )
    try:
        if other_args:
            # Treat a leading bare value as the -p argument for convenience.
            if "-" not in other_args[0]:
                other_args.insert(0, "-p")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_contracts = quiverquant_model.get_government_trading("contracts")
        if df_contracts.empty:
            print("No government contracts found\n")
            return
        # Bug fix: sort_values returns a new DataFrame; the result was
        # previously discarded, leaving the data unsorted.
        df_contracts = df_contracts.sort_values("Date", ascending=False)
        df_contracts["Date"] = pd.to_datetime(df_contracts["Date"])
        df_contracts.drop_duplicates(inplace=True)
        # Keep only the N most recent distinct dates.
        df_contracts = df_contracts[
            df_contracts["Date"].isin(
                df_contracts["Date"].unique()[: ns_parser.past_transactions_days]
            )
        ]
        df_contracts = df_contracts[
            ["Date", "Ticker", "Amount", "Description", "Agency"]
        ][: ns_parser.limit_contracts]
        print(df_contracts.to_string(index=False))
        print("")
    except Exception as e:
        print(e, "\n")
def sum_contracts(other_args: List[str]):
    """Sum contracts

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="sum_contracts",
        description="Sum latest contracts. [Source: www.quiverquant.com]",
    )
    try:
        # Fix: this parser defines no "-p" option, so the copy-pasted block
        # that inserted "-p" before a leading bare value only produced an
        # unknown-argument warning. Arguments are now parsed as given.
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_contracts = quiverquant_model.get_government_trading("contracts")
        if df_contracts.empty:
            print("No government contracts found\n")
            return
        df_contracts["Date"] = pd.to_datetime(df_contracts["Date"]).dt.date
        df_contracts.drop_duplicates(inplace=True)
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        # Sum all contract amounts per day, shown in thousands of dollars.
        df_contracts.groupby("Date").sum().div(1000).plot(
            kind="bar", rot=0, ax=plt.gca()
        )
        plt.ylabel("Amount [1k $]")
        plt.title("Sum of latest government contracts")
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def raw_contracts(other_args: List[str], ticker: str):
    """Raw contracts

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get congress trading data from
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="raw_contracts",
        description="Raw contracts. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_days",
        action="store",
        dest="past_transactions_days",
        type=check_positive,
        default=10,
        help="Past transaction days",
    )
    try:
        if other_args:
            # Treat a leading bare value as the -p argument for convenience.
            if "-" not in other_args[0]:
                other_args.insert(0, "-p")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_contracts = quiverquant_model.get_government_trading("contracts", ticker)
        if df_contracts.empty:
            print("No government contracts found\n")
            return
        df_contracts["Date"] = pd.to_datetime(df_contracts["Date"]).dt.date
        df_contracts.drop_duplicates(inplace=True)
        # Keep only the N most recent distinct dates.
        df_contracts = df_contracts[
            df_contracts["Date"].isin(
                df_contracts["Date"].unique()[: ns_parser.past_transactions_days]
            )
        ]
        # Fix: removed a second, redundant drop_duplicates() call -- duplicates
        # were already dropped before the date filter above.
        print(df_contracts.to_string(index=False))
        print("")
    except Exception as e:
        print(e, "\n")
def contracts(other_args: List[str], ticker: str):
    """Bar-plot the daily sum of recent government contracts awarded to a ticker.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get congress trading data from
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="contracts",
        description="Contracts associated with ticker. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-p",
        "--past_transactions_days",
        action="store",
        dest="past_transactions_days",
        type=check_positive,
        default=10,
        help="Past transaction days",
    )
    try:
        # A leading bare value is shorthand for the -p flag.
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-p")
        parsed_args = parse_known_args_and_warn(parser, other_args)
        if not parsed_args:
            return

        contracts_df = quiverquant_model.get_government_trading("contracts", ticker)
        if contracts_df.empty:
            print("No government contracts found\n")
            return

        # Reduce timestamps to dates, keep only the N most recent distinct
        # dates, then drop exact duplicate rows.
        contracts_df["Date"] = pd.to_datetime(contracts_df["Date"]).dt.date
        recent_dates = contracts_df["Date"].unique()[: parsed_args.past_transactions_days]
        contracts_df = contracts_df[contracts_df["Date"].isin(recent_dates)]
        contracts_df.drop_duplicates(inplace=True)

        # Daily totals, plotted in thousands of dollars.
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        contracts_df.groupby("Date").sum().div(1000).plot(
            kind="bar", rot=0, ax=plt.gca()
        )
        plt.ylabel("Amount [1k $]")
        plt.title(f"Sum of latest government contracts to {ticker}")
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def qtr_contracts(other_args: List[str]):
    """Plot the top tickers by quarterly government-contract momentum.

    Momentum is the slope of a linear regression fitted to each ticker's
    chronologically ordered quarterly contract amounts.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="qtr_contracts",
        description="Quarterly-contracts, best regression slope. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-t",
        "--top",
        action="store",
        dest="top",
        type=check_positive,
        default=5,
        help="Top promising stocks with best quarterly-contracts momentum",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        df_contracts = quiverquant_model.get_government_trading("quarter-contracts")
        if df_contracts.empty:
            print("No quarterly government contracts found\n")
            return

        # Regression slope per ticker.  Rows are collected in a list and the
        # DataFrame built once: DataFrame.append is deprecated (removed in
        # pandas 2.0) and re-copies the whole frame on every call.
        coef_rows = []
        for symbol in df_contracts["Ticker"].unique():
            amounts = (
                df_contracts[df_contracts["Ticker"] == symbol]
                .sort_values(by=["Year", "Qtr"])["Amount"]
                .values
            )
            # Create linear regression object and train it on quarter index
            # (0..len-1) vs amount.
            regr = linear_model.LinearRegression()
            regr.fit(np.arange(0, len(amounts)).reshape(-1, 1), amounts)
            coef_rows.append({"Ticker": symbol, "Coef": regr.coef_[0]})
        df_coef = pd.DataFrame(coef_rows, columns=["Ticker", "Coef"])

        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        tickers = df_coef.sort_values(by=["Coef"], ascending=False).head(ns_parser.top)[
            "Ticker"
        ]

        # Plot each top ticker's quarterly series; remember the longest series
        # so x-axis ticks cover every quarter seen.
        max_amount = 0
        quarter_ticks = []
        for symbol in tickers:
            # Sort once per symbol instead of once per column.
            df_symbol = df_contracts[df_contracts["Ticker"] == symbol].sort_values(
                by=["Year", "Qtr"]
            )
            amounts = df_symbol["Amount"].values
            qtr = df_symbol["Qtr"].values
            year = df_symbol["Year"].values
            plt.plot(np.arange(0, len(amounts)), amounts / 1000, "-*", lw=2, ms=15)
            if len(amounts) > max_amount:
                max_amount = len(amounts)
                quarter_ticks = [
                    f"{quarter[0]} - {quarter[1]} Qtr" for quarter in zip(year, qtr)
                ]

        plt.xlim([-0.5, max_amount - 0.5])
        plt.xticks(np.arange(0, max_amount), quarter_ticks)
        plt.grid()
        plt.legend(tickers)
        plt.title("Quarterly Government Contracts - Top promising stocks")
        plt.xlabel("Date")
        plt.ylabel("Amount [1k $]")
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def qtr_contracts_hist(other_args: List[str], ticker: str):
    """Plot the historical quarterly government contract amounts for a ticker.
    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get congress trading data from
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="qtr_contracts_hist",
        description="Quarterly-contracts historical [Source: www.quiverquant.com]",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_contracts = quiverquant_model.get_government_trading(
            "quarter-contracts", ticker=ticker
        )
        if df_contracts.empty:
            print("No quarterly government contracts found\n")
            return
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        # Chronologically ordered amounts plus their (year, quarter) columns.
        amounts = df_contracts.sort_values(by=["Year", "Qtr"])["Amount"].values
        qtr = df_contracts.sort_values(by=["Year", "Qtr"])["Qtr"].values
        year = df_contracts.sort_values(by=["Year", "Qtr"])["Year"].values
        # Label only the first quarter of each year to keep the axis readable.
        quarter_ticks = [
            f"{quarter[0]}" if quarter[1] == 1 else "" for quarter in zip(year, qtr)
        ]
        # Amounts plotted in thousands of dollars against quarter index.
        plt.plot(np.arange(0, len(amounts)), amounts / 1000, "-*", lw=2, ms=15)
        plt.xlim([-0.5, len(amounts) - 0.5])
        plt.xticks(np.arange(0, len(amounts)), quarter_ticks)
        plt.grid()
        plt.title(f"Quarterly Government Contracts Historical on {ticker.upper()}")
        plt.xlabel("Date")
        plt.ylabel("Amount [1k $]")
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def top_lobbying(other_args: List[str]):
    """Bar-plot the tickers with the largest total corporate-lobbying spend.

    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="top_lobbying",
        description="Top lobbying. [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-t",
        "--top",
        action="store",
        dest="top",
        type=check_positive,
        default=10,
        help="Top corporate lobbying tickers with biggest amounts",
    )
    try:
        parsed_args = parse_known_args_and_warn(parser, other_args)
        if not parsed_args:
            return

        df_lobbying = quiverquant_model.get_government_trading("corporate-lobbying")
        if df_lobbying.empty:
            print("No corporate lobbying found\n")
            return

        # Total lobbying amount per ticker; NaN amounts count as zero.
        d_lobbying = {
            symbol: sum(
                float(amount)
                for amount in df_lobbying[df_lobbying["Ticker"] == symbol]
                .replace(np.nan, 0)["Amount"]
                .values
            )
            for symbol in df_lobbying["Ticker"].unique()
        }
        df_amount = pd.DataFrame.from_dict(
            d_lobbying, orient="index", columns=["Amount"]
        ).sort_values(by=["Amount"], ascending=False)

        # Top-N tickers, amounts shown in thousands of dollars.
        top_slice = df_amount.head(parsed_args.top)
        plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)
        plt.bar(top_slice.index, top_slice.values.flatten() / 1000)
        plt.xlabel("Ticker")
        plt.ylabel("Sum Amount [1k $]")
        plt.title(
            f"Total amount spent on corporate lobbying since {df_lobbying['Date'].min()}"
        )
        if gtff.USE_ION:
            plt.ion()
        plt.show()
        print("")
    except Exception as e:
        print(e, "\n")
def lobbying(other_args: List[str], ticker: str):
    """Print the most recent corporate lobbying disclosures for a ticker.
    Parameters
    ----------
    other_args : List[str]
        Command line arguments to be processed with argparse
    ticker: str
        Ticker to get corporate lobbying data from
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="lobbying",
        description="Lobbying details [Source: www.quiverquant.com]",
    )
    parser.add_argument(
        "-l",
        "--last",
        action="store",
        dest="last",
        type=check_positive,
        default=10,
        help="Last corporate lobbying details",
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        df_lobbying = quiverquant_model.get_government_trading(
            "corporate-lobbying", ticker=ticker
        )
        if df_lobbying.empty:
            print("No corporate lobbying found\n")
            return
        # Walk the N most recent disclosures, newest first.
        for _, row in (
            df_lobbying.sort_values(by=["Date"], ascending=False)
            .head(ns_parser.last)
            .iterrows()
        ):
            # Amount may be missing; print "N/A" instead of crashing on float().
            amount = (
                "$" + str(int(float(row["Amount"])))
                if row["Amount"] is not None
                else "N/A"
            )
            print(f"{row['Date']}: {row['Client']} {amount}")
            # Flatten embedded line breaks so each issue prints on one line.
            # NOTE(review): the guard checks Amount, not Specific_Issue -- this
            # assumes the two fields are null together; confirm against the API.
            if row["Amount"] is not None:
                print("\t" + row["Specific_Issue"].replace("\n", " ").replace("\r", ""))
            print("")
        print("")
    except Exception as e:
        print(e, "\n")
|
{"hexsha": "d4f5f2673184cde9cb38b6db3eba1582e164ce6f", "size": 31207, "ext": "py", "lang": "Python", "max_stars_repo_path": "gamestonk_terminal/stocks/government/quiverquant_view.py", "max_stars_repo_name": "clairvoyant/GamestonkTerminal", "max_stars_repo_head_hexsha": "7b40cfe61b32782e36f5de8a08d075532a08c294", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-14T14:37:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-14T14:37:29.000Z", "max_issues_repo_path": "gamestonk_terminal/stocks/government/quiverquant_view.py", "max_issues_repo_name": "clairvoyant/GamestonkTerminal", "max_issues_repo_head_hexsha": "7b40cfe61b32782e36f5de8a08d075532a08c294", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gamestonk_terminal/stocks/government/quiverquant_view.py", "max_forks_repo_name": "clairvoyant/GamestonkTerminal", "max_forks_repo_head_hexsha": "7b40cfe61b32782e36f5de8a08d075532a08c294", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1381886088, "max_line_length": 120, "alphanum_fraction": 0.5666997789, "include": true, "reason": "import numpy", "num_tokens": 7002}
|
! ###############################################################
! # #
! # THE VECTOR LIDORT MODEL #
! # #
! # (Vector LInearized Discrete Ordinate Radiative Transfer) #
! # - -- - - - - #
! # #
! ###############################################################
! ###############################################################
! # #
! # Author : Robert. J. D. Spurr #
! # #
! # Address : RT Solutions, inc. #
! # 9 Channing Street #
! # Cambridge, MA 02138, USA #
! # Tel: (617) 492 1183 #
! # #
! # Email : rtsolutions@verizon.net #
! # #
! # Versions : 2.0, 2.2, 2.3, 2.4, 2.4R, 2.4RT, 2.4RTC, #
! # 2.5, 2.6, 2.7 #
! # Release Date : December 2005 (2.0) #
! # Release Date : March 2007 (2.2) #
! # Release Date : October 2007 (2.3) #
! # Release Date : December 2008 (2.4) #
! # Release Date : April 2009 (2.4R) #
! # Release Date : July 2009 (2.4RT) #
! # Release Date : October 2010 (2.4RTC) #
! # Release Date : March 2011 (2.5) #
! # Release Date : May 2012 (2.6) #
! # Release Date : August 2014 (2.7) #
! # #
! # NEW: TOTAL COLUMN JACOBIANS (2.4) #
! # NEW: BPDF Land-surface KERNELS (2.4R) #
! # NEW: Thermal Emission Treatment (2.4RT) #
! # Consolidated BRDF treatment (2.4RTC) #
! # f77/f90 Release (2.5) #
! # External SS / New I/O Structures (2.6) #
! # #
! # SURFACE-LEAVING / BRDF-SCALING (2.7) #
! # TAYLOR Series / OMP THREADSAFE (2.7) #
! # #
! ###############################################################
! #####################################################
! # #
! # This Version of VLIDORT comes with a GNU-style #
! # license. Please read the license carefully. #
! # #
! #####################################################
! ###############################################################
! # #
! # This is vbrdf_aux.f. Utility routines #
! # The subroutines in vbrdf_aux are listed below with their #
! # source of origin (order of appearance). #
! # #
! # brdf_gauleg: Numerical Recipes, 1992 #
! # derfc_e: V. Natraj, 2005 #
! # GETQUAD2 M. Christi, 2017 #
! # VBRDF_Fresnel_Complex R. Spurr, 2014 (Version 2.7) #
! # BRDF_QUADRATURE_Gaussian R. Spurr, 2004 #
! # BRDF_QUADRATURE_Trapezoid R. Spurr, 2004 (not used) #
! # #
! ###############################################################
MODULE vbrdf_sup_aux_m
USE VLIDORT_PARS, only : zero, one, two, half, pie, FPK, QUARTER
! Everything public here
public
CONTAINS
!
      SUBROUTINE GETQUAD2(A,B,N,ROOTS,WGTS)

!  Computes N roots and weights for Gauss-Legendre quadrature on the interval (a,b)
!  Roots of the Nth Legendre polynomial are found by Newton iteration on the
!  half-interval, then shifted/scaled from (-1,1) to (a,b).

      IMPLICIT NONE

!  Limits of interval

      REAL(FPK), INTENT(IN) :: A, B

!  Dimension

      INTEGER, INTENT(IN) :: N

!  Quadrature roots and weights

      REAL(FPK), INTENT(OUT) :: ROOTS(N), WGTS(N)

!  Local variables

      INTEGER :: I, M, N2, NM1
      REAL(FPK) :: IR, MR, NR
      REAL(FPK) :: MIDPT, SFAC
      REAL(FPK) :: DLP_DX, LP, LPM1, LPM2, X, XOLD, XX

!  Threshold for Newton's Method

      REAL(FPK), PARAMETER :: QEPS = 1.0D-13

!  Since roots are symmetric about zero on the interval (-1,1), split the interval
!  in half and only work on the lower half of the interval (-1,0).

      N2 = INT((N + 1)/2)
      NR = REAL(N,FPK)

!  Define the shift [midpoint of (a,b)] and scale factor to later move roots from
!  the interval (-1,1) to the interval (a,b)

      MIDPT = HALF*(B + A)
      SFAC = HALF*(B - A)

      DO M = 1, N2

!  Find current root of the related Nth order Legendre Polynomial on (-1,0) by Newton's
!  Method using two Legendre Polynomial recurrence relations (e.g. see Abramowitz &
!  Stegan (1972))

!  Define starting point [ after Tricomi (1950) ]

         MR = REAL(M,FPK)
         XX = PIE*(MR - QUARTER)/(NR + HALF)
         X = (ONE - (NR - ONE)/(8.0_FPK*NR**3) &
             - ONE/(384.0_FPK*NR**4)*(39.0_FPK - 28.0_FPK/SIN(XX)**2))*COS(XX)

!  Use Newton's Method.  The inner loop builds P_N (LP) and P_{N-1} (LPM1)
!  by the three-term recurrence; DLP_DX is the derivative of P_N at X.
!  Iterate until the update is below QEPS.

         DO
            LPM1 = ZERO ; LP = ONE
            DO I = 1, N
               IR = REAL(I,FPK) ; LPM2 = LPM1 ; LPM1 = LP
               LP = ((TWO*IR - ONE)*X*LPM1 - (IR - ONE)*LPM2)/IR
            ENDDO
            DLP_DX = NR*(X*LP - LPM1)/(X**2 - ONE)
            XOLD = X ; X = XOLD - LP/DLP_DX
            IF (ABS(X-XOLD) <= QEPS) EXIT
         ENDDO

!  Shift and scale the current root (and its symmetric counterpart) from the interval (-1,1)
!  to the interval (a,b). Define their related weights (e.g. see Abramowitz & Stegan (1972)).
!  Note:
!  If (1) N is even or (2) N is odd and M /= N2, then ROOTS(M) and ROOTS(NM1) are unique.
!  If N is odd and M = N2, then M = NM1 and ROOTS(M) = ROOTS(NM1) are one and the same root.

!  On interval lower half: (a,midpt)

         ROOTS(M) = MIDPT - SFAC*X
         WGTS(M) = (TWO*SFAC)/((ONE - X**2)*DLP_DX**2)

!  On interval upper half: (midpt,b)

         NM1 = N - M + 1
         ROOTS(NM1) = MIDPT + SFAC*X
         WGTS (NM1) = WGTS(M)

      ENDDO

      END SUBROUTINE GETQUAD2
!
      SUBROUTINE BRDF_GAULEG(X1,X2,X,W,N)

!  Gauss-Legendre quadrature on (X1,X2): returns N abscissae X and weights W.
!  Adapted from "gauleg", Numerical Recipes (Press et al., 1992).

      implicit none
      INTEGER N
      DOUBLE PRECISION X1,X2,X(N),W(N)
      INTEGER I, M, J
      DOUBLE PRECISION EPS,XM,XL,P1,P2,P3,PP,Z,Z1
      PARAMETER (EPS=3.D-14)

!  Roots are symmetric about the midpoint: find only the lower half

      M=(N+1)/2

!  Midpoint and half-length of the target interval

      XM=0.5D0*(X2+X1)
      XL=0.5D0*(X2-X1)

      DO I=1,M

!  Initial guess for the I-th root of P_N, then refine by Newton iteration.
!  The inner loop's recurrence yields P_N (P1) and P_{N-1} (P2); PP is the
!  derivative of P_N at Z.  Loop back (GO TO 1) until converged to EPS.

        Z=DCOS(3.141592654D0*(I-.25D0)/(N+.5D0))
 1      CONTINUE
          P1=1.D0
          P2=0.D0
          DO J=1,N
            P3=P2
            P2=P1
            P1=((2.D0*J-1.D0)*Z*P2-(J-1.D0)*P3)/J
          ENDDO
          PP=N*(Z*P1-P2)/(Z*Z-1.D0)
          Z1=Z
          Z=Z1-P1/PP
        IF(DABS(Z-Z1).GT.EPS)GO TO 1

!  Scale the root (and its mirror image) to (X1,X2); standard G-L weights

        X(I)=XM-XL*Z
        X(N+1-I)=XM+XL*Z
        W(I)=2.D0*XL/((1.D0-Z*Z)*PP*PP)
        W(N+1-I)=W(I)
      ENDDO

      RETURN
      END SUBROUTINE BRDF_GAULEG
!
      double precision function derfc_e(x)
      implicit none
      double precision :: x

!  Returns the complementary error function erfc(x) with fractional error
!  everywhere less than 1.2 x 10^(-7).
!  (Chebyshev-fit approximation after "erfcc", Numerical Recipes; the bound
!  in the original comment lacked the minus sign on the exponent.)

      double precision :: t,z
      z = dabs(x)
      t = 1.d0/(1.d0+0.5d0*z)
      derfc_e = t*dexp(-z*z-1.26551223d0+t*(1.00002368d0+t*(.37409196d0+ &
                t*(.09678418d0+t*(-.18628806d0+t*(.27886807d0+t* &
                (-1.13520398d0+t*(1.48851587d0+t*(-.82215223d0+t* &
                .17087277d0)))))))))

!  Reflection formula erfc(-x) = 2 - erfc(x) for negative arguments

      if (x .lt. 0.d0) derfc_e = 2.d0-derfc_e
      return
      END function derfc_e
!
      Subroutine VBRDF_Fresnel_Complex ( MR, MI, COSCHI, FP )

!  Renamed for the BRDF supplement.
!  (Same routine occurs also in the VSLEAVE suite)

!  Adapted from SixS code, this is essentially Born/Wolf computation
!  Unpolarized Fresnel reflectance FP for refractive index MR + i*MI at
!  incidence cosine COSCHI; FP is the mean of the perpendicular (RR2) and
!  parallel (RL2) intensity reflectances.

      implicit none

!  Arguments

      double precision, intent(in) :: MR, MI, COSCHI
      double precision, intent(out) :: FP

!  Local

      double precision :: MRSQ, MISQ, MSQ, MRMI2, SINCHI_SQ, AA, A1, A2, B1, B2
      double precision :: U, V, VSQ, CMU, CPU, RR2
      double precision :: B1MU, B1PU, B2MV, B2PV, RL2

!  Calculation of FP, Complex RI
!  (jump to the simpler real-index branch when the imaginary part is zero)

      IF ( MI.eq.zero) goto 67

      MRSQ = MR * MR ; MISQ = MI * MI
      MSQ = MRSQ - MISQ
      MRMI2 = two * MR * MI
      SINCHI_SQ = one - COSCHI * COSCHI

!  U and V: real and imaginary parts of sqrt(m^2 - sin^2(chi))

      AA = MSQ - SINCHI_SQ
      A1 = abs(AA)
      A2 = SQRT ( AA*AA + MRMI2 * MRMI2 )
      U = sqrt(half*abs(A1+A2))
      V = sqrt(half*abs(-A1+A2))
      VSQ = V * V

!  Perpendicular-polarization reflectance

      CMU = ( COSCHI - U ) ; CPU = ( COSCHI + U )
      RR2 = ( CMU*CMU + VSQ ) / ( CPU*CPU + VSQ )

!  Parallel-polarization reflectance

      B1 = MSQ * COSCHI
      B2 = MRMI2 * COSCHI
      B1MU = B1 - U ; B1PU = B1 + U
      B2PV = B2 + V ; B2MV = B2 - V
      RL2 = ( B1MU*B1MU + B2PV*B2PV ) / ( B1PU*B1PU + B2MV*B2MV )
      FP = half * ( RR2 + RL2 )
      return

!  Calculation of FP. Real RI

67    continue
      MSQ = MR * MR
      SINCHI_SQ = one - COSCHI * COSCHI
      U = sqrt(abs(MSQ - SINCHI_SQ))
      CMU = ( COSCHI - U ) ; CPU = ( COSCHI + U )
      RR2 = CMU*CMU / ( CPU*CPU )
      B1 = MSQ * COSCHI
      B1MU = B1 - U ; B1PU = B1 + U
      RL2 = B1MU*B1MU / ( B1PU*B1PU )
      FP = half * ( RR2 + RL2 )

!  Finish

      return
      end subroutine VBRDF_Fresnel_Complex
      SUBROUTINE BRDF_QUADRATURE_GAUSSIAN &
        ( DO_BRDF_SURFEMISSION, NSTREAMS_BRDF, NBRDF_HALF, &
          X_BRDF, CX_BRDF, SX_BRDF, A_BRDF, &
          BAX_BRDF, CXE_BRDF, SXE_BRDF )

!  Gauss-Legendre azimuth quadrature for BRDF Fourier integration:
!  a half-grid on (0,1) is mirrored to the negative half, then scaled by PIE.

!  include file of dimensions and numbers

      USE VLIDORT_PARS

      IMPLICIT NONE

!  Input
!  =====

!  Emission flag

      LOGICAL :: DO_BRDF_SURFEMISSION

!  Number of streams

      INTEGER :: NSTREAMS_BRDF, NBRDF_HALF

!  OUTPUT
!  ======

!  azimuth quadrature streams for BRDF

      DOUBLE PRECISION :: X_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: CX_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: SX_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: A_BRDF ( MAXSTREAMS_BRDF )

!  For emission calculations

      DOUBLE PRECISION :: BAX_BRDF ( MAXSTHALF_BRDF )
      DOUBLE PRECISION :: CXE_BRDF ( MAXSTHALF_BRDF )
      DOUBLE PRECISION :: SXE_BRDF ( MAXSTHALF_BRDF )

!  local variables
!  ---------------

      INTEGER :: I, I1, K

!  BRDF quadrature (Gauss-Legendre)
!  ---------------

!  Save these quantities for efficient coding

!  Half-grid roots/weights on (0,1), mirrored into the second half of the
!  arrays.  CXE/SXE are taken from the unscaled abscissae.

      CALL GETQUAD2 ( ZERO, ONE, NBRDF_HALF, X_BRDF, A_BRDF )
      DO I = 1, NBRDF_HALF
        I1 = I + NBRDF_HALF
        X_BRDF(I1) = - X_BRDF(I)
        A_BRDF(I1) = A_BRDF(I)
        CXE_BRDF(I) = X_BRDF(I)
        SXE_BRDF(I) = DSQRT(ONE-X_BRDF(I)*X_BRDF(I))
      ENDDO

!  Scale abscissae by PIE to get azimuth angles, with their cosines/sines

      DO I = 1, NSTREAMS_BRDF
        X_BRDF(I) = PIE * X_BRDF(I)
        CX_BRDF(I) = DCOS ( X_BRDF(I) )
        SX_BRDF(I) = DSIN ( X_BRDF(I) )
      ENDDO

!  Half space cosine-weight arrays (emission only, non-Lambertian)

      IF ( DO_BRDF_SURFEMISSION ) THEN
        DO K = 1, NBRDF_HALF
          BAX_BRDF(K) = X_BRDF(K) * A_BRDF(K) / PIE
        ENDDO
      ENDIF

!  Finish

      RETURN
      END SUBROUTINE BRDF_QUADRATURE_GAUSSIAN
!
      SUBROUTINE BRDF_QUADRATURE_TRAPEZOID &
        ( DO_BRDF_SURFEMISSION, NSTREAMS_BRDF, NBRDF_HALF, &
          X_BRDF, CX_BRDF, SX_BRDF, A_BRDF, &
          BAX_BRDF, CXE_BRDF, SXE_BRDF )

!  Trapezoidal-rule azimuth quadrature (alternative to the Gaussian rule;
!  flagged "not used" in the header of this file).

!  include file of dimensions and numbers

      USE VLIDORT_PARS

      IMPLICIT NONE

!  Input
!  =====

!  Emission flag

      LOGICAL :: DO_BRDF_SURFEMISSION

!  Number of streams

      INTEGER :: NSTREAMS_BRDF, NBRDF_HALF

!  OUTPUT
!  ======

!  azimuth quadrature streams for BRDF

      DOUBLE PRECISION :: X_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: CX_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: SX_BRDF ( MAXSTREAMS_BRDF )
      DOUBLE PRECISION :: A_BRDF ( MAXSTREAMS_BRDF )

!  For emission calculations

      DOUBLE PRECISION :: BAX_BRDF ( MAXSTHALF_BRDF )
      DOUBLE PRECISION :: CXE_BRDF ( MAXSTHALF_BRDF )
      DOUBLE PRECISION :: SXE_BRDF ( MAXSTHALF_BRDF )

!  local variables
!  ---------------

      INTEGER :: I, I1, K
      DOUBLE PRECISION :: DF1, DEL

!  BRDF quadrature (Trapezium)
!  ---------------

!  Save these quantities for efficient coding

!  Evenly spaced azimuth nodes spanning a full circle of 2*PIE

      DF1 = DBLE(NSTREAMS_BRDF - 1 )
      DEL = TWO * PIE / DF1
      DO I = 1, NSTREAMS_BRDF
        I1 = I - 1
!  NOTE(review): the first assignment below is dead code -- it is immediately
!  overwritten, so the grid runs over (0, 2*PIE) rather than (-PIE, PIE).
!  Left unchanged here (routine is unused); confirm intent before deleting
!  either line.
        X_BRDF(I) = DBLE(I1) * DEL - PIE
        X_BRDF(I) = DBLE(I1) * DEL
        CX_BRDF(I) = DCOS ( X_BRDF(I) )
        SX_BRDF(I) = DSIN ( X_BRDF(I) )
        CXE_BRDF(I) = CX_BRDF(I)
        SXE_BRDF(I) = SX_BRDF(I)
      ENDDO

!  Trapezoid weights: half weight at the two endpoint nodes

      DO I = 2, NSTREAMS_BRDF - 1
        A_BRDF(I) = DEL / PIE
      ENDDO
      A_BRDF(1) = DEL * HALF / PIE
      A_BRDF(NSTREAMS_BRDF) = DEL * HALF / PIE

!  Half space cosine-weight arrays (emission only, non-Lambertian)

      IF ( DO_BRDF_SURFEMISSION ) THEN
        DO K = 1, NBRDF_HALF
          BAX_BRDF(K) = X_BRDF(K) * A_BRDF(K) / PIE
        ENDDO
      ENDIF

!  Finish

      RETURN
      END SUBROUTINE BRDF_QUADRATURE_TRAPEZOID
! End module
END MODULE vbrdf_sup_aux_m
|
{"hexsha": "3bf1647156b8dea305015ac46b903fa880cd0ee5", "size": 13784, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/Components/rtms/RTSI/VLIDORT2OS/vsup/vbrdf/vbrdf_sup_aux.f90", "max_stars_repo_name": "GEOS-ESM/AeroApps", "max_stars_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_stars_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:23:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T15:39:30.000Z", "max_issues_repo_path": "src/Components/rtms/RTSI/VLIDORT90/vsup/vbrdf/vbrdf_sup_aux.f90", "max_issues_repo_name": "GEOS-ESM/AeroApps", "max_issues_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_issues_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-04-15T16:22:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T13:59:25.000Z", "max_forks_repo_path": "src/Components/rtms/RTSI/VLIDORT90/vsup/vbrdf/vbrdf_sup_aux.f90", "max_forks_repo_name": "GEOS-ESM/AeroApps", "max_forks_repo_head_hexsha": "874dad6f34420c014d98eccbe81a061bdc0110cf", "max_forks_repo_licenses": ["NASA-1.3", "ECL-2.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.5631929047, "max_line_length": 94, "alphanum_fraction": 0.4648142774, "num_tokens": 4266}
|
# ------------------------------------------ This app is a cycle detector --------------------------------------------
# Simple API. JSON Request with following format:
# {
# 'year': "2020",
# 'month': "12",
# 'day': "24",
# }
import pandas as pd
import numpy as np
from flask import Flask,jsonify,json,request
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import SGD,Adam
import requests,os, datetime
from tensorflow.keras import layers
from tensorflow.keras.models import load_model
import os,random
from scipy.signal import find_peaks
import tensorflow as tf
from model_template import *
window_size = 3600
resolution = 20
margin = 20
def load_DF(sensorVsCycle, year, month, day):
    """Fetch one day of raw data from the data service as a DataFrame.

    Parameters
    ----------
    sensorVsCycle : str
        Endpoint selector, e.g. "Cycle" or "Sensor".
    year, month, day : str
        Date components sent in the JSON request body.

    Returns
    -------
    pd.DataFrame
        Frame built from the response's 'data' and 'columns' fields.
    """
    # requests requires an explicit scheme: a bare "host:port/path" URL raises
    # requests.exceptions.MissingSchema, so prefix http://.
    url = "http://172.19.0.1:7770/" + sensorVsCycle + "/fetch_data"
    RawDataRequest = {
        'year': year,
        'month': month,
        'day': day
    }
    response = requests.get(url, json=RawDataRequest).json()
    df1 = pd.DataFrame(data=np.array(response['data']), columns=response['columns'])
    return df1
@app.route('/cycle_train',methods=['GET'])
def cycle_train():
    # Train the CONV1D cycle-detection model on hour-long (3600-sample)
    # windows of the 'Cycle' signal: windows starting at flagged cycle
    # times (Sts == 8) are labeled 1, randomly sampled windows labeled 0.
    #
    # NOTE(review): `year` and `month` are not defined in this function or in
    # this file's visible scope -- unless `from model_template import *`
    # supplies them, the load_DF call below raises NameError.  Confirm.
    days, cycles = [],[]
    # NOTE(review): seeds the `random` module, but sampling below uses
    # np.random, which this does not seed.
    random.seed(1)
    # NOTE(review): this url is never used; load_DF builds its own URL.
    url = "172.19.0.1:7770/Cycle/fetch_data"
    # NOTE(review): re-initializes days/cycles set two lines above.
    days,cycles,X,Y = [],[], [],[]
    # Shadows the module-level constant of the same value.
    window_size = 3600
    # 90 random second-offsets within a day, used as negative samples.
    Nays_stamps = np.random.randint(0,86400,90)
    # ----------------------------------------- Cycle Times with Values of 1
    for i in range (1,12,1):
        df1 = load_DF ("Cycle",year,month,str(i))
        A_Positive = df1[df1['Sts']==8].index
        # NOTE(review): this inner loop reuses `i`, clobbering the day index --
        # the days.append below records a row index, not the day-of-month.
        for i in A_Positive:
            if i<(86400-3600):
                cyc = np.array(df1.iloc[i:i+3600]['Cycle']).reshape(-1,1)
                X.append(cyc)
                Y.append(np.array([1]).reshape(-1))
                days.append('2021-1-'+str(i))
    # ---------------------------------------- Other timestamps
        for i in range(0,len(Nays_stamps),1):
            if Nays_stamps[i]<(86400-3600):
                cyc = np.array(df1.iloc[Nays_stamps[i]:Nays_stamps[i]+3600]['Cycle']).reshape(-1,1)
                X.append(cyc)
                Y.append(np.array([0]).reshape(-1))
    X=np.array(X)
    Y=np.array(Y)
    # NOTE(review): the split is computed but fit() below trains on the full
    # X/Y with validation_split, so X_train/X_test/y_train/y_test are unused.
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state = 1 , shuffle = True)
    model_m = CONV1D_model(window_size)
    # Clear stale checkpoints from previous runs.
    temp_H5_Dir = 'model/temp/'
    for file in os.scandir(temp_H5_Dir):
        if file.name.endswith(".h5"):
            os.remove(file)
    opt = Adam(learning_rate=0.01)
    model_m.compile(loss='binary_crossentropy',
                    optimizer=opt, metrics=['accuracy'])
    BATCH_SIZE = 20
    EPOCHS = 100
    # Checkpoint the best model per epoch; stop after 30 stagnant epochs.
    # NOTE(review): `keras` is not imported in this file's visible imports --
    # presumably provided via `from model_template import *`; verify.
    callbacks_list = [
        keras.callbacks.ModelCheckpoint(
            filepath='model/temp/best_model.{epoch:02d}-{loss:.2f}.h5',
            monitor='val_loss', save_best_only=True),
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=30)]
    history = model_m.fit(X,
                          Y,
                          batch_size=BATCH_SIZE,
                          callbacks=callbacks_list,
                          epochs=EPOCHS,
                          validation_split=0.4,
                          shuffle=True
                          )
    model_m.save("model/DummyTestModel_last.h5")
    return jsonify({"message":"Model saved succesfully!"})
def cycle_detector(year, month, day, Resolution, window_size):
    """Scan one day of cycle data with a sliding window and return detected cycle offsets.

    Parameters
    ----------
    year, month, day : str
        Date of the data to scan (forwarded to load_DF).
    Resolution : int
        Step in samples between successive window starts.
    window_size : int
        Number of samples fed to the model per prediction.

    Returns
    -------
    np.ndarray
        Sample offsets (peak index * Resolution) where the model output
        reaches 1.
    """
    Feed = []
    df1 = load_DF("Cycle", year, month, day)
    # Slide a window over the day's 'Cycle' signal at the given resolution.
    for i in range(0, len(df1['Cycle']) - window_size, Resolution):
        f = np.array(df1.iloc[i:i + window_size]['Cycle']).reshape(-1, 1)
        Feed.append(f)
    # Named day_windows instead of re-using (and shadowing) the `day` parameter.
    day_windows = np.array(Feed)
    model_m = load_model('model/DummyTestModel.h5')
    Y_predict2 = model_m.predict(day_windows)
    # Peaks of the prediction trace at height 1 mark detected cycles.
    # (The original also built unused XXX/YYY locals from an undefined name
    # `Probability`, which raised NameError; those lines are removed.)
    peaks, _ = find_peaks(np.array(Y_predict2.reshape(-1)), height=1)
    return (peaks * Resolution)
@app.route('/anomaly_train',methods=['GET'])
def anomaly_train():
    # Train the DeepAnt autoencoder on sensor windows surrounding flagged
    # cycles (Sts == 8, anomaly == 8) and derive a reconstruction-error
    # threshold from the training data.
    #
    # NOTE(review): this Flask view returns None (no jsonify), which Flask
    # rejects at runtime, and `threshold` is computed but never persisted or
    # returned.  The source may be truncated here -- confirm.
    days,cycles,X,Y = [],[], [],[]
    for i in range (1,12,1):
        df1 = load_DF ("Cycle","2020","1",str(i))
        df2 = load_DF ("Sensor","2020","1",str(i))
        A_Positive = df1[df1['Sts']==8].index
        for row in A_Positive:
            if row<(86400-3600):
                if df2.iloc[row]['anomaly']==8:
                    # Sensor window trimmed by `margin` samples at both ends
                    # (margin/window_size are module-level constants).
                    cyc = np.array(df2.iloc[row+margin:row+window_size-margin]['sensor']).reshape(-1,1)
                    X.append(cyc)
                    days.append('2021-1-'+str(i))
    X = np.array(X)
    model = DeepAnt_Model()
    # Keep only the checkpoint with the lowest validation loss.
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath='model/DummyAnomaly_Test.h5',
        monitor='val_loss',
        mode='min',
        patience = 90,
        save_best_only=True)
    # Autoencoder training: input and target are both X.
    history = model.fit(
        X,
        X,
        epochs=600,
        batch_size=4,
        validation_split=0.4,
        callbacks=[model_checkpoint_callback],
    )
    x_train_pred = model.predict(X)
    # NOTE(review): hard-coded 46 assumes exactly 46 training windows; this
    # breaks for any other dataset size -- should presumably be X.shape[0].
    x_train_pred = x_train_pred.reshape(46,window_size-2*margin,1)
    # Per-window mean absolute reconstruction error; its maximum over the
    # training set is used as the anomaly threshold.
    train_mae_loss = np.mean(np.abs(x_train_pred - X), axis=1)
    threshold = np.max(train_mae_loss)
|
{"hexsha": "081fa95e735037b846fd50a1db28a0e1a35a983a", "size": 4927, "ext": "py", "lang": "Python", "max_stars_repo_path": "cycle_detector/train.py", "max_stars_repo_name": "sinakm/AnomalyDetector", "max_stars_repo_head_hexsha": "68a0a4d7de4fdc34181b79a8267ab24d8001580c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cycle_detector/train.py", "max_issues_repo_name": "sinakm/AnomalyDetector", "max_issues_repo_head_hexsha": "68a0a4d7de4fdc34181b79a8267ab24d8001580c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cycle_detector/train.py", "max_forks_repo_name": "sinakm/AnomalyDetector", "max_forks_repo_head_hexsha": "68a0a4d7de4fdc34181b79a8267ab24d8001580c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-07-12T23:59:53.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-12T23:59:53.000Z", "avg_line_length": 30.226993865, "max_line_length": 119, "alphanum_fraction": 0.5985386645, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1322}
|
[STATEMENT]
lemma tendsto_rabs_zero_iff: "((\<lambda>x. \<bar>f x\<bar>) \<longlongrightarrow> (0::real)) F \<longleftrightarrow> (f \<longlongrightarrow> 0) F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((\<lambda>x. \<bar>f x\<bar>) \<longlongrightarrow> 0) F = (f \<longlongrightarrow> 0) F
[PROOF STEP]
by (fold real_norm_def) (rule tendsto_norm_zero_iff)
|
{"llama_tokens": 144, "file": null, "length": 1}
|
#
# This file is part of Orchid and related technologies.
#
# Copyright (c) 2017-2022 Reveal Energy Services. All Rights Reserved.
#
# LEGAL NOTICE:
# Orchid contains trade secrets and otherwise confidential information
# owned by Reveal Energy Services. Access to and use of this information is
# strictly limited and controlled by the Company. This file may not be copied,
# distributed, or otherwise disclosed outside of the Company's facilities
# except under appropriate precautions to maintain the confidentiality hereof,
# and may not be used in any way not expressly authorized by the Company.
#
from abc import ABCMeta, abstractmethod
from typing import Callable, Union
import numpy as np
import pandas as pd
from orchid import (
dom_project_object as dpo,
dot_net_dom_access as dna,
project_store as loader,
unit_system as units,
)
# noinspection PyUnresolvedReferences
from Orchid.FractureDiagnostics.SDKFacade import ScriptAdapter
# noinspection PyUnresolvedReferences
from Orchid.FractureDiagnostics.TimeSeries import IQuantityTimeSeries
class BaseTimeSeriesAdapter(dpo.DomProjectObject, metaclass=ABCMeta):
    """Abstract adapter exposing a .NET quantity time series as a Python object.

    Subclasses supply `quantity_name_unit_map` (the "Primitive Operation" of
    the Template Method implemented by `sampled_quantity_unit`).
    """

    def __init__(self, adaptee: IQuantityTimeSeries, net_project_callable: Callable):
        """
        Construct an instance that adapts a .NET `IStageSampledQuantityTimeSeries` instance.
        Args:
            adaptee: The .NET stage time series to be adapted.
            net_project_callable: Callable forwarded to the base class
                (presumably resolves the owning .NET project -- confirm
                against `DomProjectObject`).
        """
        super().__init__(adaptee, net_project_callable)

    # Property adapting the .NET 'sampled_quantity_name' member of the adaptee.
    sampled_quantity_name = dna.dom_property('sampled_quantity_name',
                                             'Return the sampled quantity name for this curve.')

    @abstractmethod
    def quantity_name_unit_map(self, project_units):
        """
        Return a map (dictionary) between quantity names and units (from `unit_system`) of the data_points.
        This method plays the role of "Primitive Operation" in the *Template Method* design pattern. In this
        role, the "Template Method" defines an algorithm and delegates some steps of the algorithm to derived
        classes through invocation of "Primitive Operations".
        Args:
            project_units: The unit system of the project.
        """
        pass

    def sampled_quantity_unit(self) -> Union[units.UsOilfield, units.Metric]:
        """
        Return the measurement unit of the data_points in this curve.
        This method plays the role of "Template Method" in the *Template Method* design pattern. In this role
        it specifies an algorithm to calculate the units of the sampled quantity of the curve delegating some
        algorithm steps to derived classes by invoking the "Primitive Operation-", `quantity_name_unit_map()`
        and `get_net_project_units()`.
        Returns:
            A `UnitSystem` member containing the unit for the sample in this curve.
        """
        # Look up this curve's unit by its sampled quantity name.
        quantity_name_unit_map = self.quantity_name_unit_map(self.expect_project_units)
        return quantity_name_unit_map[self.sampled_quantity_name]

    def data_points(self) -> pd.Series:
        """
        Return the time series for this curve.
        Returns:
            The `pandas` time `Series` for this curve, indexed by UTC timestamps.
        """
        # Marshal the .NET samples into flat arrays once, then build the Series
        # with a UTC-aware DatetimeIndex from the Unix timestamps (seconds).
        python_time_series_arrays = loader.as_python_time_series_arrays(self.dom_object)
        result = pd.Series(data=np.fromiter(python_time_series_arrays.SampleMagnitudes, dtype='float'),
                           index=pd.DatetimeIndex(np.fromiter(python_time_series_arrays.UnixTimeStampsInSeconds,
                                                              dtype='datetime64[s]'), tz='UTC'),
                           name=self.name)
        return result
|
{"hexsha": "77577ad312197c6f638c890d7a4b47f84691c5ab", "size": 3681, "ext": "py", "lang": "Python", "max_stars_repo_path": "orchid/base_time_series_adapter.py", "max_stars_repo_name": "Reveal-Energy-Services/orchid", "max_stars_repo_head_hexsha": "4c177ac5a511a80a3588ab8da873fc2d74358da8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "orchid/base_time_series_adapter.py", "max_issues_repo_name": "Reveal-Energy-Services/orchid", "max_issues_repo_head_hexsha": "4c177ac5a511a80a3588ab8da873fc2d74358da8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "orchid/base_time_series_adapter.py", "max_forks_repo_name": "Reveal-Energy-Services/orchid", "max_forks_repo_head_hexsha": "4c177ac5a511a80a3588ab8da873fc2d74358da8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0108695652, "max_line_length": 112, "alphanum_fraction": 0.6992665037, "include": true, "reason": "import numpy", "num_tokens": 746}
|
import numpy as np
from matplotlib import pyplot as plt
class Reader:
    """Reader for PGM images in ASCII (P2) or binary (P5) format."""

    def __init__(self):
        # Pixel matrix (2-D numpy array) of the last image read.
        self.data = None
        # Codec of the last image read (currently unused after dispatch).
        self.codec = None
        self.width = None
        self.height = None

    def read_pgm(self, pgm_file_name):
        """Read a PGM file, dispatching on its magic number (P2 or P5).

        Returns the image as a 2-D numpy array of shape (height, width).
        Raises Exception for any other magic number.
        """
        with open(pgm_file_name, 'rb') as f:
            codec = f.readline()
        print(f"Codec: {codec}")
        if codec == b"P2\n":
            return self._read_p2(pgm_file_name)
        if codec == b'P5\n':
            return self._read_p5(pgm_file_name)
        raise Exception(f"Incorrect format of PGM: {codec}")

    def _read_p2(self, pgm_name):
        """Read an ASCII (P2) PGM file."""
        print(f"Reading P2 maps")
        with open(pgm_name, 'r') as f:
            lines = f.readlines()
        # Drop comment lines (starting with '#').
        lines = [line for line in lines if not line.startswith('#')]
        # After the magic number, all tokens are integers:
        # width, height, maxval, then the pixel values.
        tokens = []
        for line in lines[1:]:
            tokens.extend(int(c) for c in line.split())
        width, height = tokens[0], tokens[1]
        # tokens[2] is the maximum gray value; pixels follow it.
        data = np.reshape(np.array(tokens[3:]), (height, width))
        self.width = width
        self.height = height
        self.data = data
        return data

    def _read_p5(self, pgm_name):
        """Read a binary (P5) PGM file.

        Bug fix vs. the original: comment lines after the magic number are
        now skipped correctly.  The old code compared `t[0]` (an int, since
        `t` is bytes) with the str '#', so the skip loop never ran and the
        dimension line was discarded, breaking standard comment-free files.
        """
        print(f"Reading P5 maps")
        with open(pgm_name, 'rb') as pgmf:
            assert pgmf.readline() == b'P5\n'
            # Skip any comment lines, then parse "width height".
            line = pgmf.readline()
            while line.startswith(b'#'):
                line = pgmf.readline()
            (width, height) = [int(i) for i in line.split()]
            depth = int(pgmf.readline())
            assert depth <= 255
            # One unsigned byte per pixel, row-major order.
            raster = []
            for _ in range(height):
                row = [ord(pgmf.read(1)) for _ in range(width)]
                raster.append(row)
        data = np.array(raster)
        self.height = height
        self.width = width
        self.data = data
        return data

    def show_img(self):
        """Display the last image read using matplotlib."""
        plt.imshow(self.data)
        plt.show()
def test():
    """Smoke test: load the sample race-track map and display it."""
    sample_path = 'pypgm/race_track.pgm'
    image = Reader().read_pgm(sample_path)
    plt.imshow(image)
    plt.show()
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    test()
|
{"hexsha": "5675692cc47573a91159743ba5bf678598f2f8e1", "size": 2352, "ext": "py", "lang": "Python", "max_stars_repo_path": "pgm_reader/pgm_reader.py", "max_stars_repo_name": "mridulgain/pgm", "max_stars_repo_head_hexsha": "d89be8260f0d5650369ad61a985350dcd2b77bd7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-11T10:22:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-11T10:22:39.000Z", "max_issues_repo_path": "pgm_reader/pgm_reader.py", "max_issues_repo_name": "mridulgain/pgm", "max_issues_repo_head_hexsha": "d89be8260f0d5650369ad61a985350dcd2b77bd7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-11-11T12:52:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-27T07:07:56.000Z", "max_forks_repo_path": "pgm_reader/pgm_reader.py", "max_forks_repo_name": "mridulgain/pgm", "max_forks_repo_head_hexsha": "d89be8260f0d5650369ad61a985350dcd2b77bd7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-11T11:41:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T11:41:02.000Z", "avg_line_length": 24.2474226804, "max_line_length": 64, "alphanum_fraction": 0.4982993197, "include": true, "reason": "import numpy", "num_tokens": 584}
|
MODULE DOT
! Module for dot product of two real arrays of rank 1.
! The caller needs to ensure that exceptions do not cause halting.
! Source https://j3-fortran.org/doc/year/18/18-007r1.pdf
! Errors (size mismatch or IEEE overflow) are reported through the
! module-level flag MATRIX_ERROR rather than by stopping; callers must
! check it after each use of the .dot. operator.
USE, INTRINSIC :: IEEE_EXCEPTIONS
LOGICAL :: MATRIX_ERROR = .FALSE.
! Bind the user-defined operator .dot. to the MULT implementation below.
INTERFACE OPERATOR(.dot.)
MODULE PROCEDURE MULT
END INTERFACE
CONTAINS
! Dot product of A and B; sets MATRIX_ERROR instead of halting on error.
REAL FUNCTION MULT (A, B)
REAL, INTENT (IN) :: A(:), B(:)
INTEGER I
LOGICAL OVERFLOW
IF (SIZE(A) /= SIZE(B)) THEN
MATRIX_ERROR = .TRUE.
! NOTE(review): the function result is left undefined on this early
! return; callers must not use it when MATRIX_ERROR is set.
RETURN
END IF
! The processor ensures that IEEE_OVERFLOW is quiet.
MULT = 0.0
DO I = 1, SIZE (A)
MULT = MULT + A(I)*B(I)
END DO
! Record whether the accumulation loop raised IEEE overflow.
CALL IEEE_GET_FLAG (IEEE_OVERFLOW, OVERFLOW)
IF (OVERFLOW) MATRIX_ERROR = .TRUE.
END FUNCTION MULT
END MODULE DOT
|
{"hexsha": "17ccfb9c90d0ffe9bb8e5262a5cf9a882663d347", "size": 868, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/interface4.f90", "max_stars_repo_name": "Thirumalai-Shaktivel/lfortran", "max_stars_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 316, "max_stars_repo_stars_event_min_datetime": "2019-03-24T16:23:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:28:33.000Z", "max_issues_repo_path": "tests/interface4.f90", "max_issues_repo_name": "Thirumalai-Shaktivel/lfortran", "max_issues_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-07-29T04:58:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-04T16:40:06.000Z", "max_forks_repo_path": "tests/interface4.f90", "max_forks_repo_name": "Thirumalai-Shaktivel/lfortran", "max_forks_repo_head_hexsha": "bb39faf1094b028351d5aefe27d64ee69302300a", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-03-28T19:40:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T07:28:55.000Z", "avg_line_length": 26.303030303, "max_line_length": 66, "alphanum_fraction": 0.5979262673, "num_tokens": 233}
|
from keras.engine.topology import Layer
from keras_frcnn.vgg_occlusion_net import OcclusionNet
from keras_frcnn import thresholding_helper
import keras.backend as K
import numpy as np
if K.backend() == 'tensorflow':
import tensorflow as tf
class NewPoolingOutputProcessor(Layer):
    '''Masks an ROI pooling output with an occlusion mask.

    See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual
    Recognition, K. He, X. Zhang, S. Ren, J. Sun.

    # Arguments
        pool_size: int
            Size of pooling region to use. pool_size = 7 will result in a 7x7 region.
        num_rois: number of regions of interest to be used

    # Input shape
        x = [pooling_output, occlusion_mask]
        pooling_output = (1, None, 7, 7, 512)
        occlusion_mask = (1, None, 7, 7, 2)
    '''

    def __init__(self, pool_size, num_rois, **kwargs):
        self.dim_ordering = K.image_dim_ordering()
        assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.pool_size = pool_size
        self.num_rois = num_rois
        super(NewPoolingOutputProcessor, self).__init__(**kwargs)

    def compute_output_shape(self, input_shape):
        # Channels-first ('th') places the 512 feature maps before the
        # spatial dimensions; channels-last ('tf') places them after.
        if self.dim_ordering == 'th':
            return (None, self.num_rois, 512, self.pool_size, self.pool_size)
        return (None, self.num_rois, self.pool_size, self.pool_size, 512)

    def call(self, x, mask=None):
        # x = [pooling_output, occlusion_mask]; both share leading dimensions.
        pooled = x[0]
        occlusion = x[1]
        # Drop every spatial location flagged by the mask: where the mask is
        # truthy the output is zero, otherwise the pooled activation passes.
        blanked = K.zeros_like(pooled)
        return K.switch(occlusion, blanked, pooled)

    def get_config(self):
        config = {'pool_size': self.pool_size,
                  'num_rois': self.num_rois}
        base = super(NewPoolingOutputProcessor, self).get_config()
        return dict(list(base.items()) + list(config.items()))
|
{"hexsha": "0f18fa19826f011ee045daf6e6306282f2e6c9c6", "size": 2319, "ext": "py", "lang": "Python", "max_stars_repo_path": "keras_frcnn/NewPoolingOutputProcessor.py", "max_stars_repo_name": "finleylee2507/frcnn-from-scratch-with-keras", "max_stars_repo_head_hexsha": "402127efd440cb23849d70599cb3f0caec971697", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keras_frcnn/NewPoolingOutputProcessor.py", "max_issues_repo_name": "finleylee2507/frcnn-from-scratch-with-keras", "max_issues_repo_head_hexsha": "402127efd440cb23849d70599cb3f0caec971697", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keras_frcnn/NewPoolingOutputProcessor.py", "max_forks_repo_name": "finleylee2507/frcnn-from-scratch-with-keras", "max_forks_repo_head_hexsha": "402127efd440cb23849d70599cb3f0caec971697", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7671232877, "max_line_length": 87, "alphanum_fraction": 0.6528676154, "include": true, "reason": "import numpy", "num_tokens": 584}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
from eight import *
from bw2analyzer.econ import (
concentration_ratio,
gini_coefficient,
herfindahl_index,
theil_index,
)
import numpy as np
import unittest
class EconometricsTestCase(unittest.TestCase):
    """Unit tests for the econometric concentration/inequality indices."""

    def test_concentration_ratio(self):
        shares = (0.2, 0.2, 0.2, 0.2, 0.1, 0.1)
        self.assertEqual(concentration_ratio(shares), 0.8)
        self.assertTrue(isinstance(concentration_ratio(shares), float))

    def test_concentration_ratio_normalization(self):
        # Inputs need not sum to one; the index normalizes internally.
        doubled = 2 * np.array((0.2, 0.2, 0.2, 0.2, 0.1, 0.1))
        self.assertEqual(concentration_ratio(doubled), 0.8)

    def test_concentration_ratio_number(self):
        shares = np.array((0.2, 0.2, 0.2, 0.2, 0.1, 0.1))
        # Concentration ratio of the top 2 shares only.
        self.assertEqual(concentration_ratio(shares, 2), 0.4)

    def test_herfindahl(self):
        uniform = np.array((1.0, 1.0, 1.0), dtype=float)
        # Unnormalized: 3 * (1/3) ** 2 = 1/3
        self.assertEqual(herfindahl_index(uniform, False), 1 / 3)
        # Normalized index is zero when all values are equal.
        self.assertEqual(herfindahl_index(uniform), 0)
        skewed = np.array((0.8, 0.1, 0.1))
        self.assertAlmostEqual(herfindahl_index(skewed, False), 0.64 + 0.01 + 0.01)
        self.assertTrue(isinstance(herfindahl_index(skewed, False), float))

    def test_gini(self):
        incomes = np.array((0.2, 0.3, 0.4, 0.5, 0.6))
        # Expected value from the Wikipedia worked example.
        self.assertAlmostEqual(gini_coefficient(incomes), 0.2)

    def test_theil(self):
        # Non-positive entries are expected to be filtered out by theil_index.
        raw = np.array((0.0, -2.0, 2.0, 6.0, 20.0))
        filtered = np.array((2.0, 2.0, 6.0, 20.0))
        mean = 30 / 4
        expected = ((filtered / mean) * np.log(filtered / mean)).sum() / 4
        self.assertAlmostEqual(float(expected), theil_index(raw))
|
{"hexsha": "d88ef927c7a7a7a4eec92f645c2bae6ee6a067c5", "size": 1827, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/econ.py", "max_stars_repo_name": "aleksandra-kim/brightway2-analyzer-copy", "max_stars_repo_head_hexsha": "c85961bd2b644458fbeef92e5471ec76d520ab38", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/econ.py", "max_issues_repo_name": "aleksandra-kim/brightway2-analyzer-copy", "max_issues_repo_head_hexsha": "c85961bd2b644458fbeef92e5471ec76d520ab38", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/econ.py", "max_forks_repo_name": "aleksandra-kim/brightway2-analyzer-copy", "max_forks_repo_head_hexsha": "c85961bd2b644458fbeef92e5471ec76d520ab38", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8235294118, "max_line_length": 78, "alphanum_fraction": 0.6212370005, "include": true, "reason": "import numpy", "num_tokens": 592}
|
### This file in in ./models/research/audioset/
from __future__ import print_function
import numpy as np
import tensorflow as tf
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
import librosa
import sys
import pandas as pd
import csv
print('\nTesting your install of VGGish\n')
# Paths to downloaded VGGish files.
checkpoint_path = './vggish_model.ckpt'
pca_params_path = './vggish_pca_params.npz'
# Relative tolerance of errors in mean and standard deviation of embeddings.
# NOTE(review): rel_error is defined but never used in this script.
rel_error = 0.1  # Up to 10%
## Generate a 1 kHz sine wave at 44.1 kHz (we use a high sampling rate
## to test resampling to 16 kHz during feature extraction).
#num_secs = 3
#freq = 1000
#sr = 44100
#t = np.linspace(0, num_secs, int(num_secs * sr))
#x = np.sin(2 * np.pi * freq * t)
# Load the audio file named on the command line at its native sample rate.
file_path = sys.argv[1]
x, sr = librosa.load(file_path, sr=None)
num_secs = librosa.get_duration(x, sr)
# NOTE(review): len(x)*sr multiplies sample count by sample rate, which is
# not a meaningful quantity; len(x)/sr (duration in seconds) was presumably
# intended — confirm before relying on this printed value.
print (len(x)*sr)
print (librosa.get_duration(x,sr))
# Produce a batch of log mel spectrogram examples.
input_batch = vggish_input.waveform_to_examples(x, sr)
print('Log Mel Spectrogram example: ', input_batch[0])
#np.testing.assert_equal(
#    input_batch.shape,
#    [num_secs, vggish_params.NUM_FRAMES, vggish_params.NUM_BANDS])
# Define VGGish, load the checkpoint, and run the batch through the model to
# produce embeddings.  GPU devices are hidden to force CPU execution.
config = tf.ConfigProto(
    device_count = {'GPU': 0}
)
with tf.Graph().as_default(), tf.Session(config=config) as sess:
  vggish_slim.define_vggish_slim()
  vggish_slim.load_vggish_slim_checkpoint(sess, checkpoint_path)
  # Look up the model's input/output tensors by their well-known names.
  features_tensor = sess.graph.get_tensor_by_name(
      vggish_params.INPUT_TENSOR_NAME)
  embedding_tensor = sess.graph.get_tensor_by_name(
      vggish_params.OUTPUT_TENSOR_NAME)
  [embedding_batch] = sess.run([embedding_tensor],
                               feed_dict={features_tensor: input_batch})
  print('VGGish embedding: ', embedding_batch[0])
  # Postprocess the results to produce whitened quantized embeddings.
  pproc = vggish_postprocess.Postprocessor(pca_params_path)
  postprocessed_batch = pproc.postprocess(embedding_batch)
  print('Postprocessed VGGish embedding: ', postprocessed_batch[0])
  # Write one CSV row per embedding frame, next to the input file.
  # NOTE(review): file_path[:-7] assumes the input filename ends with a
  # 7-character suffix — confirm against the filenames actually passed in.
  results_path = file_path[:-7] + "VGGish_PCA.csv"
  with open(results_path, 'w') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',',
                            quotechar='|', quoting=csv.QUOTE_MINIMAL)
    for i in range(postprocessed_batch.shape[0]):
      spamwriter.writerow(postprocessed_batch[i])
  print ("Saved VGGish embeddings.")
  print ('Embedding shape: ', embedding_batch.shape)
|
{"hexsha": "b6eec46c66fc1b616328b5963bfc1cb450865ccf", "size": 2622, "ext": "py", "lang": "Python", "max_stars_repo_path": "audioset/calculate_embeddings.py", "max_stars_repo_name": "shayenne/VoiceDetection", "max_stars_repo_head_hexsha": "5b9ce0950da245fa9488301e3a024b06f363f4db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "audioset/calculate_embeddings.py", "max_issues_repo_name": "shayenne/VoiceDetection", "max_issues_repo_head_hexsha": "5b9ce0950da245fa9488301e3a024b06f363f4db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audioset/calculate_embeddings.py", "max_forks_repo_name": "shayenne/VoiceDetection", "max_forks_repo_head_hexsha": "5b9ce0950da245fa9488301e3a024b06f363f4db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9756097561, "max_line_length": 76, "alphanum_fraction": 0.731884058, "include": true, "reason": "import numpy", "num_tokens": 684}
|
#!/usr/bin/env python
"""
Author : Jaganadh Gopinadhan
e-mail : jaganadhg@gmail.com
Licence : MIT
Third Eye V1.0 Udacity Self Driving Car Nano Degree Project 1
Finding Lane Lines on the Road
"""
import glob
import matplotlib.image as mpimg
import numpy as np
import cv2
from lineutil import plot_img,LineDrawBase
from pputil import FrameTransformer
from pipeline import Pipeline
def save_img(img,fn):
    # Write an image to disk with OpenCV.
    # NOTE(review): the parameter names are swapped relative to their use —
    # the caller passes the output *path* as `img` and the pixel *array* as
    # `fn`, which happens to match cv2.imwrite(filename, image) positionally.
    # The call is correct, but the names are misleading; renaming would break
    # any keyword-argument callers, so it is only flagged here.
    cv2.imwrite(img,fn)
def test_pipe_img(img):
    """Run one image through the lane-finding pipeline, display and return it."""
    pipeline = Pipeline()
    annotated = pipeline.fit_frame(img)
    plot_img(annotated)
    return annotated
def test_pipe_vid(vid,outf):
    """Annotate a video with the lane-finding pipeline and save it to outf."""
    pipeline = Pipeline()
    annotated = pipeline.fit_vid(vid)
    annotated.write_videofile(outf, audio=False)
if __name__ == "__main__":
    # Input assets and output folders (hard-coded local paths).
    images = glob.glob("/Users/jagan/Documents/workspace/ThirdEye/thirdeye_v1.0/test_images/*.jpg")
    videos = glob.glob("/Users/jagan/Documents/workspace/ThirdEye/thirdeye_v1.0/test_videos/*.mp4")
    imout = "/Users/jagan/Documents/workspace/ThirdEye/thirdeye_v1.0/test_images_output/"
    vidout = "/Users/jagan/Documents/workspace/ThirdEye/thirdeye_v1.0/test_videos_output/"
    # Annotate every test image and save it under imout (Python 2 prints).
    for img in images:
        print "Processing :", img
        imd = mpimg.imread(img)
        pimg = test_pipe_img(imd)
        # Keep the original basename for the output file.
        fn = img.split("/")[-1]
        save_img(imout + fn , pimg)
        print "Saved :", img
    # Annotate every test video and save it under vidout.
    for vd in videos:
        print "Processing :", vd
        fn = vd.split("/")[-1]
        ofile = vidout + fn
        test_pipe_vid(vd,ofile)
        print "Processed :", vd
|
{"hexsha": "d77d5acd3364fd84a7fef2c6b426235a20fb1a15", "size": 1500, "ext": "py", "lang": "Python", "max_stars_repo_path": "thirdeye/testcase.py", "max_stars_repo_name": "jaganadhg/sdcnd_p1_lld", "max_stars_repo_head_hexsha": "74e6c9c4e7c5b03bbba0aab1e4eda31b53d7eb2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thirdeye/testcase.py", "max_issues_repo_name": "jaganadhg/sdcnd_p1_lld", "max_issues_repo_head_hexsha": "74e6c9c4e7c5b03bbba0aab1e4eda31b53d7eb2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thirdeye/testcase.py", "max_forks_repo_name": "jaganadhg/sdcnd_p1_lld", "max_forks_repo_head_hexsha": "74e6c9c4e7c5b03bbba0aab1e4eda31b53d7eb2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5901639344, "max_line_length": 99, "alphanum_fraction": 0.674, "include": true, "reason": "import numpy", "num_tokens": 404}
|
import warnings
import packaging.version
def future_warning(msg, error_version, error_category, warning_category=FutureWarning):
    """Raise a warning until a specific version, then raise an error."""
    import openproblems

    installed = packaging.version.parse(openproblems.__version__)
    threshold = packaging.version.parse(error_version)
    if installed >= threshold:
        # Past the cutoff version the deprecation becomes a hard error.
        raise error_category(msg)
    msg += " This will raise a {} in openproblems v{}".format(
        error_category.__name__, error_version
    )
    warnings.warn(msg, warning_category)
def ignore_warnings():
    """Ignore irrelevant warnings."""
    deprecation_msg = (
        "is_categorical is deprecated and will be removed in a future version."
        " Use is_categorical_dtype instead"
    )
    warnings.filterwarnings("ignore", category=FutureWarning, message=deprecation_msg)
    try:
        import numba
    except ImportError:
        # numba is optional; without it there are no NumbaWarnings to silence.
        return
    warnings.filterwarnings("ignore", category=numba.NumbaWarning)
|
{"hexsha": "718f30a3f3bd83ad0c7141c05203a614ed5fb109", "size": 1003, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/utils/warnings.py", "max_stars_repo_name": "yingstat/SingleCellOpenProblems", "max_stars_repo_head_hexsha": "52de9c5a7aa922fc5c0e2940c949a05920196cbc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-26T18:41:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-26T18:41:59.000Z", "max_issues_repo_path": "test/utils/warnings.py", "max_issues_repo_name": "yingstat/SingleCellOpenProblems", "max_issues_repo_head_hexsha": "52de9c5a7aa922fc5c0e2940c949a05920196cbc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/utils/warnings.py", "max_forks_repo_name": "yingstat/SingleCellOpenProblems", "max_forks_repo_head_hexsha": "52de9c5a7aa922fc5c0e2940c949a05920196cbc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5, "max_line_length": 87, "alphanum_fraction": 0.6939182453, "include": true, "reason": "import numba", "num_tokens": 192}
|
!>
!!
!! @brief Module MO_SIMPLE_PLUMES: provides anthropogenic aerosol optical properties as a function of lat, lon
!! height, time, and wavelength
!!
!! @remarks
!!
!! @author Bjorn Stevens, Stephanie Fiedler and Karsten Peters MPI-Met, Hamburg (v1 release 2016-11-10)
!!
!! @change-log:
!! - 2016-12-05: beta release (BS, SF and KP, MPI-Met)
!! - 2016-09-28: revised representation of Twomey effect (SF, MPI-Met)
!! - 2015-09-28: bug fixes (SF, MPI-Met)
!! - 2016-10-12: revised maximum longitudinal extent of European plume (KP, SF, MPI-Met)
!! $ID: n/a$
!!
!! @par Origin
!! Based on code originally developed at the MPI-Met by Karsten Peters, Bjorn Stevens, Stephanie Fiedler
!! and Stefan Kinne with input from Thorsten Mauritsen and Robert Pincus
!!
!! @par Copyright
!!
!
MODULE MO_SIMPLE_PLUMES
USE netcdf
IMPLICIT NONE
! Fixed dimensions of the simple-plume climatology input files.
INTEGER, PARAMETER :: &
nplumes = 9 ,& !< Number of plumes
nfeatures = 2 ,& !< Number of features per plume
ntimes = 52 ,& !< Number of times resolved per year (52 => weekly resolution)
nyears = 251 !< Number of years of available forcing
LOGICAL, SAVE :: &
sp_initialized = .FALSE. !< parameter determining whether input needs to be read
! Module-level climatology state: populated once by sp_setup from the
! netCDF input files and then read by the other routines in this module.
REAL :: &
plume_lat (nplumes) ,& !< latitude of plume center (AOD maximum)
plume_lon (nplumes) ,& !< longitude of plume center (AOD maximum)
beta_a (nplumes) ,& !< parameter a for beta function vertical profile
beta_b (nplumes) ,& !< parameter b for beta function vertical profile
aod_spmx (nplumes) ,& !< anthropogenic AOD maximum at 550 for plumes
aod_fmbg (nplumes) ,& !< anthropogenic AOD at 550 for fine-mode natural background (idealized to mimic Twomey effect)
asy550 (nplumes) ,& !< asymmetry parameter at 550nm for plume
ssa550 (nplumes) ,& !< single scattering albedo at 550nm for plume
angstrom (nplumes) ,& !< Angstrom parameter for plume
sig_lon_E (nfeatures,nplumes) ,& !< Eastward extent of plume feature
sig_lon_W (nfeatures,nplumes) ,& !< Westward extent of plume feature
sig_lat_E (nfeatures,nplumes) ,& !< Southward extent of plume feature
sig_lat_W (nfeatures,nplumes) ,& !< Northward extent of plume feature
theta (nfeatures,nplumes) ,& !< Rotation angle of plume feature
ftr_weight (nfeatures,nplumes) ,& !< Feature weights
time_weight (nfeatures,nplumes) ,& !< Time weights
time_weight_bg (nfeatures,nplumes) ,& !< as time_weight but for natural background in Twomey effect
year_weight (nyears,nplumes) ,& !< Yearly weight for plume (years 165+ come from the scenario file)
ann_cycle (nfeatures,ntimes,nplumes) !< annual cycle for plume feature
PUBLIC sp_aop_profile
CONTAINS
!
! ------------------------------------------------------------------------------------------------------------------------
! SP_SETUP: This subroutine should be called at initialization to read the netcdf data that describes the simple plume
! climatology. The information needs to be either read by each processor or distributed to processors.
!
SUBROUTINE sp_setup(history,scenario)
!
! Reads the simple-plume climatology from the 'history' netCDF file and the
! scenario extension of the yearly weights from the 'scenario' netCDF file,
! populating the module-level arrays and setting sp_initialized.
! Note: the iret of each nf90_inq_varid is overwritten by the following
! nf90_get_var; a missing variable therefore surfaces as a read error.
!
IMPLICIT NONE
CHARACTER(LEN=*) , INTENT(IN) :: history , scenario
!
! ----------
!
INTEGER :: iret, ncid, DimID, VarID, xdmy
REAL :: temp(nyears-164,nplumes) ! buffer for scenario years (165..nyears)
!
! ----------
!
iret = nf90_open(history, NF90_NOWRITE, ncid)
IF (iret /= NF90_NOERR) THEN
write (0,*) 'Cannot find file '//trim(history)
STOP 'NetCDF File not opened'
END IF
!
! read dimensions and make sure file conforms to expected size
!
iret = nf90_inq_dimid(ncid, "plume_number" , DimId)
iret = nf90_inquire_dimension(ncid, DimId, len = xdmy)
IF (xdmy /= nplumes) STOP 'NetCDF improperly dimensioned -- plume_number'
iret = nf90_inq_dimid(ncid, "plume_feature", DimId)
iret = nf90_inquire_dimension(ncid, DimId, len = xdmy)
IF (xdmy /= nfeatures) STOP 'NetCDF improperly dimensioned -- plume_feature'
iret = nf90_inq_dimid(ncid, "year_fr" , DimId)
iret = nf90_inquire_dimension(ncid, DimID, len = xdmy)
IF (xdmy /= ntimes) STOP 'NetCDF improperly dimensioned -- year_fr'
iret = nf90_inq_dimid(ncid, "years" , DimId)
iret = nf90_inquire_dimension(ncid, DimID, len = xdmy)
IF (xdmy /= nyears) STOP 'NetCDF improperly dimensioned -- years'
!
! read variables that define the simple plume climatology
!
iret = nf90_inq_varid(ncid, "plume_lat", VarId)
iret = nf90_get_var(ncid, VarID, plume_lat(:), start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading plume_lat'
iret = nf90_inq_varid(ncid, "plume_lon", VarId)
iret = nf90_get_var(ncid, VarID, plume_lon(:), start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading plume_lon'
iret = nf90_inq_varid(ncid, "beta_a" , VarId)
iret = nf90_get_var(ncid, VarID, beta_a(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading beta_a'
iret = nf90_inq_varid(ncid, "beta_b" , VarId)
iret = nf90_get_var(ncid, VarID, beta_b(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading beta_b'
iret = nf90_inq_varid(ncid, "aod_spmx" , VarId)
iret = nf90_get_var(ncid, VarID, aod_spmx(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading aod_spmx'
iret = nf90_inq_varid(ncid, "aod_fmbg" , VarId)
iret = nf90_get_var(ncid, VarID, aod_fmbg(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading aod_fmbg'
iret = nf90_inq_varid(ncid, "ssa550" , VarId)
iret = nf90_get_var(ncid, VarID, ssa550(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading ssa550'
iret = nf90_inq_varid(ncid, "asy550" , VarId)
iret = nf90_get_var(ncid, VarID, asy550(:) , start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading asy550'
iret = nf90_inq_varid(ncid, "angstrom" , VarId)
iret = nf90_get_var(ncid, VarID, angstrom(:), start=(/1/),count=(/nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading angstrom'
iret = nf90_inq_varid(ncid, "sig_lat_W" , VarId)
iret = nf90_get_var(ncid, VarID, sig_lat_W(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading sig_lat_W'
iret = nf90_inq_varid(ncid, "sig_lat_E" , VarId)
iret = nf90_get_var(ncid, VarID, sig_lat_E(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading sig_lat_E'
iret = nf90_inq_varid(ncid, "sig_lon_E" , VarId)
iret = nf90_get_var(ncid, VarID, sig_lon_E(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading sig_lon_E'
iret = nf90_inq_varid(ncid, "sig_lon_W" , VarId)
iret = nf90_get_var(ncid, VarID, sig_lon_W(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading sig_lon_W'
iret = nf90_inq_varid(ncid, "theta" , VarId)
iret = nf90_get_var(ncid, VarID, theta(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading theta'
iret = nf90_inq_varid(ncid, "ftr_weight" , VarId)
iret = nf90_get_var(ncid, VarID, ftr_weight(:,:) , start=(/1,1/),count=(/nfeatures,nplumes/))
! Bug fix: the error message previously said 'plume_lat' (copy-paste).
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading ftr_weight'
iret = nf90_inq_varid(ncid, "year_weight" , VarId)
! Only the first 164 historical years come from the history file.
iret = nf90_get_var(ncid, VarID, year_weight(:,:) , start=(/1,1/),count=(/164,nplumes /))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading year_weight'
iret = nf90_inq_varid(ncid, "ann_cycle" , VarId)
iret = nf90_get_var(ncid, VarID, ann_cycle(:,:,:) , start=(/1,1,1/),count=(/nfeatures,ntimes,nplumes/))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading ann_cycle'
iret = nf90_close(ncid)
! The remaining years (165..nyears) of the yearly weights come from the
! scenario file, read into a buffer and then appended to year_weight.
iret = nf90_open(scenario, NF90_NOWRITE, ncid)
IF (iret /= NF90_NOERR) THEN
write (0,*) 'Cannot find file '//trim(scenario)
STOP 'NetCDF File not opened'
END IF
iret = nf90_inq_varid(ncid, "year_weight" , VarId)
iret = nf90_get_var(ncid, VarID, temp, start=(/165,1/),count=(/nyears-164,nplumes /))
IF (iret /= NF90_NOERR) STOP 'NetCDF Error reading year_weight'
year_weight(165:,:) = temp
iret = nf90_close(ncid)
!
sp_initialized = .TRUE.
RETURN
END SUBROUTINE sp_setup
!
! ------------------------------------------------------------------------------------------------------------------------
! SET_TIME_WEIGHT: The simple plume model assumes that meteorology constrains plume shape and that only source strength
! influences the amplitude of a plume associated with a given source region. This routine retrieves the temporal weights
! for the plumes. Each plume feature has its own temporal weights which varies yearly. The annual cycle is indexed by
! week in the year and superimposed on the yearly mean value of the weight.
!
SUBROUTINE set_time_weight(year_fr)
!
! ----------
!
REAL, INTENT(IN) :: &
year_fr !< Fractional Year (1850.0 - 2100.99)
INTEGER :: &
iyear ,& !< Integer year index between 1 and nyears (=251, covering 1850-2100)
iweek ,& !< Integer index (between 1 and ntimes); for ntimes=52 this corresponds to weeks (roughly)
iplume ! plume number
!
! ----------
!
! Map the fractional year onto the year/week indices of the climatology.
iyear = FLOOR(year_fr) - 1849
iweek = FLOOR((year_fr - FLOOR(year_fr)) * ntimes) + 1
IF ((iweek > ntimes) .OR. (iweek < 1) .OR. (iyear > nyears) .OR. (iyear < 1)) STOP 'Time out of bounds in set_time_weight'
DO iplume=1,nplumes
! Anthropogenic weight: yearly amplitude modulated by the annual cycle.
time_weight(1,iplume) = year_weight(iyear,iplume) * ann_cycle(1,iweek,iplume)
time_weight(2,iplume) = year_weight(iyear,iplume) * ann_cycle(2,iweek,iplume)
! Natural-background weight (Twomey effect) follows the annual cycle only.
time_weight_bg(1,iplume) = ann_cycle(1,iweek,iplume)
time_weight_bg(2,iplume) = ann_cycle(2,iweek,iplume)
END DO
RETURN
END SUBROUTINE set_time_weight
!
! ------------------------------------------------------------------------------------------------------------------------
! SP_AOP_PROFILE: This subroutine calculates the simple plume aerosol and cloud active optical properties based on the
! the simple plume fit to the MPI Aerosol Climatology (Version 2). It sums over nplumes to provide a profile of aerosol
! optical properties on a host models vertical grid.
!
SUBROUTINE sp_aop_profile ( &
historic ,scenario ,nlevels ,ncol , &
lambda ,oro ,lon ,lat , &
year_fr ,z ,dz ,dNovrN , &
aod_prof ,ssa_prof ,asy_prof )
IMPLICIT NONE
!
! ----------
!
CHARACTER(LEN=*), INTENT(IN) :: historic, scenario
INTEGER, INTENT(IN) :: &
nlevels, & !< number of levels
ncol !< number of columns
REAL, INTENT(IN) :: &
lambda, & !< wavelength
year_fr, & !< Fractional Year (1903.0 is the 0Z on the first of January 1903, Gregorian)
oro(ncol), & !< orographic height (m)
lon(ncol), & !< longitude
lat(ncol), & !< latitude
z (ncol,nlevels), & !< height above sea-level (m)
dz(ncol,nlevels) !< level thickness (difference between half levels) (m)
REAL, INTENT(OUT) :: &
dNovrN(ncol) , & !< anthropogenic increase in cloud drop number concentration (factor)
aod_prof(ncol,nlevels) , & !< profile of aerosol optical depth
ssa_prof(ncol,nlevels) , & !< profile of single scattering albedo
asy_prof(ncol,nlevels) !< profile of asymmetry parameter
INTEGER :: iplume, icol, k
REAL :: &
eta(ncol,nlevels), & !< normalized height (by 15 km)
z_beta(ncol,nlevels), & !< profile for scaling column optical depth
prof(ncol,nlevels), & !< scaled profile (by beta function)
beta_sum(ncol), & !< vertical sum of beta function
ssa(ncol), & !< single scattering albedo
asy(ncol), & !< asymmetry parameter
cw_an(ncol), & !< column weight for simple plume (anthropogenic) AOD at 550 nm
cw_bg(ncol), & !< column weight for fine-mode natural background AOD at 550 nm
caod_sp(ncol), & !< column simple plume anthropogenic AOD at 550 nm
caod_bg(ncol), & !< column fine-mode natural background AOD at 550 nm
a_plume1, & !< gaussian longitude factor for feature 1
a_plume2, & !< gaussian longitude factor for feature 2
b_plume1, & !< gaussian latitude factor for feature 1
b_plume2, & !< gaussian latitude factor for feature 2
delta_lat, & !< latitude offset
delta_lon, & !< longitude offset
delta_lon_t, & !< threshold for maximum longitudinal plume extent used in transition from 360 to 0 degrees
lon1, & !< rotated longitude for feature 1
lat1, & !< rotated latitude for feature 2
lon2, & !< rotated longitude for feature 1
lat2, & !< rotated latitude for feature 2
f1, & !< contribution from feature 1
f2, & !< contribution from feature 2
f3, & !< contribution from feature 1 in natural background of Twomey effect
f4, & !< contribution from feature 2 in natural background of Twomey effect
aod_550, & !< aerosol optical depth at 550nm
aod_lmd, & !< aerosol optical depth at input wavelength
lfactor !< factor to compute wavelength dependence of optical properties
!
! ----------
!
! initialize input data (by calling setup at first instance)
!
IF (.NOT.sp_initialized) CALL sp_setup(historic,scenario)
!
! get time weights
!
CALL set_time_weight(year_fr)
!
! initialize variables, including output
!
DO k=1,nlevels
DO icol=1,ncol
aod_prof(icol,k) = 0.0
ssa_prof(icol,k) = 0.0
asy_prof(icol,k) = 0.0
! z_beta(icol,k) = MERGE(1.0, 0.0, z(icol,k) >= oro(icol))
!FAB in sigma-coordinates the first atm level altitude should always be above
!topography !
z_beta(icol,k) = 1.!
eta(icol,k) = MAX(0.0,MIN(1.0,z(icol,k)/15000.))
END DO
END DO
DO icol=1,ncol
dNovrN(icol) = 1.0
caod_sp(icol) = 0.0
caod_bg(icol) = 0.02
END DO
!
! sum contribution from plumes to construct composite profiles of aerosol optical properties
!
DO iplume=1,nplumes
!
! calculate vertical distribution function from parameters of beta distribution
!
DO icol=1,ncol
beta_sum(icol) = 0.
END DO
DO k=1,nlevels
DO icol=1,ncol
prof(icol,k) = (eta(icol,k)**(beta_a(iplume)-1.) * (1.-eta(icol,k))**(beta_b(iplume)-1.)) * dz(icol,k)
beta_sum(icol) = beta_sum(icol) + prof(icol,k)
END DO
END DO
DO k=1,nlevels
DO icol=1,ncol
prof(icol,k) = ( prof(icol,k) / beta_sum(icol) ) * z_beta(icol,k)
END DO
END DO
!
! calculate plume weights
!
DO icol=1,ncol
!
! get plume-center relative spatial parameters for specifying amplitude of plume at given lat and lon
!
delta_lat = lat(icol) - plume_lat(iplume)
delta_lon = lon(icol) - plume_lon(iplume)
delta_lon_t = MERGE (260., 180., iplume == 1)
delta_lon = MERGE ( delta_lon-SIGN(360.,delta_lon) , delta_lon , ABS(delta_lon) > delta_lon_t)
a_plume1 = 0.5 / (MERGE(sig_lon_E(1,iplume), sig_lon_W(1,iplume), delta_lon > 0)**2)
b_plume1 = 0.5 / (MERGE(sig_lat_E(1,iplume), sig_lat_W(1,iplume), delta_lon > 0)**2)
a_plume2 = 0.5 / (MERGE(sig_lon_E(2,iplume), sig_lon_W(2,iplume), delta_lon > 0)**2)
b_plume2 = 0.5 / (MERGE(sig_lat_E(2,iplume), sig_lat_W(2,iplume), delta_lon > 0)**2)
!
! adjust for a plume specific rotation which helps match plume state to climatology.
!
lon1 = COS(theta(1,iplume))*(delta_lon) + SIN(theta(1,iplume))*(delta_lat)
lat1 = - SIN(theta(1,iplume))*(delta_lon) + COS(theta(1,iplume))*(delta_lat)
lon2 = COS(theta(2,iplume))*(delta_lon) + SIN(theta(2,iplume))*(delta_lat)
lat2 = - SIN(theta(2,iplume))*(delta_lon) + COS(theta(2,iplume))*(delta_lat)
!
! calculate contribution to plume from its different features, to get a column weight for the anthropogenic
! (cw_an) and the fine-mode natural background aerosol (cw_bg)
!
f1 = time_weight(1,iplume) * ftr_weight(1,iplume) * EXP(-1.* (a_plume1 * ((lon1)**2) + (b_plume1 * ((lat1)**2))))
f2 = time_weight(2,iplume) * ftr_weight(2,iplume) * EXP(-1.* (a_plume2 * ((lon2)**2) + (b_plume2 * ((lat2)**2))))
f3 = time_weight_bg(1,iplume) * ftr_weight(1,iplume) * EXP(-1.* (a_plume1 * ((lon1)**2) + (b_plume1 * ((lat1)**2))))
f4 = time_weight_bg(2,iplume) * ftr_weight(2,iplume) * EXP(-1.* (a_plume2 * ((lon2)**2) + (b_plume2 * ((lat2)**2))))
cw_an(icol) = f1 * aod_spmx(iplume) + f2 * aod_spmx(iplume)
cw_bg(icol) = f3 * aod_fmbg(iplume) + f4 * aod_fmbg(iplume)
!
! calculate wavelength-dependent scattering properties
!
lfactor = MIN(1.0,700.0/lambda)
ssa(icol) = (ssa550(iplume) * lfactor**4) / ((ssa550(iplume) * lfactor**4) + ((1-ssa550(iplume)) * lfactor))
asy(icol) = asy550(iplume) * SQRT(lfactor)
END DO
!
! distribute plume optical properties across its vertical profile weighting by optical depth and scaling for
! wavelength using the angstrom parameter.
!
lfactor = EXP(-angstrom(iplume) * LOG(lambda/550.0))
!FAB TEST
DO k=1,nlevels
DO icol = 1,ncol
aod_550 = prof(icol,k) * cw_an(icol)
aod_lmd = aod_550 * lfactor
caod_sp(icol) = caod_sp(icol) + aod_550
caod_bg(icol) = caod_bg(icol) + prof(icol,k) * cw_bg(icol)
asy_prof(icol,k) = asy_prof(icol,k) + aod_lmd * ssa(icol) * asy(icol)
ssa_prof(icol,k) = ssa_prof(icol,k) + aod_lmd * ssa(icol)
aod_prof(icol,k) = aod_prof(icol,k) + aod_lmd
END DO
END DO
END DO
!
! complete optical depth weighting
!
DO k=1,nlevels
DO icol = 1,ncol
asy_prof(icol,k) = MERGE(asy_prof(icol,k)/ssa_prof(icol,k), 0.0, ssa_prof(icol,k) > TINY(1.))
ssa_prof(icol,k) = MERGE(ssa_prof(icol,k)/aod_prof(icol,k), 1.0, aod_prof(icol,k) > TINY(1.))
END DO
END DO
!
! calculate effective radius normalization (divisor) factor
!
DO icol=1,ncol
dNovrN(icol) = LOG((1000.0 * (caod_sp(icol) + caod_bg(icol))) + 1.0)/LOG((1000.0 * caod_bg(icol)) + 1.0)
END DO
RETURN
END SUBROUTINE sp_aop_profile
END MODULE MO_SIMPLE_PLUMES
|
{"hexsha": "8e857c6cf47c06a1770c82c0d46f56e6f0a76017", "size": 20334, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "external/aerosols/mo_simple_plumes_v1.f90", "max_stars_repo_name": "taobrienlbl/RegCM", "max_stars_repo_head_hexsha": "bda1c78790f0a1501916d0979b843216a08b2cef", "max_stars_repo_licenses": ["AFL-1.1"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2019-04-23T08:36:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-15T08:55:01.000Z", "max_issues_repo_path": "external/aerosols/mo_simple_plumes_v1.f90", "max_issues_repo_name": "taobrienlbl/RegCM", "max_issues_repo_head_hexsha": "bda1c78790f0a1501916d0979b843216a08b2cef", "max_issues_repo_licenses": ["AFL-1.1"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-02-20T06:43:03.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-24T11:26:46.000Z", "max_forks_repo_path": "external/aerosols/mo_simple_plumes_v1.f90", "max_forks_repo_name": "taobrienlbl/RegCM", "max_forks_repo_head_hexsha": "bda1c78790f0a1501916d0979b843216a08b2cef", "max_forks_repo_licenses": ["AFL-1.1"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2019-06-10T12:49:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-14T06:55:20.000Z", "avg_line_length": 49.7163814181, "max_line_length": 144, "alphanum_fraction": 0.5796203403, "num_tokens": 6121}
|
/*****************************************************************************/
/* Copyright (c) 2015, Karl Pauwels */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* 3. Neither the name of the copyright holder nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*****************************************************************************/
#include <cmath>
#include <cstdio>
#include <translation_rotation_3d.h>
#undef Success
#include <Eigen/Geometry>
namespace pose {
// Double-precision pi; used for the degrees-to-radians conversions in rotateX/Y/Z.
const double TranslationRotation3D::PI_ = 3.1415926535897931;
// Construct a pose at the origin with identity rotation; validity comes
// from the caller.
TranslationRotation3D::TranslationRotation3D(bool valid) : valid_{ valid } {
  double zeros[3] = { 0.0, 0.0, 0.0 };
  setT(zeros); // translation = (0, 0, 0)
  setR(zeros); // axis-angle = (0, 0, 0) -> identity rotation matrix
}
// Construct from raw translation (T_in, 3 doubles) and axis-angle rotation
// (R_in, 3 doubles); the resulting pose is marked valid.
TranslationRotation3D::TranslationRotation3D(const double *T_in,
                                             const double *R_in)
    : valid_{ true } {
  // setR also refreshes the cached rotation matrix; order vs setT is irrelevant.
  setR(R_in);
  setT(T_in);
}
TranslationRotation3D::TranslationRotation3D(Eigen::Vector3d T,
Eigen::Vector3d R) {
double T_in[3];
double R_in[3];
Eigen::Map<Eigen::Vector3d> T_tmp(T_in);
Eigen::Map<Eigen::Vector3d> R_tmp(R_in);
T_tmp = T;
R_tmp = R;
setT(T_in);
setR(R_in);
}
// Construct from a packed 6-element array: entries [0..2] are the
// translation, [3..5] the axis-angle rotation. Elements are widened to
// double before storing. The pose is marked valid.
template <typename Type>
TranslationRotation3D::TranslationRotation3D(const Type TR_in[6])
    : valid_{ true } {
  double tra[3], rot[3];
  for (int k = 0; k < 3; ++k)
    tra[k] = static_cast<double>(TR_in[k]);
  for (int k = 0; k < 3; ++k)
    rot[k] = static_cast<double>(TR_in[3 + k]);
  setT(tra);
  setR(rot);
}
// Explicit instantiations for the element types used by callers.
template TranslationRotation3D::TranslationRotation3D<float>(
    const float TR_in[6]);
template TranslationRotation3D::TranslationRotation3D<double>(
    const double TR_in[6]);
// Construct from an Ogre translation vector and quaternion. The quaternion
// is converted to a row-major 3x3 rotation matrix via Eigen, then stored
// through setR_mat (which also refreshes the axis-angle form). Marked valid.
TranslationRotation3D::TranslationRotation3D(
    const Ogre::Vector3 &ogre_translation,
    const Ogre::Quaternion &ogre_rotation)
    : valid_{ true } {
  double tmpT[3] = { ogre_translation.x, ogre_translation.y,
                     ogre_translation.z };
  double tmpR_mat[9];
  // Eigen's quaternion constructor takes (w, x, y, z), matching Ogre's members.
  Eigen::Quaterniond q_eigen(ogre_rotation.w, ogre_rotation.x, ogre_rotation.y,
                             ogre_rotation.z);
  // Map writes straight into tmpR_mat in row-major order.
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_eig(tmpR_mat);
  rot_eig = q_eigen.toRotationMatrix();
  setT(tmpT);
  setR_mat(tmpR_mat);
}
// Exact (bitwise double) equality of translation, axis-angle rotation and
// the validity flag. Note this is exact comparison, not tolerance-based.
bool TranslationRotation3D::operator==(const TranslationRotation3D &op) const {
  for (int i = 0; i < 3; i++) {
    if (op.T_[i] != T_[i])
      return false;
    if (op.R_[i] != R_[i])
      return false;
  }
  return op.valid_ == valid_;
}
// Logical negation of operator==.
bool TranslationRotation3D::operator!=(const TranslationRotation3D &op) const {
  const bool same = (*this == op);
  return !same;
}
// In-place pose composition: *this = (*this) * rhs,
// i.e. T <- R*T_rhs + T and R <- R*R_rhs.
TranslationRotation3D &TranslationRotation3D::
operator*=(const TranslationRotation3D &rhs) {
  // Fl = Fl*Fr;
  Eigen::Map<Eigen::Vector3d> tra_left(T_);
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_left(R_mat_);
  // rhs state is copied into local buffers, so the right-hand maps never
  // alias this object's storage (safe even for pose *= pose).
  double T_rhs[3];
  rhs.getT(T_rhs);
  Eigen::Map<Eigen::Vector3d> tra_right(T_rhs);
  double R_mat_rhs[9];
  rhs.getR_mat(R_mat_rhs);
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_right(
      R_mat_rhs);
  // Order matters: the translation update must read rot_left before the
  // next line overwrites it with the composed rotation.
  tra_left = rot_left * tra_right + tra_left;
  rot_left *= rot_right;
  // Re-derive the axis-angle vector from the freshly composed matrix...
  updateR();
  // convert back to rotation matrix to increase numerical stability
  updateR_mat();
  // apply logical AND to validity
  setValid(isValid() && rhs.isValid());
  return *this;
}
// Squared Euclidean norm of the translation component.
double TranslationRotation3D::normT2() const {
  double sum_sq = 0.0;
  for (int i = 0; i < 3; i++)
    sum_sq += T_[i] * T_[i];
  return sum_sq;
}
// Squared Euclidean norm of the axis-angle rotation vector
// (equals the squared rotation angle, since the axis is unit length).
double TranslationRotation3D::normR2() const {
  double sum_sq = 0.0;
  for (int i = 0; i < 3; i++)
    sum_sq += R_[i] * R_[i];
  return sum_sq;
}
// Return a copy of this pose converted between handedness conventions:
// the y-translation is flipped and the rotation matrix has row 2 and
// column 3 negated. The commented Eigen derivation below shows where the
// direct sign pattern comes from.
TranslationRotation3D TranslationRotation3D::changeHandedness() const {
  // Eigen conversion
  // ----------------
  //  Eigen::Vector3d t(t_buf[0], t_buf[1], t_buf[2]);
  //  Eigen::Matrix3d Rl;
  //  Rl << r_buf[0], r_buf[1], r_buf[2],
  //        r_buf[3], r_buf[4], r_buf[5],
  //        r_buf[6], r_buf[7], r_buf[8];
  //  // transform translation to right-handed system (flip-y)
  //  t(1) = -t(1);
  //  // transform rotation to right-handed system
  //  // flip-y, followed by 180 degree rotation around x-axis to convert ogre
  //  (z-out)
  //  // to vision (z-forward)
  //  Eigen::Matrix3d Sy = Eigen::Matrix<double, 3, 3>::Identity();
  //  Sy(1,1) = -1.0;
  //  Eigen::Matrix3d Rx_180 = Eigen::Matrix<double, 3, 3>::Identity();
  //  Rx_180(1,1) = -1.0;
  //  Rx_180(2,2) = -1.0;
  //  Eigen::Matrix3d R = Sy * Rl * Sy * Rx_180;

  // direct conversion
  // -----------------

  // flip ty
  double T_out[]{ T_[0], -T_[1], T_[2] };

  // (flip row 2 and then flip column 3)
  double R_out[]{ R_mat_[0], R_mat_[1], -R_mat_[2], -R_mat_[3], -R_mat_[4],
                  R_mat_[5], R_mat_[6], R_mat_[7], -R_mat_[8] };

  TranslationRotation3D TR_out;
  TR_out.setT(T_out);
  TR_out.setR_mat(R_out); // also refreshes the axis-angle representation
  TR_out.setValid(valid_);
  return TR_out;
}
// Return a copy with a 180-degree x-rotation post-multiplied onto the
// rotation: columns 2 and 3 of R are negated (R * diag(1,-1,-1));
// the translation is unchanged.
TranslationRotation3D TranslationRotation3D::rotateX180() const {
  double T_out[3];
  double R_out[9];
  for (int i = 0; i < 3; i++)
    T_out[i] = T_[i];
  // Every element whose column index is 1 or 2 (i % 3 != 0) flips sign.
  for (int i = 0; i < 9; i++)
    R_out[i] = (i % 3 == 0) ? R_mat_[i] : -R_mat_[i];
  TranslationRotation3D TR_out;
  TR_out.setT(T_out);
  TR_out.setR_mat(R_out);
  TR_out.setValid(valid_);
  return TR_out;
}
// Return the inverse pose: R_inv = R^T (rotations are orthogonal) and
// T_inv = -R^T * T, so that composing the result with *this yields identity.
TranslationRotation3D TranslationRotation3D::inverseTransform() const {
  Eigen::Map<const Eigen::Vector3d> tra(T_);
  Eigen::Map<const Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot(R_mat_);
  double rot_inv_ptr[9];
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_inv(
      rot_inv_ptr);
  rot_inv = rot.transpose();
  double tra_inv_ptr[3];
  Eigen::Map<Eigen::Vector3d> tra_inv(tra_inv_ptr);
  tra_inv = -rot_inv * tra;
  TranslationRotation3D pose_inv;
  pose_inv.setT(tra_inv_ptr);
  pose_inv.setR_mat(rot_inv_ptr); // also refreshes the axis-angle form
  pose_inv.setValid(valid_);
  return (pose_inv);
}
// Translation component as an Ogre vector.
Ogre::Vector3 TranslationRotation3D::ogreTranslation() const {
  const Ogre::Vector3 translation(T_[0], T_[1], T_[2]);
  return translation;
}
void TranslationRotation3D::getQuaternion(double &x, double &y, double &z,
double &w) const {
Eigen::Matrix3d R_eigen;
R_eigen << R_mat_[0], R_mat_[1], R_mat_[2], R_mat_[3], R_mat_[4], R_mat_[5],
R_mat_[6], R_mat_[7], R_mat_[8];
Eigen::Quaterniond q_eigen;
q_eigen = R_eigen;
x = q_eigen.x();
y = q_eigen.y();
z = q_eigen.z();
w = q_eigen.w();
}
// Rotation as an Ogre quaternion. Note the argument order: Ogre's
// constructor is (w, x, y, z) while getQuaternion outputs (x, y, z, w).
Ogre::Quaternion TranslationRotation3D::ogreRotation() const {
  double qx, qy, qz, qw;
  getQuaternion(qx, qy, qz, qw);
  return Ogre::Quaternion(qw, qx, qy, qz);
}
// Translation component as an Eigen vector (returned by value, i.e. a copy
// of the internal storage).
Eigen::Vector3d TranslationRotation3D::eigenTranslation() const {
  return Eigen::Map<const Eigen::Vector3d>(T_);
}
// Rotation matrix as an Eigen 3x3 matrix (returned by value; the internal
// storage is row-major and copied into the result).
Eigen::Matrix3d TranslationRotation3D::eigenRotation() const {
  return Eigen::Map<const Eigen::Matrix<double, 3, 3, Eigen::RowMajor> >(
      R_mat_);
}
// 6x6 adjoint of the homogeneous transform F = [R T; 0 1]:
//   [ R        skew(T)*R ]
//   [ 0        R         ]
// NOTE(review): presumably used to transform twists/velocity vectors
// between frames — confirm against callers.
Eigen::MatrixXd TranslationRotation3D::adjoint() const {
  double F_ptr[4 * 4];
  // The map only aliases F_ptr; getF fills the storage on the next line,
  // so reads through F below see the populated matrix.
  Eigen::Map<const Eigen::Matrix<double, 4, 4, Eigen::RowMajor> > F(F_ptr);
  getF(F_ptr);
  Eigen::MatrixXd adj(6, 6);
  Eigen::Vector3d T = F.topRightCorner<3, 1>();
  Eigen::Matrix3d R = F.topLeftCorner<3, 3>();
  // Skew-symmetric cross-product matrix of T: skewT * v == T x v.
  Eigen::Matrix3d skewT;
  skewT << 0.0, -T(2), T(1), T(2), 0.0, -T(0), -T(1), T(0), 0.0;
  adj << R, skewT *R, Eigen::Matrix3d::Zero(), R;
  return adj;
}
// Fill a 16-float OpenGL model matrix from this pose, negating the z
// component of both translation and axis-angle rotation first.
// Slots 0..10 receive the rows of the z-flipped rotation matrix, slots
// 12..14 the translation, slot 15 is 1 — since GL reads column-major this
// stores the rotation transposed relative to row-major R_mat_; presumably
// intended together with the z-flip (confirm against the rendering code).
void TranslationRotation3D::createGLModelMatrix(float *M_out) const {
  double TGL[3], RGL[3];
  getT(TGL);
  getR(RGL);
  TGL[2] = -TGL[2];
  RGL[2] = -RGL[2];
  TranslationRotation3D TR(TGL, RGL);
  double R_matGL[9];
  TR.getR_mat(R_matGL);
  // Rows of R_matGL land in slots {0..2}, {4..6}, {8..10}; slots 3, 7, 11
  // are zeroed.
  for (int r = 0; r < 3; r++) {
    for (int c = 0; c < 3; c++)
      M_out[4 * r + c] = R_matGL[3 * r + c];
    M_out[4 * r + 3] = 0.0;
  }
  M_out[12] = TGL[0];
  M_out[13] = TGL[1];
  M_out[14] = TGL[2];
  M_out[15] = 1.0;
}
// True iff every translation and axis-angle component is finite
// (no NaN / +-inf).
bool TranslationRotation3D::isFinite() const {
  for (int i = 0; i < 3; i++) {
    if (!std::isfinite(T_[i]) || !std::isfinite(R_[i]))
      return false;
  }
  return true;
}
// Euler angles (x-y-z, radians) extracted from the row-major rotation
// matrix. Element layout:
// [ 0 1 2
//   3 4 5
//   6 7 8 ]
// http://nghiaho.com/?page_id=846
void TranslationRotation3D::getEuler(double &Ex, double &Ey, double &Ez) const {
  Ex = atan2(R_mat_[7], R_mat_[8]);                                   // r32, r33
  Ey = atan2(-R_mat_[6],
             sqrt(R_mat_[7] * R_mat_[7] + R_mat_[8] * R_mat_[8]));    // -r31
  Ez = atan2(R_mat_[3], R_mat_[0]);                                   // r21, r11
}
void TranslationRotation3D::getF(double *F_out) const {
Eigen::Map<Eigen::Matrix<double, 4, 4, Eigen::RowMajor> > hom(F_out);
hom << eigenRotation(), eigenTranslation(), 0.0, 0.0, 0.0, 1.0;
}
void TranslationRotation3D::setT(const double *T_in) {
for (int i = 0; i < 3; i++)
T_[i] = T_in[i];
}
void TranslationRotation3D::setR(const double *R_in) {
for (int i = 0; i < 3; i++)
R_[i] = R_in[i];
updateR_mat();
}
// Copy a row-major 3x3 rotation matrix into internal storage and refresh
// the cached axis-angle vector. (Parameter is non-const only because the
// declared interface says so; the input is never modified.)
void TranslationRotation3D::setR_mat(double *R_mat_in) {
  for (int i = 9; i-- > 0;)
    R_mat_[i] = R_mat_in[i];
  updateR();
}
// Set the pose from a row-major 4x4 homogeneous transform given as 16
// doubles. Throws std::runtime_error if the size is wrong or the bottom
// row is not exactly [0 0 0 1].
void TranslationRotation3D::setF(const std::vector<double> &F_in) {
  if (F_in.size() != 16)
    throw std::runtime_error(
        "TranslationRotation3D::setF: F_in requires 16 elements");
  if ((F_in.at(12) != 0.0) || (F_in.at(13) != 0.0) || (F_in.at(14) != 0.0) ||
      (F_in.at(15) != 1.0))
    throw std::runtime_error(
        "TranslationRotation3D::setF: bottom row of F_in should be [0 0 0 1]");
  Eigen::Map<const Eigen::Matrix<double, 4, 4, Eigen::RowMajor> > F_in_eig(
      F_in.data());
  Eigen::Transform<double, 3, Eigen::Affine> F;
  F = F_in_eig;
  double tmpT[3];
  Eigen::Map<Eigen::Vector3d> tra_eig(tmpT);
  tra_eig = F.translation();
  double tmpR_mat[9];
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_eig(tmpR_mat);
  // NOTE(review): Transform::rotation() extracts the rotation part of the
  // affine transform (not just the raw 3x3 block as .linear() would) --
  // confirm this decomposition is intended for non-pure-rotation inputs.
  rot_eig = F.rotation();
  setT(tmpT);
  setR_mat(tmpR_mat);   // setR_mat already refreshes the axis-angle form...
  updateR_mat(); // for stability
}
// Incrementally shift the translation along a single axis.
void TranslationRotation3D::translateX(double Tx) {
  T_[0] += Tx;
}
void TranslationRotation3D::translateY(double Ty) {
  T_[1] += Ty;
}
void TranslationRotation3D::translateZ(double Tz) {
  T_[2] += Tz;
}
// Post-multiply the rotation by a rotation of angle_deg degrees about the
// x-axis, then re-derive axis-angle and rebuild the matrix from it.
void TranslationRotation3D::rotateX(double angle_deg) {
  const double angle_rad = angle_deg * PI_ / 180.0;
  const double cos_a = cos(angle_rad);
  const double sin_a = sin(angle_rad);
  Eigen::Matrix3d Rx;
  Rx << 1.0, 0.0, 0.0, 0.0, cos_a, -sin_a, 0.0, sin_a, cos_a;
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot(R_mat_);
  rot *= Rx;
  updateR();
  updateR_mat(); // for stability
}
void TranslationRotation3D::rotateY(double angle_deg) {
double angle_rad = angle_deg * PI_ / 180.0;
double c = cos(angle_rad);
double s = sin(angle_rad);
Eigen::Matrix3d Rx;
Rx << c, 0.0, s, 0.0, 1.0, 0.0, -s, 0, c;
Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot(R_mat_);
rot *= Rx;
updateR();
updateR_mat(); // for stability
}
void TranslationRotation3D::rotateZ(double angle_deg) {
double angle_rad = angle_deg * PI_ / 180.0;
double c = cos(angle_rad);
double s = sin(angle_rad);
Eigen::Matrix3d Rx;
Rx << c, -s, 0.0, s, c, 0.0, 0.0, 0.0, 1.0;
Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot(R_mat_);
rot *= Rx;
updateR();
updateR_mat(); // for stability
}
// Print translation, axis-angle rotation and the full 3x3 rotation matrix
// to stdout (one row per line).
void TranslationRotation3D::show() const {
  printf("T = %+2.6f %+2.6f %+2.6f\n", T_[0], T_[1], T_[2]);
  printf("R = %+2.6f %+2.6f %+2.6f\n", R_[0], R_[1], R_[2]);
  printf("R_mat = %+2.6f %+2.6f %+2.6f\n", R_mat_[0], R_mat_[1], R_mat_[2]);
  printf("        %+2.6f %+2.6f %+2.6f\n", R_mat_[3], R_mat_[4], R_mat_[5]);
  printf("        %+2.6f %+2.6f %+2.6f\n", R_mat_[6], R_mat_[7], R_mat_[8]);
}
// Print translation, axis-angle rotation and validity on a single line.
void TranslationRotation3D::showCompact() const {
  printf("T = %+2.6f %+2.6f %+2.6f | R = %+2.6f %+2.6f %+2.6f ", T_[0], T_[1],
         T_[2], R_[0], R_[1], R_[2]);
  if (isValid())
    printf("valid\n");
  else
    printf("invalid\n");
}
// Refresh the axis-angle vector R_ from the current rotation matrix R_mat_.
void TranslationRotation3D::updateR() {
  Eigen::Map<const Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_mat(
      R_mat_);
  Eigen::Map<Eigen::Vector3d> rot_axis_angle(R_);
  Eigen::AngleAxis<double> tmp(rot_mat);
  // Stored as one 3-vector: unit rotation axis scaled by the angle.
  rot_axis_angle = tmp.angle() * tmp.axis();
}
// Refresh the rotation matrix R_mat_ from the axis-angle vector R_.
void TranslationRotation3D::updateR_mat() {
  Eigen::Map<Eigen::Matrix<double, 3, 3, Eigen::RowMajor> > rot_mat(R_mat_);
  Eigen::Map<const Eigen::Vector3d> rot_axis_angle(R_);
  // The vector's norm is the rotation angle; its direction is the axis.
  double angle = rot_axis_angle.norm();
  if (angle < 1e-15) {
    // identity matrix -- the angle is too small to define a stable axis
    rot_mat = Eigen::Matrix<double, 3, 3>::Identity();
  } else {
    rot_mat = Eigen::AngleAxis<double>(angle, rot_axis_angle / angle)
                  .toRotationMatrix();
  }
}
}
|
{"hexsha": "141ee08dd4fb29683b2f9a363d0115625c80c729", "size": 14722, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "pose_estimation/src/translation_rotation_3d.cpp", "max_stars_repo_name": "carlo-/simtrack", "max_stars_repo_head_hexsha": "8209c5305c76c6e5d7783fbaea992959f7b44f71", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 99.0, "max_stars_repo_stars_event_min_datetime": "2015-07-06T11:18:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T08:20:12.000Z", "max_issues_repo_path": "pose_estimation/src/translation_rotation_3d.cpp", "max_issues_repo_name": "carlo-/simtrack", "max_issues_repo_head_hexsha": "8209c5305c76c6e5d7783fbaea992959f7b44f71", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 44.0, "max_issues_repo_issues_event_min_datetime": "2015-10-09T19:11:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-25T03:51:39.000Z", "max_forks_repo_path": "pose_estimation/src/translation_rotation_3d.cpp", "max_forks_repo_name": "carlo-/simtrack", "max_forks_repo_head_hexsha": "8209c5305c76c6e5d7783fbaea992959f7b44f71", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 45.0, "max_forks_repo_forks_event_min_datetime": "2015-07-06T11:36:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T01:32:18.000Z", "avg_line_length": 31.2569002123, "max_line_length": 80, "alphanum_fraction": 0.6024317348, "num_tokens": 4637}
|
import torch
import numpy as np
def compare(cuda_out, pt_out, pt_out_control, rows):
    """Print accuracy diagnostics comparing an fp16 kernel against PyTorch.

    Compares a custom CUDA kernel output and a pure-PyTorch fp16 result
    against an fp32 PyTorch control, reporting the worst per-row absolute
    error and the values at those locations.

    Args:
        cuda_out: output tensor of the custom kernel.
        pt_out: output of the equivalent PyTorch ops run in fp16.
        pt_out_control: reference output from PyTorch ops kept in fp32.
        rows: size of the slowest (first) dimension; all remaining
            dimensions are flattened for the per-row comparison.

    Returns:
        None; all results are printed.
    """
    print("Pytorch ops in fp16: ", pt_out)
    print("Kernel result: ", cuda_out)
    print("Control (Pytorch ops, sticking to fp32): ", pt_out_control)

    # Make upconverted copies for error check against fp32 control
    cuda_out_fp32 = cuda_out.float()
    pt_out_fp32 = pt_out.float()

    # Flatten all but the slowest dimension
    cuda_out = cuda_out.view(rows, -1)
    pt_out = pt_out.view(rows, -1)
    cuda_out_fp32 = cuda_out_fp32.view(rows, -1)
    pt_out_fp32 = pt_out_fp32.view(rows, -1)
    pt_out_control = pt_out_control.view(rows, -1)

    # Worst absolute error within each row, and the column where it occurs.
    cuda_maxdiffs, cuda_maxdiff_locs = torch.max((pt_out_control - cuda_out_fp32).abs(), 1)
    pt_maxdiffs, pt_maxdiff_locs = torch.max((pt_out_control - pt_out_fp32).abs(), 1)

    print("cuda_maxdiffs = ", cuda_maxdiffs)
    print("cuda_maxdiff_locs = ", cuda_maxdiff_locs)
    print("pt_maxdiffs = ", pt_maxdiffs)
    print("pt_maxdiff_locs = ", pt_maxdiff_locs)

    # Advanced indexing with two index tensors selects one element per row.
    # This replaces the old numpy-array workaround, which is unnecessary on
    # modern PyTorch.
    row_indices = torch.arange(rows, dtype=torch.long)
    print("cuda_out at cuda_maxdiff_locs in each row:")
    print(cuda_out[row_indices, cuda_maxdiff_locs])
    print("pt_out_control at cuda_maxdiff_locs in each row:")
    print(pt_out_control[row_indices, cuda_maxdiff_locs])
    print("pt_out at pt_maxdiff_locs in each row:")
    print(pt_out[row_indices, pt_maxdiff_locs])
    print("pt_out_control at pt_maxdiff_locs in each row:")
    print(pt_out_control[row_indices, pt_maxdiff_locs])
|
{"hexsha": "72af1817cebc4c2e8435071cdc56fe9b55a574dc", "size": 1951, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/raw_ops/compare.py", "max_stars_repo_name": "FDecaYed/apex", "max_stars_repo_head_hexsha": "789afd89fe2c5a3e772f557055a9cf0f5e9d1241", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/raw_ops/compare.py", "max_issues_repo_name": "FDecaYed/apex", "max_issues_repo_head_hexsha": "789afd89fe2c5a3e772f557055a9cf0f5e9d1241", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/raw_ops/compare.py", "max_forks_repo_name": "FDecaYed/apex", "max_forks_repo_head_hexsha": "789afd89fe2c5a3e772f557055a9cf0f5e9d1241", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3720930233, "max_line_length": 90, "alphanum_fraction": 0.6586365966, "include": true, "reason": "import numpy", "num_tokens": 509}
|
from rl_games.common import a2c_common
from rl_games.algos_torch import torch_ext
from rl_games.algos_torch import central_value
from rl_games.common import common_losses
from rl_games.common import datasets
from rl_games.algos_torch import ppg_aux
from rl_games.common.ewma_model import EwmaModel
from torch import optim
import torch
from torch import nn
import numpy as np
import gym
class A2CAgent(a2c_common.ContinuousA2CBase):
    """Continuous-action PPO/A2C agent on the torch backend.

    Wires together the actor-critic network, an optional central value
    network, an optional EWMA proxy policy (decoupled PPO) and optional
    phasic-policy-gradient auxiliary loss, and implements the PPO
    minibatch update used by the base-class training loop.
    """

    def __init__(self, base_name, params):
        """Build networks, optimizer, dataset and auxiliary losses.

        Args:
            base_name: experiment/run name forwarded to the base class.
            params: full configuration dict (rl_games A2C schema).
        """
        a2c_common.ContinuousA2CBase.__init__(self, base_name, params)
        obs_shape = self.obs_shape
        build_config = {
            'actions_num' : self.actions_num,
            'input_shape' : obs_shape,
            'num_seqs' : self.num_actors * self.num_agents,
            'value_size': self.env_info.get('value_size',1),
            'normalize_value' : self.normalize_value,
            'normalize_input': self.normalize_input,
        }

        self.model = self.network.build(build_config)
        self.model.to(self.ppo_device)
        self.states = None  # RNN states; populated by init_rnn_from_model
        if self.ewma_ppo:
            # Slowly-updated proxy of the policy used by the decoupled
            # (EWMA) PPO actor loss below.
            self.ewma_model = EwmaModel(self.model, ewma_decay=0.889)
        self.init_rnn_from_model(self.model)
        self.last_lr = float(self.last_lr)
        self.bound_loss_type = self.config.get('bound_loss_type', 'bound') # 'regularisation' or 'bound'
        self.optimizer = optim.Adam(self.model.parameters(), float(self.last_lr), eps=1e-08, weight_decay=self.weight_decay)

        if self.has_central_value:
            cv_config = {
                'state_shape' : self.state_shape,
                'value_size' : self.value_size,
                'ppo_device' : self.ppo_device,
                'num_agents' : self.num_agents,
                'horizon_length' : self.horizon_length,
                'num_actors' : self.num_actors,
                'num_actions' : self.actions_num,
                'seq_len' : self.seq_len,
                'normalize_value' : self.normalize_value,
                'network' : self.central_value_config['network'],
                'config' : self.central_value_config,
                'writter' : self.writer,
                'max_epochs' : self.max_epochs,
                'multi_gpu' : self.multi_gpu
            }
            self.central_value_net = central_value.CentralValueTrain(**cv_config).to(self.ppo_device)

        self.use_experimental_cv = self.config.get('use_experimental_cv', True)
        self.dataset = datasets.PPODataset(self.batch_size, self.minibatch_size, self.is_discrete, self.is_rnn, self.ppo_device, self.seq_len)
        if self.normalize_value:
            # Share the value normalizer with the central value net when present.
            self.value_mean_std = self.central_value_net.model.value_mean_std if self.has_central_value else self.model.value_mean_std

        if 'phasic_policy_gradients' in self.config:
            self.has_phasic_policy_gradients = True
            self.ppg_aux_loss = ppg_aux.PPGAux(self, self.config['phasic_policy_gradients'])
        # NOTE(review): the next line reads has_phasic_policy_gradients even
        # when the config key is absent -- presumably the base class defaults
        # it to False; confirm.
        self.has_value_loss = (self.has_central_value and self.use_experimental_cv) \
            or (not self.has_phasic_policy_gradients and not self.has_central_value)
        self.algo_observer.after_init(self)

    def update_epoch(self):
        """Advance the epoch counter and return its new value."""
        self.epoch_num += 1
        return self.epoch_num

    def save(self, fn):
        """Serialize the full agent state to checkpoint file ``fn``."""
        state = self.get_full_state_weights()
        torch_ext.save_checkpoint(fn, state)

    def restore(self, fn):
        """Load a checkpoint previously written by :meth:`save`."""
        checkpoint = torch_ext.load_checkpoint(fn)
        self.set_full_state_weights(checkpoint)

    def get_masked_action_values(self, obs, action_masks):
        # Action masking is not defined for continuous action spaces.
        assert False

    def calc_gradients(self, input_dict):
        """Run one PPO minibatch update; stash stats in ``self.train_result``.

        Args:
            input_dict: minibatch from PPODataset with old values/log-probs,
                advantages, returns, actions, mus/sigmas, observations and
                (for RNN policies) rnn_states/rnn_masks.
        """
        value_preds_batch = input_dict['old_values']
        old_action_log_probs_batch = input_dict['old_logp_actions']
        advantage = input_dict['advantages']
        old_mu_batch = input_dict['mu']
        old_sigma_batch = input_dict['sigma']
        return_batch = input_dict['returns']
        actions_batch = input_dict['actions']
        # if self.modality_dict is not None:
        #     obs_batch = {}
        #     for modality_name in self.modality_dict:
        #         obs_batch[modality_name] = input_dict[modality_name]
        #     obs_batch = self._preproc_obs(obs_batch)
        #
        # else:
        obs_batch = input_dict['obs']
        obs_batch = self._preproc_obs(obs_batch)

        lr_mul = 1.0
        curr_e_clip = lr_mul * self.e_clip

        batch_dict = {
            'is_train': True,
            'prev_actions': actions_batch,
            'obs' : obs_batch,
        }

        rnn_masks = None
        if self.is_rnn:
            rnn_masks = input_dict['rnn_masks']
            batch_dict['rnn_states'] = input_dict['rnn_states']
            batch_dict['seq_length'] = self.seq_len

        # Forward pass and loss computation under autocast for mixed precision.
        with torch.cuda.amp.autocast(enabled=self.mixed_precision):
            res_dict = self.model(batch_dict)
            action_log_probs = res_dict['prev_neglogp']
            values = res_dict['values']
            entropy = res_dict['entropy']
            mu = res_dict['mus']
            sigma = res_dict['sigmas']

            if self.ewma_ppo:
                # Decoupled PPO: the clipping ratio is computed against the
                # EWMA proxy policy rather than the behavior policy.
                ewma_dict = self.ewma_model(batch_dict)
                proxy_neglogp = ewma_dict['prev_neglogp']
                a_loss = common_losses.decoupled_actor_loss(old_action_log_probs_batch, action_log_probs, proxy_neglogp, advantage, curr_e_clip)
                old_action_log_probs_batch = proxy_neglogp # to get right statistic later
                old_mu_batch = ewma_dict['mus']
                old_sigma_batch = ewma_dict['sigmas']
            else:
                a_loss = common_losses.actor_loss(old_action_log_probs_batch, action_log_probs, advantage, self.ppo, curr_e_clip)

            if self.has_value_loss:
                c_loss = common_losses.critic_loss(value_preds_batch, values, curr_e_clip, return_batch, self.clip_value)
            else:
                # Value is learned elsewhere (central value net / PPG phase).
                c_loss = torch.zeros(1, device=self.ppo_device)
            if self.bound_loss_type == 'regularisation':
                b_loss = self.reg_loss(mu)
            elif self.bound_loss_type == 'bound':
                b_loss = self.bound_loss(mu)
            else:
                b_loss = torch.zeros(1, device=self.ppo_device)
            # Average the per-sample losses, respecting RNN padding masks.
            losses, sum_mask = torch_ext.apply_masks([a_loss.unsqueeze(1), c_loss, entropy.unsqueeze(1), b_loss.unsqueeze(1)], rnn_masks)
            a_loss, c_loss, entropy, b_loss = losses[0], losses[1], losses[2], losses[3]

            loss = a_loss + 0.5 * c_loss * self.critic_coef - entropy * self.entropy_coef + b_loss * self.bounds_loss_coef
            if self.multi_gpu:
                self.optimizer.zero_grad()
            else:
                # Setting grads to None avoids an extra zero-fill kernel.
                for param in self.model.parameters():
                    param.grad = None

        self.scaler.scale(loss).backward()
        # TODO: refactor the gradient clipping / scaler stepping paths below
        if self.truncate_grads:
            if self.multi_gpu:
                self.optimizer.synchronize()
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                with self.optimizer.skip_synchronize():
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
            else:
                self.scaler.unscale_(self.optimizer)
                nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_norm)
                self.scaler.step(self.optimizer)
                self.scaler.update()
        else:
            self.scaler.step(self.optimizer)
            self.scaler.update()

        # KL between the new policy and the (proxy or behavior) old policy,
        # for logging / adaptive LR in the base class.
        with torch.no_grad():
            reduce_kl = rnn_masks is None
            kl_dist = torch_ext.policy_kl(mu.detach(), sigma.detach(), old_mu_batch, old_sigma_batch, reduce_kl)
            if rnn_masks is not None:
                kl_dist = (kl_dist * rnn_masks).sum() / rnn_masks.numel() #/ sum_mask

        if self.ewma_ppo:
            self.ewma_model.update()

        self.diagnostics.mini_batch(self,
            {
                'values' : value_preds_batch,
                'returns' : return_batch,
                'new_neglogp' : action_log_probs,
                'old_neglogp' : old_action_log_probs_batch,
                'masks' : rnn_masks
            }, curr_e_clip, 0)

        self.train_result = (a_loss, c_loss, entropy, \
            kl_dist, self.last_lr, lr_mul, \
            mu.detach(), sigma.detach(), b_loss)

    def train_actor_critic(self, input_dict):
        """Run one gradient step on a minibatch and return the stats tuple."""
        self.calc_gradients(input_dict)
        return self.train_result

    def reg_loss(self, mu):
        """L2 penalty on action means (bound_loss_type == 'regularisation')."""
        if self.bounds_loss_coef is not None:
            reg_loss = (mu*mu).sum(axis=-1)
        else:
            reg_loss = 0
        return reg_loss

    def bound_loss(self, mu):
        """Quadratic penalty on action means outside [-1.1, 1.1]
        (bound_loss_type == 'bound')."""
        if self.bounds_loss_coef is not None:
            soft_bound = 1.1
            mu_loss_high = torch.clamp_min(mu - soft_bound, 0.0)**2
            mu_loss_low = torch.clamp_max(mu + soft_bound, 0.0)**2
            b_loss = (mu_loss_low + mu_loss_high).sum(axis=-1)
        else:
            b_loss = 0
        return b_loss
|
{"hexsha": "d2e009c2464e00917d87adcbf537dfbe4c81e60a", "size": 9148, "ext": "py", "lang": "Python", "max_stars_repo_path": "rl_games/algos_torch/a2c_continuous.py", "max_stars_repo_name": "yzqin/rl_games", "max_stars_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rl_games/algos_torch/a2c_continuous.py", "max_issues_repo_name": "yzqin/rl_games", "max_issues_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rl_games/algos_torch/a2c_continuous.py", "max_forks_repo_name": "yzqin/rl_games", "max_forks_repo_head_hexsha": "6e09fec1e60d70c1dc1934ec65ed3265950a8c34", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.7716894977, "max_line_length": 144, "alphanum_fraction": 0.6116090949, "include": true, "reason": "import numpy", "num_tokens": 2070}
|
import pickle
import h5py
import numpy as np
import sklearn
import sklearn.decomposition
import sklearn.linear_model
import sklearn.preprocessing
from fastprogress.fastprogress import force_console_behavior
import mabe
import mabe.config
import mabe.loss
import mabe.model
import mabe.ringbuffer
# %%
# Number of cross-validation splits generated by this script.
num_splits = 32

# %%
# Force console-friendly progress bars (no notebook widgets).
master_bar, progress_bar = force_console_behavior()

# %%
# Precomputed (PCA-reduced) feature file for tasks 1-3.
feature_path = mabe.config.ROOT_PATH / "features_task123_final_pca.hdf5"
with h5py.File(feature_path, "r") as hdf:

    # Load every dataset in an HDF5 group as a list of float32 arrays
    # (one array per sequence).
    def load_all(groupname):
        return list(map(lambda v: v[:].astype(np.float32), hdf[groupname].values()))

    # Labeled (train) sequences plus per-sequence annotator and task ids.
    X_labeled = load_all("train/x")
    Y_labeled = load_all("train/y")
    annotators_labeled = np.array(list(map(lambda v: v[()], hdf["train/annotators"].values())))
    num_annotators = len(np.unique(annotators_labeled))
    clf_tasks_labeled = np.array(list(map(lambda v: v[()], hdf["train/clf_tasks"].values())))
    num_clf_tasks = len(np.unique(clf_tasks_labeled))

    # Unlabeled (test) sequences and their group identifiers.
    X_unlabeled = load_all("test/x")
    Y_unlabeled = load_all("test/y")
    groups_unlabeled = list(map(lambda v: v[()], hdf["test/groups"].values()))
# %%
X = X_labeled + X_unlabeled
Y = Y_labeled + Y_unlabeled
scaler = sklearn.preprocessing.StandardScaler().fit(np.concatenate(X))
X = list(map(lambda x: scaler.transform(x), X))
X_labeled = list(map(lambda x: scaler.transform(x), X_labeled))
X_unlabeled = list(map(lambda x: scaler.transform(x), X_unlabeled))
# %%
sample_lengths = np.array(list(map(len, X)))
p_draw = sample_lengths / np.sum(sample_lengths)
len(X), len(X_labeled), min(sample_lengths), max(sample_lengths)
# %%
for i in range(0, num_splits):
indices_labeled = np.arange(len(X_labeled))
indices_unlabeled = len(X_labeled) + np.arange(len(X_unlabeled))
indices = np.arange(len(X))
# sample until the train split has at least one sample from each annotator
valid = False
while not valid:
train_indices_labeled = np.random.choice(
indices_labeled, int(0.85 * len(X_labeled)), replace=False
)
val_indices_labeled = np.array(
[i for i in indices_labeled if i not in train_indices_labeled]
)
valid = len(np.unique(annotators_labeled[train_indices_labeled])) == num_annotators
valid &= len(np.unique(clf_tasks_labeled[train_indices_labeled])) == num_clf_tasks
valid &= len(np.unique(clf_tasks_labeled[val_indices_labeled])) >= (
num_clf_tasks - 1
) # one task with only one trajectory
train_indices_unlabeled = np.random.choice(
indices_unlabeled, int(0.85 * len(X_unlabeled)), replace=False
)
train_indices = np.concatenate((train_indices_labeled, train_indices_unlabeled))
val_indices_unlabeled = np.array(
[i for i in indices_unlabeled if i not in train_indices_unlabeled]
)
val_indices = np.concatenate((val_indices_labeled, val_indices_unlabeled))
split = dict(
indices_labeled=indices_labeled,
indices_unlabeled=indices_unlabeled,
indices=indices,
train_indices_labeled=train_indices_labeled,
train_indices_unlabeled=train_indices_unlabeled,
train_indices=train_indices,
val_indices_labeled=val_indices_labeled,
val_indices_unlabeled=val_indices_unlabeled,
val_indices=val_indices,
)
pickle.dump(split, open(mabe.config.ROOT_PATH / f"split_{i}.pkl", "wb"))
|
{"hexsha": "080ecb21e1f88d2587e7e67fadcde5a43826fc76", "size": 3433, "ext": "py", "lang": "Python", "max_stars_repo_path": "hydrogen/create_splits.py", "max_stars_repo_name": "nebw/mabe", "max_stars_repo_head_hexsha": "2f8d4c4ad82202f614147791a9d1b5ab799fd86f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-01-29T21:09:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-27T20:17:20.000Z", "max_issues_repo_path": "hydrogen/create_splits.py", "max_issues_repo_name": "nebw/mabe", "max_issues_repo_head_hexsha": "2f8d4c4ad82202f614147791a9d1b5ab799fd86f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hydrogen/create_splits.py", "max_forks_repo_name": "nebw/mabe", "max_forks_repo_head_hexsha": "2f8d4c4ad82202f614147791a9d1b5ab799fd86f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0096153846, "max_line_length": 95, "alphanum_fraction": 0.710457326, "include": true, "reason": "import numpy", "num_tokens": 862}
|
import functools
import multiprocessing
import random
import matplotlib.pyplot as plt
import numpy as np
import tqdm
import phyre
# Demo: compare a cached simulation status against a live phyre simulation
# for one randomly selected task and action.
tier = 'ball'
eval_setup = 'ball_cross_template'
fold_id = 0
random.seed(0)
train, dev, test = phyre.get_fold(eval_setup, fold_id)
cache = phyre.get_default_100k_cache(tier)
print('cache.action_array() and shape:', cache.action_array.shape,
      cache.action_array)
task_id = random.choice(train)
print('Randomly selected task:', task_id)
statuses = cache.load_simulation_states(task_id)
print('Cached simulation status of actions on task', task_id, ':',
      statuses.shape, statuses)
print('Share of SOLVED statuses:', (statuses == phyre.SimulationStatus.SOLVED).mean())
cached_status = phyre.simulation_cache.INVALID
while cached_status == 0:  # Let's make sure we chose a valid action.
    # Bug fix: random.randint's upper bound is INCLUSIVE, so the original
    # random.randint(0, len(cache)) could return len(cache) and index one
    # past the end of cache.action_array. randrange excludes the stop value.
    action_index = random.randrange(len(cache))
    action = cache.action_array[action_index]
    # Get the status for this action from the cache.
    cached_status = statuses[action_index]
# Now let's create a simulator for this task to simulate the action.
simulator = phyre.initialize_simulator([task_id], tier)
simulation = simulator.simulate_action(0,
                                       action,
                                       need_images=True)
# Let's compare.
print('Cached status is:', cached_status)
print('Simulated status is:', simulation.status)
print('Simulator considers task solved?', simulation.status.is_solved())
img = phyre.vis.observations_to_float_rgb(simulation.images[-1])
|
{"hexsha": "980da5084dcad5219fb0c6021404251540f52041", "size": 1575, "ext": "py", "lang": "Python", "max_stars_repo_path": "Datatest_cache.py", "max_stars_repo_name": "kaiyuanmifen/PhyreData", "max_stars_repo_head_hexsha": "231cf05af4e4c81786246ec0f3544b808b209483", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Datatest_cache.py", "max_issues_repo_name": "kaiyuanmifen/PhyreData", "max_issues_repo_head_hexsha": "231cf05af4e4c81786246ec0f3544b808b209483", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Datatest_cache.py", "max_forks_repo_name": "kaiyuanmifen/PhyreData", "max_forks_repo_head_hexsha": "231cf05af4e4c81786246ec0f3544b808b209483", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6363636364, "max_line_length": 86, "alphanum_fraction": 0.713015873, "include": true, "reason": "import numpy", "num_tokens": 340}
|
# (C) ColorfulSoft corp., 2021. All Rights reserved
# PyTorch to Torch.NET checkpoint converter
import torch;
import sys;
import numpy;
import struct;
def main(argv: [str]) -> None:
    """Convert a PyTorch checkpoint (a mapping of parameter name to tensor)
    into the Torch.NET binary checkpoint format, written next to the input
    file as `<input>.thn`.

    argv -- command-line vector; argv[1] is the PyTorch checkpoint path.
    """
    if(len(argv) != 2):
        print("Expected pytorch file name")
        # Bug fix: the original fell through after the message and crashed
        # with an IndexError on argv[1].
        return
    with open(argv[1] + ".thn", 'wb') as ckpt:
        header = struct.Struct('= B 10s i B')
        integer = struct.Struct('= i')
        byte = struct.Struct('= B')
        # File header: tag-length byte, "Torch.NET>" magic, version, flag.
        ckpt.write(header.pack(10, "Torch.NET>".encode('ASCII'), 0, 1))
        m = torch.load(argv[1], map_location = 'cpu')
        for p in m:
            # Length-prefixed parameter name. The length is a single byte,
            # so names longer than 255 chars would raise ValueError here.
            bin = bytearray((' ' + str(p)).encode('ASCII'))
            bin[0] = len(str(p))
            ckpt.write(bin)
            # Shape record: rank as int32, then each dimension as int32.
            size = torch.tensor(m[p].shape).int()
            ckpt.write(integer.pack(len(m[p].shape)))
            ckpt.write(size.numpy().tobytes())
            # Dtype tag byte (0..8 per the Torch.NET format).
            if(m[p].dtype == torch.float16):
                # Bug fix: the original executed `break` here, truncating
                # the file right after the name/shape record (corrupt
                # output). float16 now gets its tag and data like the rest.
                bin = byte.pack(0)
            elif(m[p].dtype == torch.float32):
                bin = byte.pack(1)
            elif(m[p].dtype == torch.float64):
                bin = byte.pack(2)
            elif(m[p].dtype == torch.uint8):
                bin = byte.pack(3)
            elif(m[p].dtype == torch.int8):
                bin = byte.pack(4)
            elif(m[p].dtype == torch.int16):
                bin = byte.pack(5)
            elif(m[p].dtype == torch.int32):
                bin = byte.pack(6)
            elif(m[p].dtype == torch.int64):
                bin = byte.pack(7)
            elif(m[p].dtype == torch.bool):
                bin = byte.pack(8)
            else:
                # Bug fix: `raise "..."` raises a str, which is itself a
                # TypeError in Python 3; raise a real exception instead.
                raise TypeError("Unsupported tensor type.")
            ckpt.write(bin)
            ckpt.write(m[p].numpy().tobytes())

if __name__ == "__main__":
    main(sys.argv)
|
{"hexsha": "972fb254cf19345501569d82856e8017ef48a788", "size": 1797, "ext": "py", "lang": "Python", "max_stars_repo_path": "Utils/pytorch2torchnet.py", "max_stars_repo_name": "ColorfulSoft/System.AI", "max_stars_repo_head_hexsha": "5f65dbfc06337b202b618780a255396d13c41267", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-02-06T20:19:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-28T02:15:52.000Z", "max_issues_repo_path": "Utils/pytorch2torchnet.py", "max_issues_repo_name": "ColorfulSoft/System.AI", "max_issues_repo_head_hexsha": "5f65dbfc06337b202b618780a255396d13c41267", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-10-26T10:55:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T03:38:35.000Z", "max_forks_repo_path": "Utils/pytorch2torchnet.py", "max_forks_repo_name": "ColorfulSoft/System.AI", "max_forks_repo_head_hexsha": "5f65dbfc06337b202b618780a255396d13c41267", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-28T18:09:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-23T10:43:17.000Z", "avg_line_length": 35.2352941176, "max_line_length": 72, "alphanum_fraction": 0.4813578186, "include": true, "reason": "import numpy", "num_tokens": 440}
|
# -*-mode:python; mode:font-lock;-*-
"""
file IntegralUpperLimits.py
@brief Function to calculate upper limits by integrating Likelihood function
to given \"probability\" level.
@author Stephen Fegan <sfegan@llr.in2p3.fr>
$Id: IntegralUpperLimit.py,v 1.7 2016/10/14 17:41:40 echarles Exp $
See help for IntegralUpperLimits.calc for full details.
"""
# 2011-09-27: Whenever possible call the Python Likelihood classes
# rather than the underlying C++ class - therefore, all references to
# "like.logLike" removed. This allows the code to work with
# SummedLikelihood. Fixes from Joshua Lande for transposed letters in
# a variable name which caused crashes occasionally - thanks!
# 2010-06-11: New algorithm to find integration limits. See below.
# Renamed some of the arguments for consistence with Jim's code, in
# particular "verbosity" and "cl". New code to allow the new search
# algorithm to be used to calculate chi-squared style upper limit.
# 2010-06-10: Add computation of probability for arbitrary flux
# values. Allow skipping of global minimization if user has already
# done it. Some small optimizations.
# 2009-04-01: Added nuisance cache to make better initial guesses for
# nuisance parameters by extrapolating them from previous iterations.
# This makes Minuit quicker (at least when using strategy 0)
import UnbinnedAnalysis
import scipy.integrate
import scipy.interpolate
import scipy.optimize
import scipy.stats
import math
from LikelihoodState import LikelihoodState
def _guess_nuisance(x, like, cache):
    """Internal function which guesses the value of a nuisance
    parameter before the optimizer is called by interpolating from
    previously found values. Not intended for use outside of this
    package.

    x     -- value of the parameter of interest for the upcoming call.
    like  -- the (possibly summed) Likelihood object to update in place.
    cache -- dict mapping previously-visited x values to the list of
             free-parameter values found there (see _cache_nuisance)."""
    # Sorted list of parameter-of-interest values already in the cache.
    X = list(cache.keys())
    X.sort()
    if len(X)<2:
        # Not enough points to interpolate from; leave parameters as-is.
        return
    elif x>max(X):
        # Outside the cached range: fall back to the nearest cached point.
        _reset_nuisance(max(X), like, cache)
        return
    elif x<min(X):
        _reset_nuisance(min(X), like, cache)
        return
    sync_name = ""
    icache = 0
    for iparam in range(len(like.model.params)):
        # Sync the previously-updated source before moving on to a new one.
        # NOTE(review): on the first pass this calls syncSrcParams("") --
        # presumably a harmless no-op in the Likelihood API; confirm.
        if sync_name != like[iparam].srcName:
            like.syncSrcParams(sync_name)
            sync_name = ""
        if(like.model[iparam].isFree()):
            # Collect this free parameter's cached values at each cached x
            # and interpolate linearly to the requested x.
            Y = []
            for ix in X: Y.append(cache[ix][icache])
            # Simple interpolation is best --- DO NOT use splines!
            p = scipy.interpolate.interp1d(X,Y)(x).item()
            # Clip the guess to the parameter's allowed bounds.
            limlo, limhi = like.model[iparam].getBounds()
            p = max(limlo, min(p, limhi))
            like.model[iparam].setValue(p)
            sync_name = like[iparam].srcName
            icache += 1
    # Sync the last source touched in the loop above.
    if sync_name != "":
        like.syncSrcParams(sync_name)
def _reset_nuisance(x, like, cache):
    """Internal function which sets the values of the nuisance
    parameters to those found in a previous iteration of the
    optimizer. Not intended for use outside of this package.

    Returns True if cached values existed for x and were applied to the
    model, False otherwise (model left untouched)."""
    sync_name = ""
    icache = 0
    if x in cache:
        params = cache[x]
        for iparam in range(len(like.model.params)):
            # Sync the previously-updated source before switching sources.
            # NOTE(review): first pass calls syncSrcParams("") -- presumably
            # a harmless no-op; confirm against the Likelihood API.
            if sync_name != like[iparam].srcName:
                like.syncSrcParams(sync_name)
                sync_name = ""
            if(like.model[iparam].isFree()):
                # Restore the cached value for this free parameter; icache
                # indexes free parameters in model order (see _cache_nuisance).
                like.model[iparam].setValue(params[icache])
                sync_name = like[iparam].srcName
                icache += 1
        # Sync the last source touched in the loop above.
        if sync_name != "":
            like.syncSrcParams(sync_name)
        return True
    return False
def _cache_nuisance(x, like, cache):
"""Internal function which caches the values of the nuisance
parameters found after optimization so that they can be used
again. Not intended for use outside of this package."""
params = []
for iparam in range(len(like.model.params)):
if(like.model[iparam].isFree()):
params.append(like.model[iparam].value())
cache[x] = params
def _loglike(x, like, par, srcName, offset, verbosity, no_optimizer,
             optvalue_cache, nuisance_cache):
    """Internal function used by the SciPy integrator and root finder
    to evaluate the likelihood function. Not intended for use outside
    of this package.

    Freezes the parameter of interest `par` at value x, optionally
    re-optimizes the remaining free parameters, and returns
    -like() - offset (the log-likelihood relative to `offset`).
    Results are memoized in optvalue_cache when it is provided."""
    # Optimizer uses verbosity level one smaller than given here
    optverbosity = max(verbosity-1, 0)
    par.setFree(False)
    par.setValue(x)
    like.syncSrcParams(srcName)
    # This flag skips calling the optimizer - and is used when calculating the
    # approximate function or in the case when all parameters are frozen or
    # since some optimizers might have problems being called with nothing to do
    if no_optimizer:
        return -like() - offset
    # Call the optimizer if the optimum value is not in the cache OR if
    # we fail to reset the nuisance parameters to those previously found
    optvalue = None
    if ((optvalue_cache == None) or (nuisance_cache == None) or
        (x not in optvalue_cache) or
        (_reset_nuisance(x, like, nuisance_cache) == False)):
        try:
            # Seed the nuisance parameters with interpolated guesses to
            # help the optimizer converge, then cache what it found.
            if(nuisance_cache != None):
                _guess_nuisance(x, like, nuisance_cache)
            like.optimize(optverbosity)
            if(nuisance_cache != None):
                _cache_nuisance(x, like, nuisance_cache)
        except RuntimeError:
            # Single retry: the first optimize() can throw from a bad
            # starting point; try once more from where it stopped.
            like.optimize(optverbosity)
            if(nuisance_cache != None):
                _cache_nuisance(x, like, nuisance_cache)
        optvalue = -like()
        if(optvalue_cache != None):
            optvalue_cache[x] = optvalue
    else:
        optvalue = optvalue_cache[x]
    return optvalue - offset
def _integrand(x, f_of_x, like, par, srcName, maxval, verbosity,
               no_optimizer, optvalue_cache, nuisance_cache):
    """Internal function used by the SciPy integrator: the likelihood
    (exponential of the log-likelihood relative to its maximum) at x.
    Every evaluation is recorded in f_of_x for later re-use. Not
    intended for use outside of this package."""
    log_l = _loglike(x, like, par, srcName, maxval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
    value = math.exp(log_l)
    f_of_x[x] = value
    if verbosity:
        print("Function evaluation:", x, value)
    return value
def _approxroot(x, approx_cache, like, par, srcName, subval, verbosity):
    """Internal helper for the SciPy root finder: evaluate the
    approximate likelihood (nuisance parameters held fixed, optimizer
    skipped), memoized in approx_cache. Not intended for use outside
    of this package."""
    if x not in approx_cache:
        approx_cache[x] = _loglike(x, like, par, srcName, subval, verbosity,
                                   True, None, None)
    f = approx_cache[x]
    if verbosity:
        print("Approximate function root evaluation:", x, f)
    return f
def _root(x, like, par, srcName, subval, verbosity,
          no_optimizer, optvalue_cache, nuisance_cache):
    """Internal helper for the SciPy root finder: evaluate the exact
    (re-optimized) likelihood relative to subval. Not intended for use
    outside of this package."""
    value = _loglike(x, like, par, srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
    if verbosity:
        print("Exact function root evaluation:", x, value)
    return value
def _splintroot(xhi, yseek, xlo, spl_rep):
"""Internal function used by the SciPy root finder to find the
point where integral of (spline) likelihood passes desired
threshold. Not intended for use outside of this package."""
return scipy.interpolate.splint(xlo,xhi,spl_rep)-yseek
def _splevroot(x, yseek, spl_rep):
"""Internal function used by the SciPy root finder to find the
point where the (spline of the) log-likelihood passes desired
threshold. Not intended for use outside of this package."""
return scipy.interpolate.splev(x, spl_rep)-yseek
def _int1droot(x, yseek, int_rep):
"""Internal function used by the SciPy root finder to find the
point where the (linear interpolation of the) log-likelihood
passes desired threshold. Not intended for use outside of this
package."""
return int_rep(x).item()-yseek
def _find_interval(like, par, srcName, no_optimizer,
                   maxval, fitval, limlo, limhi,
                   delta_log_like_limits = 2.71/2, verbosity = 0, tol = 0.01,
                   no_lo_bound_search = False, nloopmax = 5,
                   optvalue_cache = None, nuisance_cache = None):
    """Internal function to search for interval of the normalization
    parameter in which the log Likelihood is larger than predefined
    value. Used to find the upper limit in the profile method and to
    find sensible limits of integration in the Bayesian method. Use
    the SciPy Brent method root finder to do the search. Use new fast
    method for up to nloopmax iterations then fall back to old method.

    Returns [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals]."""
    # Bug fix: optvalue_cache and nuisance_cache used to be mutable
    # default arguments (dict()), which Python evaluates once at
    # definition time; since both dicts are mutated during the search
    # (via _root/_loglike), successive default-argument calls silently
    # shared stale optimizer state. Use None sentinels instead.
    if optvalue_cache is None:
        optvalue_cache = dict()
    if nuisance_cache is None:
        nuisance_cache = dict()
    subval = maxval - delta_log_like_limits
    search_xtol = limlo*0.1
    search_ytol = tol
    # 2010-06-11: NEW and FASTER algorithm to find integration
    # limits. Instead of evaluating the real function while searching
    # for the root (which requires calling the optimizer) we now
    # evaluate an approximate function, in which all the background
    # parameters are kept constant. When we find the root (flux) of
    # the approximate function then optimize at that flux to evaluate
    # how close the real function is there. Then repeat this up to
    # "nloopmax" times, after which revert to old method if we haven't
    # converged. Each time the real function is evaluated at the root
    # of the approximate it forces the approximate function in the
    # next iteration to equal the real function at that point (since
    # the background parameters values are changed to those optimized
    # at that point) and so the real and approximate functions get
    # closer and closer around the region of the roots.
    # 2009-04-16: modified to do logarithmic search before calling
    # Brent because the minimizer does not converge very well when it
    # is called alternatively at extreme ends of the flux range,
    # because the "nuisance" parameters are very far from their
    # optimal values from call to call. THIS COMMENT IS OBSOLETED
    # BY PREVIOUS COMMENT EXCEPT IF/WHEN NEW METHOD FAILS.
    exact_root_evals = -len(optvalue_cache)
    approx_root_evals = 0
    temp_saved_state = LikelihoodState(like)
    # HI BOUND
    xlft = fitval
    xrgt = limhi
    xtst = fitval
    ytst = delta_log_like_limits
    iloop = 0
    while (iloop<nloopmax) and (xrgt>xlft) and (abs(ytst)>search_ytol):
        approx_cache = dict()
        approx_cache[xtst] = ytst
        if _approxroot(xrgt,approx_cache,like,par,srcName,subval,verbosity)<0:
            xtst = scipy.optimize.brentq(_approxroot, xlft, xrgt,
                                         xtol=search_xtol,
                                         args = (approx_cache,like,par,
                                                 srcName,subval,verbosity))
        else:
            xtst = xrgt
        ytst = _root(xtst, like, par,srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
        if ytst<=0: xrgt=xtst
        else: xlft=xtst
        iloop += 1
        approx_root_evals += len(approx_cache)-1
        pass
    xhi = xtst
    yhi = ytst
    # Fast method did not converge: fall back to the bracketing +
    # logarithmic-stepping search on the exact function.
    if (xrgt>xlft) and (abs(ytst)>search_ytol):
        xlft = fitval
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval>0 and ix>xlft):
                xlft = ix
        xrgt = limhi
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval<0 and ix<xrgt):
                xrgt = ix
        if(xrgt > max(xlft*10.0, xlft+(limhi-limlo)*1e-4)):
            xtst = max(xlft*10.0, xlft+(limhi-limlo)*1e-4)
            while(xtst<xrgt and\
                  _root(xtst, like,par, srcName, subval, verbosity,
                        no_optimizer, optvalue_cache, nuisance_cache)>=0):
                xtst *= 10.0
            if(xtst<xrgt):
                xrgt = xtst
        if xrgt>limhi: xrgt=limhi
        if xrgt<limhi or \
           _root(xrgt, like, par, srcName, subval, verbosity,
                 no_optimizer, optvalue_cache, nuisance_cache)<0:
            xhi = scipy.optimize.brentq(_root, xlft, xrgt, xtol=search_xtol,
                                        args = (like,par,srcName,\
                                                subval,verbosity,no_optimizer,
                                                optvalue_cache,nuisance_cache))
            pass
        yhi = _root(xhi, like, par, srcName, subval, verbosity,
                    no_optimizer, optvalue_cache, nuisance_cache)
        pass
    temp_saved_state.restore()
    # LO BOUND
    if(no_lo_bound_search):
        xlo = fitval
        ylo = maxval
        exact_root_evals += len(optvalue_cache)
        return [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals]
    xlft = limlo
    xrgt = fitval
    xtst = fitval
    ytst = delta_log_like_limits
    iloop = 0
    while (iloop<nloopmax) and (xrgt>xlft) and (abs(ytst)>search_ytol):
        approx_cache = dict()
        approx_cache[xtst] = ytst
        if _approxroot(xlft,approx_cache,like,par,srcName,subval,verbosity)<0:
            xtst = scipy.optimize.brentq(_approxroot, xlft, xrgt,
                                         xtol=search_xtol,
                                         args = (approx_cache,like,par,
                                                 srcName,subval,verbosity))
        else:
            xtst = xlft
        ytst = _root(xtst, like, par, srcName, subval, verbosity,
                     no_optimizer, optvalue_cache, nuisance_cache)
        if ytst<=0: xlft=xtst
        else: xrgt=xtst
        approx_root_evals += len(approx_cache)-1
        iloop += 1
        pass
    xlo = xtst
    ylo = ytst
    if (xrgt>xlft) and (abs(ytst)>search_ytol):
        xrgt = fitval
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval>0 and ix<xrgt):
                xrgt = ix
        xlft = limlo
        # NOTE(review): the hi-bound analogue tightens the bracket with
        # ix<xrgt; here `ix<xlft` moves xlft DOWN rather than up -- this
        # may have been intended as ix>xlft. Preserved as-is (it only
        # widens the bracket, it cannot skip the root); confirm intent.
        for ix in optvalue_cache:
            if(optvalue_cache[ix]-subval<0 and ix<xlft):
                xlft = ix
        if(xlft < min(xrgt*0.1, xrgt-(limhi-limlo)*1e-4)):
            xtst = min(xrgt*0.1, xrgt-(limhi-limlo)*1e-4)
            while(xtst>xlft and\
                  _root(xtst, like,par, srcName, subval, verbosity,
                        no_optimizer, optvalue_cache, nuisance_cache)>=0):
                xtst *= 0.1
            if(xtst>xlft):
                xlft = xtst
        if xlft<limlo: xlft=limlo
        if xlft>limlo or \
           _root(xlft, like, par, srcName, subval, verbosity,
                 no_optimizer, optvalue_cache, nuisance_cache)<0:
            xlo = scipy.optimize.brentq(_root, xlft, xrgt, xtol=search_xtol,
                                        args = (like,par,srcName,\
                                                subval,verbosity,no_optimizer,
                                                optvalue_cache,nuisance_cache))
            pass
        ylo = _root(xlo, like, par, srcName, subval, verbosity,
                    no_optimizer, optvalue_cache, nuisance_cache)
        pass
    temp_saved_state.restore()
    exact_root_evals += len(optvalue_cache)
    return [xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals]
def calc(like, srcName, *args, **kwargs):
    """Deprecated alias for calc_int(); forwards all arguments unchanged
    and prints a deprecation notice."""
    print ("IntegralUpperLimits.calc() is deprecated, use calc_int() instead")
    return calc_int(like, srcName, *args,**kwargs)
def calc_int(like, srcName, cl=0.95, verbosity=0,
skip_global_opt=False, be_very_careful=False, freeze_all=False,
delta_log_like_limits = 10.0, profile_optimizer = None,
emin=100, emax=3e5, poi_values = []):
"""Calculate an integral upper limit by direct integration.
Description:
Calculate an integral upper limit by integrating the likelihood
function up to a point which contains a given fraction of the total
probability. This is a fairly standard Bayesian approach to
calculating upper limits, which assumes a uniform prior probability.
The likelihood function is not assumed to be distributed as
chi-squared.
This function first uses the optimizer to find the global minimum,
then uses the scipy.integrate.quad function to integrate the
likelihood function with respect to one of the parameters. During the
integration, the other parameters can be frozen at their values found
in the global minimum or optimized freely at each point.
Inputs:
like -- a binned or unbinned likelihood object which has the
desired model. Be careful to freeze the index of the source for
which the upper limit is being if you want to quote a limit with a
fixed index.
srcName -- the name of the source for which to compute the limit.
cl -- probability level for the upper limit.
verbosity -- verbosity level. A value of zero means no output will
be written. With a value of one the function writes some values
describing its progress, but the optimizers don't write
anything. Values larger than one direct the optimizer to produce
verbose output.
skip_global_opt -- if the model is already at the global minimum
value then you can direct the integrator to skip the initial step
to find the minimum. If you specify this option and the model is
NOT at the global minimum your results will likely be wrong.
be_very_careful -- direct the integrator to be even more careful
in integrating the function, by telling it to use a higher
tolerance and to specifically pay attention to the peak in the
likelihood function. More evaluations of the integrand will be
made, which WILL be slower and MAY result in a more accurate
limit. NOT RECOMMENDED
freeze_all -- freeze all other parameters at the values of the
global minimum.
delta_log_like_limits -- the limits on integration is defined by
the region around the global maximum in which the log likelihood
is close enough to the peak value. Too small a value will mean the
integral does not include a significant amount of the likelihood
function. Too large a value may make the integrator miss the peak
completely and get a bogus answer (although the
\"be_very_careful\" option will help here).
profile_optimizer -- Alternative optimizer to use when computing
the profile, after the global minimum has been found. Only set
this if you want to use a different optimizer for calculating the
profile than for calculating the global minimum.
emin, emax -- Bounds on energy range over which the flux should be
integrated.
poi_values -- Points of interest: values of the normalization
parameter corresponding to fluxes of interest to the user. The
integrator will calculate the integral of the probability
distribution to each of these values and return them in the vector
\"results.poi_probs\". This parameter must be a vector, and can be
empty.
Outputs: (limit, results)
limit -- the flux limit found.
results -- a dictionary of additional results from the
calculation, such as the value of the peak, the profile of the
likelihood and two profile-likelihood upper-limits.
"""
saved_state = LikelihoodState(like)
###########################################################################
#
# This function has 4 main components:
#
# 1) Find the global maximum of the likelihood function using ST
# 2) Define the integration limits by finding the points at which the
# log likelihood has fallen by a certain amount
# 3) Integrate the function using the QUADPACK adaptive integrator
# 4) Calculate the upper limit by re-integrating the function using
# the evaluations made by the adaptive integrator. Two schemes are
# tried, splines to the function points and trapezoidal quadrature.
#
###########################################################################
# Optimizer uses verbosity level one smaller than given here
optverbosity = max(verbosity-1, 0)
###########################################################################
#
# 1) Find the global maximum of the likelihood function using ST
#
###########################################################################
par = like.normPar(srcName)
fitstat = None
if not skip_global_opt:
# Make sure desired parameter is free during global optimization
par.setFree(True)
like.syncSrcParams(srcName)
# Perform global optimization
if verbosity:
print ("Finding global maximum")
try:
like.fit(optverbosity)
fitstat = like.optObject.getRetCode()
if verbosity and fitstat != 0:
print ("Minimizer returned with non-zero code: ",fitstat)
except RuntimeError:
print ("Failed to find global maximum, results may be wrong")
pass
pass
original_optimizer = like.optimizer
if profile_optimizer != None:
like.optimizer = profile_optimizer
# Store values of global fit
maxval = -like()
fitval = par.getValue()
fiterr = par.error()
limlo, limhi = par.getBounds()
# limlo should not be allowed to go down to 0
limlo = max(limlo,0.01*fiterr,1e-4)
if verbosity:
print ("Maximum of %g with %s = %g +/- %g"\
%(-maxval,srcName,fitval,fiterr))
# Freeze all other model parameters if requested (much faster!)
if(freeze_all):
for i in range(len(like.model.params)):
like.model[i].setFree(False)
like.syncSrcParams(like[i].srcName)
# Freeze the parameter of interest
par.setFree(False)
like.syncSrcParams(srcName)
# Set up the caches for the optimum values and nuisance parameters
optvalue_cache = dict()
nuisance_cache = dict()
optvalue_cache[fitval] = maxval
_cache_nuisance(fitval, like, nuisance_cache)
# Test if all parameters are frozen (could be true if we froze
# them above or if they were frozen in the user's model
all_frozen = True
for i in range(len(like.model.params)):
if like.model[i].isFree():
all_frozen = False
break
###########################################################################
#
# 2) Define the integration limits by finding the points at which the
# log likelihood has fallen by a certain amount
#
###########################################################################
if verbosity:
print ("Finding integration bounds (delta log Like=%g)"\
%(delta_log_like_limits))
[xlo, xhi, ylo, yhi, exact_root_evals, approx_root_evals] = \
_find_interval(like, par, srcName, all_frozen,
maxval, fitval, limlo, limhi,
delta_log_like_limits, verbosity, like.tol,
False, 5, optvalue_cache, nuisance_cache)
if poi_values != None and len(poi_values)>0:
xlo = max(min(xlo, min(poi_values)/2.0), limlo)
xhi = min(max(xhi, max(poi_values)*2.0), limhi)
if verbosity:
print ("Integration bounds: %g to %g (%d full fcn evals and %d approx)"\
%(xlo,xhi,exact_root_evals,approx_root_evals))
profile_dlogL1 = -0.5*scipy.stats.chi2.isf(1-cl, 1)
profile_dlogL2 = -0.5*scipy.stats.chi2.isf(1-2*(cl-0.5), 1)
if yhi - delta_log_like_limits > profile_dlogL1:
print ("calc_int error: parameter max", xhi, "is not large enough")
print ("delta logLike =", yhi - delta_log_like_limits)
return -1, {}
###########################################################################
#
# 3) Integrate the function using the QUADPACK adaptive integrator
#
###########################################################################
#
# Do integration using QUADPACK routine from SciPy -- the "quad"
# routine uses adaptive quadrature, which *should* spend more time
# evaluating the function where it counts the most.
#
points = []
epsrel = (1.0-cl)*1e-3
if be_very_careful:
# In "be very careful" mode we explicitly tell "quad" that it
# should examine more carefully the point at x=fitval, which
# is the peak of the likelihood. We also use a tighter
# tolerance value, but that seems to have a secondary effect.
points = [ fitval ]
epsrel = (1.0-cl)*1e-8
if verbosity:
print ("Integrating probability distribution")
nfneval = -len(optvalue_cache)
f_of_x = dict()
quad_ival, quad_ierr = \
scipy.integrate.quad(_integrand, xlo, xhi,\
args = (f_of_x, like, par, srcName, maxval,\
verbosity, all_frozen,
optvalue_cache, nuisance_cache),\
points=points, epsrel=epsrel, epsabs=1)
nfneval += len(optvalue_cache)
if verbosity:
print ("Total integral: %g +/- %g (%d fcn evals)"\
%(quad_ival,quad_ierr,nfneval))
###########################################################################
#
# 4) Calculate the upper limit by re-integrating the function using
# the evaluations made by the adaptive integrator. Two schemes are
# tried, splines to the function points and trapezoidal quadrature.
#
###########################################################################
# Calculation of the upper limit requires integrating up to
# various test points, and finding the one that contains the
# prescribed fraction of the probability. Using the "quad"
# function to do this by evaluating the likelihood function
# directly would be computationally prohibitive, it is preferable
# to use the function evaluations that have been saved in the
# "f_of_x" variable.
# We try 2 different integration approaches on this data:
# trapezoidal quadrature and integration of a fitted spline, with
# the expectation that the spline will be better, but that perhaps
# the trapezoidal might be more robust if the spline fit goes
# crazy. The method whose results are closest to those from "quad"
# is picked to do the search.
# Organize values computed into two vectors x & y
x = list(f_of_x.keys())
x.sort()
y=[]
logy=[]
for xi in x:
y.append(f_of_x[xi])
logy.append(math.log(f_of_x[xi]))
# Evaluate upper limit using trapezoidal rule
trapz_ival = scipy.integrate.trapz(y,x)
cint = 0
Cint = [ 0 ]
for i in range(len(x)-1):
cint += 0.5*(f_of_x[x[i+1]]+f_of_x[x[i]])*(x[i+1]-x[i])
Cint.append(cint)
int_irep = scipy.interpolate.interp1d(x, Cint)
xlim_trapz = scipy.optimize.brentq(_int1droot, x[0], x[-1],
args = (cl*cint, int_irep))
ylim_trapz = int_irep(xlim_trapz).item()/cint
# Evaluate upper limit using spline
spl_irep = scipy.interpolate.splrep(x,y,xb=xlo,xe=xhi)
spl_ival = scipy.interpolate.splint(xlo,xhi,spl_irep)
xlim_spl = scipy.optimize.brentq(_splintroot, xlo, xhi,
args = (cl*spl_ival, xlo, spl_irep))
ylim_spl = scipy.interpolate.splint(xlo,xlim_spl,spl_irep)/spl_ival
# Test which is closest to QUADPACK adaptive method: TRAPZ or SPLINE
if abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):
# Evaluate upper limit using spline
if verbosity:
print ("Using spline integral: %g (delta=%g)"\
%(spl_ival,abs(spl_ival/quad_ival-1)))
xlim = xlim_spl
ylim = ylim_spl
if verbosity:
print ("Spline search: %g (P=%g)"%(xlim,ylim))
else:
# Evaluate upper limit using trapezoidal rule
if verbosity:
print ("Using trapezoidal integral: %g (delta=%g)"\
%(trapz_ival,abs(trapz_ival/quad_ival-1)))
xlim = xlim_trapz
ylim = ylim_trapz
if verbosity:
print ("Trapezoidal search: %g (P=%g)"%(xlim,cl))
like.optimizer = original_optimizer
###########################################################################
#
# Since we have computed the profile likelihood, calculate the
# right side of the 2-sided confidence region at the CL% and
# 2*(CL-50)% levels under the assumption that the likelihood is
# distributed as chi^2 of 1 DOF. Again, use the root finder on a
# spline and linear representation of logL.
#
###########################################################################
# The spline algorithm is prone to noise in the fitted logL,
# especially in "be_very_careful" mode, so fall back to a linear
# interpolation if necessary
spl_drep = scipy.interpolate.splrep(x,logy,xb=xlo,xe=xhi)
spl_pflux1 = scipy.optimize.brentq(_splevroot, fitval, xhi,
args = (profile_dlogL1, spl_drep))
spl_pflux2 = scipy.optimize.brentq(_splevroot, fitval, xhi,
args = (profile_dlogL2, spl_drep))
int_drep = scipy.interpolate.interp1d(x,logy)
int_pflux1 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x),
args = (profile_dlogL1, int_drep))
int_pflux2 = scipy.optimize.brentq(_int1droot, max(min(x),fitval), max(x),
args = (profile_dlogL2, int_drep))
if (2.0*abs(int_pflux1-spl_pflux1)/abs(int_pflux1+spl_pflux1) > 0.05 or \
2.0*abs(int_pflux2-spl_pflux2)/abs(int_pflux2+spl_pflux2) > 0.05):
if verbosity:
print ("Using linear interpolation for profile UL estimate")
profile_flux1 = int_pflux1
profile_flux2 = int_pflux2
else:
if verbosity:
print ("Using spline interpolation for profile UL estimate")
profile_flux1 = spl_pflux1
profile_flux2 = spl_pflux2
###########################################################################
#
# Evaluate the probabilities of the "points of interest" using the integral
#
###########################################################################
poi_probs = [];
poi_dlogL_interp = [];
poi_chi2_equiv = [];
for xval in poi_values:
dLogL = None
if(xval >= xhi):
pval = 1.0
elif(xval <= xlo):
pval = 0.0
# Same test as above to decide between TRAPZ and SPLINE
elif abs(spl_ival - quad_ival) < abs(trapz_ival - quad_ival):
pval = scipy.interpolate.splint(xlo,xval,spl_irep)/spl_ival
dlogL = scipy.interpolate.splev(xval, spl_drep)
else:
pval = int_irep(xval).item()/cint
dlogL = int_drep(xval).item()
poi_probs.append(pval)
poi_dlogL_interp.append(dlogL)
poi_chi2_equiv.append(scipy.stats.chi2.isf(1-pval,1))
###########################################################################
#
# Calculate the integral flux at the upper limit parameter value
#
###########################################################################
# Set the parameter value that corresponds to the desired C.L.
par.setValue(xlim)
# Evaluate the flux corresponding to this upper limit.
ul_flux = like[srcName].flux(emin, emax)
saved_state.restore()
# Pack up all the results
results = dict(all_frozen = all_frozen,
ul_frac = cl,
ul_flux = ul_flux,
ul_value = xlim,
ul_trapz = xlim_trapz,
ul_spl = xlim_spl,
int_limits = [xlo, xhi],
profile_x = x,
profile_y = y,
peak_fitstatus = fitstat,
peak_value = fitval,
peak_dvalue = fiterr,
peak_loglike = maxval,
prof_ul_frac1 = cl,
prof_ul_dlogL1 = profile_dlogL1,
prof_ul_value1 = profile_flux1,
prof_ul_frac2 = 2*(cl-0.5),
prof_ul_dlogL2 = profile_dlogL2,
prof_ul_value2 = profile_flux2,
poi_values = poi_values,
poi_probs = poi_probs,
poi_dlogL_interp = poi_dlogL_interp,
poi_chi2_equiv = poi_chi2_equiv,
flux_emin = emin,
flux_emax = emax)
return ul_flux, results
def calc_chi2(like, srcName, cl=0.95, verbosity=0,
              skip_global_opt=False, freeze_all=False,
              profile_optimizer=None, emin=100, emax=3e5, poi_values=None):
    """Calculate an integral upper limit by the profile likelihood (chi2) method.

    Description:

    Calculate an upper limit using the likelihood ratio test, i.e. by
    supposing the Likelihood is distributed as chi-squared of one degree of
    freedom and finding the point at which it decreases by the
    required amount to get an upper limit at a certain confidence limit.

    This function first uses the optimizer to find the global minimum,
    then uses the new root finding algorithm to find the point at which
    the Likelihood decreases by the required amount. The background
    parameters can be frozen at their values found in the global minimum
    or optimized freely at each point.

    Inputs:

    like -- a binned or unbinned likelihood object which has the
       desired model. Be careful to freeze the index of the source for
       which the upper limit is being computed if you want to quote a
       limit with a fixed index.

    srcName -- the name of the source for which to compute the limit.

    cl -- probability level for the upper limit.

    verbosity -- verbosity level. A value of zero means no output will
       be written. With a value of one the function writes some values
       describing its progress, but the optimizers don't write
       anything. Values larger than one direct the optimizer to produce
       verbose output.

    skip_global_opt -- if the model is already at the global minimum
       value then you can direct the integrator to skip the initial step
       to find the minimum. If you specify this option and the model is
       NOT at the global minimum your results will likely be wrong.

    freeze_all -- freeze all other parameters at the values of the
       global minimum.

    profile_optimizer -- Alternative optimizer to use when computing
       the profile, after the global minimum has been found. Only set
       this if you want to use a different optimizer for calculating the
       profile than for calculating the global minimum.

    emin, emax -- Bounds on energy range over which the flux should be
       integrated.

    poi_values -- Points of interest: values of the normalization
       parameter corresponding to fluxes of interest to the user. The
       profile likelihood will be evaluated at each of these values and
       the equivalent probability under the LRT returned in the vector
       \"results.poi_probs\". This parameter must be a sequence, and can
       be empty (the default).

    Outputs: (limit, results)

    limit -- the flux limit found.

    results -- a dictionary of additional results from the calculation,
       such as the value of the peak value etc.
    """
    # Avoid the shared-mutable-default pitfall: build a fresh list per call.
    if poi_values is None:
        poi_values = []
    saved_state = LikelihoodState(like)

    ###########################################################################
    #
    # This function has 2 main components:
    #
    # 1) Find the global maximum of the likelihood function using ST
    # 2) Find the point at which it falls by the appropriate amount
    #
    ###########################################################################

    # Optimizer uses verbosity level one smaller than given here
    optverbosity = max(verbosity - 1, 0)

    ###########################################################################
    #
    # 1) Find the global maximum of the likelihood function using ST
    #
    ###########################################################################

    par = like.normPar(srcName)

    fitstat = None
    if not skip_global_opt:
        # Make sure desired parameter is free during global optimization
        par.setFree(True)
        like.syncSrcParams(srcName)

        # Perform global optimization
        if verbosity:
            print("Finding global maximum")
        try:
            like.fit(optverbosity)
            fitstat = like.optObject.getRetCode()
            if verbosity and fitstat != 0:
                print("Minimizer returned with non-zero code: ", fitstat)
        except RuntimeError:
            # Best-effort: proceed with whatever state the fit left behind,
            # but warn the user the results may be unreliable.
            print("Failed to find global maximum, results may be wrong")

    original_optimizer = like.optimizer
    if profile_optimizer is not None:
        like.optimizer = profile_optimizer

    # Store values of global fit
    maxval = -like()
    fitval = par.getValue()
    fiterr = par.error()
    limlo, limhi = par.getBounds()
    if verbosity:
        print("Maximum of %g with %s = %g +/- %g"
              % (-maxval, srcName, fitval, fiterr))

    # Freeze all other model parameters if requested (much faster!)
    if freeze_all:
        for i in range(len(like.model.params)):
            like.model[i].setFree(False)
            like.syncSrcParams(like[i].srcName)

    # Freeze the parameter of interest
    par.setFree(False)
    like.syncSrcParams(srcName)

    # Set up the caches for the optimum values and nuisance parameters
    optvalue_cache = dict()
    nuisance_cache = dict()
    optvalue_cache[fitval] = maxval
    _cache_nuisance(fitval, like, nuisance_cache)

    # Test if all parameters are frozen (could be true if we froze
    # them above or if they were frozen in the user's model)
    all_frozen = True
    for i in range(len(like.model.params)):
        if like.model[i].isFree():
            all_frozen = False
            break

    ###########################################################################
    #
    # 2) Find the point at which the likelihood has fallen by the
    #    appropriate amount
    #
    ###########################################################################

    # One-sided CL maps onto the two-sided 2*(cl-0.5) chi2 quantile (1 DOF).
    delta_log_like = 0.5 * scipy.stats.chi2.isf(1 - 2 * (cl - 0.5), 1)
    if verbosity:
        print("Finding limit (delta log Like=%g)"
              % (delta_log_like))
    [xunused, xlim, yunused, ylim, exact_root_evals, approx_root_evals] = \
        _find_interval(like, par, srcName, all_frozen,
                       maxval, fitval, limlo, limhi,
                       delta_log_like, verbosity, like.tol,
                       True, 5, optvalue_cache, nuisance_cache)
    if verbosity:
        print("Limit: %g (%d full fcn evals and %d approx)"
              % (xlim, exact_root_evals, approx_root_evals))

    ###########################################################################
    #
    # Evaluate the probabilities of the "points of interest" using the LRT
    #
    ###########################################################################

    poi_dlogL = []
    poi_probs = []
    for xval in poi_values:
        if xval >= limhi:
            dlogL = None
            pval = 1.0
        elif xval <= limlo:
            dlogL = None
            pval = 0.0
        else:
            dlogL = _loglike(xval, like, par, srcName, maxval, verbosity,
                             all_frozen, optvalue_cache, nuisance_cache)
            # Sign of (xval - fitval) decides which tail of the chi2 applies.
            if xval < fitval:
                pval = 0.5 * (1 - scipy.stats.chi2.cdf(-2 * dlogL, 1))
            else:
                pval = 0.5 * (1 + scipy.stats.chi2.cdf(-2 * dlogL, 1))
            if verbosity:
                print("POI %g: Delta log Like = %g (Pr=%g)" % (xval, dlogL, pval))

        poi_probs.append(pval)
        poi_dlogL.append(dlogL)

    like.optimizer = original_optimizer

    ###########################################################################
    #
    # Calculate the integral flux at the upper limit parameter value
    #
    ###########################################################################

    # Set the parameter value that corresponds to the desired C.L.
    par.setValue(xlim)

    # Evaluate the flux corresponding to this upper limit.
    ul_flux = like[srcName].flux(emin, emax)

    saved_state.restore()

    # Pack up all the results
    results = dict(all_frozen       = all_frozen,
                   ul_frac          = cl,
                   ul_flux          = ul_flux,
                   ul_value         = xlim,
                   ul_loglike       = maxval + ylim - delta_log_like,
                   ul_dloglike      = ylim - delta_log_like,
                   peak_fitstatus   = fitstat,
                   peak_value       = fitval,
                   peak_dvalue      = fiterr,
                   peak_loglike     = maxval,
                   poi_values       = poi_values,
                   poi_probs        = poi_probs,
                   poi_dlogL        = poi_dlogL,
                   flux_emin        = emin,
                   flux_emax        = emax)
    return ul_flux, results
if __name__ == "__main__":
    # Example driver: compute an integral upper limit for one source of an
    # unbinned analysis with a frozen spectral index.
    srcName = "EMS0001"

    obs = UnbinnedAnalysis.UnbinnedObs('ft1_roi.fits',
                                       scFile   = 'ft2.fits',
                                       expMap   = 'expMap.fits',
                                       expCube  = 'expCube.fits',
                                       irfs     = 'P6_V9_DIFFUSE')

    #min_opt = 'InteractiveMinuit,MIN 0 $TOL,HESSE,.q'
    #pro_opt = 'InteractiveMinuit,SET STR 0,MIN 0 $TOL,.q'
    min_opt = 'MINUIT'
    pro_opt = None

    like = UnbinnedAnalysis.UnbinnedAnalysis(obs, 'model.xml', min_opt)

    src_spectrum = like[srcName].funcs['Spectrum']
    par = src_spectrum.getParam("Index")
    if par:
        # Quote the limit for a fixed spectral index of -2.
        par.setFree(False)
        par.setValue(-2.0)
        like.syncSrcParams(srcName)

    ul, results = calc_int(like, srcName, verbosity=1)

    print(results)

    for i in range(len(results["profile_x"])):
        print(results["profile_x"][i], results["profile_y"][i])

    # BUGFIX: calc_int stores these under "prof_ul_value1"/"prof_ul_value2"
    # (not "prof_ul_flux1"/"prof_ul_flux2"), so the old keys raised KeyError.
    print("Profile UL 1: %g (%g, %g)" % (results["prof_ul_value1"],
                                         results["ul_frac"],
                                         results["prof_ul_dlogL1"]))
    print("Profile UL 2: %g (%g, %g)" % (results["prof_ul_value2"],
                                         results["prof_ul_frac2"],
                                         results["prof_ul_dlogL2"]))
    print("UL: ", ul)
|
{"hexsha": "763f722b6f9279d57d5342d19b9367e7050b7bc3", "size": 42752, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/IntegralUpperLimit.py", "max_stars_repo_name": "fermi-lat/pyLikelihood", "max_stars_repo_head_hexsha": "7dec59151e452c702b05b65498adb36c4831b86f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2017-11-20T14:14:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-05T20:05:17.000Z", "max_issues_repo_path": "python/IntegralUpperLimit.py", "max_issues_repo_name": "fermi-lat/pyLikelihood", "max_issues_repo_head_hexsha": "7dec59151e452c702b05b65498adb36c4831b86f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-05-29T10:25:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-28T08:40:10.000Z", "max_forks_repo_path": "python/IntegralUpperLimit.py", "max_forks_repo_name": "fermi-lat/pyLikelihood", "max_forks_repo_head_hexsha": "7dec59151e452c702b05b65498adb36c4831b86f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-08-18T01:04:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-22T16:49:58.000Z", "avg_line_length": 40.0674789128, "max_line_length": 117, "alphanum_fraction": 0.5952236153, "include": true, "reason": "import scipy", "num_tokens": 10074}
|
\documentclass{CUP-JNL-PPS}
%%%% Packages
\usepackage{latexsym}
\usepackage{graphicx}
\usepackage{multicol,multirow}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{mathrsfs}
\usepackage{amsthm}
\usepackage{rotating}
\usepackage{appendix}
\usepackage[authoryear]{natbib}
\usepackage{ifpdf}
\usepackage[T1]{fontenc}
\usepackage[type1,lining]{ebgaramond}
\usepackage[type1,lining]{sourcesanspro}
\usepackage{newtxmath}
\usepackage{textcomp}%
\usepackage{xcolor}%
\usepackage{hyperref}
\usepackage{lipsum}
%%%%
%\articletype{RESEARCH ARTICLE}
\jname{Perspectives on Politics}
%\artid{20}
\jyear{2021}
\jvol{10}
%\jissue{1}
%\raggedbottom
%\usepackage{showframe}
\begin{document}
\begin{Frontmatter}
\title[Panorama Report]{CMSC426PJ2 Report}
\author{Yingqiao Gou \texttt{ygou@terpmail.umd.edu}}
\author{Yizhan Ao \texttt{josephao@umd.edu}}
\author{Daniel Song \texttt{dsong12@umd.edu}}
\abstract{Abstracts should be 250 words. It must be able to stand alone and so cannot contain citations to the paper's references, equations, etc. An abstract must consist of a single paragraph and be concise. Because of online formatting, abstracts must appear as plain as possible.}
\end{Frontmatter}
\dropcap{T}he general idea of our implementation is to follow the pipeline that was given to us. We need to calculate the second image to the last image, with the geometric transformation once the feature matching is successful with the current picture. By stitching the picture we can walk through every transformation, and the final presentation will be one single panorama picture.
Setting the lower limit of matched features to 35, we only accept an image pair as a match when at least that many feature correspondences are found. Our reasoning for this lower bound is that an image falling below it is likely a random image that does not belong in the panorama; when this happens, an error is raised. This decision worked in our customTestSet 1, where such an image was correctly rejected.
\section[]{ANMS (Adaptive Non-Maximal Suppression)}
The goal of ANMS is to pick out the stronger corner points.
\begin{itemize}
\item Using the cornermetric method to find all the corners of each grayscale image.
\item After applying imreagionalmax, we can get the strong corners in the image.
\item Using the ANMS algorithm we can find the best pixels of each image.
\item We analyze a $40\times40$ pixel square area, selecting an initial 300 N-best points to trial.
\item Note: Each image may vary from sizes in real life cases
\end{itemize}
\subsection{Shortcomings}
\begin{itemize}
\item We specify the N-best manually choosing from 450 - 300 - 200- 100 -50
\item The problem we think is that the level of the variable is affecting to understand the sets
\item Even though the variable is performed well in terms of the point indication, however, we think the point may be not accurate in some points
\item ANMS takes a set of points that are equally distributed. However, removing some of the better-scoring points in a high-score area results in less accurate data in dense areas.
\item This could result in a decrease in accuracy in the feature matching and warping.
\end{itemize}
% \begin{figure}[t]%
% \FIG{\includegraphics[width=290]{outputimages/ANMS/anms1.jpg}}
% {\caption{This is an example of caption }
% \label{fig1}}
% \end{figure}
% \begin{figure}[t]%
% \FIG{\includegraphics[width=290]{outputimages/ANMS/anms2.png}}
% {\caption{This is an example of caption }
% \label{fig2}}
% \end{figure}
% \begin{figure}[t]%
% \FIG{\includegraphics[width=290]{outputimages/ANMS/anms2.png}}
% {\caption{This is an example of caption }
% \label{fig2}}
% \end{figure}
\begin{center}
\includegraphics[width=275]{outputimages/ANMS/anms1.jpg}\\
\includegraphics[width=275]{outputimages/ANMS/anms2.png}\\
\includegraphics[width=275]{outputimages/ANMS/ANMS4.png}\\
\includegraphics[width=275]{outputimages/ANMS/ANMS5.png}
\end{center}
\section[]{Feature Descriptor}
During the ANMS stage, we discovered feature points. Each feature point must be described by a feature vector, which is equivalent to storing the information at each feature point into a vector. Next, we'll go through one of the most basic feature descriptors.
Take a patch of size $40\times40$ and center it on the key point. At every N-best point, getFeatureDescriptors() would take a patch around the point of the form: I(row-19:row+20, column-19:column+20), then blur, reshape, and standardize as described. The value is returned as a 64 x N-best matrix. We apply a Gaussian blur to the patch, subsample the blurred output to $8\times8$, and reshape it to a $64\times1$ vector.
\subsection{Shortcoming}
\begin{itemize}
\item Because the output provides no visible cues, the function is reliant on the accuracy of earlier portions of the project.
\item The matching discovered enough pairs between the floors (cases of test images - TestSet4) that the whole panorama was influenced in the case of test images - TestSet1.
\item Before matching, a more robust algorithm may seek the subject first.
\end{itemize}
\section{Feature Matching}
\begin{itemize}
\item
\item
\item
\end{itemize}
\section{RANSAC to estimate Robust Homography}
\begin{itemize}
\item
\item
\item
\end{itemize}
\section{Blending Images}
To blend images and form a panorama, we need to estimate transformations between images.
Those transformation matrices (H) are estimated using est-(homography()), which takes
coordinates of inliers from RANSAC. We then create a projective2d datatype (tforms) based on each H matrix, then we compute the specific transformation matrix of each image (tforms.T) by applying \\
\begin{equation}
T(n) = T(n) * T(n-1) * … * T(1)
\end{equation}
The reason for us to do this is that we need to apply several projective2d functions for blending.
\begin{figure}[t]%
\FIG{\includegraphics[width=240]{outputimages/blending/p1.jpg}}
{\caption{Figure 1. Example of tforms, projective2d structure}
}
\end{figure}
\begin{figure}[t]%
\FIG{\includegraphics[width=240]{outputimages/blending/p2.jpg}}
{\caption{Figure 2. Reason to apply outputLimits() for transformation}
}
\end{figure}
% \includegraphics[width=240]{outputimages/blending/p2.jpg}
After we get specific transform matrices, we then calculate coordinate limits for each transform.The reason for us to do this is that images don’t remain in their original shape when they transform. As a result, we need to estimate the limit to make sure we have enough pixel space to hold them after transformations. We uses outputLimits() function from projective2d to achieve this.
To improve the result, we added a step where we recalculate to find a specific picture as the better centerpiece for the panorama. Initially, we estimate all transformations based on the first picture (using the first picture as the center). This action sometimes will make the final panorama looks incorrect if the first picture isn’t a reasonable center (consider if we use an edge picture as the center).
\section{Cylindrical Projection}
\begin{figure}[t]%
\FIG{\includegraphics[width=240]{outputimages/blending/p3.jpg}}
{\caption{Figure 3. code portion: outputLimits()}
}
\end{figure}
\begin{figure}[t]%
\FIG{\includegraphics[width=240]{outputimages/blending/p4.jpg}}
{\caption{Figure 4. Example of distorted panorama based on the wrong center}
}
\end{figure}
After we find the new center, we recalculate each picture’s transformation matrix based on it. To do such, we calculate the inverted matrix of the current center’s transformation matrix, and then update other images’ transformation matrices by tforms(i).T = tforms(i).T * Tinv.T. Based on the new transformation matrices, we recalculate the limit for each image during transformation. Then, we find the overall max and min coordinates across all image limits to make sure we have enough space to hold them all for the panorama. Based on the overall max and min, we then initialize the panorama (filled with black pixels).
\begin{figure}[t]%
\FIG{\includegraphics[width=240]{outputimages/blending/p5.jpg}}
{\caption{Figure 5. code portion: Update transformation matrices based on new center Figure}
}
\end{figure}
Finally, we loop through each image, wrap it based on its transformation matrix using imwarp()function, generate a binary mask, then overlay the wrapped image onto the panorama using the step() function. After this stage, we shall have our panorama finished.
% \begin{table}[t]
% \tabcolsep=0pt%
% \TBL{\caption{Tables which are too long to fit,
% should be written using the ``table*'' environment\label{tab2}}}
% {\begin{fntable}
% \begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcccccc@{}}\toprule%
% & \multicolumn{3}{@{}c@{}}{\TCH{Element 1}}& \multicolumn{3}{@{}c@{}}{\TCH{Element 2\smash{\footnotemark[1]}}}
% \\\cmidrule{2-4}\cmidrule{5-7}%
% \TCH{Project} & \TCH{Energy} & \TCH{$\boldsymbol{\sigma_{\text{calc}}}$} & \TCH{$\boldsymbol{\sigma_{\text{expt}}}$} &
% \TCH{Energy} & \TCH{$\boldsymbol{\sigma_{\text{calc}}}$} & \TCH{$\boldsymbol{\sigma_{\text{expt}}}$} \\\midrule
% \TCH{Stage 3}&990 A &168 &47$\pm$12 &78 A &66 &39$\pm$10\\
% {\TCH{Stage 4}}&500 A &961 &22$\pm$10 &90 A &68 &92$\pm$40\\
% \botrule
% \end{tabular*}%
% \footnotetext[]{{Note:} This is an example of table footnote this is an example of table footnote this is an example of table footnote this is an example of~table footnote this is an example of table footnote}
% \footnotetext[1]{This is an example of table footnote}%
% \end{fntable}}
% \vspace*{7pt}
% \end{table}
\section{Conclusion}
Some Conclusions here.
\begin{Backmatter}
\begin{thebibliography}{}
\bibitem[Ananin and Mironov (2000)]{bib1}
{Ananin, Beth, and Mironov, Antony}. 2000. ``The moduli space of $2$-dimensional algebras'', \textit{Comm. Algebra} {28}(9), {4481}--{4488}.
\bibitem[Bai and Meng (2001)]{bib2}
{Bai, Clifton, and Meng, Dyck}. 2001. ``The classification of Novikov algebras in low dimension'', \textit{J. Phys. A: Math. Gen.} {34}, {1581}--{1594}.
\bibitem[Ca\~{n}ete and Khudoyberdiyev (2013)]{bib3}
{Ca\~{n}ete, Enderson, and Khudoyberdiyev, Angus}. 2013. ``The classification of $4$-dimensional Leibniz algebras'', \textit{Linear Algebra and its Applications} {439}(1), {273}--{288}.
\bibitem[Goze and Remm (2011)]{bib4}
{Goze, Michael, and Remm, Edward}. 2011. ``$2$-dimensional algebras'', \textit{Afr. J. Math. Phys.} {10}(1), {81}--{91}.
\bibitem[Petersson (2000)]{bib5}
{Petersson, Hentry}. 2000. ``The classification of two-dimensional nonassociative algebras'', \textit{Results Math} {37}, no. 1-2, {120}--{154}.
\end{thebibliography}
\end{Backmatter}
\end{document}
|
{"hexsha": "a1462922b08fd930a85d9e912d52e284aa9811b2", "size": 10541, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Project2/report.tex", "max_stars_repo_name": "killerfrost22/CMSC426_group", "max_stars_repo_head_hexsha": "e89e4d48480a08492b3343eff8716b3c19c7853f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-02T14:21:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-02T14:21:10.000Z", "max_issues_repo_path": "Project2/report.tex", "max_issues_repo_name": "killerfrost22/CMSC426_group", "max_issues_repo_head_hexsha": "e89e4d48480a08492b3343eff8716b3c19c7853f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Project2/report.tex", "max_forks_repo_name": "killerfrost22/CMSC426_group", "max_forks_repo_head_hexsha": "e89e4d48480a08492b3343eff8716b3c19c7853f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-22T00:09:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-22T00:09:40.000Z", "avg_line_length": 45.632034632, "max_line_length": 611, "alphanum_fraction": 0.7517313348, "num_tokens": 2985}
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import pprint
import torch.nn.functional as F
import numpy as np
import torch
from torch.distributions import Categorical
from classy_vision.losses import ClassyLoss, register_loss
from classy_vision.generic.distributed_util import get_rank
from torch import nn
from vissl.config import AttrDict
from vissl.utils.distributed_utils import gather_from_all
@register_loss("dstl_issl_loss")
class DstlISSLLoss(ClassyLoss):
    """
    Contrastive loss proposed in the ISSL <IRL> paper; see the paper for the
    details on the loss. This class is a thin Classy Vision wrapper that
    builds and delegates to :class:`DstlISSLCriterion`.

    Config params:
        n_Mx (int): number of maximal invariants.
        temperature_assign (float): temperature applied to the logits when
            assigning M(X).
        temperature_pred (float): temperature applied to the logits when
            predicting M(X).
        num_crops (int): number of crops used.
        crops_for_assign (List[int]): which crops to use for assignment.
        beta_pM_unif (float): scaling to use for the entropy.
        ema_weight_marginal (float): ema to use for the prior. If None no ema.
    """

    def __init__(self, loss_config: AttrDict, device: str = "gpu"):
        super().__init__()
        self.loss_config = loss_config
        cfg = loss_config
        self.dstl_criterion = DstlISSLCriterion(
            n_Mx=cfg.n_Mx,
            temperature_assign=cfg.temperature_assign,
            temperature_pred=cfg.temperature_pred,
            num_crops=cfg.num_crops,
            crops_for_assign=cfg.crops_for_assign,
            beta_H_MlZ=cfg.beta_H_MlZ,
            beta_pM_unif=cfg.beta_pM_unif,
            ema_weight_marginal=cfg.ema_weight_marginal,
            warmup_teacher_iter=cfg.warmup_teacher_iter,
            warmup_beta_unif_iter=cfg.warmup_beta_unif_iter,
        )

    @classmethod
    def from_config(cls, loss_config: AttrDict):
        """
        Instantiates DstlISSLLoss from configuration.

        Args:
            loss_config: configuration for the loss

        Returns:
            DstlISSLLoss instance.
        """
        return cls(loss_config)

    def forward(self, output, target):
        # `target` is intentionally ignored: the criterion is self-supervised
        # and derives its training signal from the model output alone.
        return self.dstl_criterion(output)

    def __repr__(self):
        state = {"name": self._get_name(), "dstl_criterion": self.dstl_criterion}
        return pprint.pformat(state, indent=2)
class DstlISSLCriterion(nn.Module):
    """Core ISSL distillation criterion.

    Consumes a pair of logit tensors (teacher/"assign" and student/"predict")
    and combines three terms: a distillation cross-entropy, a uniform-marginal
    (maximality) term, and an invariance/determinism term.
    """

    def __init__(self,
                 n_Mx : int = 16384,
                 temperature_assign : float = 0.5,
                 temperature_pred : float = 1,
                 num_crops : int = 2,
                 crops_for_assign : list[int] = [0,1],
                 beta_H_MlZ : float = 0.5,
                 beta_pM_unif: float = 1.9,
                 ema_weight_marginal : float = 0.7,
                 warmup_beta_unif_iter: int = None, # haven't tried but might be worth
                 warmup_teacher_iter: int= None
                 ):
        super(DstlISSLCriterion, self).__init__()
        self.n_Mx = n_Mx
        self.temperature_assign = temperature_assign
        self.temperature_pred = temperature_pred
        self.num_crops = num_crops
        # NOTE(review): mutable default `[0, 1]` above is shared across
        # instances; harmless here because it is never mutated, but worth
        # confirming no caller mutates `crops_for_assign` in place.
        self.crops_for_assign = crops_for_assign
        self.beta_pM_unif = beta_pM_unif
        self.beta_H_MlZ = beta_H_MlZ
        self.ema_weight_marginal = ema_weight_marginal
        self.warmup_beta_unif_iter = warmup_beta_unif_iter
        self.warmup_teacher_iter = warmup_teacher_iter
        self.dist_rank = get_rank()
        # Buffer so the iteration counter is checkpointed with the module.
        self.register_buffer("num_iteration", torch.zeros(1, dtype=int))

        # NOTE(review): this condition looks inverted — the message says the
        # inequality "doesn't currently hold", yet it fires exactly when
        # beta_pM_unif >= beta_H_MlZ + 1 DOES hold. Likely should be `<`.
        if self.beta_pM_unif >= self.beta_H_MlZ + 1:
            logging.info(f"Theory suggests beta_pM_unif >= beta_H_MlZ + 1, which doesn't currently hold {beta_pM_unif} < {beta_H_MlZ + 1}.")

        if self.ema_weight_marginal is not None:
            # initialize running means with uniform
            uniform = torch.ones(self.n_Mx) / self.n_Mx
            self.running_means = nn.ModuleList([RunningMean(uniform,
                                                            alpha_use=self.ema_weight_marginal)
                                                for _ in range(len(self.crops_for_assign))])

    def forward(self, output: List[torch.Tensor]):
        # NOTE(review): `List` is not imported; this only works because
        # `from __future__ import annotations` defers annotation evaluation.
        self.num_iteration += 1

        logits_assign, logits_predict = output
        logits_assign = logits_assign.float() / self.temperature_assign

        # Per-crop teacher distributions p(M|Z) and their logs.
        all_p_Mlz = F.softmax(logits_assign, dim=-1
                              ).chunk(len(self.crops_for_assign))
        all_log_p_Mlz = F.log_softmax(logits_assign, dim=-1
                                      ).chunk(len(self.crops_for_assign))
        # Per-crop student log-distributions log q(M|Z).
        all_log_q_Mlz = F.log_softmax(logits_predict.float() / self.temperature_pred, dim=-1
                                      ).chunk(self.num_crops)

        # Accumulators for the three loss terms and their pair counts.
        CE_pMlz_qMlza = 0
        H_M = 0
        CE_pMlz_pMlza = 0
        n_CE_pq = 0
        n_CE_pp = 0
        for i_p, p_Mlz in enumerate(all_p_Mlz):

            ##### Ensure maximality #####
            # current marginal estimate p(M). batch shape: [] ; event shape: []
            p_M = p_Mlz.mean(0, keepdim=True)
            p_M = self.gather_marginal(p_M)  # avg marginal across all gpus

            if self.ema_weight_marginal is not None:
                is_ema = self.num_iteration > 5000
                if is_ema:
                    p_M = self.running_means[i_p](p_M)
                else:
                    # first few steps you update the running mean
                    _ = self.running_means[i_p](p_M)

            # D[\hat{p}(M) || Unif(\calM)]. shape: []
            # for unif prior same as maximizing entropy. Could be computed once per GPU, but fast so ok
            H_M = H_M + Categorical(probs=p_M).entropy()
            #############################

            ##### Ensure invariance and determinism of assignement #####
            # Cross-entropy between teacher views of *different* crops.
            for i_log_p, log_p_Mlza in enumerate(all_log_p_Mlz):
                if i_p == i_log_p:
                    continue
                CE_pMlz_pMlza = CE_pMlz_pMlza - (p_Mlz * log_p_Mlza).sum(-1)
                n_CE_pp += 1
            #########################

            for i_q, log_q_Mlza in enumerate(all_log_q_Mlz):
                if i_p == i_q:
                    # we skip cases where student and teacher operate on the same view
                    continue
                # KL = - H[M|Z] - E_{p(M|Z)}[log q(M|Z)]. As you want to have a deterministic
                # p(M|Z) you want to min H[M|Z]. So min KL + H[M|Z] = - E_{p(M|Z)}[log q(M|Z)]
                CE_pMlz_qMlza = CE_pMlz_qMlza - (p_Mlz * log_q_Mlza).sum(-1).mean(0)
                n_CE_pq += 1

        # Average each accumulator over the number of contributing pairs.
        CE_pMlz_qMlza /= n_CE_pq
        H_M /= len(all_p_Mlz)
        CE_pMlz_pMlza /= n_CE_pp

        fit_pM_Unif = - H_M  # want to max entropy

        if self.ema_weight_marginal is not None and is_ema:
            # try to balance the scaling in gradients due to running mean
            fit_pM_Unif = fit_pM_Unif / self.ema_weight_marginal

        # Optional linear warmup of the uniformity weight toward beta_pM_unif.
        if self.warmup_beta_unif_iter is not None and self.num_iteration < self.warmup_beta_unif_iter:
            start_beta = self.beta_H_MlZ + 1
            final_beta = self.beta_pM_unif
            warming_factor = (1 + self.num_iteration) / self.warmup_beta_unif_iter
            beta_pM_unif = start_beta + (final_beta - start_beta) * warming_factor
        else:
            beta_pM_unif = self.beta_pM_unif

        # Optional linear warmup of the invariance/determinism term.
        if self.warmup_teacher_iter is not None and self.num_iteration < self.warmup_teacher_iter:
            warming_factor = (1 + self.num_iteration) / self.warmup_teacher_iter
            CE_pMlz_pMlza = CE_pMlz_pMlza * warming_factor

        loss = CE_pMlz_qMlza + beta_pM_unif * fit_pM_Unif + self.beta_H_MlZ * CE_pMlz_pMlza

        # Periodic diagnostics, logged only on rank 0.
        if self.num_iteration % 200 == 0 and self.dist_rank == 0:
            logging.info(f"H[M]: {H_M.mean()}")
            logging.info(f"Distil: {CE_pMlz_qMlza.mean()}")
            logging.info(f"Inv + det: {CE_pMlz_pMlza.mean()}")
            H_Mlz = Categorical(probs=torch.cat(all_p_Mlz, dim=0).detach()).entropy().mean()
            logging.info(f"H[M|Z]: {H_Mlz}")

        return loss.mean()

    def __repr__(self):
        # Pretty-printed hyperparameter summary for logs and debugging.
        repr_dict = {
            "name": self._get_name(),
            "n_Mx": self.n_Mx,
            "temperature_assign": self.temperature_assign,
            "temperature_pred": self.temperature_pred,
            "num_crops": self.num_crops,
            "crops_for_assign": self.crops_for_assign,
            "beta_pM_unif": self.beta_pM_unif,
            "beta_H_MlZ": self.beta_H_MlZ,
            "ema_weight_marginal": self.ema_weight_marginal,
            "warmup_beta_unif_iter": self.warmup_beta_unif_iter,
            "warmup_teacher_iter": self.warmup_teacher_iter,
        }
        return pprint.pformat(repr_dict, indent=2)

    @staticmethod
    def gather_marginal(marginal: torch.Tensor):
        """
        Do a gather over all marginals, so we can compute the entropy.
        """
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            marginal = gather_from_all(marginal)
        else:
            marginal = marginal
        return marginal.mean(0)
class RunningMean(nn.Module):
"""Keep track of an exponentially moving average"""
def __init__(self, init: torch.tensor, alpha_use: float=0.5, alpha_store: float=0.1):
super().__init__()
assert 0.0 <= alpha_use <= 1.0
assert 0.0 <= alpha_store <= 1.0
self.alpha_use = alpha_use
self.alpha_store = alpha_store
self.register_buffer('running_mean', init.double())
def forward(self, x):
out = self.alpha_use * x + (1 - self.alpha_use) * self.running_mean.float()
# don't keep all the computational graph to avoid memory++
self.running_mean = (self.alpha_store * x.detach().double() + (1 - self.alpha_store) * self.running_mean).detach().double()
return out
|
{"hexsha": "06fb9badcd474c5b22a59eda2454d3b48612cfab", "size": 10336, "ext": "py", "lang": "Python", "max_stars_repo_path": "vissl/losses/dstl_issl_loss.py", "max_stars_repo_name": "YannDubs/vissl", "max_stars_repo_head_hexsha": "42bcd601339921833410b32db683c2001c1ad688", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vissl/losses/dstl_issl_loss.py", "max_issues_repo_name": "YannDubs/vissl", "max_issues_repo_head_hexsha": "42bcd601339921833410b32db683c2001c1ad688", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vissl/losses/dstl_issl_loss.py", "max_forks_repo_name": "YannDubs/vissl", "max_forks_repo_head_hexsha": "42bcd601339921833410b32db683c2001c1ad688", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5100401606, "max_line_length": 140, "alphanum_fraction": 0.61000387, "include": true, "reason": "import numpy", "num_tokens": 2598}
|
''' phase_uncert_thetar simulating Optical Neural Network
using Neuroptica and linearly separable datasets
Simple short functions to test trained models with Loss/MZI and Phase Uncertainty.
Author: Simon Geoffroy-Gagnon
Edit: 2020.03.28
'''
import numpy as np
import calculate_accuracy as calc_acc
import ONN_Simulation_Class as ONN_Cls
import ONN_Setups
import digital_NN_main as dnn
import create_datasets as cd
import os
import matplotlib
import training_onn
# Configure matplotlib mathtext fonts. Note the 'stix' fontset is immediately
# overridden by 'custom' below; the Bitstream Vera Sans entries define the
# custom mathtext faces. (Previously wrapped in a dead `if 1:` block, which
# added nothing — the assignments always ran.)
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams['mathtext.fontset'] = 'custom'
matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
def test_PT(onn, X, y, model, show_progress=True):
    """Sweep accuracy over independent phi/theta phase uncertainties and
    compute the figure of merit (area where accuracy exceeds zeta * peak)."""
    onn.same_phase_uncert = False
    print('\nPhi + Theta')
    onn.accuracy_PT = calc_acc.get_accuracy(onn, model, X, y, loss_diff=onn.loss_diff, show_progress=show_progress)
    acc = np.array(onn.accuracy_PT)
    above_threshold = acc > onn.zeta * np.max(acc)
    onn.PT_FoM = np.sum(above_threshold * onn.PT_Area)
    return onn, model
def test_LPU(onn, X, y, model, show_progress=True):
    """Sweep accuracy over loss + (shared) phase uncertainty and compute
    the corresponding figure of merit."""
    onn.same_phase_uncert = True
    print('\nLoss + Phase Uncertainty')
    onn.accuracy_LPU = calc_acc.get_accuracy(onn, model, X, y, loss_diff=onn.loss_diff, show_progress=show_progress)
    print("Accuracy: \n", onn.accuracy_LPU)
    acc = np.array(onn.accuracy_LPU)
    onn.LPU_FoM = np.sum((acc > onn.zeta * np.max(acc)) * onn.LPU_Area)
    return onn, model
def test_SLPU(onn, X, y, model, show_progress=True):
    """Test only the Loss/MZI sweep at 0 dB phase uncertainty; unlike
    test_LPU, no figure of merit is computed."""
    onn.same_phase_uncert = True
    print('\nLoss + Phase Uncertainty')
    accuracy = calc_acc.get_accuracy_SLPU(onn, model, X, y, loss_diff=onn.loss_diff, show_progress=show_progress)
    onn.accuracy_LPU = accuracy
    print("Accuracy: \n", accuracy)
    return onn, model
def test_onn(onn, model, show_progress=True):
    """Run both the PT and LPU sweeps on the ONN's stored test set (Xt, yt)."""
    for tester in (test_PT, test_LPU):
        onn, model = tester(onn, onn.Xt, onn.yt, model, show_progress=show_progress)
    return onn, model
|
{"hexsha": "dcebd80ab76a47bc37a2145912353b2bd0c91f77", "size": 2174, "ext": "py", "lang": "Python", "max_stars_repo_path": "Simulations/test_trained_onns.py", "max_stars_repo_name": "Xoreus/neuroptica", "max_stars_repo_head_hexsha": "2ab6771ea3cfe94a62701078a83712d2a33409e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-15T04:31:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-15T04:31:47.000Z", "max_issues_repo_path": "Simulations/test_trained_onns.py", "max_issues_repo_name": "Xoreus/neuroptica", "max_issues_repo_head_hexsha": "2ab6771ea3cfe94a62701078a83712d2a33409e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Simulations/test_trained_onns.py", "max_forks_repo_name": "Xoreus/neuroptica", "max_forks_repo_head_hexsha": "2ab6771ea3cfe94a62701078a83712d2a33409e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8076923077, "max_line_length": 121, "alphanum_fraction": 0.74425023, "include": true, "reason": "import numpy", "num_tokens": 642}
|
using Interp1d
using Random
@testset "1 input and 1 output test" begin
    # Nearest-neighbour interpolant over three knots; queries outside the
    # domain (including ±Inf) clamp to the nearest boundary value.
    xs = [-1.0, 0.0, 3.0 / 2.0]
    ys = [2.0, 1.0, 3.0]
    f = interp(xs, ys, Nearest)
    for (query, expected) in [(1.0, 3.0), (-1.0, 2.0), (-2.0, 2.0),
                              (-Inf, 2.0), (Inf, 3.0), (0, 1.0)]
        @test f(query) == expected
    end
end
@testset "1 input and 2 output test" begin
    # Two output rows: each query returns a 2-element vector, one entry
    # per output dimension.
    xs = [-1.0, 0.0, 3.0 / 2.0]
    ys = [2.0 1.0 3.0;
          -1.0 -4.4 2.0]
    f = interp(xs, ys, Nearest)
    for (query, expected) in [(1.0, [3.0, 2.0]), (-1.0, [2.0, -1.0]),
                              (-2.0, [2.0, -1.0]), (-Inf, [2.0, -1.0]),
                              (Inf, [3.0, 2.0]), (0, [1.0, -4.4])]
        @test f(query) == expected
    end
end
@testset "copy test" begin
    # With copy=true the interpolant snapshots y, so mutating the source
    # vector afterwards must not affect the already-built interpolant.
    x = [-1.0, 0.0, 3.0/2.0]
    y = [2.0, 1.0, 3.0]
    f = interp(x, y, Nearest, copy=true);
    y[2] = 10.0
    @test f(0) == 1.0
    # With copy=false the interpolant aliases y and observes the mutation.
    y = [2.0, 1.0, 3.0]
    f = interp(x, y, Nearest, copy=false);
    y[2] = 10.0
    @test f(0) == 10.0
end
@testset "sort test" begin
    # The constructor must order unsorted knots internally: a random
    # permutation of the (x, y) pairs yields the same interpolant.
    x = [-1.0, 0.0, 3.0 / 2.0]
    y = [2.0, 1.0, 3.0]
    perm = randperm(length(x))
    x = x[perm]
    y = y[perm]
    f = interp(x, y, Nearest)
    for (query, expected) in [(1.0, 3.0), (-1.0, 2.0), (-2.0, 2.0),
                              (-Inf, 2.0), (Inf, 3.0), (0, 1.0)]
        @test f(query) == expected
    end
end
|
{"hexsha": "a8db7e8ed4f68a75bce49fb854bcef3a147cc773", "size": 1246, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/NearestTest.jl", "max_stars_repo_name": "AtsushiSakai/Interp1d.jl", "max_stars_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-29T06:08:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T07:56:59.000Z", "max_issues_repo_path": "test/NearestTest.jl", "max_issues_repo_name": "AtsushiSakai/Interp1d.jl", "max_issues_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/NearestTest.jl", "max_forks_repo_name": "AtsushiSakai/Interp1d.jl", "max_forks_repo_head_hexsha": "0253a2e5669a2b65af0456d8ff161644efae37eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1186440678, "max_line_length": 42, "alphanum_fraction": 0.4406099518, "num_tokens": 661}
|
import json
import networkx as nx
class Hunter:
    def __init__(self, name, start_node, max_stamina=3):
        """
        Parameters
        ----------
        name: str
            name of the hunter
        start_node: str
            name of start node
        max_stamina: int, default: 3
            maximum stamina allowed
        """
        self.name = name
        self.start_node = start_node
        self.node = start_node
        self.stamina = max_stamina
        self.max_stamina = max_stamina
        # stamina delta associated with each supported action type
        self.actions = {
            'travel': -1,
            'group': -1,
            'solo': -2,
            'rest': 2,
        }
    def action(self, action_type, action_param=''):
        """
        Emulates action done by hunter

        Parameters
        ----------
        action_type: {'travel', 'group', 'solo', 'rest'}
            type of action to be carried out
        action_param: str, default: ''
            parameter input for action if applicable
            eg: node name for travel action
        """
        if (action_type not in self.actions):
            raise Exception("Action type must be one of {'travel', 'group', 'solo', 'rest'}")
        # apply the stamina delta, clamping the result to [0, max_stamina]
        updated = self.stamina + self.actions[action_type]
        if (updated < 0):
            raise Exception('Stamina cannot be lower than 0.')
        self.stamina = min(updated, self.max_stamina)
        if (action_type == 'travel'):
            self.node = action_param
    def reset(self):
        """
        Resets hunter attributes to their default value
        """
        self.stamina = self.max_stamina
        self.node = self.start_node
class Jungle:
    def __init__(self, hunting_map, start_node, end_node):
        """
        Parameters
        ----------
        hunting_map: dict
            dictionary of adjacency lists
        start_node: str
            name of start node
        end_node: str
            name of end node
        """
        if (start_node not in hunting_map):
            raise Exception('Start node must be in hunting map.')
        if (end_node not in hunting_map):
            raise Exception('End node must be in hunting map.')
        self.map = self.create_network(hunting_map)
        self.start_node = start_node
        self.end_node = end_node
    def create_network(self, hunting_map):
        """
        Returns a NetworkX directed graph built from the adjacency lists
        Parameters
        ----------
        hunting_map: dict
        """
        G = nx.DiGraph()
        for source in hunting_map:
            for destination in hunting_map[source]:
                G.add_edge(source, destination)
        return G
    def reset(self, hunter=2, boar=3):
        """
        Resets network attributes to their default value
        Parameters
        ----------
        hunter: int, default: 2
            total number of hunters
        boar: int, default: 3
            initial number of boars per node
        """
        nodes = list(self.map.nodes)
        # bug fix: the boar count was hard-coded to 3 and silently ignored
        # the `boar` parameter; use the parameter (default behavior unchanged)
        attributes = {node: {'hunter': 0, 'boar': boar} for node in nodes}
        attributes[self.start_node]['hunter'] = hunter  # initialize no. of hunters in the first node
        nx.set_node_attributes(self.map, attributes)
    def generate_paths(self):
        """
        Returns a list of every simple path from start node to end node
        """
        return list(nx.all_simple_paths(self.map, self.start_node, self.end_node))
    def hunter_count(self, node):
        """
        Returns an integer for hunter count in given node
        Parameters
        ----------
        node: str
        """
        if (node not in self.map):
            raise Exception('Node must be in hunting map.')
        return self.map.nodes[node]['hunter']
    def boar_count(self, node):
        """
        Returns an integer for boar count in given node
        Parameters
        ----------
        node: str
        """
        if (node not in self.map):
            raise Exception('Node must be in hunting map.')
        return self.map.nodes[node]['boar']
    def boar_caught(self, node):
        """
        Decrements the boar count in given node; raises if no boars remain
        Parameters
        ----------
        node: str
        """
        if not self.boar_count(node):
            raise Exception('No boars left in node {}'.format(node))
        self.map.nodes[node]['boar'] -= 1
    def hunter_move(self, current_node, next_node):
        """
        Updates the number of hunters in affected nodes
        Parameters
        ----------
        current_node: str
        next_node: str
        """
        self.map.nodes[current_node]['hunter'] -= 1
        self.map.nodes[next_node]['hunter'] += 1
class Simulator:
    def __init__(self, hunters, jungle, json):
        """
        Parameters
        ----------
        hunters: list of Hunter
        jungle: Jungle
        json: str
            path of the JSON file results are dumped to; a falsy value
            (e.g. '') disables dumping
        """
        self.hunters = hunters
        self.jungle = jungle
        self.json = json
        # one result dict per simulated path (see append_results)
        self.results = []
        self.turn = 0
        self.catch = 0
    def in_choppa(self):
        """
        Returns a boolean
        True if all hunters are in the end node
        """
        return len(self.hunters) == self.jungle.hunter_count(self.jungle.end_node)
    def reset(self):
        """
        Resets the simulator count and catch
        """
        self.turn = 0
        self.catch = 0
        # also resets the jungle node attributes and every hunter's state
        self.jungle.reset()
        for hunter in self.hunters:
            hunter.reset()
    def next_node(self, path, node):
        """
        Returns a string for name of next node
        Parameters
        ----------
        path: list of str
        node: str
        """
        if (node not in path):
            raise Exception('Node must be in path.')
        # raises IndexError if `node` is the last element of `path`
        current_index = path.index(node)
        return path[current_index + 1]
    def boar_caught(self, node):
        """
        Updates the catch and no. of boars left in the node
        Parameters
        ----------
        node: str
        """
        self.jungle.boar_caught(node)
        self.catch += 1
    def same_node(self, node):
        """
        Returns a boolean, True if all hunters are in the node
        Parameters
        ----------
        node: str
        """
        return len(self.hunters) == self.jungle.map.nodes[node]['hunter']
    def append_results(self, path):
        """
        Appends result after each iteration
        Parameters
        ----------
        path: list of str
        """
        self.results.append({'turn': self.turn, 'catch': self.catch, 'path': path})
    def run(self):
        """
        Brains of the whole operation: simulates every simple path through
        the jungle, recording turns taken and boars caught per path, then
        prints the results and optionally dumps them to JSON.
        """
        # error checking
        if len(self.hunters) < 1:
            raise Exception('At least one hunter is needed.')
        # path generation
        paths = self.jungle.generate_paths()
        # iterate over paths
        for path in paths:
            if (len(path) < 2):
                raise Exception('A path must have at least 2 nodes.')
            self.reset()
            while (not self.in_choppa()):
                self.turn += 1
                for i, hunter in enumerate(self.hunters):
                    if (hunter.node == self.jungle.end_node):
                        # hunter in choppa
                        continue
                    if (hunter.stamina == 0):
                        # hunter needs rest
                        hunter.action('rest')
                    elif (self.jungle.boar_count(hunter.node) == 0):
                        # no boars
                        next_node = self.next_node(path, hunter.node)
                        self.jungle.hunter_move(hunter.node, next_node)
                        hunter.action('travel', action_param=next_node)
                    elif (hunter.stamina >= 2):
                        # hunter able to hunt solo
                        hunter.action('solo')
                        self.boar_caught(hunter.node)
                    else:
                        if (all([otherHunter.stamina >= 1 for otherHunter in self.hunters]) and
                            self.same_node(hunter.node)
                        ):
                            # hunters able to hunt together
                            # NOTE(review): the inner loop rebinds `hunter`,
                            # shadowing the outer loop variable; since
                            # same_node() guarantees every hunter shares this
                            # node, boar_caught(hunter.node) still hits the
                            # right node.
                            for hunter in self.hunters:
                                hunter.action('group')
                            self.boar_caught(hunter.node)
                            # NOTE(review): this break ends the whole round
                            # after one group hunt, so remaining hunters skip
                            # their turn — confirm that is intended.
                            break
                        hunter.action('rest')
            self.append_results(path)
        # print results
        for result in self.results:
            print('No. of turns: {}'.format(result['turn']))
            print('No. of catch: {}'.format(result['catch']))
            print('Path: {}\n'.format(result['path']))
        # dump to json file if specified
        if (self.json):
            with open(self.json, 'w', encoding='utf8') as output_file:
                json.dump(self.results, output_file)
                print('Results dumped to {}'.format(output_file.name))
|
{"hexsha": "d66527eda386102d268e98b9825da728bd6b719e", "size": 7759, "ext": "py", "lang": "Python", "max_stars_repo_path": "classes.py", "max_stars_repo_name": "SwTan98/supahands-coding-test", "max_stars_repo_head_hexsha": "3c59c3afaeb526a5248966393cdd54bd9923a021", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classes.py", "max_issues_repo_name": "SwTan98/supahands-coding-test", "max_issues_repo_head_hexsha": "3c59c3afaeb526a5248966393cdd54bd9923a021", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classes.py", "max_forks_repo_name": "SwTan98/supahands-coding-test", "max_forks_repo_head_hexsha": "3c59c3afaeb526a5248966393cdd54bd9923a021", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0348432056, "max_line_length": 96, "alphanum_fraction": 0.5969841474, "include": true, "reason": "import networkx", "num_tokens": 1908}
|
"""
Implementation of different indirect quantization algorithms
"""
import numpy as np
import scipy.linalg as sla
import quadprog
import distributed_training.optimum_quantizer as opt_quantizer
class DeterministicISGQuantizer:
    """
    implementation of the indirect quantization, G=X' Y.
    naive: X and Y are quantized independently according to their expected distribution
    mv: First, X and Y are quantized independently according to their expected distribution.
        Next, the reconstruction points are optimized to minimize the error |G-X'Y|
    mvu: First, X and Y are quantized independently according to their expected distribution.
        Then, the reconstruction points are optimized such that |G-X'Y| is minimized subject to sum(G-X'Y)=0
    """
    # initialize the quantizer engine, the supported quantization levels and data models
    def __init__(self, num_levels=(2, 4, 8), models=('sn', 'sfn', 'u', 'su'), sparsity_thr=1e-6):
        self._num_levels = num_levels
        self._models = models
        # one OptimumQuantizer per distribution model, pre-initialized for
        # every supported number of levels
        self._quantizers = {}
        for t in self._models:
            q = opt_quantizer.OptimumQuantizer()
            q.initialize_quantizer(model=t, num_levels=self._num_levels, sparsity_thr=sparsity_thr)
            self._quantizers[t] = q
    # quantize the input signals based on their distribution model, number of quantization levels and method
    def quantize(self, X, Y, model=('sfn', 'sn'), num_levels=(2, 2), method='naive', opt_iterations=1):
        # X and Y are always quantized independently first; this alone is
        # the 'naive' method. qX/qY are bin indices, cX/cY the centers.
        qX, cX = self._quantize(X, model[0], num_levels[0])
        qY, cY = self._quantize(Y, model[1], num_levels[1])
        if method == 'mv':
            # optimize the centers of the bins for the minimum variance indirect quantizer
            G = np.matmul(X.transpose(), Y)
            # optimize for the centers of the quantizers
            # (alternating updates of cY and cX, in place; a leading 's' in
            # the model name keeps zero centers fixed)
            # NOTE(review): keep_sparsity uses model[0] while optimizing Y's
            # centers and model[1] for X's — the indices look swapped; confirm.
            for _ in range(opt_iterations):
                X_hat = cX[qX]
                optimize_centers_mviq(A=X_hat.transpose(), B=G, Q=qY, centers=cY, keep_sparsity=(model[0][0] == 's'))
                Y_hat = cY[qY]
                optimize_centers_mviq(
                    A=Y_hat.transpose(), B=G.transpose(), Q=qX, centers=cX, keep_sparsity=(model[1][0] == 's')
                )
        elif method == 'mvu':
            # optimize the centers of the bins for the minimum variance unbiased indirect quantizer
            G = np.matmul(X.transpose(), Y)
            # optimize for the centers of the quantizers
            for _ in range(opt_iterations):
                X_hat = cX[qX]
                optimize_centers_mvuiq(A=X_hat.transpose(), B=G, Q=qY, centers=cY, keep_sparsity=(model[0][0] == 's'))
                Y_hat = cY[qY]
                optimize_centers_mvuiq(
                    A=Y_hat.transpose(), B=G.transpose(), Q=qX, centers=cX, keep_sparsity=(model[1][0] == 's')
                )
        return qX, cX, qY, cY
    def _quantize(self, X, model, num_levels):
        # 1- if necessary, normalize x
        if model in ('uniform', 'u', 'sparse-uniform', 'su'):
            scale = 1.0
        else:
            # RMS of the (significantly) non-zero entries of X
            scale = sla.norm(X) / np.sqrt(np.count_nonzero(np.abs(X) > 1e-10) + 1e-12)
        y = X / scale
        qX, cX = self._quantizers[model].quantize(y, num_levels)
        # undo the normalization on the reconstruction points
        cX = scale * cX
        return qX, cX
# =============================================================================
def optimize_centers_mviq(A, B, Q, centers, keep_sparsity=True):
    r""" minimize reconstruction error after weighting by matrix A
        min_{c_i} \|A.(\sum_i Q_i c_i) - B\|_F^2

    (raw docstring: the original used \| and \s escapes in a normal string,
    which is an invalid-escape warning in modern Python)

    Parameters
    ----------
    A: 2-D weighting matrix
    B: 2-D target matrix
    Q: integer array of quantization-bin indices, entries in [0, len(centers))
    centers: 1-D array of reconstruction points; updated in place
    keep_sparsity: bool
        when True, bins whose center is exactly 0 (sparse values) are
        never re-optimized

    Returns
    -------
    centers: the same array object after the in-place update (also returned
        unchanged when no bin qualifies for optimization)
    """
    num_levels = len(centers)
    # bins whose weighted indicator norm is below this threshold have a
    # negligible effect on the reconstruction
    thr = sla.norm(A) * 1e-6
    # 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process
    valid_idx = []
    AQ = [np.zeros(1) for _ in range(num_levels)]
    for i in range(num_levels):
        AQ[i] = np.matmul(A, Q == i)
        if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):
            # check whether the i-th bin has any effect on the quantization performance and
            # do not consider sparse values (center=0)
            valid_idx += [i]
    if not valid_idx:
        # consistency fix: previously this path returned None while the
        # normal path returned `centers`
        return centers
    # 2- find the optimum reconstruction points for the non-empty quantization bins
    # 2.a- create matrix M, used in the optimization problem
    num_valid = len(valid_idx)
    M = np.zeros(shape=(num_valid, num_valid))
    e = np.zeros(shape=num_valid)
    for r in range(num_valid):
        for c in range(r, num_valid):
            # np.trace(np.matmul(AQ[valid_idx[c]].transpose(), AQ[valid_idx[r]]))
            M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])
            M[c, r] = M[r, c]
        # np.trace(np.matmul(B.transpose(), AQ[valid_idx[r]]))
        e[r] = np.sum(AQ[valid_idx[r]] * B)
    # 2.b- solve for Mx=e (least squares copes with a singular M)
    v = sla.lstsq(M, e)[0]
    # 3- copy the found center points
    centers[valid_idx] = v
    return centers
def optimize_centers_mvuiq(A, B, Q, centers, keep_sparsity=True):
    r""" minimize reconstruction error after weighting by matrix A and make it unbiased
        min_{c_i} \|A.(\sum_i Q_i c_i) - B\|_F^2 such that sum(B-A(\sum_i Q_i c_i)) = 0

    (raw docstring: the original used \| and \s escapes in a normal string,
    which is an invalid-escape warning in modern Python)

    Parameters
    ----------
    A: 2-D weighting matrix
    B: 2-D target matrix
    Q: integer array of quantization-bin indices, entries in [0, len(centers))
    centers: 1-D array of reconstruction points; updated in place
    keep_sparsity: bool
        when True, bins whose center is exactly 0 (sparse values) are
        never re-optimized

    Returns
    -------
    centers: the same array object after the in-place update (also returned
        unchanged when no bin qualifies for optimization)
    """
    num_levels = len(centers)
    thr = sla.norm(A) * 1e-6
    # 1- compute A*(Q==i) and store it. find the non-empty quantization bins in the process
    valid_idx = []
    AQ = [np.zeros(1) for _ in range(num_levels)]
    for i in range(num_levels):
        AQ[i] = np.matmul(A, Q == i)
        if (sla.norm(AQ[i]) >= thr) and ((centers[i] != 0) or not keep_sparsity):
            # check whether the i-th bin has any effect on the quantization performance and
            # do not consider sparse values (center=0)
            valid_idx += [i]
    if not valid_idx:
        # consistency fix: previously this path returned None while the
        # normal path returned `centers`
        return centers
    # 2- find the optimum reconstruction points for the non-empty quantization bins
    # 2.a- create matrix M, used in the optimization problem
    num_valid = len(valid_idx)
    d = np.sum(B)
    f = np.zeros(num_valid)
    M = np.zeros(shape=(num_valid, num_valid))
    e = np.zeros(shape=num_valid)
    for r in range(num_valid):
        f[r] = np.sum(AQ[valid_idx[r]])
        for c in range(r, num_valid):
            # trace(AQ[valid_idx[c]].T @ AQ[valid_idx[r]])
            M[r, c] = np.sum(AQ[valid_idx[c]] * AQ[valid_idx[r]])
            M[c, r] = M[r, c]
        # trace(B.T @ AQ[valid_idx[r]])
        e[r] = np.sum(AQ[valid_idx[r]] * B)
    # 2.b- solve for min |Mx-e| such that fx=d
    if num_valid == 0:
        v = 0
    elif num_valid == 1:
        # a single free center is fully determined by the unbiasedness constraint
        v = d / f[0]
    elif num_valid == 2:
        # for the special binary case, the solution can be found easily
        scale = sla.norm(f) + 1e-12
        f /= scale
        d /= scale
        u = np.array([-f[1], f[0]])
        a = (e - d * M.dot(f)).dot(u) / (M.dot(u).dot(u) + 1e-12)
        v = d * f + a * u
    else:
        # use quadratic programming (Goldfarb-Idnani algorithm) to solve the problem
        # bug fix: np.float was removed in NumPy 1.24; use the builtin float
        d = np.array([d]).astype(float)
        f = np.reshape(f, newshape=(-1, 1))
        v = quadprog.solve_qp(M, e, f, d, 1)[0]
    # 3- copy the found center points
    centers[valid_idx] = v
    return centers
# =============================================================================
class DitheredISGQuantizer:
    """
    implementation of the indirect quantization, G=X' Y, using random dithered quantization
    """
    def set_seed(self, seed):
        # seeds the global NumPy RNG; quantize/dequantize draw their dither from it
        np.random.seed(seed)
    # dithered quantization
    def quantize(self, W, num_levels=2, sparse=True, bucket_size=None):
        """
        the input tensor is reshaped into vector form and divided into buckets of length d.
        it uses maximum value of the vector as the scaling parameter for quantization.
        The output scale is such that by multiplying it with quantized values, the points will be reconstructed.
        :param W: input tensor to be quantized
        :param bucket_size: bucket size
        :param num_levels: number of levels for quantizing W, output will be in the range
                           [-num_levels, ..., +num_levels]
        :param sparse: when True, (near-)zero entries of W are marked with the
                       sentinel value (num_levels + 1) instead of being dithered
        :return: quantized values and the scale
        """
        if bucket_size is None:
            bucket_size = W.size
        if W.size % bucket_size != 0:
            raise ValueError('the number of variables must be divisible by the bucket size.')
        w = np.reshape(W, newshape=(-1, bucket_size))
        # 1- normalize w to become in [-num_levels, num_levels]
        max_w = np.amax(np.abs(w), axis=1) + 1e-12
        scale = max_w / num_levels
        y = w / scale[:, np.newaxis]
        # 2- generate dither, add it to y and then quantize
        u = np.random.uniform(-0.5, 0.5, size=y.shape)
        # an integer number in the range -num_levels or 0, ..., num_levels
        # NOTE(review): stored as int8, so the sparse sentinel (num_levels + 1)
        # assumes num_levels <= 126 — confirm for larger level counts.
        q = np.around(y + u).astype(np.int8)
        Q = np.reshape(q, newshape=W.shape)
        if sparse:
            # quantize 0 values separately
            Q[np.abs(W) < 1e-12] = num_levels + 1
        return Q, scale
    def dequantize(self, Q, scale, num_levels=2, sparse=True, bucket_size=None):
        """
        dequantize the received quantized values, using the bucket size d and scales
        :param Q: quantized values
        :param scale: scale to multiply to the quantized values to reconstruct the original data
        :param bucket_size: bucket size
        :return: ndarray of the same shape as Q, dequantized values
        """
        if bucket_size is None:
            bucket_size = Q.size
        if Q.size % bucket_size != 0:
            raise ValueError('the number of variables must be divisible by the bucket size.')
        # NOTE(review): a fresh dither is drawn here (subtractive dithering);
        # this matches the encoder's dither only if the receiver's RNG is
        # synchronized via set_seed — confirm with the calling protocol.
        if bucket_size == Q.size:
            u = np.random.uniform(-0.5, 0.5, size=Q.shape)
            W = scale[0] * (Q - u)
        else:
            q = np.reshape(Q, (-1, bucket_size))
            u = np.random.uniform(-0.5, 0.5, size=q.shape)
            w = (q - u) * scale[:, np.newaxis]
            W = np.reshape(w, newshape=Q.shape)
        # check for the sparse dequantization
        if sparse:
            W[Q == (num_levels + 1)] = 0
        return W
|
{"hexsha": "a9e2126760620448b606890b0643b7e5626ec03a", "size": 10108, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantizers/isgq_quantizer.py", "max_stars_repo_name": "afshinabdi/DistributedTraining-ISGQ", "max_stars_repo_head_hexsha": "0ffbaae481798e4b71c98fc54b8fb8b1ec4be059", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-14T05:23:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-27T06:27:26.000Z", "max_issues_repo_path": "quantizers/isgq_quantizer.py", "max_issues_repo_name": "afshinabdi/DistributedTraining-ISGQ", "max_issues_repo_head_hexsha": "0ffbaae481798e4b71c98fc54b8fb8b1ec4be059", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-10-06T06:44:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-06T11:35:12.000Z", "max_forks_repo_path": "quantizers/isgq_quantizer.py", "max_forks_repo_name": "afshinabdi/DistributedTraining-ISGQ", "max_forks_repo_head_hexsha": "0ffbaae481798e4b71c98fc54b8fb8b1ec4be059", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-05T08:48:12.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-05T08:48:12.000Z", "avg_line_length": 38.2878787879, "max_line_length": 118, "alphanum_fraction": 0.5851800554, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2680}
|
[STATEMENT]
lemma index_upt[simp]: "m \<le> i \<Longrightarrow> i < n \<Longrightarrow> index [m..<n] i = i-m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>m \<le> i; i < n\<rbrakk> \<Longrightarrow> index [m..<n] i = i - m
[PROOF STEP]
by (induction n) (auto simp add: index_append)
|
{"llama_tokens": 122, "file": "List-Index_List_Index", "length": 1}
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.nn import init
from torch.utils.data import Dataset, DataLoader
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
from matplotlib.widgets import TextBox
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from fastai import *
from fastai.text import *
from fastai.vision import *
from fastai.imports import *
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
from sklearn import metrics
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import RandomOverSampler
from collections import OrderedDict
import json
import subprocess
import sys
import time
import xml.etree.ElementTree
import os
import io
import random
from scipy.stats import norm
from scipy.stats import genextreme
######################################################################
# multitask dataset subclassing torch's Dataset
class MultitaskDataset(Dataset):
    "`Dataset` for joint single and multi-label image classification."
    def __init__(self, data, labels_fold, labels_fam, paddings, cuda = True):
        """
        Parameters
        ----------
        data: tensor of input samples
        labels_fold: tensor of fold labels (one per sample)
        labels_fam: tensor of family labels (one per sample)
        paddings: tensor of per-sample padding sizes; stored as the
            effective length 798 - 2 * padding
            # NOTE(review): 798 appears to be the fixed padded sequence
            # length — confirm against the preprocessing pipeline
        cuda: bool, default: True
            move returned tensors to GPU when True
        """
        self.data = torch.FloatTensor(data.float())
        self.y_fam = torch.FloatTensor(labels_fam.float())
        self.y_fold = torch.FloatTensor(labels_fold.float())
        self.paddings = torch.FloatTensor(798-2*paddings.float())
        self.cuda = cuda
    def __len__(self): return len(self.data)
    def __getitem__(self,i:int):
        # fix: the stored tensors are already FloatTensors (see __init__);
        # re-wrapping them in torch.FloatTensor made extra copies and relied
        # on deprecated construction from a list of tensors — use direct
        # indexing and torch.stack instead (same values returned).
        x = self.data[i]
        y = torch.stack([self.y_fold[i], self.y_fam[i]])
        p = self.paddings[i]
        if self.cuda:
            return x.cuda(), y.cuda(), p.cuda()
        return x, y, p
# a helper function to load the data into custom dataset
def Dataset_Loader(df, le_fam, le_fold, vocab, BATCH_SIZE, cuda = True):
    """Build a MultitaskDataset and DataLoader from a dataframe holding
    tokenized sequences plus 'fold', 'family' and 'paddings' columns."""
    tokens = torch.LongTensor(Map_Tokens(df.q3seqTokens, vocab))
    # NOTE(review): fit_transform refits the label encoders on this frame;
    # if called on a test split this can remap label ids — confirm intended.
    fold_labels = torch.LongTensor(le_fold.fit_transform(df["fold"].values.ravel()))
    fam_labels = torch.LongTensor(le_fam.fit_transform(df["family"].values.ravel()))
    pads = torch.LongTensor(df["paddings"].values.ravel())
    ds = MultitaskDataset(tokens, fold_labels, fam_labels, pads, cuda)
    dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=False, pin_memory=False)
    return ds, dl
# a helper function for mapping strings to onehot code
def Map_Tokens(data, vocab):
    """Map each token sequence to its vocabulary indices, silently dropping
    tokens absent from the vocab."""
    return [[vocab[token] for token in tokens if token in vocab]
            for tokens in data]
# a helper function for train test validation split
def Train_Test_Val_split(df, strantify_type="fold", split_rate=0.1, random_state=2020):
    """Stratified train/test/validation split where test and validation each
    receive `split_rate` of the full dataframe."""
    train_df, test_df = train_test_split(
        df, test_size=split_rate, random_state=random_state,
        stratify=df[strantify_type])
    # rescale so the validation set is split_rate of the ORIGINAL dataframe
    val_fraction = split_rate / (1 - split_rate)
    train_df, val_df = train_test_split(
        train_df, test_size=val_fraction, random_state=random_state,
        stratify=train_df[strantify_type])
    print(str(len(train_df))+" "+str(len(test_df))+" "+str(len(val_df)))
    return train_df, test_df, val_df
# a helper function to plot the confusion matrix
def plot_confusion_matrix(cm, labels_name, title):
    """Plot a row-normalized confusion matrix with labelled ticks.

    Parameters
    ----------
    cm: square ndarray of raw counts
    labels_name: sequence of class labels for both axes
    title: str figure title
    """
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]  # normalize each row to proportions
    plt.imshow(cm, interpolation='nearest')  # render the matrix as an image
    plt.title(title)  # figure title
    plt.colorbar()
    num_local = np.array(range(len(labels_name)))
    plt.xticks(num_local, labels_name, rotation=90)  # class labels on the x axis
    plt.yticks(num_local, labels_name)  # class labels on the y axis
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    # fix: pass the flag positionally — the keyword was renamed b -> visible
    # in matplotlib 3.5, so `b=True` breaks on newer versions; the positional
    # form works on both old and new releases
    plt.grid(True)
    plt.plot()
# Evaluate the model
def model_evaluation(model, le_fam, le_fold, Test_dl, cm = "fold"):
    """
    Evaluate a two-head model on Test_dl: print classification reports for
    both heads (fold and family) and plot one confusion matrix.

    Parameters
    ----------
    model: the trained model; model(xb) must return (fold_logits,
        family_logits, extra)
    le_fam, le_fold: fitted label encoders used to map predicted indices
        back to class names
    Test_dl: DataLoader yielding (inputs, labels) batches
        # NOTE(review): MultitaskDataset in this file yields
        # (x, y, padding) triples; this loop unpacks pairs — confirm
        # Test_dl actually produces 2-tuples.
    cm: {"fold", *}, default: "fold"
        which head's confusion matrix to plot
    """
    try:
        model.eval()
        with torch.no_grad():
            _predictions_1 = []
            _predictions_2 = []
            _gt_1 = []
            _gt_2 = []
            for xb, yb in Test_dl:
                # head 1 = fold, head 2 = family; argmax over class dim
                output1, output2, _ = model(xb)
                _, predicted1 = torch.max(output1, 1)
                _, predicted2 = torch.max(output2, 1)
                _predictions_1.extend(predicted1.cpu().numpy())
                _predictions_2.extend(predicted2.cpu().numpy())
                _gt_1.extend(yb[:,0].cpu().numpy())
                _gt_2.extend(yb[:,1].cpu().numpy())
            # map indices back to human-readable labels
            _predictions_1 = le_fold.inverse_transform(_predictions_1)
            _predictions_2 = le_fam.inverse_transform(_predictions_2)
            #print(_gt_2)
            # ground truth is stored as floats; cast before inverse_transform
            _gt_1 = list(map(int, _gt_1))
            _gt_1 = le_fold.inverse_transform(_gt_1)
            _gt_2 = list(map(int, _gt_2))
            _gt_2 = le_fam.inverse_transform(_gt_2)
            print(classification_report(_gt_1, _predictions_1))
            print(classification_report(_gt_2, _predictions_2))
            #plot confusion matrix
            # NOTE(review): df_large_zero is a global defined outside this
            # file (e.g. in the calling notebook) — confirm it is in scope.
            if cm == "fold":
                cm1 = plot_confusion_matrix(confusion_matrix(_gt_1, _predictions_1), df_large_zero["fold"].unique(), "Confusion Matrix Fold")
            else:
                cm2 = plot_confusion_matrix(confusion_matrix(_gt_2, _predictions_2), df_large_zero["family"].unique(), "Confusion Matrix Family")
    except RuntimeError as exception:
        # best-effort recovery from CUDA OOM; re-raise anything else
        if "out of memory" in str(exception):
            print("WARNING: out of memory")
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
        else:
            raise exception
####################################################
## Functino for plotting the boxplot of RE scores
def gen_boxplot(train, test):
    """Draw a horizontal boxplot of reconstruction-error (RE) scores per family.

    The train frame's 'Family' values are reduced to the text after the first
    '-' and concatenated with the test frame; families are ordered by the
    median test 'Err' with 'A', 'B', 'C', 'lyso' forced to the front.
    Three vertical threshold lines are overlaid. Returns the seaborn axes.
    """
    train_copy = train.copy()  # train_all
    train_copy['Family'] = train_copy['Family'].map(lambda fam: fam.split('-')[1])
    combined = pd.concat([train_copy, test])
    order = test.groupby('Family').median().sort_values('Err').index.tolist()
    order[:0] = ['A', 'B', 'C', 'lyso']
    plt.figure(figsize=(15, 15))
    ax = sns.boxplot(x='Err', y='Family', data=combined, order=order)
    # Threshold guides: (x position, line width, color).
    for threshold, width, color in ((0.107, 3, "red"),
                                    (0.127, 1, "blue"),
                                    (0.147, 3, "red")):
        ax.axvline(threshold, alpha=0.9, linestyle=":", linewidth=width, color=color)
    ax.set_xlim(0, 0.5)
    return ax
####################################################
## Calculate the FAS scores
def calcSubScore(row, re_values):
    """Compute the nine FAS sub-scores for one result row.

    Parameters
    ----------
    row : pd.Series
        Positions 1-9 hold the RE values to score; the corresponding index
        entries name the columns used to look up thresholds.
    re_values : mapping
        Nested mapping with 'SC', 'OOC' and 'OOF' thresholds keyed by
        column name.

    Returns
    -------
    pd.Series
        The nine sub-scores, in column order.
    """
    allval = []
    for i in range(1, 10):
        cname = row.index[i]
        # Bug fix: use .iloc for positional access; ``row[i]`` with an integer
        # key on a label-indexed Series is deprecated and removed in pandas >= 2/3.
        val = float(row.iloc[i])
        sc = float(re_values['SC'][cname])
        ooc = float(re_values['OOC'][cname])
        oof = float(re_values['OOF'][cname])
        score = ((((ooc - val) / (oof - sc)) * (oof - ooc)) - 0.014) * 100
        allval.append(score)
    return pd.Series(allval)
|
{"hexsha": "8334d16504f4385813a8f3044cd751f3b224ae5c", "size": 7392, "ext": "py", "lang": "Python", "max_stars_repo_path": "Codes/Utils_auto.py", "max_stars_repo_name": "esbgkannan/GT-CNN", "max_stars_repo_head_hexsha": "4f3828d7ed8f6c3ed796fa4e2e166ef5c16cb3d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-16T15:48:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-16T15:48:54.000Z", "max_issues_repo_path": "Codes/Utils_auto.py", "max_issues_repo_name": "Douglas2Code/GT-CNN", "max_issues_repo_head_hexsha": "e86d5ddd7ef2b9d968a10462dcca5f1c5ca6495c", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Codes/Utils_auto.py", "max_forks_repo_name": "Douglas2Code/GT-CNN", "max_forks_repo_head_hexsha": "e86d5ddd7ef2b9d968a10462dcca5f1c5ca6495c", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-17T01:26:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-17T01:26:50.000Z", "avg_line_length": 37.9076923077, "max_line_length": 158, "alphanum_fraction": 0.6516504329, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1794}
|
import numpy as np
from torchio.transforms import RandomDownsample
from ...utils import TorchioTestCase
class TestRandomDownsample(TorchioTestCase):
    """Tests for `RandomDownsample`."""

    def test_downsample(self):
        """Downsampling axis 1 by a factor of 2 doubles the spacing there."""
        transform = RandomDownsample(axes=1, downsampling=(2., 2.))
        transformed = transform(self.sample)
        self.assertEqual(self.sample.spacing[1] * 2, transformed.spacing[1])

    def test_out_of_range_axis(self):
        self.assertRaises(ValueError, RandomDownsample, axes=3)

    def test_out_of_range_axis_in_tuple(self):
        self.assertRaises(ValueError, RandomDownsample, axes=(0, -1, 2))

    def test_wrong_axes_type(self):
        self.assertRaises(ValueError, RandomDownsample, axes='wrong')

    def test_wrong_downsampling_type(self):
        self.assertRaises(ValueError, RandomDownsample, downsampling='wrong')

    def test_below_one_downsampling(self):
        self.assertRaises(ValueError, RandomDownsample, downsampling=0.2)
|
{"hexsha": "4003e04cb89cd5c1581f4aae4000ee7e50673fcd", "size": 1104, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/transforms/augmentation/test_random_downsample.py", "max_stars_repo_name": "OeslleLucena/torchio", "max_stars_repo_head_hexsha": "4c09bc78a15464f9bf0f0eec487828cf5fe566c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-22T21:49:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T21:49:30.000Z", "max_issues_repo_path": "tests/transforms/augmentation/test_random_downsample.py", "max_issues_repo_name": "neuronflow/torchio", "max_issues_repo_head_hexsha": "1d0a5ad069c59d74ec56ed6f340c87e9636a1488", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/transforms/augmentation/test_random_downsample.py", "max_forks_repo_name": "neuronflow/torchio", "max_forks_repo_head_hexsha": "1d0a5ad069c59d74ec56ed6f340c87e9636a1488", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6666666667, "max_line_length": 76, "alphanum_fraction": 0.6757246377, "include": true, "reason": "import numpy", "num_tokens": 228}
|
import os
import numpy as np
import torch
class MLP(torch.nn.Module):
    """Three-hidden-layer perceptron mapping an 88-dim state vector to
    3 friction-class logits plus a 64-dim latent embedding."""

    def __init__(self):
        super(MLP, self).__init__()
        # Attribute names are part of the saved state_dict -- do not rename.
        self.layer1 = torch.nn.Linear(44 * 2, 128)
        self.layer2 = torch.nn.Linear(128, 128)
        self.layer3 = torch.nn.Linear(128, 64)
        self.out = torch.nn.Linear(64, 3)  # logits for frictions [3.0, 0.1, 0.5]
        self.activation = torch.nn.ReLU()

    def forward(self, x):
        """Return (class logits, latent features) for input `x`."""
        hidden = self.activation(self.layer1(x))
        hidden = self.activation(self.layer2(hidden))
        latent = self.layer3(hidden)
        return self.out(latent), latent
class PerceptionModule(object):
    """Wraps the pretrained MLP to classify floor friction from recent frames."""

    def __init__(self):
        # Number of consecutive frames required before a prediction is made.
        self.history_length = 3
        self.queue = []
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model = MLP().to(self.device)  # [3.0, 0.1, 0.5]
        # Load pretrained weights stored next to this module on disk.
        self.model.load_state_dict(torch.load(os.path.dirname(
            os.path.realpath(__file__)) + '/mlp_perception.pth'))
        # Inference only: freeze all parameters and switch to eval mode.
        for param in self.model.parameters():
            param.requires_grad = False
        self.model.eval()

    # Reformat received frame to mlp input
    def format_data(self, frame, speeds):
        """Flatten one frame plus wheel speeds into a 44-item list."""
        # Flattens 5x3 agents position data to 1x15
        def flatten_agent_coord(posture): return [pos[i] for pos in posture for i in range(3)]
        cur_ball = frame.coordinates[2]  # Current ball posture 1x3
        cur_spd = [speeds[6*i:6*i+6][j]
                   for i in range(5) for j in range(2)]  # Extract wheel speeds of ally agents, 1x10
        cur_pos = flatten_agent_coord(frame.coordinates[0])  # Current ally posture 1x15
        cur_pos_opp = flatten_agent_coord(frame.coordinates[1])  # Current opponent posture 1x15
        # 44 items: state + ball xyz + ally spd lr wheel + ally pos xyz + opponent pos xyz
        return [frame.game_state] + cur_ball + cur_spd + cur_pos + cur_pos_opp

    def perceive(self, frame, speeds, formatted_frame=None):
        """Return (predicted label index, latent vector), or (None, None)
        until `history_length` frames have accumulated."""
        # Convert received frame to mlp input format
        if formatted_frame is None:
            formatted_frame = self.format_data(frame, speeds)
        # A queue is used to keep 3 recent frames to be used for friction recognition
        self.queue.append(formatted_frame)
        if len(self.queue) < self.history_length:
            return None, None
        self.queue = self.queue[-self.history_length:]
        frames = np.asarray(self.queue)
        assert frames.shape[-1] == 44  # each frame should have 44 items
        x = []
        for frame_idx in range(1, self.history_length):  # convert positions to position diffs
            temp_buffer = frames[frame_idx].copy()
            # Items 14 onward (positions) become deltas vs. the previous frame.
            temp_buffer[14:] = frames[frame_idx, 14:] - frames[frame_idx - 1, 14:]
            x.extend(temp_buffer)  # concat
        x = torch.Tensor(x).to(self.device)  # 88 dim
        logit, latent = self.model(x)
        mlp_label = logit.argmax(axis=-1).item()
        return mlp_label, latent
if __name__ == '__main__':
    # Quick accuracy check of the perception MLP on a pre-formatted CSV.
    # Column 0 is the frame id and column 2 the friction label; keep the label last.
    data_idx = [idx for idx in range(46) if idx != 0 and idx != 2] + [2]
    data = np.loadtxt('./test/sample.csv', dtype=np.float32,
                      skiprows=1, usecols=data_idx, delimiter=',')
    # Bug fix: do not shadow the MLP class (or the built-in `sum`) with locals.
    perception = PerceptionModule()
    num_sample = {"3.0": 0, "0.1": 0, "0.5": 0}
    num_correct = {"3.0": 0, "0.1": 0, "0.5": 0}
    for datum in data:
        label = str(datum[-1])
        # sample data is already formatted
        mlp_label, latent = perception.perceive(None, None, formatted_frame=datum[:-1])
        if mlp_label is None:
            continue  # not enough history yet
        friction = [3.0, 0.1, 0.5][mlp_label]
        num_sample[label] += 1
        if str(friction) == label:
            num_correct[label] += 1
    total_correct = 0
    total_anomaly = 0  # correct predictions on the two anomalous frictions
    for case in num_sample:
        total_correct += num_correct[case]
        if case != "3.0":
            total_anomaly += num_correct[case]
    print("Sample distribution: {}".format(num_sample))
    print("Correct perception: {}".format(num_correct))
    print("Accuracy: {}".format(total_correct / data.shape[0]))
    print("Recall: {}".format(total_anomaly / (num_sample["0.1"] + num_sample["0.5"])))
|
{"hexsha": "ac2ca2d521c2ed84a9834170b378f20f29be5c05", "size": 4111, "ext": "py", "lang": "Python", "max_stars_repo_path": "perception/mlp_module.py", "max_stars_repo_name": "SAMMiCA/Scenario3-AISoccer", "max_stars_repo_head_hexsha": "8c93d3feab07d4ee4b7f234dbc5aaf2a16d0b4c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2021-09-06T05:36:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T04:47:02.000Z", "max_issues_repo_path": "perception/mlp_module.py", "max_issues_repo_name": "SAMMiCA/Scenario3-AISoccer", "max_issues_repo_head_hexsha": "8c93d3feab07d4ee4b7f234dbc5aaf2a16d0b4c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "perception/mlp_module.py", "max_forks_repo_name": "SAMMiCA/Scenario3-AISoccer", "max_forks_repo_head_hexsha": "8c93d3feab07d4ee4b7f234dbc5aaf2a16d0b4c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-09-06T05:37:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T13:58:16.000Z", "avg_line_length": 37.7155963303, "max_line_length": 100, "alphanum_fraction": 0.6073947945, "include": true, "reason": "import numpy", "num_tokens": 1109}
|
#HPUX program spsearch(ic1,ic2,ic3)
# Ratfor
#************************spprint*************************************
#code words:
#ccc name:
#ccc version date:
#ccc author(s):
#ccc language:
#ccc
#ccc short description:
#ccc
#ccc algorithm description:
#ccc system requirements:
#ccc subroutines called:
#ccc argument list description:
#ccc parameter description:
#ccc common description:
#ccc message files referenced:
#ccc internal variables:
#ccc file description:
#ccc user command lines:
#ccc update information:
#ccc NOTES:
#ccc
#*********************************************************************
#
# spsearch: stand-alone search utility for a specpr spectral-library
# file. The file name is taken from the command line; every record is
# scanned, and data records titled "ABS REF" (plus matching "errors"
# and "FEATANL" records) are indexed into tables. The program then
# enters an interactive search loop (doit) until the user quits.
#
implicit integer*4 (i-n)
include ../specpr/src.specpr/common/label1
include ../specpr/src.specpr/common/lbl4
include ../specpr/src.specpr/common/lundefs
include ../specpr/src.specpr/common/cmdarg
include ../specpr/src.specpr/common/blank
include ../specpr/src.specpr/common/alphabet
#HPUX character*80 ic1,ic2,ic3
character*1 ichil
character*8 inm
character*40 itlsav
character*80 filnam,sstring
character*1536 dummy
integer*4 recnum, fsize, arglen,ic,ichoice,lensstr
integer*4 idummy,filsiz,nextrec,descrec,nowrec,icount,igcount
integer*4 ier,testb,mm,dd,yy,a
integer*2 chkbit,ibit
equivalence (dummy,ititl)
character xptime*11,xpdate*10
include template.h
include tempfill.h # this must be last -- not declarations
#HPUX charg1 = ic1
#HPUX charg2 = ic2
#HPUX charg3 = ic3
# fetch command-line arguments into the cmdarg common block
call getcmdargs
# specpr file/array hard limits and interactive-state initialization
maxrec=999999
maxchn=4852
maxtxt=19860
mode = 0
mode2 = 0
iclin = 0
ia=0
ih=0
im=0
ib=0
ic=1
id=1
itf =1
irec=0
lpline=0
# echo the file argument, then bail out with usage if none was given
write(6,*) charg1
if (ncmdarg.ge.1) filnam = charg1
if (ncmdarg.ge.1) go to 10
#
write (ttyout,1)
1 format (1x, 'input file not fully specified after program name',
/, 1x, 'proper use is spsearch (specpr)filname',/)
#
go to 5000
#
# redlun is defined as unit 10 in lundefs
#
# NOTE(review): form='unformated' is misspelled (should be
# 'unformatted'); some runtimes reject it -- confirm against the
# compiler actually used to build this.
10 open (redlun,file=filnam,iostat=idummy,status='old',
access='direct',recl=1536,
form='unformated')
if (idummy.ne.0) {
write (ttyout,15)idummy
15 format (1x, 'cant open file, error',1x, i6,/)
stop
}
#
# get current length of file, test if consistent with specpr file
# and set initial output record number to the end of the data
# already in the file.
#
filsiz = fsize(filnam)
#
if (mod(filsiz,1536).ne.0) {
write (ttyout,30) filnam, filsiz
30 format (1x, a, " does not appear to be a specpr file",
/, "length of", i9,
" is not a multiple of 1536",/)
stop
}
#
# number of whole 1536-byte records available (first record excluded)
if (filsiz.gt.0) {
recnum = (filsiz-1536)/1536
}
if (filsiz.le.0) recnum = 0
#
# put basename of filename into mhistb
#
mhistb = filnam
mhista = filnam
do j = len(mhista),1,-1 {
if (mhista(j:j)=='/') {
mhistb = mhista(j+1:)
break
}
}
#
# open cmd file for crtin
#
open (16, file='.cmd', access='direct',recl=80,
iostat=ier,form='unformatted')
#
if (recnum <= 0 ) {
write (ttyout,45)
45 format (' no data in file, exiting',/)
go to 5000
}
if (recnum > maxrec) recnum = maxrec
call eralph
write(ttyout,41) filnam
41 format('SPSEARCH - Search spectral library'/,
'reading file: ',a)
# scan every record, building tables of data / error / feature records
ikey = 0
icount = 0
do i =1,maxrec {
ikey = ikey+1
if (ikey > recnum) goto 3999
call redspr(ikey,redlun,ier)
if (ier .ne. 0){
write (ttyout,55)
55 format (' read error, exiting',/)
goto 5000
}
# bit 1 of icflag distinguishes text records; only non-text records
# are classified below
ibit=1
itmp = chkbit(icflag,ibit)
if (itmp != 1) {
if (index(ititl,'ABS REF') > 0) {
icount = icount +1
call addgtbl(icflag,icount)
} else if (ititl(1:6) == 'errors') {
call adderr(icflag,icount)
} else if (index(ititl,'FEATANL') > 0) {
call addfeat(icflag,icount)
}
}
#
# for addgtbl():
# icflag is passed here as the starting address of label1
# icount is used as a consecutive array index for table[]
# NOTE: only data records are given a table entry.
# "ABS REF" && itmp==0
# NsG
}
3999 call mkgtbl(itmp) # puts gtable into ntable
igcount = icount
#
# ichoice = array index of search field :: template(ichoice)
#
write(ttyout,42)
42 format(/'Current search parameters:'/,
'Data Description: ABS REF'/)
# interactive search loop: doit returns the user's menu choice in ic
4000 call doit(xa,ic,icount,filnam,igcount,recnum)
if (icount == -1) stop
#
# If we were doing things correctly, our table would be in node
# format now, and we would call mktbl to put it into array format.
#
# But dont bother, its not that much memory.
#
# call mktbl()
if (ic == ihx) goto 5000
goto 4000
stop
5000 write (ttyout, 5001)
5001 format (' done')
close (10,iostat=ier)
stop
end
|
{"hexsha": "60c6bf60f2e489f4a27853e8a70c949ead96de48", "size": 4468, "ext": "r", "lang": "R", "max_stars_repo_path": "src.sspp/spsearch.r", "max_stars_repo_name": "YannChemin/specpr_linux", "max_stars_repo_head_hexsha": "72a422aa3b71ab36085fa6392daf7bcfbd3673a3", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src.sspp/spsearch.r", "max_issues_repo_name": "YannChemin/specpr_linux", "max_issues_repo_head_hexsha": "72a422aa3b71ab36085fa6392daf7bcfbd3673a3", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src.sspp/spsearch.r", "max_forks_repo_name": "YannChemin/specpr_linux", "max_forks_repo_head_hexsha": "72a422aa3b71ab36085fa6392daf7bcfbd3673a3", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.0098522167, "max_line_length": 71, "alphanum_fraction": 0.6559982095, "num_tokens": 1569}
|
// Copyright 2011-2020 Wason Technology, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "RobotRaconteur/Generator.h"
#include "RobotRaconteur/RobotRaconteurNode.h"
#include "RobotRaconteur/Client.h"
#include "RobotRaconteur/DataTypes.h"
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm.hpp>
#include <boost/foreach.hpp>
namespace RobotRaconteur
{
// Construct a client-side generator proxy bound to a service stub.
// name: member name of the generator function on the service object.
// id: index identifying this generator instance within the connection.
// stub: owning service stub; retained as a weak reference by the member.
GeneratorClientBase::GeneratorClientBase(boost::string_ref name, int32_t id, RR_SHARED_PTR<ServiceStub> stub)
{
    this->node=stub->RRGetNode();
    this->endpoint=stub->GetContext()->GetLocalEndpoint();
    this->name = RR_MOVE(name.to_string());
    this->id = id;
    this->stub = stub;
    ROBOTRACONTEUR_LOG_TRACE_COMPONENT_PATH(node,Client,endpoint,service_path,name,"Created generator with id " << id);
}
// Return the owning service stub, throwing if the stub has already been
// destroyed (i.e. the generator connection was closed).
RR_SHARED_PTR<ServiceStub> GeneratorClientBase::GetStub()
{
    RR_SHARED_PTR<ServiceStub> s = stub.lock();
    if (s)
    {
        return s;
    }
    throw InvalidOperationException("Generator has been closed");
}
// Asynchronously abort the generator on the service side.
// The abort is transported as a GeneratorNextReq entry carrying an embedded
// AbortOperationException, so it flows through the same request path as Next.
// handler: completion callback receiving any transport/service error.
// timeout: request timeout in milliseconds.
void GeneratorClientBase::AsyncAbort(boost::function<void(RR_SHARED_PTR<RobotRaconteurException> err)> handler, int32_t timeout)
{
    ROBOTRACONTEUR_LOG_TRACE_COMPONENT_PATH(node,Client,endpoint,service_path,name,"Requesting async generator abort with id " << id);
    RR_INTRUSIVE_PTR<MessageEntry> m = CreateMessageEntry(MessageEntryType_GeneratorNextReq, GetMemberName());
    AbortOperationException err("Generator abort requested");
    RobotRaconteurExceptionUtil::ExceptionToMessageEntry(err, m);
    m->AddElement("index", ScalarToRRArray(id));
    GetStub()->AsyncProcessRequest(m, boost::bind(handler, RR_BOOST_PLACEHOLDERS(_2)), timeout);
}
// Asynchronously close the generator on the service side.
// Close is signaled by sending a GeneratorNextReq entry carrying an embedded
// StopIterationException, mirroring the abort path above.
// handler: completion callback receiving any transport/service error.
// timeout: request timeout in milliseconds.
void GeneratorClientBase::AsyncClose(boost::function<void(RR_SHARED_PTR<RobotRaconteurException> err)> handler, int32_t timeout)
{
    ROBOTRACONTEUR_LOG_TRACE_COMPONENT_PATH(node,Client,endpoint,service_path,name,"Requesting async generator close with id " << id);
    RR_INTRUSIVE_PTR<MessageEntry> m = CreateMessageEntry(MessageEntryType_GeneratorNextReq, GetMemberName());
    StopIterationException err("");
    RobotRaconteurExceptionUtil::ExceptionToMessageEntry(err, m);
    m->AddElement("index", ScalarToRRArray(id));
    GetStub()->AsyncProcessRequest(m, boost::bind(handler, RR_BOOST_PLACEHOLDERS(_2)), timeout);
}
std::string GeneratorClientBase::GetMemberName()
{
return name;
}
// Send one generator "Next" request asynchronously.
// v: optional parameter element (may be null for parameterless generators);
//    renamed to "parameter" before being attached to the request.
// handler: invoked with the response's "return" element (possibly null), any
//    error, and the owning node; routed through AsyncNextBase1.
// timeout: request timeout in milliseconds.
void GeneratorClientBase::AsyncNextBase(RR_INTRUSIVE_PTR<MessageElement> v, boost::function<void(RR_INTRUSIVE_PTR<MessageElement> m, RR_SHARED_PTR<RobotRaconteurException> err, RR_SHARED_PTR<RobotRaconteurNode>)> handler, int32_t timeout)
{
    ROBOTRACONTEUR_LOG_TRACE_COMPONENT_PATH(node,Client,endpoint,service_path,name,"Calling async generator next with id " << id);
    RR_INTRUSIVE_PTR<MessageEntry> m = CreateMessageEntry(MessageEntryType_GeneratorNextReq, GetMemberName());
    m->AddElement("index", ScalarToRRArray(id));
    if (v)
    {
        v->ElementName = "parameter";
        m->elements.push_back(v);
    }
    // NOTE: this local weak pointer shadows the `node` member of the class.
    RR_WEAK_PTR<RobotRaconteurNode> node = GetStub()->RRGetNode();
    GetStub()->AsyncProcessRequest(m, boost::bind(&GeneratorClientBase::AsyncNextBase1,RR_BOOST_PLACEHOLDERS(_1),RR_BOOST_PLACEHOLDERS(_2),handler,node));
}
// Completion callback for AsyncNextBase: extracts the "return" element from
// the response entry and forwards it (or the error) to the user handler.
void GeneratorClientBase::AsyncNextBase1(RR_INTRUSIVE_PTR<MessageEntry> ret, RR_SHARED_PTR<RobotRaconteurException> err, boost::function<void(RR_INTRUSIVE_PTR<MessageElement>, RR_SHARED_PTR<RobotRaconteurException>, RR_SHARED_PTR<RobotRaconteurNode>)> handler, RR_WEAK_PTR<RobotRaconteurNode> node)
{
    RR_SHARED_PTR<RobotRaconteurNode> node1 = node.lock();
    if (!node1)
    {
        // Node destroyed while the request was in flight.
        handler(RR_INTRUSIVE_PTR<MessageElement>(), RR_MAKE_SHARED<InvalidOperationException>("Node has been released"), node1);
        return;
    }
    RR_INTRUSIVE_PTR<MessageElement> mret;
    if (err)
    {
        handler(mret, err, node1);
        return;
    }
    // "return" may legitimately be absent; mret stays null in that case.
    ret->TryFindElement("return", mret);
    handler(mret,err,node1);
}
namespace detail
{
// Completion shim for generator Next calls: routes the outcome to the user
// handler, discarding the (unused) returned message element.
void GeneratorClient_AsyncNext2(RR_INTRUSIVE_PTR<MessageElement> v2, RR_SHARED_PTR<RobotRaconteurException> err, RR_SHARED_PTR<RobotRaconteurNode> node, boost::function<void(RR_SHARED_PTR<RobotRaconteurException> err)> handler)
{
    if (!err)
    {
        detail::InvokeHandler(node, handler);
        return;
    }
    detail::InvokeHandlerWithException(node, handler, err);
}
}
}
|
{"hexsha": "3a7fe002d668148e37bf749915cb409155045023", "size": 4765, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "RobotRaconteurCore/src/Generator.cpp", "max_stars_repo_name": "robotraconteur/robotraconteur_pyodide", "max_stars_repo_head_hexsha": "e2a529c75b1603ae9471091095c9c6b10652dbbb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "RobotRaconteurCore/src/Generator.cpp", "max_issues_repo_name": "robotraconteur/robotraconteur_pyodide", "max_issues_repo_head_hexsha": "e2a529c75b1603ae9471091095c9c6b10652dbbb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2019-11-15T02:20:44.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-15T05:57:21.000Z", "max_forks_repo_path": "RobotRaconteurCore/src/Generator.cpp", "max_forks_repo_name": "johnwason/robotraconteur_pyodide", "max_forks_repo_head_hexsha": "438f1013ef144e27e8c714561cc4ab48815cb607", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-03-05T16:20:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-05T16:20:48.000Z", "avg_line_length": 40.3813559322, "max_line_length": 299, "alphanum_fraction": 0.7748163694, "num_tokens": 1291}
|
export bpffunction, bpfasm
# Compilation support
# Keyword parameters controlling eBPF code generation.
Base.@kwdef struct BPFCompilerParams <: AbstractCompilerParams
    format::Symbol = :obj          # output format: :obj, :asm, or :llvm
    license::String = ""           # contents of the ELF "license" section ("" = none)
    prog_section::String = "prog"  # ELF section the program is emitted into
    btf::Bool = true               # emit BTF debug info (kept by not stripping)
end

# Convenience alias for compiler jobs targeting BPF with these params.
BPFCompilerJob = CompilerJob{BPFCompilerTarget,BPFCompilerParams}

# Use BPFnative itself as the GPUCompiler runtime module.
GPUCompiler.runtime_module(::BPFCompilerJob) = BPFnative
## host-side API

"""
    bpffunction(f, tt=Tuple{}; kwargs...)

Compiles a function invocation into eBPF bytecode.

The following keyword arguments are supported:

- `name`: override the name that the kernel will have in the generated code
- `license::String=""`: license for the kernel source code and resulting object
- `prog_section::String=""`: ELF section that the kernel will be placed in
- `btf::Bool=true`: Whether to generate BTF debuginfo or not

The output of this function is automatically cached, i.e. you can simply call
`bpffunction` in a hot path without degrading performance. New code will be
generated automatically when the function changes or when different types or
keyword arguments are provided.
"""
function bpffunction(f::Core.Function, tt::Type=Tuple{}; name=nothing, kwargs...)
    source = FunctionSpec(f, tt, false, name)  # `false`: not a GPU kernel entry point
    target = BPFCompilerTarget()
    params = BPFCompilerParams(; kwargs...)
    job = CompilerJob(target, source, params)
    # Reuse previously-compiled bytecode when the job matches a cache entry.
    GPUCompiler.cached_compilation(bpffunction_cache, job,
                                   bpffunction_compile,
                                   bpffunction_link)
end
# Cache of compiled artifacts, keyed by the job's hash.
const bpffunction_cache = Dict{UInt,Any}()

# actual compilation
# Lower the Julia method to eBPF in the format requested by the job's params:
# raw LLVM IR strings (:llvm), textual assembly (:asm), or an ELF object
# (:obj), the latter two returned as a Vector{UInt8}.
function bpffunction_compile(@nospecialize(job::CompilerJob))
    params = job.params
    format = params.format
    btf = params.btf
    # compile to BPF
    method_instance, world = GPUCompiler.emit_julia(job)
    ir, kernel = GPUCompiler.emit_llvm(job, method_instance; libraries=false)
    if format == :llvm
        return collect.(codeunits.(string.((ir, kernel)))) # TODO: Make more efficient
    elseif format == :asm
        format = LLVM.API.LLVMAssemblyFile
    elseif format == :obj
        format = LLVM.API.LLVMObjectFile
    end
    # Stripping debug info would drop BTF, so only strip when btf=false.
    code, _ = GPUCompiler.emit_asm(job, ir; format=format, validate=true, strip=!btf)
    return Vector{UInt8}(code)
end

# Linking is a no-op: the compiled object is handed back unchanged.
bpffunction_link(@nospecialize(job::CompilerJob), exe) = exe
function GPUCompiler.optimize_module!(job::BPFCompilerJob, mod::LLVM.Module)
    # extra optimizations to kill uses of `gpu_signal_exception`
    # GPUCompiler does this slightly too late, causing an LLVM abort due to the
    # BPF target not supporting lowering of `llvm.trap`
    ModulePassManager() do pm
        global_optimizer!(pm)
        global_dce!(pm)             # drop now-unreferenced globals/functions
        strip_dead_prototypes!(pm)
        run!(pm, mod)
    end
    for func in LLVM.functions(mod)
        # validate no-throw: any surviving use means the kernel could throw
        if LLVM.name(func) == "gpu_signal_exception" && length(collect(LLVM.uses(func))) > 0
            throw(GPUCompiler.KernelError(job, "eBPF does not support exceptions"))
        end
    end
    mod
end
# Final per-module fixups: place functions in the program section, embed the
# license string, export map definitions, and run BPF-specific passes.
function GPUCompiler.finish_module!(job::BPFCompilerJob, mod::LLVM.Module, entry::LLVM.Function)
    params = job.params
    license = params.license
    prog_section = params.prog_section
    for func in LLVM.functions(mod)
        # Set entry section for loaders like libbpf
        LLVM.section!(func, prog_section)
    end
    # Embed the license as a NUL-terminated global in the "license" section.
    if license != ""
        ctx = LLVM.context(mod)
        i8 = LLVM.Int8Type(ctx)
        glob = GlobalVariable(mod, LLVM.ArrayType(i8, length(license)+1), "_license")
        linkage!(glob, LLVM.API.LLVMExternalLinkage)
        constant!(glob, true)
        section!(glob, "license")
        str = ConstantArray(Vector{UInt8}(license*'\0'); ctx)
        @assert context(glob) == context(str) == ctx
        initializer!(glob, str)
    end
    # Set all map definitions as external linkage so loaders can find them.
    for gv in filter(x->(section(x)=="maps")||(section(x)==".maps"), collect(LLVM.globals(mod)))
        linkage!(gv, LLVM.API.LLVMExternalLinkage)
    end
    ModulePassManager() do pm
        if Base.JLOptions().debug_level > 1
            # Validate contexts, for my sanity
            add!(pm, ModulePass("BPFValidateContexts", validate_contexts!))
        end
        # Promote `@malloc` intrinsics
        add!(pm, FunctionPass("BPFHeapToStack", heap_to_stack!))
        run!(pm, mod)
    end
    entry
end
"Validates LLVM contexts of all the things."
function validate_contexts!(mod::LLVM.Module)
    # Every function, block, instruction, operand and global must share the
    # module's LLVM context; mixed contexts cause hard-to-debug crashes.
    ctx = LLVM.context(mod)
    for fn in LLVM.functions(mod)
        @assert context(fn) == ctx "Failed validation: $fn"
        for bb in LLVM.blocks(fn)
            for insn in LLVM.instructions(bb)
                @assert context(insn) == ctx "Failed validation: $insn"
                for op in LLVM.operands(insn)
                    @assert context(op) == ctx "Failed validation: $op"
                end
            end
        end
    end
    for gv in LLVM.globals(mod)
        @assert context(gv) == ctx "Failed validation: $gv"
    end
    # Returning false tells the pass manager the module was not modified.
    false
end
"Promotes `@malloc` intrinsics to allocas."
function heap_to_stack!(fn::LLVM.Function)
    # Rewrite fixed-size `malloc` calls into entry-block allocas: BPF has no
    # heap, so such allocations must live on the stack instead.
    changed = false
    ctx = LLVM.context(fn)
    for bb in LLVM.blocks(fn)
        for insn in LLVM.instructions(bb)
            if insn isa LLVM.CallInst && LLVM.name(LLVM.called_value(insn)) == "malloc"
                # Requested byte count; assumed to be a constant operand.
                sz = convert(Int64, LLVM.operands(insn)[1])
                T_i8 = LLVM.Int8Type(ctx)
                T_pi8 = LLVM.PointerType(T_i8)
                T_buf = LLVM.ArrayType(T_i8, sz)
                Builder(ctx) do builder
                    # Place alloca at beginning of entry
                    position!(builder, first(LLVM.instructions(first(LLVM.blocks(fn)))))
                    buf = alloca!(builder, T_buf)
                    # Replace malloc with bitcast'd alloca
                    position!(builder, insn)
                    new_insn = bitcast!(builder, buf, T_pi8)
                    replace_uses!(insn, new_insn)
                    unsafe_delete!(LLVM.parent(insn), insn)
                end
                changed = true
            end
        end
    end
    changed
end
|
{"hexsha": "381517da852d5d57d2c48d8232321707538841ce", "size": 6110, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/compiler.jl", "max_stars_repo_name": "lawless-m/BPFnative.jl", "max_stars_repo_head_hexsha": "b94c60059aae8cca90c396c463ca4e02741db2f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/compiler.jl", "max_issues_repo_name": "lawless-m/BPFnative.jl", "max_issues_repo_head_hexsha": "b94c60059aae8cca90c396c463ca4e02741db2f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/compiler.jl", "max_forks_repo_name": "lawless-m/BPFnative.jl", "max_forks_repo_head_hexsha": "b94c60059aae8cca90c396c463ca4e02741db2f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9142857143, "max_line_length": 96, "alphanum_fraction": 0.6410801964, "num_tokens": 1474}
|
# -*- coding: utf-8 -*-
"""ML RPS Submission.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1G-Br7RpdpJ5ChNWsSxubZPEdyAcJBRen
Nama : Wahyu Septiadi
Email : wahyusptd@gmail.com
"""
# Commented out IPython magic to ensure Python compatibility.
import tensorflow as tf
import zipfile,os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
from google.colab import files
from keras.preprocessing import image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
!wget --no-check-certificate \
https://dicodingacademy.blob.core.windows.net/picodiploma/ml_pemula_academy/rockpaperscissors.zip -O /tmp/rockpaperscissors.zip
# Extract the downloaded dataset zip archive.
local_zip = '/tmp/rockpaperscissors.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/tmp')
zip_ref.close()

# Base directory holding the class sub-folders (rock / paper / scissors).
base_dir = '/tmp/rockpaperscissors/rps-cv-images'
# Image generators: preprocessing, automatic labeling from folder names, and
# augmentation. Training data is augmented; 40% of the images are held out
# for validation via validation_split.
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    horizontal_flip=True,
    shear_range=0.2,
    fill_mode='wrap',
    zoom_range=0.2,
    validation_split=0.4)  # validation data is 40% of the training data

# Bug fix: the validation generator previously reused train_datagen, so the
# validation images were augmented too (and validation_datagen was unused).
# Validation data should only be rescaled; it needs the same
# validation_split so the same 40% subset is selected.
validation_datagen = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.4)

# Class indices: 0 = paper, 1 = rock, 2 = scissors.
train_generator = train_datagen.flow_from_directory(
    base_dir,  # training data directory
    target_size=(150, 150),
    shuffle=True,
    class_mode='categorical',
    subset='training')

validation_generator = validation_datagen.flow_from_directory(
    base_dir,  # validation data directory
    target_size=(150, 150),
    shuffle=True,
    class_mode='categorical',
    subset='validation')
# Sequential CNN: four conv/pool stages followed by a dense classifier.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(130, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')  # one output per class
])

# Bug fix: this is a 3-class problem with one-hot labels
# (class_mode='categorical') and a softmax output, so the loss must be
# categorical_crossentropy; binary_crossentropy applied per-output is the
# wrong objective here and mis-reports accuracy.
model.compile(loss='categorical_crossentropy',
              optimizer=tf.optimizers.Adam(),
              metrics=['accuracy'])
# Train the model with fit (takes roughly 12 minutes).
model.fit(
    train_generator,
    steps_per_epoch=25,
    epochs=22,
    validation_data=validation_generator,
    validation_steps=5,
    verbose=2)
# Interactive test: upload images and classify each one.
uploaded = files.upload()

for fn in uploaded.keys():
    path = fn
    img = image.load_img(path, target_size=(150,150))
    imgplot = plt.imshow(img)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)  # add a batch dimension
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)  # e.g. [[1. 0. 0.]] maps to [[paper, rock, scissors]]
    check = np.argmax(classes)  # index of the winning class
    print(fn)
    if check==0:
        print('paper')
    elif check==1:
        print('rock')
    else:
        print('scissors')
|
{"hexsha": "0fd95738f308526b6e6b345ef762968f28e321b9", "size": 3852, "ext": "py", "lang": "Python", "max_stars_repo_path": "ml_rps_submission.py", "max_stars_repo_name": "WahyuSeptiadi/RockPaperScissors.ai", "max_stars_repo_head_hexsha": "7b281932be372869c876124cf576ada419a0b4dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-04T01:28:05.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-04T01:28:05.000Z", "max_issues_repo_path": "ml_rps_submission.py", "max_issues_repo_name": "WahyuSeptiadi/RockPaperScissors.ai", "max_issues_repo_head_hexsha": "7b281932be372869c876124cf576ada419a0b4dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ml_rps_submission.py", "max_forks_repo_name": "WahyuSeptiadi/RockPaperScissors.ai", "max_forks_repo_head_hexsha": "7b281932be372869c876124cf576ada419a0b4dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1, "max_line_length": 129, "alphanum_fraction": 0.6568016615, "include": true, "reason": "import numpy", "num_tokens": 982}
|
# Generate Gauss quadrature rules on cubes (0,0,...) - (1,1,...) by doing an outer product
# over all dimensions
import Base.Cartesian: @nloops, @nref, @ntuple, @nexprs
# Build Gauss-Legendre quadrature rules for the reference hypercube (0,1)^dim
# in 1, 2 and 3 dimensions by taking the tensor product of the 1-D rule along
# each axis. One method is generated per dimension via @eval.
for dim in (1,2,3)
    @eval begin
        function (::Type{QuadratureRule{HyperCube{$dim}}})(quad_type::GaussLegendre, order::Int)
            # 1-D Gauss-Legendre nodes `p` and weights `w` on [-1, 1].
            p,w = FastGaussQuadrature.gausslegendre(order)
            weights = Vector{Float64}(undef, order^($dim))
            points = Vector{Vec{$dim,Float64}}(undef, order^($dim))
            count = 1
            # Nested loop over every combination (i_1, ..., i_dim) of 1-D node indices.
            @nloops $dim i j->(1:order) begin
                # Map each 1-D node from [-1, 1] to [0, 1]. Base.Cartesian
                # rewrites symbols of the form `i_q` into the loop indices
                # i_1, ..., i_dim for each tuple slot q.
                t = @ntuple $dim q-> 0.5*(p[$(Symbol("i"*"_q"))] .+ 1.0)
                points[count] = Vec{$dim,Float64}(t)
                # Tensor-product weight: product of the 1-D weights per axis.
                weight = 1.0
                @nexprs $dim j->(weight *= w[i_{j}])
                weights[count] = weight
                count += 1
            end
            # Normalize so the weights sum to 1 over the reference cube.
            return QuadratureRule{HyperCube{$dim},$dim,Float64}(weights./sum(weights), points)
        end
    end
end
|
{"hexsha": "aea95ea4ac7f7096fb20b2a1cf3d05b9380c149d", "size": 966, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Quads/GaussQuadRect.jl", "max_stars_repo_name": "Paulms/jFEMToools", "max_stars_repo_head_hexsha": "fab2579db69c3c5a3610a7aabc06ae0cbd627b2d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-01-28T16:35:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T17:08:23.000Z", "max_issues_repo_path": "src/Quads/GaussQuadRect.jl", "max_issues_repo_name": "Paulms/jFEMToools", "max_issues_repo_head_hexsha": "fab2579db69c3c5a3610a7aabc06ae0cbd627b2d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-11-28T17:27:48.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-28T17:27:48.000Z", "max_forks_repo_path": "src/Quads/GaussQuadRect.jl", "max_forks_repo_name": "Paulms/jFEMToools", "max_forks_repo_head_hexsha": "fab2579db69c3c5a3610a7aabc06ae0cbd627b2d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-12-04T22:33:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T17:08:28.000Z", "avg_line_length": 40.25, "max_line_length": 96, "alphanum_fraction": 0.5414078675, "num_tokens": 290}
|
# Algorithms Exercise 2
## Imports
```python
%matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
```
## Peak finding
Write a function `find_peaks` that finds and returns the indices of the local maxima in a sequence. Your function should:
* Properly handle local maxima at the endpoints of the input array.
* Return a Numpy array of integer indices.
* Handle any Python iterable as input.
```python
def find_peaks(a):
    """Find the indices of the local maxima in a sequence.

    Parameters
    ----------
    a : iterable
        Any Python iterable of comparable values.

    Returns
    -------
    numpy.ndarray
        Sorted integer indices of the local maxima. An endpoint counts as
        a maximum when it exceeds its single neighbour; a length-1 input
        has its only element as a maximum.
    """
    # Materialize any iterable (generators included) before indexing.
    a = np.asarray(list(a))
    n = len(a)
    ind = []
    if n == 1:
        # A single sample is trivially a boundary maximum.
        ind.append(0)
    elif n > 1:
        # Endpoints only have one neighbour to beat.
        if a[0] > a[1]:
            ind.append(0)
        if a[n - 1] > a[n - 2]:
            ind.append(n - 1)
        # Interior points must beat both neighbours.
        for i in range(1, n - 1):
            if a[i] > a[i - 1] and a[i] > a[i + 1]:
                ind.append(i)
    # Spec requires a NumPy array of integer indices, in increasing order.
    return np.array(sorted(ind), dtype=int)
find_peaks([2,0,1,0,2,0,1])
```
[0, 2, 4, 6]
```python
p1 = find_peaks([2,0,1,0,2,0,1])
assert np.allclose(p1, np.array([0,2,4,6]))
p2 = find_peaks(np.array([0,1,2,3]))
assert np.allclose(p2, np.array([3]))
p3 = find_peaks([3,2,1,0])
assert np.allclose(p3, np.array([0]))
```
Here is a string with the first 10000 digits of $\pi$ (after the decimal). Write code to perform the following:
* Convert that string to a Numpy array of integers.
* Find the indices of the local maxima in the digits of $\pi$.
* Use `np.diff` to find the distances between consecutive local maxima.
* Visualize that distribution using an appropriately customized histogram.
```python
from sympy import pi, N
pi_digits_str = str(N(pi, 10001))[2:]
```
```python
# YOUR CODE HERE
#raise NotImplementedError()
def pimax(x):
    '''Plot the distribution of gaps between local maxima in a digit string.

    Uses find_peaks to locate the local maxima, np.diff to measure the
    spacing between consecutive maxima, and a histogram to show the
    distribution of those spacings.

    Parameters
    ----------
    x : str
        A string of decimal digits (e.g. the first 10000 digits of pi).
    '''
    # Convert the digit string to a float array, one element per digit
    # (sized from the input rather than a hard-coded 10000).
    digits = np.array([int(c) for c in x], dtype=float)
    m = find_peaks(digits)
    # Distances between consecutive local maxima.
    dist = np.diff(m)
    p = plt.hist(dist, bins=17)
    # Raw strings: '\p' is an invalid escape sequence in a normal string.
    plt.title(r'Distances Between Local Maxima in First 10000 digits of $\pi$')
    plt.xlabel('Distance Between Local Maxima')
    plt.ylabel('Number of Times Occurred')
    plt.grid(False)
    plt.xlim([1, 19])
    ticks = range(2, 19)
    plt.xticks(ticks[::2])
    plt.ylim(0, 1100)
    plt.show()
pimax(pi_digits_str)
```
```python
assert True # use this for grading the pi digits histogram
```
|
{"hexsha": "af91c073e4eccedd40a3e04431a1d61192013dc3", "size": 21086, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "assignments/assignment07/AlgorithmsEx02.ipynb", "max_stars_repo_name": "jpilgram/phys202-2015-work", "max_stars_repo_head_hexsha": "22ba6764bcb16dfc4117089590bd25e2590aa746", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignments/assignment07/AlgorithmsEx02.ipynb", "max_issues_repo_name": "jpilgram/phys202-2015-work", "max_issues_repo_head_hexsha": "22ba6764bcb16dfc4117089590bd25e2590aa746", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignments/assignment07/AlgorithmsEx02.ipynb", "max_forks_repo_name": "jpilgram/phys202-2015-work", "max_forks_repo_head_hexsha": "22ba6764bcb16dfc4117089590bd25e2590aa746", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 88.5966386555, "max_line_length": 15434, "alphanum_fraction": 0.8388504221, "converted": true, "num_tokens": 703}
|
from dwave.system.samplers import DWaveSampler # Library to interact with the QPU
from dwave.system.composites import EmbeddingComposite # Library to embed our problem onto the QPU physical graph
import itertools
import scipy.optimize
import TSP_utilities
import numpy as np
class DWaveTSPSolver(object):
    """
    Class for solving the Travelling Salesman Problem using D-Wave.

    Qubit ``t * n + i`` encodes "city ``i`` is visited at time step ``t``".
    Specifying a starting point is not implemented.
    """
    def __init__(self, distance_matrix, sapi_token=None, url=None):
        """
        :param distance_matrix: square array of pairwise city distances
            (must support elementwise division, e.g. a numpy array)
        :param sapi_token: D-Wave SAPI token forwarded to the sampler
        :param url: D-Wave endpoint URL forwarded to the sampler
        """
        # Scale distances into [0, 1] so the fixed penalty constants below
        # dominate the cost terms regardless of the problem's units.
        max_distance = np.max(np.array(distance_matrix))
        scaled_distance_matrix = distance_matrix / max_distance
        self.distance_matrix = scaled_distance_matrix
        # Constraint penalties must outweigh the (scaled) tour-cost terms.
        self.constraint_constant = 400
        self.cost_constant = 10
        self.chainstrength = 800
        self.numruns = 1000
        self.qubo_dict = {}
        self.sapi_token = sapi_token
        self.url = url
        self.add_cost_objective()
        self.add_time_constraints()
        self.add_position_constraints()

    def add_cost_objective(self):
        """Add tour-length terms coupling cities at consecutive time steps."""
        n = len(self.distance_matrix)
        for t in range(n):
            for i in range(n):
                for j in range(n):
                    if i == j:
                        continue
                    qubit_a = t * n + i
                    # (t + 1) % n closes the tour back to time step 0.
                    qubit_b = (t + 1) % n * n + j
                    self.qubo_dict[(qubit_a, qubit_b)] = \
                        self.cost_constant * self.distance_matrix[i][j]

    def add_time_constraints(self):
        """Penalize visiting more or fewer than one city per time step."""
        n = len(self.distance_matrix)
        for t in range(n):
            for i in range(n):
                qubit_a = t * n + i
                # Accumulate on the diagonal: the position constraints below
                # also contribute to the same (qubit, qubit) entry.
                if (qubit_a, qubit_a) not in self.qubo_dict.keys():
                    self.qubo_dict[(qubit_a, qubit_a)] = -self.constraint_constant
                else:
                    self.qubo_dict[(qubit_a, qubit_a)] += -self.constraint_constant
                for j in range(n):
                    qubit_b = t * n + j
                    if i != j:
                        self.qubo_dict[(qubit_a, qubit_b)] = 2 * self.constraint_constant

    def add_position_constraints(self):
        """Penalize visiting a city at more or fewer than one time step."""
        n = len(self.distance_matrix)
        for i in range(n):
            for t1 in range(n):
                qubit_a = t1 * n + i
                if (qubit_a, qubit_a) not in self.qubo_dict.keys():
                    self.qubo_dict[(qubit_a, qubit_a)] = -self.constraint_constant
                else:
                    self.qubo_dict[(qubit_a, qubit_a)] += -self.constraint_constant
                for t2 in range(n):
                    qubit_b = t2 * n + i
                    if t1 != t2:
                        self.qubo_dict[(qubit_a, qubit_b)] = 2 * self.constraint_constant

    def solve_tsp(self):
        """Run the QUBO on the D-Wave sampler and decode the best tour.

        :return: (solution, distribution) -- the lowest-energy visit order,
            and a dict mapping each decoded tour to (energy, num_occurrences).
        """
        response = EmbeddingComposite(DWaveSampler(token=self.sapi_token, endpoint=self.url, solver='DW_2000Q_2_1')).sample_qubo(self.qubo_dict, chain_strength=self.chainstrength, num_reads=self.numruns)
        self.decode_solution(response)
        return self.solution, self.distribution

    def decode_solution(self, response):
        """Decode sampler records, keeping the lowest-energy tour.

        Sets ``self.solution`` (best visit order) and ``self.distribution``
        (tour tuple -> (energy, num_occurrences)).
        """
        distribution = {}
        min_energy = response.record[0].energy
        for record in response.record:
            sample = record[0]
            solution_binary = [node for node in sample]
            solution = TSP_utilities.binary_state_to_points_order(solution_binary)
            distribution[tuple(solution)] = (record.energy, record.num_occurrences)
            # Bug fix: track the running minimum instead of comparing every
            # record against the first one only, so the best tour is selected
            # even when the records are not sorted by energy.
            if record.energy <= min_energy:
                min_energy = record.energy
                self.solution = solution
        self.distribution = distribution

    def calculate_solution(self):
        """
        Samples the QVM for the results of the algorithm
        and returns a list containing the order of nodes.

        NOTE(review): this method references ``self.qaoa_inst``, ``self.betas``,
        ``self.gammas`` and ``self.get_solution_for_full_array``, none of which
        are defined on this class -- it appears to be carried over from a
        QAOA-based solver and would raise AttributeError if called. Kept
        unchanged to preserve the public interface; confirm before use.
        """
        most_frequent_string, sampling_results = self.qaoa_inst.get_string(self.betas, self.gammas, samples=10000)
        reduced_solution = TSP_utilities.binary_state_to_points_order(most_frequent_string)
        full_solution = self.get_solution_for_full_array(reduced_solution)
        self.solution = full_solution
        all_solutions = sampling_results.keys()
        distribution = {}
        for sol in all_solutions:
            reduced_sol = TSP_utilities.binary_state_to_points_order(sol)
            full_sol = self.get_solution_for_full_array(reduced_sol)
            distribution[tuple(full_sol)] = sampling_results[sol]
        self.distribution = distribution
|
{"hexsha": "7d5ac334e43f06eeb241758fb55efa7e24d83bb8", "size": 4466, "ext": "py", "lang": "Python", "max_stars_repo_path": "quantum_tsp/src/dwave_tsp_solver.py", "max_stars_repo_name": "tangarfff/job-shop-scheduling", "max_stars_repo_head_hexsha": "1e8347dcce5fdf9d4cdf8cc94e49d1bef1851551", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 45, "max_stars_repo_stars_event_min_datetime": "2018-04-06T13:42:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T04:30:05.000Z", "max_issues_repo_path": "quantum_tsp/src/dwave_tsp_solver.py", "max_issues_repo_name": "tangarfff/job-shop-scheduling", "max_issues_repo_head_hexsha": "1e8347dcce5fdf9d4cdf8cc94e49d1bef1851551", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-12-02T17:40:20.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-21T09:11:18.000Z", "max_forks_repo_path": "quantum_tsp/src/dwave_tsp_solver.py", "max_forks_repo_name": "tangarfff/job-shop-scheduling", "max_forks_repo_head_hexsha": "1e8347dcce5fdf9d4cdf8cc94e49d1bef1851551", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2018-11-01T06:01:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-22T09:35:27.000Z", "avg_line_length": 40.6, "max_line_length": 216, "alphanum_fraction": 0.6112852665, "include": true, "reason": "import numpy,import scipy", "num_tokens": 972}
|
# -*- coding: utf-8 -*-
import os
import unittest
from unittest.mock import patch
import json
import uuid
import numpy as np
import pytest
from pytest import raises
from kb_RDP_Classifier.util.debug import dprint, where_am_i
from kb_RDP_Classifier.impl.globals import Var
from kb_RDP_Classifier.impl.params import Params
from kb_RDP_Classifier.impl.kbase_obj import AmpliconMatrix, AttributeMapping
from kb_RDP_Classifier.impl import report
from data import *
from config import *
# Per-session scratch directory so repeated test runs don't collide;
# `scratch` comes from the star imports above (presumably config -- confirm).
run_dir = os.path.join(scratch, 'test_report_' + str(uuid.uuid4()))
os.mkdir(run_dir)
# Minimal required params shared by every test below; each test adds `conf`.
req = dict(
    workspace_id='id',
    amp_mat_upa='u/p/a',
    output_name='out_name',
)
####################################################################################################
####################################################################################################
def test_small():
    """Render a report for the enigma50by30 fixture at a fixed confidence."""
    out_dir = os.path.join(TEST_DATA_DIR, 'return/enigma50by30/return/RDP_Classifier_output')
    Var.out_allRank_flpth = os.path.join(out_dir, 'out_allRank.tsv')
    Var.report_dir = os.path.join(run_dir, 'report_enigma50by30')
    os.mkdir(Var.report_dir)
    Var.params = Params(dict(**req, conf=0.8))
    writer = report.HTMLReportWriter(cmd_l=['test,', 'test,', 'small'])
    html_links = writer.write()
####################################################################################################
####################################################################################################
def test_small_linspace():
    """Render enigma50by30 reports across a sweep of confidence cutoffs."""
    out_dir = os.path.join(TEST_DATA_DIR, 'return/enigma50by30/return/RDP_Classifier_output')
    Var.out_allRank_flpth = os.path.join(out_dir, 'out_allRank.tsv')
    for conf in np.linspace(0, 1, 11):
        Var.report_dir = os.path.join(run_dir, 'report_enigma50by30_conf%g' % conf)
        os.mkdir(Var.report_dir)
        Var.params = Params(dict(**req, conf=conf))
        cmd_l = ['test,', 'test,', 'small', 'conf=%g' % conf]
        html_links = report.HTMLReportWriter(cmd_l=cmd_l).write()
####################################################################################################
####################################################################################################
@pytest.mark.parametrize('i,conf', list(enumerate(np.linspace(0, 1, 11))))
def test_dummy(i, conf):
    """Render a dummy10by8 report at each swept confidence cutoff."""
    out_dir = os.path.join(TEST_DATA_DIR, 'return/dummy10by8/return/RDP_Classifier_output')
    Var.out_allRank_flpth = os.path.join(out_dir, 'out_allRank.tsv')
    Var.report_dir = os.path.join(run_dir, 'report_dummy10by8_conf%g' % conf)
    os.mkdir(Var.report_dir)
    Var.params = Params(dict(**req, conf=conf))
    cmd_l = ['test,', 'test,', 'dummy10by8', 'conf=%g' % conf]
    html_links = report.HTMLReportWriter(cmd_l=cmd_l).write()
####################################################################################################
####################################################################################################
@pytest.mark.parametrize('i', list(range(5)))
def test_tiny(i):
    """Render a report for each of the five dummyTiny allRank fixtures,
    echoing the fixture contents into the report command line."""
    out_dir = os.path.join(TEST_DATA_DIR, 'return/dummyTiny/return/RDP_Classifier_output')
    Var.out_allRank_flpth = os.path.join(out_dir, 'out_allRank%d.tsv' % i)
    Var.report_dir = os.path.join(run_dir, 'report_dummyTiny_%d' % i)
    os.mkdir(Var.report_dir)
    Var.params = Params(dict(**req, conf=0.55555))
    with open(Var.out_allRank_flpth) as fh:
        allRank_lines = fh.readlines()
    writer = report.HTMLReportWriter(
        cmd_l=['test,', 'test,', 'dummyTiny', 'i=%d' % i] + allRank_lines
    )
    html_links = writer.write()
|
{"hexsha": "7e9937f1af0230d47099832dae80ebd0414b14ab", "size": 3706, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/report_test.py", "max_stars_repo_name": "Tianhao-Gu/kb_RDP_Classifier", "max_stars_repo_head_hexsha": "d1d170275de8e4c62a6a54cf9a329a6cc41e3b56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/report_test.py", "max_issues_repo_name": "Tianhao-Gu/kb_RDP_Classifier", "max_issues_repo_head_hexsha": "d1d170275de8e4c62a6a54cf9a329a6cc41e3b56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/report_test.py", "max_forks_repo_name": "Tianhao-Gu/kb_RDP_Classifier", "max_forks_repo_head_hexsha": "d1d170275de8e4c62a6a54cf9a329a6cc41e3b56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6355140187, "max_line_length": 100, "alphanum_fraction": 0.5221262817, "include": true, "reason": "import numpy", "num_tokens": 813}
|
//
// irace_conf.cpp
// grammar2code
//
// Created by Franco Mascia on 18/03/13.
// Copyright (c) 2013 Franco Mascia. All rights reserved.
//
// This file is distributed under the BSD 2-Clause License. See LICENSE.TXT
// for details.
//
#include "grammar.hpp"
#include "error.hpp"
#include <boost/algorithm/string/join.hpp>
#include <boost/algorithm/string/erase.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <string>
#include <vector>
// Format the irace parameter-declaration prefix for the rule at `path`:
// "<name>\t\"--<name>=\"\t".
std::string grammar::irace_conf::fmt_rule_name(const std::string& path)
{
    const auto name = rule_name(path);
    return name.first + "\t\"--" + name.second + "=\"\t";
}
// Format the irace conditional clause ("\t| <param> %in% c(...)") attached to
// a rule, or an empty string when the rule has no condition.
std::string grammar::irace_conf::fmt_rule_cond(const std::string& path, const std::string& node_name, int rec_index)
{
    const auto cond = rule_cond(path, node_name, rec_index);
    const auto cond_name = rule_name(cond.first);
    if (!cond.first.empty() && !cond.second.empty()) {
        return "\t| " + cond_name.first + " %in% c(" + cond.second + ")";
    }
    return "";
}
// Append one irace parameter line: "<name> <type> (<v1>, <v2>, ...)<cond>".
// `default_value` and `log_scale` are accepted by the interface but not used
// in this output format.
void grammar::irace_conf::fmt_parameter(const std::string& rule_name, const std::string& rule_type, const std::vector<std::string>& values, std::string default_value, bool log_scale, std::string rule_cond)
{
    // irace type codes: i = integer, r = real, c = categorical; recursion is
    // encoded as a categorical choice.
    std::string type;
    if (rule_type == "int") {
        type = "i";
    } else if (rule_type == "real") {
        type = "r";
    } else if (rule_type == "categorical" || rule_type == "recursive") {
        type = "c";
    }
    parameters_.push_back(rule_name + " " + type + " (" + boost::join(values, ", ") + ")" + rule_cond);
}
|
{"hexsha": "0b6763e38d109b0aa7a85f77dd1221d9cb9fa528", "size": 1661, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/irace_conf.cpp", "max_stars_repo_name": "federicoPagnozzi/grammar2code", "max_stars_repo_head_hexsha": "2c1d9e20549390655d0179c4030b7a3ecddc34da", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/irace_conf.cpp", "max_issues_repo_name": "federicoPagnozzi/grammar2code", "max_issues_repo_head_hexsha": "2c1d9e20549390655d0179c4030b7a3ecddc34da", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/irace_conf.cpp", "max_forks_repo_name": "federicoPagnozzi/grammar2code", "max_forks_repo_head_hexsha": "2c1d9e20549390655d0179c4030b7a3ecddc34da", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6607142857, "max_line_length": 205, "alphanum_fraction": 0.6369656833, "num_tokens": 441}
|
from __future__ import division
import logging
from itertools import combinations
from collections import namedtuple
import numpy as NP
import scipy.stats
logger = logging.getLogger('pyrsss.stats.linregress')
"""Linear regression assessment metrics""",
Assessment = namedtuple('Assessment',
'std_error ' \
't_stat '\
'p_value '\
'RSE '\
'R2 '\
'F_stat '\
'Cp '\
'AIC '\
'BIC '\
'adjusted_R2')
def linregress_assessment(y, X, beta_hat):
    """
    Return an :class:`Assessment` of the linear regression *beta_hat* for
    the model specified by *y* and *X* (see :func:`linregress`),
    composed of several standard goodness-of-fit metrics.
    """
    n, p_plus_1 = X.shape
    p = p_plus_1 - 1
    assert len(y) == n
    residuals = y - NP.dot(X, beta_hat)
    RSS = NP.sum(residuals**2)
    RSE = NP.sqrt(RSS / (n - p - 1))
    # Coefficient standard errors: sqrt of the diagonal of sigma^2 (X'X)^-1.
    XtX_inv = NP.linalg.inv(NP.dot(X.T, X))
    std_error = NP.sqrt(NP.diag(RSE**2 * XtX_inv))
    t_stat = beta_hat / std_error
    # Two-sided p-values from the t distribution.
    p_value = [2 * scipy.stats.t.sf(NP.abs(t_i), n - 2) for t_i in t_stat]
    TSS = NP.sum((y - NP.mean(y))**2)
    R2 = 1 - RSS / TSS
    F_stat = (TSS - RSS) / p / (RSS / (n - p - 1))
    sigma_hat = RSE
    d = p
    # Model-selection criteria.
    Cp = (RSS + 2 * d * sigma_hat**2) / n
    AIC = (RSS + 2 * d * sigma_hat**2) / (n * sigma_hat**2)
    BIC = (RSS + NP.log(n) * d * sigma_hat**2) / n
    adjusted_R2 = 1 - (RSS / (n - d - 1)) / (TSS / (n - 1))
    return Assessment(std_error, t_stat, p_value, RSE, R2,
                      F_stat, Cp, AIC, BIC, adjusted_R2)
def linregress(df, y_column):
    r"""
    Compute the linear regression given the model

    .. math::

        \mathbf{y} &= \mathbf{X}\,\boldsymbol{\beta} + \boldsymbol{\epsilon} \\
        \boldsymbol{\epsilon} &\overset{\text{i.i.d.}}{\sim} \mathcal{N}\bigl(\mathbf{0},\, \sigma^2\,\mathbf{I}\bigr)

    where :math:`\boldsymbol{\beta}` are the :math:`p` parameters,
    :math:`y` is the length :math:`n` column in *df* with identifier
    *y_column*, and the :math:`n \times p` matrix
    :math:`\boldsymbol{X}` is constructed from the remaining columns
    in *df*.

    Return the tuple with the estimated parameters in the first
    element and an :class:`Assessment` (see
    :func:`linregress_assessment`) as the second.
    """
    # NOTE: the docstring is now a raw string -- sequences such as \b in
    # \boldsymbol were previously interpreted as escape characters.
    y = df[y_column].values
    n = len(y)
    # Design matrix: an intercept column followed by every non-target column.
    cols = [NP.ones(n)]
    for col in df.columns:
        if col == y_column:
            continue
        cols.append(df[col].values)
    X = NP.column_stack(cols)
    # compute least-squares fit; rcond=None uses the machine-precision-based
    # cutoff and silences NumPy's FutureWarning
    beta_hat, _residuals, _rank, _sv = NP.linalg.lstsq(X, y, rcond=None)
    return beta_hat, linregress_assessment(y, X, beta_hat)
def best_subset_selection(df, y_column):
    """
    Compute the exhaustive multiple linear regression for the
    combination of all parameters of the model *df* with respect to
    *y_column* (see :func:`linregress`). Return the tuple of lists
    with 1) the best parameters, 2) the :class:`Assessment` for the
    best regression at each subset size, and 3) the *df* columns for
    the best fit (best-by-R2 within each subset size k).
    """
    predictor_cols = list(df.columns)
    predictor_cols.remove(y_column)
    n_predictors = len(predictor_cols)
    beta_hat_max = []
    assessment_max = []
    R2_max_col = []
    for k in range(1, n_predictors + 1):
        logger.info('processing {}/{} predictors'.format(k, n_predictors))
        best_beta = None
        best_assessment = None
        best_cols = None
        for col_combo in combinations(predictor_cols, k):
            df_fit = df[list(col_combo) + [y_column]]
            beta_hat, assessment = linregress(df_fit, y_column)
            # Keep the subset with the largest R2 at this size.
            if best_assessment is None or assessment.R2 > best_assessment.R2:
                best_beta = beta_hat
                best_assessment = assessment
                best_cols = list(col_combo)
        beta_hat_max.append(best_beta)
        assessment_max.append(best_assessment)
        R2_max_col.append(best_cols)
    return beta_hat_max, assessment_max, R2_max_col
|
{"hexsha": "5345f02227374146883799c105e891ea94c4b947", "size": 4196, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyrsss/stats/linregress.py", "max_stars_repo_name": "grawe/pyrsss", "max_stars_repo_head_hexsha": "31fd88734b00f814e7aaa5829c4ac49c7bf53563", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyrsss/stats/linregress.py", "max_issues_repo_name": "grawe/pyrsss", "max_issues_repo_head_hexsha": "31fd88734b00f814e7aaa5829c4ac49c7bf53563", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyrsss/stats/linregress.py", "max_forks_repo_name": "grawe/pyrsss", "max_forks_repo_head_hexsha": "31fd88734b00f814e7aaa5829c4ac49c7bf53563", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6776859504, "max_line_length": 114, "alphanum_fraction": 0.5579122974, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1152}
|
'''
Utilities for `optics_line_text_reducer`
----------------------------------------
This module provides utilities used to reduce the polygon-text extractions
for :mod:`panoptes_aggregation.reducers.optics_line_text_reducer`. It
assumes that all extracts are full lines of text in the document.
'''
import numpy as np
import Levenshtein
import collections
import copy
import re
from sklearn.cluster import DBSCAN
from .shape_metric import angle_distance, avg_angle
def strip_tags(s):
    '''Drop square-bracket tags from a string and collapse runs of whitespace.

    Parameters
    ----------
    s : string
        The input string

    Returns
    -------
    clean_s : string
        The cleaned string
    '''
    without_tags = re.sub(r'[\[].*?[\]]', '', s)  # noqa: W605
    return ' '.join(without_tags.split())
def metric(a, b, data_in=[]):
    '''Distance between two drawn lines with associated text: the sum of the
    euclidean distances between the lines' start points and end points, plus
    the Levenshtein distance of their tag-stripped, whitespace-normalized text.

    ``a`` and ``b`` are index pairs rather than raw data so the clustering
    code can use this metric without precomputing the full distance matrix;
    the user index is used to prevent self-clustering.

    Parameters
    ----------
    a : list
        A two element list containing [index mapping to data, index mapping to user]
    b : list
        A two element list containing [index mapping to data, index mapping to user]
    data_in : list
        A list of dicts of the form
        {`x`: [start_x, end_x], `y`: [start_y, end_y], 'text': ['text for line'], 'gold_standard', bool},
        one element per classification made.

    Returns
    -------
    distance: float
        The distance between `a` and `b`; 0 for identical data points and
        ``inf`` for two points from the same user.
    '''
    if a[0] == b[0]:
        # Identical data points.
        return 0
    if a[1] == b[1]:
        # Same user: force the points apart.
        return np.inf
    line_a = data_in[int(a[0])]
    line_b = data_in[int(b[0])]
    # Squared endpoint offsets: index 0 is the start point, index 1 the end.
    endpoint_sq = (np.array(line_a['x']) - np.array(line_b['x']))**2 \
        + (np.array(line_a['y']) - np.array(line_b['y']))**2
    text_distance = Levenshtein.distance(
        strip_tags(line_a['text'][0]),
        strip_tags(line_b['text'][0])
    )
    return np.sqrt(endpoint_sq).sum() + text_distance
def get_min_samples(N):
    '''Get the `min_samples` attribute based on the number of
    users who have transcribed the subject. These values were
    found based on example data from ASM.

    Parameters
    ----------
    N : integer
        The number of users who have seen the subject

    Returns
    -------
    min_samples : integer
        The value to use for the min_samples keyword in OPTICS
    '''
    # Step thresholds for small N; above 20 scale linearly.
    steps = [(6, 2), (10, 3), (15, 4), (20, 5)]
    for upper_bound, min_samples in steps:
        if N <= upper_bound:
            return min_samples
    return int(0.25 * N)
def remove_user_duplication(labels_, core_distances_, users):
    '''Make sure a user shows up in a cluster at most once.

    When a user has several points in one cluster, only the point with the
    smallest core distance keeps its label; the rest are marked as noise (-1).

    Parameters
    ----------
    labels_ : numpy.array
        A list containing the cluster labels for each data point
    core_distances_ : numpy.array
        A list of core distance for each data point
    users : numpy.array
        A list of indices that map to users, one for each data point

    Returns
    -------
    clean_labels_ : numpy.array
        A list containing the new cluster labels.
    '''
    clean_labels = copy.deepcopy(labels_)
    for label in np.unique(labels_):
        if label <= -1:
            # Noise points are left alone.
            continue
        member_idx = np.where(labels_ == label)[0]
        user_counts = collections.Counter(users[member_idx]).most_common()
        if user_counts[0][1] <= 1:
            # No user appears twice in this cluster.
            continue
        for user, count in user_counts:
            if count <= 1:
                # most_common is sorted, so no duplicated users remain.
                break
            duplicate_idx = member_idx[users[member_idx] == user]
            # Keep the point with the smallest core distance (first on ties).
            keep = duplicate_idx[core_distances_[duplicate_idx].argmin()]
            clean_labels[duplicate_idx[duplicate_idx != keep]] = -1
    return clean_labels
def cluster_of_one(X, data, user_ids, extract_index):
    '''Create "clusters of one" out of the data passed in. Lines of text
    identified as noise are kept around as clusters of one so they can be
    displayed in the front-end to the next user.

    Parameters
    ----------
    X : list
        A nx2 list with each row containing [index mapping to data, index mapping to user]
    data: list
        A list containing dictionaries with the original data that X maps to, of the form
        `{'x': [start_x, end_x], 'y': [start_y, end_y], 'text': ['text for line'], 'gold_standard': bool}`.
    user_ids: list
        A list of user_ids (The second column of X maps to this list)
    extract_index: list
        A list of n values with the extract index for each of rows in X

    Returns
    -------
    clusters: list
        A list with n clusters each containing only one classification
    '''
    clusters = []
    for row_number, row in enumerate(X):
        line = data[int(row[0])]
        user_index = int(row[1])
        # Line angle in degrees, mapped onto [0, 360).
        run = line['x'][-1] - line['x'][0]
        rise = line['y'][-1] - line['y'][0]
        clusters.append({
            'clusters_x': line['x'],
            'clusters_y': line['y'],
            'clusters_text': [[word] for word in line['text'][0].split()],
            'number_views': 1,
            'line_slope': np.rad2deg(np.arctan2(rise, run)) % 360,
            'consensus_score': 1.0,
            'consensus_text': ' '.join(line['text'][0].split()),
            'user_ids': [user_ids[user_index]],
            'extract_index': [extract_index[row_number]],
            'gold_standard': [line['gold_standard']],
            'low_consensus': True,
            'flagged': True
        })
    return clusters
def order_lines(frame_in, angle_eps=30, gutter_eps=150):
    '''Place the identified lines within a single frame in reading order

    Lines are first grouped by slope, then each slope group is split into
    columns along the rotated x axis, and finally each column is sorted
    top-to-bottom.

    Parameters
    ----------
    frame_in : list
        A list of identified transcribed lines (one frame from
        panoptes_aggregation.reducers.optics_line_text_reducer.optics_line_text_reducer)
    angle_eps : float
        The DBSCAN `eps` value to use for the slope clustering
    gutter_eps : float
        The DBSCAN `eps` value to use for the column clustering

    Returns
    -------
    frame_ordered : list
        The identified transcribed lines in reading order. The `slope_label` and
        `gutter_label` values are added to each line to indicate what cluster it
        belongs to.
    '''
    if len(frame_in) == 0:
        return frame_in
    # Start/end points and slope for every line in the frame.
    xy_start = np.array([[l['clusters_x'][0], l['clusters_y'][0]] for l in frame_in])
    xy_end = np.array([[l['clusters_x'][1], l['clusters_y'][1]] for l in frame_in])
    slope = np.array([l['line_slope'] for l in frame_in])
    frame = np.array(frame_in)
    frame_ordered = []
    # cluster by angle (min_samples=1 so every line lands in some cluster)
    db_angle = DBSCAN(min_samples=1, eps=angle_eps, metric=angle_distance)
    db_angle.fit(slope.reshape(-1, 1))
    # sort angle clusters by how close their average angle is to horizontal
    distance_to_zero = []
    for l in np.unique(db_angle.labels_):
        cdx = db_angle.labels_ == l
        a = avg_angle(slope[cdx], limit='180')
        distance_to_zero.append([l, a, angle_distance(a, 0)])
    distance_to_zero = np.array(distance_to_zero)
    distance_to_zero = distance_to_zero[distance_to_zero[:, 2].argsort()]
    slope_label = 0
    for angle_row in distance_to_zero:
        # find midpoints of each line in angle cluster
        cdx = db_angle.labels_ == angle_row[0]
        mid_points = (xy_end[cdx] + xy_start[cdx]) / 2
        mid_point = mid_points.mean(axis=0)
        # rotate by this angle (about the mean midpoint) so lines read
        # left-to-right along the rotated x axis
        c = np.cos(np.deg2rad(-angle_row[1]))
        s = np.sin(np.deg2rad(-angle_row[1]))
        R = np.array([[c, s], [-s, c]])
        start_points_rot = np.dot(xy_start[cdx] - mid_point, R) + mid_point
        # cluster in rotated `x` direction to find text columns ("gutters")
        db_start = DBSCAN(min_samples=1, eps=gutter_eps)
        db_start.fit(start_points_rot[:, 0].reshape(-1, 1))
        # sort column clusters left-to-right by mean rotated x
        x_distance_to_zero = []
        for ml in np.unique(db_start.labels_):
            mdx = db_start.labels_ == ml
            x_distance_to_zero.append([ml, start_points_rot[mdx, 0].mean()])
        x_distance_to_zero = np.array(x_distance_to_zero)
        x_distance_to_zero = x_distance_to_zero[x_distance_to_zero[:, 1].argsort()]
        gutter_label = 0
        for x_row in x_distance_to_zero:
            mdx = db_start.labels_ == x_row[0]
            # for each column sort in `y` direction
            y_order = start_points_rot[mdx, 1].argsort()
            # append to final list, tagging each line with its cluster labels
            new_frames = list(frame[cdx][mdx][y_order])
            for nf in new_frames:
                nf['line_slope'] = angle_row[1]
                nf['slope_label'] = slope_label
                nf['gutter_label'] = gutter_label
            frame_ordered += new_frames
            gutter_label += 1
        slope_label += 1
    return frame_ordered
|
{"hexsha": "ad6d0ce65a1a5eec79685b60d10e84b7b1e05568", "size": 9851, "ext": "py", "lang": "Python", "max_stars_repo_path": "panoptes_aggregation/reducers/optics_text_utils.py", "max_stars_repo_name": "alnah005/aggregation-for-caesar", "max_stars_repo_head_hexsha": "b2422f4c007857531ac3ff2636b567adb667dd0c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-04-11T13:44:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T16:39:26.000Z", "max_issues_repo_path": "panoptes_aggregation/reducers/optics_text_utils.py", "max_issues_repo_name": "alnah005/aggregation-for-caesar", "max_issues_repo_head_hexsha": "b2422f4c007857531ac3ff2636b567adb667dd0c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 217, "max_issues_repo_issues_event_min_datetime": "2017-07-27T09:20:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T11:15:33.000Z", "max_forks_repo_path": "panoptes_aggregation/reducers/optics_text_utils.py", "max_forks_repo_name": "hughdickinson/aggregation-for-caesar", "max_forks_repo_head_hexsha": "d6bca0a1126e0397315d5773401c71075c33ee2f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-11-12T21:36:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-07T11:50:03.000Z", "avg_line_length": 35.4352517986, "max_line_length": 107, "alphanum_fraction": 0.6139478226, "include": true, "reason": "import numpy", "num_tokens": 2480}
|
import numpy as np
import tensorflow as tf
from keras.datasets import cifar10
def normalize(x):
    """Standardize pixel intensities using fixed CIFAR-10 statistics.

    The mean and standard deviation are pre-computed constants over the
    CIFAR-10 training set; the small epsilon guards the division.
    """
    pixel_mean = 120.707
    pixel_std = 64.15
    return (x - pixel_mean) / (pixel_std + 1e-7)
def dataset(class_id, image_id, normed=True):
    """Build a single-sample tf.data.Dataset from one CIFAR-10 training image.

    Args:
        class_id: CIFAR-10 class label (0-9) used to filter the training set.
        image_id: index of the desired image within the filtered class subset.
        normed: if True, standardize the pixels with `normalize`.

    Returns:
        A tf.data.Dataset holding one (image, label) pair; the image is
        converted to float32 and expanded to a batch of size 1.
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    # Renamed from `filter` to avoid shadowing the Python builtin.
    class_mask = y_train[:, 0] == class_id
    x_train = x_train[class_mask][image_id]
    y_train = y_train[class_mask][image_id]
    x_train = np.expand_dims(x_train.astype(np.float32), axis=0)
    dataset = tf.data.Dataset.from_tensors(
        (normalize(x_train) if normed else x_train, y_train)
    )
    return dataset
def train(directory, class_id, image_id, normed=True):
    """tf.data.Dataset object for a single CIFAR-10 training image.

    `directory` is accepted for API compatibility but unused: the docstring
    previously said MNIST, which was incorrect — this module loads CIFAR-10
    via `cifar10.load_data`, which manages its own cache location.
    """
    return dataset(class_id, image_id, normed)
def test(directory, class_id, image_id, normed=True):
    """tf.data.Dataset object for a single CIFAR-10 image (test-style entry point).

    Note: the previous docstring said MNIST, which was incorrect — this module
    loads CIFAR-10. `directory` is accepted for API compatibility but unused.
    Like `train`, this delegates to `dataset`, which samples from the CIFAR-10
    training split.
    """
    return dataset(class_id, image_id, normed)
|
{"hexsha": "72b29339be7d9068a284e4b14fdae2b00fbceeee", "size": 914, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nninst/backend/tensorflow/dataset/cifar10.py", "max_stars_repo_name": "uchuhimo/Ptolemy", "max_stars_repo_head_hexsha": "5c8ae188af30ee49d38f27d54c67af2eab9489e7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2020-08-24T07:11:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-13T08:03:42.000Z", "max_issues_repo_path": "src/nninst/backend/tensorflow/dataset/cifar10.py", "max_issues_repo_name": "uchuhimo/Ptolemy", "max_issues_repo_head_hexsha": "5c8ae188af30ee49d38f27d54c67af2eab9489e7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-02-28T17:30:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-15T09:33:00.000Z", "max_forks_repo_path": "src/nninst/backend/tensorflow/dataset/cifar10.py", "max_forks_repo_name": "uchuhimo/Ptolemy", "max_forks_repo_head_hexsha": "5c8ae188af30ee49d38f27d54c67af2eab9489e7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-10-22T09:11:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-16T14:49:34.000Z", "avg_line_length": 28.5625, "max_line_length": 64, "alphanum_fraction": 0.6903719912, "include": true, "reason": "import numpy", "num_tokens": 251}
|
[STATEMENT]
lemma strongly_consistentI: "sym_factor (r\<^sup>+) \<subseteq> sym_factor (r\<^sup>=) \<Longrightarrow> strongly_consistent r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sym_factor (r\<^sup>+) \<subseteq> sym_factor (r\<^sup>=) \<Longrightarrow> strongly_consistent r
[PROOF STEP]
unfolding strongly_consistent_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sym_factor (r\<^sup>+) \<subseteq> sym_factor (r\<^sup>=) \<Longrightarrow> sym_factor (r\<^sup>+) \<subseteq> sym_factor (r\<^sup>=)
[PROOF STEP]
by blast
|
{"llama_tokens": 195, "file": "Szpilrajn_Szpilrajn", "length": 2}
|
\chapter[Limits of Convolutional Neural Networks on FLS Images]{Limits of Convolutional \newline Neural Networks on FLS Images}
\label{chapter:limits}
This chapter deals with a less applied problem than the rest of this thesis. While the use of Deep Neural Networks for different tasks has exploded during the last 5 years, many questions of practical importance remain unanswered \cite{zhang2016understanding}.
A general rule of thumb in machine learning is that more data always improves the generalization ability of a classifier or regressor. For Deep Learning it has been largely assumed that a large dataset is required. But experiments done in this thesis, specially in the previous chapter, show that CNNs can be trained with smaller datasets.
A big problem is then defining what is a "large" or a "small" dataset. In the computer vision community, a large dataset might be ImageNet \cite{russakovsky2015imagenet} with 1.2 million images and 1000 labeled classes, while for the marine robotics community 2000 images might be large, and 200 images with 2-4 classes might be small.
A very common requirement by practitioners is an estimate of how many data points are required to solve a given learning task. Typically as data needs to be gathered, an estimate of how much data is needed would be quite useful. Other effects are also of interest, such as what is the optimal object size for recognition, and what are the best ways to perform transfer learning, and how does that affect the training set size that is required for a given performance target.
In this chapter we would like to explore the following research questions:
\begin{itemize}
\item How does object size affect classification performance? Can small objects be recognized similarly to bigger ones?
\item How much data is required for FLS classification?
\item How can networks be made to require less training data given a classification performance target.
\item How effective is transfer learning in features learned from sonar images?
\end{itemize}
We try to answer these questions from an experimental point of view, by performing different synthetic experiments on our dataset of marine debris objects.
\section{Related Work}
Surprisingly, there is little literature about these research questions, even as they are quite important for practitioners.
Sharif et al. \cite[-3em]{sharif2014cnn} was one of the first to evaluate features learned by a CNN. They used a pre-trained OverFeat \cite[1em]{sermanet2013overfeat} as a feature extractor network, and computed features from the fc1 \footnote[][1em]{First fully connected layer} layer, which corresponds to a $4096$ long vector. This vector is then normalized with the L2 norm and used to train a multi-class SVM. An additional combination using data augmentation is also explored by the authors.
On the PASCAL VOC 2007 dataset, this method obtains better accuracy for 14 out of 21 classes, with a slightly worse accuracy on 7 classes. Note that the state of the art compared in this work is mostly composed of engineered features, like bag of visual words, clustering, and dictionary learning from HoG, SIFT and LBP features. Even as OverFeat is not trained on the same dataset, its features generalize outside of this set quite well.
On the MIT 67 indoor scenes dataset, the authors obtain $69.0$ \% mean accuracy with data augmentation, which is $5$ \% better than the state of the art. This dataset is considerably different from the ImageNet dataset used to train OverFeat.
In order to evaluate a more complex task, the authors used the Caltech-UCSD Birds dataset, where the task is to classify images of 200 different species of birds, where many birds "look alike" and are hard to recognize. Again this simple method outperforms the state of the art by $6$ \%, producing $61.8$ accuracy. This result shows how CNN features outperform engineered ones, even when the task is considerably different from the training set.
This work also shows the importance of data augmentation for computer vision tasks.
Pailhas, Petillot and Capus \cite[-3em]{pailhas2010high} have explored the relationship between sonar resolution and target recognition accuracy. While this is not the same question as we are exploring, it is similar enough to warrant inclusion in this state of the art. This work concentrates on the sonar resolution as a physical property of the device itself, while we want to explore this relation from the image processing point of view.
The authors use a sidescan sonar simulator that produces synthetic sonar images. The background that were considered are a flat seabed, sand ripples, a rocky seabed, and a cluttered environment (rocks). The target are mine-like objects, including six classes (manta, rockan, cuboid, hemisphere, a lying cylinder on the side and a standing one).
The classifier used in this work is based on a Principal Component Analysis representation, that is matched with templates in a training set by means of minimizing a distance in feature space. The authors analyse the use of shadow or highlight features.
For classification using highlight, $95$ \% accuracy is obtained with 5 cm pixel resolution, which is considerably fine grained for a sidescan sonar. In contrast, classification using shadow requires less than 20 cm pixel resolution to obtain close to $100$ \% accuracy, but highlight classification at 20 cm pixel resolution is close to $50$ \%.
This work shows that using the shadow of an object is fundamental for good classification performance, but we believe these results are skewed due to the use of a PCA-based classifier. Other classifiers might perform differently. There is also the issue of the objects used in this work, as marine debris is considerably different in shape variation and lack of shadow information.
Mishkin et al. \cite{mishkin2016systematic} do a systematic evaluation of many network parameters for the ImageNet dataset in the context of image classification. This work consists of a large number of ablation studies, varying activation functions, different kinds of pooling, learning rate policies, pre-processing and normalization, batch size, etc.
Two results from these work are of interest for this chapter. The authors evaluated the effect of varying the input image size, which shows that decreasing the input image has the effect of reducing accuracy from $50$ \% at $224 \times 224$ input size to $30$ \% at $66 \times 66$ pixels. The relationship between input image size and accuracy is almost linear.
One way to offset this loss is to vary the network architecture as a function of the input size, as the authors tried to vary the strides and filter sizes to produce a constant size pooling output, reducing the effect of image size as accuracy only varies from $40$ \% to $45$ \%.
The second result is the variation of the training set size. The authors down-sample the ImageNet dataset to $0.2$ M, $0.4$ M, $0.6$ M, $0.8$ M and 1.2 million images (the original size). Accuracy decreases from $45$ \% at $1.2$ M to $30$ \% at $0.2$ M. The relationship between training set size and accuracy is quite close to linear, as it slowly decreases linearly from $1.2$ M to $0.4$ M, but then decreases more sharply.
While both results are quite interesting, these authors have not controlled for the random weight initialization, and variations of accuracy should be computed. Due to the large size of the ImageNet dataset, it can expected that these kind of evaluation protocol is not available due to the large computational resources required.
\section{Transfer Learning}
\label{lim:secTransferLearning}
In this experiment we evaluate the transfer learning capabilities of three networks we designed: ClassicNet, TinyNet and FireNet.
Our general procedure to evaluate transfer learning in a network is to first define a set of layers $L$ that we wish to evaluate. The features produced as output from these layers are then used to train a multi-class SVM \cite{sharif2014cnn}. In order to produce a fair evaluation of the features, we decided to split the training and testing sets according to the classes they contain, in order to learn features on one set of objects and test them in a different set of objects.
This should aid to verify the generalization capabilities of the network architecture.
We first split the dataset $D$ into datasets $F$ and $T$ by selecting a random subset of $\lfloor \frac{C}{2} \rfloor$ classes and assigning all samples from those classes to $F$, while the remaining classes and their samples are assigned to $T$. As our Marine Debris dataset contains 11 classes, 6 classes are assigned to $F$ and 5 are assigned to $T$. We split both the training and testing splits of our dataset separately, producing $F_{tr}$, $F_{ts}$ and $T_{tr}$, $T_{ts}$.
Dataset $F$ is to learn features by training a network model for classification, while $T$ is used to evaluate features. A given network model is trained on $F_{tr}$ and then for each layer in $L$, features are extracted at that layer from the network model by passing each sample in $T_{tr}$. Then a multi-class linear SVM with regularization coefficient $C = 1$ and decision surface "one-versus-one" is trained on those features \footnote{$C$ was obtained by cross-validation on a small part of the dataset}. Using the same network, features are again extracted using $T_{ts}$ and the SVM is tested on this dataset, producing an accuracy score. We repeat this process $N = 20$ times to account for random initialization of the feature extraction network and compute mean and standard deviation of test accuracy. Note that $F_{ts}$ is not used by this procedure, but it could be used to evaluate test accuracy of the feature extractor.
ClassicNet with 5 modules was tested with four different configurations: 8 or 32 filters, and Batch Normalization or Dropout as regularization. Features are extracted from the batch normalized outputs in each module (layers bn1-5), or from the Max-Pooling outputs in the case of the Dropout configurations (layers mp1-5), and we also include the output from the first fully connected layer (fc1). We used TinyNet with 5 modules and 8 filters per module, and FireNet with 3 modules and 4 filters. For TinyNet, features are extracted at the output of each of the five modules, while for FireNet features are the outputs of each module (three in total) and we also consider the output of the initial convolution (called convS in the figures). Each feature extraction network is trained for 15 epochs with a batch size $B = 64$ using the ADAM optimizer \cite{kingma2014adam} with a learning rate $\alpha = 0.01$. The data is randomly shuffled after each epoch in order to prevent spurious patterns in the data ordering to influence the network.
Our results are shown in Figure \ref{lim:transferLearningNetworks}. A general pattern that appears in all three experimental plots is that testing accuracy decreases with deeper layers/modules in the network. For example, in ClassicNet, features in the fc1 layer have lower accuracy than bn1 and bn2. The same effect can be seen in TinyNet and FireNet, specially as the last module/layer has the lowest testing accuracy. It is also notable that the features in the first layers have $100$ \% accuracy, with zero variation. This can be explained that as shallow features are typically very high dimensional, a linear SVM has a high chance of finding a separating hyperplane and perfectly classifying test data.
ClassicNet feature results are shown in Figure \ref{lim:transferLearningNetworks}\subref*{lim:transferLearningNetworks:classic}. Generalization varies considerably with different layers. 8 filters with Batch Normalization produces quite good generalization, but 32 filters with Dropout has almost the same accuracy, with it being superior for bn5 and fc1. Dropout with 8 filters has a considerable drop in accuracy compared with the other configurations. 32 filters with Dropout seems to be the best option for good generalization, which is consistent with the use of Dropout to both de-correlate neurons and increase their generalization power \cite{srivastava2014dropout}.
\begin{figure*}
\subfloat[ClassicNet]{
\label{lim:transferLearningNetworks:classic}
\begin{tikzpicture}
\begin{axis}[ybar,
xlabel={Layers},
ylabel={Test Accuracy (\%)},
symbolic x coords={bn1, bn2, bn3, bn4, bn5, fc1},
xtick=data,
legend style={at={(0.5,1.15)},anchor=north,legend columns=-1},
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.9\textwidth]
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-ClassicNet-BN-5modules-8filters.csv};
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-ClassicNet-BN-5modules-32filters.csv};
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-ClassicNet-DO-5modules-8filters.csv};
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-ClassicNet-DO-5modules-32filters.csv};
\legend{8 filters with BN, 32 filters with BN, 8 filters with Dropout, 32 filters with Dropout}
\end{axis}
\end{tikzpicture}
}
\subfloat[TinyNet]{
\label{lim:transferLearningNetworks:tiny}
\begin{tikzpicture}
\begin{axis}[ybar,
xlabel={Modules},
ylabel={Test Accuracy (\%)},
xtick=data,
legend style={at={(0.5,1.15)},anchor=north,legend columns=-1},
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45\textwidth]
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-tinyNet5-8.csv};
\end{axis}
\end{tikzpicture}
}
\subfloat[FireNet]{
\label{lim:transferLearningNetworks:fire}
\begin{tikzpicture}
\begin{axis}[ybar,
xlabel={Layers},
ylabel={Test Accuracy (\%)},
symbolic x coords={convS, mod1, mod2, mod3},
xtick=data,
legend style={at={(0.5,1.15)},anchor=north,legend columns=-1},
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45\textwidth]
\addplot+[error bars/.cd, y dir=both,y explicit] table[x = layerIdx, y = meanTransferAcc, y error = stdTransferAcc, col sep = space] {chapters/data/limits/transferLearning-disjoint-fireNet3.csv};
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Transfer Learning on Sonar Images]{Transfer Learning on Sonar Images. Mean test accuracy produced by an SVM trained on features output by different layers. Three networks are shown.}
\label{lim:transferLearningNetworks}
\end{figure*}
Results for TinyNet are shown in Figure \ref{lim:transferLearningNetworks}\subref*{lim:transferLearningNetworks:tiny}, and for FireNet in Figure \ref{lim:transferLearningNetworks}\subref*{lim:transferLearningNetworks:fire}. The shape of both plots is quite similar, with a decrease from the first to the second module, and then an increase, followed by another decrease. It seems that using the layer before the last might have the best generalization performance, but using the first layer has by far the best accuracy.
It should be noted that for both TinyNet and FireNet, their generalization capability is very good, with the minimum accuracy being greater than $96$ \%. Our results also seem to indicate that choosing features from the last layer might not always be the best option, as it has consistently been done by many researchers. This could be a peculiarity of sonar images, and we do not believe it applies for larger datasets.
In summary, we expected that transfer learning using CNN features would perform adequately, and it did; however, it was unexpected that we found a negative correlation between layer depth and test accuracy. For this kind of data and architecture, the best choice is to extract a high-dimensional feature vector from a layer close to the input.
\begin{marginfigure}
\centering
\stackunder[5pt] {
\includegraphics[width = 3cm]{tire-96x96.jpg}
}{$96 \times 96$}
\vspace{5pt}
\stackunder[5pt] {
\includegraphics[width = 2.5cm]{tire-96x96.jpg}
}{$80 \times 80$}
\vspace{5pt}
\stackunder[5pt] {
\includegraphics[width = 2.0cm]{tire-96x96.jpg}
}{$64 \times 64$}
\vspace{5pt}
\stackunder[5pt] {
\includegraphics[width = 1.5cm]{tire-96x96.jpg}
}{$48 \times 48$}
\vspace{5pt}
\stackunder[5pt] {
\includegraphics[width = 1.0cm]{tire-96x96.jpg}
}{$32 \times 32$}
\vspace{5pt}
\stackunder[5pt] {
\includegraphics[width = 0.5cm]{tire-96x96.jpg}
}{$16 \times 16$}
\caption[Example of object scales used for our experiment]{Example of object scales used for our experiment. These images correspond to a Tire.}
\end{marginfigure}
\FloatBarrier
\section{Effect of Object Size}
In this section we experiment with object size, as we would like to investigate how the object size/scale affects classification accuracy.
For this purpose we take the initial $96 \times 96$ image crops and downscale them using bilinear filtering to a predefined size. We use square pixel sizes $s \times s$ with $s \in [16, 32, 48, 64, 80, 96]$. To obtain the size parameters, we started from the natural size of the bounding boxes in our dataset ($96 \times 96$ pixels) and downscaled them in 16 pixel steps, until we reached the smallest size that can be classified by a 2-module ClassicNet, which is 16 pixels (due to downsampling by the use of Max-Pooling). Both the training and testing sets are resized. We evaluate accuracy on the test set. In order to account for the effect of random initialization, we train $N = 20$ networks and compute the mean and standard deviation of accuracy.
We found experimentally \cite{valdenegro2017limits} that the kind of regularization and optimizer that are used greatly affect the results. We evaluate four combinations, using Batch Normalization or Dropout for regularization, and ADAM or SGD for optimizer. All networks for every configuration are trained for $30$ epochs, using a learning rate $\alpha = 0.01$ with a batch size $B = 128$ samples.
We selected three networks to be evaluated: ClassicNet with 2 modules and 32 filters, TinyNet with 5 modules and 8 filters, and FireNet with 3 modules and 4 filters. We only evaluate ClassicNet with both Batch Normalization and Dropout, as it is not appropriate to use Dropout with a fully convolutional network such as TinyNet and FireNet. In these two networks we only use Batch Normalization as regularizer.
We present our results as a plot in Figure \ref{lim:graphicalObjectSize} and as numerical values in Table \ref{lim:numericalObjectSize}. For ClassicNet, we can see that a high accuracy classifier is produced by using both ADAM and Batch Normalization. ADAM with Dropout also produces quite high accuracy but lower than using Batch Normalization. Using SGD produces considerably lower accuracy classifiers, specially when combined with Dropout.
One of the results we expected is that accuracy should decrease with smaller object size, as less information is available for the classifier and it is typical that smaller objects are harder to classify. Our results show that this happens when using SGD on ClassicNet, as accuracy monotonically increases as object size also increases. This is more noticeable with the SGD-Dropout configuration.
But unexpectedly, the ADAM combinations produce high accuracy that seems to be invariant to object size. The ADAM with Batch Normalization combination consistently produces results that are very accurate (only $1.5$ \% from perfect classification) with little variation.
\begin{table*}[!ht]
\centering
\forceversofloat
\begin{tabular}{llll}
\hline
Model / Pixel Size & 16 & 32 & 48\\
\hline
ClassicNet-ADAM-BN & $98.5 \pm 0.5$ \% & $98.6 \pm 0.3$ \% & $98.5 \pm 0.3$ \%\\
ClassicNet-SGD-BN & $85.7 \pm 2.6$ \% & $85.6 \pm 2.7$ \% & $89.9 \pm 4.7$ \%\\
ClassicNet-ADAM-DO & $91.5 \pm 1.5$ \% & $96.6 \pm 0.9$ \% & $97.2 \pm 0.6$ \%\\
ClassicNet-SGD-DO & $13.9 \pm 2.6$ \% & $18.2 \pm 5.5$ \% & $22.3 \pm 5.8$ \%\\
\hline
TinyNet-ADAM-BN & $95.8 \pm 1.1$ \% & $95.2 \pm 1.6$ \% & $93.7 \pm 1.6$ \%\\
TinyNet-SGD-BN & $70.2 \pm 9.7$ \% & $54.2 \pm 10.0$ \% & $39.7 \pm 10.0$ \%\\
\hline
FireNet-ADAM-BN & $93.7 \pm 2.9$ \% & $96.7 \pm 0.7$ \% & $96.1 \pm 1.0$ \%\\
FireNet-SGD-BN & $76.9 \pm 7.5$ \% & $62.6 \pm 9.5$ \% & $56.0 \pm 11.1$ \%\\
\hline
\end{tabular}
\begin{tabular}{llll}
\hline
Model / Pixel Size & 64 & 80 & 96\\
\hline
ClassicNet-ADAM-BN & $98.1 \pm 0.3$ \% & $98.2 \pm 0.5$ \% & $98.1 \pm 0.5$ \%\\
ClassicNet-SGD-BN & $90.1 \pm 1.5$ \% & $93.6 \pm 1.0$ \% & $95.1 \pm 1.0$ \%\\
ClassicNet-ADAM-DO & $96.5 \pm 0.7$ \% & $97.1 \pm 0.6$ \% & $97.5 \pm 0.5$ \%\\
ClassicNet-SGD-DO & $26.1 \pm 7.2$ \% & $39.1 \pm 10.0$ \% & $47.3 \pm 9.5$ \%\\
\hline
TinyNet-ADAM-BN & $89.3 \pm 5.2$ \% & $88.7 \pm 6.0$ \% & $85.0 \pm 9.1$ \%\\
TinyNet-SGD-BN & $36.9 \pm 6.9$ \% & $31.2 \pm 9.0$ \% & $33.0 \pm 5.7$ \%\\
\hline
FireNet-ADAM-BN & $92.1 \pm 2.2$ \% & $90.0 \pm 2.5$ \% & $91.1 \pm 2.6$ \%\\
FireNet-SGD-BN & $46.8 \pm 7.3$ \% & $45.4 \pm 6.4$ \% & $45.6 \pm 7.5$ \%\\
\hline
\end{tabular}
\vspace*{0.5cm}
\caption{Numerical summary of the effect of object size/scale for different CNN models.}
\label{lim:numericalObjectSize}
\end{table*}
TinyNet and FireNet results are not as good as ClassicNet. Both networks seem to have a negative correlation with object size, starting from high accuracy for small objects, and decreasing the precision of their predictions as objects gets bigger. This was quite unexpected. We believe this result can be explained by the fact that as these networks have a considerably lower number of parameters, the number of "acceptable" or "right" values for the weights is smaller, and thus these networks require more data in order to generalize properly.
Comparing these results with Chapter \ref{chapter:sonar-classification}, where we used data augmentation, we can see that not using data augmentation as we do here considerably reduces classifier generalization.
Using ADAM produces acceptable accuracy, but it still decreases slightly with bigger objects. These results also show that FireNet can be considerably more accurate than TinyNet, probably owning to the larger number of parameters.
Our combined results show that the combination of both ADAM and Batch Normalization produce a very good classifier that seems to be invariant to object size. This can be explained as both ADAM and Batch Normalization are adaptive algorithms. ADAM adapts the learning rate with the exponentially running mean of the gradients, so when the optimization process is close to a high-accuracy minima, it can adapt the learning rate in order to consistently reach that minima. SGD alone cannot do this, even if fixed learning rate schedules are used. Gradient information as part of learning rate calculation is a key for this process to succeed.
As a general summary of these results, it is possible to say that a convolutional neural network can be designed and trained in a way that it is approximately invariant to object size. This requires the use of an adaptive learning rate (ADAM) and an appropriate regularization and control of the covariate shift through the use of Batch Normalization. Dropout combined with ADAM also produces a size-invariant classifier but it is less accurate than other configurations.
\begin{figure*}
\subfloat[ClassicNet]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Image size (Pixels)},
ylabel={Test Accuracy (\%)},
ymin=10, ymax=100,
xtick={16, 32, 48, 64, 80, 96},
ytick={20,30,40,50,60,70,80,90,100},
legend style={at={(0.5, 1.05)},anchor=south},
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/classicCNN-BN-ADAM-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN-BN-SGD-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/classicCNN-Dropout-ADAM-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/limits/classicCNN-Dropout-SGD-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{magenta}{0.4}
\legend{ADAM-BN, SGD-BN, ADAM-Dropout, SGD-Dropout}
\end{axis}
\end{tikzpicture}
}
\subfloat[TinyNet and FireNet]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Image size (Pixels)},
ylabel={Test Accuracy (\%)},
ymin=10, ymax=100,
xtick={16, 32, 48, 64, 80, 96},
ytick={20,30,40,50,60,70,80,90,100},
%ytick={0,20,40,60,80,100,120},
legend style={at={(0.5, 1.05)},anchor=south},
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/tinyNetCNN-BN-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/tinyNetCNN-BN-SGD-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/smallFireNetCNN-BN-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/limits/smallFireNetCNN-BN-SGD-AccuracyVsImageSize.csv}{pixelImageSize}{meanAcc}{stdAcc}{magenta}{0.4}
\legend{TinyNet-ADAM-BN, TinyNet-SGD-BN, FireNet-ADAM-BN, FireNet-SGD-BN}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\forceversofloat
\caption[Graphical summary of the effect of object size/scale for different CNN models]{Graphical summary of the effect of object size/scale for different CNN models. The shaded areas represent one $\sigma$ error bars.}
\label{lim:graphicalObjectSize}
\end{figure*}
\FloatBarrier
\section{Effect of Number of Training Samples}
\label{lim:secNumTrainingSamples}
In this section we investigate how many training samples are required for a given generalization target. We do this by a simple but powerful experiment.
The basic idea of this experiment is to take a given training set $T$ and produce sub-sampled version of that dataset. As we are approaching a classification problem, we decided to normalize the number of image samples in each class (abbreviated SPC). We decide a set of SPC values and produce several sub-sampled versions of $T$, where for $T_{i}$ the number of samples per class in that dataset is $i$. This allows comparisons using different SPC values. The testing set is not sub-sampled in anyway in order to enable comparisons. Note that as our dataset is not balanced, and using this procedure will produce a balanced training set, so it is expected that the results will not match the ones from Chapter \ref{chapter:sonar-classification}, but as we are using the same testing set, results are comparable.
As it has been previously done, in order to consider the effect of random initialization, we train $N = 10$ instances of the same network model on each dataset $T_{i}$, but also we must consider the variations in the sub-sampled training set, as sampling is performed randomly. Then we also generate $M = 10$ different training sets with the same value of SPC, and train $N$ networks on each of these sets. After both variations are taken into account, we will train $N \times M = 100$ networks for each value of SPC.
We selected $\text{SPC} \in \{ [1, 2, 3, ..., 20] \cup [25, 30, 35, ..., 50] \cup [60, 70, 80, \newline ..., 150] \}$. The first range is designed to show the differences in generalization with small samples of data, while the other ranges show behaviour with large samples. As our dataset is unbalanced, we only evaluate up to 150 samples per class, which is only two times the number of samples of the class with the least samples.
We evaluate three networks, as has previously been done: ClassicNet with 2 modules and 32 filters, combined with Batch Normalization and Dropout as regularizers; TinyNet with 5 modules and 8 filters; and FireNet with 3 modules and 4 filters. We have also included a linear SVM with $C = 10$ as a comparison baseline.
\begin{figure*}[t]
\forcerectofloat
\centering
\begin{tikzpicture}
\begin{customlegend}[legend columns = 4,legend style = {column sep=1ex}, legend cell align = left,
legend entries={2 Modules-BN, 2 Modules-Dropout, SVM}]
\addlegendimage{mark=none,blue}
\addlegendimage{mark=none,red}
\addlegendimage{mark=none,green}
\end{customlegend}
\end{tikzpicture}
\subfloat[Full Plot]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/classicCNN2L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[Zoom into region SPC $1-30$]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmin=1, xmax=30,
ymin=35, ymax=100,
xtick={1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,25,30},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/classicCNN2L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for ClassicNet with 2 modules]{Samples per Class versus Accuracy for ClassicNet with 2 modules, including error regions.}
\label{lim:classicNetSPCVSAccuracy}
\end{figure*}
ClassicNet results are shown in Figure \ref{lim:classicNetSPCVSAccuracy}. Our results show that these networks scale quite well with the number of samples per class, and the results are comparable with what is produced by the SVM. But it is clear that the SVM outperforms and obtains slightly better accuracy than ClassicNet (both with Batch Normalization and Dropout).
For small samples, approximately less than 15 samples per class, Dropout produced better results than Batch Normalization. This is unexpected as Batch Normalization is considered to be a better regularizer than Dropout, but it seems that when the number of training samples is small, the added noise from Dropout could better regularize the neural network. As the number of samples per class increases, then Batch Normalization dominates and produces slightly better results.
In the large sample case (more than 100 samples per class), ClassicNet outperforms the SVM by a small margin, which is expected and consistent with the results obtained in Chapter \ref{chapter:sonar-classification}. Variations in generalization (accuracy) considerably decrease as more samples are added. The SVM classifier does not seem to have any change in variation of accuracy as a function of the samples per class, unlike the neural networks shown here.
\begin{figure*}[t]
\centering
\begin{tikzpicture}
\begin{customlegend}[legend columns = 4,legend style = {column sep=1ex}, legend cell align = left,
legend entries={TinyNet, FireNet, SVM}]
\addlegendimage{mark=none,blue}
\addlegendimage{mark=none,red}
\addlegendimage{mark=none,green}
\end{customlegend}
\end{tikzpicture}
\subfloat[Full Plot]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/tinyNetCNN-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/fireNetCNN-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[Zoom into region SPC $1-30$]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmin=1, xmax=30,
ymin=35, ymax=100,
xtick={1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,25,30},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.45\textwidth]
\errorband{chapters/data/limits/tinyNetCNN-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/fireNetCNN-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for TinyNet-5 and FireNet-3]{Samples per Class versus Accuracy for TinyNet-5 and FireNet-3, including error regions.}
\label{lim:tinyFireNetSPCVsAccuracy}
\end{figure*}
Results for TinyNet and FireNet are shown in Figure \ref{lim:tinyFireNetSPCVsAccuracy}. For these networks, results show that they perform poorly with less data, especially when the number of samples per class is low, as can be seen in Figure \ref{lim:tinyFireNetSPCVsAccuracy}b. This confirms the results obtained in the previous section, where we saw that training a network with varying image sizes decreased accuracy and generalization with these networks when image size was increased, but the number of samples was kept constant.
In all tested samples per class configurations, FireNet outperformed TinyNet by a considerable margin (up to $8$ \%). This can be expected as FireNet has more parameters than TinyNet, but it is unexpected as we know from Chapter \ref{chapter:sonar-classification} that TinyNet can achieve high accuracy close to $99$ \%. Then the only difference is the quantity of data that is required to learn the model with good generalization.
We believe that as these models have considerably fewer parameters, there are fewer possible combinations of parameters that produce good accuracy and generalization (local or global minima), so it seems more data is required to reach these sets of parameters. The loss function could be quite noisy instead of smooth. This theory is supported by the considerable variation in test accuracy, which stays almost constant as the number of samples is varied. In some cases during the experiment, we saw accuracies of up to $90$ \% as maximum values for SPC 100-150, but still this is a rare example and not a consistent pattern as shown by the mean value.
We are aware that we could have used data augmentation in order to obtain a higher accuracy, but this would only correspond to higher SPC values. We did not perform these tests using data augmentation due to the considerable amount of time it takes for them to run on a GPU (several days), as hundreds of neural networks have to be trained. We leave this for future work.
Table \ref{lim:samplesPerClassVsAccuracy} shows a numerical view of our results, for selected values of the number of samples per class (SPC). A more accurate view of our results can be shown. For 150 samples per class, the baseline SVM obtains $96.9 \pm 0.4$ \% accuracy, while ClassicNet with Batch Normalization gets $97.4 \pm 0.7$ \% and the same network with Dropout obtains $96.6 \pm 2.0$ \%. TinyNet at the same samples per class configuration gets $71.3 \pm 9.7$ \%, and FireNet obtains $78.2 \pm 7.7$ \%.
Evaluating these networks at small sample sizes, approximately $40$ \% accuracy can be obtained with a single sample, which is not too bad, as it is better than the random chance limit for 11 classes ($\frac{100}{11} \% \sim 9.1$ \%), but it does not produce an accurate classifier that can be used for practical applications. If at least $90$ \% accuracy is desired, then at least 30-50 samples per class are required, and no more than 150 samples per class might be required for a high accuracy classifier, as our experiments show.
\begin{table}[t]
\forcerectofloat
\begin{tabular}{llll}
\hline
Method/SPC & 1 & 5 & 10 \\
\hline
ClassicNet-2-BN & $37.8 \pm 12.0$ \% & $58.2 \pm 14.8$ \% & $66.6 \pm 14.2$ \%\\
ClassicNet-2-DO & $39.1 \pm 7.4$ \% & $67.7 \pm 9.9$ \% & $72.9 \pm 9.0$ \%\\
\hline
TinyNet-5-8 & $19.3 \pm 5.6$ \% & $23.4 \pm 6.7$ \% & $23.9 \pm 6.8$ \%\\
FireNet-3-4 & $26.5 \pm 5.9$ \% & $35.4 \pm 8.9$ \% & $35.4 \pm 9.1$ \%\\
\hline
SVM & $51.9 \pm 4.2$ \% & $77.5 \pm 3.3$ \% & $84.7 \pm 3.5$ \%\\
\hline
\end{tabular}
\begin{tabular}{llll}
\hline
Method/SPC & 30 & 50 & 100\\
\hline
ClassicNet-2-BN & $90.9 \pm 3.2$ \% & $93.5 \pm 1.5$ \% & $96.6 \pm 0.7$ \%\\
ClassicNet-2-DO & $89.9 \pm 2.8$ \% & $92.5 \pm 3.2$ \% & $96.2 \pm 1.6$ \%\\
\hline
TinyNet-5-8 & $37.6 \pm 8.9$ \% & $47.2 \pm 8.7$ \% & $64.4 \pm 9.6$ \%\\
FireNet-3-4 & $55.5 \pm 10.1$ \% & $62.9 \pm 10.5$ \% & $72.9 \pm 8.7$ \%\\
\hline
SVM & $92.7 \pm 1.1$ \% & $94.6 \pm 0.7$ \% & $96.9 \pm 0.3$ \%\\
\hline
\end{tabular}
\caption[Mean and standard deviation of test accuracy as the number of samples per class is varied]{Mean and standard deviation of test accuracy as the number of samples per class is varied, for selected values of SPC.}
\label{lim:samplesPerClassVsAccuracy}
\end{table}
We also obtained experimental results using different module configurations of ClassicNet. We varied the number of modules from two to four, and these results can be seen in Figure \ref{lim:classicNetSPCVsAccuracyMultipleModules}. This figure shows that our results have little variation even as different numbers of modules are used. Some configurations, like using 3 modules with Batch Normalization, seem to generalize slightly better, which can be seen as accuracy closes up to $98$ \%.
As a summary, we can say that training a convolutional neural network (like ClassicNet) does not require the use of very large datasets, and good results can be obtained with only 30-50 samples per class. Testing accuracy will increase as one adds more samples, but the gains diminish as samples are added, which can be expected as a natural phenomenon. If very high accuracy (over $99$ \%) is desired, then large datasets are needed, and this falls out of the scope of our experimental results.
\begin{figure*}[t]
\centering
\vspace*{-2cm}
\begin{tikzpicture}
\begin{customlegend}[legend columns = 3,legend style = {column sep=1ex}, legend cell align = left,
legend entries={2 Modules-BN, 2 Modules-Dropout, 3 Modules-BN, 3 Modules-Dropout, 4 Modules-BN, 4 Modules-Dropout, SVM}]
\addlegendimage{mark=none,blue}
\addlegendimage{mark=none,red}
\addlegendimage{mark=none,brown}
\addlegendimage{mark=none,magenta}
\addlegendimage{mark=none,orange}
\addlegendimage{mark=none,purple}
\addlegendimage{mark=none,green}
\end{customlegend}
\end{tikzpicture}
\subfloat[Full Plot]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN2L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/classicCNN3L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{brown}{0.4}
\errorband{chapters/data/limits/classicCNN3L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{magenta}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[Zoom into region 1-30]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmin=1, xmax=30,
ymin=35, ymax=100,
xtick={1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,25,30},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN2L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/classicCNN3L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{brown}{0.4}
\errorband{chapters/data/limits/classicCNN3L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{magenta}{0.4}
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[SVM]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.25\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/SVM-C10-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-2-BN]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN2L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{blue}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-2-Dropout]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN2L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{red}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-3-BN]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN3L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{brown}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-3-Dropout]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN3L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{magenta}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-4-BN]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN4L-BN-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{orange}{0.4}
\end{axis}
\end{tikzpicture}
}
\subfloat[ClassicNet-4-Dropout]{
\begin{tikzpicture}
\begin{axis}[
xlabel={Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=35, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110,120,130,140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.24\textheight,
width = 0.32\textwidth]
\errorband{chapters/data/limits/classicCNN4L-Dropout-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{purple}{0.4}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for different ClassicNet configurations]{Samples per Class versus Accuracy for different ClassicNet configurations, varying the number of modules from two to four. Error regions are also displayed.}
\label{lim:classicNetSPCVsAccuracyMultipleModules}
\end{figure*}
\FloatBarrier
\section{Combining Transfer Learning with Variations of the Number of Training Samples}
In this section we combine the ideas of Section \ref{lim:secTransferLearning} and Section \ref{lim:secNumTrainingSamples}, into evaluating how transfer learning can be used to make a CNN that can produce good generalization with small number of samples.
\subsection{Varying the Training Set Size}
In this section we perform the first experiment, which consists of simply splitting the dataset as Section \ref{lim:secTransferLearning} recommended, but we use the splits differently. Our basic idea is that we will vary the number of samples per class in $T_{tr}$, while the rest of the procedure is kept the same. We use $\text{SPC} \in [1,10, 20, 30, ..., 150]$.
Then the idea is to train a CNN model in $F_{tr}$, and then subsample $T_{tr}$ to a given number of samples per class, and then train a multi-class linear SVM on $T_{tr}$ with $C = 1$ and decision surface "one-versus-one" and test this trained SVM on $T_{ts}$, after extracting features again. Motivated by the results produced by an SVM in the previous section, we believe this can show that less samples can be required by a combination of feature learning and an SVM classifier than just using a CNN to do both feature extraction and classification.
We also evaluate the effect of using the same set of objects in $F$ and $T$, or selecting a disjoint set of objects between $F$ and $T$. This could potentially show how learned features generalize outside their training set. We extract features from the \textit{fc1} layer of ClassicNet, as it is the usual approach when performing transfer learning in CNNs \cite{sharif2014cnn}.
Our results are shown in Figure \ref{lim:transferLearningSPCVsAccuracy}. In this figure we include the results from the previous section as a comparison. For different objects, we can see that learned features both outperform an SVM and the baseline networks by a considerable margin, especially when the number of samples is low. For a single sample per class, ClassicNet-BN-TL produces approximately $76$ \% accuracy, while ClassicNet-Dropout-TL produces $71$ \%. This is a considerable improvement over training a CNN, which produces accuracy no better than $40$ \%.
\begin{figure*}[t]
\subfloat[Different Objects]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={1,10,20,30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/limits/classicCNN-BN-TransferLearningVsTrainSetSize-disjointClasses.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN-Dropout-TransferLearningVsTrainSetSize-disjointClasses.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/classicCNN2L-BN-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{magenta}{0.4}
\errorband{chapters/data/limits/SVM-C10-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\legend{ClassicNet-BN-TL, ClassicNet-Dropout-TL, ClassicNet-BN, ClassicNet-Dropout, SVM}
\end{axis}
\end{tikzpicture}
}
\subfloat[Same Objects]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={1,10,20,30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.45 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/limits/classicCNN-BN-TransferLearningVsTrainSetSize-sameClasses.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/limits/classicCNN-Dropout-TransferLearningVsTrainSetSize-sameClasses.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/limits/classicCNN2L-BN-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/limits/classicCNN2L-Dropout-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{magenta}{0.4}
\errorband{chapters/data/limits/SVM-C10-noSmall-AccuracyVsTrainSetSize.csv}{samplesPerClass}{meanAcc}{stdAcc}{green}{0.4}
\legend{ClassicNet-BN-TL, ClassicNet-Dropout-TL, ClassicNet-BN, ClassicNet-Dropout, SVM}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\forcerectofloat
\caption[Samples per Class versus Accuracy for Transfer Learning using an SVM]{Samples per Class versus Accuracy for Transfer Learning using an SVM. In this figure we only vary the number of samples per class used to train a SVM on features learned by ClassicNet.}
\label{lim:transferLearningSPCVsAccuracy}
\end{figure*}
In the same case, but sharing objects between $F$ and $T$, produces $80$ \% accuracy for the Dropout network, and $91$ \% for the Batch Normalized network. This shows that learning features with a CNN is key to obtaining good generalization, even when the number of samples is small. We believe that these results show that feature learning introduces some additional information that produces invariances into the learned features, which can then be exploited by the SVM trained on those features, producing a better generalization result.
Considering now ten samples per class. In the case of different objects, both networks produce generalization that is very close to $90$ \% accuracy, while for the same objects Dropout produces $93$ \% accuracy, and Batch Normalization $96$ \%. Both are results that can be considered usable for practical applications.
Now considering large sample sizes (more than 30 samples per class), the performance of the learned features is not considerably different from learning a classifier network from the data directly. This means the only advantage of learning features is when one has a small number of samples to train. Only in the case of using the same objects the generalization of feature learning is slightly better than the baselines from the previous section.
\FloatBarrier
\subsection{Varying the Training and Transfer Sets Sizes}
Motivated by the results in the previous section, we now repeat the last experiment, but we vary both the sizes of $F$ and $T$ by means of sub-sampling them to a fixed number of samples per class. We again use $\text{SPC} \in [1,10, 20, 30, ..., 150]$ for sub-sampling both sets.
We perform this experiment in order to know how many samples are actually needed, as we split the original training set into $F$ and $T$, we would like to know how many samples are needed for feature learning ($F$) and how many could potentially be used to train an SVM on those learned features ($T$).
For this experiment we do not perform any comparison with previous results, as we are pursuing a different question. Results are presented in Figures \ref{lim:tlBothSPCVsAccuracyClassicNetBNDifferent} and \ref{lim:tlBothSPCVsAccuracyClassicNetBNSame} for the Batch Normalized networks, and \ref{lim:tlBothSPCVsAccuracyClassicNetDropoutDifferent} and \ref{lim:tlBothSPCVsAccuracyClassicNetDropoutSame} for the networks using Dropout. In order to facilitate comparison in these figures, we split the variations of sub-sampling $F$ into different plots, aggregated as three sub-figures.
Results with different objects show that a single sample for feature learning performs poorly (as it could be expected), but this effect is much more noticeable with Dropout than with features learned by Batch Normalization. Using Dropout in this case produces generalization that quickly saturates to $50$ \% accuracy, which is far from ideal. The Batch Normalized features perform considerably better, overcoming the $80$ \% barrier without any problem.
Adding more samples to train the feature extractor improves transfer learning performance, which can be seen as features learned over ten samples per class have an improvement of $10$ \% in the Batch Normalization case, and more than $25$ \% in the case of Dropout. It can be seen that adding more samples per class for feature learning quickly saturates and performance increments diminish, starting from 40 samples per class in the Batch Normalization case, and 30 samples per class for Dropout features.
Performance of using a single sample to train the SVM ($T$) over learned features is the one most affected by the number of samples used to learn those features ($F$), as accuracy starts at $40$ \% and increases to $70$ \% with 60-70 samples per class in $F$, but also saturates and stops improving after using over 100 samples.
As the results from the previous section showed, in all cases generalization saturates at $95$ \% accuracy and it does not improve further than this point. In order to reliably obtain such generalization, 150 or more samples per class are needed.
\begin{figure*}[p]
\vspace*{-3cm}
\subfloat[1-50]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC1.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC10.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC20.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC30.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC40.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 1, Feature 10, Feature 20, Feature 30, Feature 40}
\end{axis}
\end{tikzpicture}
}
\subfloat[60-100]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC60.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC70.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC80.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC90.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC100.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 60, Feature 70, Feature 80, Feature 90, Feature 100}
\end{axis}
\end{tikzpicture}
}
\subfloat[110-150]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC110.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC120.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC130.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC140.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC150.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 110, Feature 120, Feature 130, Feature 140, Feature 150}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for ClassicCNN-BN Transfer Learning with different objects]{Samples per Class versus Accuracy for ClassicCNN-BN Transfer Learning with different objects. In this figure we vary both the samples per class to train the feature extractor (as different plots) and the samples for training the SVM for the target classes. Note that the scale of each figure is different.}
\label{lim:tlBothSPCVsAccuracyClassicNetBNDifferent}
\end{figure*}
\begin{figure*}[p]
\vspace*{-3cm}
\subfloat[1-50]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC1.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC10.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC20.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC30.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC40.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 1, Feature 10, Feature 20, Feature 30, Feature 40}
\end{axis}
\end{tikzpicture}
}
\subfloat[60-100]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC60.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC70.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC80.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC90.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC100.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 60, Feature 70, Feature 80, Feature 90, Feature 100}
\end{axis}
\end{tikzpicture}
}
\subfloat[110-150]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC110.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC120.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC130.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC140.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-disjointClasses-transferSPC150.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 110, Feature 120, Feature 130, Feature 140, Feature 150}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for ClassicCNN-Dropout Transfer Learning with different objects]{Samples per Class versus Accuracy for ClassicCNN-Dropout Transfer Learning with different objects. In this figure we vary both the samples per class to train the feature extractor (as different plots) and the samples for training the SVM for the target classes. Note that the scale of each figure is different.}
\label{lim:tlBothSPCVsAccuracyClassicNetDropoutDifferent}
\end{figure*}
\FloatBarrier
Results using the same objects for feature learning show improved generalization over using different objects. This is acceptable, as the learned features have a natural bias to well represent the learned objects. We believe that this invariance can be considerably improved with more data and variation among object classes.
In this case, achieving $95$ \% accuracy reliably requires only 40 samples per class for feature learning ($F$). Performance at a single sample per class for $T$ also improves considerably with more feature learning samples, starting at $40$ \% and increasing to $80$ \% for 40 samples per class, and it further increases up to $90$ \% when more samples are used for feature learning.
The same case of a single sample for training $T$ shows that Batch Normalization features are superior, as BN produces $50$ \% accuracy versus less than $40$ \% for Dropout. When more samples are added to $F$, single sample $T$ performance improves considerably, reaching more than $80$ \% with BN features and $70$ \% with Dropout. As more samples are used to $F$, performance continues to slowly improve, eventually achieving $98$ \% accuracy reliably with 100 samples per class in $F$. In the case of a large number of samples in $F$, Batch Normalization is still superior, reaching the $98$ \% barrier more consistently than Dropout.
\begin{figure*}[p]
\vspace*{-3cm}
\subfloat[1-50]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC1.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC10.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC20.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC30.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC40.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 1, Feature 10, Feature 20, Feature 30, Feature 40}
\end{axis}
\end{tikzpicture}
}
\subfloat[60-100]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,98,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC60.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC70.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC80.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC90.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC100.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 60, Feature 70, Feature 80, Feature 90, Feature 100}
\end{axis}
\end{tikzpicture}
}
\subfloat[110-150]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=80, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,97,98,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC110.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC120.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC130.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC140.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-BN-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC150.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 110, Feature 120, Feature 130, Feature 140, Feature 150}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for ClassicCNN-BN Transfer Learning with same objects]{Samples per Class versus Accuracy for ClassicCNN-BN Transfer Learning with same objects. In this figure we vary both the samples per class to train the feature extractor (as different plots) and the samples for training the SVM for the target classes. Note that the scale of each figure is different.}
\label{lim:tlBothSPCVsAccuracyClassicNetBNSame}
\end{figure*}
\begin{figure*}[p]
\vspace*{-3cm}
\subfloat[1-50]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
ylabel={Test Accuracy (\%)},
xmax = 150,
ymin=40, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={30,40,50,60,70,80,90,95,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC1.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC10.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC20.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC30.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC40.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 1, Feature 10, Feature 20, Feature 30, Feature 40}
\end{axis}
\end{tikzpicture}
}
\subfloat[60-100]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=70, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,98,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC60.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC70.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC80.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC90.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC100.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 60, Feature 70, Feature 80, Feature 90, Feature 100}
\end{axis}
\end{tikzpicture}
}
\subfloat[110-150]{
\begin{tikzpicture}
\begin{axis}[xlabel={SVM Samples per Class},
xmax = 150,
ymin=80, ymax=100,
xtick={1,10,20,30,40,50,60,70,80,90,100,110, 120, 130, 140,150},
ytick={70,75,80,85,90,95,97,98,100},
x tick label style={font=\tiny, rotate=90},
legend pos=south east,
ymajorgrids=true,
grid style=dashed,
height = 0.3\textheight,
width = 0.33 \textwidth,
legend style={font=\tiny}]
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC110.csv}{spc}{meanAcc}{stdAcc}{blue}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC120.csv}{spc}{meanAcc}{stdAcc}{red}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC130.csv}{spc}{meanAcc}{stdAcc}{green}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC140.csv}{spc}{meanAcc}{stdAcc}{cyan}{0.4}
\errorband{chapters/data/classicCNN-Dropout-TransferLearningVsTrainAndTransferSetSize-sameClasses-transferSPC150.csv}{spc}{meanAcc}{stdAcc}{brown}{0.4}
\legend{Feature 110, Feature 120, Feature 130, Feature 140, Feature 150}
\end{axis}
\end{tikzpicture}
}
\vspace*{0.5cm}
\caption[Samples per Class versus Accuracy for ClassicCNN-Dropout Transfer Learning with same objects]{Samples per Class versus Accuracy for ClassicCNN-Dropout Transfer Learning with same objects. In this figure we vary both the samples per class to train the feature extractor (as different plots) and the samples for training the SVM for the target classes. Note that the scale of each figure is different.}
\label{lim:tlBothSPCVsAccuracyClassicNetDropoutSame}
\end{figure*}
Two clear conclusions can be obtained from these experiments: High generalization ($95$ \% accuracy) can be achieved with small samples (10-30 samples per class for both $T$ and $F$) but only if the same objects are used for both sets. This implies that generalization outside of the training set will probably be reduced. The second conclusion is that if $T$ and $F$ do not share objects, there will be a performance hit compared to sharing objects, but even in this case learning features will still improve generalization when compared to training a CNN over the same data.
It has to be mentioned that our results show that by using the same data, but changing the training procedure, a considerable improvement in generalization can be obtained, even when using low samples to learn features ($F$) and to train a SVM on those features ($T$).
\section{Summary of Results}
In this chapter we have explored different limitations in the use of convolutional neural networks with forward-looking sonar data.
First we evaluated how transfer learning performs in these images with varying neural networks and layer configurations. We found out that all layers produce very good features that can discriminate classes with good accuracy, but as depth increases, features become slightly less discriminative, which was unexpected. The best features are produced by layers that are close to the input.
Then we evaluated how changing the input size affects generalization. We found that ClassicNet can be trained to have the same generalization independent of the object size, but TinyNet and FireNet exhibit decreasing accuracy as objects become bigger. This was unexpected and shows that these networks require more training data than ClassicNet. Our results also indicate that it is possible to also reduce the input image size as a way to reduce the number of parameters and computation required, improving computational performance.
We also have evaluated the relationship between the number of training samples and generalization produced by a CNN. ClassicNet scales quite well with the number of samples per class in the training set, and requires 30-50 samples per class to reach $90$ \% accuracy. Training using Dropout seems to be slightly better than Batch Normalization in the low sample case, but Batch Normalization is better when many samples are available. TinyNet and FireNet scale poorly with the number of samples, producing less generalization than ClassicNet. This confirms our previous results that indicated these networks require more training data than ClassicNet, even though they have fewer parameters. In theory, networks with fewer parameters require less data to be trained, but these models seem to require more data for a given accuracy target.
Finally we evaluated the combination of feature learning and how it affects generalization as a function of the size of the training set. We learn features in one part of the dataset, and use the other part to train a linear SVM that is evaluated on a test set. Our results show that when learning features on a dataset that shares objects, accuracy increases to over $90$ \% when using a single sample per class to train an SVM. If feature learning is performed on a different set of objects, then single image per class accuracy can only reach $70-80$ \%, but it is still a considerable improvement over training the network on the same sub-sampled dataset.
Our last experiment evaluated transfer learning by varying both the samples per class in the feature learning dataset ($F$) and the SVM training dataset ($T$). We found out that high generalization, at $95$ \% accuracy, can be obtained with small datasets in the order of $10-30$ samples per class for $F$ and $T$, but only if the same objects are used in both datasets. In the case of learning features in one set of objects, and training an SVM for a different one, then more data is required to achieve $95$ \% accuracy, in the order of 100 $T$ samples per class and $40-50$ feature learning ($F$) samples.
We expect that our results will contribute to the discussion about how many samples are actually required to use Deep Neural Networks on different kinds of images. For the marine robotics community, we expect that our argument is convincing and more use of neural networks can be seen in the field.
|
{"hexsha": "61b8d6fde17bf6cbcab645f29224b273e9ff8af4", "size": 80400, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/limits-neural-networks.tex", "max_stars_repo_name": "mvaldenegro/phd-thesis", "max_stars_repo_head_hexsha": "ebc92c443d2100ccd030a118e5a1c24f0c4b105d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/limits-neural-networks.tex", "max_issues_repo_name": "mvaldenegro/phd-thesis", "max_issues_repo_head_hexsha": "ebc92c443d2100ccd030a118e5a1c24f0c4b105d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-03-09T12:53:54.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-16T10:33:21.000Z", "max_forks_repo_path": "chapters/limits-neural-networks.tex", "max_forks_repo_name": "mvaldenegro/phd-thesis", "max_forks_repo_head_hexsha": "ebc92c443d2100ccd030a118e5a1c24f0c4b105d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 66.7219917012, "max_line_length": 1040, "alphanum_fraction": 0.7548880597, "num_tokens": 23801}
|
"""This lobe enables the integration of huggingface pretrained wav2vec2/hubert/wavlm models.
Reference: https://arxiv.org/abs/2006.11477
Reference: https://arxiv.org/abs/1904.05862
Reference: https://arxiv.org/abs/2110.13900
Transformer from HuggingFace needs to be installed:
https://huggingface.co/transformers/installation.html
Authors
* Titouan Parcollet 2021
* Boumadane Abdelmoumene 2021
"""
import os
import torch
import logging
import pathlib
import numpy as np
import torch.nn.functional as F
from torch import nn
from huggingface_hub import model_info
from speechbrain.pretrained.fetching import fetch
# We check if transformers is installed.
try:
import transformers
from transformers import Wav2Vec2Model, HubertModel, WavLMModel, Data2VecAudioModel
from transformers import Wav2Vec2Config, HubertConfig, WavLMConfig, Data2VecAudioConfig
from transformers import Wav2Vec2FeatureExtractor
from transformers import Wav2Vec2ForPreTraining
from transformers.models.wav2vec2.modeling_wav2vec2 import (
_compute_mask_indices,
)
except ImportError:
MSG = "Please install transformers from HuggingFace to use wav2vec2 / Hubert\n"
MSG += "E.G. run: pip install transformers"
raise ImportError(MSG)
logger = logging.getLogger(__name__)
# Lookup tables mapping a model-family keyword (matched against the checkpoint
# name in HuggingFaceWav2Vec2.__init__) to the corresponding HuggingFace
# model class and configuration class.
HF_models = {
    "wav2vec2": Wav2Vec2Model,
    "hubert": HubertModel,
    "wavlm": WavLMModel,
    "data2vec": Data2VecAudioModel,
}
HF_config = {
    "wav2vec2": Wav2Vec2Config,
    "hubert": HubertConfig,
    "wavlm": WavLMConfig,
    "data2vec": Data2VecAudioConfig,
}
class HuggingFaceWav2Vec2(nn.Module):
    """This lobe enables the integration of HuggingFace and SpeechBrain
    pretrained wav2vec2.0/Hubert models.
    Source paper wav2vec2.0: https://arxiv.org/abs/2006.11477
    Source paper Hubert: https://arxiv.org/abs/2106.07447
    Transformer from HuggingFace needs to be installed:
    https://huggingface.co/transformers/installation.html
    The model can be used as a fixed feature extractor or can be finetuned. It
    will download automatically the model from HuggingFace or use a local path.
    Arguments
    ---------
    source : str
        HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
    save_path : str
        Path (dir) of the downloaded model.
    output_norm : bool (default: True)
        If True, a layer_norm (affine) will be applied to the output obtained
        from the wav2vec model.
    freeze : bool (default: True)
        If True, the model is frozen. If False, the model will be trained
        alongside with the rest of the pipeline.
    freeze_feature_extractor : bool (default: False)
        When freeze = False and freeze_feature_extractor True, the
        feature_extractor module of the model is frozen. If False,
        all the wav2vec model will be trained including the
        feature_extractor module.
    apply_spec_augment : bool (default: False)
        If True, the model will apply spec augment on the output of feature extractor
        (inside huggingface Wav2VecModel() class).
        If False, the model will not apply spec augment. We set this to false to prevent from doing it twice.
    load_pretrained_weights : bool (default: True)
        If True, the pretrained model weights are downloaded and loaded. If
        False, only the architecture is instantiated (randomly initialized
        parameters, built from the pretrained config).
    Example
    -------
    >>> inputs = torch.rand([10, 600])
    >>> model_hub = "facebook/wav2vec2-base-960h"
    >>> save_path = "savedir"
    >>> model = HuggingFaceWav2Vec2(model_hub, save_path)
    >>> outputs = model(inputs)
    """

    def __init__(
        self,
        source,
        save_path,
        output_norm=True,
        freeze=True,
        freeze_feature_extractor=False,
        apply_spec_augment=False,
        load_pretrained_weights=True,
    ):
        super().__init__()
        # Download the extractor from HuggingFace.
        # The extractor is only used to retrieve the normalisation information.
        self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            source, cache_dir=save_path
        )
        # Select specific self-supervised loader (eg. Wav2Vec2, Hubert)
        # based on a keyword match against the checkpoint name.
        if "hubert" in source:
            config = HF_config.get("hubert")
            model = HF_models.get("hubert")
        elif "wavlm" in source:
            config = HF_config.get("wavlm")
            model = HF_models.get("wavlm")
        elif "data2vec" in source:
            config = HF_config.get("data2vec")
            model = HF_models.get("data2vec")
        else:
            config = HF_config.get("wav2vec2")
            model = HF_models.get("wav2vec2")
        # Download and load the model
        self._from_pretrained(
            source,
            config=config,
            model=model,
            save_path=save_path,
            load_weights=load_pretrained_weights,
        )
        # set apply_spec_augment
        self.model.config.apply_spec_augment = apply_spec_augment
        # We check if inputs need to be normalized w.r.t pretrained wav2vec2
        self.normalize_wav = self.feature_extractor.do_normalize
        self.freeze = freeze
        self.freeze_feature_extractor = freeze_feature_extractor
        self.output_norm = output_norm
        if self.freeze:
            logger.warning(
                "speechbrain.lobes.models.huggingface_wav2vec - wav2vec 2.0 is frozen."
            )
            self.model.eval()
            for param in self.model.parameters():
                param.requires_grad = False
        else:
            self.model.train()
            if self.freeze_feature_extractor:
                self.model.feature_extractor._freeze_parameters()

    def _from_pretrained(self, source, config, model, save_path, load_weights):
        """This function manages the source checking and loading of the params.
        # 1. Is the model from HF or a local path
        # 2. Is the model pretrained with HF or SpeechBrain
        # 3. Download (if appropriate) and load with respect to 1. and 2.
        """
        is_sb, ckpt_file = self._check_model_source(source)
        if not load_weights:
            # Architecture only: build the model from its (pretrained) config
            # with randomly initialized parameters.
            config = config.from_pretrained(source, cache_dir=save_path)
            self.model = model(config)
        elif is_sb:
            # SpeechBrain checkpoint: build the architecture from the config,
            # then transfer the parameters from the fetched .ckpt file.
            config = config.from_pretrained(source, cache_dir=save_path)
            self.model = model(config)
            self.model.gradient_checkpointing_disable()  # Required by DDP
            # fetch the checkpoint file
            ckpt_full_path = fetch(
                filename=ckpt_file, source=source, savedir=save_path
            )
            # We transfer the parameters from the checkpoint.
            self._load_sb_pretrained_w2v2_parameters(ckpt_full_path)
        else:
            # HuggingFace checkpoint. load_weights is guaranteed True here
            # (the False case was handled above), so the previous nested
            # `if load_weights: ... else: self.model = model()` contained an
            # unreachable (and broken, config-less) branch; load directly.
            self.model = model.from_pretrained(source, cache_dir=save_path)

    def _load_sb_pretrained_w2v2_parameters(self, path):
        """Loads the parameter of a w2v2 model pretrained with SpeechBrain and the
        HuggingFaceWav2Vec2Pretrain Object. It is necessary to perform a custom
        loading because HuggingFace adds a level to the checkpoint when storing
        the model breaking the compatibility between HuggingFaceWav2Vec2Pretrain
        and HuggingFaceWav2Vec2.
        In practice a typical HuggingFaceWav2Vec2 checkpoint for a given parameter
        would be: model.conv.weight.data while for HuggingFaceWav2Vec2Pretrain it
        is: model.wav2vec2.weight.data (wav2vec2 must be removed before loading).

        Arguments
        ---------
        path : str
            Full path to the SpeechBrain-pretrained checkpoint file.
        """
        modified_state_dict = {}
        orig_state_dict = torch.load(path, map_location="cpu")
        # We remove the .wav2vec2 in the state dict.
        for key, params in orig_state_dict.items():
            if "wav2vec2." in key:
                save_key = key.replace("model.wav2vec2.", "")
                modified_state_dict[save_key] = params
        # strict=False: report, but do not fail on, key mismatches.
        incompatible_keys = self.model.load_state_dict(
            modified_state_dict, strict=False
        )
        for missing_key in incompatible_keys.missing_keys:
            logger.warning(
                f"During parameter transfer to {self.model} loading from "
                + f"{path}, the transferred parameters did not have "
                + f"parameters for the key: {missing_key}"
            )
        for unexpected_key in incompatible_keys.unexpected_keys:
            logger.warning(
                f"The param with the key: {unexpected_key} is discarded as it "
                + "is useless for wav2vec 2.0 finetuning."
            )

    def _check_model_source(self, path):
        """Checks if the pretrained model has been trained with SpeechBrain and
        is hosted locally or on a HuggingFace hub.

        Arguments
        ---------
        path : str
            Local directory or HuggingFace hub name.

        Returns
        -------
        (bool, str)
            Whether the checkpoint is a SpeechBrain one, and the checkpoint
            filename ("" for a HuggingFace-native local model).
        """
        checkpoint_filename = ""
        source = pathlib.Path(path)
        is_local = True
        is_sb = True
        # If path is a huggingface hub.
        if not source.exists():
            is_local = False
        if is_local:
            # Test for HuggingFace model (.bin weights => not SpeechBrain).
            if any(File.endswith(".bin") for File in os.listdir(path)):
                is_sb = False
                return is_sb, checkpoint_filename
            # Test for SpeechBrain model and get the filename.
            for File in os.listdir(path):
                if File.endswith(".ckpt"):
                    checkpoint_filename = os.path.join(path, File)
                    is_sb = True
                    return is_sb, checkpoint_filename
        else:
            files = model_info(
                path
            ).siblings  # get the list of files of the Hub
            # Test if it's an HuggingFace model or a SB one
            for File in files:
                if File.rfilename.endswith(".ckpt"):
                    checkpoint_filename = File.rfilename
                    is_sb = True
                    return is_sb, checkpoint_filename
            for File in files:
                if File.rfilename.endswith(".bin"):
                    checkpoint_filename = File.rfilename
                    is_sb = False
                    return is_sb, checkpoint_filename
        err_msg = f"{path} does not contain a .bin or .ckpt checkpoint !"
        raise FileNotFoundError(err_msg)

    def forward(self, wav):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        """
        # If we freeze, we simply remove all grads and features from the graph.
        if self.freeze:
            with torch.no_grad():
                return self.extract_features(wav).detach()
        return self.extract_features(wav)

    def extract_features(self, wav):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.
        """
        if self.normalize_wav:
            wav = F.layer_norm(wav, wav.shape)
        # Extract wav2vec output
        out = self.model(wav)[0]
        # We normalize the output if required
        if self.output_norm:
            out = F.layer_norm(out, out.shape)
        return out
class HuggingFaceWav2Vec2Pretrain(nn.Module):
    """This lobe enables the integration of HuggingFace
    wav2vec2.0 models to be pretrained.
    Source paper: https://arxiv.org/abs/2006.11477
    Transformer from HuggingFace needs to be installed:
    https://huggingface.co/transformers/installation.html
    The return is a HuggingFace format and the mask indices that contains:
    https://huggingface.co/transformers/model_doc/wav2vec2.html#wav2vec2forpretraining
    For instance, it returns the loss that can be accessed with .loss
    Arguments
    ---------
    source : str
        HuggingFace hub name: e.g "facebook/wav2vec2-large-lv60"
    save_path : str
        Path (dir) of the downloaded model.
    mask_prob : float (default: 0.65)
        Probability of masking a given frame. Default is taken from the paper.
    mask_length : float (default: 10)
        Length (i.e. number of consecutive masked frames). Default is taken from
        the paper.
    normalize_wav : bool (default: True)
        If True, the input waveform is layer-normalized before being fed to
        the feature extractor.
    Example
    -------
    >>> inputs = torch.rand([10, 32000])
    >>> model_hub = "facebook/wav2vec2-base-960h"
    >>> save_path = "savedir"
    >>> model = HuggingFaceWav2Vec2Pretrain(model_hub, save_path)
    >>> outputs, _ = model(inputs)
    """

    def __init__(
        self,
        source,
        save_path,
        mask_prob=0.65,
        mask_length=10,
        normalize_wav=True,
    ):
        super().__init__()
        self.mask_prob = mask_prob
        self.mask_length = mask_length
        self.normalize_wav = normalize_wav
        # Download the config of the model from HuggingFace.
        # Only the config is fetched: the weights are randomly initialized
        # below because the whole point of this lobe is to pretrain them.
        self.config = Wav2Vec2Config.from_pretrained(
            source, cache_dir=save_path
        )
        self.config.output_hidden_states = (
            True  # We want the hidden states as well!
        )
        self.model = Wav2Vec2ForPreTraining(self.config)
        self.model.gradient_checkpointing_disable()  # Required by DDP
        self.model.train()

    def forward(self, wav):
        """Takes an input waveform and return its corresponding wav2vec encoding.

        Arguments
        ---------
        wav : torch.Tensor (signal)
            A batch of audio signals to transform to features.

        Returns
        -------
        tuple
            The Wav2Vec2ForPreTraining output (the loss is accessible as
            .loss) and the tensor of masked time indices.
        """
        batch_size, raw_sequence_length = wav.shape
        if self.normalize_wav:
            wav = F.layer_norm(wav, wav.shape)
        # Number of frames produced by the convolutional feature extractor
        # for the given raw waveform length.
        sequence_length = self.model._get_feat_extract_output_lengths(
            raw_sequence_length
        )
        # 1. Compute the indices that will be masked
        mask_time_indices = _compute_mask_indices(
            (batch_size, sequence_length),
            mask_prob=self.mask_prob,
            mask_length=self.mask_length,
        )
        torch_mask_time_indices = torch.tensor(
            mask_time_indices, device=wav.device, dtype=torch.long,
        )
        # 2. Sample the negative samples from the entire sequence.
        # Fairseq does it only on the masked indices, but this only works if
        # you have long sentences. For more versatility, we sample on the
        # entire sequence instead.
        full_sentence_indices = np.ones((batch_size, sequence_length))
        negative_sample_indices = torch.tensor(
            transformers.models.wav2vec2.modeling_wav2vec2._sample_negative_indices(
                (batch_size, sequence_length),
                num_negatives=self.config.num_negatives,
                mask_time_indices=full_sentence_indices,
            ),
            device=wav.device,
            dtype=torch.long,
        )
        return (
            self.model(
                wav,
                mask_time_indices=torch_mask_time_indices,
                sampled_negative_indices=negative_sample_indices,
            ),
            torch_mask_time_indices,
        )
|
{"hexsha": "f70f1b00fd0ae86be320e4714bb9c7b34baf8028", "size": 15022, "ext": "py", "lang": "Python", "max_stars_repo_path": "speechbrain/lobes/models/huggingface_wav2vec.py", "max_stars_repo_name": "RaphaelOlivier/speechbrain", "max_stars_repo_head_hexsha": "142dc6caa4b46ca4c9341b0cd39627f489808749", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "speechbrain/lobes/models/huggingface_wav2vec.py", "max_issues_repo_name": "RaphaelOlivier/speechbrain", "max_issues_repo_head_hexsha": "142dc6caa4b46ca4c9341b0cd39627f489808749", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "speechbrain/lobes/models/huggingface_wav2vec.py", "max_forks_repo_name": "RaphaelOlivier/speechbrain", "max_forks_repo_head_hexsha": "142dc6caa4b46ca4c9341b0cd39627f489808749", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1975903614, "max_line_length": 123, "alphanum_fraction": 0.6375981893, "include": true, "reason": "import numpy", "num_tokens": 3374}
|
import os
import numpy as np
import pandas as pd
import seaborn as sns
import glob
# Analysis thresholds.
SNP_frequency_cutoff = 0.01  # minimum within-host SNP frequency to keep
min_coverage = 100           # minimum read depth required at a site
calcBottlenecks = False      # toggle for the (slow) bottleneck computation
#Set Constants
potentialmixed = ['18VR001531', '19VR004455', '19VR003675', '19VR003920', '19VR003675']
#Samples which, based off visual inspection of mutations, seem to be mixed infections.
#This is determined by looking at 10 samples with the most mutations and seeing if those
#mutations, when close together, tend to be linked on the same read.
#NOTE(review): '19VR003675' appears twice in this list — confirm whether the
#duplicate is intentional or one entry should be a different sample ID.
# Directory layout: installDir is three levels above the current working
# directory, so this script assumes it is run from its install location.
installDir = '/'.join(os.getcwd().split('/')[:-3])
metadataDir = installDir + '/data/sample_metadata/'
metadatafile = metadataDir + 'sample_metadata.csv'
completemetadatakey = metadataDir + 'subject_metadata_key.csv'
expandedMetadata = metadataDir + 'subject_metadata.csv'
figures = os.path.join(installDir, 'results', 'figures')
bottleneck_output = os.path.join(installDir, 'results', 'bottleneck_output')
# One secondary-analysis folder per flu subtype/season pair.
secondaryDataFolders = [installDir + '/data/secondary_analysis/H3N2/18-19',
                        installDir + '/data/secondary_analysis/H3N2/17-18',
                        installDir + '/data/secondary_analysis/H1N1/18-19',
                        installDir + '/data/secondary_analysis/H1N1/17-18',
                        installDir + '/data/secondary_analysis/FluB/16-17',
                        installDir + '/data/secondary_analysis/FluB/17-18']
referenceDir = installDir + '/references'
vcfdirs = secondaryDataFolders
vcffiles = [f + '/all_snps_filtered.vcf' for f in vcfdirs]
# Reference sequence names, ordered to match secondaryDataFolders (the zip
# below pairs them positionally).
references = ['A_Singapore_INFIMH-16-0019_2016',
              'A_Hong_Kong_4801_2014_EPI834581',
              'A_Michigan_45_2015_H1N1_18',
              'A_Michigan_45_2015_H1N1_19',
              'B_Phuket_3073_2013_17',
              'B_Phuket_3073_2013_18']
consensusReferences = [mainSampleFolder + '/consensus/' + reference + '_consensus_noambig.fasta' for mainSampleFolder, reference in zip(secondaryDataFolders, references)]
gtfFiles = [referenceDir + '/' + reference + '_antigenic.gtf' for reference in references]
# Collect every per-segment SNPGenie output directory across all seasons.
SnpGenieSegFolders = []
for f in secondaryDataFolders:
    SnpGenieSegFolders.extend(glob.glob(f + '/SNPGenie_output/*'))
treefiles = [installDir + '/data/secondary_analysis/FluB/FluB.tree',
             installDir + '/data/secondary_analysis/H3N2/H3N2.tree',
             installDir + '/data/secondary_analysis/H1N1/H1N1.tree']
clade_references = installDir + '/data/references/subclade_definitions/Clade_reference_sequence_names.txt'
hongkongContigs = {'NP': 'A_Hong_Kong_4801_2014_834574_NP', 'NS': 'A_Hong_Kong_4801_2014_834575_NS',
'MP': 'A_Hong_Kong_4801_2014_834576_MP', 'PA': 'A_Hong_Kong_4801_2014_834577_PA',
'PB2': 'A_Hong_Kong_4801_2014_834578_PB2', 'PB1': 'A_Hong_Kong_4801_2014_834579_PB1',
'NA': 'A_Hong_Kong_4801_2014_834580_NA', 'HA': 'A_Hong_Kong_4801_2014_834581_HA'}
# location of all statistics tsvs:
dataFolder = installDir + '/results/dataframes'
subtypesToAnalyze = ['H1N1', 'H3N2', 'Influenza B']
# exclude 'NA' as a reserved term for nan when importing pandas dataframes
naValues = ['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A', 'N/A', '#NA', 'NULL', 'NaN', '-NaN', 'nan', '-nan', '', '*']
read_tsv_args = {'sep': '\t', 'keep_default_na': False, 'na_values': naValues}
read_csv_args = {'keep_default_na': False, 'na_values': naValues}
gene_to_seg_dict = {'HA': 'HA', 'NA': 'NA', 'PB1': 'PB1', 'PB2': 'PB2', 'PA': 'PA', 'NP': 'NP',
'NEP': 'NS', 'NS1': 'NS', 'M1': 'MP', 'M2': 'MP', 'PB1-F2': 'PB1', 'PA-X': 'PA',
'NB': 'NA', 'BM2': 'MP'}
referenceDict = {"A_Singapore_INFIMH-16-0019_2016": "H3N2",
"A_Hong_Kong_4801_2014_EPI834581": "H3N2",
"A_Michigan_45_2015_H1N1": "H1N1",
"A_Michigan_45_2015_H1N1_18": "H1N1",
"A_Michigan_45_2015_H1N1_19": "H1N1",
"B_Brisbane_60_2008": "Influenza B",
"B_Phuket_3073_2013_17": "Influenza B",
"B_Phuket_3073_2013_18": "Influenza B",
"Influenza A H3N2, Influenza B (Yamagata)": "Mixed"}
referenceSeasonDict = {"A_Singapore_INFIMH-16-0019_2016": "2018-2019 H3N2",
"A_Hong_Kong_4801_2014_EPI834581": "2017-2018 H3N2",
"A_Michigan_45_2015_H1N1_18": "2017-2018 H1N1",
"A_Michigan_45_2015_H1N1_19": "2018-2019 H1N1",
"B_Phuket_3073_2013_17": "2017-2018 Influenza B",
"B_Phuket_3073_2013_18": "2018-2019 Influenza B",
"B_Phuket_3073_2013_16": "2016-2017 Influenza B"}
sampleFolderDict = {'H3N2': {'17-18': installDir + '/data/secondary_analysis/H3N2/17-18',
'18-19': installDir + '/data/secondary_analysis/H3N2/18-19'},
'H1N1': {'17-18': installDir + '/data/secondary_analysis/H1N1/17-18',
'18-19': installDir + '/data/secondary_analysis/H1N1/18-19'},
'H1N1pdm': {'17-18': installDir + '/data/secondary_analysis/H1N1/17-18',
'18-19': installDir + '/data/secondary_analysis/H1N1/18-19'},
'Influenza B': {'16-17': installDir + '/data/secondary_analysis/FluB/16-17',
'17-18': installDir + '/data/secondary_analysis/FluB/17-18'}}
# Dictionary to convert myriad different subtypes in metadata file into consistent set of four subtypes
subtypeDict = {'Influenza A H3N2': 'H3N2', 'Flu A (H3)': 'H3N2',
'Flu A (Unable to Subtype)': 'H3N2', 'Flu B (Yamagata)': 'Influenza B', 'Flu A 09H1': 'H1N1',
'Influenza A H1N1': 'H1N1', 'Influenza A, Influenza B': 'Mixed', 'Influenza B': 'Influenza B',
'Influenza A H3N2, Influenza B (Yamagata)': 'Mixed', 'Influenza A H3N2, Influenza A H1N1': 'Mixed',
'Influenza A, Influenza B (Yamagata)': 'Mixed', 'Influenza B (Yamagata)': 'Influenza B', 'Influenza A': 'H3N2',
'Influenza B (Victoria)': 'Influenza B', 'H3N2': 'H3N2', 'H1N1': 'H1N1', 'Influenza B': 'Influenza B'}
# H1N1 HA antigenic-site positions, 1-based classic numbering.
# NOTE(review): presumably compiled from published H1N1 epitope maps -- confirm source.
H1N1_antigenic_sites = [87, 88, 90, 91, 92, 132, 141, 142,
                        143, 147, 171, 172, 174, 177, 180,
                        170, 173, 202, 206, 210, 211, 212,
                        151, 154, 156, 157, 158, 159, 200, 238]
H1N1_antigenic_sites = [site - 1 for site in H1N1_antigenic_sites]  # convert to zero-index
# H3N2 HA antigenic-site set (zero-indexed; glycosylation sites are unioned in below).
antigenic_sites = {59, 60, 61, 62, 63, 65, 66, 68, 69, 72, 74, 77, 78, 82, 90, 93, 95, 96, 97, 98, 101, 102, 103, 106, 107, 109, 111, 117, 118, 124, 132, 136, 137, 139, 141, 143, 144, 145, 146, 147, 148, 150, 152, 153, 155, 157, 158, 159, 160, 161, 165, 167, 170, 171, 172, 173, 174, 178, 180, 182, 183, 185, 186, 187, 188, 189, 190, 191, 192, 194, 197, 201, 202, 203, 204, 205, 207, 208, 209, 211, 212, 213, 216, 218, 222, 223, 224, 227, 228, 229, 230, 231, 232, 233, 234, 241, 242, 243, 244, 245, 253, 255, 257, 259, 261, 262, 263, 275, 276, 277, 280, 288, 290, 291, 293, 294, 295, 309, 312, 314, 315, 319, 320, 322, 323, 324, 325, 326, 327}
def convertListofClassicH3N2SitestoZeroIndexedMStart(listOfSites):
    """Shift classic (1-based, post-signal-peptide) H3N2 HA site numbers
    by +15 to get zero-indexed positions counted from the start Met."""
    shifted = []
    for classic_site in listOfSites:
        shifted.append(classic_site + 15)
    return shifted
# Classic-numbering HA glycosylation sites, shifted to zero-indexed Met-start
# coordinates and merged into the antigenic-site set.
glycosylation_sites = [8, 22, 38, 45, 63, 81, 133, 126, 159, 160, 165, 246, 285]
glycosylation_sites = set(convertListofClassicH3N2SitestoZeroIndexedMStart(glycosylation_sites))
antigenic_sites = antigenic_sites.union(glycosylation_sites)
# Coding-sequence lengths (nucleotides) per gene, per subtype.  HA is also
# split into antigenic / non-antigenic portions (3 nt per antigenic codon).
genelengths = {'H3N2': {'NEP': 366,
                        'HA': 1701,
                        'HA_antigenic': len(antigenic_sites) * 3,
                        'HA_nonantigenic': 1701 - len(antigenic_sites) * 3,
                        'M1': 759,
                        'M2': 294,
                        'NA': 1410,
                        'NP': 1497,
                        'NS1': 693,
                        'PA': 2151,
                        'PA-X': 759,
                        'PB1': 2274,
                        'PB1-F2': 273,
                        'PB2': 2280},
               'H1N1': {'HA_antigenic': len(H1N1_antigenic_sites) * 3,
                        'HA_nonantigenic': 1701 - len(H1N1_antigenic_sites) * 3,
                        'HA': 1701,
                        'M1': 759,
                        'M2': 294,
                        'NA': 1410,
                        'NP': 1497,
                        'NEP': 366,
                        'NS1': 660,
                        'PA': 2151,
                        'PA-X': 699,
                        'PB1': 2274,
                        'PB1-F2': 273,
                        'PB2': 2280},
               'Influenza B': {'HA': 1755,
                               'M1': 747,
                               'NA': 1401,
                               'NP': 1683,
                               'NEP': 369,
                               'NS1': 846,
                               'PA': 2181,
                               'PB1': 2259,
                               'PB2': 2313,
                               'BM2': 330,
                               'NB': 303}}
# Display constants
displayContext = 'poster'
palettes = dict()
# Unpack seaborn's ten-colour 'muted' palette into individually named colours.
snsblue, snsorange, snsgreen, snsred, snspurple, snsbrown, snspink, snsgrey, snsyellow, snssky = sns.color_palette('muted')
palettes['kind'] = sns.color_palette(('#eedc5b', '#d3494e'), 2)
palettes['subtype'] = sns.color_palette('deep')
palettes['AAtype'] = sns.color_palette((snsblue, snsorange, snsgreen), 3)
palettes['synon'] = sns.color_palette((snsblue, snsorange), 2)
palettes['vax'] = sns.color_palette('Reds', 2)[::-1]
palettes['age_category'] = sns.color_palette('Paired')
palettes['age_category_only'] = sns.color_palette('tab20')[8:10]
# Canonical orderings for plot axes and legends.
geneOrder = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", "NP", "NA", "M1", "M2", "NS1", "NEP"]
antigenicGeneOrder = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", 'HA_antigenic', 'HA_nonantigenic',"NP", "NA", "M1", "M2", "NS1", "NEP"]
segOrder = ['PB2', 'PB1', 'NP', 'HA', 'NA', 'PA', 'MP', 'NS']
subtypeOrder = ['H3N2', 'H1N1', 'Influenza B']
vaxOrder = [0, 1]
named_vaxOrder = ['Unvaccinated', 'Vaccinated']
ageOrder = ['18 or Under', 'Over 18']
NS_order = ['Nonsynon', 'Synon']
antigenicGeneNames = ["PB2", "PB1", "PA", "HA", 'Anti.\nHA', 'Nonanti.\nHA', "NP", "NA", "M1", "M2", "NS1", "NEP"]
antigenicGeneNames_withMinor = ["PB2", "PB1", 'PB1-F2', "PA", 'PA-X', "HA", 'Anti.\nHA', 'Nonanti.\nHA', "NP", "NA", "M1", "M2", "NS1", "NEP"]
# Keyword arguments shared by seaborn error-bar plots.
errorBarArgs = {"capsize": .1, "errwidth": 2}
# --- Load data ---------------------------------------------------------
# All tables live under {installDir}/results/dataframes and are read with the
# shared read_tsv_args (tab separator, custom NA handling so the 'NA' gene
# segment is not parsed as NaN).  Column name 'recieved_flu_vaccine' [sic]
# matches the spelling used in the data files.
print('loading subjects...')
subjects = pd.read_csv(dataFolder + '/subjects.tsv', **read_tsv_args)
print('loading samples...')
samples = pd.read_csv(dataFolder + '/samples.tsv', **read_tsv_args)
# For downstream analysis, it can be nice to have a few figure-specific variables
samples['age_category'] = '18 or Under'
samples.loc[samples.age > 18, 'age_category'] = 'Over 18'
# Long-format (melted) piN/piS per sample for plotting.
meltedPiSamples = samples.melt(id_vars=['sampleID', 'subtype', 'recieved_flu_vaccine', 'age_category', 'symptom_severity'], value_vars=['piN_sample', 'piS_sample']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
print('loading segments...')
segments = pd.read_csv(dataFolder + '/segments.tsv', **read_tsv_args)
# I'll go ahead and make a melted version of all dataframes with piN/piS measurements
meltedPiSegments = segments.melt(id_vars=['sampleID', 'subtype', 'segment', 'recieved_flu_vaccine', 'symptom_severity'], value_vars=['piN_segment', 'piS_segment']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
print('loading genes...')
genes = pd.read_csv(dataFolder + '/genes.tsv', **read_tsv_args)
try:
    meltedPiGenes = genes.melt(id_vars=['sampleID', 'subtype', 'segment', 'product', 'age_category', 'recieved_flu_vaccine', 'symptom_severity'], value_vars=['piN_gene', 'piS_gene']).rename(columns={'variable': 'PiN_PiS', 'value': 'Pi'})
except Exception:
    # Surface the available columns before re-raising to ease debugging.
    print(genes.columns)
    raise
print('loading SNPs...')
SNPs = pd.read_csv(dataFolder + '/SNPs_lenient_filter.gz', **read_tsv_args)
print('loading transmission pairs...')
transmissionPairs = pd.read_csv(dataFolder + '/transmissionPairs.tsv', **read_tsv_args)
print('loading transmission segments...')
transmissionSegments = pd.read_csv(dataFolder + '/transmissionSegments.tsv', **read_tsv_args)
print('loading transmission SNPs...')
transmissionSNPs = pd.read_csv(dataFolder + '/transmissionSNPs_lenient_filter.gz', **read_tsv_args)
# make all vs all distance DF for distance comparisons
# TODO(review): hard-coded absolute path; should be derived from installDir.
allvsall = pd.read_csv('/mnt/d/orchards/H1N1/figures/allvsall.tsv', **read_tsv_args)
# Annotate each index/contact pair with the corresponding sample metadata.
allvsall = allvsall.merge(samples, left_on='index', right_on='sampleID', how='left')
allvsall = allvsall.merge(samples, left_on='contact', right_on='sampleID', how='left', suffixes=('_index', '_contact'))
# limit comparisons to those where contact infected after index, and onset of symptoms are separated by less than one week
allvsall = allvsall.loc[(pd.to_datetime(allvsall['time_of_symptom_onset_contact']) - pd.to_datetime(allvsall['time_of_symptom_onset_index'])) >= pd.Timedelta(0)]
allvsall = allvsall.loc[pd.to_datetime(allvsall['time_of_symptom_onset_contact']) - pd.to_datetime(allvsall['time_of_symptom_onset_index']) <= pd.Timedelta('10 days')]
allvsall = allvsall.loc[allvsall.subtype_index == allvsall.subtype_contact]
# Categorical labels describing the school/household relationship of a pair.
allvsall['school_match'] = 'Does not attend'
allvsall.loc[allvsall.school_index == allvsall.school_contact, 'school_match'] = 'Within school'
allvsall.loc[allvsall.school_index != allvsall.school_contact, 'school_match'] = 'Between schools'
allvsall['household_match'] = 'Other'
allvsall.loc[allvsall.household_index != allvsall.household_contact, 'household_match'] = 'No'
allvsall.loc[allvsall.household_index == allvsall.household_contact, 'household_match'] = 'Yes'
allvsall = allvsall.reset_index(drop=True)
# 'Relatedness' escalates: later assignments overwrite earlier ones, so
# household/school matches take precedence over clade/subclade matches.
allvsall['Relatedness'] = 'Random'
allvsall.loc[allvsall.clade_index == allvsall.clade_contact, 'Relatedness'] = 'Same Clade'
allvsall.loc[allvsall.subclade_index == allvsall.subclade_contact, 'Relatedness'] = 'Same Subclade'
allvsall.loc[allvsall.household_index == allvsall.household_contact, 'Relatedness'] = 'Same Household'
allvsall.loc[allvsall.school_index == allvsall.school_contact, 'Relatedness'] = 'Same School'
# Column groups used to build the tidy nonsynonymous/synonymous dataframes.
id_columns = ['sampleID', 'subtype', 'season', 'age', 'age_category', 'recieved_flu_vaccine', 'clade', 'subclade']
sample_N_stats = ['nonsynon_snps_per_day_samp', 'Xue_nonsynon_divergence', 'num_of_nonsynon_muts', 'nonsynon_mutation_rate_samp', 'Xue_nonsynon_divergence_per_day', 'nonsynon_divergence_rate']
sample_S_stats = ['synon_snps_per_day_samp', 'Xue_synon_divergence', 'num_of_synon_muts', 'synon_mutation_rate_samp', 'Xue_synon_divergence_per_day', 'synon_divergence_rate']
segment_N_stats = ['nonsynon_snps_per_day_seg', 'Xue_nonsynon_divergence_segment', 'num_of_nonsynon_muts_segment', 'nonsynon_mutation_rate_seg', 'nonsynon_divergence_per_day_seg', 'nonsynon_divergence_rate_seg']
segment_S_stats = [col.replace('nonsynon_', 'synon_') for col in segment_N_stats]
gene_N_stats = [col.replace('_segment', '').replace('_seg', '')+'_gene' for col in segment_N_stats]
gene_S_stats = [col.replace('_segment', '').replace('_seg', '')+'_gene' for col in segment_S_stats]
sample_N_stats.append('piN_sample')
sample_S_stats.append('piS_sample')
segment_N_stats.append('piN_segment')
segment_S_stats.append('piS_segment')
gene_N_stats.append('piN_gene')
gene_S_stats.append('piS_gene')
# Strip the nonsynon_/synon_ prefixes so the N and S frames share column names.
N_sample_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in sample_N_stats}
S_sample_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in sample_S_stats}
N_segment_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in segment_N_stats}
S_segment_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in segment_S_stats}
N_gene_renameDict = {col: col.replace('nonsynon_', '').replace('piN', 'pi') for col in gene_N_stats}
S_gene_renameDict = {col: col.replace('synon_', '').replace('piS', 'pi') for col in gene_S_stats}
N_samples = samples[id_columns + sample_N_stats].rename(columns=N_sample_renameDict)
S_samples = samples[id_columns + sample_S_stats].rename(columns=S_sample_renameDict)
N_segments = segments[['segment'] + id_columns + segment_N_stats].rename(columns=N_segment_renameDict)
S_segments = segments[['segment'] + id_columns + segment_S_stats].rename(columns=S_segment_renameDict)
N_genes = genes[['segment', 'product'] + id_columns + gene_N_stats].rename(columns=N_gene_renameDict)
S_genes = genes[['segment', 'product'] + id_columns + gene_S_stats].rename(columns=S_gene_renameDict)
N_samples['Synon_Nonsynon'] = N_segments['Synon_Nonsynon'] = N_genes['Synon_Nonsynon'] = 'Nonsynon'
S_samples['Synon_Nonsynon'] = S_segments['Synon_Nonsynon'] = S_genes['Synon_Nonsynon'] = 'Synon'
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat with
# the default axis/index options produces the identical stacked frame.
NS_samples = pd.concat([N_samples, S_samples])
NS_segments = pd.concat([N_segments, S_segments])
NS_genes = pd.concat([N_genes, S_genes])
# Map the 0/1 vaccination flag to readable labels (NaN stays NaN).
samples['recieved_flu_vaccine'] = samples['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
NS_samples['recieved_flu_vaccine'] = NS_samples['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
genes['recieved_flu_vaccine'] = genes['recieved_flu_vaccine'].map({0: 'Unvaccinated', 1: 'Vaccinated', np.nan: np.nan})
|
{"hexsha": "102902ac0c94633b54458b9854fa663172088efe", "size": 16744, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/secondary_analysis/pre_processing_scripts/figure_constants.py", "max_stars_repo_name": "JosephLalli/ORCHARDS", "max_stars_repo_head_hexsha": "1fc4d27121683e77edd02303e9ecd3d8d4caeb1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/secondary_analysis/pre_processing_scripts/figure_constants.py", "max_issues_repo_name": "JosephLalli/ORCHARDS", "max_issues_repo_head_hexsha": "1fc4d27121683e77edd02303e9ecd3d8d4caeb1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/secondary_analysis/pre_processing_scripts/figure_constants.py", "max_forks_repo_name": "JosephLalli/ORCHARDS", "max_forks_repo_head_hexsha": "1fc4d27121683e77edd02303e9ecd3d8d4caeb1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.0789473684, "max_line_length": 644, "alphanum_fraction": 0.6683588151, "include": true, "reason": "import numpy", "num_tokens": 5593}
|
import os.path
from multiprocessing.pool import ThreadPool
from sklearn.mixture import GaussianMixture
import librosa as l
from pathlib import Path
import numpy as np
from tqdm import tqdm
import collections
import warnings
# ignores librosa warnings
warnings.filterwarnings("ignore", category=UserWarning)
class SpeechGaussian:
    """GMM-based speaker identification over MFCC features.

    All state lives in class-level attributes and every method is a
    classmethod, so the class behaves as a singleton pipeline:
    train_gmm() -> gmm_evaluate_model() and/or gmm_label_data().
    """

    # NOTE(review): class attributes are shared, mutable state; the dicts/lists
    # below are mutated in place by the methods.
    speakers = []       # per-speaker training directories, filled by train_gmm()
    speakers_iter = {}  # unused in this file -- confirm before removing
    gmm_arr = {}        # speaker index -> fitted GaussianMixture (fill order arbitrary)
    gmm_ord = {}        # gmm_arr re-keyed in ascending speaker order (OrderedDict)
    class_cnt = 31      # number of speaker classes
    gaussian_cnt = 2    # mixture components per speaker GMM
    dev_orig_path = Path('./dataset/dev')
    eval_orig_path = Path('./dataset/eval')
    train_orig_path = Path('./dataset/train_big')
    dev_path = Path('./temp/dev')
    eval_path = Path('./temp/eval')
    train_path = Path('./temp/train_big')

    @classmethod
    def waw_2_mfcc(cls, waw_path):
        """Load a wav at 16 kHz and return a (frames, 48) array of
        24 MFCCs concatenated with their first-order deltas."""
        signal, sr = l.load(waw_path, sr=16000)
        n_fft = int(sr * 0.02)   # window length: 0.02 s
        hop_length = n_fft // 2  # hop = half the window length
        mfccs = l.feature.mfcc(signal, sr=sr, n_mfcc=24, hop_length=hop_length, n_fft=n_fft).T
        # Zero-pad the deltas so they align row-wise with the MFCC frames.
        mfcc_delta = np.concatenate((np.zeros((1, 24)), np.diff(mfccs, axis=0)), axis=0)
        return np.concatenate((mfccs, mfcc_delta), axis=1)

    @classmethod
    def gmm_train_speaker(cls, speaker_dir):
        """Fit one GaussianMixture on every .wav in `speaker_dir`.

        The directory name is the 1-based speaker index.  Results are stored
        in cls.gmm_arr; concurrent calls write distinct keys, so this is safe
        to run from the thread pool in train_gmm().
        """
        speaker_features = []
        # Path.name is the last path component (portable, unlike splitting on '/').
        speaker_idx = int(speaker_dir.name)
        for speaker_file in speaker_dir.iterdir():
            if speaker_file.suffix == '.wav':
                speaker_features.append(cls.waw_2_mfcc(speaker_file))
        features = np.concatenate(speaker_features, axis=0)
        gmm = GaussianMixture(n_components=cls.gaussian_cnt, max_iter=2000).fit(features)
        cls.gmm_arr[speaker_idx] = gmm

    @classmethod
    def gmm_eval_speaker(cls, recording_path):
        """Score one recording against every trained GMM.

        Returns (1-based predicted speaker index, array of per-speaker total
        log-likelihoods).  On unreadable audio returns the sentinel -1;
        callers rely on the TypeError raised when unpacking that sentinel,
        so the -1 contract must be preserved.
        """
        scores = []
        try:
            recording_mfcc = cls.waw_2_mfcc(recording_path)
        except ValueError:
            return -1
        for key in cls.gmm_ord:
            scores.append(sum(cls.gmm_ord[key].score_samples(recording_mfcc)))
        np_s = np.array(scores)
        return np_s.argmax() + 1, np_s

    @classmethod
    def gmm_evaluate_model(cls):
        """Report accuracy over the dev set; each subdirectory name is the
        ground-truth speaker index for the files it contains."""
        attempts = 0
        true_accept = 0
        dev_dirs = list(cls.dev_path.iterdir())  # materialize once for tqdm's total
        for dev_dir in tqdm(dev_dirs, 'Eval', len(dev_dirs), unit='speaker'):
            if '.DS_Store' in str(dev_dir):  # skip macOS metadata entries
                continue
            gt_idx = int(dev_dir.name)
            for speaker_file in dev_dir.iterdir():
                if speaker_file.suffix == '.wav':
                    attempts += 1
                    try:
                        pred_class, _ = cls.gmm_eval_speaker(speaker_file)
                    except TypeError:  # gmm_eval_speaker returned the -1 sentinel
                        print(str(speaker_file))
                        continue
                    true_accept += 1 if pred_class == gt_idx else 0
        model_acc = (true_accept / attempts)
        print('Total accuracy: {0}%'.format(model_acc * 100))

    @classmethod
    def gmm_label_data(cls, eval_dir):
        """Classify every .wav in `eval_dir`, writing one
        '<name> <prediction> <log-likelihoods...>' line per file."""
        # Context manager guarantees the file is closed even if scoring raises.
        with open("speech_gaussian.txt", "w") as result_file:
            eval_files = list(eval_dir.iterdir())  # materialize once for tqdm's total
            for eval_file in tqdm(eval_files, 'Label data', len(eval_files), unit='files'):
                if eval_file.suffix == '.wav':
                    try:
                        pred_class, probs = cls.gmm_eval_speaker(eval_file)
                    except TypeError:  # -1 sentinel from unreadable audio
                        print(str(eval_file))
                        continue
                    res_line = '{0} {1} {2}\n'.format(os.path.basename(eval_file).replace('.wav', ''), pred_class,
                                                      ' '.join(str(x) for x in probs))
                    result_file.write(res_line)

    @classmethod
    def train_gmm(cls):
        """Train one GMM per speaker on 8 worker threads, then build the
        index-ordered model dict used at evaluation time."""
        for i in range(cls.class_cnt):
            speaker_dir = cls.train_path.joinpath(str(i + 1))
            cls.speakers.append(speaker_dir)
        with ThreadPool(8) as pool:
            # list() drains the lazy imap iterator so tqdm shows progress.
            list(
                tqdm(
                    pool.imap(cls.gmm_train_speaker, cls.speakers),
                    'Train',
                    len(cls.speakers),
                    unit="speaker"
                )
            )
        cls.gmm_ord = collections.OrderedDict(sorted(cls.gmm_arr.items()))
|
{"hexsha": "6c5ecf192cd75de422ce85f1f2c85d2cf2d509ba", "size": 4338, "ext": "py", "lang": "Python", "max_stars_repo_path": "speech/speech_gaussian.py", "max_stars_repo_name": "AntonFirc/SUR", "max_stars_repo_head_hexsha": "3173a80731e601cdcc590166a8ba2ef801e60325", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "speech/speech_gaussian.py", "max_issues_repo_name": "AntonFirc/SUR", "max_issues_repo_head_hexsha": "3173a80731e601cdcc590166a8ba2ef801e60325", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "speech/speech_gaussian.py", "max_forks_repo_name": "AntonFirc/SUR", "max_forks_repo_head_hexsha": "3173a80731e601cdcc590166a8ba2ef801e60325", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1145038168, "max_line_length": 111, "alphanum_fraction": 0.571461503, "include": true, "reason": "import numpy", "num_tokens": 999}
|
# Tests for the `fit(Histogram, ...)` interface and `midpoints`.
# Basic fits: total weight equals the number of observations.
@test sum(fit(Histogram,[1,2,3]).weights) == 3
@test fit(Histogram,[]).weights == []
@test fit(Histogram,[1]).weights == [1]
# Explicit bin edges (vector and range forms); == compares edges and weights.
@test fit(Histogram,[1,2,3],[0,2,4]) == Histogram([0,2,4],[2,1])
@test fit(Histogram,[1,2,3],[0,2,4]) != Histogram([0,2,4],[1,1])
@test fit(Histogram,[1,2,3],0:2:4) == Histogram(0:2:4,[2,1])
# 100 evenly spaced points over (0,1] with 0.01-wide bins: one point per bin.
@test all(fit(Histogram,[1:100]/100,0.0:0.01:1.0).weights .==1)
@test fit(Histogram,[1,1,1,1,1]).weights[1] == 5
# Two-dimensional input: total weight still equals the observation count.
@test sum(fit(Histogram,(rand(100),rand(100))).weights) == 100
# `closed` selects which side of each bin is inclusive; boundary values
# spill into an extra bin on the open side.
@test fit(Histogram,1:100,nbins=5,closed=:right).weights == [20,20,20,20,20]
@test fit(Histogram,1:100,nbins=5,closed=:left).weights == [19,20,20,20,20,1]
@test fit(Histogram,0:99,nbins=5,closed=:right).weights == [1,20,20,20,20,19]
@test fit(Histogram,0:99,nbins=5,closed=:left).weights == [20,20,20,20,20]
# 2-D fits with nbins given as a scalar or a per-dimension tuple.
@test fit(Histogram,(1:100,1:100),nbins=5).weights == diagm([20,20,20,20,20])
@test fit(Histogram,(1:100,1:100),nbins=(5,5)).weights == diagm([20,20,20,20,20])
# Weighted observations: bin totals scale with the weights, and the weight
# eltype propagates to the histogram weights.
@test fit(Histogram,1:100,weights(ones(100)),nbins=5).weights == [20,20,20,20,20]
@test fit(Histogram,1:100,weights(2*ones(100)),nbins=5).weights == [40,40,40,40,40]
@test eltype(fit(Histogram,1:100,weights(ones(Int,100)),nbins=5).weights) == Int
@test eltype(fit(Histogram,1:100,weights(ones(Float64,100)),nbins=5).weights) == Float64
import StatsBase.midpoints
# midpoints: centers of consecutive elements of a range or array.
@test midpoints(1.0:1.0:10.0) == 1.5:1.0:9.5
@test midpoints(1:10) == 1.5:9.5
@test midpoints(Float64[1.0:1.0:10.0]) == Float64[1.5:1.0:9.5]
|
{"hexsha": "8e026f7383e2e711edd23309a0b8434c246c7e04", "size": 1482, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/hist.jl", "max_stars_repo_name": "wildart/StatsBase.jl", "max_stars_repo_head_hexsha": "3885542a93d2de71a1102da030c798cd51749481", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/hist.jl", "max_issues_repo_name": "wildart/StatsBase.jl", "max_issues_repo_head_hexsha": "3885542a93d2de71a1102da030c798cd51749481", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/hist.jl", "max_forks_repo_name": "wildart/StatsBase.jl", "max_forks_repo_head_hexsha": "3885542a93d2de71a1102da030c798cd51749481", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.8064516129, "max_line_length": 88, "alphanum_fraction": 0.6599190283, "num_tokens": 617}
|
import pandas as pd
import numpy as np
import datetime
from Workshop import *
class HangarDatabase:
    """Registry of maintenance workshops, indexed globally and per city."""

    def __init__(self, H, G, hangar_profile_df):
        """Build Workshop objects from the hangar profile table.

        H, G            -- stored as-is (G is iterated as the city list).
        hangar_profile_df -- one row per workshop; its index is the workshop code.
        """
        self.Cities = {}     # city -> np.array of Workshop objects
        self.Workshops = {}  # workshop code -> Workshop object
        self.H = H
        self.G = G
        for city in G:
            self.Cities[city] = np.array([])
            city_rows = hangar_profile_df[hangar_profile_df['City'] == city]
            for code, row in city_rows.iterrows():
                shop = Workshop(code, row['Workshop Type'], city, row['Slot'],
                                row['Towing time (hour)'], row['Slot Utilization'],
                                row['Slot Effective'])
                self.Workshops[code] = shop
                self.Cities[city] = np.append(self.Cities[city], shop)

    def UpdateUsedSlot(self, initial_maintenance_status_df):
        """Bump used_slot for every hangar referenced in the status table."""
        for _, row in initial_maintenance_status_df.iterrows():
            self.Workshops[row['Hangar Code']].used_slot += 1
{"hexsha": "30ee78bb4567a9f84e87a406ea2d7d19231c6d79", "size": 1011, "ext": "py", "lang": "Python", "max_stars_repo_path": "HangarDatabase.py", "max_stars_repo_name": "ruthvelisia/Ruth-s-Progress", "max_stars_repo_head_hexsha": "670598c6b9285d09985587fb08d17f37fdd7b860", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HangarDatabase.py", "max_issues_repo_name": "ruthvelisia/Ruth-s-Progress", "max_issues_repo_head_hexsha": "670598c6b9285d09985587fb08d17f37fdd7b860", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HangarDatabase.py", "max_forks_repo_name": "ruthvelisia/Ruth-s-Progress", "max_forks_repo_head_hexsha": "670598c6b9285d09985587fb08d17f37fdd7b860", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.125, "max_line_length": 154, "alphanum_fraction": 0.6122650841, "include": true, "reason": "import numpy", "num_tokens": 235}
|
import numpy as np
import pandas as pd
from surprise import *
from surprise import accuracy
from surprise import Dataset
from surprise import Reader
from src.model.BaseModel import BaseModel
class surprise_SVD(BaseModel):
    """Thin wrapper exposing surprise's SVD through the BaseModel interface."""

    def __init__(self, params: dict):
        super().__init__(params)
        # params are forwarded verbatim to surprise's SVD constructor.
        self.model = SVD(**self.params)

    def fit(self, train_df, user_info, item_info):
        """Train on (user_id, business_id, stars) triples.

        user_info and item_info are accepted for interface compatibility
        but not used by this model.
        """
        rating_reader = Reader(rating_scale = (1, 5))
        surprise_data = Dataset.load_from_df(train_df[['user_id', 'business_id', 'stars']], rating_reader)
        full_trainset = surprise_data.build_full_trainset()
        self.model.fit(full_trainset)

    def transform(self, ui_pairs):
        """Return a copy of ui_pairs with a 'prediction' column of
        estimated ratings for each (user_id, business_id) row."""
        scores = [
            self.model.predict(pair.user_id, pair.business_id, verbose=False).est
            for pair in ui_pairs.itertuples()
        ]
        result = ui_pairs.copy()
        result['prediction'] = scores
        return result
|
{"hexsha": "b96821c9ac5f39d9b0b8b78d560d6e4e82277c24", "size": 1148, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model/surprise_SVD.py", "max_stars_repo_name": "HenryNebula/Personalization_Final_Project", "max_stars_repo_head_hexsha": "5d18a8628bed2dfd2894b9d2f33c1e9a5df27ecc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-11-03T18:02:15.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-03T18:02:15.000Z", "max_issues_repo_path": "src/model/surprise_SVD.py", "max_issues_repo_name": "HenryNebula/Personalization_Final_Project", "max_issues_repo_head_hexsha": "5d18a8628bed2dfd2894b9d2f33c1e9a5df27ecc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/model/surprise_SVD.py", "max_forks_repo_name": "HenryNebula/Personalization_Final_Project", "max_forks_repo_head_hexsha": "5d18a8628bed2dfd2894b9d2f33c1e9a5df27ecc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-22T01:01:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-22T01:01:21.000Z", "avg_line_length": 25.5111111111, "max_line_length": 92, "alphanum_fraction": 0.6141114983, "include": true, "reason": "import numpy", "num_tokens": 245}
|
[STATEMENT]
lemma wt_app_eff_imp_wt_err:
assumes app_eff: "wt_app_eff r app step ts"
assumes bounded: "bounded (err_step (size ts) app step) (size ts)"
shows "wt_err_step r (err_step (size ts) app step) (map OK ts)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wt_err_step r (err_step (length ts) app step) (map OK ts)
[PROOF STEP]
proof (unfold wt_err_step_def wt_step_def, intro strip, rule conjI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> map OK ts ! p \<noteq> Err
2. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
fix p
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> map OK ts ! p \<noteq> Err
2. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
assume "p < size (map OK ts)"
[PROOF STATE]
proof (state)
this:
p < length (map OK ts)
goal (2 subgoals):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> map OK ts ! p \<noteq> Err
2. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
hence p: "p < size ts"
[PROOF STATE]
proof (prove)
using this:
p < length (map OK ts)
goal (1 subgoal):
1. p < length ts
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
p < length ts
goal (2 subgoals):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> map OK ts ! p \<noteq> Err
2. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
thus "map OK ts ! p \<noteq> Err"
[PROOF STATE]
proof (prove)
using this:
p < length ts
goal (1 subgoal):
1. map OK ts ! p \<noteq> Err
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
map OK ts ! p \<noteq> Err
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
map OK ts ! p \<noteq> Err
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
fix q t
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
assume q: "(q,t) \<in> set (err_step (size ts) app step p (map OK ts ! p))"
[PROOF STATE]
proof (state)
this:
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
with p app_eff
[PROOF STATE]
proof (chain)
picking this:
p < length ts
wt_app_eff r app step ts
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
[PROOF STEP]
obtain
"app p (ts ! p)" "\<forall>(q,t) \<in> set (step p (ts!p)). t <=_r ts!q"
[PROOF STATE]
proof (prove)
using this:
p < length ts
wt_app_eff r app step ts
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
goal (1 subgoal):
1. (\<lbrakk>app p (ts ! p); \<forall>(q, t)\<in>set (step p (ts ! p)). t \<sqsubseteq>\<^bsub>r\<^esub> ts ! q\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (unfold wt_app_eff_def) blast
[PROOF STATE]
proof (state)
this:
app p (ts ! p)
\<forall>(q, t)\<in>set (step p (ts ! p)). t \<sqsubseteq>\<^bsub>r\<^esub> ts ! q
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
app p (ts ! p)
\<forall>(q, t)\<in>set (step p (ts ! p)). t \<sqsubseteq>\<^bsub>r\<^esub> ts ! q
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
from q p bounded
[PROOF STATE]
proof (chain)
picking this:
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
p < length ts
bounded (err_step (length ts) app step) (length ts)
[PROOF STEP]
have "q < size ts"
[PROOF STATE]
proof (prove)
using this:
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
p < length ts
bounded (err_step (length ts) app step) (length ts)
goal (1 subgoal):
1. q < length ts
[PROOF STEP]
by - (rule boundedD)
[PROOF STATE]
proof (state)
this:
q < length ts
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
hence "map OK ts ! q = OK (ts!q)"
[PROOF STATE]
proof (prove)
using this:
q < length ts
goal (1 subgoal):
1. map OK ts ! q = OK (ts ! q)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
map OK ts ! q = OK (ts ! q)
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
map OK ts ! q = OK (ts ! q)
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
have "p < size ts"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. p < length ts
[PROOF STEP]
by (rule p)
[PROOF STATE]
proof (state)
this:
p < length ts
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
p < length ts
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
note q
[PROOF STATE]
proof (state)
this:
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
app p (ts ! p)
\<forall>(q, t)\<in>set (step p (ts ! p)). t \<sqsubseteq>\<^bsub>r\<^esub> ts ! q
map OK ts ! q = OK (ts ! q)
p < length ts
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
[PROOF STEP]
have "t <=_(Err.le r) map OK ts ! q"
[PROOF STATE]
proof (prove)
using this:
app p (ts ! p)
\<forall>(q, t)\<in>set (step p (ts ! p)). t \<sqsubseteq>\<^bsub>r\<^esub> ts ! q
map OK ts ! q = OK (ts ! q)
p < length ts
(q, t) \<in> set (err_step (length ts) app step p (map OK ts ! p))
goal (1 subgoal):
1. t \<sqsubseteq>\<^bsub>Err.le r\<^esub> map OK ts ! q
[PROOF STEP]
by (auto simp add: err_step_def map_snd_def)
[PROOF STATE]
proof (state)
this:
t \<sqsubseteq>\<^bsub>Err.le r\<^esub> map OK ts ! q
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
(?q2, ?t2) \<in> set (err_step (length ts) app step p (map OK ts ! p)) \<Longrightarrow> ?t2 \<sqsubseteq>\<^bsub>Err.le r\<^esub> map OK ts ! ?q2
goal (1 subgoal):
1. \<And>p. p < length (map OK ts) \<Longrightarrow> Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
thus "stable (Err.le r) (err_step (size ts) app step) (map OK ts) p"
[PROOF STATE]
proof (prove)
using this:
(?q2, ?t2) \<in> set (err_step (length ts) app step p (map OK ts ! p)) \<Longrightarrow> ?t2 \<sqsubseteq>\<^bsub>Err.le r\<^esub> map OK ts ! ?q2
goal (1 subgoal):
1. Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
[PROOF STEP]
by (unfold stable_def) blast
[PROOF STATE]
proof (state)
this:
Typing_Framework.stable (Err.le r) (err_step (length ts) app step) (map OK ts) p
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 3340, "file": null, "length": 31}
|
/-
Second localization test. If e^2 = e then R[1/e] ≅ R[1/(1-e)].
-/
import to_mathlib.localization.localization_alt
-- By Kenny.
example {R : Type*} [comm_ring R] (e : R) (he : e * e = e) : localization_alt.is_localization (powers e) (ideal.quotient.mk (ideal.span {1-e})) :=
begin
  -- In R/(1-e) the class of e is 1, because 1 - e belongs to the span generating the ideal.
  have H1 : ideal.quotient.mk (ideal.span {1 - e}) e = 1,
  { exact eq.symm (ideal.quotient.eq.2 $ ideal.subset_span $ or.inl rfl) },
  -- (1 - e) * e = 0 is immediate from the idempotency hypothesis e * e = e.
  have H2 : (1 - e) * e = 0,
  { rw [sub_mul, he, one_mul, sub_self] },
  -- `refine` splits the goal into the three fields of `is_localization`.
  refine ⟨_, _, _⟩,
  { rintros ⟨_, n, rfl⟩, use 1,
    change ideal.quotient.mk _ (e^n * 1) = _,
    rw [mul_one, is_semiring_hom.map_pow (ideal.quotient.mk (ideal.span {1-e})) e n, H1, one_pow] },
  { rintro ⟨x⟩, use (1,x), exact one_mul _ },
  { ext x, split; intro hx,
    { replace hx := ideal.quotient.eq_zero_iff_mem.1 hx,
      replace hx := ideal.mem_span_singleton'.1 hx,
      refine ⟨⟨(x, ⟨e, 1, pow_one e⟩), _⟩, rfl⟩,
      cases hx with y hx, change x * e = 0, rw [← hx, mul_assoc, H2, mul_zero] },
    { rcases hx with ⟨⟨⟨x, ⟨_, n, rfl⟩⟩, hx⟩, rfl⟩, change x * e^n = 0 at hx,
      apply ideal.quotient.eq_zero_iff_mem.2,
      apply ideal.mem_span_singleton'.2,
      -- Induct on n to peel powers of e off the annihilation hypothesis x * e^n = 0.
      change ∃ a, a * (1-e) = x, induction n with n ih generalizing x,
      { rw [pow_zero, mul_one] at hx, subst hx, use 0, rw zero_mul },
      rw [pow_succ, ← mul_assoc] at hx, cases ih _ hx with y hy,
      use x + y, rw [add_mul, hy, ← mul_add, sub_add_cancel, mul_one] } },
end
|
{"author": "ramonfmir", "repo": "lean-scheme", "sha": "6d3ec18fecfd174b79d0ce5c85a783f326dd50f6", "save_path": "github-repos/lean/ramonfmir-lean-scheme", "path": "github-repos/lean/ramonfmir-lean-scheme/lean-scheme-6d3ec18fecfd174b79d0ce5c85a783f326dd50f6/src/to_mathlib/localization/localization_tests/localisation_test_2.lean"}
|
import numpy as np
import pytest
from gradgpad.foundations.metrics.bpcer_fixing_apcer import bpcer_fixing_apcer
@pytest.mark.unit
@pytest.mark.parametrize(
    "scores, labels, expected_bpcer, apcer_working_point",
    [
        (
            np.array([0.1, 0.11, 0.6, 0.25, 0.0, 0.1, 0.2]),
            np.array([1, 1, 2, 2, 0, 0, 0]),
            1.0,  # 0.66,
            0.1,
        ),
        (
            np.array([0.1, 0.11, 0.6, 0.25, 0.0, 0.1, 0.2]),
            np.array([1, 1, 2, 2, 0, 0, 0]),
            0.33,
            0.5,
        ),
    ],
)
def test_should_compute_bpcer_fixing_apcer_correctly(
    scores, labels, expected_bpcer, apcer_working_point
):
    """BPCER computed at a fixed APCER working point should match the expected value."""
    computed = bpcer_fixing_apcer(scores, labels, apcer_working_point)
    assert computed == pytest.approx(expected_bpcer, 0.1)
|
{"hexsha": "32f0ae0cfac1909ade5cd14e0077de9933fbb098", "size": 839, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/foundations/metrics/test_bpcer_fixing_apcer.py", "max_stars_repo_name": "acostapazo/gradgpad", "max_stars_repo_head_hexsha": "bbf75e11eed844a0014f7cb6627c8fa0c73cd3f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-06-29T03:31:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T08:26:35.000Z", "max_issues_repo_path": "tests/unit/foundations/metrics/test_bpcer_fixing_apcer.py", "max_issues_repo_name": "acostapazo/gradgpad", "max_issues_repo_head_hexsha": "bbf75e11eed844a0014f7cb6627c8fa0c73cd3f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-04-14T19:15:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-09T22:30:10.000Z", "max_forks_repo_path": "tests/unit/foundations/metrics/test_bpcer_fixing_apcer.py", "max_forks_repo_name": "acostapazo/gradgpad", "max_forks_repo_head_hexsha": "bbf75e11eed844a0014f7cb6627c8fa0c73cd3f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-28T19:17:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T19:17:24.000Z", "avg_line_length": 27.9666666667, "max_line_length": 86, "alphanum_fraction": 0.5864123957, "include": true, "reason": "import numpy", "num_tokens": 296}
|
import zarr
import glob
import numpy as np
from tqdm import tqdm
from skimage.io import imread
import os
import argparse
import logging
try:
    # Optional heavy dependencies: the tif -> n5 conversion helper below works
    # without the gunpowder/torch stack, so keep the module importable when
    # these packages are not installed.
    import gunpowder as gp
    import torch.nn as nn
    import torch
    from torch.utils.data import Dataset, DataLoader
except ModuleNotFoundError:
    print('some stuff not available')
# Module-level logger (not used in the code visible here; kept for callers).
logger = logging.getLogger(__name__)
def convert_to_n5(input_path, outfile, key='raw', resolution=None):
    '''
    Convert a tif stack (a directory of tifs or a single tif file) into a
    dataset inside an n5/zarr container.

    :param input_path: input path can be a directory, where we will search for all tifs in directory,
    or it can be a single tif file
    :param outfile: path of the zarr/n5 container to create or append to
    :param key: dataset key to write inside the container (default 'raw')
    :param resolution: per-axis 'resolution' attribute; defaults to ones for 4 axes
    :return: None (writes the dataset and its attributes to `outfile`)
    '''
    assert os.path.exists(input_path)
    if os.path.isdir(input_path):
        files = sorted(glob.glob(os.path.join(input_path, '*.tif')))
    elif input_path.endswith(('tiff', 'tif')):
        files = [input_path]
    else:
        # BUG FIX: was `raise NotImplemented(...)` -- NotImplemented is a
        # constant, not an exception type, so calling it raised a confusing
        # TypeError instead of the intended error.
        raise NotImplementedError('Only takes a directory or tif')
    assert len(files) > 0, f"No tif files in {input_path}"
    print(files)
    raw = np.array([imread(f) for f in tqdm(files)])
    raw = np.squeeze(raw)  # drop singleton axes (e.g. a single-image stack)
    print(raw.shape)
    print(raw.dtype)
    print(raw.min(), raw.max())
    print(outfile)
    f = zarr.open(outfile, 'a')
    print(f.keys())
    f[key] = raw
    # x, y, z, t for N5
    if resolution is None:
        resolution = np.ones(4)
    f[key].attrs['resolution'] = resolution
    f[key].attrs['offset'] = [0, 0, 0, 0]
try:
    class Dataset_3DT(Dataset):
        """Torch Dataset that serves random 2D slices from a 4D zarr volume
        through a gunpowder pipeline.

        Expects the zarr container to hold at least a 'raw' and a 'GT'
        dataset; the axis order looks like (t, z, y, x) -- TODO confirm.
        """
        def __init__(self, input_z_arr_path):
            ## expects zarr file with at least raw data
            self.input_z_arr_path = input_z_arr_path
            self.datasource = zarr.open(self.input_z_arr_path)
            self.build_pipelines()
        def build_pipelines(self):
            # Two zarr sources (raw + ground truth) merged into one provider,
            # then sampled at random locations.
            self._raw = gp.ArrayKey('raw')
            self.raw_source = gp.ZarrSource(
                self.input_z_arr_path,
                {self._raw: 'raw', }
            )
            self._ground_truth = gp.ArrayKey('GT')
            self.gt_source = gp.ZarrSource(
                self.input_z_arr_path,
                {self._ground_truth: 'GT', }
            )
            self.comb_source = ((self.raw_source, self.gt_source) + gp.MergeProvider())
            random_location = gp.RandomLocation()
            # Request a (1, 1, H, W) region: one slice along the first two
            # axes and the full extent of the last two.
            self.basic_request = gp.BatchRequest()
            self.basic_request[self._raw] = gp.Roi((0, 0, 0, 0),
                                                   (1, 1, self.datasource['raw'].shape[2], self.datasource['raw'].shape[3]))
            self.basic_request[self._ground_truth] = gp.Roi((0, 0, 0, 0), (
                1, 1, self.datasource['raw'].shape[2], self.datasource['raw'].shape[3]))
            self.random_sample = self.comb_source + random_location
        def __getitem__(self):
            # NOTE(review): a torch Dataset __getitem__ normally takes an
            # index argument; this signature only works when invoked directly
            # without one -- confirm intended usage.
            with gp.build(self.random_sample):
                batch = self.random_sample.request_batch(self.basic_request)
            return batch[self._raw].data, batch[self._ground_truth].data
except NameError:
    # gunpowder/torch were optional imports above; skip the class when absent.
    print('also other stuff not available')
|
{"hexsha": "c671a8628c5c11754374cae435958e16091cb687", "size": 3073, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "mattdloring/proliferation-and-track", "max_stars_repo_head_hexsha": "9381bd188e27ba7477951815534b4deaa419e31a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-10-02T13:59:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-02T13:59:48.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "mattdloring/proliferation-and-track", "max_issues_repo_head_hexsha": "9381bd188e27ba7477951815534b4deaa419e31a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "mattdloring/proliferation-and-track", "max_forks_repo_head_hexsha": "9381bd188e27ba7477951815534b4deaa419e31a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.73, "max_line_length": 124, "alphanum_fraction": 0.6088512854, "include": true, "reason": "import numpy", "num_tokens": 739}
|
from attacks.attack import deg_estimate, edge_sim_estimate, gcn_norm, gia_update_features, init_feat, node_sim_estimate
from attacks.injection import random_injection
from utils import feat_normalize
import random
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import attacks.metric as metric
import attacks.utils as utils
from attacks.utils import EarlyStop
class GIA(object):
    r"""
    Node similarity regularized PGD on graphs.

    Injects up to `n_inject_max` nodes, each with at most `n_edge_max` edges,
    and optimizes their features within [feat_lim_min, feat_lim_max] so the
    victim model misclassifies the targets; `disguise_coe` weights a
    node-similarity (homophily) regularizer used during the feature update.
    """
    def __init__(self,
                 epsilon,
                 n_epoch,
                 n_inject_max,
                 n_edge_max,
                 feat_lim_min,
                 feat_lim_max,
                 loss=F.nll_loss,
                 eval_metric=metric.eval_acc,
                 device='cpu',
                 early_stop=False,
                 verbose=True,
                 disguise_coe=1.0,
                 hinge=False):
        """
        :param epsilon: PGD step size
        :param n_epoch: number of optimization epochs for the attack
        :param n_inject_max: maximum number of injected nodes
        :param n_edge_max: maximum degree of each injected node
        :param feat_lim_min: lower bound for injected node features
        :param feat_lim_max: upper bound for injected node features
        :param loss: attack loss function (default: nll_loss)
        :param eval_metric: metric used to evaluate the attack
        :param device: torch device to run on
        :param early_stop: if truthy, enables early stopping (patience=1000)
        :param verbose: print progress information
        :param disguise_coe: weight of the similarity/disguise regularizer
        :param hinge: use the hinge variant of the feature update
        """
        self.device = device
        self.epsilon = epsilon
        self.n_epoch = n_epoch
        self.n_inject_max = n_inject_max
        self.n_edge_max = n_edge_max
        self.feat_lim_min = feat_lim_min
        self.feat_lim_max = feat_lim_max
        self.loss = loss
        self.eval_metric = eval_metric
        self.verbose = verbose
        self.disguise_coe = disguise_coe
        # Early stop
        if early_stop:
            self.early_stop = EarlyStop(patience=1000, epsilon=1e-4)
        else:
            self.early_stop = early_stop
        self.hinge = hinge
    def attack(self, model, adj, features, target_idx, labels=None):
        """
        Run the injection attack against `model`.

        :param model: victim model, called as model(features, adj)
        :param adj: adjacency matrix of the clean graph
        :param features: node feature matrix of the clean graph
        :param target_idx: indices of the target nodes
        :param labels: ground-truth labels; if None, the model's own
            predictions on the clean graph are used instead
        :return: (adj_attack, features_attack)
        """
        model.to(self.device)
        model.eval()
        n_total, n_feat = features.shape
        # BUG FIX: was `labels == None`; for tensor labels `==` may invoke
        # element-wise comparison -- identity (`is None`) is the correct test
        # (PEP 8: comparisons to singletons use `is`).
        if labels is None:
            pred_orig = model(features, adj)
            origin_labels = torch.argmax(pred_orig, dim=1)
        else:
            origin_labels = labels.view(-1)
        # self.adj_degs = deg_estimate(adj,self.n_inject_max)
        # Give every injected node the maximum allowed degree.
        self.adj_degs = torch.zeros((self.n_inject_max,)).long() + self.n_edge_max
        adj_attack = random_injection(adj, self.n_inject_max, self.n_edge_max, target_idx, self.device)
        # Random initialization
        features_attack = init_feat(self.n_inject_max, features, self.device, style="random",
                                    feat_lim_min=self.feat_lim_min, feat_lim_max=self.feat_lim_max)
        # Per-node similarity targets used by the disguise regularizer.
        features_h = node_sim_estimate(features, adj, self.n_inject_max)
        # self.edges_h = edge_sim_estimate(features,adj,self.n_inject_max*self.n_edge_max)
        # features_h = node_sim_estimate(features,adj,features_attack.shape[0],style='random')
        features_attack = gia_update_features(self, model, adj_attack, features, features_attack,
                                              origin_labels, target_idx, features_h, hinge=self.hinge)
        return adj_attack, features_attack
|
{"hexsha": "66ab1255492f018ee70f166ab0fc04f065d005df", "size": 2795, "ext": "py", "lang": "Python", "max_stars_repo_path": "attacks/gia.py", "max_stars_repo_name": "LFhase/GIA-HAO", "max_stars_repo_head_hexsha": "d07e7dd76a86686faae4ebfc60b91128c170dbd4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2022-01-29T12:01:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T07:35:42.000Z", "max_issues_repo_path": "attacks/gia.py", "max_issues_repo_name": "LFhase/GIA-HAO", "max_issues_repo_head_hexsha": "d07e7dd76a86686faae4ebfc60b91128c170dbd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "attacks/gia.py", "max_forks_repo_name": "LFhase/GIA-HAO", "max_forks_repo_head_hexsha": "d07e7dd76a86686faae4ebfc60b91128c170dbd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8333333333, "max_line_length": 146, "alphanum_fraction": 0.6411449016, "include": true, "reason": "import numpy,import scipy", "num_tokens": 615}
|
/****************************************************************************
**
** Copyright (C) 2017 TU Wien, ACIN, Vision 4 Robotics (V4R) group
** Contact: v4r.acin.tuwien.ac.at
**
** This file is part of V4R
**
** V4R is distributed under dual licenses - GPLv3 or closed source.
**
** GNU General Public License Usage
** V4R is free software: you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published
** by the Free Software Foundation, either version 3 of the License, or
** (at your option) any later version.
**
** V4R is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** Please review the following information to ensure the GNU General Public
** License requirements will be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
**
** Commercial License Usage
** If GPL is not suitable for your project, you must purchase a commercial
** license to use V4R. Licensees holding valid commercial V4R licenses may
** use this file in accordance with the commercial license agreement
** provided with the Software or, alternatively, in accordance with the
** terms contained in a written agreement between you and TU Wien, ACIN, V4R.
** For licensing terms and conditions please contact office<at>acin.tuwien.ac.at.
**
**
** The copyright holder additionally grants the author(s) of the file the right
** to use, copy, modify, merge, publish, distribute, sublicense, and/or
** sell copies of their contributions without any restrictions.
**
****************************************************************************/
/**
* @file main.cpp
* @author Johann Prankl (prankl@acin.tuwien.ac.at)
* @date 2017
* @brief
*
*/
#ifndef KP_TSF_POSE_TRACKER_KLT_HH
#define KP_TSF_POSE_TRACKER_KLT_HH
#include <float.h>
#include <pcl/io/io.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <v4r/camera_tracking_and_mapping/TSFData.h>
#include <v4r/core/macros.h>
#include <v4r/recognition/RansacSolvePnPdepth.h>
#include <Eigen/Dense>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
#include <boost/thread/mutex.hpp>
#include <fstream>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <v4r/common/impl/DataMatrix2D.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
namespace v4r {
/**
* TSFPoseTrackerKLT
*/
class V4R_EXPORTS TSFPoseTrackerKLT {
 public:
  /**
   * Parameter
   * Tuning knobs for the KLT feature tracking and the RANSAC-PnP pose estimation.
   */
  class Parameter {
   public:
    cv::TermCriteria termcrit;        //!< termination criterion (iteration count / epsilon) for iterative trackers
    cv::Size win_size;                //!< tracking search window size
    cv::Size subpix_win_size;         //!< window used for sub-pixel corner refinement
    int max_count;                    //!< maximum number of tracked feature points
    double pcent_reinit;              //!< surviving-point fraction below which tracking is re-initialized (see needReinit)
    double conf_tracked_points_norm;  //!< normalization constant for the tracked-points confidence
    v4r::RansacSolvePnPdepth::Parameter rt;  //!< parameters of the RANSAC PnP-with-depth solver
    Parameter()
        : termcrit(cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03)), win_size(cv::Size(31, 31)),
          subpix_win_size(cv::Size(10, 10)), max_count(500), pcent_reinit(0.5), conf_tracked_points_norm(250),
          rt(v4r::RansacSolvePnPdepth::Parameter(1.5, 0.01, 5000, INT_MIN, 4, 0.015)) {}
  };
 private:
  Parameter param;
  cv::Mat_<double> intrinsic;  // camera intrinsic matrix (set via setCameraParameter)
  std::vector<uchar> status;   // per-point tracking status buffer
  std::vector<float> err;      // per-point tracking error buffer
  std::vector<int> inliers;    // inlier indices from the pose estimation
  bool run, have_thread;       // background-thread control/state flags (isStarted() reads have_thread)
  boost::thread th_obectmanagement;
  boost::thread th_init;
  TSFData *data;  // shared tracking data; not owned -- set via setData(), caller keeps ownership
  v4r::RansacSolvePnPdepth::Ptr pnp;
  std::vector<int> plk_converged;
  std::vector<float> depth;
  double kal_dt;  // Kalman filter time step
  cv::Mat_<double> measurement;
  cv::KalmanFilter kalmanFilter;  // smooths the estimated camera pose
  // Background worker entry point (presumably loops while `run` is set -- confirm in .cpp).
  void operate();
  void getImage(const v4r::DataMatrix2D<Surfel> &cloud, cv::Mat &im);
  // True when too few points survive and features should be re-detected.
  bool needReinit(const std::vector<cv::Point2f> &points);
  bool trackCamera(double &conf_ransac_iter, double &conf_tracked_points);
  void getPoints3D(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, const std::vector<cv::Point2f> &points,
                   std::vector<Eigen::Vector3f> &points3d);
  // The filter* helpers presumably prune the given point lists in place -- confirm in .cpp.
  void filterValidPoints3D(std::vector<cv::Point2f> &points, std::vector<Eigen::Vector3f> &points3d);
  void filterValidPoints3D(std::vector<cv::Point2f> &pts1, std::vector<Eigen::Vector3f> &pts3d1,
                           std::vector<cv::Point2f> &pts2, std::vector<Eigen::Vector3f> &pts3d2);
  void filterInliers(std::vector<cv::Point2f> &pts1, std::vector<Eigen::Vector3f> &pts3d1,
                     std::vector<cv::Point2f> &pts2, std::vector<Eigen::Vector3f> &pts3d2, std::vector<int> &inliers);
  void filterConverged(std::vector<cv::Point2f> &pts1, std::vector<Eigen::Vector3f> &pts3d1,
                       std::vector<cv::Point2f> &pts2, std::vector<Eigen::Vector3f> &pts3d2,
                       std::vector<int> &converged);
  void initKalmanFilter(cv::KalmanFilter &kf, double dt);
  void updateKalmanFilter(cv::KalmanFilter &kf, const Eigen::Matrix4f &pose, Eigen::Matrix4f &kal_pose, bool have_pose);
  // Conversions between Euler angles and rotation matrices for the Kalman state.
  cv::Mat euler2rot(const cv::Mat &euler);
  cv::Mat rot2euler(const cv::Mat &rotationMatrix);
  // NaN-aware bilinear depth interpolation at sub-pixel positions; defined inline below.
  inline float getInterpolated(const v4r::DataMatrix2D<v4r::Surfel> &cloud, const cv::Point2f &pt);
  inline float getInterpolated(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, const cv::Point2f &pt);
  inline float sqr(const float &d) {
    return d * d;
  }
 public:
  cv::Mat dbg;  // optional debug image
  TSFPoseTrackerKLT(const Parameter &p = Parameter());
  ~TSFPoseTrackerKLT();
  // Start/stop the background processing thread.
  void start();
  void stop();
  inline bool isStarted() {
    return have_thread;
  }
  void reset();
  /// Store a pointer to the shared data structure; ownership stays with the caller.
  void setData(TSFData *_data) {
    data = _data;
  }
  /// Track the camera for the current frame; outputs two confidence measures.
  void track(double &conf_ransac_iter, double &conf_tracked_points);
  void setCameraParameter(const cv::Mat &_intrinsic);
  void setParameter(const Parameter &p);
  typedef std::shared_ptr<::v4r::TSFPoseTrackerKLT> Ptr;
  typedef std::shared_ptr<::v4r::TSFPoseTrackerKLT const> ConstPtr;
};
/*************************** INLINE METHODES **************************/
/**
* @brief TSFPoseTrackerKLT::getInterpolated
* @param cloud
* @param pt
* @return
*/
inline float TSFPoseTrackerKLT::getInterpolated(const v4r::DataMatrix2D<v4r::Surfel> &cloud, const cv::Point2f &pt) {
  // Bilinear interpolation of the depth component (pt[2]) over the four
  // integer neighbors of `pt`; NaN surfels are skipped and the result is
  // renormalized by the sum of the valid weights.
  // NOTE(review): no bounds check -- assumes pt lies strictly inside the
  // image so (x0+1, y0+1) is valid; confirm callers guarantee this.
  const int x0 = (int)pt.x;
  const int y0 = (int)pt.y;
  const float fx = pt.x - x0;
  const float fy = pt.y - y0;
  // Neighbor offsets and their bilinear weights, in the same fixed order as a
  // hand-unrolled version so the floating-point accumulation is identical.
  static const int offs[4][2] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
  const float wgt[4] = {static_cast<float>((1. - fx) * (1. - fy)), static_cast<float>(fx * (1. - fy)),
                        static_cast<float>((1. - fx) * fy), static_cast<float>(fx * fy)};
  float zsum = 0.f, wsum = 0.f;
  for (int i = 0; i < 4; i++) {
    const v4r::Surfel &s = cloud(y0 + offs[i][1], x0 + offs[i][0]);
    if (!std::isnan(s.pt[0])) {
      zsum += wgt[i] * s.pt[2];
      wsum += wgt[i];
    }
  }
  // NaN when no valid neighbor contributed (or all depths were zero).
  return (zsum > 0. ? zsum / wsum : std::numeric_limits<float>::quiet_NaN());
}
/**
* @brief TSFPoseTrackerKLT::getInterpolated
* @param cloud
* @param pt
* @return
*/
inline float TSFPoseTrackerKLT::getInterpolated(const pcl::PointCloud<pcl::PointXYZRGB> &cloud, const cv::Point2f &pt) {
  // Bilinear interpolation of z over the four integer neighbors of `pt`,
  // skipping NaN points and renormalizing by the sum of the valid weights.
  // pcl::PointCloud::operator()(col, row) takes (column, row), hence (x, y).
  // NOTE(review): no bounds check -- assumes (x0+1, y0+1) stays inside the
  // organized cloud; confirm callers guarantee this.
  const int x0 = (int)pt.x;
  const int y0 = (int)pt.y;
  const float fx = pt.x - x0;
  const float fy = pt.y - y0;
  // Fixed neighbor order keeps the floating-point accumulation identical to
  // a hand-unrolled version.
  static const int offs[4][2] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
  const float wgt[4] = {static_cast<float>((1. - fx) * (1. - fy)), static_cast<float>(fx * (1. - fy)),
                        static_cast<float>((1. - fx) * fy), static_cast<float>(fx * fy)};
  float zsum = 0.f, wsum = 0.f;
  for (int i = 0; i < 4; i++) {
    const pcl::PointXYZRGB &p = cloud(x0 + offs[i][0], y0 + offs[i][1]);
    if (!std::isnan(p.x)) {
      zsum += wgt[i] * p.z;
      wsum += wgt[i];
    }
  }
  // NaN when no valid neighbor contributed (or all depths were zero).
  return (zsum > 0. ? zsum / wsum : std::numeric_limits<float>::quiet_NaN());
}
} // namespace v4r
#endif
|
{"hexsha": "a0b243f65f9d381bb6c4bbcc24b6c081a80158d4", "size": 7735, "ext": "hh", "lang": "C++", "max_stars_repo_path": "modules/camera_tracking_and_mapping/include/v4r/camera_tracking_and_mapping/TSFPoseTrackerKLT.hh", "max_stars_repo_name": "v4r-tuwien/v4r", "max_stars_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_stars_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-02-22T11:36:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-20T11:31:08.000Z", "max_issues_repo_path": "modules/camera_tracking_and_mapping/include/v4r/camera_tracking_and_mapping/TSFPoseTrackerKLT.hh", "max_issues_repo_name": "v4r-tuwien/v4r", "max_issues_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_issues_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/camera_tracking_and_mapping/include/v4r/camera_tracking_and_mapping/TSFPoseTrackerKLT.hh", "max_forks_repo_name": "v4r-tuwien/v4r", "max_forks_repo_head_hexsha": "ff3fbd6d2b298b83268ba4737868bab258262a40", "max_forks_repo_licenses": ["BSD-1-Clause", "BSD-2-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-10-19T10:39:23.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T13:39:03.000Z", "avg_line_length": 30.4527559055, "max_line_length": 120, "alphanum_fraction": 0.6421460892, "num_tokens": 2392}
|
import sys
sys.path.insert(1, sys.path[0] + "/../")
import eig
from eig.battleship import BattleshipHypothesisSpace, Parser, Executor, EqualSizesDistribution
import numpy as np
import time
# Example question in the battleship DSL; it appears to ask whether any tile
# in column 1 is Purple -- confirm against the eig DSL documentation.
PROG = "(any (map (lambda y0 (and (== (colL y0) 1) (== (color y0) Purple))) (set AllTiles)))"
# Partially revealed 6x6 board; presumably -1 = hidden, 0 = water, and
# positive values are ship labels (matching ship_labels=[1, 2, 3] below) --
# TODO confirm with the eig package docs.
OBS = np.array([[-1, -1, 1, 1, -1, 2],
                [-1, 0, -1, 0, 0, 2],
                [-1, 0, -1, -1, -1, -1],
                [0, -1, -1, 0, 0, -1],
                [-1, -1, 0, 0, -1, 0],
                [-1, 0, 0, -1, -1, 0]])
def run_build_space():
    """Construct the full Battleship hypothesis space for a 6x6 grid."""
    config = dict(grid_size=6, ship_labels=[1, 2, 3],
                  ship_sizes=[2, 3, 4], orientations=['V', 'H'])
    return BattleshipHypothesisSpace(**config)
def run_update_context(hs):
    """Build a Bayesian context over `hs` and condition it on OBS."""
    belief = eig.Bayes(hs, EqualSizesDistribution(ship_labels=[1, 2, 3]))
    context = eig.Context(hs, belief)
    context.observe(OBS)
    print("Valid hypothesis size:", len(context.valid_ids))
    return context
def run_calculate(context):
    """Parse PROG, wrap it in an executor, and compute its EIG in `context`."""
    executor = Executor(Parser.parse(PROG))
    return eig.compute_eig(executor, context)
def run_multiple(hs, time=100):
    """Score PROG against OBS `time` times via the basic (precomputed-space) API."""
    for _ in range(time):
        eig.compute_eig_basic(hs, PROG, OBS)
def run_multiple_fast(time=100):
    """Score PROG against OBS `time` times via the all-in-one fast API."""
    space_spec = dict(grid_size=6, ship_labels=[1, 2, 3],
                      ship_sizes=[2, 3, 4], orientations=['V', 'H'])
    for _ in range(time):
        eig.compute_eig_fast(PROG, OBS, **space_spec)
if __name__ == "__main__":
    # test run single time
    # Time each stage of a single EIG computation separately.
    time0 = time.time()
    hs = run_build_space()
    time1 = time.time()
    ctx = run_update_context(hs)
    time2 = time.time()
    eig_s = run_calculate(ctx)
    time3 = time.time()
    t_create_space = time1 - time0
    t_update_belief = time2 - time1
    t_calc_eig = time3 - time2
    t_total = time3 - time0
    print("Total time: {:.3f}".format(t_total))
    print("Time for creating hypothesis space: {:.3f}".format(t_create_space))
    print("Time for updating belief: {:.3f}".format(t_update_belief))
    print("Time for calculating EIG: {:.3f}".format(t_calc_eig))
    # test run multiple time
    # Amortized timing: reuse the prebuilt hypothesis space 100 times.
    time0 = time.time()
    run_multiple(hs)
    time1 = time.time()
    print("Time for calculating EIG {} times using compute_eig_basic: {:.3f}".format(100, time1 - time0))
    # test run multiple time
    # Amortized timing of the fast API (takes the space spec directly).
    time0 = time.time()
    run_multiple_fast()
    time1 = time.time()
    print("Time for calculating EIG {} times using compute_eig_fast: {:.3f}".format(100, time1 - time0))
|
{"hexsha": "b8f1031b56a905558821535a3389e74412daf27d", "size": 2390, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/eig_time.py", "max_stars_repo_name": "anselmrothe/EIG", "max_stars_repo_head_hexsha": "2e5da26bb0552bcaf6f9656519c8167808f0f6cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-03-12T15:55:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T23:09:30.000Z", "max_issues_repo_path": "test/eig_time.py", "max_issues_repo_name": "anselmrothe/EIG", "max_issues_repo_head_hexsha": "2e5da26bb0552bcaf6f9656519c8167808f0f6cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-24T16:55:26.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-24T16:55:26.000Z", "max_forks_repo_path": "test/eig_time.py", "max_forks_repo_name": "anselmrothe/EIG", "max_forks_repo_head_hexsha": "2e5da26bb0552bcaf6f9656519c8167808f0f6cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-18T18:44:05.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-30T05:47:15.000Z", "avg_line_length": 30.641025641, "max_line_length": 105, "alphanum_fraction": 0.6372384937, "include": true, "reason": "import numpy", "num_tokens": 752}
|
% Test some operations involving chebfuns with delta functions.
function pass = test_deltaOps(pref)
% Fall back to the default preferences when none are supplied.
if ( nargin < 1 )
    pref = chebfunpref();
end
% All checks below compare against the delta-function tolerance.
tol = pref.deltaPrefs.deltaTol;
% These tests used to live in the scripts in tests/deltafun, but since they
% create chebfuns, they are better placed here.
% Some tests for cumsum() based on examples provided by LNT.
n = 6;
x = chebfun('x',[0 n]);
f = 0.5*sin(x);
for j = 1:n-1
    f = f + randn*dirac(x-j);
end
% diff and cumsum should be inverse operations, deltas included.
err = norm(diff(cumsum(f)) - f);
pass(1) = err < tol;
x = chebfun('x');
f = dirac(x-.5) + dirac(x) + dirac(x+.5) + heaviside(x);
err = norm(diff(cumsum(f)) - f);
pass(2) = err < tol;
% Reconstruct a piecewise-constant function from its derivative.
x = chebfun('x');
f = sign(x)+sign(x-.5);
f2 = f(-1) + cumsum(diff(f));
err = norm(f - f2);
pass(3) = err < tol;
% A test for diff() based on an example from LNT.
% Temporarily force delta functions on; restore preferences even on error.
savedPrefs = chebfunpref();
chebfunpref.setDefaults('enableDeltaFunctions', true);
try
    x = chebfun('x', [0 5]);
    f = 0.5*sin(x);
    A = randn(4, 1);
    for j = 1:4
        f = f + A(j)*dirac(x-j);
    end
    F = cumsum(.5*sin(x));
    for j = 1:4
        F = F + A(j)*heaviside(x-j);
    end
    % F is an antiderivative of f, so diff(F) matches f up to the constant f(0).
    err = norm(diff(F) - (f - f(0)));
    pass(4) = err < tol;
catch ME
    chebfunpref.setDefaults(savedPrefs);
    rethrow(ME)
end
chebfunpref.setDefaults(savedPrefs);
% A test involving innerProduct:
% <dirac(x), x> evaluates x at 0, so it should vanish.
x = chebfun('x');
pass(5) = innerProduct(dirac(x), x) < eps;
% Test quasi matrix construction
x = chebfun('x');
A = [];
A = [ A, dirac(x-1) ];
A = [ A, dirac(x-0.5) ];
A = [ A, dirac(x-0.0) ];
pass(6) = (norm(A(:, 2) - dirac(x-.5)) < eps) && (size(A,2) == 3);
end
|
{"author": "chebfun", "repo": "chebfun", "sha": "8c49396a55e46ddd57a1d108c6a8f32e37536d54", "save_path": "github-repos/MATLAB/chebfun-chebfun", "path": "github-repos/MATLAB/chebfun-chebfun/chebfun-8c49396a55e46ddd57a1d108c6a8f32e37536d54/tests/chebfun/test_deltaOps.m"}
|
#pragma once
#include <Eigen/Core>
#include <array>
namespace Discregrid
{
// Classifies which feature of a triangle realizes the minimum distance to a
// query point: VN0..VN2 = vertex i, EN0..EN2 = edge i (edge indexing
// convention defined by the implementation -- confirm in the .cpp), FN = face
// interior.
enum class NearestEntity
{
	VN0,
	VN1,
	VN2,
	EN0,
	EN1,
	EN2,
	FN
};
// Presumably returns the squared Euclidean distance from `point` to the
// triangle spanned by the three vertex pointers; optionally writes the closest
// point on the triangle and the entity (vertex/edge/face) that realized it.
float point_triangle_sqdistance(Eigen::Vector3f const &point,
                                std::array<Eigen::Vector3f const *, 3> const &triangle,
                                Eigen::Vector3f *nearest_point = nullptr,
                                NearestEntity *ne = nullptr);
}
|
{"hexsha": "b5d24cb0eef68b82278a9f67028d55c34d4de898", "size": 542, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "discregrid/src/geometry/point_triangle_distance.hpp", "max_stars_repo_name": "FeatherAntennae/Discregrid", "max_stars_repo_head_hexsha": "54ced899445d902470efe6b3d8df73c0fd9c23d1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "discregrid/src/geometry/point_triangle_distance.hpp", "max_issues_repo_name": "FeatherAntennae/Discregrid", "max_issues_repo_head_hexsha": "54ced899445d902470efe6b3d8df73c0fd9c23d1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "discregrid/src/geometry/point_triangle_distance.hpp", "max_forks_repo_name": "FeatherAntennae/Discregrid", "max_forks_repo_head_hexsha": "54ced899445d902470efe6b3d8df73c0fd9c23d1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.0740740741, "max_line_length": 92, "alphanum_fraction": 0.4557195572, "num_tokens": 114}
|
/-
Copyright (c) 2022 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
! This file was ported from Lean 3 source module ring_theory.polynomial.opposites
! leanprover-community/mathlib commit 932872382355f00112641d305ba0619305dc8642
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Polynomial.Degree.Definitions
/-! # Interactions between `R[X]` and `Rᵐᵒᵖ[X]`
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file contains the basic API for "pushing through" the isomorphism
`op_ring_equiv : R[X]ᵐᵒᵖ ≃+* Rᵐᵒᵖ[X]`. It allows going back and forth between a polynomial ring
over a semiring and the polynomial ring over the opposite semiring. -/
open Polynomial
open Polynomial MulOpposite
variable {R : Type _} [Semiring R] {p q : R[X]}
noncomputable section
namespace Polynomial
/- warning: polynomial.op_ring_equiv -> Polynomial.opRingEquiv is a dubious translation:
lean 3 declaration is
forall (R : Type.{u1}) [_inst_2 : Semiring.{u1} R], RingEquiv.{u1, u1} (MulOpposite.{u1} (Polynomial.{u1} R _inst_2)) (Polynomial.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2)) (MulOpposite.hasMul.{u1} (Polynomial.{u1} R _inst_2) (Polynomial.mul'.{u1} R _inst_2)) (MulOpposite.hasAdd.{u1} (Polynomial.{u1} R _inst_2) (Polynomial.add'.{u1} R _inst_2)) (Polynomial.mul'.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2)) (Polynomial.add'.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2))
but is expected to have type
forall (R : Type.{u1}) [_inst_2 : Semiring.{u1} R], RingEquiv.{u1, u1} (MulOpposite.{u1} (Polynomial.{u1} R _inst_2)) (Polynomial.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2)) (MulOpposite.mul.{u1} (Polynomial.{u1} R _inst_2) (Polynomial.mul'.{u1} R _inst_2)) (Polynomial.mul'.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2)) (MulOpposite.add.{u1} (Polynomial.{u1} R _inst_2) (Polynomial.add'.{u1} R _inst_2)) (Polynomial.add'.{u1} (MulOpposite.{u1} R) (MulOpposite.semiring.{u1} R _inst_2))
Case conversion may be inaccurate. Consider using '#align polynomial.op_ring_equiv Polynomial.opRingEquivₓ'. -/
/-- Ring isomorphism between `R[X]ᵐᵒᵖ` and `Rᵐᵒᵖ[X]` sending each coefficient of a polynomial
to the corresponding element of the opposite ring. -/
def opRingEquiv (R : Type _) [Semiring R] : R[X]ᵐᵒᵖ ≃+* Rᵐᵒᵖ[X] :=
((toFinsuppIso R).op.trans AddMonoidAlgebra.opRingEquiv).trans (toFinsuppIso _).symm
#align polynomial.op_ring_equiv Polynomial.opRingEquiv
/-! Lemmas to get started, using `op_ring_equiv R` on the various expressions of
`finsupp.single`: `monomial`, `C a`, `X`, `C a * X ^ n`. -/
#print Polynomial.opRingEquiv_op_monomial /-
-- for maintenance purposes: `by simp [op_ring_equiv]` proves this lemma
@[simp]
theorem opRingEquiv_op_monomial (n : ℕ) (r : R) :
opRingEquiv R (op (monomial n r : R[X])) = monomial n (op r) := by
simp only [op_ring_equiv, RingEquiv.trans_apply, RingEquiv.op_apply_apply,
RingEquiv.toAddEquiv_eq_coe, AddEquiv.mulOp_apply, [anonymous], AddEquiv.coe_trans,
op_add_equiv_apply, RingEquiv.coe_toAddEquiv, op_add_equiv_symm_apply, Function.comp_apply,
unop_op, to_finsupp_iso_apply, to_finsupp_monomial, AddMonoidAlgebra.opRingEquiv_single,
to_finsupp_iso_symm_apply, of_finsupp_single]
#align polynomial.op_ring_equiv_op_monomial Polynomial.opRingEquiv_op_monomial
-/
#print Polynomial.opRingEquiv_op_C /-
@[simp]
theorem opRingEquiv_op_C (a : R) : opRingEquiv R (op (C a)) = C (op a) :=
opRingEquiv_op_monomial 0 a
#align polynomial.op_ring_equiv_op_C Polynomial.opRingEquiv_op_C
-/
#print Polynomial.opRingEquiv_op_X /-
@[simp]
theorem opRingEquiv_op_X : opRingEquiv R (op (X : R[X])) = X :=
opRingEquiv_op_monomial 1 1
#align polynomial.op_ring_equiv_op_X Polynomial.opRingEquiv_op_X
-/
#print Polynomial.opRingEquiv_op_C_mul_X_pow /-
theorem opRingEquiv_op_C_mul_X_pow (r : R) (n : ℕ) :
opRingEquiv R (op (C r * X ^ n : R[X])) = C (op r) * X ^ n := by
simp only [X_pow_mul, op_mul, op_pow, map_mul, map_pow, op_ring_equiv_op_X, op_ring_equiv_op_C]
#align polynomial.op_ring_equiv_op_C_mul_X_pow Polynomial.opRingEquiv_op_C_mul_X_pow
-/
/-! Lemmas to get started, using `(op_ring_equiv R).symm` on the various expressions of
`finsupp.single`: `monomial`, `C a`, `X`, `C a * X ^ n`. -/
#print Polynomial.opRingEquiv_symm_monomial /-
@[simp]
theorem opRingEquiv_symm_monomial (n : ℕ) (r : Rᵐᵒᵖ) :
(opRingEquiv R).symm (monomial n r) = op (monomial n (unop r)) :=
(opRingEquiv R).Injective (by simp)
#align polynomial.op_ring_equiv_symm_monomial Polynomial.opRingEquiv_symm_monomial
-/
#print Polynomial.opRingEquiv_symm_C /-
@[simp]
theorem opRingEquiv_symm_C (a : Rᵐᵒᵖ) : (opRingEquiv R).symm (C a) = op (C (unop a)) :=
opRingEquiv_symm_monomial 0 a
#align polynomial.op_ring_equiv_symm_C Polynomial.opRingEquiv_symm_C
-/
#print Polynomial.opRingEquiv_symm_X /-
@[simp]
theorem opRingEquiv_symm_X : (opRingEquiv R).symm (X : Rᵐᵒᵖ[X]) = op X :=
opRingEquiv_symm_monomial 1 1
#align polynomial.op_ring_equiv_symm_X Polynomial.opRingEquiv_symm_X
-/
#print Polynomial.opRingEquiv_symm_C_mul_X_pow /-
theorem opRingEquiv_symm_C_mul_X_pow (r : Rᵐᵒᵖ) (n : ℕ) :
(opRingEquiv R).symm (C r * X ^ n : Rᵐᵒᵖ[X]) = op (C (unop r) * X ^ n) := by
rw [C_mul_X_pow_eq_monomial, op_ring_equiv_symm_monomial, ← C_mul_X_pow_eq_monomial]
#align polynomial.op_ring_equiv_symm_C_mul_X_pow Polynomial.opRingEquiv_symm_C_mul_X_pow
-/
/-! Lemmas about more global properties of polynomials and opposites. -/
#print Polynomial.coeff_opRingEquiv /-
@[simp]
theorem coeff_opRingEquiv (p : R[X]ᵐᵒᵖ) (n : ℕ) :
(opRingEquiv R p).coeff n = op ((unop p).coeff n) :=
by
induction p using MulOpposite.rec'
cases p
rfl
#align polynomial.coeff_op_ring_equiv Polynomial.coeff_opRingEquiv
-/
#print Polynomial.support_opRingEquiv /-
@[simp]
theorem support_opRingEquiv (p : R[X]ᵐᵒᵖ) : (opRingEquiv R p).support = (unop p).support :=
by
induction p using MulOpposite.rec'
cases p
exact Finsupp.support_mapRange_of_injective _ _ op_injective
#align polynomial.support_op_ring_equiv Polynomial.support_opRingEquiv
-/
#print Polynomial.natDegree_opRingEquiv /-
@[simp]
theorem natDegree_opRingEquiv (p : R[X]ᵐᵒᵖ) : (opRingEquiv R p).natDegree = (unop p).natDegree :=
by
by_cases p0 : p = 0
· simp only [p0, _root_.map_zero, nat_degree_zero, unop_zero]
·
simp only [p0, nat_degree_eq_support_max', Ne.def, AddEquivClass.map_eq_zero_iff, not_false_iff,
support_op_ring_equiv, unop_eq_zero_iff]
#align polynomial.nat_degree_op_ring_equiv Polynomial.natDegree_opRingEquiv
-/
#print Polynomial.leadingCoeff_opRingEquiv /-
@[simp]
theorem leadingCoeff_opRingEquiv (p : R[X]ᵐᵒᵖ) :
(opRingEquiv R p).leadingCoeff = op (unop p).leadingCoeff := by
rw [leading_coeff, coeff_op_ring_equiv, nat_degree_op_ring_equiv, leading_coeff]
#align polynomial.leading_coeff_op_ring_equiv Polynomial.leadingCoeff_opRingEquiv
-/
end Polynomial
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/RingTheory/Polynomial/Opposites.lean"}
|
# AUTOGENERATED! DO NOT EDIT! File to edit: perception.ipynb (unless otherwise specified).
__all__ = ['RADIUS', 'ORIGIN', 'Perception']
# Cell
# import import_ipynb
from .utility import normalize, distance
# Cell
import numpy as np
from numpy.random import default_rng
from dataclasses import dataclass
from dataclasses import field
RADIUS = 4
ORIGIN = np.array([0.,0.])
# Cell
@dataclass
class Perception:
    """Distance-based sensing model for an agent in a 2-D world.

    Converts distances from a position to a set of objects into stimulus
    intensities (inverse-square law, optionally clipped), perceived
    sensation (Weber-Fechner / Stevens laws), and direction vectors
    (optionally perturbed by Gaussian angular noise).
    """

    radius: float = RADIUS  # sensing range; objects beyond it give zero stimulus
    origin: np.ndarray = field(default_factory=lambda: ORIGIN.copy())  # default position
    steven_exponent: float = 0.6  # Stevens' power-law exponent (0.6 ~ human smell)
    reward_radius: float = 0.0  # subtracted from distances (clamped at 0) before scoring
    epsilon: float = np.sqrt(1e-1)  # softening term: keeps stimulus finite at d == 0
    use_clip: bool = False  # if True, cap each per-object stimulus at 1
    # Normally distributed noise in multi_direction and single_direction expressed
    # as standard deviation.
    direction_noise: float = 0
    # def __init__(self, radius=RADIUS, origin=ORIGIN, steven_exponent=0.6, reward_radius=0):
    #     self.radius = radius
    #     self.origin = origin.copy()
    #     self.steven_exponent = steven_exponent
    #     self.reward_radius = reward_radius
    def __post_init__(self):
        # Private RNG so direction noise does not disturb global numpy state.
        self._rng = default_rng()
    # Inverse square law
    # The stimulus caused by one object at distance d.
    # The closer the object the smaller the distance and the bigger the stimulus
    # There is a max stimulus (max firing) that one object can cause. We set it to 1.
    def stimulus(self, d, radius=None):
        """Per-object stimulus 1/(epsilon + d)^2 for an array of distances.

        NOTE(review): `d` is assumed to be an ndarray — the boolean-mask
        assignments below fail on a plain Python scalar.
        """
        if radius is None:
            radius = self.radius
        # epsilon = 1e-1
        # epsilon = np.sqrt(1e-1)
        d = np.maximum(d - self.reward_radius, 0)
        v = 1 / (self.epsilon + d) ** 2
        # Zero out objects outside the sensing range (compared after the
        # reward_radius shift above).
        v[d > radius] = 0
        if self.use_clip:
            v[v > 1] = 1
        return v
    # This function sums the stimuli from all the objects.
    # def stimuli(distances, radius=RADIUS):
    def stimuli(self, distances):
        # Sum over axis 0, i.e. over objects.
        return self.stimulus(distances, self.radius).sum(axis=0)
    # Weber-Fechner's law
    def weber(self, stimuli):
        return np.log(1 + stimuli)
    # Steven's law
    # def steven(stimuli,exponent=0.6): # The exponent for smell in humans is 0.6
    def steven(self, stimuli):
        return stimuli ** self.steven_exponent
    # The sensation of the aggregated stimuli from objects inside the radius.
    def sensation(self, objects, position=None, radius=None):
        """Weber-Fechner sensation of the summed stimuli at `position`."""
        if position is None:
            position = self.origin
        if radius is None:
            radius = self.radius
        total = self.stimuli(distance(position, objects))
        return self.weber(total)
    def _noise_rotation(self):
        # 2-D rotation matrix for a random angle phi ~ N(0, direction_noise) * 2*pi.
        # With direction_noise == 0 this is the identity (phi == 0).
        phi = self._rng.normal(scale=self.direction_noise) * 2 * np.pi
        c, s = np.cos(phi), np.sin(phi)
        return np.array([[c, -s], [s, c]])
    # Computes the main direction to the objects within the radius, i.e. the sum of the perception vectors.
    # Possible input to the policy network
    def multi_direction(self, position, objects):
        """Stimulus-weighted mean direction to all objects, noise-rotated."""
        signals = self.stimulus(distance(position, objects)).reshape(-1, 1)
        vector_sum = (signals * normalize(objects - position)).sum(
            axis=0, keepdims=True
        )  # keep the 1x2 dimension (do not drop it)
        vector_sum = normalize(vector_sum).reshape(-1)
        return np.dot(self._noise_rotation(), vector_sum)
    # return normalize(vector_sum).reshape(-1)  # flatten!
    # Total smell from all objects
    def total_intensity(self, position, objects):
        if objects.size == 0:
            # No objects at all: zero sensation.
            return np.array([0.0])
        return self.sensation(objects,position)
    # Total smell from the closest object (disregarding all others)
    def nearest_intensity(self, position, objects):
        closest_point = self.closest(position, objects)
        return self.total_intensity(position, closest_point)
    # Filters out the visible objects from position.
    def visible(self, position, objects, radius=None):
        if radius is None:
            radius = self.radius
        return np.array([x for x in objects if distance(position, x) < radius])
    # return objects[distance(position, objects) < radius] #Faster!
    # This function returns the closest point to position in objects or the empty array
    def closest(self, position, objects, radius=None):
        if radius is None:
            radius = self.radius
        vis = self.visible(position, objects, radius)
        if vis.shape[0] == 0:
            return np.array([]) # Return an empty array if no objects are in sight
        index = np.argmin(distance(position, vis))
        return vis[index].reshape(1, 2)
    # Computes the direction to the closest point from position among objects within the radius
    # Returns the 0-vector if there are no such objects
    def closest_direction(self, position, objects):
        c = self.closest(position, objects)
        # `closest` returns a shape-(0,) array when nothing is visible.
        if c.shape == (0,):
            return np.zeros(2)
        else:
            v = normalize(c - position).reshape(-1)
            return np.dot(self._noise_rotation(), v)
    # return normalize(c - position).reshape(-1)
|
{"hexsha": "f12a1dbe966aa4d119dc29e35e03631ad193a046", "size": 5007, "ext": "py", "lang": "Python", "max_stars_repo_path": "ecotwins/perception.py", "max_stars_repo_name": "LogicalInvestigations/ecotwins", "max_stars_repo_head_hexsha": "e86cc91df6cd45cefbf1e43399908cf2f003c9b7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ecotwins/perception.py", "max_issues_repo_name": "LogicalInvestigations/ecotwins", "max_issues_repo_head_hexsha": "e86cc91df6cd45cefbf1e43399908cf2f003c9b7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ecotwins/perception.py", "max_forks_repo_name": "LogicalInvestigations/ecotwins", "max_forks_repo_head_hexsha": "e86cc91df6cd45cefbf1e43399908cf2f003c9b7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0888888889, "max_line_length": 107, "alphanum_fraction": 0.6496904334, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1216}
|
import argparse
import glob
import json
import os
import shutil
import time

import numpy as np
import torch
from easydict import EasyDict as edict
from torch import nn, optim

from configs.models import architectures
from datasets.dataloader import get_dataloader, get_datasets
from lib.loss import MetricLoss
from lib.tester import get_trainer
from lib.utils import setup_seed, load_config
from models.architectures import KPFCNN
setup_seed(0)
if __name__ == '__main__':
    # Parse CLI: a single positional argument, the path to the experiment config.
    parser = argparse.ArgumentParser()
    parser.add_argument('config', type=str, help='Path to the config file.')
    args = parser.parse_args()
    config = load_config(args.config)

    # Derive all output directories under snapshot/<exp_dir>/.
    config['snapshot_dir'] = 'snapshot/%s' % config['exp_dir']
    config['tboard_dir'] = 'snapshot/%s/tensorboard' % config['exp_dir']
    config['save_dir'] = 'snapshot/%s/checkpoints' % config['exp_dir']
    config = edict(config)

    os.makedirs(config.snapshot_dir, exist_ok=True)
    os.makedirs(config.save_dir, exist_ok=True)
    os.makedirs(config.tboard_dir, exist_ok=True)

    # Persist the resolved config next to the snapshot. Use a context manager
    # so the handle is flushed and closed even if json.dump raises (the
    # original `json.dump(..., open(...))` left the file handle dangling).
    with open(os.path.join(config.snapshot_dir, 'config.json'), 'w') as f:
        json.dump(config, f, indent=4)

    config.device = torch.device('cuda') if config.gpu_mode else torch.device('cpu')

    # Back up the source tree alongside the snapshot for reproducibility.
    # NOTE(review): `cp -r` is POSIX-only; shutil.copytree would be portable
    # but errors when the destination exists, so behavior is kept as-is.
    os.system(f'cp -r models {config.snapshot_dir}')
    os.system(f'cp -r datasets {config.snapshot_dir}')
    os.system(f'cp -r lib {config.snapshot_dir}')
    shutil.copy2('main.py', config.snapshot_dir)

    # Model initialization: the architecture spec is selected per dataset.
    config.architecture = architectures[config.dataset]
    config.model = KPFCNN(config)

    # Optimizer: config.optimizer is re-bound from its name to an instance.
    # Fail fast on an unknown name instead of crashing later with a confusing
    # AttributeError inside the scheduler (original fell through silently).
    if config.optimizer == 'SGD':
        config.optimizer = optim.SGD(
            config.model.parameters(),
            lr=config.lr,
            momentum=config.momentum,
            weight_decay=config.weight_decay,
        )
    elif config.optimizer == 'ADAM':
        config.optimizer = optim.Adam(
            config.model.parameters(),
            lr=config.lr,
            betas=(0.9, 0.999),
            weight_decay=config.weight_decay,
        )
    else:
        raise ValueError(f'Unsupported optimizer: {config.optimizer}')

    # Exponential learning-rate decay.
    config.scheduler = optim.lr_scheduler.ExponentialLR(
        config.optimizer,
        gamma=config.scheduler_gamma,
    )

    # Datasets and dataloaders. Neighborhood limits computed on the train set
    # are reused for val/test so KPConv neighborhoods stay consistent.
    train_set, val_set, benchmark_set = get_datasets(config)
    config.train_loader, neighborhood_limits = get_dataloader(
        dataset=train_set,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
    )
    config.val_loader, _ = get_dataloader(
        dataset=val_set,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=1,
        neighborhood_limits=neighborhood_limits,
    )
    config.test_loader, _ = get_dataloader(
        dataset=benchmark_set,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=1,
        neighborhood_limits=neighborhood_limits,
    )

    # Loss and trainer; dispatch on the requested mode.
    config.desc_loss = MetricLoss(config)
    trainer = get_trainer(config)
    if config.mode == 'train':
        trainer.train()
    elif config.mode == 'val':
        trainer.eval()
    else:
        trainer.test()
|
{"hexsha": "a775aacb2c8c4cbc069b846a12d982981ff58be4", "size": 3663, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "StrikerCC/OverlapPredator", "max_stars_repo_head_hexsha": "cae15b0d3ac60729961b5983a8f740a1a822c96b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "StrikerCC/OverlapPredator", "max_issues_repo_head_hexsha": "cae15b0d3ac60729961b5983a8f740a1a822c96b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "StrikerCC/OverlapPredator", "max_forks_repo_head_hexsha": "cae15b0d3ac60729961b5983a8f740a1a822c96b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.63, "max_line_length": 93, "alphanum_fraction": 0.5915915916, "include": true, "reason": "import numpy", "num_tokens": 711}
|
from keras import backend
from keras.utils import plot_model
from keras.layers import Lambda, Dense, Input, Reshape, Embedding
from keras.models import Model
from keras.callbacks import TensorBoard
from keras.losses import mse
import keras.metrics as metrics
import argparse
import numpy as np
from synthetic_data import load_training_data
N = 100 # individuals
M = 1000 # SNPs
# K = Kc + Kd -- Observed data dimension
Kc = 40
Kd = 10
H = 10 # Latent dimension
def reshape_rebatch(x, o, y):
    """Re-batch per-SNP: turn (1, N, M) arrays into M batches of (N, .).

    x: genotypes (1, N, M)  ->  (M, N, 1)
    o: observations (1, N, K) -> (M, N, K), identical copy per SNP
    y: traits (1, N, 1)     ->  (M, N, 1), identical copy per SNP
    """
    num_snps = x.shape[2]  # M
    assert x.ndim == 3  # expect (batch=1, N, M)
    x = np.swapaxes(x, 0, 2)  # (M, N, 1)
    assert o.ndim == 3  # expect (1, N, K)
    o = np.repeat(o, num_snps, axis=0)  # (M, N, K)
    assert y.ndim == 3  # expect (1, N, 1)
    y = np.swapaxes(np.tile(y, num_snps), 0, 2)  # (M, N, 1)
    # We now have M batches of data.
    return x, o, y
#
# Reparameterization trick
# z = z_mean + var * epsilon
# where epsilon ~ Normal(0,1)
#
def sampling(args):
    """Draw a latent sample via the reparameterization trick.

    args: tuple (z_mean, z_log_var), each shaped (batch, N, latent_dim).
    Returns z_mean + exp(0.5 * z_log_var) * eps with eps ~ N(0, 1), keeping
    the sampling step differentiable w.r.t. the distribution parameters.
    """
    z_mean, z_log_var = args
    batch = backend.shape(z_mean)[0]  # dynamic (symbolic) batch size
    dim1 = backend.int_shape(z_mean)[1]  # static: N
    dim2 = backend.int_shape(z_mean)[2]  # static: latent_dim
    epsilon = backend.random_normal(shape=(batch, dim1, dim2))
    return z_mean + backend.exp(0.5 * z_log_var)*epsilon
# Read Train and reshape
# x = Alleles, y = traits, pi = probs, o = observations
#
train_x, train_pi, train_y, train_o = load_training_data()
train_x, train_o, train_y = reshape_rebatch(train_x, train_o, train_y)
# Parameters
num_discrete_classes = 3
# x = Input(shape=(N, num_discrete_classes), name='x_as_categorical')
x = Input(shape=(N, 1,), name='x_as_categorical')
oc = Input(shape=(N, Kc,), name='o_continuous')
# NOTE(review): the `od` and `y` Inputs below are declared but never wired
# into the model — the encoder/decoder graph uses only `x` and `oc`.
od = Input(shape=(N, num_discrete_classes * Kd,), name='o_discrete') # one for each discrete o, Kd
y = Input(shape=(N, 1,), name='y') # traits, continuous
##### Encoder ######
# Branch 1: encode genotypes x into a 50-dim summary per individual.
interim = Dense(512, activation='relu')(x)
interim = Dense(512, activation='relu')(interim)
interim = Dense(256, activation='relu')(interim)
z = Dense(50)(interim) # ?, N, 50
# interim = Lambda(lambda x: backend.concatenate([x[0], x[1]], axis=2))([oc, od])
# Branch 2: encode the continuous observations oc the same way.
interim = Dense(512, activation='relu')(oc)
interim = Dense(512, activation='relu')(interim)
interim = Dense(256, activation='relu')(interim)
z_ = Dense(50)(interim) # ?, N, 50
#
latent_dim = 10
# Merge the two branches and produce Gaussian posterior parameters.
interim = Lambda(lambda x: backend.concatenate([x[0], x[1]], axis=2))([z, z_]) # ?, N, 100
interim = Dense(512, activation='relu')(interim)
interim = Dense(256, activation='relu')(interim)
u_mean = Dense(latent_dim, name='u_mean')(interim) # ?, N, 10
u_log_var = Dense(latent_dim, name='u_log_var')(interim) # ?, N, 10
# Sample
u = Lambda(sampling, output_shape=(N,latent_dim,))([u_mean, u_log_var])
# Encoder Model
encoder = Model([x, oc], [u_mean, u_log_var, u], name='encoder')
encoder.summary()
plot_model(encoder, to_file='cmed_encoder.png', show_shapes=True)
####### Decoder #####
# Decoder maps a latent sample back to a single continuous trait per individual.
latent_inputs = Input(shape=(N, latent_dim,), name='u_sampling')
interim = Dense(512,activation='relu')(latent_inputs)
interim = Dense(256,activation='relu')(interim)
outputs = Dense(1)(interim)
# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()
plot_model(decoder, to_file='cmed_decoder.png', show_shapes=True)
# Full VAE: decoder applied to the sampled latent (index 2 of encoder outputs).
inputs = [x, oc]
outputs = decoder(encoder(inputs)[2])
cmed_vae = Model(inputs, outputs, name='vae_cmed')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-w', '--weights', help="Saved model weights file")
    args = parser.parse_args()
    #
    # https://github.com/keras-team/keras/issues/10137
    # Compute VAE loss (important to have loss in a function
    #
    def my_vae_loss(y_true, y_pred):
        """VAE loss: reconstruction (binary cross-entropy) + KL divergence.

        NOTE(review): u_mean / u_log_var are closed over from the
        module-level graph, so this loss is only valid for cmed_vae.
        """
        xent_loss = N * \
            1 * \
            metrics.binary_crossentropy(
                backend.flatten(y_true),
                backend.flatten(y_pred))
        kl_loss = - 0.5 * backend.sum(1 + u_log_var - backend.square(u_mean) - backend.exp(u_log_var), axis=-1)
        vae_loss = backend.mean(xent_loss + kl_loss)
        return vae_loss
    cmed_vae.compile(optimizer='adam', loss=my_vae_loss)
    cmed_vae.summary()
    plot_model(cmed_vae, to_file='cmed_mlp.png', show_shapes=True)
    if args.weights:
        # Inference mode: restore previously trained weights.
        cmed_vae.load_weights(args.weights)
    else:
        # train
        # Only the continuous observations (first Kc columns of o) are fed in.
        cmed_vae.fit(x=[train_x, train_o[:,:,:Kc]], y=train_y, epochs=3, batch_size=32) #,
        # validation_data=(train_x, None))
        cmed_vae.save_weights('cmed_syn_model.h5')
|
{"hexsha": "2ccc0a925e7fc9a677fe8be06dfbb57ee51ddf56", "size": 4284, "ext": "py", "lang": "Python", "max_stars_repo_path": "framework/causal_mediation_working_bak_20190114.py", "max_stars_repo_name": "mullachv/causal_notes", "max_stars_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "framework/causal_mediation_working_bak_20190114.py", "max_issues_repo_name": "mullachv/causal_notes", "max_issues_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "framework/causal_mediation_working_bak_20190114.py", "max_forks_repo_name": "mullachv/causal_notes", "max_forks_repo_head_hexsha": "509e1f5c9f793697949a3a6f6bfc53df85e7e9f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0434782609, "max_line_length": 105, "alphanum_fraction": 0.6986461251, "include": true, "reason": "import numpy", "num_tokens": 1311}
|
#!/usr/bin/env python
from collections import deque
import re
import numpy
assign = re.compile('([0-9]+) -> ([a-z]+)')
operation = re.compile('([a-z0-9]*) *([A-Z]+) ([a-z0-9]+) -> ([a-z]+)')
wires = {}
operation_queue = deque()
AND = 1
NOT = 2
OR = 3
LSHIFT = 4
RSHIFT = 5
class Operation:
    """A gate instruction parsed from one circuit line.

    `op1`/`op2` hold the operand tokens (`op1` is None for unary NOT;
    `op2` is pre-parsed to int for the shift ops), `op` is one of the
    module opcode constants, and `dest` is the destination wire name.
    """

    def __init__(self, match):
        self.op1 = match.group(1)
        self.op2 = match.group(3)
        self.dest = match.group(4)
        mnemonic = match.group(2)
        opcodes = {
            'AND': AND,
            'NOT': NOT,
            'OR': OR,
            'LSHIFT': LSHIFT,
            'RSHIFT': RSHIFT,
        }
        if mnemonic not in opcodes:
            raise Exception(f'Unknown operation {mnemonic}')
        self.op = opcodes[mnemonic]
        if self.op == NOT:
            self.op1 = None  # NOT is unary; only op2 is used
        elif self.op in (LSHIFT, RSHIFT):
            self.op2 = int(self.op2)  # shift amount is always a literal

    def __repr__(self):
        return f'{self.op1} {self.op} {self.op2} -> {self.dest}'
# Parse the circuit description: each line is either a direct assignment
# ("123 -> x") applied immediately, or a gate operation queued for later
# evaluation once its input wires carry a value.
with open('test') as file:
    for line in file:
        match = assign.match(line)
        if(match):
            wires[match.group(2)] = int(match.group(1))
        else:
            match = operation.match(line)
            if(match):
                operation_queue.append(Operation(match))
            else:
                raise Exception(f'Syntax error: {line}')
def _operand_value(operand):
    """Resolve an operand to an int, or None if its wire has no value yet.

    Ints pass through (pre-parsed shift amounts); digit strings are numeric
    literals; anything else is a wire name looked up in `wires`.
    """
    if isinstance(operand, int):
        return operand
    if operand.isdigit():
        return int(operand)
    return wires.get(operand)

# Evaluate queued gates, re-queueing any whose inputs are not ready yet.
# Fix over the original: numeric literals are accepted as operands
# (e.g. "1 AND x -> y"); previously such gates could never resolve and
# the loop spun forever on real puzzle inputs.
while operation_queue:
    op = operation_queue.popleft()
    v1 = None if op.op == NOT else _operand_value(op.op1)
    v2 = _operand_value(op.op2)
    if (op.op != NOT and v1 is None) or v2 is None:
        operation_queue.append(op)  # inputs not ready; retry later
        continue
    if op.op == AND:
        wires[op.dest] = v1 & v2
    elif op.op == OR:
        wires[op.dest] = v1 | v2
    elif op.op == NOT:
        wires[op.dest] = ~ v2
    elif op.op == LSHIFT:
        wires[op.dest] = v1 << v2
    elif op.op == RSHIFT:
        wires[op.dest] = v1 >> v2
def int32_to_uint32(i):
    # NOTE(review): despite the name, this reinterprets the value as a
    # *16-bit* unsigned integer (int16 -> uint16), which matches the 16-bit
    # wires of this puzzle; the function deserves renaming to int16_to_uint16.
    return numpy.uint16(numpy.int16(i))
# Print each wire's final value as an unsigned 16-bit number.
for key, value in wires.items():
    print(key, int32_to_uint32(value))
|
{"hexsha": "c2d48bbd90b949e75abc5f1a8d1a6135223a5535", "size": 2198, "ext": "py", "lang": "Python", "max_stars_repo_path": "day7/part1.py", "max_stars_repo_name": "topjens/aoc2016", "max_stars_repo_head_hexsha": "3bf5f99d2c9320a7272e273ccd7447f7f7b5137e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "day7/part1.py", "max_issues_repo_name": "topjens/aoc2016", "max_issues_repo_head_hexsha": "3bf5f99d2c9320a7272e273ccd7447f7f7b5137e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day7/part1.py", "max_forks_repo_name": "topjens/aoc2016", "max_forks_repo_head_hexsha": "3bf5f99d2c9320a7272e273ccd7447f7f7b5137e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8227848101, "max_line_length": 113, "alphanum_fraction": 0.525477707, "include": true, "reason": "import numpy", "num_tokens": 599}
|
{ -- MF/TIN SRAM functions
-- Original code by Jeff Massung, 2003 }
\ shift masking
\ /8 keeps the value and also pushes it shifted down one byte; *8 undoes it
\ (shift up one byte and add) — presumably `8 # n/` / `8 # n*` are shifts by
\ 8 bits; verify against the TIN word set. Used because SRAM below is
\ accessed one byte at a time (c!a / c@a).
:i /8 ( n1 -- n1 n2 ) dup 8 # n/ ;
:n *8 ( n1 n2 -- n3 ) swap 8 # n* + ;
\ save data to static ram (one byte store per c!a)
:n savebyte ( b -- ) >a c!a a> ;
:n saveword ( n -- ) >a /8 c!a c!a a> ;
:n savelong ( x -- ) >a /8 /8 /8 c!a c!a c!a c!a a> ;
\ load data from static ram (byte loads recombined with *8)
:n loadbyte ( -- b ) >a c@a a> ;
: loadword ( -- n ) >a c@a c@a *8 a> ;
: loadlong ( -- x ) >a c@a c@a *8 c@a *8 c@a *8 a> ;
\ save a string
\ fixed-size 256-byte copy from the buffer at tos into SRAM (via register a)
code-thumb /savetext ( a -- )
$100 w movi
l: __loop
\ copy a byte
tos 0@ v0 ldrb,
1 ## tos add,
a 0@ v0 strb,
1 ## a add,
\ loop until 256 bytes written
1 ## w sub,
__loop gt? b,
\ done
tos pop
ret
end-code
\ load a string
\ fixed-size 256-byte copy from SRAM into the buffer at tos
code-thumb /loadtext ( a -- a )
$100 w movi
tos v2 mov,
l: __loop
\ load two bytes
a 0@ v1 ldrb,
1 ## a add,
\ write bytes
v2 0@ v1 strb,
1 ## v2 add,
\ loop until length is zero
1 ## w sub,
__loop gt? b,
\ done
ret
end-code
\ save a string to static ram
: savetext ( a -- ) >a /savetext a> ;
: loadtext$ ( -- a ) >a 256 # r-alloc /loadtext a> ;
|
{"hexsha": "89f67e6734714e4b271051e1b2df75a41fa22267", "size": 1101, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "optional/sram.f", "max_stars_repo_name": "uli/dragonbasic", "max_stars_repo_head_hexsha": "901c0e5e88963df44aff28d3124ca842234dad3c", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-06-12T07:54:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-02T02:20:50.000Z", "max_issues_repo_path": "optional/sram.f", "max_issues_repo_name": "funkygallo/dragonbasic", "max_issues_repo_head_hexsha": "901c0e5e88963df44aff28d3124ca842234dad3c", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": 26, "max_issues_repo_issues_event_min_datetime": "2015-04-27T18:51:42.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-17T15:28:15.000Z", "max_forks_repo_path": "optional/sram.f", "max_forks_repo_name": "funkygallo/dragonbasic", "max_forks_repo_head_hexsha": "901c0e5e88963df44aff28d3124ca842234dad3c", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-07-08T05:54:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-16T20:21:38.000Z", "avg_line_length": 16.9384615385, "max_line_length": 53, "alphanum_fraction": 0.5395095368, "num_tokens": 465}
|
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from figure3 import select, ket, exp
from matrix import ops
from measures import local_entropies_from_rhos, local_exp_vals_from_rhos
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib import rc
rc("text", usetex=True)
font = {"size": 11, "weight": "normal"}
mpl.rc(*("font",), **font)  # equivalent to mpl.rc("font", **font)
mpl.rcParams["pdf.fonttype"] = 42  # embed TrueType fonts so PDF text stays editable
# NOTE(review): newer matplotlib expects text.latex.preamble as a single
# string; the list form below is deprecated — confirm the pinned version.
mpl.rcParams["text.latex.preamble"] = [
    r"\usepackage{amsmath}",
    r"\usepackage{sansmath}",  # sanserif math
    r"\sansmath",
]
if __name__ == "__main__":
    # Display metadata per measure key (only "name" is used below).
    names = {
        "c1_f0": {"name": ket("010"), "ls": "-", "c": "C5", "m": "v"},
        "exp-z": {"name": exp("\hat{\sigma_j}^z"), "ls": "-", "c": "C5", "m": "v"},
        "exp-x": {"name": exp("\hat{\sigma_j}^x"), "ls": "-", "c": "C5", "m": "v"},
        "s-2": {"name": " $s^{(2)}_j$", "ls": "-", "c": "C5", "m": "v"},
    }
    cmaps = ["inferno_r", "inferno"]  # one colormap per row (exp-z, s-2)
    plot_fname = "figures/figure2/figure2_V5.pdf"
    fig = plt.figure(figsize=(4.75, 3.7))
    # Simulation keys "<N>.<rule>": N==3 rows come from HDF5 sims,
    # N==5 from precomputed density matrices on disk.
    Skey = ["3.6", "3.13", "3.14", "5.4", "5.2"]
    measures = ["exp-z", "s-2"]
    IC = "c1_f0"
    L = 18
    T = (L - 1) * 3 + 1  # plot ylim
    letts1 = [
        r"$\mathrm{A}$",
        r"$\mathrm{C}$",
        r"$\mathrm{E}$",
        r"$\mathrm{G}$",
        r"$\mathrm{I}$",
    ]
    letts2 = [
        r"$\mathrm{B}$",
        r"$\mathrm{D}$",
        r"$\mathrm{F}$",
        r"$\mathrm{H}$",
        r"$\mathrm{J}$",
    ]
    clett1 = ["w", "w", "w", "w", "w"]
    clett2 = ["k", "k", "k", "w", "k"]
    letts = [letts1, letts2]
    cletts = [clett1, clett2]
    # One ImageGrid row per measure, one panel per simulation key.
    for row, (meas, letti, cli) in enumerate(zip(measures, letts, cletts)):
        grid = ImageGrid(
            fig,
            int("21" + str(1 + row)),
            nrows_ncols=(1, 5),
            direction="row",
            axes_pad=0.1,
            add_all=True,
            cbar_mode="single",
            cbar_location="right",
            cbar_size="20%",
            cbar_pad=0.05,
        )
        for col, (S, lett, cl) in enumerate(zip(Skey, letti, cli)):
            N, S = map(int, S.split("."))
            ax = grid[col]
            if N == 3:
                sim = select(L=L, S=S, IC=IC, V="H", BC="0")
                if sim is None:
                    print("No sim!")
                    continue
                S = sim["S"]
                L = sim["L"]
                IC = sim["IC"]
                h5file = sim["h5file"]
                if meas[0] == "e":
                    ticks = [-1, 1]
                    ticklabels = ["↑", "↓"]
                else:
                    ticks = [0, 1]
                    ticklabels = ["$0$","$1$"]
                vmin, vmax = ticks
                d = h5file[meas]
            elif N == 5:
                # NOTE(review): ticks/vmin/vmax are only assigned in the
                # N==3 branch; the N==5 columns reuse the values left over
                # from the last N==3 iteration (works because Skey lists
                # all "3.*" keys first) — fragile if Skey is reordered.
                # The absolute path below is machine-specific.
                der = "/home/lhillber/documents/research/cellular_automata/qeca/qops"
                der = os.path.join(der, f"qca_output/hamiltonian/rule{S}/rho_i.npy")
                one_site = np.load(der)
                one_site = one_site.reshape(2000, 22, 2, 2)
                one_site = one_site[::, 2:-2, :, :]
                T5, L5, *_ = one_site.shape
                d = np.zeros((T5, L5))
                ti = 0
                # Subsample every 10th time step from the stored evolution.
                for t, rhoi in enumerate(one_site):
                    if t % 10 == 0:
                        if meas == "exp-z":
                            d[ti, :] = local_exp_vals_from_rhos(rhoi, ops["Z"])
                        elif meas == "s-2":
                            d[ti, :] = local_entropies_from_rhos(rhoi, order=2)
                        ti += 1
            I = ax.imshow(
                d[0:T],
                origin="lower",
                interpolation=None,
                cmap=cmaps[row],
                vmin=vmin,
                vmax=vmax,
            )
            ax.cax.colorbar(I)
            ax.cax.set_yticks(ticks)
            ax.cax.set_yticklabels(ticklabels)
            ax.set_xticks([0, 8, 17])
            ax.set_yticks([i * (L - 1) for i in range(4)])
            ax.set_yticklabels([])
            ax.set_xticklabels([])
            ax.text(0.5, 46, lett, color=cl, family="sans-serif", weight="bold")
            # Colorbar label only on the last column of each row.
            if col == len(Skey) - 1:
                ax.cax.text(
                    1.6,
                    0.5,
                    names[meas]["name"],
                    rotation=0,
                    transform=ax.transAxes,
                    ha="left",
                    va="center",
                )
            if row == 0 and col < 3:
                ax.set_title(r"$T_{%d}$" % S)
            elif row == 0 and col > 2:
                ax.set_title(r"${F_{%d}}$" % S)
            ax.tick_params(direction="out")
        # Tick labels only on the leftmost panel of the row.
        grid[0].set_yticklabels(["$"+str(i * (L - 1))+"$" for i in range(4)])
        grid[0].set_xticklabels(["$0$", "$8$", "$17$"])
        grid[0].set_xlabel("$j$", labelpad=0)
        grid[0].set_ylabel("$t$", labelpad=0)
    fig.subplots_adjust(hspace=0.1, left=0.05, top=0.93)
    plt.savefig(plot_fname, dpi=300)
    print("plot saved to ", plot_fname)
|
{"hexsha": "22040e5fbc98320308f1868f94a43716c95f2a35", "size": 5044, "ext": "py", "lang": "Python", "max_stars_repo_path": "figure2.py", "max_stars_repo_name": "lhillber/qops", "max_stars_repo_head_hexsha": "2bdd9548222c720e877b1a1e4b6200d993abed5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-02T20:18:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-06T14:44:47.000Z", "max_issues_repo_path": "figure2.py", "max_issues_repo_name": "lhillber/qops", "max_issues_repo_head_hexsha": "2bdd9548222c720e877b1a1e4b6200d993abed5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figure2.py", "max_forks_repo_name": "lhillber/qops", "max_forks_repo_head_hexsha": "2bdd9548222c720e877b1a1e4b6200d993abed5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.9673202614, "max_line_length": 85, "alphanum_fraction": 0.43259318, "include": true, "reason": "import numpy", "num_tokens": 1504}
|
[STATEMENT]
lemma mcont_bind_pmf [cont_intro]:
assumes g: "\<And>y. mcont luba orda lub_spmf (ord_spmf (=)) (g y)"
shows "mcont luba orda lub_spmf (ord_spmf (=)) (\<lambda>x. bind_pmf p (\<lambda>y. g y x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mcont luba orda lub_spmf (ord_spmf (=)) (\<lambda>x. p \<bind> (\<lambda>y. g y x))
[PROOF STEP]
using mcont_bind_spmf[where f="\<lambda>_. spmf_of_pmf p" and g=g, OF _ assms]
[PROOF STATE]
proof (prove)
using this:
mcont luba orda lub_spmf (ord_spmf (=)) (\<lambda>_. spmf_of_pmf p) \<Longrightarrow> mcont luba orda lub_spmf (ord_spmf (=)) (\<lambda>x. spmf_of_pmf p \<bind> (\<lambda>y. g y x))
goal (1 subgoal):
1. mcont luba orda lub_spmf (ord_spmf (=)) (\<lambda>x. p \<bind> (\<lambda>y. g y x))
[PROOF STEP]
by(simp)
|
{"llama_tokens": 362, "file": "Probabilistic_While_While_SPMF", "length": 2}
|
"""Utilities"""
# This file is part of geneparse.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Pharmacogenomics Centre
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import json
import logging
import urllib
# `import urllib` alone does not make the `request` submodule available;
# rsids_to_variants uses urllib.request, so import it explicitly.
import urllib.request
import warnings

import numpy as np
import pandas as pd

from . import parsers
from .core import Variant
logger = logging.getLogger(__name__)
warnings.simplefilter("once", DeprecationWarning)
def flip_alleles(genotypes):
    """Swap the reference and coded alleles of a Genotypes instance.

    The dosage vector is re-expressed with respect to the new coded allele
    (i.e. ``2 - dosage``). The instance is modified in place and returned
    for convenience.
    """
    warnings.warn("deprecated: use 'Genotypes.flip_coded'", DeprecationWarning)
    old_reference = genotypes.reference
    genotypes.reference = genotypes.coded
    genotypes.coded = old_reference
    genotypes.genotypes = 2 - genotypes.genotypes
    return genotypes
def code_minor(genotypes):
    """Encode the genotypes with respect to the minor allele.

    Ensures that "reference" is the major allele and "coded" the minor one,
    so that the genotype value is the number of minor alleles carried by an
    individual. The instance is flipped in place when needed.
    """
    warnings.warn("deprecated: use 'Genotypes.code_minor'", DeprecationWarning)
    _, minor_is_coded = maf(genotypes)
    if minor_is_coded:
        return genotypes
    return flip_alleles(genotypes)
def maf(genotypes):
    """Compute the minor allele frequency.

    Returns a ``(maf, minor_is_coded)`` tuple where the boolean indicates
    whether the coded allele is currently the minor one. Missing dosages
    (NaN) are excluded from both the numerator and the denominator.
    """
    warnings.warn("deprecated: use 'Genotypes.maf'", DeprecationWarning)
    dosage = genotypes.genotypes
    n_called = np.sum(~np.isnan(dosage))
    coded_freq = np.nansum(dosage) / (2 * n_called)
    if coded_freq > 0.5:
        # The coded allele is actually the major one.
        return 1 - coded_freq, False
    return coded_freq, True
def rsids_to_variants(li):
    """Resolve a list of rsIDs into Variant objects via the Ensembl REST API.

    ``li`` is a list of rsID strings; they are POSTed in one batch to the
    GRCh37 Ensembl endpoint. Returns a dict mapping each resolved rsID to a
    Variant built from its first valid GRCh37 mapping; names without a valid
    mapping are logged and omitted from the result.

    NOTE(review): this uses ``urllib.request`` but the module only does
    ``import urllib``, which does not guarantee the submodule is loaded --
    confirm ``import urllib.request`` is present at module level.
    """
    url = "http://grch37.rest.ensembl.org/variation/homo_sapiens"
    # All rsIDs are sent in a single JSON POST request.
    req = urllib.request.Request(
        url=url,
        data=json.dumps({"ids": li}).encode("utf-8"),
        headers={
            "Content-type": "application/json",
            "Accept": "application/json",
        },
        method="POST"
    )
    with urllib.request.urlopen(req) as f:
        data = json.loads(f.read().decode("utf-8"))
    out = {}
    for name, info in data.items():
        # Check the mappings.
        found = False
        for mapping in info["mappings"]:
            chrom = mapping.get("seq_region_name")
            pos = mapping.get("start")
            alleles = mapping.get("allele_string").split("/")
            assembly = mapping.get("assembly_name")
            # A usable mapping is on GRCh37 with a chromosome, a position
            # and at least two alleles.
            valid = (assembly == "GRCh37" and
                     chrom is not None and
                     pos is not None and
                     len(alleles) >= 2)
            if found and valid:
                # Only the first valid mapping is kept; extras are logged.
                logger.warning("Multiple mappings for '{}'.".format(name))
            elif valid:
                found = True
                out[name] = Variant(name, chrom, pos, alleles)
        if not found:
            logger.warning(
                "Could not find mappings for '{}'.".format(name)
            )
    return out
def genotype_to_df(g, samples, as_string=False):
    """Build a single-column pandas DataFrame from a genotype object.

    The column (named after the variant, or "genotypes" when the variant is
    unnamed) holds the dosage values by default; with ``as_string=True`` the
    rounded hard calls are rendered as allele pairs such as "A/T" instead.
    """
    column = g.variant.name if g.variant.name else "genotypes"
    frame = pd.DataFrame(g.genotypes, index=samples, columns=[column])
    if not as_string:
        return frame
    frame["alleles"] = None
    calls = frame[column].round()
    # Map hard calls 0/1/2 to their allele-pair representation.
    homo_ref = "{0}/{0}".format(g.reference)
    hetero = "{0}/{1}".format(g.reference,
                              g.coded)
    homo_coded = "{0}/{0}".format(g.coded)
    frame.loc[calls == 0, "alleles"] = homo_ref
    frame.loc[calls == 1, "alleles"] = hetero
    frame.loc[calls == 2, "alleles"] = homo_coded
    frame = frame[["alleles"]]
    frame.columns = [column]
    return frame
def compute_ld(cur_geno, other_genotypes, r2=False):
    """Compute LD between a marker and a list of markers.

    Args:
        cur_geno (Genotypes): The genotypes of the marker.
        other_genotypes (list): A list of genotypes.
        r2 (bool): Return r**2 instead of r.

    Returns:
        pandas.Series: The r (or r**2) values between cur_geno and every
        marker in other_genotypes, indexed by variant name.

    Note:
        The genotypes are normalized using (x - mean) / std beforehand.
    """
    reference = normalize_genotypes(cur_geno)
    # Column-stack the normalized genotypes of the other markers.
    others = np.stack(
        tuple(normalize_genotypes(g) for g in other_genotypes),
        axis=1,
    )
    # Both sides need the same number of samples.
    assert reference.shape[0] == others.shape[0]
    # Per-marker sample counts, excluding positions missing on either side.
    n = (
        ~np.isnan(reference.reshape(reference.shape[0], 1)) *
        ~np.isnan(others)
    ).sum(axis=0)
    # NaNs are zeroed so they contribute nothing to the dot product.
    r = pd.Series(
        np.dot(
            np.nan_to_num(reference), np.nan_to_num(others) / n
        ),
        index=[g.variant.name for g in other_genotypes],
        name="r2" if r2 else "r",
    )
    # Numerical noise can push |r| slightly past 1; clamp it back.
    r = r.clip(lower=-1, upper=1)
    return r ** 2 if r2 else r
def compute_ld_matrix(genotypes, r2=False):
    """Compute the pairwise LD matrix from a genotype matrix.

    Args:
        genotypes (numpy.array): An m x n matrix of m samples and n variants.
        r2 (bool): Whether to return the r or r2.

    Returns:
        numpy.array: The n x n LD matrix.
    """
    # Pairwise counts of samples observed for both variants.
    observed = (~np.isnan(genotypes)).astype(int)
    pair_counts = observed.T @ observed
    # Column-wise standardization, ignoring missing values.
    centered = genotypes - np.nanmean(genotypes, axis=0)
    z = centered / np.nanstd(genotypes, axis=0)
    # Missing entries must contribute nothing to the dot products.
    z[np.isnan(z)] = 0
    # Each (i, j) correlation is divided by its own pairwise sample count.
    corr = (z.T @ z) / pair_counts
    return corr ** 2 if r2 else corr
def normalize_genotypes(genotypes):
    """Standardize a Genotypes instance's dosage vector.

    Args:
        genotypes (Genotypes): The genotypes to normalize.

    Returns:
        numpy.array: ``(x - nanmean(x)) / nanstd(x)`` of the dosage vector.
    """
    values = genotypes.genotypes
    center = np.nanmean(values)
    scale = np.nanstd(values)
    return (values - center) / scale
def add_arguments_to_parser(parser):
    """Add often used arguments to an argument parser.

    When reading genotype files some command-line arguments are almost
    systematically used. To avoid rewriting the code, this function adds
    these arguments to a Python argparse.ArgumentParser instance.

    Eventually, a well-formed reader can be constructed using this pattern:

        reader = geneparse.parsers[args.genotypes_format](
            args.genotypes,
            **geneparse.utils.parse_kwargs(args.genotypes_kwargs)
        )
    """
    parser.add_argument(
        "--genotypes", "-g",
        help="The genotypes file."
    )
    parser.add_argument(
        "--genotypes-format", "-f",
        help="The genotypes file format (one of: {})."
             "".format(", ".join(parsers.keys()))
    )
    parser.add_argument(
        "--genotypes-kwargs", "-kw",
        # Fixed help text: the quote around the example format was never
        # closed, "prefix" and "the" were fused together, and "cast the
        # them" was a typo for "cast them".
        help="Keyword arguments to pass to the genotypes container. "
             "A string of the following format is expected: "
             "'key1=value1,key2=value2,...'. It is also possible to prefix "
             "the values by 'int:' or 'float:' to cast them before "
             "passing them to the constructor."
    )
def parse_kwargs(s):
    """Parse command line arguments into Python arguments for parsers.

    Converts an arguments string of the form key1=value1,key2=value2 into
    a dict of arguments that can be passed to Python initializers.

    This function also understands type prefixes and will cast values
    prefixed with 'int:' or 'float:'. For example magic_number=int:4 will
    be converted to {"magic_number": 4}.

    Returns an empty dict when ``s`` is None.
    """
    if s is None:
        return {}
    kwargs = {}
    for argument in s.split(","):
        # maxsplit=1 lets values themselves contain '=' (e.g. file paths or
        # base64 strings); a bare split("=") crashed on such input.
        key, value = argument.strip().split("=", 1)
        if value.startswith("int:"):
            value = int(value[4:])
        elif value.startswith("float:"):
            value = float(value[6:])
        kwargs[key] = value
    return kwargs
|
{"hexsha": "73a9d9aaae64331687fbf553f73c8fe9754b16a0", "size": 9341, "ext": "py", "lang": "Python", "max_stars_repo_path": "geneparse/utils.py", "max_stars_repo_name": "pgxcentre/geneparse", "max_stars_repo_head_hexsha": "ecb3a7fcbcb6a295828445ccbe6a3062ba20e701", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-11-09T11:10:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-23T22:17:58.000Z", "max_issues_repo_path": "geneparse/utils.py", "max_issues_repo_name": "pgxcentre/geneparse", "max_issues_repo_head_hexsha": "ecb3a7fcbcb6a295828445ccbe6a3062ba20e701", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-05-02T15:28:01.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-16T18:29:15.000Z", "max_forks_repo_path": "geneparse/utils.py", "max_forks_repo_name": "pgxcentre/geneparse", "max_forks_repo_head_hexsha": "ecb3a7fcbcb6a295828445ccbe6a3062ba20e701", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-05-12T17:58:32.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-12T17:58:32.000Z", "avg_line_length": 29.190625, "max_line_length": 79, "alphanum_fraction": 0.6274488813, "include": true, "reason": "import numpy", "num_tokens": 2208}
|
"""Controller used to generate distribution over hierarchical, variable-length objects."""
import tensorflow as tf
import numpy as np
from dso.program import Program
from dso.memory import Batch
from dso.prior import LengthConstraint
class LinearWrapper(tf.contrib.rnn.LayerRNNCell):
    """
    RNNCell wrapper that appends a dense (linear) projection to the wrapped
    cell's output, mapping it to ``output_size`` logits.

    See: https://github.com/tensorflow/models/blob/master/research/brain_coder/single_task/pg_agent.py
    """
    def __init__(self, cell, output_size):
        self.cell = cell
        self._output_size = output_size
    def __call__(self, inputs, state, scope=None):
        # Run the wrapped cell, then project its output to logits inside a
        # variable scope named after this class.
        with tf.variable_scope(type(self).__name__):
            cell_out, new_state = self.cell(inputs, state, scope=scope)
            logits = tf.layers.dense(cell_out, units=self._output_size)
        return logits, new_state
    @property
    def output_size(self):
        return self._output_size
    @property
    def state_size(self):
        return self.cell.state_size
    def zero_state(self, batch_size, dtype):
        # Delegate state initialization to the wrapped cell.
        return self.cell.zero_state(batch_size, dtype)
class Controller(object):
"""
Recurrent neural network (RNN) controller used to generate expressions.
Specifically, the RNN outputs a distribution over pre-order traversals of
symbolic expression trees. It is trained using REINFORCE with baseline.
Parameters
----------
sess : tf.Session
TenorFlow Session object.
prior : dso.prior.JointPrior
JointPrior object used to adjust probabilities during sampling.
state_manager: dso.tf_state_manager.StateManager
Object that handles the state features to be used
summary : bool
Write tensorboard summaries?
debug : int
Debug level, also used in learn(). 0: No debug. 1: Print shapes and
number of parameters for each variable.
cell : str
Recurrent cell to use. Supports 'lstm' and 'gru'.
num_layers : int
Number of RNN layers.
num_units : int or list of ints
Number of RNN cell units in each of the RNN's layers. If int, the value
is repeated for each layer.
initiailizer : str
Initializer for the recurrent cell. Supports 'zeros' and 'var_scale'.
optimizer : str
Optimizer to use. Supports 'adam', 'rmsprop', and 'sgd'.
learning_rate : float
Learning rate for optimizer.
entropy_weight : float
Coefficient for entropy bonus.
entropy_gamma : float or None
Gamma in entropy decay. None (or
equivalently, 1.0) turns off entropy decay.
pqt : bool
Train with priority queue training (PQT)?
pqt_k : int
Size of priority queue.
pqt_batch_size : int
Size of batch to sample (with replacement) from priority queue.
pqt_weight : float
Coefficient for PQT loss function.
pqt_use_pg : bool
Use policy gradient loss when using PQT?
max_length : int or None
Maximum sequence length. This will be overridden if a LengthConstraint
with a maximum length is part of the prior.
"""
    def __init__(self, sess, prior, state_manager, debug=0, summary=False,
                 # RNN cell hyperparameters
                 cell='lstm',
                 num_layers=1,
                 num_units=32,
                 initializer='zeros',
                 # Optimizer hyperparameters
                 optimizer='adam',
                 learning_rate=0.001,
                 # Loss hyperparameters
                 entropy_weight=0.005,
                 entropy_gamma=1.0,
                 # PQT hyperparameters
                 pqt=False,
                 pqt_k=10,
                 pqt_batch_size=1,
                 pqt_weight=200.0,
                 pqt_use_pg=False,
                 # Other hyperparameters
                 max_length=30):
        """Build the sampling graph, losses, and training op.

        Parameters are documented in the class docstring. Construction has
        side effects: it resets the task (``task.reset_task(prior)``) and
        registers this controller with ``state_manager``.
        """
        self.sess = sess
        self.prior = prior
        self.summary = summary
        ###self.rng = np.random.RandomState(0) # Used for PPO minibatch sampling
        self.n_objects = Program.n_objects
        lib = Program.library
        # Find max_length from the LengthConstraint prior, if it exists
        # Both priors will never happen in the same experiment
        prior_max_length = None
        for single_prior in self.prior.priors:
            if isinstance(single_prior, LengthConstraint):
                if single_prior.max is not None:
                    prior_max_length = single_prior.max
                    self.max_length = prior_max_length
                break
        if prior_max_length is None:
            assert max_length is not None, "max_length must be specified if "\
                "there is no LengthConstraint."
            self.max_length = max_length
            print("WARNING: Maximum length not constrained. Sequences will "
                  "stop at {} and complete by repeating the first input "
                  "variable.".format(self.max_length))
        elif max_length is not None and max_length != self.max_length:
            print("WARNING: max_length ({}) will be overridden by value from "
                  "LengthConstraint ({}).".format(max_length, self.max_length))
        # Total sequence budget scales with the number of objects sampled.
        self.max_length *= self.n_objects
        max_length = self.max_length
        # Hyperparameters
        self.entropy_weight = entropy_weight
        self.pqt = pqt
        self.pqt_k = pqt_k
        self.pqt_batch_size = pqt_batch_size
        n_choices = lib.L
        # Placeholders, computed after instantiating expressions
        self.batch_size = tf.placeholder(dtype=tf.int32, shape=(), name="batch_size")
        self.baseline = tf.placeholder(dtype=tf.float32, shape=(), name="baseline")
        # Entropy decay vector
        if entropy_gamma is None:
            entropy_gamma = 1.0
        entropy_gamma_decay = np.array([entropy_gamma**t for t in range(max_length)])
        # Build controller RNN
        with tf.name_scope("controller"):
            def make_initializer(name):
                # Maps an initializer name to a TF initializer object.
                if name == "zeros":
                    return tf.zeros_initializer()
                if name == "var_scale":
                    return tf.contrib.layers.variance_scaling_initializer(
                        factor=0.5, mode='FAN_AVG', uniform=True, seed=0)
                raise ValueError("Did not recognize initializer '{}'".format(name))
            def make_cell(name, num_units, initializer):
                # Maps a cell-type name to a TF RNN cell.
                if name == 'lstm':
                    return tf.nn.rnn_cell.LSTMCell(num_units, initializer=initializer)
                if name == 'gru':
                    return tf.nn.rnn_cell.GRUCell(num_units, kernel_initializer=initializer, bias_initializer=initializer)
                raise ValueError("Did not recognize cell type '{}'".format(name))
            # Create recurrent cell
            if isinstance(num_units, int):
                num_units = [num_units] * num_layers
            initializer = make_initializer(initializer)
            cell = tf.contrib.rnn.MultiRNNCell(
                [make_cell(cell, n, initializer=initializer) for n in num_units])
            cell = LinearWrapper(cell=cell, output_size=n_choices)
            task = Program.task
            initial_obs = task.reset_task(prior)
            state_manager.setup_manager(self)
            initial_obs = tf.broadcast_to(initial_obs, [self.batch_size, len(initial_obs)]) # (?, obs_dim)
            initial_obs = state_manager.process_state(initial_obs)
            # Get initial prior
            initial_prior = self.prior.initial_prior()
            initial_prior = tf.constant(initial_prior, dtype=tf.float32)
            initial_prior = tf.broadcast_to(initial_prior, [self.batch_size, n_choices])
            # Define loop function to be used by tf.nn.raw_rnn.
            initial_cell_input = state_manager.get_tensor_input(initial_obs)
            def loop_fn(time, cell_output, cell_state, loop_state):
                # raw_rnn step function: first call (cell_output None) sets up
                # the TensorArrays; later calls sample an action, write the
                # previous obs/prior, and compute the next obs/prior.
                if cell_output is None: # time == 0
                    finished = tf.zeros(shape=[self.batch_size], dtype=tf.bool)
                    obs = initial_obs
                    next_input = state_manager.get_tensor_input(obs)
                    next_cell_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32) # 2-tuple, each shape (?, num_units)
                    emit_output = None
                    actions_ta = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False) # Read twice
                    obs_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, clear_after_read=True)
                    priors_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True, clear_after_read=True)
                    prior = initial_prior
                    lengths = tf.ones(shape=[self.batch_size], dtype=tf.int32)
                    next_loop_state = (
                        actions_ta,
                        obs_ta,
                        priors_ta,
                        obs,
                        prior,
                        lengths, # Unused until implementing variable length
                        finished)
                else:
                    actions_ta, obs_ta, priors_ta, obs, prior, lengths, finished = loop_state
                    logits = cell_output + prior
                    next_cell_state = cell_state
                    emit_output = logits
                    # tf.multinomial is deprecated: TF recommends switching to tf.random.categorical
                    # action = tf.random.categorical(logits=logits, num_samples=1, output_dtype=tf.int32, seed=1)[:, 0]
                    action = tf.multinomial(logits=logits, num_samples=1, output_dtype=tf.int32, seed=1)[:, 0]
                    # When implementing variable length:
                    # action = tf.where(
                    #     tf.logical_not(finished),
                    #     tf.multinomial(logits=logits, num_samples=1, output_dtype=tf.int32)[:, 0],
                    #     tf.zeros(shape=[self.batch_size], dtype=tf.int32))
                    next_actions_ta = actions_ta.write(time - 1, action) # Write chosen actions
                    # Get current action batch
                    actions = tf.transpose(next_actions_ta.stack()) # Shape: (?, time)
                    # Compute obs and prior
                    next_obs, next_prior = tf.py_func(func=task.get_next_obs,
                                                      inp=[actions, obs],
                                                      Tout=[tf.float32, tf.float32])
                    next_prior.set_shape([None, lib.L])
                    next_obs.set_shape([None, task.OBS_DIM])
                    next_obs = state_manager.process_state(next_obs)
                    next_input = state_manager.get_tensor_input(next_obs)
                    next_obs_ta = obs_ta.write(time - 1, obs) # Write OLD obs
                    next_priors_ta = priors_ta.write(time - 1, prior) # Write OLD prior
                    finished = next_finished = tf.logical_or(
                        finished,
                        time >= max_length)
                    # When implementing variable length:
                    # finished = next_finished = tf.logical_or(tf.logical_or(
                    #     finished, # Already finished
                    #     next_dangling == 0), # Currently, this will be 0 not just the first time, but also at max_length
                    #     time >= max_length)
                    next_lengths = tf.where(
                        finished, # Ever finished
                        lengths,
                        tf.tile(tf.expand_dims(time + 1, 0), [self.batch_size]))
                    next_loop_state = (next_actions_ta,
                                       next_obs_ta,
                                       next_priors_ta,
                                       next_obs,
                                       next_prior,
                                       next_lengths,
                                       next_finished)
                return (finished, next_input, next_cell_state, emit_output, next_loop_state)
            # Returns RNN emit outputs TensorArray (i.e. logits), final cell state, and final loop state
            with tf.variable_scope('policy'):
                _, _, loop_state = tf.nn.raw_rnn(cell=cell, loop_fn=loop_fn)
                actions_ta, obs_ta, priors_ta, _, _, _, _ = loop_state
            self.actions = tf.transpose(actions_ta.stack(), perm=[1, 0]) # (?, max_length)
            self.obs = tf.transpose(obs_ta.stack(), perm=[1, 2, 0]) # (?, obs_dim, max_length)
            self.priors = tf.transpose(priors_ta.stack(), perm=[1, 0, 2]) # (?, max_length, n_choices)
        # Generates dictionary containing placeholders needed for a batch of sequences
        def make_batch_ph(name):
            with tf.name_scope(name):
                batch_ph = {
                    "actions": tf.placeholder(tf.int32, [None, max_length]),
                    "obs": tf.placeholder(tf.float32, [None, task.OBS_DIM, self.max_length]),
                    "priors": tf.placeholder(tf.float32, [None, max_length, n_choices]),
                    "lengths": tf.placeholder(tf.int32, [None, ]),
                    "rewards": tf.placeholder(tf.float32, [None], name="r"),
                    "on_policy": tf.placeholder(tf.int32, [None, ])
                }
                batch_ph = Batch(**batch_ph)
            return batch_ph
        def safe_cross_entropy(p, logq, axis=-1):
            # Avoids 0 * -inf = NaN by substituting logq with 1 where p == 0.
            safe_logq = tf.where(tf.equal(p, 0.), tf.ones_like(logq), logq)
            return - tf.reduce_sum(p * safe_logq, axis)
        # Generates tensor for neglogp of a given batch
        def make_neglogp_and_entropy(B):
            with tf.variable_scope('policy', reuse=True):
                logits, _ = tf.nn.dynamic_rnn(cell=cell,
                                              inputs=state_manager.get_tensor_input(B.obs),
                                              sequence_length=B.lengths, # Backpropagates only through sequence length
                                              dtype=tf.float32)
            logits += B.priors
            probs = tf.nn.softmax(logits)
            logprobs = tf.nn.log_softmax(logits)
            # Generate mask from sequence lengths
            # NOTE: Using this mask for neglogp and entropy actually does NOT
            # affect training because gradients are zero outside the lengths.
            # However, the mask makes tensorflow summaries accurate.
            mask = tf.sequence_mask(B.lengths, maxlen=max_length, dtype=tf.float32)
            # Negative log probabilities of sequences
            actions_one_hot = tf.one_hot(B.actions, depth=n_choices, axis=-1, dtype=tf.float32)
            neglogp_per_step = safe_cross_entropy(actions_one_hot, logprobs, axis=2) # Sum over action dim
            neglogp = tf.reduce_sum(neglogp_per_step * mask, axis=1) # Sum over time dim
            # NOTE 1: The above implementation is the same as the one below:
            # neglogp_per_step = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,labels=actions)
            # neglogp = tf.reduce_sum(neglogp_per_step, axis=1) # Sum over time
            # NOTE 2: The above implementation is also the same as the one below, with a few caveats:
            # Exactly equivalent when removing priors.
            # Equivalent up to precision when including clipped prior.
            # Crashes when prior is not clipped due to multiplying zero by -inf.
            # neglogp_per_step = -tf.nn.log_softmax(logits + tf.clip_by_value(priors, -2.4e38, 0)) * actions_one_hot
            # neglogp_per_step = tf.reduce_sum(neglogp_per_step, axis=2)
            # neglogp = tf.reduce_sum(neglogp_per_step, axis=1) # Sum over time
            # If entropy_gamma = 1, entropy_gamma_decay_mask == mask
            entropy_gamma_decay_mask = entropy_gamma_decay * mask # ->(batch_size, max_length)
            entropy_per_step = safe_cross_entropy(probs, logprobs, axis=2) # Sum over action dim -> (batch_size, max_length)
            entropy = tf.reduce_sum(entropy_per_step * entropy_gamma_decay_mask, axis=1) # Sum over time dim -> (batch_size, )
            return neglogp, entropy
        # On policy batch
        self.sampled_batch_ph = make_batch_ph("sampled_batch")
        # Memory batch
        self.memory_batch_ph = make_batch_ph("memory_batch")
        memory_neglogp, _ = make_neglogp_and_entropy(self.memory_batch_ph)
        self.memory_probs = tf.exp(-memory_neglogp)
        self.memory_logps = -memory_neglogp
        # PQT batch
        if pqt:
            self.pqt_batch_ph = make_batch_ph("pqt_batch")
        # Setup losses
        with tf.name_scope("losses"):
            neglogp, entropy = make_neglogp_and_entropy(self.sampled_batch_ph)
            r = self.sampled_batch_ph.rewards
            # Entropy loss
            entropy_loss = -self.entropy_weight * tf.reduce_mean(entropy, name="entropy_loss")
            loss = entropy_loss
            if not pqt or (pqt and pqt_use_pg):
                # Baseline is the worst of the current samples r
                pg_loss = tf.reduce_mean((r - self.baseline) * neglogp, name="pg_loss")
                # Loss already is set to entropy loss
                loss += pg_loss
            # Priority queue training loss
            if pqt:
                pqt_neglogp, _ = make_neglogp_and_entropy(self.pqt_batch_ph)
                pqt_loss = pqt_weight * tf.reduce_mean(pqt_neglogp, name="pqt_loss")
                loss += pqt_loss
            self.loss = loss
        def make_optimizer(name, learning_rate):
            # Maps an optimizer name to a TF optimizer object.
            if name == "adam":
                return tf.train.AdamOptimizer(learning_rate=learning_rate)
            if name == "rmsprop":
                return tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=0.99)
            if name == "sgd":
                return tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
            raise ValueError("Did not recognize optimizer '{}'".format(name))
        # Create training op
        optimizer = make_optimizer(name=optimizer, learning_rate=learning_rate)
        with tf.name_scope("train"):
            self.grads_and_vars = optimizer.compute_gradients(self.loss)
            self.train_op = optimizer.apply_gradients(self.grads_and_vars)
            # The two lines above are equivalent to:
            # self.train_op = optimizer.minimize(self.loss)
        with tf.name_scope("grad_norm"):
            self.grads, _ = list(zip(*self.grads_and_vars))
            self.norms = tf.global_norm(self.grads)
        if debug >= 1:
            total_parameters = 0
            print("")
            for variable in tf.trainable_variables():
                shape = variable.get_shape()
                n_parameters = np.product(shape)
                total_parameters += n_parameters
                print("Variable: ", variable.name)
                print("  Shape: ", shape)
                print("  Parameters:", n_parameters)
            print("Total parameters:", total_parameters)
        # Create summaries
        with tf.name_scope("summary"):
            if self.summary:
                if not pqt or (pqt and pqt_use_pg):
                    tf.summary.scalar("pg_loss", pg_loss)
                if pqt:
                    tf.summary.scalar("pqt_loss", pqt_loss)
                tf.summary.scalar("entropy_loss", entropy_loss)
                tf.summary.scalar("total_loss", self.loss)
                tf.summary.scalar("reward", tf.reduce_mean(r))
                tf.summary.scalar("baseline", self.baseline)
                tf.summary.histogram("reward", r)
                tf.summary.histogram("length", self.sampled_batch_ph.lengths)
                for g, v in self.grads_and_vars:
                    tf.summary.histogram(v.name, v)
                    tf.summary.scalar(v.name + '_norm', tf.norm(v))
                    tf.summary.histogram(v.name + '_grad', g)
                    tf.summary.scalar(v.name + '_grad_norm', tf.norm(g))
                tf.summary.scalar('gradient norm', self.norms)
                self.summaries = tf.summary.merge_all()
            else:
                self.summaries = tf.no_op()
def sample(self, n):
"""Sample batch of n expressions"""
feed_dict = {self.batch_size : n}
actions, obs, priors = self.sess.run([self.actions, self.obs, self.priors], feed_dict=feed_dict)
return actions, obs, priors
def compute_probs(self, memory_batch, log=False):
"""Compute the probabilities of a Batch."""
feed_dict = {
self.memory_batch_ph : memory_batch
}
if log:
fetch = self.memory_logps
else:
fetch = self.memory_probs
probs = self.sess.run([fetch], feed_dict=feed_dict)[0]
return probs
def train_step(self, b, sampled_batch, pqt_batch):
"""Computes loss, trains model, and returns summaries."""
feed_dict = {
self.baseline : b,
self.sampled_batch_ph : sampled_batch
}
if self.pqt:
feed_dict.update({
self.pqt_batch_ph : pqt_batch
})
summaries, _ = self.sess.run([self.summaries, self.train_op], feed_dict=feed_dict)
return summaries
|
{"hexsha": "1c54369e5252e68489fadb62f75b88fa1eb2a591", "size": 21498, "ext": "py", "lang": "Python", "max_stars_repo_path": "dso/dso/controller.py", "max_stars_repo_name": "brendenpetersen/deep-symbolic-optimization", "max_stars_repo_head_hexsha": "8724839dab910022e24d03debdf564236683474b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 134, "max_stars_repo_stars_event_min_datetime": "2021-07-06T06:14:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T18:24:08.000Z", "max_issues_repo_path": "dso/dso/controller.py", "max_issues_repo_name": "brendenpetersen/deep-symbolic-optimization", "max_issues_repo_head_hexsha": "8724839dab910022e24d03debdf564236683474b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-06-10T17:03:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T20:15:35.000Z", "max_forks_repo_path": "dso/dso/controller.py", "max_forks_repo_name": "brendenpetersen/deep-symbolic-optimization", "max_forks_repo_head_hexsha": "8724839dab910022e24d03debdf564236683474b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 44, "max_forks_repo_forks_event_min_datetime": "2021-06-26T19:11:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T04:07:41.000Z", "avg_line_length": 43.4303030303, "max_line_length": 136, "alphanum_fraction": 0.5809377617, "include": true, "reason": "import numpy", "num_tokens": 4466}
|
# Top-level module for the TrafficAssignment package: pulls in the graph and
# optimization dependencies, loads the implementation files, and declares
# the public API.
module TrafficAssignment
# package code goes here
# External dependencies: graph structures (LightGraphs) and optimization (Optim).
using LightGraphs, Optim
# Implementation files: network/data loading and the Frank-Wolfe solver.
include("load_network.jl")
include("frank_wolfe.jl")
export
load_ta_network,
ta_frank_wolfe,
TA_Data
end # module
|
{"hexsha": "b007ed1c4b0190d3dd02696945fb73962560db33", "size": 221, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/TrafficAssignment.jl", "max_stars_repo_name": "JuliaPackageMirrors/TrafficAssignment.jl", "max_stars_repo_head_hexsha": "87b011e851bb005775ade803ef7ca56bdfe8633a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TrafficAssignment.jl", "max_issues_repo_name": "JuliaPackageMirrors/TrafficAssignment.jl", "max_issues_repo_head_hexsha": "87b011e851bb005775ade803ef7ca56bdfe8633a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TrafficAssignment.jl", "max_forks_repo_name": "JuliaPackageMirrors/TrafficAssignment.jl", "max_forks_repo_head_hexsha": "87b011e851bb005775ade803ef7ca56bdfe8633a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 11.6315789474, "max_line_length": 26, "alphanum_fraction": 0.7013574661, "num_tokens": 57}
|
[STATEMENT]
lemma set_plus_intro2 [intro]: "b \<in> C \<Longrightarrow> a + b \<in> a +o C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. b \<in> C \<Longrightarrow> a + b \<in> a +o C
[PROOF STEP]
by (auto simp add: elt_set_plus_def)
|
{"llama_tokens": 101, "file": null, "length": 1}
|
### Google Ngram Data Tools
## Alex John Quijano
## Created: 9/13/2017
import os
import re
import sys
import math
import errno
import numpy as np
import pandas as pd
import subprocess as sb
# Read Google ngram data
def read(n,l,ignore_case=True,restriction=True,annotation=False,specific_fileName='all'):
    """Load the normalized Google ngram score tables and POS annotations.

    Looks for the processed pickles first under ../google-ngram/ and then
    under ../raw-data/google-ngram/ (the two locations the pipeline writes
    to). Returns a dict with the 'rscore', 'pscore', 'zscore' (None when the
    zscore file is absent) tables and the 'pos' annotation dict. When neither
    location has the data, prints instructions and returns None (the original
    code's broken ``sys.exit`` dance effectively returned None as well, so
    that observable behavior is kept, just made explicit).
    """
    parent = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    # File-name stem shared by every pickle/npy artifact of this dataset.
    stem = ('googlebooks-'+l+'-all-'+n+'gram-20120701.filtered.'
            + 'I'+str(ignore_case)+'R'+str(restriction)+'A'+str(annotation)
            + '.'+specific_fileName)
    # Candidate data directories, tried in order (in-directory then raw-data).
    candidates = [
        parent+'/google-ngram/'+n+'gram-normalized/'+l+'/',
        parent+'/raw-data/google-ngram/'+n+'gram-normalized/'+l+'/',
    ]
    for directory in candidates:
        base = directory + stem
        try:
            rscore = pd.read_pickle(base+'.rscore.pkl', compression='gzip')
            pscore = pd.read_pickle(base+'.pscore.pkl', compression='gzip')
            try:
                # The zscore table is optional; fall back to None.
                zscore = pd.read_pickle(base+'.zscore.pkl', compression='gzip')
            except Exception:
                zscore = None
            pos_annotation = np.load(base+'.pos.npy', allow_pickle=True).item()
            return {'rscore':rscore,'pscore':pscore,'zscore':zscore,'pos':pos_annotation}
        except FileNotFoundError:
            # Try the next candidate location.
            continue
    # Nothing found anywhere: tell the user how to generate the data.
    print('Error: The computed-data directory can not be found of '+n+'gram dataset for '+l+' with specified parameters does not exist anywhere.')
    print('TRY: Use the bash script below to download and process the Google Ngram data according to your specified parameters or see Section 2 of the INSTRUCTIONS file.')
    print()
    print('%%bash')
    print('./downloadAndFilter.ngram.sh '+n+' '+l+' 1900 2008 1 1')
    print('./normalize.ngram.py '+n+' '+l+' '+str(ignore_case)+' '+str(restriction)+' '+str(annotation)+' '+specific_fileName)
    return None
# get word set from a file
def get_subset(DataFrame,file_path):
    """Select the rows of ``DataFrame`` whose index contains a listed word.

    Parameters
    ----------
    DataFrame : pandas.DataFrame
        Frame indexed by ngram strings (tokens separated by single spaces).
    file_path : str
        UTF-8 text file with one word per line (the lexicon).

    Returns
    -------
    pandas.DataFrame
        Rows whose index string contains at least one lexicon word,
        after dropping all-NaN rows.

    Raises
    ------
    FileNotFoundError
        If ``file_path`` does not exist.  (The previous version printed a
        message and then crashed with a NameError on the unbound file
        handle; the original exception is now re-raised after the message.)
    """
    try:
        # Read the lexicon; the context manager closes the file deterministically.
        with open(file_path, encoding='utf-8') as fh:
            words = [line.replace('\n', '') for line in fh]
    except FileNotFoundError:
        print('The file does not exist')
        raise
    word_set = set(words)  # O(1) membership tests instead of list scans
    df_ngrams_chosen = []
    for label in DataFrame.index:
        # Keep each index label at most once, even when several of its
        # tokens are in the lexicon (the old code appended the label once
        # per matching token, which duplicated rows in the reindex below).
        if any(token in word_set for token in label.split(' ')):
            df_ngrams_chosen.append(label)
    out = DataFrame.reindex(np.array(df_ngrams_chosen)).dropna(axis='index')
    # len(words) (not the set) preserves the original duplicate-counting output.
    print('Number of input words: '+str(len(words)))
    print('Number of valid words: '+str(out.shape[0]))
    return out
|
{"hexsha": "a1e0a8044128df824ef1146a76c24776307aaef5", "size": 3894, "ext": "py", "lang": "Python", "max_stars_repo_path": "googleNgram.py", "max_stars_repo_name": "stressosaurus/a-statistical-model-of-word-rank-evolution", "max_stars_repo_head_hexsha": "4a06a872b5c84b561510958aed18e76d931443f4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "googleNgram.py", "max_issues_repo_name": "stressosaurus/a-statistical-model-of-word-rank-evolution", "max_issues_repo_head_hexsha": "4a06a872b5c84b561510958aed18e76d931443f4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "googleNgram.py", "max_forks_repo_name": "stressosaurus/a-statistical-model-of-word-rank-evolution", "max_forks_repo_head_hexsha": "4a06a872b5c84b561510958aed18e76d931443f4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.8117647059, "max_line_length": 213, "alphanum_fraction": 0.7059578839, "include": true, "reason": "import numpy", "num_tokens": 1084}
|
import numpy as np
from sklearn import cluster, manifold, metrics
class Reservoir(object):
    """ Reservoir Class
    Parameters
    ----------
    n_features: (default=1)
        number of input features, set in range (>=1)
    n_reservoir: (default=100)
        number of reservoir neurons, set in range(1, 10000)
    spectral_radius: (default=0.95)
        spectral radius of the absolute of recurrent weight matrix,
        set in range(0, 1)
        (see Notes for more information)
    connectivity: (default=0.05)
        proportion of recurrent weights are retained, set in range(0, 1)
        (see Notes for more information)
    random_state: (default=None)
        integer seed, np.random.RandomState object, or None to use numpy's builtin RandomState.
    Attributes
    ----------
    W: (n_reservoir, n_reservoir) array
        recurrent weight matrix
    W_in: (n_reservoir, n_features) array
        input weight matrix
    reservoir_state: (n_reservoir,) array
        state of the reservoir
    Notes
    -----
    The setting of parameters spectral_radius and connectivity affect the
    dynamic of the reservoir. The proper setting can obtain reservoir with
    long-term memory. Larger spectral_radius brings longer memory, proper
    connectivity, e.g. 5 / n_reservoir, usually derives better dynamic for reservoir.
    References
    ----------
    Xu, M., P. Baraldi, and E. Zio. "Fault diagnostics by conceptors-aided clustering."
    In 30th European Safety and Reliability Conference, ESREL 2020 and 15th Probabilistic
    Safety Assessment and Management Conference, PSAM 2020, pp. 3656-3663.
    Research Publishing Services, 2020.
    Jaeger, Herbert. "Using conceptors to manage neural long-term memories for temporal patterns."
    The Journal of Machine Learning Research 18, no. 1 (2017): 387-429.
    Examples
    --------
    >>> # generate a set of multivariate time series dataset (n_samples, n_timestamps, n_features) array
    >>> # it combines 10 short time series with n_timestamps=100 and 10 long time series with n_timestamps=200
    >>> X_short = np.random.randn(10,100,2)
    >>> tail_arr = np.zeros((10,100,2)) + np.nan
    >>> X_short = np.concatenate((X_short,tail_arr),axis=1)
    >>> X_long = np.random.randn(10,200,2)
    >>> X = np.concatenate((X_short,X_long),axis=0)
    >>> # build reservoir object
    >>> res = Reservoir(n_features=2)
    >>> # build reservoir object
    >>> # (assign arguments by users)
    >>> res = Reservoir(n_features=2,
    ...                 n_reservoir=100,
    ...                 spectral_radius=0.95,
    ...                 connectivity=0.05,
    ...                 random_state=2)
    >>> # convert these 20 multivariate time series into 20 Conceptors
    >>> X_C = res.transform_conceptor(X)
    >>> # show an example of Conceptor
    >>> import matplotlib.pyplot as plt
    >>> plt.figure()
    >>> plt.imshow(X_C[0])
    >>> plt.show()
    """
    def __init__(self,
                 n_features=1,
                 n_reservoir=100,
                 spectral_radius=0.95,
                 connectivity=0.05,
                 random_state=None):
        self.n_features = n_features
        self.n_reservoir = n_reservoir
        self.spectral_radius = spectral_radius
        self.connectivity = connectivity
        # the given random_state might be either an actual RandomState object,
        # a seed or None (in which case we use numpy's builtin RandomState)
        # NOTE(review): a falsy seed (0) also takes the global-RandomState
        # branch below, so random_state=0 is NOT a reproducible seed here.
        if isinstance(random_state, np.random.RandomState):
            self.random_state_ = random_state
        elif random_state:
            try:
                self.random_state_ = np.random.RandomState(random_state)
            except TypeError as e:
                raise Exception("Invalid seed: " + str(e))
        else:
            self.random_state_ = np.random.mtrand._rand
        # reservoir starts from the all-zero state
        self.reservoir_state = np.zeros((n_reservoir,))
        self._init_weights()
    def _init_weights(self):
        """Initialize the weight of Reservoir network.
        Notes
        -----
        The weights are normalized in ``Uniform`` distribution and the
        ``Spectral Radius`` < 1.
        """
        # initialize recurrent weights:
        # begin with a random matrix centered around zero:
        W = self.random_state_.rand(self.n_reservoir, self.n_reservoir) - 0.5
        # delete the fraction of connections given by (self.connectivity):
        W[self.random_state_.rand(*W.shape) > self.connectivity] = 0
        # compute the spectral radius of these weights:
        # (largest eigenvalue of the element-wise |W|; real and dominant by
        # Perron-Frobenius for a non-negative matrix)
        radius = np.max(np.linalg.eigvals(np.abs(W)))
        radius = np.real(radius)
        # rescale them to reach the requested spectral radius:
        self.W = W * (self.spectral_radius / radius)
        # random input weights: uniform in [-1, 1)
        self.W_in = self.random_state_.rand(self.n_reservoir, self.n_features) * 2 - 1
    def _update_reservoir(self, x):
        """Performs one update step.
        Note
        ----
        i.e., computes the next network state by applying the recurrent weights
        to the last state & and feeding in the current input and output patterns
        Parameters
        ----------
        x: (n_features,) array
            the input pattern at a time step
        Returns
        -------
        reservoir_state: (n_reservoir,) array
            the updated state of the reservoir
        """
        # leaky-free tanh update: s <- tanh(W s + W_in x)
        self.reservoir_state = np.tanh(np.dot(self.W, self.reservoir_state) +
                                       np.dot(self.W_in, x))
        return self.reservoir_state
    def reset_reservoir(self, reservoir_state=None):
        """reset the reservoir state by reservoir_state argument or zeros
        Parameters
        ----------
        reservoir_state : (n_reservoir,) array, optional
            The initial state used to set the reservoir state. The default is None.
        Returns
        -------
        None.
        """
        if reservoir_state is None:
            self.reservoir_state = np.zeros((self.n_reservoir,))
        # the elif is redundant (it is exactly `else`) but kept for clarity
        elif reservoir_state is not None:
            assert reservoir_state.shape == self.reservoir_state.shape, "reservoir_state shape doesn't match."
            self.reservoir_state = reservoir_state
    def transform_conceptor(self, X, alpha=1.):
        """Transform the input X into Conceptor matrix
        Parameters
        ----------
        X: (n_samples, n_timestamps, n_features) array
            Multivariate time series dataset. The time series may have different length,
            the short ones are padding with np.nan type.
        alpha: float number, (default=1)
            A control parameter called aperture, which control the scaling of
            singular values of correlation matrix of reservoir states, set in
            the range (1, 10, 1000, 10000) (see Notes)
        Returns
        -------
        X_C: (n_reservoir, n_reservoir) array
            Conceptor matrix transformed from multivariate time series dataset.
        Notes
        -----
        Alpha usually use an empirical value of 1.
        Each sample of multivariate time series can have different length.
        References
        -----------
        Xu, M., P. Baraldi, and E. Zio. "Fault diagnostics by conceptors-aided clustering."
        In 30th European Safety and Reliability Conference, ESREL 2020 and 15th Probabilistic
        Safety Assessment and Management Conference, PSAM 2020, pp. 3656-3663.
        Research Publishing Services, 2020.
        Examples
        --------
        >>> X = np.random.randn(10,100,2)
        >>> res = Reservoir(n_features=2,
        ...                 n_reservoir=100,
        ...                 spectral_radius=0.95,
        ...                 connectivity=0.05,
        ...                 random_state=2)
        >>> X_C = res.transform_conceptor(X, alpha=1.)
        """
        n_samples, n_timestamps, n_features = X.shape
        n_reservoir = self.reservoir_state.shape[0]
        X_C = np.zeros((n_samples, n_reservoir, n_reservoir))
        # R is allocated once; note it is effectively zeroed again at the
        # first step of every sample because R*(i_t-1)/i_t is R*0 at i_t==1.
        R = np.zeros((n_reservoir, n_reservoir))
        for i_s,u in enumerate(X):
            # NOTE(review): the loop starts at 1, so u[0] is never fed to the
            # reservoir, and reservoir_state is not reset between samples, so
            # each series starts from the previous series' final state --
            # TODO confirm both are intended.
            for i_t in range(1,n_timestamps):
                # NaN in the padded tail marks the end of a (shorter) series
                if not np.isnan(np.sum(u[i_t])):
                    # update R matrix (running average of x x^T over time)
                    x = self._update_reservoir(u[i_t])
                    x = x[:,np.newaxis]
                    R = R*(i_t-1)/i_t + np.dot(x,x.T)/i_t
                elif np.isnan(np.sum(u[i_t])):
                    break
            # compute C matrix: C = R (R + alpha^-2 I)^-1
            R_inv = np.linalg.inv(R + alpha**(-2) * np.eye(n_reservoir))
            C = np.matmul(R, R_inv)
            X_C[i_s] = C
        return X_C
def conceptor_clustering(X_C,
                         n_clusters=None,
                         n_components=None,
                         sigma=1.,
                         random_state=None):
    """Cluster Conceptor matrices with spectral clustering.

    Parameters
    ----------
    X_C : (n_samples, n_reservoir, n_reservoir) array
        Conceptor matrices produced by ``Reservoir.transform_conceptor``.
    n_clusters : int, optional
        Number of clusters.  When None it is chosen automatically as the
        candidate (from 2 upward) that maximises the silhouette score.
    n_components : int, optional
        Number of eigenvectors for the spectral embedding; defaults to
        ``n_clusters``.
    sigma : float, default=1
        Bandwidth of the radial basis function turning Conceptor distances
        into affinities: ``A = exp(-D**2 / (2 * sigma**2))``.
    random_state : int or RandomState, optional
        Seed for the spectral embedding / K-Means / spectral clustering.

    Returns
    -------
    labels : (n_samples,) array
        Cluster label of every sample.
    silhouette_scores : dict
        Silhouette score per candidate ``n_clusters`` (empty when
        ``n_clusters`` was given explicitly).
    affinity_matrix : (n_samples, n_samples) array
        Pairwise Conceptor affinities.

    References
    ----------
    Xu, M., P. Baraldi, and E. Zio. "Fault diagnostics by conceptors-aided
    clustering." ESREL 2020 / PSAM 2020, pp. 3656-3663.
    """
    n_samples = X_C.shape[0]
    assert n_samples > 2, "Too few time series samples (at least >= 3)."
    # Pairwise Frobenius distances between Conceptors; the matrix is
    # symmetric, so the lower triangle is mirrored from the upper one.
    D = np.zeros((n_samples, n_samples))
    for i in range(n_samples):
        for j in range(n_samples):
            if j >= i:
                D[i, j] = np.linalg.norm(X_C[i] - X_C[j], 'fro')
            else:
                D[i, j] = D[j, i]
    # RBF affinity matrix.
    A = np.exp(- D**2 / (2 * sigma**2))
    affinity_matrix = A
    # Candidate range and silhouette sample size for automatic selection.
    max_n_clusters = min(max(8, n_samples//100), 100, n_samples-1)
    s_z = min(max_n_clusters*100, n_samples)
    silhouette_scores = {}
    if n_clusters is None:
        for n_c in range(2, max_n_clusters+1):
            # Spectral embedding of the affinity matrix ...
            X_embed = manifold.spectral_embedding(A,
                                                  n_components=n_c,
                                                  random_state=random_state,
                                                  eigen_tol=0.0,
                                                  norm_laplacian=True)
            # ... followed by K-Means in the embedded space.
            kmeans = cluster.KMeans(n_clusters=n_c, random_state=random_state).fit(X_embed)
            silhouette_scores[n_c] = metrics.silhouette_score(X_embed, kmeans.labels_,
                                                              metric='euclidean',
                                                              sample_size=s_z,
                                                              random_state=random_state)
        # Keep the candidate with the largest silhouette score.
        n_clusters = max(silhouette_scores, key=silhouette_scores.get)
    if n_components is None:
        n_components = n_clusters
    # Final partition via scikit-learn's spectral clustering.
    labels = cluster.spectral_clustering(A,
                                         n_clusters=n_clusters,
                                         n_components=n_components,
                                         random_state=random_state,
                                         assign_labels="kmeans")
    return labels, silhouette_scores, affinity_matrix
class ConceptorClustering(object):
    """Time-series clustering via reservoir Conceptors.

    Each multivariate time series is encoded as a Conceptor matrix by a
    random reservoir; the Conceptors are then grouped with spectral
    clustering on their pairwise RBF affinities.

    Parameters
    ----------
    n_clusters : int, optional
        Number of clusters; when None it is chosen automatically by
        maximising the silhouette score.
    n_reservoir : int, default=100
        Number of reservoir neurons, set in range(1, 10000).
    spectral_radius : float, default=0.95
        Spectral radius of the absolute recurrent weight matrix, in (0, 1);
        larger values give the reservoir a longer memory.
    connectivity : float, default=0.05
        Fraction of recurrent weights retained, in (0, 1); a sparse setting
        such as 5 / n_reservoir usually gives better dynamics.
    n_components : int, optional
        Number of eigenvectors for the spectral embedding; defaults to
        ``n_clusters``.
    sigma : float, default=1
        RBF bandwidth used in the Conceptor similarity measure.
    random_state : int or RandomState, optional
        Seed for reservoir initialisation and for the clustering steps.

    Attributes
    ----------
    labels_ : ndarray
        Cluster label of each sample (set by ``fit``).
    silhouette_scores_ : dict
        Silhouette score per candidate number of clusters.
    reservoir_ : Reservoir
        The reservoir instance used to build the Conceptors.
    affinity_matrix_ : ndarray
        Pairwise Conceptor affinity matrix
        ``exp(-D**2 / (2 * sigma**2))`` where D is the Conceptor distance.

    References
    ----------
    Xu, M., P. Baraldi, and E. Zio. "Fault diagnostics by conceptors-aided
    clustering." ESREL 2020 / PSAM 2020, pp. 3656-3663.
    """
    def __init__(self, n_clusters=None,
                 n_reservoir=100, spectral_radius=0.95, connectivity=0.05,
                 n_components=None, sigma=1., random_state=None):
        self.n_clusters = n_clusters
        self.n_reservoir = n_reservoir
        self.spectral_radius = spectral_radius
        self.connectivity = connectivity
        self.n_components = n_components
        self.sigma = sigma
        self.random_state = random_state
    def fit(self, X, y=None, sample_weight=None):
        """Encode X as Conceptors and cluster them.

        Parameters
        ----------
        X : array-like of shape=(n_samples, n_timestamps, n_features)
            Time series dataset (shorter series padded with NaN).
        y, sample_weight
            Ignored; kept for scikit-learn API compatibility.

        Returns
        -------
        self
        """
        _n_samples, _n_timestamps, n_features = X.shape
        # Fresh reservoir sized to the data's feature dimension.
        self.reservoir_ = Reservoir(n_features=n_features,
                                    n_reservoir=self.n_reservoir,
                                    spectral_radius=self.spectral_radius,
                                    connectivity=self.connectivity,
                                    random_state=self.random_state)
        conceptors = self.reservoir_.transform_conceptor(X)
        (self.labels_,
         self.silhouette_scores_,
         self.affinity_matrix_) = conceptor_clustering(conceptors,
                                                       n_clusters=self.n_clusters,
                                                       n_components=self.n_components,
                                                       sigma=self.sigma,
                                                       random_state=self.random_state)
        return self
    def fit_predict(self, X, y=None):
        """Fit on X and return the cluster label of every time series.

        Parameters
        ----------
        X : array-like of shape=(n_samples, n_timestamps, n_features)
            Time series dataset to cluster.
        y
            Ignored.

        Returns
        -------
        labels : array of shape=(n_samples,)
            Index of the cluster each sample belongs to.
        """
        return self.fit(X, y).labels_
|
{"hexsha": "35fa313e690253a9da5fd85f51afce675dd9d610", "size": 21506, "ext": "py", "lang": "Python", "max_stars_repo_path": "ConceptorClustering.py", "max_stars_repo_name": "nhejequjq/Conceptor-TSCluster", "max_stars_repo_head_hexsha": "4bf7bc16a50742c812532152f606ee4dd95fef91", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-28T09:29:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-28T09:29:29.000Z", "max_issues_repo_path": "ConceptorClustering.py", "max_issues_repo_name": "nhejequjq/Conceptor-TSCluster", "max_issues_repo_head_hexsha": "4bf7bc16a50742c812532152f606ee4dd95fef91", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ConceptorClustering.py", "max_forks_repo_name": "nhejequjq/Conceptor-TSCluster", "max_forks_repo_head_hexsha": "4bf7bc16a50742c812532152f606ee4dd95fef91", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1981308411, "max_line_length": 111, "alphanum_fraction": 0.5697479773, "include": true, "reason": "import numpy", "num_tokens": 4600}
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import re
import os
import json
import heapq
import math
from collections import defaultdict
import networkx as nx
from networkx.readwrite import json_graph
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import igraph as ig
import pandas as pd
import louvain
from louvain import Optimiser
# from googletrans import Translator
# Directory containing the per-dynasty '<dynasty>_centrality.json' files.
CENTRALITY_DIR = 'centrality'
# Directory containing the .gexf graph files and the song.csv people table.
VIS_DATA_DIR = './csv'
def check_contain_chinese(text):
    """Heuristic used to decide whether a string needs translating.

    NOTE(review): despite its name, this does not detect CJK characters;
    it merely reports whether the string contains any non-digit character.

    Arguments:
        text {string} -- string to be inspected

    Returns:
        Boolean -- False for blank or purely numeric strings, True otherwise
    """
    if not text.strip():
        return False
    return any(not ('0' <= ch <= '9') for ch in text)
# def translate_json(input_data):
# """using google trans api to translate datas
# Arguments:
# input_data {[json, str]}
# Returns:
# input_data_translated
# """
# if isinstance(input_data, list):
# datas = []
# for item in input_data:
# datas.append(translate_json(item))
# return datas
# elif isinstance(input_data, dict):
# data = dict()
# for item in input_data:
# data[item] = translate_json(input_data[item])
# return data
# elif isinstance(input_data, str) and check_contain_chinese(input_data):
# trans = Translator(service_urls = ['translate.google.cn'])
# trans_str = trans.translate(input_data, dest='en').text
# print(input_data, trans_str)
# return trans_str
# else:
# return input_data
def get_topPeople(dynasty = 'song', topk = 10, sort_by = 0):
    """Return the top-k most central figures of a dynasty.

    Keyword Arguments:
        dynasty {str} -- dynasty name used to locate
            '<dynasty>_centrality.json' (default: {'song'})
        topk {int} -- how many figures to return (default: {10})
        sort_by {int} -- index of the centrality to rank by (default: {0}):
            degree, betweenness, closeness, eigenvector

    Returns:
        list -- centrality records of the top-k figures, best first
    """
    path = os.path.join(CENTRALITY_DIR, '{}_centrality.json'.format(dynasty))
    with open(path) as handle:
        centrality = json.load(handle)
    # rank (score, record, person-id) triples; ties fall back to the
    # remaining tuple elements, exactly as the heap-based version did
    ranked = heapq.nlargest(
        topk,
        ((centrality[pid][sort_by], centrality[pid], pid) for pid in centrality))
    return [record for _score, record, _pid in ranked]
def get_subgraph(node_list = ['1762'], depth = 3, graph_path='song-signed.gexf'):
    """Extract the subgraph reachable from node_list within `depth` hops.

    node_list are the seed nodes; depth is how many BFS levels to expand.
    Prints the size of each newly discovered level and the final node count.
    """
    gexf_path = os.path.join(VIS_DATA_DIR, graph_path)
    g = nx.read_gexf(gexf_path)
    # build an undirected adjacency map from the edge list
    adjacency = defaultdict(set)
    for u, v in g.edges():
        adjacency[u].add(v)
        adjacency[v].add(u)
    visited = set(node_list)
    frontier = list(node_list)
    for _ in range(depth):
        # collect the neighbours of the current frontier that are new
        discovered = []
        for node in frontier:
            for neighbour in adjacency[node]:
                if neighbour not in visited:
                    discovered.append(neighbour)
        discovered = list(set(discovered))
        print(len(discovered))
        visited = visited | set(discovered)
        frontier = discovered
    print(len(visited))
    return g.subgraph(visited)
def get_property(sub_g) :
    """Return ({node: Chinese name}, {node: centrality dict}) for sub_g's nodes.

    Reads the people table (song.csv) and the precomputed centrality JSON;
    assumes song.csv has columns nid, ChName, EngName -- TODO confirm.
    """
    fname = os.path.join(VIS_DATA_DIR, 'song.csv')
    people_df = pd.read_csv(fname)
    attrs = dict()
    centrality_attrs = dict()
    with open('./centrality/song_centrality.json') as f:
        json_data = json.load(f)
    for n in sub_g.nodes():
        # row(s) of the people table whose nid matches this node id
        p = people_df[people_df.nid == int(n)]
        name1 = p['ChName']
        name2 = p['EngName']
        # "".join(...) collapses the (possibly empty) Series to a plain str
        attrs[n]= "".join(name1.values)
        d = dict()
        d["EngName"] = "".join(name2.values)
        d["ChName"] = "".join(name1.values)
        d["PersonID"] = n
        # four centrality values per person, rounded for display; presumably
        # degree/betweenness/closeness/eigenvector -- TODO confirm
        pku = json_data[n]
        d["c1"] = round(pku[0], 3)
        d["c2"] = round(pku[1], 3)
        d["c3"] = round(pku[2], 3)
        d["c4"] = round(pku[3], 3)
        centrality_attrs[n] = d
    return attrs, centrality_attrs
def naive_plot(node_list, cate="1"):
    '''Build node-link data (and layout positions) for the depth-0 subgraph.

    cate selects the graph flavour: "1" positive, "2" negative, "3" signed.

    Reference ids used by callers (Song-dynasty literati):
    1384, 歐陽修 22
    3762,蘇洵 2
    1493,蘇轍 13
    3767,蘇軾 2
    1762,王安石 6
    7364,曾鞏 0
    '''
    graph_path_dict = {
        '1': 'song-pos.gexf',
        '2': 'song-neg.gexf',
        '3': 'song-signed.gexf'
    }
    graph_path = graph_path_dict[cate]
    sub_g = get_subgraph(node_list=node_list, depth=0, graph_path=graph_path)
    attrs , centrality_attrs = get_property(sub_g)
    # NOTE(review): e_pos / e_neg are computed but never used below.
    e_pos = [(u, v) for (u, v, d) in sub_g.edges(data=True) if d['weight'] > 0]
    e_neg = [(u, v) for (u, v, d) in sub_g.edges(data=True) if d['weight'] < 0]
    pos = nx.circular_layout(sub_g)
    # to make the drawn order deterministic,
    # print(pos.items())
    # NOTE(review): sorts by slope y/x of the layout position; this raises
    # ZeroDivisionError if a node lands exactly at x == 0 -- confirm.
    values = sorted(pos.items(), key = lambda x:x[1][1]/x[1][0], reverse=True)
    nodes = {i[0]: {'position': list(i[1]), 'name': attrs[i[0]], 'centrality': centrality_attrs[i[0]]} for i in values}
    pos_key = [i for i in pos.keys()]
    pos_key.sort()
    # reassign the sorted positions to the (sorted) node keys
    for index, i in enumerate(pos_key):
        pos[i] = values[index][1]
    # NOTE(review): Graph.node was removed in networkx 2.4; this line only
    # works on networkx < 2.4 (use sub_g.nodes[n] on newer versions).
    for n in sub_g:
        sub_g.node[n]['name'] = n
    d = json_graph.node_link_data(sub_g) # node-link format to serialize
    # print(d)
    # print(pos_values)
    return d, nodes
def layer_partition(sub_g):
    """Partition a signed graph with multiplex Louvain (positive vs negative layer).

    Returns a dict mapping node label -> community id of the positive layer.
    """
    # round-trip through GraphML so igraph can read the networkx graph
    graphml_path = os.path.join(VIS_DATA_DIR, 'song-tmp.graphml')
    nx.write_graphml(sub_g, graphml_path)
    G = ig.Graph.Read_GraphML(graphml_path)
    # split into a positive-weight layer and a negative-weight layer
    # (vertices are kept so indices line up across the two layers)
    G_pos = G.subgraph_edges(G.es.select(weight_gt = 0), delete_vertices=False)
    G_neg = G.subgraph_edges(G.es.select(weight_lt = 0), delete_vertices=False)
    # flip the sign so the negative layer carries positive weights
    G_neg.es['weight'] = [-w for w in G_neg.es['weight']]
    # NOTE(review): these two partitions are constructed twice; the first
    # pair is discarded and only the second pair is optimised -- the first
    # two lines look redundant.
    part_pos = louvain.ModularityVertexPartition(G_pos, weights='weight')
    part_neg = louvain.ModularityVertexPartition(G_neg, weights='weight')
    optimiser = louvain.Optimiser()
    part_pos = louvain.ModularityVertexPartition(G_pos, weights='weight')
    part_neg = louvain.ModularityVertexPartition(G_neg, weights='weight')
    # layer_weights=[1,-1] rewards positive-layer modularity and penalises
    # negative-layer modularity (signed-network community detection)
    diff = optimiser.optimise_partition_multiplex([part_pos, part_neg],layer_weights=[1,-1])
    # while diff > 0:
    #     diff = optimiser.optimise_partition_multiplex([part_pos, part_neg],layer_weights=[1,-1])
    #     print(diff)
    # print(part_neg)
    # print(part_pos)
    # for v in G.vs:
    #     print(v.index, v["label"])
    # print(dir(part_pos), part_pos.membership)
    # print(dir(part_pos))
    # print(part_pos.summary())
    # print(part_pos.modularity, part_pos.q, part_pos)
    # map each original node label to its igraph vertex index ...
    node_partition = {}
    for v in G.vs:
        node_partition[v["label"]] = v.index
    # ... and then to its community id in the positive-layer partition
    node_partition2 = {}
    memberships = [i for i in part_pos.membership]
    assert len(memberships) == len(node_partition)
    for i in node_partition:
        node_partition2[i] = memberships[node_partition[i]]
    return node_partition2
def generate_group_results(node_list = ['1384', '3762', '1493', '3767', '1762', '7364'], depth = 0):
    """Cluster the depth-expanded subgraph and build grouped node-link JSON.

    Keeps only the communities containing a seed node, and within each keeps
    the seeds plus the 5*(depth+1) people with the highest 'c1' centrality.
    """
    print(node_list, depth)
    sub_g = get_subgraph(node_list, depth)
    # take the clustering result, then pick the top 5*(depth+1) of each group
    results = layer_partition(sub_g)
    attrs , centrality_attrs = get_property(sub_g)
    groups = defaultdict(list)
    for i in results:
        groups[results[i]].append(i)
    # only communities that contain at least one seed node are kept
    allow_groups = set([results[i] for i in node_list])
    res_groups = defaultdict(list)
    for group in groups:
        if group in allow_groups:
            group_i = groups[group]
            some_group_people = set()
            # rank group members by their first centrality score ('c1')
            group_i_sorted = sorted(group_i, key=lambda x:centrality_attrs[x]['c1'], reverse=True)
            #
            # always include the seed nodes that fall in this group
            for node in node_list:
                if node in group_i:
                    some_group_people.add(node)
            for node in group_i_sorted[:5*(depth+1)]:
                some_group_people.add(node)
            res_groups[group] = list(some_group_people)
    print(allow_groups, len(res_groups))
    all_nodes = []
    for group in res_groups:
        for node in res_groups[group]:
            tmp = {
                "group": group,
                "id": node,
                "label": node,
                "name": centrality_attrs[node]["ChName"],
                "data": centrality_attrs[node]
            }
            all_nodes.append(tmp)
    # rebuild the induced subgraph over exactly the selected people
    all_node_list = [i["id"] for i in all_nodes]
    res_sub_g = get_subgraph(all_node_list, depth = 0)
    result_json = json_graph.node_link_data(res_sub_g)
    for link in result_json["links"]:
        # debug print of every link; 'value' mirrors 'weight' for the frontend
        print(link)
        link['value'] = link['weight']
    result_json["nodes"] = all_nodes
    return result_json
def generate_direct_results(node_list):
    """Build link lists for the positive, negative and signed graphs.

    Returns (pos_links, neg_links, signed_links, nodes) for the given people.
    """
    pos_data, pos_nodes = naive_plot(node_list)
    neg_data, neg_nodes = naive_plot(node_list, cate='2')
    sgn_data, _sgn_nodes = naive_plot(node_list, cate='3')
    # the node payload should be identical across graph flavours
    if (json.dumps(pos_nodes) != json.dumps(neg_nodes)):
        print('sth not good!')
    return pos_data['links'], neg_data['links'], sgn_data['links'], pos_nodes
def compute(node_list, depth=0):
    """Assemble every visualisation payload for the given people.

    Combines the community-grouped node-link data with the three direct
    link lists (positive / negative / signed) and the node metadata.
    """
    grouped = generate_group_results(node_list, depth)
    links1, links2, links3, nodes = generate_direct_results(node_list)
    return {
        "links1": links1,
        "links2": links2,
        "links3": links3,
        "nodes": nodes,
        "link_datas": grouped,
    }
def main():
    """Demo entry point: print the positive-graph links for six Song figures."""
    figures = ['1384', '3762', '1493', '3767', '1762', '7364']
    pos_links, _neg_links, _sgn_links, _nodes = generate_direct_results(figures)
    print(pos_links)


if __name__ == '__main__':
    main()
|
{"hexsha": "6cbd83235f641dd5b0c6cc54df9123ea69b73dc3", "size": 9802, "ext": "py", "lang": "Python", "max_stars_repo_path": "SIGNLENS/functions.py", "max_stars_repo_name": "huangjunjie95/signlens", "max_stars_repo_head_hexsha": "005c0ad0d625c0efa14ba696b6e5db7d8ae81fbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-10-18T06:39:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-15T06:29:51.000Z", "max_issues_repo_path": "SIGNLENS/functions.py", "max_issues_repo_name": "huangjunjie95/signlens", "max_issues_repo_head_hexsha": "005c0ad0d625c0efa14ba696b6e5db7d8ae81fbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SIGNLENS/functions.py", "max_forks_repo_name": "huangjunjie95/signlens", "max_forks_repo_head_hexsha": "005c0ad0d625c0efa14ba696b6e5db7d8ae81fbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.137704918, "max_line_length": 119, "alphanum_fraction": 0.6177310753, "include": true, "reason": "import networkx,from networkx", "num_tokens": 2702}
|
import time
import numpy as np
from matplotlib import pyplot as plt
from progressbar import progressbar as pbar
# Module-wide random generator used to build the test systems.
rng = np.random.default_rng()
# Minimum wall-clock time (seconds) each function is exercised for when timing.
T = 0.5
def clock(func, mat, vec):
    """Average runtime of func(mat, vec), repeated for at least T seconds.

    Calls func in a loop until T seconds of wall-clock time have elapsed and
    returns the mean time per call.
    """
    start = time.perf_counter()
    calls = 0
    while time.perf_counter() - start < T:
        func(mat, vec)
        calls += 1
    elapsed = time.perf_counter() - start
    return elapsed / calls
def formal(mat, vec):
    """Solve mat @ x = vec by explicitly forming the matrix inverse."""
    inverse = np.linalg.inv(mat)
    return inverse @ vec
def faster(mat, vec):
    """Solve mat @ x = vec directly (LU factorisation, no explicit inverse)."""
    solution = np.linalg.solve(mat, vec)
    return solution
if __name__ == '__main__':
    # Benchmark: inverse-then-multiply vs np.linalg.solve over a range of
    # matrix sizes, plotted on a log-log scale.
    num_sizes = 100
    # I flipped n_vals so that the large arrays would run first
    n_vals = np.flip(np.geomspace(100, 7000, num_sizes, dtype='i'))
    form = np.zeros(num_sizes)
    fast = np.zeros(num_sizes)
    for i, n in enumerate(pbar(n_vals)):
        A = rng.random((n, n))  # both algorithms run on the same linear system
        b = rng.random(n).T     # .T is a no-op on a 1-D array
        form[i] = clock(formal, A, b)
        fast[i] = clock(faster, A, b)
    # log-log plot of mean execution time against system size
    plt.figure()
    plt.plot(n_vals, form, label='inverse method')
    plt.plot(n_vals, fast, label='np.linalg method')
    plt.xscale('log')
    plt.yscale('log')
    plt.xlabel('Array size')
    plt.ylabel('Execution time (s)')
    plt.tight_layout()
    plt.grid()
    plt.legend()
    plt.show()
|
{"hexsha": "fd8923bf959b2562ede772818ba88efed3cc1f4a", "size": 1236, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/complexity/linear_solve.py", "max_stars_repo_name": "jacione/phys513", "max_stars_repo_head_hexsha": "a8e1d1de800b0372d013d69543e1619b0fb8e4e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/complexity/linear_solve.py", "max_issues_repo_name": "jacione/phys513", "max_issues_repo_head_hexsha": "a8e1d1de800b0372d013d69543e1619b0fb8e4e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/complexity/linear_solve.py", "max_forks_repo_name": "jacione/phys513", "max_forks_repo_head_hexsha": "a8e1d1de800b0372d013d69543e1619b0fb8e4e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.2352941176, "max_line_length": 79, "alphanum_fraction": 0.6221682848, "include": true, "reason": "import numpy", "num_tokens": 352}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.