# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for slerp."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_graphics.math.interpolation import slerp
from tensorflow_graphics.util import test_case
_SQRT2_DIV2 = np.sqrt(2.0).astype(np.float32) * 0.5
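# Note: _SQRT2_DIV2 = cos(pi/4) = sin(pi/4). Slerp between two orthogonal unit
# quaternions at percent 0.5 lands halfway along the great arc, which is why this
# constant appears throughout the preset test values below.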
class SlerpTest(test_case.TestCase):
def _pick_random_quaternion(self):
"""Creates a random quaternion with random shape."""
tensor_size = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()
return np.random.normal(size=tensor_shape + [4])
def _quaternion_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for quaternions."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.QUATERNION)
def _vector_slerp_helper(self, q1, q2, p):
"""Calls interpolate function for vectors."""
return slerp.interpolate(q1, q2, p, slerp.InterpolationType.VECTOR)
def test_interpolate_raises_exceptions(self):
"""Tests if unknown methods raise exceptions."""
vector1 = self._pick_random_quaternion()
self.assert_exception_is_raised(
slerp.interpolate,
error_msg="Unknown interpolation type supplied.",
shapes=[],
vector1=vector1,
vector2=-vector1,
percent=0.1,
method=2)
def test_interpolate_with_weights_quaternion_preset(self):
"""Compares interpolate to quaternion_weights + interpolate_with_weights."""
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
q1 = tf.nn.l2_normalize(q1, axis=-1)
q2 = tf.nn.l2_normalize(q2, axis=-1)
weight1, weight2 = slerp.quaternion_weights(q1, q2, 0.25)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(
q1, q2, 0.25, method=slerp.InterpolationType.QUATERNION)
self.assertAllClose(qf, qi, atol=1e-9)
def test_interpolate_with_weights_vector_preset(self):
"""Compares interpolate to vector_weights + interpolate_with_weights."""
# Any quaternion is a valid vector
q1 = self._pick_random_quaternion()
q2 = q1 + tf.ones_like(q1)
weight1, weight2 = slerp.vector_weights(q1, q2, 0.75)
qf = slerp.interpolate_with_weights(q1, q2, weight1, weight2)
qi = slerp.interpolate(q1, q2, 0.75, method=slerp.InterpolationType.VECTOR)
self.assertAllClose(qf, qi, atol=1e-9)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)),
((-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((0.408248290463863, -0.408248290463863, 0.816496580927726, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-0.5,)),
((-0.408248290463863, -0.408248290463863, -0.816496580927726, 0.0),)),
)
def test_quaternion_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of qslerp against numpy-quaternion values."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._quaternion_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_unnormalized_quaternion_weights_exception_raised(self):
"""Tests if quaternion_weights raise exceptions for unnormalized input."""
q1 = self._pick_random_quaternion()
q2 = tf.nn.l2_normalize(q1, axis=-1)
p = tf.constant((0.5), dtype=q1.dtype)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(slerp.quaternion_weights(q1, q2, p))
@parameterized.parameters(
((4,), (4,), (1,)),
((None, 4), (None, 4), (None, 1)),
((None, 4), (None, 4), (None, 4)),
)
def test_quaternion_weights_exception_not_raised(self, *shapes):
"""Tests that valid input shapes do not raise exceptions for qslerp."""
self.assert_exception_is_not_raised(slerp.quaternion_weights, shapes)
@parameterized.parameters(
("must have exactly 4 dimensions in axis -1", (3,), (4,), (1,)),
("must have exactly 4 dimensions in axis -1", (4,), (3,), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 4), (3, 4),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 4), (3, 4),
(2,)),
)
def test_quaternion_weights_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised for qslerp."""
self.assert_exception_is_raised(slerp.quaternion_weights, error_msg, shapes)
@parameterized.parameters(
# Same quaternions
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)), (
(0.25,),
(0.75,),
)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.2,)), (
(-0.8,),
(0.2,),
)),
# Anti-polar - large percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.8,)), (
(-0.2,),
(0.8,),
)),
)
def test_quaternion_weights_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of quaternion_weights for problem cases."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(slerp.quaternion_weights, test_inputs,
test_outputs, tile=False)
@parameterized.parameters(
((3,), (3,), (1,)),
((None, 4), (None, 4), (None, 1)),
)
def test_vector_weights_exception_not_raised(self, *shapes):
"""Tests that valid inputs do not raise exceptions for vector_weights."""
self.assert_exception_is_not_raised(slerp.vector_weights, shapes)
@parameterized.parameters(
("must have the same number of dimensions in axes", (None, 3), (None, 4),
(1,)),
("must have the same number of dimensions in axes", (2, 3), (2, 4), (1,)),
("Not all batch dimensions are broadcast-compatible.", (2, 3), (3, 3),
(1,)),
("Not all batch dimensions are broadcast-compatible.", (1, 3), (3, 3),
(2,)),
)
def test_vector_weights_exception_raised(self, error_msg, *shapes):
"""Tests that shape exceptions are properly raised for vector_weights."""
self.assert_exception_is_raised(slerp.vector_weights, error_msg, shapes)
@parameterized.parameters(
# Orthogonal, same hemisphere
(((1.0, 0.0, 0.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.5,)),
((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),)),
(((_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)), ((0.5, 0.5, 0.5, 0.5),)),
# Same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(0.0, 0.0, _SQRT2_DIV2, _SQRT2_DIV2), (0.5,)),
((0.408248290463863, 0.0, 0.816496580927726, 0.408248290463863),)),
# Same vectors
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0), (0.75,)),
((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),)),
# Anti-polar - equal weights
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.5,)),
((0.0, 0.0, 0.0, 0.0),)),
# Anti-polar - small percent
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, 0.0, -_SQRT2_DIV2, 0.0), (0.25,)),
((0.5, 0.0, 0.5, 0.0),)),
# Extrapolation - same hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (-1.0,)),
((0.0, -_SQRT2_DIV2, _SQRT2_DIV2, 0.0),)),
# Extrapolation - opposite hemisphere
(((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0),
(-_SQRT2_DIV2, _SQRT2_DIV2, 0.0, 0.0), (1.5,)),
((-_SQRT2_DIV2, -0.0, -_SQRT2_DIV2, 0.0),)),
# Unnormalized vectors
(((4.0, 0.0), (0.0, 1.0), (0.5,)), ((2.82842712, _SQRT2_DIV2),)),
)
def test_vector_slerp_preset(self, test_inputs, test_outputs):
"""Tests the accuracy of vector slerp results."""
test_inputs = [np.array(test_input).astype(np.float32)
for test_input in test_inputs]
self.assert_output_is_correct(self._vector_slerp_helper, test_inputs,
test_outputs, tile=False)
def test_vector_weights_reduce_to_lerp_preset(self):
"""Tests if vector slerp reduces to lerp for identical vectors as input."""
q1 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
q2 = tf.constant((_SQRT2_DIV2, 0.0, _SQRT2_DIV2, 0.0))
p = tf.constant((0.75,), dtype=q1.dtype)
w1, w2 = slerp.vector_weights(q1, q2, p)
self.assertAllClose(w1, (0.25,), rtol=1e-6)
self.assertAllClose(w2, (0.75,), rtol=1e-6)
if __name__ == "__main__":
test_case.main()
\chapter{Holomorphic functions}
Throughout this chapter, we denote by $U$ an open subset of the complex plane,
and by $\Omega$ an open subset which is also simply connected.
The main references for this chapter were \cite{ref:dartmouth,ref:bak_ca}.
\section{The nicest functions on earth}
In high school you were told how to differentiate and integrate real-valued functions.
In this chapter on complex analysis,
we'll extend it to differentiation and integration of complex-valued functions.
Big deal, you say. Calculus was boring enough. Why do I care about complex calculus?
Perhaps it's easiest to motivate things if I compare real analysis to complex analysis.
In real analysis, your input lives inside the real line $\RR$.
This line is not terribly discerning -- you can construct a lot of unfortunate functions.
Here are some examples.
\begin{example}
[Optional: evil real functions]
You can skim over these very quickly: they're only here to make a point.
\begin{enumerate}[(a)]
\ii The \vocab{Devil's Staircase} (or Cantor function)
is a continuous function $H : [0,1] \to [0,1]$
which has derivative zero ``almost everywhere'',
yet $H(0) = 0$ and $H(1) = 1$.
\ii The \vocab{Weierstra\ss\ function}
\[ x \mapsto \sum_{n=0}^\infty \left( \half \right)^n \cos \left( 2015^n \pi x \right) \]
is continuous \emph{everywhere} but differentiable \emph{nowhere}.
\ii The function
\[
x \mapsto
\begin{cases}
x^{100} & x \ge 0 \\
-x^{100} & x < 0
\end{cases}
\]
has the first $99$ derivatives but not the $100$th one.
\ii
If a function has all derivatives (we call these \vocab{smooth} functions),
then it has a Taylor series.
But for real functions that Taylor series might still be wrong. The function
\[ x \mapsto
\begin{cases}
e^{-1/x} & x > 0 \\
0 & x \le 0
\end{cases}
\]
has derivatives at every point.
But if you expand the Taylor series at $x=0$, you get $0 + 0x + 0x^2 + \dots$,
which is wrong for \emph{any} $x > 0$ (even $x=0.0001$).
\end{enumerate}
\end{example}
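To see why the last example fails so badly, here is the key computation
(a sketch; the induction details are left to the reader).
For $x > 0$, each derivative of $e^{-1/x}$ has the form $P(1/x) e^{-1/x}$
for some polynomial $P$, and substituting $t = 1/x$,
\[ \lim_{x \to 0^+} P(1/x) e^{-1/x} = \lim_{t \to \infty} \frac{P(t)}{e^t} = 0 \]
since the exponential dominates any polynomial.
Hence every derivative at $0$ exists and equals zero,
so the Taylor series at $0$ is identically zero.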
\begin{figure}[h]
\centering
\includegraphics[width=0.8\textwidth]{media/weierstrass-pubdomain.png}
\caption{The Weierstra\ss\ Function (image from \cite{img:weierstrass}).}
\end{figure}
Let's even put aside the pathology.
If I tell you the values of a real smooth function on the interval $[-1, 1]$,
that still doesn't tell you anything about the function as a whole.
It could be literally anything, because it's somehow possible to ``fuse together'' smooth functions.
So what about complex functions?
If you consider them as functions $\RR^2 \to \RR^2$, you now have the interesting property
that you can integrate along things that are not line segments: you can write integrals
across curves in the plane.
But $\CC$ has something more: it is a \emph{field}, so you can \emph{multiply} and \emph{divide} two complex numbers.
So we restrict our attention to complex-differentiable functions; these are called \emph{holomorphic functions}.
It turns out that the multiplication on $\CC$ makes all the difference.
The primary theme in what follows is that holomorphic functions are \emph{really, really nice},
and that knowing tiny amounts of data about the function can determine all its values.
%In particular, they are highly \emph{rigid} and \emph{regular}.
The two main highlights of this chapter,
from which all other results are more or less corollaries:
\begin{itemize}
\ii Contour integrals of loops are always zero.
\ii A holomorphic function is essentially given by its Taylor series;
in particular, single-differentiable implies infinitely differentiable.
Thus, holomorphic functions behave quite like polynomials.
\end{itemize}
Some of the resulting corollaries:
\begin{itemize}
\ii It'll turn out that knowing the values of a holomorphic function
on the boundary of the unit circle will tell you the values in its interior.
\ii Knowing the values of the function at $1$, $\half$, $\frac13$, \dots
is enough to determine the whole function!
\ii Bounded holomorphic functions $\CC \to \CC$ must be constant.
\ii And more\dots
\end{itemize}
As \cite{ref:pugh} writes: ``Complex analysis is the good twin and real analysis is the evil one: beautiful formulas and elegant theorems seem to blossom spontaneously in the complex domain, while toil and pathology rule the reals''.
\section{Complex differentiation}
\prototype{Polynomials are holomorphic; $\ol z$ is not.}
Let $f : U \to \CC$ be a complex function.
Then for some $z_0 \in U$, we define the \vocab{derivative} at $z_0$ to be
\[
\lim_{h \to 0} \frac{f(z_0+h) - f(z_0)}{h}.
\]
Note that this limit may not exist;
when it does we say $f$ is \vocab{differentiable} at $z_0$.
What do I mean by a ``complex'' limit $h \to 0$?
It's what you might expect: the limit is $L$ if for every $\eps > 0$ there is a $\delta > 0$
such that
\[ 0 < \left\lvert h \right\rvert < \delta
\implies
\left\lvert \frac{f(z_0+h)-f(z_0)}{h} - L \right\rvert < \eps. \]
If you like topology, you are encouraged to think of this in terms of
open neighborhoods in the complex plane.
(This is why we require $U$ to be open:
it makes it possible to take $\delta$-neighborhoods in it.)
But note that having a complex derivative is actually much stronger
than a real function having a derivative.
In the real line, $h$ can only approach zero from below and above,
and for the limit to exist we need the ``left limit'' to equal the ``right limit''.
But the complex numbers form a \emph{plane}: $h$ can approach zero
from many directions, and we need all the limits to be equal.
\begin{example}
[Important: conjugation is \emph{not} holomorphic]
Let $f(z) = \ol z$ be complex conjugation, $f : \CC \to \CC$.
This function, despite its simple nature, is not holomorphic!
Indeed, at $z=0$ we have,
\[ \frac{f(h)-f(0)}{h} = \frac{\ol h}{h}. \]
This does not have a limit as $h \to 0$, because depending
on ``which direction'' we approach zero from we have different values.
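Concretely, approaching along the real axis ($h = t$ for real $t \to 0$)
gives $\ol h / h = 1$, while approaching along the imaginary axis ($h = it$)
gives $\ol h / h = -it/(it) = -1$.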
\begin{center}
\begin{asy}
size(7cm);
dot("$0$", origin, dir(225));
void meow(string s, real theta, real eps = 45, pen p) {
draw( (dir(theta) * 0.8) -- (dir(theta) * 0.2), p+1);
draw( (dir(theta) * 0.8) -- (dir(theta) * 0.2), p, Arrow);
label(s, dir(theta)*0.5, dir(eps), p);
}
meow("$1$", 0, 90, blue);
meow("$1$", 180, 90, blue);
meow("$i$", -45, 45, heavygreen);
meow("$-1$", 90, 0, red);
label("$f(z) = \overline z$", dir(135));
label("$\dfrac{f(0+h)-f(0)}{h}$", dir(135)-0.35*dir(90));
import graph;
graph.xaxis("Re", -1, 1, grey, NoTicks, Arrows);
graph.yaxis("Im", -1, 1, grey, NoTicks, Arrows);
\end{asy}
\end{center}
\end{example}
If a function $f : U \to \CC$ is complex differentiable
at all points of its domain, it is called \vocab{holomorphic}.
In the special case of a holomorphic function with domain $U = \CC$,
we call the function \vocab{entire}.\footnote{Sorry, I know the word ``holomorphic'' sounds so much cooler. I'll try to do things more generally for that sole reason.}
\begin{example}
[Examples of holomorphic functions]
In all the examples below, the derivative of the function
is the same as in their real analogues
(e.g.\ the derivative of $e^z$ is $e^z$).
\begin{enumerate}[(a)]
\ii Any polynomial $z \mapsto z^n + c_{n-1} z^{n-1} + \dots + c_0$ is holomorphic.
\ii The complex exponential $\exp : x+yi \mapsto e^x (\cos y + i \sin y)$
can be shown to be holomorphic.
\ii $\sin$ and $\cos$ are holomorphic when extended
to the complex plane by $\cos z = \frac{e^{iz}+e^{-iz}}{2}$
and $\sin z = \frac{e^{iz}-e^{-iz}}{2i}$.
\ii As usual, the sum, product, chain rules and so on apply,
and hence \textbf{sums, products, nonzero quotients,
and compositions of holomorphic functions are also holomorphic}.
\end{enumerate}
\end{example}
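As a small sanity check, for $f(z) = z^2$ the limit in the definition can be computed by hand:
\[ \frac{(z_0+h)^2 - z_0^2}{h} = \frac{2z_0 h + h^2}{h} = 2z_0 + h \to 2z_0
\quad \text{as } h \to 0, \]
with no dependence on the direction in which $h$ approaches zero --
exactly as in the real case.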
You are welcome to try and prove these results, but I won't bother to do so.
\section{Contour integrals}
\prototype{$\oint_\gamma z^m \; dz$ around the unit circle.}
In the real line we knew how to integrate a function across a line segment $[a,b]$:
essentially, we'd ``follow along'' the line segment adding up the values of $f$ we see
to get some area.
Unlike in the real line, in the complex plane we have the power to integrate
over arbitrary paths: for example, we might compute an integral around a unit circle.
A contour integral lets us formalize this.
First of all, if $f : \RR \to \CC$ and $f(t) = u(t) + iv(t)$ for $u,v \in \RR$,
we can define an integral $\int_a^b$ by just adding the real and imaginary parts:
\[ \int_a^b f(t) \; dt
= \left( \int_a^b u(t) \; dt \right)
+ i \left( \int_a^b v(t) \; dt \right). \]
Now let $\alpha : [a,b] \to \CC$ be a path, thought of as
a complex differentiable\footnote{This isn't entirely correct here:
you want the path $\alpha$ to be continuous and mostly differentiable,
but you allow a finite number of points to have ``sharp bends''; in other words,
you can consider paths which are combinations of $n$ smooth pieces.
But for this we also require that $\alpha$ has ``bounded length''.} function.
Such a path is called a \vocab{contour},
and we define its \vocab{contour integral} by
\[
\oint_\alpha f(z) \; dz
= \int_a^b f(\alpha(t)) \cdot \alpha'(t) \; dt.
\]
You can almost think of this as a $u$-substitution (which is where the $\alpha'$ comes from).
In particular, it turns out this integral does not depend on how $\alpha$ is ``parametrized'':
a circle given by \[ [0,2\pi] \to \CC : t \mapsto e^{it} \]
and another circle given by \[ [0,1] \to \CC : t \mapsto e^{2\pi i t} \]
and yet another circle given by \[ [0,1] \to \CC : t \mapsto e^{2 \pi i t^5} \]
will all give the same contour integral, because the paths they represent have the same
geometric description: ``run around the unit circle once''.
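For example, to compare the first two parametrizations above,
substitute $t = 2\pi s$ in the defining integral:
\[ \int_0^{2\pi} f(e^{it}) \cdot ie^{it} \; dt
= \int_0^1 f(e^{2\pi i s}) \cdot 2\pi i e^{2\pi i s} \; ds, \]
and the right-hand side is exactly the contour integral for $t \mapsto e^{2\pi i t}$,
whose derivative is $2\pi i e^{2\pi i t}$.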
In what follows I try to use $\alpha$ for general contours and $\gamma$ in the special case of loops.
Let's see an example of a contour integral.
\begin{theorem}
\label{thm:central_cauchy_computation}
Take $\gamma : [0,2\pi] \to \CC$ to be the unit circle specified by
\[ t \mapsto e^{it}. \]
Then for any integer $m$, we have
\[ \oint_\gamma z^{m} \; dz
=
\begin{cases}
2\pi i & m = -1 \\
0 & \text{otherwise}
\end{cases}
\]
\end{theorem}
\begin{proof}
The derivative of $e^{it}$ is $i e^{it}$.
So, by definition the answer is the value of
\begin{align*}
\int_0^{2\pi} (e^{it})^m \cdot (ie^{it}) \; dt
&= \int_0^{2\pi} i(e^{it})^{1+m} \; dt \\
&= i \int_0^{2\pi} \cos [(1+m)t] + i \sin [(1+m)t] \; dt \\
&= - \int_0^{2\pi} \sin [(1+m)t] \; dt + i \int_0^{2\pi} \cos [(1+m)t] \; dt.
\end{align*}
This is now an elementary calculus question:
if $m = -1$ the integrand is the constant $i$, giving $2\pi i$;
otherwise $1+m \neq 0$, and both $\int_0^{2\pi} \sin[(1+m)t] \; dt$
and $\int_0^{2\pi} \cos[(1+m)t] \; dt$ vanish over a full period.
\end{proof}
Let me try to explain why this intuitively ought to be true for $m=0$.
In that case we have $\oint_\gamma 1 \; dz$.
So as the integral walks around the unit circle, it ``sums up'' all the tangent vectors
at every point (that's the direction it's walking in), multiplied by $1$.
And given the nice symmetry of the circle, it should come as no surprise that everything cancels out.
The theorem says that even if we multiply by $z^m$ for $m \neq -1$, we get the same cancellation.
\begin{center}
\begin{asy}
size(5cm);
draw(unitcircle, dashed);
void arrow(real theta) {
pair P = dir(theta);
dot(P);
pair delta = 0.4*P*dir(90);
draw( P--(P+delta), EndArrow );
}
arrow(0);
arrow(50);
arrow(140);
arrow(210);
arrow(300);
\end{asy}
\end{center}
\begin{definition}
Given $\alpha : [0,1] \to \CC$,
we denote by $\ol\alpha$ the ``backwards'' contour
$\ol\alpha(t) = \alpha(1-t)$.
\end{definition}
\begin{ques}
What's the relation between $\oint_\alpha f \; dz$ and $\oint_{\ol\alpha} f \; dz$?
Prove it.
\end{ques}
This might seem a little boring.
Things will get really cool really soon, I promise.
\section{Cauchy-Goursat theorem}
\prototype{$\oint_\gamma z^m \; dz = 0$ for $m \ge 0$. But if $m < 0$, Cauchy's theorem does not apply.}
Let $\Omega \subseteq \CC$ be simply connected (for example, $\Omega = \CC$),
and consider two paths $\alpha$, $\beta$ with the same start and end points.
\begin{center}
\begin{asy}
unitsize(0.8cm);
bigbox("$\Omega$");
pair A = Drawing((-3,0));
pair B = Drawing((3,0));
draw(A..(-2,0.5)..MP("\alpha", (0,2), dir(90))..(1,1.2)..B, red, EndArrow);
draw(A----MP("\beta", (A+B)/2, dir(-90))--B, blue, EndArrow);
\end{asy}
\end{center}
What's the relation between $\oint_\alpha f(z) \; dz$ and $\oint_\beta f(z) \; dz$?
You might expect there to be some relation between them, considering that the space $\Omega$ is simply connected.
But you probably wouldn't expect there to be \emph{much} of a relation.
As a concrete example, let $\Psi : \CC \to \CC$ be the function $z \mapsto z - \Re[z]$
(for example, $\Psi(2015+3i) = 3i$). Let's consider two paths from $-1$ to $1$:
$\beta$ walks along the real axis, while $\alpha$ follows the upper semicircle.
\begin{center}
\begin{asy}
pair A = Drawing("-1", dir(180), dir(-90));
pair B = Drawing("1", dir(0), dir(-90));
draw(arc(origin, 1, 180, 0), red, EndArrow);
MP("\alpha", dir(90), dir(90));
draw(A--B, blue, EndArrow);
MP("\beta", 0, dir(-90));
\end{asy}
\end{center}
Obviously $\oint_\beta \Psi(z) \; dz = 0$.
But heaven knows what $\oint_\alpha \Psi(z) \; dz$ is supposed to equal.
We can compute it now just out of non-laziness.
If you like, you are welcome to compute it yourself (it's a little annoying but not hard).
If I myself didn't mess up, it is
\[ \oint_\alpha \Psi(z) \; dz = - \oint_{\ol\alpha} \Psi(z) \; dz
= - \int_0^\pi (i \sin(t)) \cdot ie^{it} \; dt = \half\pi i \]
which in particular is not zero.
But somehow $\Psi$ is not a really natural function.
It's not respecting any of the nice, multiplicative structure of $\CC$ since
it just rudely lops off the real part of its inputs.
More precisely,
\begin{ques}
Show that $\Psi(z) = z - \Re[z]$ is not holomorphic.
(Hint: $\ol z$ is not holomorphic.)
\end{ques}
Now here's a miracle: for holomorphic functions, the two integrals are \emph{always equal}.
Equivalently, (by considering $\alpha$ followed by $\ol\beta$) contour integrals of loops are always zero.
This is the celebrated Cauchy-Goursat theorem
(also called the Cauchy integral theorem,
but later we'll have a ``Cauchy Integral Formula'' so blah).
\begin{theorem}
[Cauchy-Goursat theorem]
Let $\gamma$ be a loop, and $f : \Omega \to \CC$ a holomorphic function
where $\Omega$ is open in $\CC$ and simply connected.
Then
\[ \oint_\gamma f(z) \; dz = 0. \]
\end{theorem}
\begin{remark}[Sanity check]
This might look surprising considering that we saw $\oint_\gamma z^{-1} \; dz = 2 \pi i$ earlier.
The subtlety is that $z^{-1}$ is not even defined at $z = 0$.
On the other hand, the function $\CC \setminus \{0\} \to \CC$ by $z \mapsto \frac 1z$ \emph{is} holomorphic!
The defect now is that $\Omega = \CC \setminus \{0\}$ is not simply connected.
So the theorem passes our sanity checks, albeit barely.
\end{remark}
The typical proof of Cauchy's Theorem assumes additionally
that the partial derivatives of $f$ are continuous
and then applies the so-called Green's theorem.
But it was Goursat who successfully proved the fully general theorem we've stated above,
which assumed only that $f$ was holomorphic.
I'll only outline the proof, and very briefly.
You can show that if $f : \Omega \to \CC$ has an antiderivative $F : \Omega \to \CC$ which is also holomorphic,
and moreover $\Omega$ is simply connected, then you get a ``fundamental theorem of calculus'', a la
\[ \oint_\alpha f(z) \; dz = F(\alpha(b)) - F(\alpha(a)) \]
where $\alpha : [a,b] \to \CC$ is some path.
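Indeed, by the chain rule
\[ \frac{d}{dt} F(\alpha(t)) = F'(\alpha(t)) \cdot \alpha'(t) = f(\alpha(t)) \cdot \alpha'(t), \]
so the integrand is an honest derivative and the usual
fundamental theorem of calculus applies.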
So to prove Cauchy-Goursat, you only have to show this antiderivative $F$ exists.
Goursat works very hard to prove the result in the special case that $\gamma$ is a triangle,
and hence by induction for any polygon.
Once he has the result for a triangle, he uses this special case to construct the function $F$ explicitly.
Goursat then shows that $F$ is holomorphic, completing the proof.
Anyways, the theorem implies that $\oint_\gamma z^m \; dz = 0$ when $m \ge 0$.
So much for all our hard work earlier.
But so far we've only played with circles.
This theorem holds for \emph{any} contour which is a loop.
So what else can we do?
\section{Cauchy's integral formula}
We now present a stunning application of Cauchy-Goursat, a ``representation theorem'':
essentially, it says that values of $f$ inside a disk
are determined by just the values on the boundary!
In fact, we even write down the exact formula.
As \cite{ref:dartmouth} says,
``any time a certain type of function satisfies some sort of representation theorem,
it is likely that many more deep theorems will follow.''
Let's pull back the curtain:
\begin{theorem}[Cauchy's integral formula]
Let $\gamma : [0,2\pi] \to \CC$ be a circle in the plane given by $t \mapsto Re^{it}$,
which bounds a disk $D$.
Suppose $f : U \to \CC$ is holomorphic such that $U$ contains the circle and its interior.
Then for any point $a$ in the interior of $D$, we have
\[
f(a)
=
\frac{1}{2\pi i} \oint_\gamma \frac{f(z)}{z-a} \; dz.
\]
\end{theorem}
Note that we don't require $U$ to be simply connected, but the reason is pretty silly:
we're only going to ever integrate $f$ over $D$, which is an open disk, and hence the disk
is simply connected anyways.
The presence of $2\pi i$, which you saw earlier in the form $\oint_{\text{circle}} z^{-1} \; dz$,
is no accident.
In fact, that computation is the central ingredient in the proof below.
\begin{proof}
There are several proofs out there, but I want to give the one that really
draws out the power of Cauchy's theorem. Here's the picture we have:
there's a point $a$ sitting inside a circle $\gamma$,
and we want to get our hands on the value $f(a)$.
\begin{center}
\begin{asy}
size(3cm);
draw(unitcircle, dashed, MidArrow);
MP("\gamma", dir(-45), dir(-45));
pair a = 0.1 * dir(60);
dot("$a$", a, dir(a));
\end{asy}
\end{center}
We're going to do a trick: construct a \vocab{keyhole contour} $\Gamma_{\delta, \eps}$
which has an outer circle $\gamma$, plus an inner circle $\ol{\gamma_\eps}$, which is a circle centered
at $a$ with radius $\eps$, running clockwise (so that $\gamma_\eps$ runs counterclockwise).
The ``width'' of the corridor is $\delta$. See picture:
\begin{center}
\begin{asy}
size(4cm);
MP("\gamma", dir(-45), dir(-45));
pair a = 0.1 * dir(60);
dot("$a$", a, dir(a));
real delta_outer = 6;
real delta_inner = 20;
pair P = dir(60+delta_outer);
pair Q = dir(60-delta_outer);
draw(arc(origin, 1, 60+delta_outer, 360+60-delta_outer), MidArrow);
draw(arc(a, 0.3, 60-delta_inner, -360+60+delta_inner), MidArrow);
draw(dir(60-delta_outer)--(a+0.3*dir(60-delta_inner)), MidArrow);
draw((a+0.3*dir(60+delta_inner))--dir(60+delta_outer), MidArrow);
MP("\overline{\gamma_\varepsilon}", a+0.3*dir(225), dir(225));
\end{asy}
\end{center}
Hence $\Gamma_{\delta,\eps}$ consists of four smooth curves.
\begin{ques}
Draw a \emph{simply connected} open set $\Omega$ which contains the entire
$\Gamma_{\delta,\eps}$ but does not contain the point $a$.
\end{ques}
Hence, the function $\frac{f(z)}{z-a}$ manages to be holomorphic on all of $\Omega$.
Thus Cauchy's theorem applies and tells us that
\[
0 = \oint_{\Gamma_{\delta,\eps}} \frac{f(z)}{z-a} \; dz.
\]
As we let $\delta \to 0$, the two walls of the keyhole will cancel each other (because $f$ is continuous,
and the walls run in opposite directions).
So taking the limit as $\delta \to 0$, we are left with just $\gamma$ and $\gamma_\eps$,
which (taking again orientation into account) gives
\[
\oint_{\gamma} \frac{f(z)}{z-a} \; dz
= - \oint_{\ol{\gamma_\eps}} \frac{f(z)}{z-a} \; dz
= \oint_{\gamma_\eps} \frac{f(z)}{z-a} \; dz.
\]
Thus \textbf{we've managed to replace $\gamma$ with a much smaller circle $\gamma_\eps$ centered around $a$},
and the rest is algebra.
To compute the last quantity, write
\begin{align*}
\oint_{\gamma_\eps} \frac{f(z)}{z-a} \; dz
&=
\oint_{\gamma_\eps} \frac{f(z)-f(a)}{z-a} \; dz
+
f(a) \cdot \oint_{\gamma_\eps} \frac{1}{z-a} \; dz \\
&=
\oint_{\gamma_\eps} \frac{f(z)-f(a)}{z-a} \; dz
+
2\pi i f(a).
\end{align*}
where we've used \Cref{thm:central_cauchy_computation}
(the same computation, with the circle translated to be centered at $a$ and rescaled to radius $\eps$).
Thus, all we have to do is show that
\[ \oint_{\gamma_\eps} \frac{f(z)-f(a)}{z-a} \; dz = 0. \]
For this we can basically use the weakest bound possible, the so-called $ML$ lemma
which I'll cite without proof:
it says ``bound the function everywhere by its maximum''.
\begin{lemma}
[$ML$ estimation lemma]
Let $f$ be a holomorphic function and $\alpha$ a path.
Suppose $M = \max_{z \text{ on } \alpha} \left\lvert f(z) \right\rvert$, and
let $L$ be the length of $\alpha$.
Then
\[ \left\lvert \oint_\alpha f(z) \; dz \right\rvert \le ML. \]
\end{lemma}
(This is straightforward to prove if you know the definition of length:
$L = \int_a^b |\alpha'(t)| \; dt$, where $\alpha : [a,b] \to \CC$.)
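If you want, here is the whole one-line estimate, under the same definitions:
\[ \left\lvert \oint_\alpha f(z) \; dz \right\rvert
= \left\lvert \int_a^b f(\alpha(t)) \alpha'(t) \; dt \right\rvert
\le \int_a^b \left\lvert f(\alpha(t)) \right\rvert \left\lvert \alpha'(t) \right\rvert \; dt
\le M \int_a^b \left\lvert \alpha'(t) \right\rvert \; dt = ML. \]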
Anyways, as $\eps \to 0$, the quantity $\frac{f(z)-f(a)}{z-a}$ approaches $f'(a)$,
and so for small enough $\eps$ (i.e.\ $z$ close to $a$) there's some upper bound $M$.
Yet the length of $\gamma_\eps$ is the circumference $2\pi \eps$.
So the $ML$ lemma says that
\[ \left\lvert \oint_{\gamma_\eps} \frac{f(z)-f(a)}{z-a} \; dz \right\rvert
\le 2\pi\eps \cdot M \to 0
\]
as desired.
\end{proof}
\section{Holomorphic functions are analytic}
\prototype{Imagine a formal series $\sum_k c_k x^k$!}
In the setup of the previous section, we have a circle $\gamma : [0,2\pi] \to \CC$
and a holomorphic function $f \colon U \to \CC$, where $U$ contains the disk $D$ bounded by $\gamma$.
We can write
\begin{align*}
f(a) &= \frac{1}{2\pi i} \oint_\gamma \frac{f(z)}{z-a} \; dz \\
&= \frac{1}{2\pi i} \oint_\gamma \frac{f(z)/z}{1 - \frac az} \; dz \\
&= \frac{1}{2\pi i} \oint_\gamma f(z)/z \cdot \sum_{k \ge 0} \left( \frac az \right)^k \; dz \\
\intertext{Here the geometric series converges because $\lvert a \rvert < \lvert z \rvert = R$, and you can prove (using the so-called Weierstrass M-test) that the summation order can be switched:}
f(a) &= \frac{1}{2\pi i} \sum_{k \ge 0} \oint_\gamma \frac{f(z)}{z} \cdot \left( \frac az \right)^k \; dz \\
&= \frac{1}{2\pi i} \sum_{k \ge 0} \oint_\gamma a^k \cdot \frac{f(z)}{z^{k+1}} \; dz \\
&= \sum_{k \ge 0} \left( \frac{1}{2\pi i}\oint_\gamma \frac{f(z)}{z^{k+1}} \; dz \right) a^k. \\
\intertext{Letting
$c_k = \frac{1}{2\pi i}\oint_\gamma \frac{f(z)}{z^{k+1}} \; dz$,
and noting this is independent of $a$, this is}
f(a) &= \sum_{k \ge 0} c_k a^k
\end{align*}
and that's the miracle: holomorphic functions
are given by a \vocab{Taylor series}!
This is one of the biggest results in complex analysis.
Moreover, if one is willing to believe that we can
take the derivative $k$ times, we obtain
\[ c_k = \frac{f^{(k)}(0)}{k!} \]
and this gives us $f^{(k)}(0) = k! \cdot c_k$.
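As a sanity check, take $f = \exp$ and $\gamma$ the unit circle:
expanding $e^z = \sum_{j \ge 0} z^j/j!$ and integrating term by term
(again justified by the M-test), \Cref{thm:central_cauchy_computation}
kills every term except $j = k$, giving
\[ c_k = \frac{1}{2\pi i} \oint_\gamma \frac{e^z}{z^{k+1}} \; dz
= \frac{1}{2\pi i} \cdot \frac{1}{k!} \cdot 2\pi i = \frac{1}{k!}, \]
which matches the real Taylor series of $e^x$.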
Naturally, we can do this with any circle (not just one centered at zero).
So let's state the full result below, with arbitrary center $p$.
\begin{theorem}
[Cauchy's differentiation formula]
Let $f : U \to \CC$ be a holomorphic function and let $D$ be a disk centered at point $p$
bounded by a circle $\gamma$. Suppose $D$ is contained inside $U$.
Then $f$ is given everywhere in $D$ by a Taylor series
\[
f(z) = c_0 + c_1(z-p) + c_2(z-p)^2 + \dots
\]
where
\[
c_k = \frac{f^{(k)}(p)}{k!} = \frac{1}{2\pi i} \oint_\gamma \frac{f(w)}{(w-p)^{k+1}} \; dw.
\]
In particular,
\[ f^{(k)}(p) = k! c_k = \frac{k!}{2\pi i} \oint_\gamma \frac{f(w)}{(w-p)^{k+1}} \; dw. \]
\end{theorem}
Most importantly,
\begin{moral}
Over any disk,
a holomorphic function is given
exactly by a Taylor series.
\end{moral}
This establishes a result we stated at the beginning of the chapter:
that a function being complex differentiable once means it is not only infinitely differentiable,
but in fact equal to its Taylor series.
I should maybe emphasize a small subtlety of the result:
the Taylor series centered at $p$ is only valid in a disk centered at $p$ which lies entirely in the domain $U$.
If $U = \CC$ this is no issue, since you can make the disk big enough to accommodate any point you want.
It's more subtle in the case that $U$ is, for example, a square; you can't cover the entire square
with a disk centered at some point without going outside the square.
However, since $U$ is open we can at any rate at least find some
open neighborhood for which the Taylor
series is correct -- in stark contrast to the real case.
Indeed, as you'll see in the problems,
the existence of a Taylor series is incredibly powerful.
\section\problemhead
These aren't olympiad problems, but I think they're especially nice!
In the next complex analysis chapter we'll see some more nice applications.
The first few results are the most important.
\begin{sproblem}
[Liouville's theorem]
\gim
Let $f : \CC \to \CC$ be an entire function.
Suppose that $\left\lvert f(z) \right\rvert < 1000$ for all complex numbers $z$.
Prove that $f$ is a constant function.
\begin{hint}
Look at the Taylor series of $f$,
and use Cauchy's differentiation formula to
show that each of the larger coefficients must be zero.
\end{hint}
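\begin{sol}
One possible write-up, following the hint: for any radius $R > 0$,
Cauchy's differentiation formula together with the $ML$ lemma gives
\[ \left\lvert c_k \right\rvert
= \left\lvert \frac{1}{2\pi i} \oint_{\lvert z \rvert = R} \frac{f(z)}{z^{k+1}} \; dz \right\rvert
\le \frac{1}{2\pi} \cdot 2\pi R \cdot \frac{1000}{R^{k+1}} = \frac{1000}{R^k}. \]
Letting $R \to \infty$ shows $c_k = 0$ for every $k \ge 1$, so $f = c_0$ is constant.
\end{sol}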
% \footnote{%
% It's true more generally that if
% $\left\lvert f(z) \right\rvert < A+B\left\lvert z \right\rvert^n$
% for some constants $A$ and $B$,
% then $f$ is a polynomial of degree at most $n$.
% The proof is induction on $n$ with the case $n=0$ being the theorem.}
\end{sproblem}
\begin{sproblem}[Zeros are isolated]
An \vocab{isolated set} in the complex plane is a
set of points $S$ such that around each point in $S$,
one can draw an open neighborhood not intersecting any other point of $S$.
Show that the zero set of any nonzero holomorphic
function $f : U \to \CC$ is an isolated set,
unless there exists a nonempty open subset of $U$
on which $f$ is identically zero.
\begin{hint}
Proceed by contradiction,
meaning there exists a sequence $z_1, z_2, \dots \to z$
where $0 = f(z_1) = f(z_2) = \dots$ all distinct.
Prove that $f = 0$ on an open neighborhood of $z$
by looking at the Taylor series of $f$ and
pulling out factors of $z$.
\end{hint}
\begin{sol}
Proceed by contradiction, meaning there exists a sequence $z_1, z_2, \dots \to z$
where $0 = f(z_1) = f(z_2) = \dots$ all distinct.
WLOG set $z=0$.
Look at the Taylor series of $f$ around $z=0$.
Since it isn't uniformly zero by assumption,
write it as $a_N z^N + a_{N+1}z^{N+1} + \dots$, $a_N \neq 0$.
But by continuity of $h(z) = a_N + a_{N+1}z + \dots$ there is some
open neighborhood of zero where $h(z) \neq 0$.
\end{sol}
\end{sproblem}
\begin{sproblem}
[Identity theorem]
\gim
Let $f, g : U \to \CC$ be holomorphic, and assume that $U$ is connected.
Prove that if $f$ and $g$ agree on some open neighborhood,
then $f = g$.
\begin{hint}
Take the interior of the agreeing points;
show that this set is closed, which implies the conclusion.
\end{hint}
\begin{sol}
Let $S$ be the interior of the points satisfying $f=g$.
By definition $S$ is open.
By the previous part, $S$ is closed: if $z_i \to z$ and $z_i \in S$,
then $f=g$ in some open neighborhood of $z$, hence $z \in S$.
Since $S$ is clopen and nonempty, $S = U$.
\end{sol}
\end{sproblem}
%\begin{dproblem}
% [Mean Value Property]
% Let $f : U \to \CC$ be holomorphic.
% Assume that $z_0 \in U$ and the disk centered at $z_0$
% with radius $r > 0$ is contained inside $U$. Show that
% \[ f(z_0) = \frac{1}{2\pi} \int_0^{2\pi} f(z_0+re^{it}) \; dt. \]
% In other words, $f(z_0)$ is the average of $f$ along the circle.
% \begin{hint}
% Evaluate $\oint_\gamma \frac{f(w)}{w-z_0} \; dw$ over the circle.
% \end{hint}
%\end{dproblem}
\begin{dproblem}[Maximums Occur On Boundaries]
Let $f : U \to \CC$ be holomorphic, let $Y \subseteq U$ be compact,
and let $\partial Y$ be the boundary\footnote{
The boundary $\partial Y$ is the set of points $p$
such that no open neighborhood of $p$ is contained in $Y$.
It is also a compact set if $Y$ is compact.
} of $Y$.
Show that
\[ \max_{z \in Y} \left\lvert f(z) \right\rvert
= \max_{z \in \partial Y} \left\lvert f(z) \right\rvert. \]
In other words, the maximum values of $\left\lvert f \right\rvert$ occur
on the boundary. (Such maximums exist by compactness.)
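\begin{hint}
One possible approach: if the maximum occurred at an interior point $p$,
apply Cauchy's integral formula on a small circle around $p$
to write $f(p)$ as an average of nearby values,
and deduce that $\lvert f \rvert$ must attain the same maximum on that circle as well;
push this outward to the boundary.
\end{hint}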
\end{dproblem}
\begin{problem}
[Harvard quals]
Let $f : \CC \to \CC$ be a nonconstant entire function.
Prove that $f\im(\CC)$ is dense in $\CC$.
(In fact, a much stronger result is true:
Little Picard's theorem says that the image of a nonconstant
entire function omits at most one point.)
\begin{hint}
Liouville. Look at $\frac{1}{f(z)-w}$.
\end{hint}
\begin{sol}
Suppose we want to show that there's a point
in the image within $\eps$ of a given point $w \in \CC$.
Look at $\frac{1}{f(z) - w}$ and use Liouville's theorem.
\end{sol}
\end{problem}
%\begin{dproblem}
% [Liouiville's theorem extended]
% Let $f : \CC \to \CC$ be entire.
% \begin{enumerate}[(a)]
% \ii Show that if $\left\lvert f(z) \right\rvert < C \left\lvert z \right\rvert^{1000}$
% for some constant $C$, then $f$ is a polynomial of degree at most $1000$.
% \ii Show that the image $f\im(\CC)$ is dense in $\CC$,
% unless $f$ is constant.
% \end{enumerate}
% \begin{hint}
% Part (a) is the same proof of the original Louiville's theorem.
%
% For part (b), assume it's not dense and misses a circle at $w$
% with radius $\eps$. Look at $\frac{1}{f(z)-w}$ and show it's bounded.
% \end{hint}
%\end{dproblem}
%\begin{problem}
% Show that a nonzero entire function can have at most countably many zeros,
% and give an example where equality occurs.
% \begin{hint}
% Assume there are uncountably many zeros
% and do a pigeonhole style argument to force them to
% accumulate at some point.
% Then apply the identity theorem.
% Equality occurs at $\sin(z)$.
% \end{hint}
%\end{problem}
import numpy as np
import pandas as pd
import cv2
from matplotlib import pyplot as plt
from pathlib import Path
import random
from skimage.draw import circle  # note: replaced by skimage.draw.disk in scikit-image >= 0.19
from skatingAI.Data.BODY_25_model import BODY_25
from skatingAI.Data.skating_dataset import get_data_names, get_pose_kp
path = f"{Path.cwd()}/Data"
def normalize_kp(kp, height, width):
kp_0 = kp[0] # x (width)
kp_1 = kp[1] # y(height)
if kp_1 >= height:
kp_1 = height - 1
if kp_0 >= width:
kp_0 = width-1
return int(kp_0), int(kp_1)
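# Example (hypothetical values): a keypoint on the far image corner is clamped inside
# the bounds, e.g. normalize_kp((640, 480), height=480, width=640) returns (639, 479).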
def show_keypoints_img(name, frame_n, max_frame_amount=2, score_bound=18):
imgs, img_kps, kps, scores, frame_ns = \
find_subsequent_frames(
name, frame_n, max_frame_amount, score_bound)
if imgs is None:
return None, None, None
max_kps, max_scores = find_max_scores(scores, kps)
print(f"> max score: ", np.sum(max_scores))
# plot the frames with keypoints
fig = plt.figure(num=f"{name}_{frame_ns}_kp_analysis", figsize=(15, 10))
for idx in range(max_frame_amount):
# first row: frames with keypoints
a = fig.add_subplot(2, max_frame_amount, idx+1)
a.set_title(f"{name}_{frame_ns[idx]}_kp")
plt.imshow(
img_kps[idx], cmap='coolwarm', interpolation='bicubic')
# second row: frames with max keypoints
a = fig.add_subplot(2, max_frame_amount, idx+max_frame_amount+1)
a.set_title(f"{name}_{frame_ns[idx]}_result")
img_max = kp2img(max_kps, imgs[idx])
plt.imshow(img_max, cmap='coolwarm', interpolation='bicubic')
plt.show()
return scores, max_scores, frame_ns
def find_max_scores(scores, kps):
max_scores_idx = np.argmax(scores, axis=0)
max_kp, max_kps = [], []
for i, ms in enumerate(max_scores_idx):
max_kp = kps[ms][i]
max_kps.append([*max_kp, scores[ms][i]])
return np.array(max_kps), np.amax(scores, axis=0)
def find_subsequent_frames(name, frame_n, max_frame_amount=2, score_bound=18):
imgs, img_kps, scores, kps, ns = [], [], [], [], []
subsequent_frame_n = 1
max_frame_check_gate, max_frame_check = 100, 0
while subsequent_frame_n <= max_frame_amount and \
max_frame_check < max_frame_check_gate:
img = None
try:
img = cv2.imread(
f"{path}/ResultingFrames/{name}_{frame_n}.jpg", 0)
imgs.append(img)
        except Exception:  # avoid bare except; note cv2.imread returns None for missing files instead of raising
print(f'{name}_{frame_n}: not found')
subsequent_frame_n = 0
imgs, img_kps, scores, kps, ns = [], [], [], [], []
if img is not None:
_img_kps, _kps, _scores = add_keypoints_img(
name, frame_n, img.copy())
if np.sum(_scores) > score_bound:
img_kps.append(_img_kps)
kps.append(_kps)
scores.append(_scores)
ns.append(frame_n)
else:
subsequent_frame_n = 0
imgs, img_kps, scores, kps, ns = [], [], [], [], []
else:
subsequent_frame_n = 0
imgs, img_kps, scores, kps, ns = [], [], [], [], []
frame_n += 1
subsequent_frame_n += 1
max_frame_check += 1
if max_frame_check == max_frame_check_gate:
return None, None, None, None, None
print('*'*100)
return imgs, img_kps, kps, scores, ns
def kp2img(kp, img):
if len(img.shape) == 2:
height, width = img.shape
color = 255
else:
height, width, rgb = img.shape
color = np.array([255, 255, 255])
    for point in kp:  # avoid shadowing the parameter name
        radius = 5
        kp_0, kp_1 = normalize_kp(point, height-radius, width-radius)
        if len(point) == 3:
            if point[2] < 0.9:
                color -= 50
            if point[2] < 0.5:
                color -= 100
        rr, cc = circle(kp_1, kp_0, radius)
        img[rr, cc] = color
return img
def add_keypoints_img(name, frame_n, img):
img_path = f"{path}/ResultingFrames/{name}_{frame_n}.jpg"
kp_path = str(list(Path(
f"{path}/KeyPoints/{name}").glob(f"{name}*0{frame_n}_keypoints.json"))[0])
focus_person_kp = get_pose_kp(pd.read_json(kp_path), show_score=True)
img = kp2img(focus_person_kp, img)
return img, focus_person_kp[:, :2], focus_person_kp[:, 2]
def analyze_images():
max_frame_amount, score_bound = 3, 19
df_scores = pd.DataFrame(columns=["name", *BODY_25])
df_max_scores = pd.DataFrame(columns=["name", *BODY_25])
video_names = get_data_names()
frames = [(name, random.randint(1, 100)) for name in video_names]
for idx, frame in enumerate(frames):
print("\n\n", "*"*100, "\n", frame[0])
scores, max_scores, frames_ns = show_keypoints_img(
frame[0], frame[1], max_frame_amount, score_bound)
if scores is not None:
for _i, _score in enumerate(scores):
df_scores.loc[idx +
_i] = [f"{frame[0]}_{frames_ns[_i]}", *_score]
df_max_scores.loc[idx] = [frame[0], *max_scores]
else:
            print(
                f"\n{frame[0]} is a really crappy video! Best would be to throw it away very quickly.\n")
print(df_scores)
print(df_max_scores)
df_scores.to_csv(f"{Path.cwd()}/Analysis_kp/scores.csv")
df_max_scores.to_csv(f"{Path.cwd()}/Analysis_kp/max_scores.csv")
# analyze_images()
import numpy as np
import threading
import multiprocessing
#As the name suggests, this class wraps a processed TPD data file.
#It provides methods for further processing of this data, for example inversion analysis.
#Potentially look at the RawDataWrapper first if this class is too confusing.
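#A minimal usage sketch (the file path below is hypothetical):
# w = ProcessedDataWrapper("/data/mass32.pdat")
# if w.parseProcessedDataFile():
#     w.invertProcessedData(prefactor=1e13)    # inversion analysis for one trial prefactor
#     w.simulateCoveragesFromInvertedData()    # RK4 re-simulation of the desorption traces
#     w.evaluateData()                         # chi-squared between simulated and measured rates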
class ProcessedDataWrapper():
def __init__(self, filePath):
self.m_filePath = filePath
substrings = filePath.split('/')
self.m_fileName = substrings[len(substrings) - 1]
self.m_dataParsed = False #data has been parsed?
self.m_dataInverted = False #data has been inverted?
self.m_normalized = False #input data is normalized?
self.m_dataSimulated = False #simulation done?
self.m_listOfColumns = [] #labels for columns
self.m_parsedInputData = [] #effectively experimental desorption rate normalized to units of 1ML
self.m_totalCoverages = [] #total coverage for an input data column
self.m_expCoverages = {} #dictionary to connect dataset with prefactor as key, coverage(temp), from experiment
self.m_desorptionEnergies = {} #dictionary to connect dataset with prefactor as key
self.m_simCoverages = {} #dictionary to connect dataset with prefactor as key, coverage(temp), from simulation
self.m_simDesorptionRate = {} #dictionary to connect dataset with prefactor as key, dTheta/dT, from sim
self.m_chiSquared = {}
self.m_includedFiles = []
def parseProcessedDataFile(self):
if(self.m_dataParsed):
return True
try: #try parsing so we can return false if we catch an exception
firstHeaderLine = np.loadtxt(self.m_filePath, dtype=str, max_rows=1, comments=None) #ignoring comments because we want to load the header
self.m_mass = int(firstHeaderLine[-1]) #last part of first header line is mass number
secondHeaderLine = np.loadtxt(self.m_filePath, dtype=str, skiprows=1, max_rows=1, comments=None) #ignoring comments, same as before
headerLength = int (secondHeaderLine[-1]) #last part of second header line is header length
for i in range(3, headerLength-1):
includedFileNameBuffer = np.loadtxt(self.m_filePath, dtype=str, skiprows=i, max_rows = 1, comments= None)
self.m_includedFiles.append(' '.join(includedFileNameBuffer[2:]))#ignore '# ' before line
firstTwoLines = np.loadtxt(self.m_filePath, dtype=str, max_rows=2)
self.m_listOfColumns = firstTwoLines[0,:]
self.m_totalCoverages = [float(c) for c in firstTwoLines[1,:]]
if not (1.0 in self.m_totalCoverages):
self.m_normalized = False
else:
self.m_normalized = True
temp = np.loadtxt(self.m_filePath, dtype = float, comments = None, skiprows= headerLength + 2)
#now columns can be traversed contiguously in memory
self.m_parsedInputData = temp.transpose().copy()
self.m_dataParsed = True
return True
        except Exception:
            return False
def getInputData(self):
return self.m_parsedInputData
def clearInvertedData(self):
self.m_expCoverages = {} #dictionary to connect dataset with prefactor as key
self.m_desorptionEnergies = {} #dictionary to connect dataset with prefactor as key
def invertProcessedData(self, prefactor, rampRate = 1, gasConstant_R = 8.314):
#gas constant R in J/mol/K
#ramp rate in K/s
# coverageBuffer = np.zeros(shape=self.m_parsedInputData.shape)
# desorptionEnergiesBuffer = np.zeros(shape=self.m_parsedInputData.shape)
self.m_expCoverages[str(prefactor)] = np.zeros(shape=self.m_parsedInputData.shape)
self.m_desorptionEnergies[str(prefactor)] = np.zeros(shape=self.m_parsedInputData.shape)
#calculate coverage vs temperature
for i in range(len(self.m_totalCoverages)-1):
self.m_expCoverages[str(prefactor)][i,:] = np.array([self.m_totalCoverages[i+1] - np.trapz(self.m_parsedInputData[i+1,:j],x=self.m_parsedInputData[0,:j]) for j in range(self.m_parsedInputData.shape[1])])
# if (len(self.m_totalCoverages) > 1): #if we have multiple datasets, go through all of them
factor1 = (rampRate * prefactor)
factor2 = - gasConstant_R * 0.001 * 0.01036410 #factor for eV
for i in range(len(self.m_totalCoverages)-1):
temp = self.m_parsedInputData[i+1,:] / (self.m_expCoverages[str(prefactor)][i,:] * factor1)
# self.m_expCoverages = np.vstack((self.m_totalCoverages,np.trapz(self.m_parsedInputData[i,:],x=self.m_parsedInputData[0,:]) - self.m_totalCoverages[i]))
self.m_desorptionEnergies[str(prefactor)][i,:] = factor2 * self.m_parsedInputData[0,:] * np.log(temp)
# self.m_desorptionEnergies[str(prefactor)][i,np.where(self.m_expCoverages[str(prefactor)][i,:] == self.m_totalCoverages[i+1])] = 0.0 #set desorption energy to zero where coverage stays constant (i.e. temperatures below desorption peaks)
# self.m_expCoverages[str(prefactor)] = coverageBuffer
# self.m_desorptionEnergies[str(prefactor)] = desorptionEnergiesBuffer
#E_des(:,i)=-R.*temp.*log(des_rate(:,i)./(ramp_rate.*pref_fce.*coverage_fc(:,i)))*0.001;% in kJ/mol
self.m_dataInverted = True
def getExpCoverageVSTemp(self, prefactor):
return np.vstack((self.m_parsedInputData[0,:],self.m_expCoverages[str(prefactor)][:-1,:]))
def getExpDesorptionRateVSTemp(self):
return self.m_parsedInputData
def fileNameToExpDesorptionRateVSTemp(self, fileName):
try:
index = self.m_includedFiles.index(fileName) + 1
return np.vstack((self.m_parsedInputData[0,:],self.m_parsedInputData[index,:]))
        except Exception:  # fileName not in m_includedFiles (list.index raises ValueError)
return None
def getDesEnergyVSCoverageList(self, prefactor):
result = []
for i in range(len(self.m_totalCoverages)-1):
result.append(np.vstack((self.m_expCoverages[str(prefactor)][i,:],self.m_desorptionEnergies[str(prefactor)][i,:])))
return result
def saveInvertedDataToFile(self,outputFilePath):
if(outputFilePath == None):
raise ValueError
#keys are prefactors
for k in self.m_desorptionEnergies.keys():
headerString = "Processed TPD data for mass " + str(self.m_mass) + \
"\nHeader length is " + str(4) + \
"\nPrefactor is " + "{:e}".format(float(k)) + \
"\nA temperature column is followed by pairs of coverage and desorption energy columns:"
#outputData starts out column-major
outputData = self.m_parsedInputData[0,:].copy() # start with temperature column
labels = ["Temperature"]
coverages = [str(0.0)]
for i in range(len(self.m_totalCoverages) - 1):
# headerString = headerString + w.m_fileName + "\n" #write filename to header for quick overview
outputData = np.vstack((outputData, self.m_expCoverages[k][i,:], self.m_desorptionEnergies[k][i,:])) #append data column for coverage and then mass
labels.append("Coverage_" + self.m_listOfColumns[i+1]) # append total coverage once for coverage column
labels.append("EDes_" + self.m_listOfColumns[i+1]) # append total coverage a second time for desorption energy column
coverages.append(str(self.m_totalCoverages[i+1])) # append total coverage once for coverage column
coverages.append(str(self.m_totalCoverages[i+1])) # append total coverage a second time for desorption energy column
#make one file per mass
namedOutputFilePath = outputFilePath + ".M" + str(self.m_mass) + "Prefactor_" + "{:e}".format(float(k)) + ".invdat" #pdat for processed data
stringData = np.vstack((np.array(labels,dtype=str),np.array(coverages,dtype=str)))
with open(namedOutputFilePath, mode='a') as fileHandle:
#write header and stringData first
np.savetxt(fileHandle, stringData, fmt="%s", delimiter=' ', header=headerString)
#then write float data (after transposing it)
np.savetxt(fileHandle, outputData.transpose(), delimiter=' ')
    #Represents the Polanyi-Wigner equation. Used for the Runge-Kutta integration.
def polanyiWigner(self,prefactor,coverageRow,monolayerCoverage,monolayerDesEnergy,temperature,eCharge = 1.6022e-19,kBoltz=1.3806e-23):
interpolatedEnergyInEV = np.interp(coverageRow,monolayerCoverage,monolayerDesEnergy)
result = -prefactor*coverageRow*np.exp(-interpolatedEnergyInEV*(eCharge/kBoltz)/temperature)
return result
    #Effectively this is the heart of the inversion analysis: a classical RK4 integrator,
    #stepping coverage_next = coverage + (k1 + 2*k2 + 2*k3 + k4)*tStep/6 with the Polanyi-Wigner rate as right-hand side.
    #It is called by the following dispatch function, depending on whether we have multiple prefactors and multiple available cores.
def simulateCoverageFromInvertedDataForSinglePrefactor(self, strPrefactor, tStep = 0.1):
monolayerIndex = self.m_totalCoverages.index(1.0) - 1 #index of the data column associated with 1ML coverage
# self.m_monolayerIndex = self.m_totalCoverages.index(1.0) - 1 #index of the data column associated with 1ML coverage
# self.m_totalCoverages[self.m_monolayerIndex] = 1.0-np.finfo(float).eps
# monolayerIndex = self.m_monolayerIndex #index of the data column associated with 1ML coverage
temperature = self.m_parsedInputData[0,:] #temperature data column
monolayerCoverage = self.m_expCoverages[strPrefactor][monolayerIndex,::-1].copy() #should be same every time
monolayerDesEnergy = self.m_desorptionEnergies[strPrefactor][monolayerIndex,::-1].copy() #should be different every time
floatPrefactor = float(strPrefactor)
simCoverageBuffer = np.zeros(shape=(len(temperature), len(self.m_totalCoverages) - 1))
simDesorptionRateBuffer = simCoverageBuffer.copy() #start with zeros and same shape
simCoverageBuffer[0,:] = self.m_totalCoverages[1:] #starting values for coverages
simCoverageBuffer[np.where(simCoverageBuffer == 1.0)] -= 0.0001
for i in range(len(temperature) - 1):
refSimCoverageRow = simCoverageBuffer[i,:]
#RK4 integration
k1 = self.polanyiWigner(floatPrefactor,
refSimCoverageRow,
monolayerCoverage,
monolayerDesEnergy,
temperature[i])
k2 = self.polanyiWigner(floatPrefactor,
refSimCoverageRow + 0.5*tStep*k1,
monolayerCoverage,
monolayerDesEnergy,
temperature[i] + 0.5*tStep)
k3 = self.polanyiWigner(floatPrefactor,
refSimCoverageRow + 0.5*tStep*k2,
monolayerCoverage,
monolayerDesEnergy,
temperature[i] + 0.5*tStep)
k4 = self.polanyiWigner(floatPrefactor,
refSimCoverageRow + tStep*k3,
monolayerCoverage,
monolayerDesEnergy,
temperature[i] + tStep)
simDesorptionRateBuffer[i,:] = - k1.copy()
# for e in self.m_simDesorptionRate[k][i,:]:
# if e >= 1.0:
# e = 0.0
newSimCoverageRow = refSimCoverageRow + (1.0/6.0)*(k1+ 2.0*k2 + 2.0*k3 + k4)*tStep
for j in range(len(newSimCoverageRow)):
if newSimCoverageRow[j] < 1.0e-6: #values this small are effectively zero
newSimCoverageRow[j] = 0.0
if newSimCoverageRow[j] > 1.0: #at some point the simulation diverges
newSimCoverageRow[j] = 0.0
simCoverageBuffer[i+1,:] = newSimCoverageRow.copy() # write new row to simulated data array
return strPrefactor, simCoverageBuffer.transpose().copy() , simDesorptionRateBuffer.transpose().copy() #column major now
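# RK4 refresher (a sketch, independent of the data): for dy/dt = f(t, y) and step h,
#   k1 = f(t, y)
#   k2 = f(t + h/2, y + (h/2)*k1)
#   k3 = f(t + h/2, y + (h/2)*k2)
#   k4 = f(t + h, y + h*k3)
#   y_next = y + (h/6)*(k1 + 2*k2 + 2*k3 + k4)
# This is exactly the update applied to each coverage row above, with f given by
# polanyiWigner and h = tStep (a temperature step, assuming a uniform temperature grid).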
#This is the dispatching function for the inversion analysis: one process per prefactor, selected by the if-branches below.
def simulateCoveragesFromInvertedData(self):
if (not self.m_dataInverted):
raise ValueError("Inversion must be performed before simulation and evaluation.")
prefactorList = list(self.m_expCoverages)
if( len(prefactorList) == 1): #only one prefactor
_, self.m_simCoverages[prefactorList[0]], self.m_simDesorptionRate[prefactorList[0]] = self.simulateCoverageFromInvertedDataForSinglePrefactor(prefactorList[0]) #column major now
else: #try multiprocessing
cpu_count = multiprocessing.cpu_count()
if( cpu_count == 1): #single-core
for k in prefactorList:
_, self.m_simCoverages[k], self.m_simDesorptionRate[k] = self.simulateCoverageFromInvertedDataForSinglePrefactor(k) #column major now
else: #use one process per prefactor, capped at the available core count minus one (one core stays free for the UI thread)
print("Using " + str(min(cpu_count - 1, len(prefactorList))) + " processes!") #debug
with multiprocessing.Pool(min(cpu_count - 1, len(prefactorList))) as p:
results = p.map(self.simulateCoverageFromInvertedDataForSinglePrefactor, prefactorList)
for r in results: #r[0] is the prefactor key, r[1] is the simCoverage, r[2] is the simDesorptionRate
self.m_simCoverages[r[0]] = r[1]
self.m_simDesorptionRate[r[0]] = r[2]
self.m_dataSimulated = True #simulation done?
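# Pattern note (a sketch): Pool.map pickles the instance and the bound method, runs
# one call per prefactor key, and returns the results in input order; each worker
# also returns its own key so the result dictionaries can be reassembled safely, e.g.
#   with multiprocessing.Pool(2) as p:
#       results = p.map(self.simulateCoverageFromInvertedDataForSinglePrefactor,
#                       ['1e13', '1e14']) # illustrative keys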
def getSimCoverageVSTemp(self, prefactor):
return np.vstack((self.m_parsedInputData[0,:],self.m_simCoverages[str(prefactor)]))
def getSimDesRateVSTemp(self, prefactor):
return np.vstack((self.m_parsedInputData[0,:],self.m_simDesorptionRate[str(prefactor)]))
#Here we calculate the chi-squared as the last step of the inversion analysis.
def evaluateData(self):
if not self.m_dataSimulated:
raise ValueError("Cannot evaluate before the simulation has been run.")
for k in self.m_expCoverages.keys():
self.m_chiSquared[k] = np.zeros(len(self.m_totalCoverages) - 1)
for i in range(len(self.m_totalCoverages) - 1):
sim = self.m_simDesorptionRate[k][i,:] #simulated desorption rate
obs = self.m_parsedInputData[i+1,:] #observed desorption rate
difference = sim - obs
diffSquared = difference*difference
#guard against near-zero observed rates: such terms are dropped instead of dividing by ~0
self.m_chiSquared[k][i] = np.sum(np.where( obs > 1.0e-6, diffSquared/obs, 0))
def getChiSquaredVSPrefactor(self):
prefactorList = list(self.m_chiSquared) #this returns the keys of the dictionary as an indexable list
dataPointsPerPrefactor = len(self.m_chiSquared[prefactorList[0]]) - 1 #-1 because we don't want the chisquared for the monolayer coverage
result = np.zeros((len(prefactorList),dataPointsPerPrefactor))
for i in range(len(prefactorList)):
for j in range(dataPointsPerPrefactor):
result[i,j] = self.m_chiSquared[prefactorList[i]][j]
result = np.vstack((np.array([float(p) for p in prefactorList]),result.transpose()))
return result
def getChiSquaredSumVSPrefactor(self):
prefactorList = list(self.m_chiSquared) #this returns the keys of the dictionary as an indexable list
result = np.zeros(len(prefactorList))
for i in range(len(prefactorList)):
for j in range(len(self.m_chiSquared[prefactorList[i]]) - 1): #-1 because we don't want the chisquared for the monolayer coverage
result[i] += self.m_chiSquared[prefactorList[i]][j] #sum of chi squared values for a prefactor
result = np.vstack((np.array([float(p) for p in prefactorList]),result.transpose()))
return result
def getCoverageLabels(self):
result = []
for c in self.m_totalCoverages[1:]:
result.append("M" + str(self.m_mass) + ' {:04.2f} ML'.format(c))
return result
def fileNameToCoverageLabel(self, fileName):
try:
index = self.m_includedFiles.index(fileName) + 1
c = self.m_totalCoverages[index]
return "M" + str(self.m_mass) + ' {:04.2f} ML'.format(c)
except (ValueError, IndexError): #unknown file name or out-of-range index
return None
def getMinTemp(self):
return self.m_parsedInputData[0,0]
def getMaxTemp(self):
return self.m_parsedInputData[0,-1]
def getProcessedDataBetweenForFile(self, t1, t2, fileName):
try:
dataIndex = self.m_includedFiles.index(fileName) + 1
indices = np.where((self.m_parsedInputData[0,:] > t1) & (self.m_parsedInputData[0,:] < t2))[0]
idx1 = indices[0]
idx2 = indices[-1]
result = (self.m_parsedInputData[0,idx1:idx2],self.m_parsedInputData[dataIndex,idx1:idx2])
return result
except (ValueError, IndexError): #unknown file or no data points in the temperature window
return None
def integrateDesorptionRate(self, t1, t2, fileName):
try:
data = self.getProcessedDataBetweenForFile(t1,t2,fileName)
result = np.trapz(data[1],data[0]) #y, then x
return result
except TypeError: #getProcessedDataBetweenForFile returned None
return None
|
{"hexsha": "16b240e4777dadb9df966ebdeba34af8c06a5079", "size": 18729, "ext": "py", "lang": "Python", "max_stars_repo_path": "DataModels/ProcessedDataWrapper.py", "max_stars_repo_name": "manuelulreich/TPDAnalysisToolkit", "max_stars_repo_head_hexsha": "ba3bf59658a113543fc5b70f36e38975e26675f3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DataModels/ProcessedDataWrapper.py", "max_issues_repo_name": "manuelulreich/TPDAnalysisToolkit", "max_issues_repo_head_hexsha": "ba3bf59658a113543fc5b70f36e38975e26675f3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DataModels/ProcessedDataWrapper.py", "max_forks_repo_name": "manuelulreich/TPDAnalysisToolkit", "max_forks_repo_head_hexsha": "ba3bf59658a113543fc5b70f36e38975e26675f3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 60.4161290323, "max_line_length": 249, "alphanum_fraction": 0.6425329703, "include": true, "reason": "import numpy", "num_tokens": 4556}
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import trimesh
from os import path as osp
from .image_vis import (draw_camera_bbox3d_on_img, draw_depth_bbox3d_on_img,
draw_lidar_bbox3d_on_img)
def _write_obj(points, out_filename):
"""Write points into ``obj`` format for meshlab visualization.
Args:
points (np.ndarray): Points in shape (N, dim).
out_filename (str): Filename to be saved.
"""
N = points.shape[0]
fout = open(out_filename, 'w')
for i in range(N):
if points.shape[1] == 6:
c = points[i, 3:].astype(int)
fout.write(
'v %f %f %f %d %d %d\n' %
(points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
else:
fout.write('v %f %f %f\n' %
(points[i, 0], points[i, 1], points[i, 2]))
fout.close()
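# Example usage (a sketch; the path and point count are arbitrary):
#   points = np.concatenate([np.random.rand(100, 3),
#                            np.random.randint(0, 256, (100, 3))], axis=1)
#   _write_obj(points, '/tmp/points.obj') # colored vertices, viewable in meshlab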
def _write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes.
Args:
scene_bbox(list[ndarray] or ndarray): xyz pos of center and
3 lengths (dx,dy,dz) and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename(str): Filename.
"""
def heading2rotmat(heading_angle):
rotmat = np.zeros((3, 3))
rotmat[2, 2] = 1
cosval = np.cos(heading_angle)
sinval = np.sin(heading_angle)
rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
return rotmat
def convert_oriented_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:6]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
trns[0:3, 0:3] = heading2rotmat(box[6])
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
if len(scene_bbox) == 0:
scene_bbox = np.zeros((1, 7))
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to obj file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='obj')
return
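# Sanity check of the convention above (a sketch): with a heading of pi/2,
# heading2rotmat maps the box's long +X extent onto +Y.
#   box = np.array([[0.0, 0.0, 0.0, 2.0, 1.0, 1.0, np.pi / 2]])
#   _write_oriented_bbox(box, '/tmp/box.obj')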
def show_result(points,
gt_bboxes,
pred_bboxes,
out_dir,
filename,
show=False,
snapshot=False,
pred_labels=None):
"""Convert results into format that is directly readable for meshlab.
Args:
points (np.ndarray): Points.
gt_bboxes (np.ndarray): Ground truth boxes.
pred_bboxes (np.ndarray): Predicted boxes.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
show (bool): Visualize the results online. Defaults to False.
snapshot (bool): Whether to save the online results. Defaults to False.
pred_labels (np.ndarray, optional): Predicted labels of the boxes,
used to color them per class. Defaults to None.
"""
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
if show:
from .open3d_vis import Visualizer
vis = Visualizer(points)
if pred_bboxes is not None:
if pred_labels is None:
vis.add_bboxes(bbox3d=pred_bboxes)
else:
palette = np.random.randint(0, 256, size=(pred_labels.max() + 1, 3))
S = {}
for j in range(len(pred_labels)):
i = int(pred_labels[j].numpy())
if S.get(i) is None:
S[i] = []
S[i].append(pred_bboxes[j])
for i in S:
vis.add_bboxes(bbox3d=np.array(S[i]), bbox_color=palette[i], points_in_box_color=palette[i])
if gt_bboxes is not None:
vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
show_path = osp.join(result_path,
f'{filename}_online.png') if snapshot else None
vis.show(show_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_bboxes is not None:
# bottom center to gravity center
gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
# the positive direction for yaw in meshlab is clockwise
gt_bboxes[:, 6] *= -1
_write_oriented_bbox(gt_bboxes,
osp.join(result_path, f'{filename}_gt.obj'))
if pred_bboxes is not None:
# bottom center to gravity center
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
# the positive direction for yaw in meshlab is clockwise
pred_bboxes[:, 6] *= -1
_write_oriented_bbox(pred_bboxes,
osp.join(result_path, f'{filename}_pred.obj'))
def show_seg_result(points,
gt_seg,
pred_seg,
out_dir,
filename,
palette,
ignore_index=None,
show=True,
snapshot=False):
"""Convert results into format that is directly readable for meshlab.
Args:
points (np.ndarray): Points.
gt_seg (np.ndarray): Ground truth segmentation mask.
pred_seg (np.ndarray): Predicted segmentation mask.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
palette (np.ndarray): Mapping between class labels and colors.
ignore_index (int, optional): The label index to be ignored, e.g. \
unannotated points. Defaults to None.
show (bool, optional): Visualize the results online. Defaults to True.
snapshot (bool, optional): Whether to save the online results. \
Defaults to False.
"""
# we need 3D coordinates to visualize segmentation mask
if gt_seg is not None or pred_seg is not None:
assert points is not None, \
'3D coordinates are required for segmentation visualization'
# filter out ignored points
if gt_seg is not None and ignore_index is not None:
if points is not None:
points = points[gt_seg != ignore_index]
if pred_seg is not None:
pred_seg = pred_seg[gt_seg != ignore_index]
gt_seg = gt_seg[gt_seg != ignore_index]
if gt_seg is not None:
gt_seg_color = palette[gt_seg]
gt_seg_color = np.concatenate([points[:, :3], gt_seg_color], axis=1)
if pred_seg is not None:
pred_seg_color = palette[pred_seg]
pred_seg_color = np.concatenate([points[:, :3], pred_seg_color],
axis=1)
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
# online visualization of segmentation mask
# we show three masks in a row, scene_points, gt_mask, pred_mask
if show:
from .open3d_vis import Visualizer
mode = 'xyzrgb' if points.shape[1] == 6 else 'xyz'
vis = Visualizer(points, mode=mode)
if gt_seg is not None:
vis.add_seg_mask(gt_seg_color)
if pred_seg is not None:
vis.add_seg_mask(pred_seg_color)
show_path = osp.join(result_path,
f'{filename}_online.png') if snapshot else None
vis.show(show_path)
if points is not None:
_write_obj(points, osp.join(result_path, f'{filename}_points.obj'))
if gt_seg is not None:
_write_obj(gt_seg_color, osp.join(result_path, f'{filename}_gt.obj'))
if pred_seg is not None:
_write_obj(pred_seg_color, osp.join(result_path,
f'{filename}_pred.obj'))
def show_multi_modality_result(img,
gt_bboxes,
pred_bboxes,
proj_mat,
out_dir,
filename,
box_mode='lidar',
img_metas=None,
show=True,
gt_bbox_color=(61, 102, 255),
pred_bbox_color=(241, 101, 72)):
"""Convert multi-modality detection results into 2D results.
Project the predicted 3D bbox to 2D image plane and visualize them.
Args:
img (np.ndarray): The numpy array of image in cv2 fashion.
gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
pred_bboxes (:obj:`BaseInstance3DBoxes`): Predicted boxes.
proj_mat (numpy.array, shape=[4, 4]): The projection matrix
according to the camera intrinsic parameters.
out_dir (str): Path of output directory.
filename (str): Filename of the current frame.
box_mode (str): Coordinate system the boxes are in. Should be one of
'depth', 'lidar' and 'camera'. Defaults to 'lidar'.
img_metas (dict): Used in projecting depth bbox.
show (bool): Visualize the results online. Defaults to True.
gt_bbox_color (str or tuple(int)): Color of bbox lines.
The tuple of color should be in BGR order. Default: (61, 102, 255)
pred_bbox_color (str or tuple(int)): Color of bbox lines.
The tuple of color should be in BGR order. Default: (241, 101, 72)
"""
if box_mode == 'depth':
draw_bbox = draw_depth_bbox3d_on_img
elif box_mode == 'lidar':
draw_bbox = draw_lidar_bbox3d_on_img
elif box_mode == 'camera':
draw_bbox = draw_camera_bbox3d_on_img
else:
raise NotImplementedError(f'unsupported box mode {box_mode}')
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
if show:
show_img = img.copy()
if gt_bboxes is not None:
show_img = draw_bbox(
gt_bboxes, show_img, proj_mat, img_metas, color=gt_bbox_color)
if pred_bboxes is not None:
show_img = draw_bbox(
pred_bboxes,
show_img,
proj_mat,
img_metas,
color=pred_bbox_color)
mmcv.imshow(show_img, win_name='project_bbox3d_img', wait_time=0)
if img is not None:
mmcv.imwrite(img, osp.join(result_path, f'{filename}_img.png'))
if gt_bboxes is not None:
gt_img = draw_bbox(
gt_bboxes, img, proj_mat, img_metas, color=gt_bbox_color)
mmcv.imwrite(gt_img, osp.join(result_path, f'{filename}_gt.png'))
if pred_bboxes is not None:
pred_img = draw_bbox(
pred_bboxes, img, proj_mat, img_metas, color=pred_bbox_color)
mmcv.imwrite(pred_img, osp.join(result_path, f'{filename}_pred.png'))
|
{"hexsha": "295c28dbd9df24468739ea733a274da046b3d6e4", "size": 10811, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdet3d/core/visualizer/show_result.py", "max_stars_repo_name": "MilkClouds/mmdetection3d", "max_stars_repo_head_hexsha": "772a7fd2a47b081b9445fba03c2f9f537328cb17", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mmdet3d/core/visualizer/show_result.py", "max_issues_repo_name": "MilkClouds/mmdetection3d", "max_issues_repo_head_hexsha": "772a7fd2a47b081b9445fba03c2f9f537328cb17", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mmdet3d/core/visualizer/show_result.py", "max_forks_repo_name": "MilkClouds/mmdetection3d", "max_forks_repo_head_hexsha": "772a7fd2a47b081b9445fba03c2f9f537328cb17", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.0669014085, "max_line_length": 120, "alphanum_fraction": 0.5832947923, "include": true, "reason": "import numpy", "num_tokens": 2560}
|
### tf-nightly==2.5.0-dev20210104
### https://google.github.io/flatbuffers/flatbuffers_guide_tutorial.html
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
import json
import tensorflow.compat.v1 as tf
import tensorflow as tfv2
import shutil
from pathlib import Path
import pprint
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
schema = "schema.fbs"
binary = "./flatc"
model_path = "segm_full_v679_opt.tflite"
output_pb_path = "segm_full_v679_opt.pb"
output_savedmodel_path = "saved_model"
model_json_path = "segm_full_v679_opt.json"
output_node_names = ['segment']
#################################################################
# Change to True when converting to EdgeTPU model.
optimizing_for_edgetpu_flg = False
#################################################################
def gen_model_json():
if not os.path.exists(model_json_path):
cmd = (binary + " -t --strict-json --defaults-json -o . {schema} -- {input}".format(input=model_path, schema=schema))
print("output json command =", cmd)
os.system(cmd)
def parse_json():
j = json.load(open(model_json_path))
op_types = [v['builtin_code'] for v in j['operator_codes']]
print('op types:', op_types)
ops = j['subgraphs'][0]['operators']
print('num of ops:', len(ops))
return ops, op_types
def optimizing_hardswish_for_edgetpu(input_op, name=None):
# hard-swish: x * relu6(x + 3) / 6, with 1/6 written out as a float constant
ret_op = None
if not optimizing_for_edgetpu_flg:
ret_op = input_op * tf.nn.relu6(input_op + 3) * 0.16666667
else:
# slightly smaller constant for the EdgeTPU build (see the flag above)
ret_op = input_op * tf.nn.relu6(input_op + 3) * 0.16666666
return ret_op
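# Quick numeric check of the hard-swish above (a sketch): relu6 clips x + 3 to
# [0, 6], so for x = 3 the output is 3 * 6 * (1/6) = 3, and for x <= -3 it is 0.
# The 0.16666666 constant in the EdgeTPU branch sits just below 1/6, presumably
# so the quantized multiply stays within relu6's upper bound.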
def make_graph(ops, op_types, interpreter):
height = 144
width = 256
tensors = {}
input_details = interpreter.get_input_details()
# output_details = interpreter.get_output_details()
print(input_details)
for input_detail in input_details:
tensors[input_detail['index']] = tf.placeholder(
dtype=input_detail['dtype'],
shape=input_detail['shape'],
name=input_detail['name'])
for op in ops:
print('@@@@@@@@@@@@@@ op:', op)
op_type = op_types[op['opcode_index']]
if op_type == 'CONV_2D':
input_tensor = tensors[op['inputs'][0]]
weights = tensors[op['inputs'][1]].transpose(1,2,3,0)
bias = tensors[op['inputs'][2]]
output_detail = interpreter._get_tensor_details(op['outputs'][0])
options = op['builtin_options']
output_tensor = tf.nn.conv2d(
input_tensor,
weights,
strides=[1, options['stride_h'], options['stride_w'], 1],
padding=options['padding'],
dilations=[
1, options['dilation_h_factor'],
options['dilation_w_factor'], 1
],
name=output_detail['name'] + '/conv2d')
output_tensor = tf.add(
output_tensor, bias, name=output_detail['name'])
if output_detail['name'].split('/')[-1]=='Relu6':
output_tensor = tf.nn.relu6(output_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'DEPTHWISE_CONV_2D':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
weights = tensors[op['inputs'][1]].transpose(1,2,3,0)
bias = tensors[op['inputs'][2]]
options = op['builtin_options']
output_tensor = tf.nn.depthwise_conv2d(
input_tensor,
weights,
strides=[1, options['stride_h'], options['stride_w'], 1],
padding=options['padding'],
# dilations=[1, options['dilation_h_factor'], options['dilation_w_factor'], 1],
name=output_detail['name'] + '/depthwise_conv2d')
output_tensor = tf.add(output_tensor, bias, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'MAX_POOL_2D':
input_tensor = tensors[op['inputs'][0]]
output_detail = interpreter._get_tensor_details(op['outputs'][0])
options = op['builtin_options']
output_tensor = tf.nn.max_pool(
input_tensor,
ksize=[
1, options['filter_height'], options['filter_width'], 1
],
strides=[1, options['stride_h'], options['stride_w'], 1],
padding=options['padding'],
name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'PAD':
input_tensor = tensors[op['inputs'][0]]
output_detail = interpreter._get_tensor_details(op['outputs'][0])
paddings_detail = interpreter._get_tensor_details(op['inputs'][1])
paddings_array = interpreter.get_tensor(paddings_detail['index'])
paddings = tf.Variable(
paddings_array, name=paddings_detail['name'])
output_tensor = tf.pad(
input_tensor, paddings, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'RELU':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
output_tensor = tf.nn.relu(input_tensor, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'PRELU':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
alpha_detail = interpreter._get_tensor_details(op['inputs'][1])
alpha_array = interpreter.get_tensor(alpha_detail['index'])
with tf.variable_scope(name_or_scope=output_detail['name']):
alphas = tf.Variable(alpha_array, name=alpha_detail['name'])
output_tensor = tf.maximum(alphas * input_tensor, input_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'RELU6':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
output_tensor = tf.nn.relu6(input_tensor, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'RESHAPE':
input_tensor = tensors[op['inputs'][0]]
output_detail = interpreter._get_tensor_details(op['outputs'][0])
options = op['builtin_options']
output_tensor = tf.reshape(input_tensor, options['new_shape'], name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'ADD':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor_0 = tensors[op['inputs'][0]]
try:
input_tensor_1 = tensors[op['inputs'][1]]
except KeyError: #second input is a constant tensor rather than a previous op's output
param = interpreter._get_tensor_details(op['inputs'][1])
input_tensor_1 = interpreter.get_tensor(param['index'])
output_tensor = tf.add(input_tensor_0, input_tensor_1, name=output_detail['name'])
if output_detail['name'].split('/')[-1]=='Relu6':
output_tensor = tf.nn.relu6(output_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'CONCATENATION':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor_0 = tensors[op['inputs'][0]]
input_tensor_1 = tensors[op['inputs'][1]]
try:
input_tensor_2 = tensors[op['inputs'][2]]
options = op['builtin_options']
output_tensor = tf.concat([input_tensor_0, input_tensor_1, input_tensor_2],
options['axis'],
name=output_detail['name'])
except (IndexError, KeyError): #only two tensors to concatenate
options = op['builtin_options']
output_tensor = tf.concat([input_tensor_0, input_tensor_1],
options['axis'],
name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'LOGISTIC':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
output_tensor = tf.math.sigmoid(input_tensor, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'TRANSPOSE_CONV':
input_tensor = tensors[op['inputs'][2]]
weights_detail = interpreter._get_tensor_details(op['inputs'][1])
output_shape_detail = interpreter._get_tensor_details(op['inputs'][0])
output_detail = interpreter._get_tensor_details(op['outputs'][0])
weights_array = interpreter.get_tensor(weights_detail['index'])
weights_array = np.transpose(weights_array, (1, 2, 0, 3))
output_shape_array = interpreter.get_tensor(output_shape_detail['index'])
weights = tf.Variable(weights_array, name=weights_detail['name'])
shape = tf.Variable(output_shape_array, name=output_shape_detail['name'])
options = op['builtin_options']
output_tensor = tf.nn.conv2d_transpose(input_tensor,
weights,
shape,
[1, options['stride_h'], options['stride_w'], 1],
padding=options['padding'],
name=output_detail['name'] + '/conv2d_transpose')
tensors[output_detail['index']] = output_tensor
elif op_type == 'MUL':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor_0 = tensors[op['inputs'][0]]
input_tensor_1 = None
try:
input_tensor_1 = tensors[op['inputs'][1]]
except KeyError: #second input is a constant tensor rather than a previous op's output
param = interpreter._get_tensor_details(op['inputs'][1])
input_tensor_1 = interpreter.get_tensor(param['index'])
output_tensor = tf.multiply(input_tensor_0, input_tensor_1, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'HARD_SWISH':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
output_tensor = optimizing_hardswish_for_edgetpu(input_tensor, name=output_detail['name'])
tensors[output_detail['index']] = output_tensor
elif op_type == 'AVERAGE_POOL_2D':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
options = op['builtin_options']
pool_size = [options['filter_height'], options['filter_width']]
strides = [options['stride_h'], options['stride_w']]
padding = options['padding']
output_tensor = tf.keras.layers.AveragePooling2D(pool_size=pool_size,
strides=strides,
padding=padding,
name=output_detail['name'])(input_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'FULLY_CONNECTED':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
weights = tensors[op['inputs'][1]].transpose(1,0)
bias = tensors[op['inputs'][2]]
output_shape_detail = interpreter._get_tensor_details(op['inputs'][0])
output_shape_array = interpreter.get_tensor(output_shape_detail['index'])
output_tensor = tf.keras.layers.Dense(units=output_shape_array.shape[3],
use_bias=True,
kernel_initializer=tf.keras.initializers.Constant(weights),
bias_initializer=tf.keras.initializers.Constant(bias))(input_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'RESIZE_BILINEAR':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
input_tensor = tensors[op['inputs'][0]]
size_detail = interpreter._get_tensor_details(op['inputs'][1])
size = interpreter.get_tensor(size_detail['index'])
size_height = size[0]
size_width = size[1]
def upsampling2d_bilinear(x, size_height, size_width):
if optimizing_for_edgetpu_flg:
return tf.image.resize_bilinear(x, (size_height, size_width))
else:
return tfv2.image.resize(x, [size_height, size_width], method='bilinear')
output_tensor = tf.keras.layers.Lambda(upsampling2d_bilinear, arguments={'size_height': size_height, 'size_width': size_width})(input_tensor)
tensors[output_detail['index']] = output_tensor
elif op_type == 'DEQUANTIZE':
output_detail = interpreter._get_tensor_details(op['outputs'][0])
weights_detail = interpreter._get_tensor_details(op['inputs'][0])
weights = interpreter.get_tensor(weights_detail['index'])
output_tensor = weights.astype(np.float32)
tensors[output_detail['index']] = output_tensor
else:
raise ValueError(op_type)
# Convolution2DTransposeBias (custom TFLite op): rebuilt manually from exported weights
input_tensor = tensors[241] #hard-coded tensor index of the last op's output in this particular model
weights = np.load('weights/segment_Kernel').transpose(1,2,0,3).astype(np.float32)
bias = np.load('weights/segment_Bias').astype(np.float32)
custom_trans = tf.nn.conv2d_transpose(input=input_tensor,
filters=weights,
output_shape=[1, height, width, 2],
strides=[2, 2],
padding='SAME',
dilations=[1, 1])
output_tensor = tf.math.add(custom_trans, bias, name='segment')
tensors[999] = output_tensor
def main():
tf.disable_eager_execution()
gen_model_json()
ops, op_types = parse_json()
interpreter = tf.lite.Interpreter(model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
make_graph(ops, op_types, interpreter)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
graph = tf.get_default_graph()
with tf.Session(config=config, graph=graph) as sess:
sess.run(tf.global_variables_initializer())
graph_def = tf.graph_util.convert_variables_to_constants(
sess=sess,
input_graph_def=graph.as_graph_def(),
output_node_names=output_node_names)
with tf.io.gfile.GFile(output_pb_path, 'wb') as f:
f.write(graph_def.SerializeToString())
shutil.rmtree(output_savedmodel_path, ignore_errors=True)
tf.saved_model.simple_save(
sess,
output_savedmodel_path,
inputs={'input_1': graph.get_tensor_by_name('input_1:0')},
outputs={'segment': graph.get_tensor_by_name('segment:0')}
)
converter = tfv2.lite.TFLiteConverter.from_saved_model(output_savedmodel_path)
converter.target_spec.supported_ops = [tfv2.lite.OpsSet.TFLITE_BUILTINS, tfv2.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
with open(f'{output_savedmodel_path}/model_float32.tflite', 'wb') as w:
w.write(tflite_model)
if __name__ == '__main__':
main()
"""
$ saved_model_cli show --dir saved_model --all
MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs:
signature_def['serving_default']:
The given SavedModel SignatureDef contains the following input(s):
inputs['input_1'] tensor_info:
dtype: DT_FLOAT
shape: (1, 144, 256, 3)
name: input_1:0
The given SavedModel SignatureDef contains the following output(s):
outputs['segment'] tensor_info:
dtype: DT_FLOAT
shape: (1, 144, 256, 2)
name: segment:0
Method name is: tensorflow/serving/predict
"""
|
{"hexsha": "71ad88189177ea071e54978b3e39b0f56406eabf", "size": 16832, "ext": "py", "lang": "Python", "max_stars_repo_path": "082_MediaPipe_Meet_Segmentation/02_segm_full_v679_tflite_to_pb_saved_model.py", "max_stars_repo_name": "IgiArdiyanto/PINTO_model_zoo", "max_stars_repo_head_hexsha": "9247b56a7dff37f28a8a7822a7ef4dd9adf7234d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1529, "max_stars_repo_stars_event_min_datetime": "2019-12-11T13:36:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T18:38:27.000Z", "max_issues_repo_path": "082_MediaPipe_Meet_Segmentation/02_segm_full_v679_tflite_to_pb_saved_model.py", "max_issues_repo_name": "IgiArdiyanto/PINTO_model_zoo", "max_issues_repo_head_hexsha": "9247b56a7dff37f28a8a7822a7ef4dd9adf7234d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 200, "max_issues_repo_issues_event_min_datetime": "2020-01-06T09:24:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:29:08.000Z", "max_forks_repo_path": "082_MediaPipe_Meet_Segmentation/02_segm_full_v679_tflite_to_pb_saved_model.py", "max_forks_repo_name": "IgiArdiyanto/PINTO_model_zoo", "max_forks_repo_head_hexsha": "9247b56a7dff37f28a8a7822a7ef4dd9adf7234d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 288, "max_forks_repo_forks_event_min_datetime": "2020-02-21T14:56:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T03:00:35.000Z", "avg_line_length": 47.414084507, "max_line_length": 153, "alphanum_fraction": 0.5915518061, "include": true, "reason": "import numpy", "num_tokens": 3493}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from datetime import datetime
from typing import List
import logging
def get_futures_chain(meta_data: pd.DataFrame, asofdate: datetime.date) -> pd.DataFrame:
"""
get current futures chain on asofdate
:param meta_data: dataframe: actual futures ==> last trading day
:param asofdate: datetime.date
:return: dataframe non-expired actual futures ==> last trading day
"""
# searchsorted searches the first one >= the given;
# if the given is the biggest, return last_idx+1 (consistent with range)
# if the given is the smallest, return 0 (NOT -1)
dateidx = meta_data['Last_Trade_Date'].searchsorted(asofdate)
return meta_data.iloc[dateidx:]
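# searchsorted behaviour demonstrated (a sketch with made-up dates):
#   s = pd.Series(pd.to_datetime(['2019-01-17', '2019-02-20', '2019-03-20']))
#   s.searchsorted(pd.Timestamp('2019-02-20')) # -> 1, first element >= the given
#   s.searchsorted(pd.Timestamp('2019-04-01')) # -> 3, i.e. last_idx + 1
#   s.searchsorted(pd.Timestamp('2018-12-31')) # -> 0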
def get_futures_generic_ticker(futures_chain: pd.DataFrame, futures: str) -> str:
"""
get the generic ticker, e.g., NGZ19 ==> NG1
:param futures_chain: pd.DataFrame indexed by actual tickers (e.g. NGZ19); the futures chain on asofdate
:param futures: actual futures
:return: generic futures
"""
contract_idx = list(futures_chain.index).index(futures)
return futures[:-3] + str(contract_idx+1)
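# Example (a sketch): if futures_chain is indexed ['NGZ19', 'NGF20', 'NGG20'], then
# get_futures_generic_ticker(futures_chain, 'NGF20') returns 'NG2';
# futures[:-3] strips the three-character month/year suffix.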
def get_futures_actual_ticker(futures_chain: pd.DataFrame, generic_ticker: str) -> str:
"""
get the actual ticker, e.g., NG1 ==> NGZ19
:param futures_chain: futures chain on asofdate
:param generic_ticker: generic ticker
:return: actual ticker
"""
if generic_ticker[-2].isdigit():
contract_idx = int(generic_ticker[-2:]) - 1
else:
contract_idx = int(generic_ticker[-1]) - 1
return futures_chain.index[contract_idx]
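# Example (a sketch): 'NG2' -> futures_chain.index[1]; two-digit generics such as
# 'NG12' take the isdigit() branch -> futures_chain.index[11].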
def get_generic_futures_hist_data(actual_futures_hist_data: pd.DataFrame, meta_data: pd.DataFrame) -> pd.DataFrame:
"""
. . . . . . . .
. . . . . . . .
. . . . . . . . <- roll_idx_old
. . . . . . . <- roll_idx_previous = roll_idx_old + 1
. . . . . . .
. . . . . . . <- roll_idx_new
. . . . . .
. . . . . .
. . . . . . <- asofdate/dateidx
construct generic futures hist data from actual futures hist data.
It assumes rolling on the last trading day, stitching together PRICE series
iteratively between roll_idx_previous and roll_idx
:param actual_futures_hist_data: dataframe, index is date, column is futures
:param meta_data: dataframe: actual futures ==> last_trading_day
:return: dataframe: index is date, column is generic futures
"""
if ':' not in actual_futures_hist_data.columns[0]:
root_sym = actual_futures_hist_data.columns[0][:-5]
else: # inter-comdty spread
root_sym_idx = actual_futures_hist_data.columns[0].rfind(':')
root_sym = actual_futures_hist_data.columns[0][:(root_sym_idx+1)]
asofdate = actual_futures_hist_data.index[-1]
dateidx = meta_data['Last_Trade_Date'].searchsorted(asofdate) # first non-expired contract
n_contracts = min(60, meta_data[dateidx:].shape[0])
generic_data_df = pd.DataFrame()
roll_idx_previous = 0
try:
for idx in range(dateidx+1): # contracts expired up to first non-expired included
# cut at this point, between here and previous cut, they are the 60 generic contracts for this date range
roll_idx = actual_futures_hist_data.index.searchsorted(meta_data['Last_Trade_Date'].iloc[idx]) # first expired contract, last trade date
try:
# slice ends at roll_idx+1; the loop range needs dateidx+1 to include the first non-expired contract
actual_contracts = meta_data.index[idx:n_contracts+idx]
actual_contracts_existed = set(actual_contracts).intersection(set(actual_futures_hist_data.columns))
actual_contracts_non_existed = set(actual_contracts).difference(set(actual_futures_hist_data.columns))
if len(actual_contracts_existed) == 0: # no contract existed
continue
else:
temp_df = actual_futures_hist_data.iloc[roll_idx_previous:roll_idx+1][list(actual_contracts_existed)]
if len(actual_contracts_non_existed) > 0:
empty_temp_df = pd.DataFrame(np.nan, temp_df.index, columns=list(actual_contracts_non_existed))
temp_df = pd.concat([temp_df, empty_temp_df], axis=1, join='outer', sort=True)
temp_df = temp_df[list(actual_contracts)]
temp_df.columns = [root_sym+str(c+1) for c in range(n_contracts)]
generic_data_df = generic_data_df.append(temp_df)
except Exception:
logging.error(root_sym + ' generic error')
roll_idx_previous = roll_idx+1
except Exception: #ran past the available contracts/dates
pass
return generic_data_df
def get_seasonal_contracts(futures_asofdate: pd.Timestamp, contracts: List[str], weights: List[int], hist_data: pd.DataFrame, meta_data: pd.DataFrame) -> pd.DataFrame:
"""
return seasonal series
:param hist_data:
:param meta_data:
:param contracts: outright, curve, fly, e.g. ['CLH2021', 'CLM2021'] as of 12/1/2019
:param weights: matches contracts, e.g. [-1, 1]
:return: dataframe
"""
# go back year by year until first leg expires, e.g. on 2/20/2019 CLH2019 expired
# then find the total business days to its expiry
# (anchor_day, anchor_contract) pair is (asofdate, first leg contract) pair going back yrs_back
# e.g. (12/1/2018, CLH2020), (12/1/2017, CLH2019), ....,
# the first one is not complete/ not yet expired as of 12/1/2019, while the second one is complete/expired.
yrs_back = 0
anchor_days = []
anchor_contracts = []
anchor_days.append(futures_asofdate) # 12/1/2019
anchor_contracts.append(contracts[0]) # CLH2021
last_complete_yr = None
while True:
try:
anchor_contract = str(int(anchor_contracts[-1][-2:]) - 1)
if len(anchor_contract) == 1:
anchor_contract = '0' + anchor_contract # CLH10 ==> CLH09 padding 0
anchor_contract = anchor_contracts[-1][:-2] + anchor_contract
anchor_day = anchor_days[-1]
anchor_day = anchor_day.replace(year=anchor_day.year - 1)
anchor_day = hist_data.index[hist_data.index.searchsorted(anchor_day)]
if anchor_day <= hist_data.index[0]: # run out of hist data
break
anchor_days.append(anchor_day) # 12/1/2019, add 12/1/2018, 12/1/2017 (complete), ...
anchor_contracts.append(anchor_contract) # CLH2021, add CLH2020, CLH2019 (complete), ...
yrs_back -= 1
if meta_data.loc[anchor_contracts[-1], 'Last_Trade_Date'] < futures_asofdate and last_complete_yr is None:
last_complete_yr = yrs_back # in this case, -2
except Exception: #ran out of history or metadata for earlier contracts
break
s = pd.DataFrame()
final_index = None
for i in range(len(anchor_days)):
anchor_day = anchor_days[i]
c1 = str(int(contracts[0][-2:]) - i)
if len(c1) == 1:
c1 = '0' + c1
c1 = contracts[0][:-2] + c1
s1 = hist_data[c1]
if (len(contracts) > 1):
c2 = str(int(contracts[1][-2:]) - i)
if len(c2) == 1:
c2 = '0' + c2
c2 = contracts[1][:-2] + c2
s2 = hist_data[c2]
if (len(contracts) > 2):
c3 = str(int(contracts[2][-2:]) - i)
if len(c3) == 1:
c3 = '0' + c3
c3 = contracts[2][:-2] + c3
s3 = hist_data[c3]
combo = s1 * weights[0] + s2 * weights[1] + s3 * weights[2]
combo.name = c1 + '-' + c2 + '-' + c3
else:
combo = s1 * weights[0] + s2 * weights[1]
combo.name = c1 + '-' + c2
else:
combo = s1 * weights[0]
combo.name = c1
j = abs(last_complete_yr) # j=2
if i < j: # when i=0, (12/1/2019, CLH2021) is not complete or expired, i=1, (12/1/2018, CLH2020) is not complete or expired ==> need to append nan
anchor_day_j = anchor_days[j - i] # when i=0, days_to_go between (12/1/2019, CLH21)==(12/1/2017, CLH19), when i=1, days_to_go between (12/1/2018, CLH20)==(12/1/2017, CLH19)
anchor_contract_j = anchor_contracts[j]
days_to_go = hist_data.index.searchsorted(meta_data.loc[anchor_contract_j, 'Last_Trade_Date']) \
- hist_data.index.searchsorted(anchor_day_j) # last_trade_date(CLH19) - 12/1/2017
# append nan to days_to_go, asofdate should be day days_to_go, there are 0..(days_to_go-1) days with NaN to complete
s_to_go = pd.Series(np.zeros(days_to_go) + np.nan)
s_to_go.name = combo.name
combo.index = range(combo.shape[0] - 1 + days_to_go, days_to_go - 1, -1) # n-1, n-2, ..., 0
combo = combo.append(s_to_go)
combo.sort_index(inplace=True) # index by days_to_go, or last day first, to facilitate slicing
else: # when i=2, (12/1/2017, CLH19) is complete
last_day_1 = meta_data.loc[c1, 'Last_Trade_Date']
combo = combo.loc[:last_day_1]
if (i == j):
final_index = combo.index # the index for all seasonal series
combo.index = range(combo.shape[0] - 1, -1, -1) # n-1, n-2, ..., 0
s = pd.concat([s, combo], axis=1)
s = s[:len(final_index)] # cut off s in order to attach final_index
s.index = final_index.sort_values(ascending=False) # reverse final_index and attach
s.sort_index(inplace=True)
return s
|
{"hexsha": "22e40b6c0f02b47113510502c8badf108c4081af", "size": 10057, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/futures_tools.py", "max_stars_repo_name": "Velocities/QuantResearch", "max_stars_repo_head_hexsha": "2435cf2d109a32c7cff51263bcd7d20ac4874d37", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 623, "max_stars_repo_stars_event_min_datetime": "2020-07-11T04:28:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T03:30:16.000Z", "max_issues_repo_path": "utils/futures_tools.py", "max_issues_repo_name": "clemaxique/QuantResearch", "max_issues_repo_head_hexsha": "d61cfe71f7decf564cceb98aa9715563a1cacb78", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-09-04T14:18:38.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-24T17:25:14.000Z", "max_forks_repo_path": "utils/futures_tools.py", "max_forks_repo_name": "clemaxique/QuantResearch", "max_forks_repo_head_hexsha": "d61cfe71f7decf564cceb98aa9715563a1cacb78", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 155, "max_forks_repo_forks_event_min_datetime": "2020-07-11T21:57:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-22T12:55:13.000Z", "avg_line_length": 48.1196172249, "max_line_length": 194, "alphanum_fraction": 0.599383514, "include": true, "reason": "import numpy", "num_tokens": 2702}
|
/-
Copyright (c) 2018 Andreas Swerdlow. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Andreas Swerdlow
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.module.basic
import Mathlib.ring_theory.ring_invo
import Mathlib.PostPort
universes u v l u_1 u_2
namespace Mathlib
/-!
# Sesquilinear form
This file defines a sesquilinear form over a module. The definition requires a ring antiautomorphism
on the scalar ring. Basic ideas such as
orthogonality are also introduced.
A sesquilinear form on an `R`-module `M`, is a function from `M × M` to `R`, that is linear in the
first argument and antilinear in the second, with respect to an antiautomorphism on `R` (an
antiisomorphism from `R` to `R`).
## Notations
Given any term `S` of type `sesq_form`, one can, due to a coercion, use the notation `S x y` to
refer to the function field, i.e. `S x y = S.sesq x y`.
## References
* <https://en.wikipedia.org/wiki/Sesquilinear_form#Over_arbitrary_rings>
## Tags
Sesquilinear form
-/
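/- A motivating example (an informal sketch, not part of the original file): over `ℂ`,
taking `I` to be the ring antiautomorphism induced by complex conjugation, the map
`(x, y) ↦ ∑ i, x i * conj (y i)` on `ℂⁿ` is a sesquilinear form: it is additive and
`ℂ`-linear in `x`, and additive but conjugate-linear in `y`. -/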
/-- A sesquilinear form over a module -/
structure sesq_form (R : Type u) (M : Type v) [ring R] (I : R ≃+* (Rᵒᵖ)) [add_comm_group M] [module R M]
where
sesq : M → M → R
sesq_add_left : ∀ (x y z : M), sesq (x + y) z = sesq x z + sesq y z
sesq_smul_left : ∀ (a : R) (x y : M), sesq (a • x) y = a * sesq x y
sesq_add_right : ∀ (x y z : M), sesq x (y + z) = sesq x y + sesq x z
sesq_smul_right : ∀ (a : R) (x y : M), sesq x (a • y) = opposite.unop (coe_fn I a) * sesq x y
namespace sesq_form
protected instance has_coe_to_fun {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} : has_coe_to_fun (sesq_form R M I) :=
has_coe_to_fun.mk (fun (S : sesq_form R M I) => M → M → R) fun (S : sesq_form R M I) => sesq S
theorem add_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) (z : M) : coe_fn S (x + y) z = coe_fn S x z + coe_fn S y z :=
sesq_add_left S x y z
theorem smul_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (a : R) (x : M) (y : M) : coe_fn S (a • x) y = a * coe_fn S x y :=
sesq_smul_left S a x y
theorem add_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) (z : M) : coe_fn S x (y + z) = coe_fn S x y + coe_fn S x z :=
sesq_add_right S x y z
theorem smul_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (a : R) (x : M) (y : M) : coe_fn S x (a • y) = opposite.unop (coe_fn I a) * coe_fn S x y :=
sesq_smul_right S a x y
theorem zero_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) : coe_fn S 0 x = 0 :=
eq.mpr (id (Eq._oldrec (Eq.refl (coe_fn S 0 x = 0)) (Eq.symm (zero_smul R 0))))
(eq.mpr (id (Eq._oldrec (Eq.refl (coe_fn S (0 • 0) x = 0)) (smul_left 0 0 x)))
(eq.mpr (id (Eq._oldrec (Eq.refl (0 * coe_fn S 0 x = 0)) (zero_mul (coe_fn S 0 x)))) (Eq.refl 0)))
theorem zero_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) : coe_fn S x 0 = 0 := sorry
theorem neg_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) : coe_fn S (-x) y = -coe_fn S x y := sorry
theorem neg_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) : coe_fn S x (-y) = -coe_fn S x y := sorry
theorem sub_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) (z : M) : coe_fn S (x - y) z = coe_fn S x z - coe_fn S y z := sorry
theorem sub_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) (y : M) (z : M) : coe_fn S x (y - z) = coe_fn S x y - coe_fn S x z := sorry
theorem ext {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} {D : sesq_form R M I} (H : ∀ (x y : M), coe_fn S x y = coe_fn D x y) : S = D := sorry
protected instance add_comm_group {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} : add_comm_group (sesq_form R M I) :=
add_comm_group.mk
(fun (S D : sesq_form R M I) => mk (fun (x y : M) => coe_fn S x y + coe_fn D x y) sorry sorry sorry sorry) sorry
(mk (fun (x y : M) => 0) sorry sorry sorry sorry) sorry sorry
(fun (S : sesq_form R M I) => mk (fun (x y : M) => -sesq S x y) sorry sorry sorry sorry)
(add_group.sub._default
(fun (S D : sesq_form R M I) => mk (fun (x y : M) => coe_fn S x y + coe_fn D x y) sorry sorry sorry sorry) sorry
(mk (fun (x y : M) => 0) sorry sorry sorry sorry) sorry sorry
fun (S : sesq_form R M I) => mk (fun (x y : M) => -sesq S x y) sorry sorry sorry sorry)
sorry sorry
protected instance inhabited {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} : Inhabited (sesq_form R M I) :=
{ default := 0 }
/-- The proposition that two elements of a sesquilinear form space are orthogonal -/
def is_ortho {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) (x : M) (y : M) :=
coe_fn S x y = 0
theorem ortho_zero {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (x : M) : is_ortho S 0 x :=
zero_left x
theorem is_add_monoid_hom_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) (x : M) : is_add_monoid_hom fun (z : M) => coe_fn S z x :=
is_add_monoid_hom.mk (zero_left x)
theorem is_add_monoid_hom_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) (x : M) : is_add_monoid_hom fun (z : M) => coe_fn S x z :=
is_add_monoid_hom.mk (zero_right x)
theorem map_sum_left {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {α : Type u_1} (S : sesq_form R M I) (t : finset α) (g : α → M) (w : M) : coe_fn S (finset.sum t fun (i : α) => g i) w = finset.sum t fun (i : α) => coe_fn S (g i) w :=
Eq.symm (finset.sum_hom t fun (z : M) => coe_fn S z w)
theorem map_sum_right {R : Type u} {M : Type v} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {α : Type u_1} (S : sesq_form R M I) (t : finset α) (g : α → M) (w : M) : coe_fn S w (finset.sum t fun (i : α) => g i) = finset.sum t fun (i : α) => coe_fn S w (g i) :=
Eq.symm (finset.sum_hom t fun (z : M) => coe_fn S w z)
protected instance to_module {R : Type u_1} [comm_ring R] {M : Type v} [add_comm_group M] [module R M] {J : R ≃+* (Rᵒᵖ)} : module R (sesq_form R M J) :=
semimodule.mk sorry sorry
theorem ortho_smul_left {R : Type u_1} [domain R] {M : Type v} [add_comm_group M] [module R M] {K : R ≃+* (Rᵒᵖ)} {G : sesq_form R M K} {x : M} {y : M} {a : R} (ha : a ≠ 0) : is_ortho G x y ↔ is_ortho G (a • x) y := sorry
theorem ortho_smul_right {R : Type u_1} [domain R] {M : Type v} [add_comm_group M] [module R M] {K : R ≃+* (Rᵒᵖ)} {G : sesq_form R M K} {x : M} {y : M} {a : R} (ha : a ≠ 0) : is_ortho G x y ↔ is_ortho G x (a • y) := sorry
end sesq_form
namespace refl_sesq_form
/-- The proposition that a sesquilinear form is reflexive -/
def is_refl {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) :=
∀ (x y : M), coe_fn S x y = 0 → coe_fn S y x = 0
theorem eq_zero {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_refl S) {x : M} {y : M} : coe_fn S x y = 0 → coe_fn S y x = 0 :=
H x y
theorem ortho_sym {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_refl S) {x : M} {y : M} : sesq_form.is_ortho S x y ↔ sesq_form.is_ortho S y x :=
{ mp := eq_zero H, mpr := eq_zero H }
end refl_sesq_form
namespace sym_sesq_form
/-- The proposition that a sesquilinear form is symmetric -/
def is_sym {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) :=
∀ (x y : M), opposite.unop (coe_fn I (coe_fn S x y)) = coe_fn S y x
theorem sym {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_sym S) (x : M) (y : M) : opposite.unop (coe_fn I (coe_fn S x y)) = coe_fn S y x :=
H x y
theorem is_refl {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_sym S) : refl_sesq_form.is_refl S := sorry
theorem ortho_sym {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_sym S) {x : M} {y : M} : sesq_form.is_ortho S x y ↔ sesq_form.is_ortho S y x :=
refl_sesq_form.ortho_sym (is_refl H)
end sym_sesq_form
namespace alt_sesq_form
/-- The proposition that a sesquilinear form is alternating -/
def is_alt {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} (S : sesq_form R M I) :=
∀ (x : M), coe_fn S x x = 0
theorem self_eq_zero {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_alt S) (x : M) : coe_fn S x x = 0 :=
H x
theorem neg {R : Type u_1} {M : Type u_2} [ring R] [add_comm_group M] [module R M] {I : R ≃+* (Rᵒᵖ)} {S : sesq_form R M I} (H : is_alt S) (x : M) (y : M) : -coe_fn S x y = coe_fn S y x := sorry
|
{"author": "AurelienSaue", "repo": "Mathlib4_auto", "sha": "590df64109b08190abe22358fabc3eae000943f2", "save_path": "github-repos/lean/AurelienSaue-Mathlib4_auto", "path": "github-repos/lean/AurelienSaue-Mathlib4_auto/Mathlib4_auto-590df64109b08190abe22358fabc3eae000943f2/Mathlib/linear_algebra/sesquilinear_form.lean"}
|
import numpy as np
class Zeros:
def __call__(self, shape):
return np.zeros(shape)
class Ones:
def __call__(self, shape):
return np.ones(shape)
class RandomNormal:
def __init__(self, mean=0.0, sd=1.0, scale=0.01):
self.mean = mean
self.sd = sd
self.scale = scale # stored but currently unused; `sd` sets the spread below
def __call__(self, shape):
return np.random.normal(loc=self.mean, scale=self.sd, size=shape)
class GlorotUniform:
def __init__(self, constant=6.0, random_state=None):
self.constant = constant
self.random_state = random_state
def __call__(self, shape):
np.random.seed(self.random_state)
sd = np.sqrt(self.constant / (shape[0] + shape[1]))
ar = np.random.uniform(-sd, sd, (shape[0], shape[1]))
return ar
aliases = {
'zeros': Zeros(),
'ones': Ones(),
'glorot_uniform': GlorotUniform(),
'random_normal': RandomNormal()
}
def get(initializer):
if isinstance(initializer, str):
return aliases[initializer]
elif callable(initializer):
return initializer
else:
raise ValueError('Parameter type not understood')
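# Example usage (a sketch; the shapes are arbitrary):
#   init = get('glorot_uniform')
#   W = init((64, 32)) # uniform in [-sd, sd] with sd = sqrt(6 / (64 + 32))
#   b = get('zeros')((32,))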
|
{"hexsha": "4e35f2b943c625f544ddd2ea9482f64a69194887", "size": 1155, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepynets/initializers.py", "max_stars_repo_name": "akarsh-saxena/DeePyNets", "max_stars_repo_head_hexsha": "b7ea3687530305ccbf83a7374b7ccd4164489009", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-12-22T09:53:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T14:43:02.000Z", "max_issues_repo_path": "deepynets/initializers.py", "max_issues_repo_name": "akarsh-saxena/DeePyNets", "max_issues_repo_head_hexsha": "b7ea3687530305ccbf83a7374b7ccd4164489009", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepynets/initializers.py", "max_forks_repo_name": "akarsh-saxena/DeePyNets", "max_forks_repo_head_hexsha": "b7ea3687530305ccbf83a7374b7ccd4164489009", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.625, "max_line_length": 73, "alphanum_fraction": 0.6251082251, "include": true, "reason": "import numpy", "num_tokens": 289}
|
## Coordinates
#==========================================================================================#
wing_bounds(lead, trail) = permutedims([ lead trail ])
chop_leading_edge(obj :: HalfWing, span_num; y_flip = false) = chop_coordinates(leading_edge(obj, y_flip), span_num)
chop_trailing_edge(obj :: HalfWing, span_num; y_flip = false) = chop_coordinates(trailing_edge(obj, y_flip), span_num)
chop_leading_edge(obj :: Wing, span_num :: Integer) = chop_coordinates([ leading_edge(left(obj), true)[1:end-1]; leading_edge(right(obj)) ], span_num)
chop_trailing_edge(obj :: Wing, span_num :: Integer) = chop_coordinates([ trailing_edge(left(obj), true)[1:end-1]; trailing_edge(right(obj)) ], span_num)
coordinates(wing :: HalfWing, y_flip = false) = let (lead, trail) = wing_bounds(wing, y_flip); affine_transformation(wing).(wing_bounds(lead, trail)) end
coordinates(wing :: Wing) = let (lead, trail) = wing_bounds(wing); affine_transformation(wing).(wing_bounds(lead, trail)) end
chord_coordinates(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false) = chop_wing(coordinates(wing, flip), span_num, chord_num; span_spacing = spacings, flip = flip)
function camber_coordinates(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false)
leading_xyz = leading_edge(wing, flip)
scaled_foils = @. wing.chords * (camber_coordinates ∘ camber_thickness)(wing.foils, chord_num)
affine_transformation(wing).(chop_spanwise_sections(scaled_foils, twists(wing), leading_xyz, span_num, spacings, flip))
end
function surface_coordinates(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false)
leading_xyz = leading_edge(wing, flip)
scaled_foils = @. wing.chords * (extend_yz ∘ coordinates ∘ cosine_interpolation)(wing.foils, chord_num)
affine_transformation(wing).(chop_spanwise_sections(scaled_foils, twists(wing), leading_xyz, span_num, spacings, flip))
end
function number_of_spanwise_panels(wing :: HalfWing, span_num :: Integer)
# Compute contribution of each section to total span length
weights = spans(wing) / span(wing)
weights[findall(<(0.2), weights)] .*= 3
# Heuristic check to ensure small sections also receive a reasonable number of panels
# (alternative: weights = ifelse(any(<(0.2), weights), fill(1. / length(spans(wing)), length(spans(wing))), weights))
# Generate spanwise panel distribution
ceil.(Int, span_num .* weights)
end
function number_of_spanwise_panels(wing :: HalfWing, span_num :: Vector{<: Integer})
@assert (length ∘ spans)(wing) > 1 "Provide a positive integer of spanwise panels for 1 wing section."
span_num
end
# Spacing
symmetric_spacing(wing :: HalfWing) = [ Sine(); fill(Cosine(), (length ∘ spans)(wing) - 1) ]
## Wing variants
#==========================================================================================#
function chord_coordinates(wing :: Wing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing))
left_coord = chord_coordinates(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_coord = chord_coordinates(right(wing), span_num, chord_num; spacings = spacings)
[ left_coord[:,1:end-1] right_coord ]
end
function camber_coordinates(wing :: Wing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing))
left_coord = camber_coordinates(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_coord = camber_coordinates(right(wing), span_num, chord_num; spacings = spacings)
[ left_coord[:,1:end-1] right_coord ]
end
function surface_coordinates(wing :: Wing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing))
left_coord = surface_coordinates(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_coord = surface_coordinates(right(wing), span_num, chord_num; spacings = spacings)
[ left_coord[:,1:end-1] right_coord ]
end
number_of_spanwise_panels(wing :: Wing, span_num :: Integer) = number_of_spanwise_panels(right(wing), span_num ÷ 2)
symmetric_spacing(wing :: Wing) = [ Sine(); fill(Cosine(), (length ∘ spans ∘ right)(wing) - 1) ]
function number_of_spanwise_panels(wing :: Wing, span_num :: Vector{<: Integer})
@assert (length ∘ spans ∘ right)(wing) > 1 "For a wing with a single spanwise section, provide the number of spanwise panels as a positive integer instead of a vector."
span_num .÷ 2
end
# Coordinates
chord_coordinates(wing :: Wing, span_num :: Integer, chord_num :: Integer; spacings = symmetric_spacing(wing)) = chord_coordinates(wing, number_of_spanwise_panels(wing, span_num), chord_num; spacings = spacings)
camber_coordinates(wing :: Wing, span_num :: Integer, chord_num :: Integer; spacings = symmetric_spacing(wing)) = camber_coordinates(wing, number_of_spanwise_panels(wing, span_num), chord_num; spacings = spacings)
surface_coordinates(wing :: Wing, span_num :: Integer, chord_num :: Integer; spacings = symmetric_spacing(wing)) = surface_coordinates(wing, number_of_spanwise_panels(wing, span_num), chord_num; spacings = spacings)
## Panelling
#==========================================================================================#
mesh_chords(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false) = make_panels(chord_coordinates(wing, span_num, chord_num; spacings = spacings, flip = flip))
function mesh_chords(wing :: Wing, span_num, chord_num; spacings = symmetric_spacing(wing))
left_panels = mesh_chords(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_panels = mesh_chords(right(wing), span_num, chord_num; spacings = spacings)
[ left_panels right_panels ]
end
mesh_wing(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false) = make_panels(surface_coordinates(wing, span_num, chord_num; spacings = spacings, flip = flip))
function mesh_wing(wing :: Wing, span_num, chord_num; spacings = symmetric_spacing(wing))
left_panels = mesh_wing(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_panels = mesh_wing(right(wing), span_num, chord_num; spacings = spacings)
[ left_panels right_panels ]
end
mesh_cambers(wing :: HalfWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing), flip = false) = make_panels(camber_coordinates(wing, span_num, chord_num; spacings = spacings, flip = flip))
function mesh_cambers(wing :: Wing, span_num, chord_num; spacings = symmetric_spacing(wing))
left_panels = mesh_cambers(left(wing), reverse(span_num), chord_num; spacings = reverse(spacings), flip = true)
right_panels = mesh_cambers(right(wing), span_num, chord_num; spacings = spacings)
[ left_panels right_panels ]
end
function make_panels(wing :: AbstractWing, span_num :: Vector{<: Integer}, chord_num :: Integer; spacings = symmetric_spacing(wing))
horseshoe_panels = mesh_chords(wing, span_num, chord_num; spacings = spacings)
camber_panels = mesh_cambers(wing, span_num, chord_num; spacings = spacings)
horseshoe_panels, normal_vector.(camber_panels)
end
make_panels(wing :: AbstractWing, span_num :: Integer, chord_num :: Integer; spacings = symmetric_spacing(wing)) = make_panels(wing, [span_num], chord_num; spacings = spacings)
panel_wing(comp :: AbstractWing, span_panels :: Union{Integer, Vector{<: Integer}}, chord_panels :: Integer; spacing = symmetric_spacing(comp)) = make_panels(comp, span_panels, chord_panels; spacings = spacing)
## Meshing type for convenience
#==========================================================================================#
struct WingMesh{M <: AbstractWing, N <: Integer, P, Q, T} <: AbstractWing
surface :: M
num_span :: Vector{N}
num_chord :: N
chord_spacing :: P
span_spacing :: Q
chord_mesh :: Matrix{T}
camber_mesh :: Matrix{T}
end
"""
WingMesh(
surface :: AbstractWing,
n_span :: Vector{Integer}, n_chord :: Integer;
span_spacing :: AbstractSpacing = symmetric_spacing(surface)
)
Define a container for generating meshes and panels of a given `AbstractWing`, with a specified spanwise panel distribution and a number of chordwise panels.
Optionally, a combination of `AbstractSpacing` types (`Sine()`, `Cosine()`, `Uniform()`) can be provided to the **named argument** `span_spacing`, either as a singleton or as a vector with length equal to the number of spanwise sections. By default, the combination is `[Sine(), Cosine(), ..., Cosine()]`.
"""
function WingMesh(surface :: M, n_span :: AbstractVector{N}, n_chord :: N; chord_spacing :: P = Cosine(), span_spacing :: Q = symmetric_spacing(surface)) where {M <: AbstractWing, N <: Integer, P <: AbstractSpacing, Q <: Union{AbstractSpacing, Vector{<:AbstractSpacing}}}
check_definition(surface, n_span)
chord_mesh = chord_coordinates(surface, n_span, n_chord; spacings = span_spacing)
camber_mesh = camber_coordinates(surface, n_span, n_chord; spacings = span_spacing)
T = promote_type(eltype(chord_mesh), eltype(camber_mesh))
WingMesh{M,N,P,Q,T}(surface, n_span, n_chord, chord_spacing, span_spacing, chord_mesh, camber_mesh)
end
WingMesh(surface, n_span :: Integer, n_chord :: Integer; chord_spacing = Cosine(), span_spacing = symmetric_spacing(surface)) = WingMesh(surface, number_of_spanwise_panels(surface, n_span), n_chord; chord_spacing = chord_spacing, span_spacing = span_spacing)
check_definition(surf :: HalfWing, n_span) = @assert length(n_span) == length(surf.spans) "The spanwise number vector's length must be the same as the number of sections of the surface."
check_definition(surf :: Wing, n_span) = @assert length(n_span) == length(surf.right.spans) == length(surf.left.spans) "The spanwise number vector's length must be the same as the number of sections of the surface."
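# A minimal usage sketch (`my_wing` is a hypothetical `AbstractWing` with two spanwise sections, not defined in this file):
#   mesh = WingMesh(my_wing, [12, 6], 10; span_spacing = [Sine(), Cosine()])
#   horseshoes = chord_panels(mesh)
#   cambers = camber_panels(mesh)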
##
"""
chord_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord)
Generate the chord coordinates of a `WingMesh` with default spanwise ``n_s`` and chordwise ``n_c`` panel distributions from the mesh.
"""
chord_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord) = chord_coordinates(wing.surface, n_span, n_chord)
"""
camber_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord)
Generate the camber coordinates of a `WingMesh` with default spanwise ``n_s`` and chordwise ``n_c`` panel distributions from the mesh.
"""
camber_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord) = camber_coordinates(wing.surface, n_span, n_chord)
"""
surface_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord)
Generate the surface coordinates of a `WingMesh` with default spanwise ``n_s`` and chordwise ``n_c`` panel distributions from the mesh.
"""
surface_coordinates(wing :: WingMesh, n_span = wing.num_span, n_chord = wing.num_chord) = surface_coordinates(wing.surface, n_span, n_chord)
"""
surface_panels(wing_mesh :: WingMesh,
n_s = wing_mesh.num_span,
n_c = length(first(foils(wing_mesh.surface)).x))
Generate the surface panel distribution from a `WingMesh` with the default spanwise ``n_s`` panel distribution from the mesh and the chordwise ``n_c`` panel distribution taken from the airfoil coordinates.
In case of strange results, provide a higher number of chordwise panels to represent the airfoils more accurately.
"""
surface_panels(wing :: WingMesh, n_span = wing.num_span, n_chord = length(first(foils(wing.surface)).x)) = (make_panels ∘ surface_coordinates)(wing, n_span, n_chord)
"""
chord_panels(wing_mesh :: WingMesh)
Generate the chord panel distribution from a `WingMesh`.
"""
chord_panels(wing :: WingMesh) = make_panels(wing.chord_mesh)
"""
camber_panels(wing_mesh :: WingMesh)
Generate the camber panel distribution from a `WingMesh`.
"""
camber_panels(wing :: WingMesh) = make_panels(wing.camber_mesh)
"""
wetted_area(wing_mesh :: WingMesh,
n_s = wing_mesh.num_span,
n_c = length(first(foils(wing_mesh.surface)).x))
Determine the wetted area ``S_{wet}`` of a `WingMesh` by calculating the total area of the surface panels.
"""
wetted_area(wing :: WingMesh, n_span = wing.num_span, n_chord = length(first(foils(wing.surface)).x)) = wetted_area(surface_panels(wing, n_span, n_chord))
"""
wetted_area_ratio(wing_mesh :: WingMesh,
n_s = wing_mesh.num_span,
n_c = length(first(foils(wing_mesh.surface)).x))
Determine the wetted area ratio ``S_{wet}/S`` of a `WingMesh` by calculating the ratio of the total area of the surface panels to the projected area of the `Wing`.
"""
wetted_area_ratio(wing :: WingMesh, n_span = wing.num_span, n_chord = length(first(foils(wing.surface)).x)) = wetted_area(wing, n_span, n_chord) / projected_area(wing.surface)
function Base.show(io :: IO, mesh :: WingMesh)
n_c, n_s = size(mesh.chord_mesh) .- 1
println(io, "WingMesh —")
println(io, "Spanwise panels: ", n_s)
println(io, "Chordwise panels: ", n_c)
println(io, "Spanwise spacing: ", mesh.span_spacing)
println(io, "Chordwise spacing: ", mesh.chord_spacing)
nothing
end
|
{"hexsha": "6efefd77a7db516239a4a1140442fb0294f93b9a", "size": 13600, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Geometry/AircraftGeometry/Wings/mesh_wing.jl", "max_stars_repo_name": "HKUST-OCTAD-LAB/AeroMDAO.jl", "max_stars_repo_head_hexsha": "0ca9aa924f088cac59d04958eb5c6704b50feb18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Geometry/AircraftGeometry/Wings/mesh_wing.jl", "max_issues_repo_name": "HKUST-OCTAD-LAB/AeroMDAO.jl", "max_issues_repo_head_hexsha": "0ca9aa924f088cac59d04958eb5c6704b50feb18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Geometry/AircraftGeometry/Wings/mesh_wing.jl", "max_forks_repo_name": "HKUST-OCTAD-LAB/AeroMDAO.jl", "max_forks_repo_head_hexsha": "0ca9aa924f088cac59d04958eb5c6704b50feb18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.1983471074, "max_line_length": 300, "alphanum_fraction": 0.7104411765, "num_tokens": 3512}
|
/*
* Copyright (c) CERN 2013
*
* Copyright (c) Members of the EMI Collaboration. 2010-2013
* See http://www.eu-emi.eu/partners for details on the copyright
* holders.
*
* Licensed under Apache License Version 2.0
*
*/
#include "HdfsNS.h"
#include <boost/algorithm/string/predicate.hpp>
using namespace dmlite;
HDFSUtil::HDFSUtil(){}
HDFSUtil::~HDFSUtil(){}
void HDFSUtil::setClasspath(std::string basefolder) throw ()
{
//getting the current classpath; fall back to an empty string if unset
const char * classpath = getenv("CLASSPATH");
if (classpath == NULL)
classpath = "";
//creating the string
std::string classpathString = std::string(classpath);
//configure the CLASSPATH by appending every .jar found under basefolder
DIR * dir = opendir(basefolder.c_str());
if (dir == NULL)
return;
struct dirent * d;
while ((d = readdir(dir)) != NULL)
{
std::string baseFolder = std::string(basefolder);
baseFolder.append(std::string("/"));
if (d->d_type == 0x8 && (boost::algorithm::ends_with(std::string(d->d_name),".jar")))
classpathString.append(std::string(":").append(baseFolder.append(std::string(d->d_name)).c_str()));
}
closedir(dir);
//overriding
setenv("CLASSPATH", classpathString.c_str(),true);
}
std::string HDFSUtil::getRandomGateway(const std::vector<std::string>& gateways) throw ()
{
if (gateways.size() == 1)
return gateways.at(0);
else {
//initialize rand (note: seeding on every call means calls within the
//same second will pick the same gateway)
int random;
srand (time(NULL));
random = rand() % gateways.size();
return gateways.at(random);
}
}
void HDFSUtil::setLibraryPath(std::string java_home) throw ()
{
//getting the current LD_LIBRARY_PATH; fall back to an empty string if unset
const char * path = getenv("LD_LIBRARY_PATH");
if (path == NULL)
path = "";
//creating the string
std::string pathString = std::string(path);
std::string java_string64 = std::string(java_home).append(std::string("/jre/lib/amd64/server/"));
std::string java_stringi386 = std::string(java_home).append(std::string("/jre/lib/i386/server/"));
pathString.append(std::string(":")).append(java_string64).append(std::string(":")).append(java_stringi386);
setenv("LD_LIBRARY_PATH", pathString.c_str(),true);
}
int HDFSUtil::mkdirs(const char *dir) throw ()
{
char tmp[256];
char *p = NULL;
size_t len;
snprintf(tmp, sizeof(tmp),"%s",dir);
len = strlen(tmp);
if(tmp[len - 1] == '/')
tmp[len - 1] = 0;
for(p = tmp + 1; *p; p++)
if(*p == '/') {
*p = 0;
mkdir(tmp, S_IRWXU);
*p = '/';
}
return mkdir(tmp, S_IRWXU);
}
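// Usage sketch (hypothetical path): mkdirs("/var/tmp/hdfs/cache") creates every missing
// path component with mode S_IRWXU, similar to `mkdir -p`. Paths longer than the
// 256-byte buffer above are truncated by snprintf.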
std::string HDFSUtil::trim(std::string& str) throw ()
{
//guard against empty input: str.size() - 1 would underflow the unsigned type
if (str.empty())
return str;
std::string::size_type begin=0;
std::string::size_type end=str.size()-1;
while(begin<=end && (str[begin]<=0x20 || str[begin]==0x7f))
++begin;
while(end>begin && (str[end]<=0x20 || str[end]==0x7f))
--end;
str = str.substr(begin, end - begin + 1);
return str;
}
|
{"hexsha": "b57c1d82e1503c36fb75231a5f40f5f31603ba95", "size": 3022, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/HdfsUtil.cpp", "max_stars_repo_name": "andrea-manzi/dmlite-hdfs-plugin", "max_stars_repo_head_hexsha": "16a9909db7cbdf0328215d30b9df4ea5a752a9b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/HdfsUtil.cpp", "max_issues_repo_name": "andrea-manzi/dmlite-hdfs-plugin", "max_issues_repo_head_hexsha": "16a9909db7cbdf0328215d30b9df4ea5a752a9b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/HdfsUtil.cpp", "max_forks_repo_name": "andrea-manzi/dmlite-hdfs-plugin", "max_forks_repo_head_hexsha": "16a9909db7cbdf0328215d30b9df4ea5a752a9b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3709677419, "max_line_length": 115, "alphanum_fraction": 0.5959629385, "num_tokens": 822}
|
using Protos
using Protos.Parsing
using Test
@testset "Protos.jl" begin
@testset "parsing" begin
testfile = joinpath(@__DIR__, "test.proto")
io = open(testfile)
parsed = parse_proto(io)
@show parsed
@test parsed.comment == "\nThis is a file-wide comment.\n"
@test parsed.statements[1].package == "some.package.name"
@test parsed.statements[2].value == "some.other.package.name"
# TODO: ensure both messages and enum are present
@test length(parsed.statements) > 5
end
@testset "utils" begin
import Protos: proto_default
@test proto_default(UInt32) == zero(UInt32)
@test proto_default(String) == ""
@test proto_default(Bool) == false
@test proto_default(Vector{String}) == String[]
# use === here, since `== missing` propagates missing instead of returning a Bool
@test proto_default(ProtoFile) === missing
end
@testset "serialization" begin
import Protos.Serialization: writefield
io = IOBuffer()
idx = 1
writefield(io, idx, 150, Val(:uint32))
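# Expected wire format: the tag byte 0x08 encodes field number 1 with wire type 0
# (varint), since tag = (field_number << 3) | wire_type. The value 150 becomes the
# varint [0x96, 0x01]: 150 = 0b10010110, whose low 7 bits (0x16) are emitted first
# with the continuation bit set (0x96), followed by the remaining high bit (0x01).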
@test take!(io) == [0x08, 0x96, 0x01]
end
end
|
{"hexsha": "463721624e9d45f2b93d2ce49d156ba71ee220f3", "size": 1073, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "tbreloff/Protos.jl", "max_stars_repo_head_hexsha": "6f154a39231268300ca4f9e0f71878cd769eea3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-14T12:33:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T12:33:40.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "tbreloff/Protos.jl", "max_issues_repo_head_hexsha": "6f154a39231268300ca4f9e0f71878cd769eea3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "tbreloff/Protos.jl", "max_forks_repo_head_hexsha": "6f154a39231268300ca4f9e0f71878cd769eea3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8055555556, "max_line_length": 69, "alphanum_fraction": 0.6104380242, "num_tokens": 284}
|
import numpy as np
import matplotlib.pyplot as plt
def estimate_coef(x, y):
# number of observations/points
n = np.size(x)
# mean of x and y vector
m_x, m_y = np.mean(x), np.mean(y)
# calculating cross-deviation and deviation about x
SS_xy = np.sum(y*x) - n*m_y*m_x
SS_xx = np.sum(x*x) - n*m_x*m_x
# calculating regression coefficients
b_1 = SS_xy / SS_xx
b_0 = m_y - b_1*m_x
return (b_0, b_1)
def plot_regression_line(x, y, b):
# plotting the actual points as scatter plot
plt.scatter(x, y, color = "m",
marker = "o", s = 30)
# predicted response vector
y_pred = b[0] + b[1]*x
# plotting the regression line
plt.plot(x, y_pred, color = "g")
# putting labels
plt.xlabel('x')
plt.ylabel('y')
# function to show plot
plt.show()
def main():
# observations
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
y = np.array([1, 3, 2, 5, 7, 8, 8, 9, 10, 12])
# estimating coefficients
b = estimate_coef(x, y)
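# Sanity check by hand: m_x = 4.5, m_y = 6.5, SS_xy = 389 - 292.5 = 96.5 and
# SS_xx = 285 - 202.5 = 82.5, so b_1 = 96.5/82.5 ≈ 1.1697 and b_0 ≈ 1.2364.
# np.polyfit(x, y, 1), which returns coefficients highest degree first
# (i.e. [b_1, b_0]), should give the same values.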
print("Estimated coefficients:\nb_0 = {} \
\nb_1 = {}".format(b[0], b[1]))
# plotting regression line
plot_regression_line(x, y, b)
if __name__ == "__main__":
main()
|
{"hexsha": "7c29ce63420201450e16d8972536698fa42db53c", "size": 1226, "ext": "py", "lang": "Python", "max_stars_repo_path": "Regression/SimpleLinearRegression.py", "max_stars_repo_name": "sum-coderepo/HadoopApp", "max_stars_repo_head_hexsha": "0e8d48c5d541b5935c9054fb1335d829d67d7b59", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-26T23:58:32.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-01T20:45:30.000Z", "max_issues_repo_path": "Regression/SimpleLinearRegression.py", "max_issues_repo_name": "sum-coderepo/HadoopApp", "max_issues_repo_head_hexsha": "0e8d48c5d541b5935c9054fb1335d829d67d7b59", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Regression/SimpleLinearRegression.py", "max_forks_repo_name": "sum-coderepo/HadoopApp", "max_forks_repo_head_hexsha": "0e8d48c5d541b5935c9054fb1335d829d67d7b59", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.7037037037, "max_line_length": 55, "alphanum_fraction": 0.5831973899, "include": true, "reason": "import numpy", "num_tokens": 400}
|
"""Exercise 1
Usage:
$ CUDA_VISIBLE_DEVICES=2 python practico_1_train_petfinder.py --dataset_dir ../ --epochs 30 --dropout 0.1 0.1 --hidden_layer_sizes 200 100
To know which GPU to use, you can check it with the command
$ nvidia-smi
"""
import argparse
import os
import mlflow
import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, models
import warnings
warnings.filterwarnings("ignore")
from auxiliary import process_features, load_dataset, build_columns, log_dir_name
TARGET_COL = 'AdoptionSpeed'
def read_args():
parser = argparse.ArgumentParser(
description='Training a MLP on the petfinder dataset')
# Here you have some examples of classifier parameters. You can add
# more arguments or change these if you need to.
parser.add_argument('--experiment_name', type=str, default='Base model',
help='Name of the experiment, used in mlflow.')
parser.add_argument('--dataset_dir', default='../petfinder_dataset', type=str,
help='Directory with the training and test files.')
parser.add_argument('--hidden_layer_sizes', nargs='+', default=[100], type=int,
help='Number of hidden units of each hidden layer.')
parser.add_argument('--epochs', default=50, type=int,
help='Number of epochs to train.')
parser.add_argument('--dropout', nargs='+', default=[0.5], type=float,
help='Dropout ratio for every layer.')
parser.add_argument('--batch_size', type=int, default=32,
help='Number of instances in each batch.')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate.')
args = parser.parse_args()
assert len(args.hidden_layer_sizes) == len(args.dropout)
return args
def print_args(args):
print('-------------------------------------------')
print('PARAMS ------------------------------------')
print('-------------------------------------------')
print('--experiment_name ', args.experiment_name)
print('--dataset_dir ', args.dataset_dir)
print('--epochs ', args.epochs)
print('--hidden_layer_sizes', args.hidden_layer_sizes)
print('--dropout ', args.dropout)
print('--batch_size ', args.batch_size)
print('--learning_rate ', args.learning_rate)
print('-------------------------------------------')
def main():
args = read_args()
print_args(args)
experiment_name = args.experiment_name
batch_size = args.batch_size
learning_rate = args.learning_rate
hidden_layer_sizes = args.hidden_layer_sizes
dropout = args.dropout
epochs = args.epochs
### Output directory
dir_name = log_dir_name(args)
print()
print(dir_name)
print()
output_dir = os.path.join('experiments', experiment_name, dir_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
dataset, dev_dataset, test_dataset = load_dataset(args.dataset_dir)
nlabels = dataset[TARGET_COL].unique().shape[0]
columns = [
'Gender', 'Color1', 'Vaccinated', 'Dewormed',
'Breed1',
'Age', 'Fee', 'Quantity']
one_hot_columns, embedded_columns, numeric_columns = build_columns(dataset, columns)
# TODO (optional) put these three types of columns in the same dictionary with "column types"
X_train, y_train = process_features(dataset, one_hot_columns, numeric_columns, embedded_columns)
direct_features_input_shape = (X_train['direct_features'].shape[1],)
X_dev, y_dev = process_features(dev_dataset, one_hot_columns, numeric_columns, embedded_columns)
###########################################################################################################
### TODO: Shuffle train dataset - Done
###########################################################################################################
shuffle_len = X_train['direct_features'].shape[0]
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(shuffle_len).batch(batch_size)
###########################################################################################################
dev_ds = tf.data.Dataset.from_tensor_slices((X_dev, y_dev)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensor_slices(process_features(
test_dataset, one_hot_columns, numeric_columns, embedded_columns, test=True)[0]).batch(batch_size)
###########################################################################################################
### TODO: Build the Keras model - Done
###########################################################################################################
tf.keras.backend.clear_session()
# Add one input and one embedding for each embedded column
embedding_layers = []
inputs = []
for embedded_col, max_value in embedded_columns.items():
input_layer = layers.Input(shape=(1,), name=embedded_col)
inputs.append(input_layer)
# Define the embedding layer
embedding_size = int(max_value / 4)
embedding_layers.append(
tf.squeeze(layers.Embedding(input_dim=max_value, output_dim=embedding_size)(input_layer), axis=-2))
print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))
# Add the direct features already calculated
direct_features_input = layers.Input(shape=direct_features_input_shape, name='direct_features')
inputs.append(direct_features_input)
# Concatenate everything together
features = layers.concatenate(embedding_layers + [direct_features_input])
# Stack the hidden layers, applying the per-layer dropout rates passed via --dropout
# (read_args asserts len(dropout) == len(hidden_layer_sizes))
hidden = layers.Dense(hidden_layer_sizes[0], activation='relu')(features)
hidden = layers.Dropout(dropout[0])(hidden)
for layer_size, dropout_rate in zip(hidden_layer_sizes[1:], dropout[1:]):
    hidden = layers.Dense(layer_size, activation='relu')(hidden)
    hidden = layers.Dropout(dropout_rate)(hidden)
output_layer = layers.Dense(nlabels, activation='softmax')(hidden)
model = models.Model(inputs=inputs, outputs=output_layer)
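# A sketch of what the code above builds: one Input per embedded column feeding an
# Embedding, concatenated with the direct features, then the Dense/Dropout stack from
# --hidden_layer_sizes/--dropout, and a softmax over the nlabels adoption-speed classes.
# model.summary() prints the full graph for inspection.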
###########################################################################################################
###########################################################################################################
### TODO: Fit the model - Done
###########################################################################################################
mlflow.set_experiment(experiment_name)
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
logdir = "logs/scalars/" + dir_name
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
with mlflow.start_run(nested=True):
# Log model hiperparameters first
mlflow.log_param('hidden_layer_size', hidden_layer_sizes)
mlflow.log_param('dropout', dropout)
mlflow.log_param('embedded_columns', embedded_columns)
mlflow.log_param('one_hot_columns', one_hot_columns)
mlflow.log_param('numeric_columns', numeric_columns) # Not using these yet
mlflow.log_param('epochs', epochs)
mlflow.log_param('batch_size', batch_size)
mlflow.log_param('learning_rate', learning_rate)
# Train
history = model.fit(train_ds, epochs=epochs,
validation_data=dev_ds,
callbacks=[tensorboard_callback])
#######################################################################################################
### TODO: analyze history to see if model converges/overfits
#######################################################################################################
output_csv = os.path.join(output_dir, 'history.pickle')
with open(output_csv, 'bw') as f:
pickle.dump(history.history, f)
#######################################################################################################
#######################################################################################################
### TODO: Evaluate the model, calculating the metrics. - Done
#######################################################################################################
loss, accuracy = model.evaluate(dev_ds)
print("*** Dev loss: {} - accuracy: {}".format(loss, accuracy))
mlflow.log_metric('loss', loss)
mlflow.log_metric('accuracy', accuracy)
predictions = model.predict(test_ds)
#######################################################################################################
#######################################################################################################
### TODO: Convert predictions to classes - Done
#######################################################################################################
prediction_classes = np.argmax(predictions, axis=1)
#######################################################################################################
#######################################################################################################
### TODO: Save the results for submission - Done
#######################################################################################################
output_csv = os.path.join(output_dir, 'submit.csv')
submissions = pd.DataFrame(prediction_classes, columns=[TARGET_COL], index=test_dataset.PID)
submissions.to_csv(output_csv)
#######################################################################################################
###########################################################################################################
print('All operations completed')
if __name__ == '__main__':
main()
|
{"hexsha": "0ebf6e6f4a1667f2d0b5238c117fa44dfca6f7c4", "size": 10203, "ext": "py", "lang": "Python", "max_stars_repo_path": "tercer_modelo.py", "max_stars_repo_name": "nahuelalmeira/deepLearning", "max_stars_repo_head_hexsha": "f1fcd06f5735c8be9272b0c8392b1ae467c08582", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tercer_modelo.py", "max_issues_repo_name": "nahuelalmeira/deepLearning", "max_issues_repo_head_hexsha": "f1fcd06f5735c8be9272b0c8392b1ae467c08582", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tercer_modelo.py", "max_forks_repo_name": "nahuelalmeira/deepLearning", "max_forks_repo_head_hexsha": "f1fcd06f5735c8be9272b0c8392b1ae467c08582", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.3466666667, "max_line_length": 138, "alphanum_fraction": 0.5118102519, "include": true, "reason": "import numpy", "num_tokens": 1755}
|
import numpy as np
from paralleldomain.decoding.decoder import DatasetDecoder
from paralleldomain.model.class_distribution import ClassDistribution
def test_from_dataset(decoder: DatasetDecoder):
dataset = decoder.get_dataset()
class_dist = ClassDistribution.from_dataset(dataset=dataset)
assert class_dist is not None
assert isinstance(class_dist, ClassDistribution)
car_info = class_dist.get_class_info(class_name="Car")
# use a name that does not shadow the builtin `sum`
total_percentage = 0.0
for ci in class_dist.class_distribution_infos:
    total_percentage += ci.class_pixel_percentage
assert np.allclose(total_percentage, 100.0)
assert car_info is not None
assert car_info.class_pixel_count > 0
assert car_info.class_pixel_percentage > 1.0
assert car_info.class_instance_count > 0
assert car_info.class_instance_percentage > 1.0
|
{"hexsha": "3e12232216043ab981613a802fd5bb80a1b54f17", "size": 805, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_paralleldomain/model/test_class_distibution.py", "max_stars_repo_name": "parallel-domain/pd-sdk", "max_stars_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-11-17T17:23:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T09:51:23.000Z", "max_issues_repo_path": "test_paralleldomain/model/test_class_distibution.py", "max_issues_repo_name": "parallel-domain/pd-sdk", "max_issues_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-12-02T17:16:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-07T12:47:13.000Z", "max_forks_repo_path": "test_paralleldomain/model/test_class_distibution.py", "max_forks_repo_name": "parallel-domain/pd-sdk", "max_forks_repo_head_hexsha": "20e3d052a5cb612a2dd84bda7b1b5487a6a60edc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-09T07:03:54.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T15:53:48.000Z", "avg_line_length": 36.5909090909, "max_line_length": 69, "alphanum_fraction": 0.7801242236, "include": true, "reason": "import numpy", "num_tokens": 179}
|
# based on the keras documentation
#
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Conv1D,Activation
from keras import losses, optimizers
import keras.utils as keras_utils
import json
f = open('data_for_everything')
j = json.load(f)
# our parameters
nn_input_size = len(j[0]['in'])
nn_hidden_layer_size = 20
nn_hidden_layers_n = 1
nn_output_layer_size = len(j[0]['out'])
epochs_count = 2000
batch_size = 100
# define the model.
model = Sequential()
model.add(Dense(nn_hidden_layer_size, activation='relu',
input_shape=(nn_input_size,)))
# hidden layers. we went for three. seems decent.
for i in range(0, nn_hidden_layers_n):
model.add(Dense(nn_hidden_layer_size, activation='relu',
input_shape=(nn_hidden_layer_size,)))
# output layer.
# sigmoid keeps outputs in (0, 1), which binary cross-entropy requires
model.add(Dense(nn_output_layer_size, activation='sigmoid',
input_shape=(nn_hidden_layer_size,)))
# compile it.
model.compile(optimizer=optimizers.Adam(),
loss=losses.binary_crossentropy,
metrics=['accuracy'])
######
###### LOAD DATA HERE
######
#data = np.empty(shape=(len(j),196))
#labels = np.empty(shape=(len(j),664))
t_data = []
t_label = []
for i in j:
t_data += [i['in']]
t_label += [i['out']]
amount_for_training = 1000
data = np.array(t_data[:amount_for_training])
labels = np.array(t_label[:amount_for_training])
test_data = np.array(t_data[amount_for_training:])
test_labels = np.array(t_label[amount_for_training:])
print(data)
model.fit(data, labels, epochs=epochs_count, batch_size=batch_size)
score = model.evaluate(test_data, test_labels, batch_size=batch_size)
print(score)
#print(j[1001]['bill'])
#print(t_label[100])
#print(model.predict(np.matrix(t_data[1001],)))
model.save('trained.dat')
|
{"hexsha": "9b8f07e157c6588a775d6619b9a559fea0c384f1", "size": 1750, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/nn/train.py", "max_stars_repo_name": "bahorn/BrumHack7", "max_stars_repo_head_hexsha": "cffa2484f63728e73d6dd2bbe6b24fbd12e1cd93", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/nn/train.py", "max_issues_repo_name": "bahorn/BrumHack7", "max_issues_repo_head_hexsha": "cffa2484f63728e73d6dd2bbe6b24fbd12e1cd93", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/nn/train.py", "max_forks_repo_name": "bahorn/BrumHack7", "max_forks_repo_head_hexsha": "cffa2484f63728e73d6dd2bbe6b24fbd12e1cd93", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9230769231, "max_line_length": 69, "alphanum_fraction": 0.7268571429, "include": true, "reason": "import numpy", "num_tokens": 437}
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for drawing bounding boxes on PIL images
import numpy as np
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
def draw_bounding_boxes_on_image(image,
boxes,
color=(255, 0, 0),
thickness=4,
display_str_list=()):
"""Draws bounding boxes on image.
Args:
image (PIL.Image): PIL.Image object
boxes (np.array): a 2 dimensional numpy array
of [N, 4]: (ymin, xmin, ymax, xmax)
The coordinates are in normalized format between [0, 1]
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): bounding box line thickness
display_str_list [str]: list of strings.
Contains one string for each bounding box.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('boxes must be of size [N, 4]')
for i in range(boxes_shape[0]):
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list[i])
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color=(255, 0, 0),
thickness=4,
display_str='',
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
The string passed in display_str is displayed above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the string
is displayed below the bounding box.
Args:
image (PIL.Image): PIL.Image object
ymin (float): ymin of bounding box
xmin (float): xmin of bounding box
ymax (float): ymax of bounding box
xmax (float): xmax of bounding box
color (int, int, int): RGB tuple describing color to draw bounding box
thickness (int): line thickness
display_str (str): string to display in box
use_normalized_coordinates (bool): If True, treat coordinates
ymin, xmin, ymax, xmax as relative to the image. Otherwise treat
coordinates as absolute
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom),
(right, top), (left, top)], width=thickness, fill=tuple(color))
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display string added to the top of the bounding
# box exceeds the top of the image, move the string below the bounding box
# instead of above
display_str_height = font.getsize(display_str)[1]
# Each display_str has a top and bottom margin of 0.05x
total_display_str_height = (1 + 2 * 0.05) * display_str_height
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle(
[(left, text_bottom - text_height - 2 * margin), (left + text_width,
text_bottom)],
fill=tuple(color))
draw.text(
(left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
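# Usage sketch (hypothetical file names; boxes are normalized [ymin, xmin, ymax, xmax]):
# import PIL.Image
# image = PIL.Image.open('input.jpg')
# boxes = np.array([[0.1, 0.2, 0.6, 0.8]])
# draw_bounding_boxes_on_image(image, boxes, color=(0, 255, 0), display_str_list=['dog'])
# image.save('output.jpg')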
|
{"hexsha": "a8613eae27f6f6740cd4054eb1d513606f97e1e9", "size": 4883, "ext": "py", "lang": "Python", "max_stars_repo_path": "samples/python/uff_ssd/utils/boxes.py", "max_stars_repo_name": "martellz/TensorRT", "max_stars_repo_head_hexsha": "f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5249, "max_stars_repo_stars_event_min_datetime": "2019-06-17T17:20:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:56:05.000Z", "max_issues_repo_path": "samples/python/uff_ssd/utils/boxes.py", "max_issues_repo_name": "martellz/TensorRT", "max_issues_repo_head_hexsha": "f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1721, "max_issues_repo_issues_event_min_datetime": "2019-06-17T18:13:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:09:53.000Z", "max_forks_repo_path": "samples/python/uff_ssd/utils/boxes.py", "max_forks_repo_name": "martellz/TensorRT", "max_forks_repo_head_hexsha": "f182e83b30b5d45aaa3f9a041ff8b3ce83e366f4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1414, "max_forks_repo_forks_event_min_datetime": "2019-06-18T04:01:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T09:16:53.000Z", "avg_line_length": 40.3553719008, "max_line_length": 80, "alphanum_fraction": 0.6145812001, "include": true, "reason": "import numpy", "num_tokens": 1091}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 11:00:56 2017
@author: 028375
"""
import pandas as pd
import numpy as np
begindate='20171001'
spotdate='20171018'
lastdate='20171017'
path0 = 'F:\\月结表\\境内TRS\\S201710\\'
def TestTemplate(Status, Collateral, Position):
    path1 = '股衍境内TRS检验' + spotdate + '.xlsx'
    path2 = '股衍境内TRS估值' + lastdate + '.xlsx'
    LastStatus = pd.read_excel(path0 + path2, '账户状态', encoding="gb2312", keep_default_na=False)
    LastStatus = LastStatus[['清单编号', '客户总预付金', '我方角度合约价值']]
    LastStatus.columns = ['TradeID', 'LastCollateral', 'LastValue']
    Status = pd.merge(Status, LastStatus, how='outer', left_on='清单编号', right_on='TradeID')
    # Position value per contract: sum(quantity * latest price) over its holdings
    Result = [0.0] * len(Status)
    for i in range(len(Status)):
        tmp1 = Position[Position['合约编号'] == Status['清单编号'][Status.index[i]]]['持仓数量']
        tmp2 = Position[Position['合约编号'] == Status['清单编号'][Status.index[i]]]['最新价']
        Result[i] = np.sum(tmp1 * tmp2)
    Result = pd.DataFrame(Result, columns=['Position'], index=Status.index)
    Status['Position'] = Result['Position']
    wbw = pd.ExcelWriter(path0 + path1)
    Status.to_excel(wbw, 'Status', index=False)
    Collateral.to_excel(wbw, 'Collateral', index=False)
    Position.to_excel(wbw, 'Position', index=False)
    wbw.save()
    return Status
def ExportTRS(Status, Collateral):
    path1 = '股衍境内TRS估值' + spotdate + '.xlsx'
    wbw = pd.ExcelWriter(path0 + path1)
    Status.to_excel(wbw, '账户状态', index=False)
    Collateral.to_excel(wbw, '资金流水', index=False)
    wbw.save()
if __name__ == "__main__":
    path1 = '收益互换日终报表-账户状态.xlsx'
    path2 = '收益互换日终报表-资金流水表.xlsx'
    path3 = '收益互换日终报表-组合持仓.xlsx'
    Status = pd.read_excel(path0 + path1, 'Sheet1', encoding="gb2312", keep_default_na=False)
    Collateral = pd.read_excel(path0 + path2, 'Sheet1', encoding="gb2312", keep_default_na=False)
    Position = pd.read_excel(path0 + path3, 'Sheet1', encoding="gb2312", keep_default_na=False)
    Status = Status[Status['交易状态'] == 1]
    Collateral = Collateral[pd.to_datetime(Collateral['日期']) >= pd.to_datetime(begindate)]
    Position = Position[Position['合约状态'] == 1]
    ExportTRS(Status, Collateral)
    Status0 = TestTemplate(Status, Collateral, Position)
|
{"hexsha": "676b6ca831e438f7ac593289042c9e7e1e5beb4a", "size": 2602, "ext": "py", "lang": "Python", "max_stars_repo_path": "productcontrol/TRS_EQ.py", "max_stars_repo_name": "JulianGong/littleAccountingTools", "max_stars_repo_head_hexsha": "d315a70ed102b13d48b2df6968283c36934857bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "productcontrol/TRS_EQ.py", "max_issues_repo_name": "JulianGong/littleAccountingTools", "max_issues_repo_head_hexsha": "d315a70ed102b13d48b2df6968283c36934857bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "productcontrol/TRS_EQ.py", "max_forks_repo_name": "JulianGong/littleAccountingTools", "max_forks_repo_head_hexsha": "d315a70ed102b13d48b2df6968283c36934857bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6478873239, "max_line_length": 129, "alphanum_fraction": 0.6602613374, "include": true, "reason": "import numpy", "num_tokens": 856}
|
"""
Compare our results with the FJ model (and an upper bound).
If we directly modify the innate opinions of k nodes, each by at most epsilon,
how much will it influence the final measures?
Ref:
Gaitonde, Jason, Jon Kleinberg, and Eva Tardos.
"Adversarial perturbations of opinion dynamics in networks." Proceedings of the 21st ACM Conference on Economics and Computation. 2020.
Chen, Mayee F., and Miklos Z. Racz.
"Network disruption: maximizing disagreement and polarization in social networks." arXiv preprint arXiv:2003.08377 (2020).
"""
include("../src/Graph.jl")
include("../src/Tools.jl")
include("../src/Algorithm.jl")
include("../src/Sampling.jl")
include("../src/Methods.jl")
using LinearAlgebra
using StatsBase
using JSON
using JLD2
epsilon = 0.1
jsdict = Dict{String, Any}()
tags = []
# Read Graph
buf = split(ARGS[1], ',')
fileName = string("../data/all/", buf[1], ".jld2")
(name, n, m, V, E) = load(fileName, "name", "n", "m", "V", "E")
G = Graph(n, m, V, E)
merge!(jsdict, Dict("name"=>name, "n"=>n, "m"=>m))
# Before Making changes
if ARGS[2] == "1"
# Uniform distribution
push!(tags, "Uniform")
s = load(fileName, "s_Uni")
elseif ARGS[2] == "2"
# Exponential distribution
push!(tags, "Exponential")
s = load(fileName, "s_Exp")
elseif ARGS[2] == "3"
# Power-law distribution
push!(tags, "Power-law")
s = load(fileName, "s_Pow")
end
# Different parameters to select k
if ARGS[3] == "1"
push!(tags, "0.005")
k = load(fileName, "k1")
elseif ARGS[3] == "2"
push!(tags, "0.015")
k = load(fileName, "k2")
elseif ARGS[3] == "3"
push!(tags, "0.02")
k = load(fileName, "k3")
end
merge!(jsdict, Dict("k"=>k))
merge!(jsdict, Dict("s_dist"=>tags[1], "k_par"=>tags[2]))
name = ["z_sum", "aci", "ad", "ap", "aidc"]
##################### Init
# The initial of different measures
_, init_aci, init_ad, init_ap, init_aidc = Approx(G, s)
init_result = [sum(s), init_aci, init_ad, init_ap, init_aidc, init_aidc] # the last one is an upper bound
init_labels = "init_".*name
for i in 1:length(name)
merge!(jsdict, Dict(init_labels[i]=>init_result[i]))
end
#################### Changes
# Precalculation
L = Symmetric(getL(G))
W = getW(G)
W = (W + W') / 2
#println(issymmetric(W))
Mp = W*W - ones(n, n)/n
Mp = (Mp' + Mp)/2
Md = W*L*W
Md = (Md' + Md)/2
Mic = W*L*L*W
Mic = (Mic' + Mic)/2
Midc = W
s_sqrt = sqrt(sum([i^2 for i in s]))
g = zeros(n)
for (ID, u, v, w) in G.E
g[v] += 1
g[u] += 1
end
d_max = maximum(g) / 2
########################################### Upper bound
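# Sketch of the bound used below: for a perturbation vector δ supported on k nodes with
# |δ_i| ≤ ε, the change in the quadratic form is δ'Mδ + 2δ'Ms. The first term is at most
# λ_max(M)·k·ε², and Cauchy-Schwarz bounds the second by 2·√k·ε·λ_max(M)·||s||₂
# (or by the problem-specific caps inside the min(...) for polarization and disagreement).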
# Polarization
maxEigen = eigmax(Mp)
uppChange = maxEigen * epsilon^2 * k + min(2*k*epsilon, 2*sqrt(k)*epsilon*maxEigen*s_sqrt)
merge!(jsdict, Dict("FJ_upp_ap"=>uppChange))
#println("polarization ", uppChange)
# Disagreement
maxEigen = eigmax(Md)
uppChange = maxEigen * epsilon^2 * k + min(2*k*epsilon*d_max, 2*sqrt(k)*epsilon*maxEigen*s_sqrt)
merge!(jsdict, Dict("FJ_upp_ad"=>uppChange))
#println("Disagreement ", uppChange)
# Internal conflict
maxEigen = eigmax(Mic)
uppChange = maxEigen * epsilon^2 * k + 2*sqrt(k)*epsilon*maxEigen*s_sqrt
merge!(jsdict, Dict("FJ_upp_aci"=>uppChange))
#println("IC ", uppChange)
# Disagreement-Controversy
maxEigen = eigmax(Midc)
uppChange = maxEigen * epsilon^2 * k + 2*sqrt(k)*epsilon*maxEigen*s_sqrt
merge!(jsdict, Dict("FJ_upp_aidc"=>uppChange))
#println("IDC ", uppChange)
########################################### Algorithm
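# greedyFJ greedily chooses k nodes and a sign σ_j ∈ {+1, -1} for each, perturbing the
# innate opinion by σ_j·ε so as to maximize the increase of s'·Mx·s. Given the already
# chosen set C with recorded perturbations epVec, the marginal gain of node j is
#   σ_j·ε·(2·Mx·s)_j + σ_j·ε·Σ_{t ∈ C} (Mx[t,j] + Mx[j,t])·epVec[t] + Mx[j,j]·ε²,
# which is exactly TmpGain1 (σ_j = +1) and TmpGain2 (σ_j = -1) below.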
function greedyFJ(Mx, s, n, k, epsilon)
s_new = deepcopy(s)
Lvec = 2*Mx*s
cSet = Set{Int}()
AllSet = BitSet(1:n)
epVec = zeros(n)
TotalGain = 0
for i = 1:k
MaxGain = 0
MaxIdx = 0
MaxFlag = 0
for j in AllSet
TmpGain1 = Lvec[j]*epsilon + sum([Mx[t, j]*epVec[t] + Mx[j, t]*epVec[t] for t in cSet])*(epsilon) + Mx[j, j]*epsilon^2
TmpGain2 = -Lvec[j]*epsilon + sum([Mx[t, j]*epVec[t] + Mx[j, t]*epVec[t] for t in cSet])*(-epsilon) + Mx[j, j]*epsilon^2
if TmpGain1 > MaxGain
MaxIdx = j
MaxGain = TmpGain1
MaxFlag = 1
end
if TmpGain2 > MaxGain
MaxIdx = j
MaxGain = TmpGain2
MaxFlag = 2
end
end
if MaxIdx == 0
break
else
if MaxFlag == 1
epVec[MaxIdx] = epsilon
s_new[MaxIdx] = s[MaxIdx] + epsilon
elseif MaxFlag == 2
# record the signed perturbation so later cross terms use the correct sign
epVec[MaxIdx] = -epsilon
s_new[MaxIdx] = s[MaxIdx] - epsilon
end
push!(cSet, MaxIdx)
delete!(AllSet, MaxIdx)
TotalGain += MaxGain
end
end
#println("chosen nodes are ", cSet)
#println("changes is: ", s_new'*Mx*s_new - s'*Mx*s, " or ", TotalGain, " or ", epVec'*Lvec + epVec'*Mx*epVec)
return TotalGain
end
# Polarization
Change = greedyFJ(Mp, s, n, k, epsilon)
merge!(jsdict, Dict("FJ_ap"=>Change))
#println("polarization ", uppChange)
# Disagreement
Change = greedyFJ(Md, s, n, k, epsilon)
merge!(jsdict, Dict("FJ_ad"=>Change))
#println("Disagreement ", uppChange)
# Internal conflict
Change = greedyFJ(Mic, s, n, k, epsilon)
merge!(jsdict, Dict("FJ_aci"=>Change))
#println("IC ", uppChange)
# Disagreement-Controversy
Change = greedyFJ(Midc, s, n, k, epsilon)
merge!(jsdict, Dict("FJ_aidc"=>Change))
#println("IDC ", uppChange)
# zsum
merge!(jsdict, Dict("FJ_z_sum"=>k*epsilon))
#println("zsum ", k*epsilon)
js = JSON.json(jsdict)
println(jsdict)
open("out/fj_compare.json", "a+") do ff
write(ff, js)
write(ff, "\n")
end
|
{"hexsha": "345ad8b0321082c7721b876529efcddc14ff0289", "size": 5663, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "compare/CompareFJ.jl", "max_stars_repo_name": "SijingTu/WebConf-22-Viral-Marketing-Opinion-Dynamics", "max_stars_repo_head_hexsha": "8f1cc37b0cf5b392aece17b45cca84e6121d26eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "compare/CompareFJ.jl", "max_issues_repo_name": "SijingTu/WebConf-22-Viral-Marketing-Opinion-Dynamics", "max_issues_repo_head_hexsha": "8f1cc37b0cf5b392aece17b45cca84e6121d26eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compare/CompareFJ.jl", "max_forks_repo_name": "SijingTu/WebConf-22-Viral-Marketing-Opinion-Dynamics", "max_forks_repo_head_hexsha": "8f1cc37b0cf5b392aece17b45cca84e6121d26eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5151515152, "max_line_length": 139, "alphanum_fraction": 0.592971923, "num_tokens": 1834}
|
from python_helper import log
from globals import newGlobalsInstance
globalsInstance = newGlobalsInstance(
__file__,
successStatus = True,
errorStatus = True,
infoStatus = True,
# debugStatus = True,
failureStatus = True
)
log.info(__name__, 'Importing libraries')
import time
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from python_helper import Constant as c
from python_helper import RandomHelper, StringHelper, ObjectHelper
from utils import query as queryFrom
from utils.query import Dataset, HashedDataset, DatasetKeys
log.info(__name__, 'Libraries imported')
def plotSamples(data: np.array, target, samples: int = 1, shape: tuple = (28, 28)):
"""Plot the first 5 images and their labels in a row."""
for index, (img, target) in enumerate(zip(data[:samples].reshape(tuple([samples, *list(shape)])), target[:samples])):
plt.subplot(151 + index)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title(target)
def loadDataset(datasetName: str, dataType: str, targetType: str, cache: bool = False) -> Dataset:
log.info(loadDataset, 'Loading dataset')
originalDataset = fetch_openml(datasetName, cache=cache)
dataset = Dataset(
originalDataset.data.astype(dataType).values,
originalDataset.target.astype(targetType)
)
log.debug(loadDataset, 'Dataset loaded')
return dataset
def roundToInt(value):
return int(round(value))
def resizeByAverage(dataset: Dataset, dataShape: tuple)-> Dataset:
log.info(resizeByAverage, f'Resizing dataset to {dataShape} by average')
return Dataset(
resizeDataByAverage(dataset.data, dataShape),
dataset.target
)
def resizeDataByAverage(data: np.array, dataShape: tuple)-> Dataset:
log.debug(resizeDataByAverage, f'Resizing data to {dataShape} by average')
# TODO: actually write this "flattenedData" thing
flattenedData = [
dataUnit for dataUnit in data
]
stepSize = len(flattenedData[0]) / sum(dataShape)
resized = np.asarray(
[
[
np.mean(dataUnit[roundToInt(step * stepSize):]) if roundToInt((step+1) * stepSize) >= len(dataUnit) else np.mean(dataUnit[roundToInt(step * stepSize):roundToInt((step+1) * stepSize)]) for step in range(roundToInt(len(dataUnit) / stepSize))
] for dataUnit in flattenedData
]
)
log.debug(resizeDataByAverage, f'Original shape: {data.shape}')
log.debug(resizeDataByAverage, f'Step size: {stepSize}')
log.debug(resizeDataByAverage, f'Resized shape: {resized.shape}')
return resized
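# Worked example (hypothetical sizes): flattening a 784-pixel MNIST digit into
# dataShape = (9,) gives stepSize = 784/9 ≈ 87.1, so each output entry is the mean of
# roughly 87 consecutive input pixels; the final window simply absorbs the remainder.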
def resizeSampleByAverage(query: np.array, displayShape: tuple) -> np.array:
return resizeDataByAverage(
np.asarray([query]),
(np.product(np.asarray(displayShape)),)
)[0]
def reshape(dataset: Dataset, shape: tuple, weigth: float = 1.0, displayShape: tuple = None) -> Dataset:
log.info(reshape, f'Reshaping dataset to {shape}')
reshapeDataset = None
try:
reshapeDataset = Dataset(
dataset.data.reshape(*shape) * weigth,
dataset.target,
displayShape = displayShape
)
except Exception as exception:
log.error(reshape, f'No possible to reshape {queryFrom.arrayToString(dataset.data)}', exception, muteStackTrace=True)
raise exception
reshapeDataset.displaySample()
return reshapeDataset
def reduceShape(dataset: Dataset, shape: tuple) -> Dataset:
return resizeByAverage(dataset, (np.product(np.asarray(shape)),))
def getHashedDataset(resampledMnist: Dataset, displayShape: tuple) -> dict:
log.info(getHashedDataset, 'Building hashed dataset over central point')
hashedDataset = HashedDataset(resampledMnist, displayShape)
log.debug(getHashedDataset, f'hashedDataset keys length: {len(list(hashedDataset.get()[DatasetKeys.VALUES].keys()))}')
log.debug(getHashedDataset, f'hashedDataset sampledDigit: {queryFrom.arrayToString(queryFrom.buildDatasetMean(RandomHelper.sample(list(hashedDataset.get().values()))[DatasetKeys.VALUES]), shape=displayShape)}')
return hashedDataset
def mnist():
mnistDataset = loadDataset('mnist_784', 'float32', 'int64')
flattenMnist = reshape(mnistDataset, (-1, 784), weigth=1/255, displayShape=(28, 28))
# reshapedMnist = reshape(mnistDataset, (-1, 28, 28), weigth=1/255, displayShape=(28, 28))
reducedDisplayShape = (3, 3)
resampledMnist = reduceShape(flattenMnist, reducedDisplayShape)
reshapedResampledMnist = reshape(resampledMnist, (-1, *reducedDisplayShape))
hashedDataset = getHashedDataset(resampledMnist, reducedDisplayShape)
startedAt = time.time()
log.info(mnist, f'Query started at {startedAt}')
sampledIndex = RandomHelper.integer(minimum=0, maximum=flattenMnist.data.shape[0]-1)
predictions = hashedDataset.query(resizeSampleByAverage(flattenMnist.data[sampledIndex], reducedDisplayShape), 10)
results = []
for prediction in predictions:
results.append(
hashedDataset.get()[DatasetKeys.VALUES][
queryFrom.arrayToString(
resizeSampleByAverage(prediction, hashedDataset.displayShape),
roundTo=2,
shape=hashedDataset.displayShape
)
][DatasetKeys.TARGET]
)
log.success(mnist, f'Expected result: {flattenMnist.target[sampledIndex]}')
log.success(mnist, f'Predicted result: {sorted(results, key=results.count, reverse=True)[0]}')
finishedAt = time.time()
log.info(mnist, f'Query finished at {finishedAt}')
log.info(mnist, f'Query last {finishedAt - startedAt} seconds')
if __name__ == '__main__':
mnist()
|
{"hexsha": "53b2e5e8e0577e88d1f1cd9fa686b55b1b1c86f3", "size": 5812, "ext": "py", "lang": "Python", "max_stars_repo_path": "appTest.py", "max_stars_repo_name": "SamuelJansen/central-point-query", "max_stars_repo_head_hexsha": "1cbf1d5fdab3efe653bfd4e24952d9a1883983ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "appTest.py", "max_issues_repo_name": "SamuelJansen/central-point-query", "max_issues_repo_head_hexsha": "1cbf1d5fdab3efe653bfd4e24952d9a1883983ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appTest.py", "max_forks_repo_name": "SamuelJansen/central-point-query", "max_forks_repo_head_hexsha": "1cbf1d5fdab3efe653bfd4e24952d9a1883983ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4900662252, "max_line_length": 255, "alphanum_fraction": 0.6997591191, "include": true, "reason": "import numpy", "num_tokens": 1395}
|
"""
Run Program: horovodrun -np 4 python3 pycylon_horovod_pytorch_example.py
"""
import argparse
import os
import socket
import horovod.torch as hvd
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from pycylon import CylonEnv
from pycylon import DataFrame
from pycylon.net import MPIConfig
from pycylon.util.logging import log_level, disable_logging
from sklearn.preprocessing import StandardScaler
log_level(0) # set an arbitrary log level
disable_logging() # disable logging completely
hostname = socket.gethostname()
def setup():
hvd.init()
mpi_config = MPIConfig()
env = CylonEnv(config=mpi_config, distributed=True)
rank = env.rank
print(f"Init Process Groups : => [{hostname}]Demo DDP Rank {rank}")
cuda_available = torch.cuda.is_available()
device = 'cuda:' + str(rank) if cuda_available else 'cpu'
if cuda_available:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(42)
return env, device, cuda_available
class Network(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(3, 1)
def forward(self, x):
y_pred = F.leaky_relu(self.linear(x))
return y_pred
def train(epoch, x_train, y_train, ddp_model, loss_fn, optimizer):
for x_batch, y_batch in zip(x_train, y_train):
print(f"Epoch {epoch}", end='\r')
prediction = ddp_model(x_batch)
loss = loss_fn(prediction, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
def demo_basic(epochs):
env, device, cuda_available = setup()
rank = env.rank
print(f"Simple Batch Train => [{hostname}]Demo DDP Rank {rank}")
base_path = "https://raw.githubusercontent.com/cylondata/cylon/main/cpp/src/tutorial/data/"
user_devices_file = os.path.join(base_path, f'user_device_tm_{rank + 1}.csv')
user_usage_file = os.path.join(base_path, f'user_usage_tm_{rank + 1}.csv')
print("Rank[{}] User Device File : {}".format(rank, user_devices_file))
print("Rank[{}] User Usage File : {}".format(rank, user_usage_file))
user_devices_data = DataFrame(pd.read_csv(user_devices_file)) # read_csv(user_devices_file, sep=',')
user_usage_data = DataFrame(pd.read_csv(user_usage_file)) # read_csv(user_usage_file, sep=',')
print(f"Rank [{rank}] User Devices Data Rows:{len(user_devices_data)}, Columns: {len(user_devices_data.columns)}")
print(f"Rank [{rank}] User Usage Data Rows:{len(user_usage_data)}, Columns: {len(user_usage_data.columns)}")
print("--------------------------------")
print("Before Join")
print("--------------------------------")
print(user_devices_data[0:5])
print("-------------------------------------")
print(user_usage_data[0:5])
join_df = user_devices_data.merge(right=user_usage_data, left_on=[0], right_on=[3], algorithm='hash')
print("----------------------")
print("Rank [{}] New Table After Join (5 Records)".format(rank))
print(join_df[0:5])
print("----------------------")
feature_df = join_df[
['_xplatform_version', '_youtgoing_mins_per_month', '_youtgoing_sms_per_month',
'_ymonthly_mb']]
feature_df.rename(
['platform_version', 'outgoing_mins_per_month', 'outgoing_sms_per_month', 'monthly_mb'])
if rank == 0:
print("Data Engineering Complete!!!")
print("=" * 80)
print("Rank [{}] Feature DataFrame ".format(rank))
print(feature_df[0:5])
print("=" * 80)
data_ar: np.ndarray = feature_df.to_numpy()
data_features: np.ndarray = data_ar[:, 0:3]
data_learner: np.ndarray = data_ar[:, 3:4]
x_train, y_train = data_features[0:100], data_learner[0:100]
x_test, y_test = data_features[100:], data_learner[100:]
x_train = np.asarray(x_train, dtype=np.float32)
y_train = np.asarray(y_train, dtype=np.float32)
x_test = np.asarray(x_test, dtype=np.float32)
y_test = np.asarray(y_test, dtype=np.float32)
sc = StandardScaler()
sct = StandardScaler()
x_train = sc.fit_transform(x_train)
y_train = sct.fit_transform(y_train)
    # reuse the scalers fitted on the training split for the test split
    x_test = sc.transform(x_test)
    y_test = sct.transform(y_test)
x_train = torch.from_numpy(x_train).to(device)
y_train = torch.from_numpy(y_train).to(device)
x_test = torch.from_numpy(x_test).to(device)
y_test = torch.from_numpy(y_test).to(device)
# create model and move it to GPU with id rank
lr = 0.01 # learning rate
model = Network()
loss_fn = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=lr)
optimizer.zero_grad()
# By default, Adasum doesn't need scaling up learning rate.
lr_scaler = 1
if cuda_available:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(model.parameters(), lr=lr * lr_scaler, momentum=0.01)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum,
gradient_predivide_factor=1.0)
if rank == 0:
print("Training A Dummy Model")
for epoch in range(epochs):
train(epoch, x_train, y_train, model, loss_fn, optimizer)
if rank == 0:
print("Data Analysis Complete!!!")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--backend",
help="example : 'mpi', 'nccl'",
default='mpi',
type=str)
parser.add_argument("-e", "--epochs",
help="training epochs",
default=10,
type=int)
parser.add_argument("-m", "--master_address",
help="master address for torch distributed runtime",
default='localhost',
type=str)
parser.add_argument("-p", "--port",
help="torch port for distributed runtime",
default='12335',
type=str)
args = parser.parse_args()
backend = args.backend
demo_basic(epochs=args.epochs)
|
{"hexsha": "7c5e4f25c9657e0dbd6d852c3ff3fa8db8933e82", "size": 6895, "ext": "py", "lang": "Python", "max_stars_repo_path": "horovod/pycylon_horovod_pytorch_example.py", "max_stars_repo_name": "vibhatha/cylon_applications", "max_stars_repo_head_hexsha": "6af9bdf3b4738c6cc4cf2c3c181a5c60f049d113", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-01-12T17:18:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T05:37:14.000Z", "max_issues_repo_path": "horovod/pycylon_horovod_pytorch_example.py", "max_issues_repo_name": "vibhatha/cylon_applications", "max_issues_repo_head_hexsha": "6af9bdf3b4738c6cc4cf2c3c181a5c60f049d113", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-01-06T12:49:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-20T16:50:16.000Z", "max_forks_repo_path": "horovod/pycylon_horovod_pytorch_example.py", "max_forks_repo_name": "vibhatha/cylon_applications", "max_forks_repo_head_hexsha": "6af9bdf3b4738c6cc4cf2c3c181a5c60f049d113", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9114583333, "max_line_length": 118, "alphanum_fraction": 0.6274111675, "include": true, "reason": "import numpy", "num_tokens": 1609}
|
import cirq
import numpy as np
import pandas as pd
from typing import List
from qnn.qnlp.circuits_words import CircuitsWords
def get_overall_run_words(trial_result: cirq.TrialResult, num: int):
""" Takes the average of the measurements of a given qubit on a given circuit
(the results are on the form of a bitstring)
If the qubit is in state $|1>$ all the measurement are going to be 1 forming a bitstring of then length of
number of measurement and consisting of all ones.
Args:
trial_result (cirq.TrialResult): the result of the simulation of a specific circuit
num (int): the number of the qubit on which the average is taken (starting from 0)
Returns:
        float: a value between 0 and 1 that is the average of the measurements of the specified qubit
"""
a = 0
index_num = 0
string = '(' + str(num) + ', 0)' # localizes the qubit specified by num
    # (column names in the cirq.TrialResult data are of the form "(x, 0)", with x the qubit number)
    for i in trial_result.data.columns:  # the .data attribute of cirq.TrialResult is a pd.DataFrame
if i == string:
index_num = a
a = a + 1
dict_result = trial_result.data.transpose()
values = dict_result.values[index_num] # gets the bitstring of the specified qubit
# values is a numpy array
    return sum(values) / len(values)  # sum the bits and normalize by the length of the bitstring
def cost_global_words(trial_results: List[cirq.TrialResult], expected_bits: List[List[float]]):
""" Given a list of circuits results, returns the global cost of those results doing a average of the local cost
of each circuits. The local cost evaluated using the mean squared error method. And the global cost is the average
of all the local cost of all the circuits (that are used in the optimization).
The cost would be zero in the case that all qubits are in the desired basis state (|0> or |1>).
Args:
trial_results: a list of cirq.TrialResult of the circuits on which the cost is taken
expected_bits: a list of the bits that the qubits are supposed to be at (0 for qubit in |0> and 1 for |1>)
(e.g. bits = [[1, 0, 0], [0, 1, 1]] where bits[0] are the expected bits for the first circuit)
Returns:
float: the global cost evaluated using mean squared error
"""
    result_global = []
    for a in range(len(trial_results)):
        result = []  # squared errors for circuit a only
        for i in range(len(trial_results[a].data.columns)):  # get the result of a specific qubit
            # calculate the squared error (Y' - Y)^2
            result.append((get_overall_run_words(trial_results[a], i) - (expected_bits[a])[i])**2)
        # normalize the sum of the squared errors by the number of qubits
        result_global.append((1 / (2 * len(trial_results[a].data.columns))) * sum(result))
# evaluate the average of all the local cost
return sum(result_global) / len(trial_results)
def g_parameter_shift_global_words(circuits_object: CircuitsWords,
param: int,
theta_sample: np.array,
expected_bits: List[List[float]]):
""" Given a CircuitsWords and a parameter, takes the gradient of the circuits (all of the circuits in the object)
witch respect to that parameter.
The parameter shift method is the one used (Eq. 2)
Args:
circuits_object: the circuit on which the gradient is taken
param: the parameter that the gradient is taken respected to
theta_sample: the parameters of the circuit
expected_bits: a list of the bits that the qubits are supposed to be at (to evaluate the cost function)
Returns:
float: The gradient of the circuits with respect to a specific parameter
"""
# creates the perturbation vector
perturbation_vector = np.zeros(len(theta_sample))
perturbation_vector[param] = 1
# creates the new parameters (only updating the parameter specified with param)
pos_theta = theta_sample + (np.pi / 4) * perturbation_vector
neg_theta = theta_sample - (np.pi / 4) * perturbation_vector
# simulates the new results with CircuitsWords.sample_run_global()
pos_result = circuits_object.sample_run_global(pos_theta, 100)
neg_result = circuits_object.sample_run_global(neg_theta, 100)
return cost_global_words(pos_result, expected_bits) - cost_global_words(neg_result, expected_bits)
def get_expected_bits(data_frame: pd.DataFrame,
num_phrases: int,
num_qubits: int):
""" Gets the expected bits of the circuits according to they desired output.
The number of phrases and the number of qubits is needed, that is in case the optimization is on fewer phrases that the
ones on the database. The number of qubits is needed because the length of the expected bits has to match with it.
Args:
data_frame: the DataFrame that corresponds to the database (using the function extract_words)
num_phrases: the number of phrases used in the optimization
num_qubits: the number of qubits used in the optimization
Returns:
List: expected bits of all the circuits
"""
expected_bits = [[] for __ in range(num_phrases)]
a = 0
for j in data_frame.transpose().values[4][:num_phrases]:
index = 0
for i in data_frame.transpose().values[0]:
if i == j:
break
index += 1
if index == 84:
raise ValueError('Word not found')
bit = bin(index)[2:]
for _ in range(num_qubits - len(bit)):
expected_bits[a].append(0)
for __ in bit:
expected_bits[a].append(int(__))
a += 1
return expected_bits
|
{"hexsha": "61338eafe37a3edfeddacf3a5680596929dc166e", "size": 5446, "ext": "py", "lang": "Python", "max_stars_repo_path": "qnn/qnlp/optimization_words.py", "max_stars_repo_name": "tomiock/QNNs", "max_stars_repo_head_hexsha": "9822aac1b56e617f92bc5e3670d06285047e5066", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "qnn/qnlp/optimization_words.py", "max_issues_repo_name": "tomiock/QNNs", "max_issues_repo_head_hexsha": "9822aac1b56e617f92bc5e3670d06285047e5066", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2020-12-16T15:12:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-14T09:19:12.000Z", "max_forks_repo_path": "qnn/qnlp/optimization_words.py", "max_forks_repo_name": "tomiock/QNNs", "max_forks_repo_head_hexsha": "9822aac1b56e617f92bc5e3670d06285047e5066", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-07T09:42:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-07T09:42:49.000Z", "avg_line_length": 43.2222222222, "max_line_length": 119, "alphanum_fraction": 0.7258538377, "include": true, "reason": "import numpy", "num_tokens": 1349}
|
# -*- coding: utf-8 -*-
import logging
import os
import sys
import matplotlib.pyplot as plt
import tempfile
import numpy as np
import wave
import subprocess
from PyQt4 import QtCore
from PyQt4 import QtGui
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
point_per_second = 44100
up_interval = 1  # refresh the plot every 1 s
max_display = 44100*10  # keep the most recent 10 s of samples on screen
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(os.path.dirname(sys.argv[0]), "wave_plot.log"),
filemode='w'
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.root.addHandler(console)
def run_shell(command):
logging.info(command)
cmd = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
for info in cmd.communicate():
logging.info(info)
class WavePlot(object):
def __init__(self, file):
if not os.path.exists(file):
raise Exception("file isn't exists!")
self.fp = self.mp3_to_wav(file) if os.path.splitext(file)[-1] != ".wav" else wave.open(file)
def __del__(self):
if self.fp:
self.fp.close()
def __getattr__(self, item):
pass
def mp3_to_wav(self, file):
try:
self.tmp_wav = os.path.join(tempfile.mkdtemp(), os.path.basename(file).replace(".mp3", ".wav") )
command = "ffmpeg -i %s %s" % (file, self.tmp_wav)
run_shell(command)
            if os.path.exists(self.tmp_wav):
                return wave.open(self.tmp_wav)
        except Exception as ex:
            logging.error(ex)
        return None
def get_data(self):
if not self.fp:
logging.info("no fp!")
return {}
params = self.fp.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]
global point_per_second
point_per_second = framerate
str_data = self.fp.readframes(nframes)
        # np.frombuffer reads the raw PCM bytes without copying
        wave_data = np.frombuffer(str_data, dtype=np.short)
wave_data.shape = -1, nchannels
wave_data = wave_data.T
time = np.arange(0, nframes) * (1.0 / framerate)
return {"x":time, 'ys':wave_data}
@staticmethod
def draw(**kwargs):
if not kwargs:
logging.info("no data!")
return
time = kwargs['x']
wave_data = kwargs['ys']
plt.subplot(211)
plt.plot(time, wave_data[0])
plt.subplot(212)
plt.plot(time, wave_data[1], c="g")
plt.show()
class Wave(QtGui.QDialog):
def __init__(self, data, parent=None):
super(Wave, self).__init__(parent)
self.setFixedSize(1080, 720)
self.data = data
self.flag = 0
self.pos = 0
self.dy_start = 0
self.times = len(self.data['x'])
self.figure = plt.figure()
self.ax = self.figure.add_subplot(111)
self.canvas = FigureCanvas(self.figure)
self.toolbar = NavigationToolbar(self.canvas, self)
self.button = QtGui.QPushButton('start')
self.button.clicked.connect(self.control)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
layout.addWidget(self.button)
self.setLayout(layout)
self.timer=QtCore.QTimer()
QtCore.QObject.connect(self.timer, QtCore.SIGNAL("timeout()"), self.plot)
self.timer.start(up_interval*1000)
def control(self):
self.flag = 0 if self.flag else 1
if self.flag:
self.button.setText("stop")
else:
self.button.setText("start")
def plot(self):
if self.flag:
end = self.pos + up_interval*point_per_second
if end - self.dy_start > max_display:
self.ax.cla()
self.dy_start = self.pos
self.ax.plot(self.data['x'][self.pos:end], self.data['ys'][0][self.pos:end])
self.ax.set_xlabel("Time(s)")
self.ax.set_ylabel("Hz")
self.pos = self.pos + up_interval*point_per_second
if not self.pos > self.times:
self.canvas.draw()
if __name__ == '__main__':
wp = WavePlot("D:/1.mp3")
data = wp.get_data()
app = QtGui.QApplication(sys.argv)
wave = Wave(data)
wave.show()
sys.exit(app.exec_())
|
{"hexsha": "55b8d8d88cb42117086883871b24eb24f0156212", "size": 4562, "ext": "py", "lang": "Python", "max_stars_repo_path": "wav_plot.py", "max_stars_repo_name": "QuantumEnergyE/wave_plot", "max_stars_repo_head_hexsha": "af2c03d90ca23ddca35051298ad9d1f4e514e932", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wav_plot.py", "max_issues_repo_name": "QuantumEnergyE/wave_plot", "max_issues_repo_head_hexsha": "af2c03d90ca23ddca35051298ad9d1f4e514e932", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wav_plot.py", "max_forks_repo_name": "QuantumEnergyE/wave_plot", "max_forks_repo_head_hexsha": "af2c03d90ca23ddca35051298ad9d1f4e514e932", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8243243243, "max_line_length": 108, "alphanum_fraction": 0.6093818501, "include": true, "reason": "import numpy", "num_tokens": 1083}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Wordle Solver"""
import json
import logging
from pathlib import Path
from collections import Counter
from functools import cached_property
import networkx as nx
from .vocab import Vocabulary
from .wordle import Wordle
from .defaults import COVERAGE_CACHE
###############################################################################
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
###############################################################################
class WordleSolver:
def __init__(
self,
wordle: Wordle = None,
coverage_cache: str or Path = COVERAGE_CACHE
):
self.wordle = wordle
self.vocabulary = Vocabulary()
self.valid_words = set(self.vocabulary.vocab)
self.num_attempts = 0
self.guesses = {}
self.known_letters = {}
self.build_graph()
if Path(coverage_cache).exists():
with open(coverage_cache, mode="r") as f:
self.coverage = json.load(f)
else:
with open(coverage_cache, mode="w") as f:
json.dump(self.coverage, f, indent=2)
def build_graph(self):
self.graph = nx.DiGraph()
self.markers = set()
for source_dict in ['letter', 'letter_position']:
for letter, words in self.vocabulary.index[source_dict].items():
for word in words:
self.graph.add_edge(letter, word)
self.markers.add(letter)
def calculate_coverage(self, letters, omit_known: bool = True):
"""Calculate coverage of specified set of letters"""
omit_set = set(self.known_letters) if omit_known else set()
node_boundary = nx.algorithms.boundary.node_boundary(
self.graph, set(letters) - omit_set
)
graph_words_count = self.graph.number_of_nodes() - len(self.markers)
return len(node_boundary) / graph_words_count * 100
@cached_property
def coverage(self):
return {
word: self.calculate_coverage(word)
for word in self.vocabulary.vocab
}
def reset_coverage(self):
if hasattr(self, 'coverage'):
delattr(self, 'coverage')
def eliminate(self, markers, words=None):
# marker is one of the following,
# - a letter, such as, 'a', 'e' etc.
# - a letter_position, such as, 'e3', 'a1' etc.
remove_words = words or set()
for marker in markers:
for source_dict in ['letter', 'letter_position']:
for word in self.vocabulary.index[source_dict].get(marker, []):
remove_words.add(word)
self.graph.remove_nodes_from(remove_words)
count_before = len(self.valid_words)
self.valid_words -= remove_words
count_after = len(self.valid_words)
LOGGER.info(f"Eliminated {count_before - count_after} options.")
self.reset_coverage()
def top_coverage(
self,
n: int = None,
avoid_set: set = None,
coverage_min: int = 0,
coverage_max: int = 100
):
if avoid_set is None:
avoid_set = set()
return Counter({
k: v for k, v in self.coverage.items()
if (
not set(k).intersection(avoid_set)
and coverage_min < v < coverage_max
)
}).most_common(n)
def best_options(self):
options_from_valid_words = self.get_options_from_valid_words()
if self.wordle:
            # if there are fewer valid words than attempts left,
            # start guessing them directly
attempts_left = self.wordle.max_attempts - self.wordle.num_attempts
choose_from_valid_words = len(self.valid_words) <= attempts_left
if choose_from_valid_words:
return options_from_valid_words
avoid_set = {
k
for k, v in self.known_letters.items()
if isinstance(v, int)
}
options_with_top_coverage = self.top_coverage(
n=100,
avoid_set=avoid_set
)
options = sorted(
set(options_with_top_coverage + options_from_valid_words),
key=lambda x: x[1], reverse=True
)
top_values = sorted(set(x[1] for x in options), reverse=True)
if options:
possible_best_options = [
option
for option in options
if option[1] in top_values[:3]
]
else:
possible_best_options = []
pruned_options = []
for option in possible_best_options:
_word, _coverage = option
if _word in self.guesses:
continue
for _position, _letter in enumerate(_word):
if isinstance(self.known_letters.get(_letter), set):
if _position in self.known_letters[_letter]:
break
else:
pruned_options.append(option)
return pruned_options or options_from_valid_words
def get_options_from_valid_words(self):
return sorted([
(word, self.coverage[word])
for word in self.valid_words
if word not in self.guesses
], key=lambda x: x[1], reverse=True)
def handle_result(self, result):
if not result:
return
self.num_attempts += 1
eliminate_markers = set()
remove_words = set()
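        # Wordle feedback per letter: 0 = letter absent, 1 = letter present
        # but in the wrong position, 2 = letter in the correct position.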
for idx, (letter, score) in enumerate(result):
if score == 0:
eliminate_markers.add(letter)
if score == 1:
if letter not in self.known_letters:
self.known_letters[letter] = set()
if isinstance(self.known_letters[letter], set):
self.known_letters[letter].add(idx)
eliminate_markers.add(f"{letter}{idx}")
for word in self.valid_words:
if letter not in word:
remove_words.add(word)
if score == 2:
self.known_letters[letter] = idx
for other_letter in self.vocabulary.alphabet:
if other_letter != letter:
eliminate_markers.add(f"{other_letter}{idx}")
self.eliminate(eliminate_markers, words=remove_words)
def guess(self, option=None):
if self.wordle is None:
LOGGER.error("No Wordle is defined.")
return False
if option is None:
options = self.best_options()
option, coverage = options[0]
else:
coverage = self.calculate_coverage(option)
LOGGER.info(f"Guessing '{option}' (Coverage: {coverage})")
result = self.wordle.guess(option)
if result:
self.handle_result(result)
if self.wordle.solved:
LOGGER.info(f"Solved in {self.num_attempts} attempts.")
def solve(self):
if self.wordle is None:
LOGGER.error("No Wordle is defined.")
return False
while not self.wordle.solved and not self.wordle.failed:
self.guess()
|
{"hexsha": "50b299b723395ebfbc522d00056cc5baf88e3a98", "size": 7287, "ext": "py", "lang": "Python", "max_stars_repo_path": "wordle/solver.py", "max_stars_repo_name": "hrishikeshrt/python-wordle", "max_stars_repo_head_hexsha": "574f4476a0a3b35ebb0030babc8bd49d8107f34c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-02T21:11:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T21:11:14.000Z", "max_issues_repo_path": "wordle/solver.py", "max_issues_repo_name": "hrishikeshrt/wordle", "max_issues_repo_head_hexsha": "574f4476a0a3b35ebb0030babc8bd49d8107f34c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wordle/solver.py", "max_forks_repo_name": "hrishikeshrt/wordle", "max_forks_repo_head_hexsha": "574f4476a0a3b35ebb0030babc8bd49d8107f34c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-02T21:31:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T21:31:31.000Z", "avg_line_length": 32.3866666667, "max_line_length": 79, "alphanum_fraction": 0.5614107314, "include": true, "reason": "import networkx", "num_tokens": 1483}
|
import networkx as nx
from tools import safe_sample
from nature import add_node
MIN_LAYERS, MAX_LAYERS = 1, 3
MIN_NODES, MAX_NODES = 1, 2
def Regulon(parent=None):
n_layers = safe_sample(MIN_LAYERS, MAX_LAYERS)
M, ids = nx.MultiDiGraph(), []
for layer_number in range(n_layers):
n_nodes = safe_sample(MIN_NODES, MAX_NODES)
ids_of_nodes_in_this_layer = []
for node_number in range(n_nodes):
node_id = f"{layer_number}.{node_number}"
if parent:
node_id = f"{parent}.{node_id}"
add_node(M, node_id, "black", "square", "brick")
ids_of_nodes_in_this_layer.append(node_id)
ids.append(ids_of_nodes_in_this_layer)
for predecessor_layer_number, predecessor_node_ids in enumerate(ids):
for successor_layer_number, successor_node_ids in enumerate(ids):
if predecessor_layer_number < successor_layer_number:
for predecessor_node_id in predecessor_node_ids:
for successor_node_id in successor_node_ids:
M.add_edge(predecessor_node_id, successor_node_id)
return M, ids
|
{"hexsha": "54217b9f17adf3a2f406e0a6c57b26d895d39cab", "size": 1155, "ext": "py", "lang": "Python", "max_stars_repo_path": "nature/bricks/graph/evolve/regulon.py", "max_stars_repo_name": "bionicles/neuromax", "max_stars_repo_head_hexsha": "a53a17a1c033c11ac607a9e28f43b1f906e58aad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nature/bricks/graph/evolve/regulon.py", "max_issues_repo_name": "bionicles/neuromax", "max_issues_repo_head_hexsha": "a53a17a1c033c11ac607a9e28f43b1f906e58aad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nature/bricks/graph/evolve/regulon.py", "max_forks_repo_name": "bionicles/neuromax", "max_forks_repo_head_hexsha": "a53a17a1c033c11ac607a9e28f43b1f906e58aad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5, "max_line_length": 74, "alphanum_fraction": 0.6735930736, "include": true, "reason": "import networkx", "num_tokens": 262}
|
from fundopt.fundtsloader import load_funds
import pandas as pd
import numpy as np
import datetime as dt
import logging
from arctic import Arctic # pyright: reportMissingImports=false
from pymongo import MongoClient
import keyring
import ssl
client = MongoClient("localhost")
# client = MongoClient(keyring.get_password('atlas', 'connection_string'), ssl_cert_reqs=ssl.CERT_NONE)
a = Arctic(client)
lib = a['fund']
if __name__ == "__main__":
logging.basicConfig( level = logging.INFO )
start = dt.date(2020, 1, 1)
end = dt.date(2021, 9, 30)
holding = 20
funds = lib.list_symbols()
funds.remove('003254') # data issue
funds.remove('003255') # data issue
funds.remove('001481') # QDII funds, not sold on Ant Financial
funds.remove('512310') # stopped funds
funds.remove('512340') # stopped funds
fund_returns=load_funds(funds, start, end, holding)
fund_returns.to_pickle('./{}_{}_{}.pkl'.format(start, end, holding))
|
{"hexsha": "3b03ab83cb454909408ef5ae71b5b8c67c716b32", "size": 983, "ext": "py", "lang": "Python", "max_stars_repo_path": "load_fund.py", "max_stars_repo_name": "joshualee155/FundOptimizer", "max_stars_repo_head_hexsha": "da842de6c99f89c767d03c9ef1b392237b726a3f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-03T00:46:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T02:48:51.000Z", "max_issues_repo_path": "load_fund.py", "max_issues_repo_name": "joshualee155/FundOptimizer", "max_issues_repo_head_hexsha": "da842de6c99f89c767d03c9ef1b392237b726a3f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "load_fund.py", "max_forks_repo_name": "joshualee155/FundOptimizer", "max_forks_repo_head_hexsha": "da842de6c99f89c767d03c9ef1b392237b726a3f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-08-28T11:04:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-28T11:04:00.000Z", "avg_line_length": 28.9117647059, "max_line_length": 103, "alphanum_fraction": 0.7070193286, "include": true, "reason": "import numpy", "num_tokens": 261}
|
"""
Simple linear regression example in TensorFlow
This program tries to predict the number of thefts from
the number of fires in the city of Chicago
"""
# pylint: disable=invalid-name
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
DATA_FILE = 'data/fire_theft.xls'
LOG_FILE = 'logs/fire_theft'
LEARNING_RATE = 0.0001
NUM_EPOCH = 500
# Standardize X values.
# Calculate Z score with mean = 0, sd = 1
def feature_standardize(dataset):
mu = np.mean(dataset, axis=0)
sigma = np.std(dataset, axis=0)
return (dataset - mu) / sigma
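# Example: feature_standardize(np.array([0.0, 5.0, 10.0])) returns roughly
# [-1.2247, 0.0, 1.2247] (zero mean, unit population standard deviation).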
# Step 1: read in data from the .xls file
book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Normalize X values
all_X, all_Y = data.T[0], data.T[1]
all_X = feature_standardize(all_X)
all_X = np.transpose([all_X])
all_Y = np.transpose([all_Y])
# Step 2: create placeholders for input X (number of fire) and label Y
# (number of theft)
X = tf.placeholder(tf.float32, [None, 1], name='X')
Y = tf.placeholder(tf.float32, [None, 1], name='Y')
# Step 3: create weight and bias, initialized to 0
w1 = tf.Variable(tf.zeros([1, 1]), name='weights1')
w2 = tf.Variable(tf.zeros([1, 1]), name='weights2')
w3 = tf.Variable(tf.zeros([1, 1]), name='weights3')
b = tf.Variable(tf.zeros([1]), name='bias')
# Step 4: build model to predict Y
#Y_predicted = X * w1 + b
#Y_predicted = X ** 2 * w2 + X * w1 + b
Y_predicted = X ** 3 * w3 + X ** 2 * w2 + X * w1 + b
# Step 5: use the square error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
mean_loss = tf.reduce_mean(tf.square(Y - Y_predicted), name='mean_loss')
# tf.summary.scalar needs a scalar tensor, so summarize the mean loss
loss_sum = tf.summary.scalar("loss", mean_loss)
# Step 6: use gradient descent with learning rate LEARNING_RATE (0.0001) to minimize loss
optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)
sess = tf.Session()
# Merge all the summaries and write them out to log file
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(LOG_FILE, sess.graph)
init = tf.global_variables_initializer()
sess.run(init)
# Step 7: initialize the necessary variables, in this case, w and b
sess.run(tf.global_variables_initializer())
all_feed={X: all_X, Y: all_Y}
# Step 8: train the model
for i in range(NUM_EPOCH):  # train the model NUM_EPOCH times
    _, l = sess.run([optimizer, loss], feed_dict=all_feed)
    total_loss = l.sum()
#print('Epoch {0}: Total lost: {1}'.format(i, total_loss))
#print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
print('Epoch {0}: Mean: {1}'.format(i, sess.run(mean_loss, feed_dict={X: all_X, Y: all_Y})))
# close the writer when you're done using it
writer.close()
# Step 9: output the values of w and b
Y_pred = sess.run(Y_predicted, feed_dict={X: all_X, Y: all_Y})
sess.close()
# plot the results
X, Y = all_X, all_Y
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, Y_pred, 'ro', label='Predicted data')
plt.legend()
plt.show()
'''
LEARNING_RATE = 0.0001
NUM_EPOCH = 100
Linear Epoch 99: Mean: 360.8202209472656
Quad Epoch 99: Mean: 333.27801513671875
Cube Epoch 999: Mean: 143.69219970703125
Cube Epoch 499: Mean: 197.4280548095703
'''
|
{"hexsha": "34ace8ced91080f8dd0809bca4b704466135ead1", "size": 3262, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw9/hw9_p2_normalized.py", "max_stars_repo_name": "shanaka-desoysa/tensorflow", "max_stars_repo_head_hexsha": "0effc668f42b64bd0712240ab2f5e8a8be42960f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw9/hw9_p2_normalized.py", "max_issues_repo_name": "shanaka-desoysa/tensorflow", "max_issues_repo_head_hexsha": "0effc668f42b64bd0712240ab2f5e8a8be42960f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw9/hw9_p2_normalized.py", "max_forks_repo_name": "shanaka-desoysa/tensorflow", "max_forks_repo_head_hexsha": "0effc668f42b64bd0712240ab2f5e8a8be42960f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.6545454545, "max_line_length": 96, "alphanum_fraction": 0.7032495402, "include": true, "reason": "import numpy", "num_tokens": 977}
|
import numpy as np
from scipy.stats import f
class MVRCalculator:
"""
Class holds the calculations needed to perform the regression
    on some data. Used to separate out the data and calculations.
"""
@staticmethod
def searchValue(f, target,
tolerance=0.000001, start=0, step_size=1, damping=0.5):
"""
        Finds x for a given target y, for a given monotonic function f(x).
        Works iteratively through values of x to find the target f(x)
        value; once the target is 'found', the step gets reversed
        and damped until the target is matched within the given tolerance.
"""
def stepDirection(increasing, lower):
"""
            Finds whether x should increase or decrease,
            depending on whether f(x) is an increasing or decreasing
            function and whether f(x_0) is lower than the target
"""
if (increasing and lower) or (not increasing and not lower):
return 1
else:
return -1
x,error,a0,a1 = start, tolerance+1, f(start), f(start+step_size)
increasing, start_lower = a1 > a0, a0 < target
step_direction = stepDirection(increasing, start_lower)
step = step_direction * step_size
while abs(error) > tolerance :
x = x + step
a = f(x)
error = target - a
lower = error > 0
new_step_direction = stepDirection(increasing, lower)
# If true, the target x is between f(x) and f(x-step)
if step_direction != new_step_direction:
step_size = damping * step_size
step = new_step_direction * step_size
return x
@staticmethod
def addOnesToData(x,ndata,ndim):
"""Adds a column of 1s to a given input vector or matrix"""
#if len(x.shape) == 1:
# x = np.expand_dims(x, axis=0)
x = x.reshape(ndata,ndim)
return np.append(x,np.ones((ndata,1)), axis=1)
@staticmethod
def calcSumProduct(vector1,vector2):
"""Returns the sum of the product of two vectors"""
return np.sum(vector1 * vector2)
@staticmethod
def calcCorrelation(ndim, x_y_variance, x_variance_sq, y_variance_sq):
"""
Calculates the correlation between x and y data
for each x dimension
"""
coefficients = np.zeros(ndim)
for n in range(0,ndim):
coefficients[n] = x_y_variance[n] / np.sqrt(
x_variance_sq[n] * y_variance_sq)
return coefficients
@staticmethod
def calcRegression(s_matrix,x_matrix,y):
"""Calculates the regression equation (a_0 -> a_n + b)"""
return np.dot(s_matrix, np.dot(x_matrix.T, y))
@staticmethod
def findSMatrix(x_matrix):
return np.linalg.inv(np.dot(x_matrix.T,x_matrix))
@staticmethod
def findAdjustedRSquared(sum_errors_sq,y_variance_sq,ndata,df):
"""
Finds R^2, adjusted for the fact that normally R^2 will
        increase for added predictor variables regardless of whether the
        variable is a good predictor or not.
"""
return 1 - ((sum_errors_sq / df) / (y_variance_sq / (ndata - 1)))
@staticmethod
def getMahalanobisDistance(x_n, x_bar, ndim, ndata, s_matrix):
"""Get the mahalanobis distance of a given x_n"""
x = (x_n - x_bar).reshape(ndim,1)
return np.dot(x.T,np.dot(s_matrix[:-1,:-1],x)) * (ndata - 1)
@staticmethod
def findCriticalFValue(ndim, df, significance):
"""
Find F distribution values, used as critical values in
Analysis of variance tests.
"""
return MVRCalculator.searchValue(lambda z: f.cdf(z,ndim,df),
significance)
@staticmethod
def getConfidenceInterval(
sum_errors_sq, df, ndata, mahalanobis_distance, fval):
"""
Interval range for the mean value of a predicted y, to account
for the variance in the population data. With the confidence
(e.g. 0.95) determined by fval.
"""
return np.sqrt(fval
* (1/ndata + mahalanobis_distance / (ndata -1))
* (sum_errors_sq / df))
@staticmethod
def getPredictionInterval(
sum_errors_sq, df, ndata, mahalanobis_distance, fval):
"""
Interval range to give a probable range of future values.
This range will be higher than the confidence interval,
to account for the fact that the mean predicted value
can vary by the confidence value, and then additionally
the value can vary from that mean.
"""
return np.sqrt(fval
* (1 + 1/ndata + mahalanobis_distance / (ndata - 1))
* (sum_errors_sq / df))
@staticmethod
def getMatrixWidth(v):
"""Function to find the width of a given numpy vector or matrix"""
if len(np.shape(v)) > 1:
return np.shape(v)[1]
else:
return 1
@staticmethod
    def autoCorrelationTest(y_error, sum_errors_sq):
"""
Check for auto correlation in our y data using the
Durbin-Watson statistic, a result lower than 1
may indicate the presence of autocorrelation.
"""
residual = y_error[1:] - y_error[:-1]
return (MVRCalculator.calcSumProduct(residual, residual)
/ sum_errors_sq)
@staticmethod
def calcAverage(m):
return np.mean(m,axis=0)
@staticmethod
def calcVariance(v,v_bar):
return v - v_bar
@staticmethod
def calcTestStatisticAllX(y_variance_sq,sum_errors_sq,ndim,df):
"""
Calculate the test statistic for the analysis of variance
where the Null hypothesis is that the population A_1 -> A_n
are all equal to 0. Such that the null hypothesis gets
rejected if any A_x != 0.
"""
return (((y_variance_sq - sum_errors_sq) / ndim)
/ (sum_errors_sq / df))
@staticmethod
def calcTestStatisticSingleX(regression, s_matrix, sum_errors_sq, n, df):
"""
Calculate the test statistic for the analysis of variance
where the Null hypothesis is that the population A_n is 0.
Such that the null hypothesis gets rejected if A_n != 0.
"""
return (regression[n]**2 / s_matrix[n,n]) / (sum_errors_sq / df)
|
{"hexsha": "0d273f529c35fcd4b1f7f0b7c8b1f48fd07a931b", "size": 6544, "ext": "py", "lang": "Python", "max_stars_repo_path": "mvr_calc.py", "max_stars_repo_name": "richhaar/multivariable-linear-regression", "max_stars_repo_head_hexsha": "af195fed6031e813da5614c0b77ab2a74190c8b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mvr_calc.py", "max_issues_repo_name": "richhaar/multivariable-linear-regression", "max_issues_repo_head_hexsha": "af195fed6031e813da5614c0b77ab2a74190c8b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mvr_calc.py", "max_forks_repo_name": "richhaar/multivariable-linear-regression", "max_forks_repo_head_hexsha": "af195fed6031e813da5614c0b77ab2a74190c8b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7595628415, "max_line_length": 77, "alphanum_fraction": 0.5973410758, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1528}
|
import numpy
import csv
import matplotlib.pyplot as plt
import pprint
# change leafsize according to need (a value around 2000 was used originally)
def kdtree( data, leafsize=10 ):
ndim = data.shape[0]
ndata = data.shape[1]
# find bounding hyper-rectangle
hrect = numpy.zeros((2,data.shape[0]))
hrect[0,:] = data.min(axis=1)
hrect[1,:] = data.max(axis=1)
# create root of kd-tree
idx = numpy.argsort(data[0,:], kind='mergesort')
data[:,:] = data[:,idx]
    splitval = data[0,ndata//2]
left_hrect = hrect.copy()
right_hrect = hrect.copy()
left_hrect[1, 0] = splitval
right_hrect[0, 0] = splitval
tree = [(None, None, left_hrect, right_hrect, None, None)]
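    # each tree entry is a 6-tuple:
    #   (indices, data, left_hrect, right_hrect, left_child, right_child)
    # internal nodes carry the hyper-rectangles; leaves carry the data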
    stack = [(data[:,:ndata//2], idx[:ndata//2], 1, 0, True),
             (data[:,ndata//2:], idx[ndata//2:], 1, 0, False)]
# recursively split data in halves using hyper-rectangles:
while stack:
# pop data off stack
data, didx, depth, parent, leftbranch = stack.pop()
ndata = data.shape[1]
nodeptr = len(tree)
# update parent node
_didx, _data, _left_hrect, _right_hrect, left, right = tree[parent]
tree[parent] = (_didx, _data, _left_hrect, _right_hrect, nodeptr, right) if leftbranch \
else (_didx, _data, _left_hrect, _right_hrect, left, nodeptr)
# insert node in kd-tree
# leaf node?
if ndata <= leafsize:
_didx = didx.copy()
_data = data.copy()
leaf = (_didx, _data, None, None, 0, 0)
tree.append(leaf)
# if not a leaf, split the data in two
else:
splitdim = depth % ndim
idx = numpy.argsort(data[splitdim,:], kind='mergesort')
data[:,:] = data[:,idx]
didx = didx[idx]
nodeptr = len(tree)
            stack.append((data[:,:ndata//2], didx[:ndata//2], depth+1, nodeptr, True))
            stack.append((data[:,ndata//2:], didx[ndata//2:], depth+1, nodeptr, False))
            splitval = data[splitdim,ndata//2]
if leftbranch:
left_hrect = _left_hrect.copy()
right_hrect = _left_hrect.copy()
else:
left_hrect = _right_hrect.copy()
right_hrect = _right_hrect.copy()
left_hrect[1, splitdim] = splitval
right_hrect[0, splitdim] = splitval
# append node to tree
tree.append((None, None, left_hrect, right_hrect, None, None))
return tree
b=[]
with open('synth.te.csv') as inputfile:
results = csv.reader(inputfile)
for row in results:
a=[]
a.append(float(row[0]))
a.append(float(row[1]))
b.append(a)
b1=numpy.array(b)
b2=b1.transpose()
l=kdtree(b2,400)
nodes=[]
for i in l:
    if i[2] is None and i[3] is None and i[4] == 0 and i[5] == 0:
        # centroid of the leaf: average each coordinate row
        # (row 0 holds the x coordinates, row 1 the y coordinates)
        sumx = float(i[1][0].sum()) / len(i[1][0])
        sumy = float(i[1][1].sum()) / len(i[1][1])
point=[]
point.append(sumx)
point.append(sumy)
nodes.append(point)
print(nodes)
pp=pprint.PrettyPrinter(indent=4)
pp.pprint(l)
x_val=[]
y_val=[]
for i in b:
x_val.append(float(i[0]))
y_val.append(float(i[1]))
cent_x=[]
cent_y=[]
for i in nodes:
cent_x.append(float(i[0]))
cent_y.append(float(i[1]))
plt.plot(x_val,y_val,'ro')
plt.plot(cent_x,cent_y,'bo')
plt.show()
|
{"hexsha": "10e91a18a52008ddce42682ab629418cd0f8526d", "size": 3555, "ext": "py", "lang": "Python", "max_stars_repo_path": "k-d_tree.py", "max_stars_repo_name": "smellycattt/Data-Mining-Project", "max_stars_repo_head_hexsha": "83f3873df9ccecbfc5bdfc8e1fc6766acb4a2c8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-11T11:37:06.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-11T11:37:06.000Z", "max_issues_repo_path": "k-d_tree.py", "max_issues_repo_name": "smellycattt/Data-Mining-Project", "max_issues_repo_head_hexsha": "83f3873df9ccecbfc5bdfc8e1fc6766acb4a2c8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "k-d_tree.py", "max_forks_repo_name": "smellycattt/Data-Mining-Project", "max_forks_repo_head_hexsha": "83f3873df9ccecbfc5bdfc8e1fc6766acb4a2c8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1393442623, "max_line_length": 97, "alphanum_fraction": 0.5434599156, "include": true, "reason": "import numpy", "num_tokens": 1004}
|
# Raised Cosine distribution
#
# Ref: http://en.wikipedia.org/wiki/Raised_cosine_distribution
#
immutable Cosine <: ContinuousUnivariateDistribution
μ::Float64
σ::Float64
Cosine(μ::Real, σ::Real) = (@check_args(Cosine, σ > zero(σ)); new(μ, σ))
Cosine(μ::Real) = new(μ, 1.0)
Cosine() = new(0.0, 1.0)
end
@distr_support Cosine d.μ - d.σ d.μ + d.σ
#### Parameters
location(d::Cosine) = d.μ
scale(d::Cosine) = d.σ
params(d::Cosine) = (d.μ, d.σ)
#### Statistics
mean(d::Cosine) = d.μ
median(d::Cosine) = d.μ
mode(d::Cosine) = d.μ
var(d::Cosine) = d.σ^2 * 0.13069096604865779 # 0.130... = 1/3 - 2 / π^2
skewness(d::Cosine) = 0.0
kurtosis(d::Cosine) = -0.59376287559828102362
#### Evaluation
function pdf(d::Cosine, x::Float64)
if insupport(d, x)
z = (x - d.μ) / d.σ
return (1.0 + cospi(z)) / (2 * d.σ)
else
return 0.0
end
end
logpdf(d::Cosine, x::Float64) = insupport(d, x) ? log(pdf(d, x)) : -Inf
function cdf(d::Cosine, x::Float64)
z = (x - d.μ) / d.σ
0.5 * (1.0 + z + sinpi(z) * invπ)
end
function ccdf(d::Cosine, x::Float64)
nz = (d.μ - x) / d.σ
0.5 * (1.0 + nz + sinpi(nz) * invπ)
end
quantile(d::Cosine, p::Float64) = quantile_bisect(d, p)
|
{"hexsha": "9f3ee9364db998d12332eda28897ede401ff6b70", "size": 1235, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/univariate/continuous/cosine.jl", "max_stars_repo_name": "ericproffitt/Distributions.jl", "max_stars_repo_head_hexsha": "54daf6f7230c6cf1fa46d9a948a33ad68b5fd3b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/univariate/continuous/cosine.jl", "max_issues_repo_name": "ericproffitt/Distributions.jl", "max_issues_repo_head_hexsha": "54daf6f7230c6cf1fa46d9a948a33ad68b5fd3b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/univariate/continuous/cosine.jl", "max_forks_repo_name": "ericproffitt/Distributions.jl", "max_forks_repo_head_hexsha": "54daf6f7230c6cf1fa46d9a948a33ad68b5fd3b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0, "max_line_length": 76, "alphanum_fraction": 0.5829959514, "num_tokens": 512}
|
from flask import Flask, render_template
import plotly.figure_factory as ff
import json
import plotly
import pandas as pd
import numpy as np
import requests
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
info_data = requests.get("http://localhost:8000/test").json()
t_o_a = info_data['data']['time of arrival']
t_o_p = info_data['time']
alarm = info_data['data']['prediccs-alarm']
clear_signal = info_data['data']['all-clear']
SEP_threshold = info_data['thresholds']['SEP probability threshold']
threshold = info_data['thresholds']
suit_threshold = threshold['Thin spacesuit shielding threshold']
shelter_threshold = threshold['Storm shelter shielding threshold']
if t_o_a is None:
t_o_a = 'None'
if alarm == 0:
alarm = 'None'
else:
alarm = 'Warning !!!'
if clear_signal is None or clear_signal == 0:
clear_signal = 'None'
else:
clear_signal = 'all-clear'
table_data = {'data': {'time of arrival': t_o_a,
'alarm': alarm, 'clear': clear_signal,
'SEP threshold': SEP_threshold,
'suit threshold': suit_threshold,
'shelter threshold': shelter_threshold}}
plot_data = requests.get("http://localhost:8000/plot")
data = plot_data.json()['data']
x2 = []
y2 = []
y1 = []
x1 = []
for i in data:
x2.append(float(i[1]))
y2.append(float(i[-2]))
if float(i[-2]) > float(0.068):
y1.append(float(i[-2]))
x1.append(float(i[1]))
rng = pd.date_range('1/1/2011', periods=7500, freq='H')
ts = pd.Series(np.random.randn(len(rng)), index=rng)
graphs = [
dict(
data=[
dict(
x=x2,
y=y2,
name='prediccs-data',
type='scatter',
marker=dict(
color='green')
),
dict(
x=x1,
y=y1,
name='above probability threshold',
type='scatter',
marker=dict(
color='orange')
),
],
layout=dict(
title='Dose Mars',
xaxis=dict(
title='Coordinated Universal Time(UTC)'
),
yaxis=dict(
title='cGy/day'
),
shapes=[dict(
type='line',
x0=min(x2),
y0=0.068,
x1=max(x2),
y1=0.068,
line=dict(
width=1,
color='black',
opacity=0.5,
)
), ]
),
)]
# Add "ids" to each of the graphs to pass up to the client
# for templating
ids = ['graph-{}'.format(i) for i, _ in enumerate(graphs)]
# Convert the figures to JSON
# PlotlyJSONEncoder appropriately converts pandas, datetime, etc
# objects to their JSON equivalents
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
dataJSON = json.dumps(table_data, cls=plotly.utils.PlotlyJSONEncoder)
    print(dataJSON)
return render_template('layouts/index.html',
ids=ids,
graphJSON=graphJSON, data=dataJSON)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9999)
|
{"hexsha": "71e71b36fc938e4928ad678bdb96af9ffad4f5e5", "size": 3631, "ext": "py", "lang": "Python", "max_stars_repo_path": "servers/Radiation/plotly/app.py", "max_stars_repo_name": "arpitgogia/mars_city", "max_stars_repo_head_hexsha": "30cacd80487a8c2354bbc15b4fad211ed1cb4f9d", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2016-07-20T04:49:14.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-25T09:05:04.000Z", "max_issues_repo_path": "servers/Radiation/plotly/app.py", "max_issues_repo_name": "arpitgogia/mars_city", "max_issues_repo_head_hexsha": "30cacd80487a8c2354bbc15b4fad211ed1cb4f9d", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2016-12-27T08:30:27.000Z", "max_issues_repo_issues_event_max_datetime": "2018-06-18T08:51:44.000Z", "max_forks_repo_path": "servers/Radiation/plotly/app.py", "max_forks_repo_name": "arpitgogia/mars_city", "max_forks_repo_head_hexsha": "30cacd80487a8c2354bbc15b4fad211ed1cb4f9d", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": 49, "max_forks_repo_forks_event_min_datetime": "2016-07-20T13:08:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-02T18:26:12.000Z", "avg_line_length": 28.8174603175, "max_line_length": 73, "alphanum_fraction": 0.4885706417, "include": true, "reason": "import numpy", "num_tokens": 796}
|
import numpy as np
from RenderPy.Shape import Shape
from RenderPy.Tuple import Tuple
from RenderPy.Intersection import Intersection
# ---------------------
"""
Cone class helps to describe a cone with a center at point(0,0,0)
It inherits all elements from shape
Cone class contains the following functions:
__init__
localIntersect
localNormalAt
"""
# ---------------------
"""
Make sure you are on ~/src
---------------------------------------------------
nosetests -v ../test/ConeTest.py
--- OR ----
python3 -m nose -v ../test/ConeTest.py
--- OR ----
python -m nose -v ../test/ConeTest.py
---------------------------------------------------
"""
class Cone(Shape):
# ---------------------
"""
Cone class takes in a minimum and a maximum to describe the height of a cone
"""
# ---------------------
def __init__(self, minimum=float("-inf"), maximum=float("inf"), closed=False):
super().__init__()
self.minimum = minimum
self.maximum = maximum
self.closed = closed
# ---------------------
"""
    Define equivalence of two Cone instances
"""
# ---------------------
def __eq__(self, cone2: "Cone"):
if type(cone2).__name__ != "Cone":
return False
return self.material == cone2.material and self.transform == cone2.transform
# ---------------------
"""
    Find the intersection between the ray and the cone
---- Inputs: --------
* ray: a Ray
---- Outputs: --------
* count: a scalar, the number of intersections
* results: a tuple, all intersections are listed
"""
# ---------------------
def localIntersect(self, ray: "Ray"):
def checkCaps(t):
x = ray.origin.x + t*ray.direction.x
z = ray.origin.z + t*ray.direction.z
yVal = max(self.maximum, self.minimum)
return (x*x + z*z) <= yVal * yVal
def intersectCap(xs):
if not self.closed or abs(ray.direction.y) < 0.00001:
return len(xs), xs
t = (self.minimum - ray.origin.y)/ray.direction.y
if checkCaps(t):
xs.append(Intersection(t, self))
t = (self.maximum - ray.origin.y)/ray.direction.y
if checkCaps(t):
xs.append(Intersection(t, self))
return len(xs), xs
xs = []
a = ray.direction.x ** 2 + ray.direction.z**2 - ray.direction.y ** 2
b = 2*ray.origin.x*ray.direction.x + 2*ray.origin.z * \
ray.direction.z - 2 * ray.origin.y * ray.direction.y
c = ray.origin.x**2 + ray.origin.z**2 - ray.origin.y**2
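        # a, b, c are the coefficients of the quadratic a*t^2 + b*t + c = 0
        # obtained by substituting the ray point x(t), y(t), z(t) into the
        # double-cone equation x^2 + z^2 = y^2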
if abs(a) < 0.00001 and abs(b) < 0.00001:
return intersectCap(xs)
elif abs(a) < 0.00001:
xs.append(Intersection(-c/(2*b), self))
else:
disc = b*b-4*a*c
if disc < 0:
return 0, ()
t0 = (-b-disc**0.5)/(2*a)
t1 = (-b+disc**0.5)/(2*a)
if t0 > t1:
t0, t1 = t1, t0
y0 = ray.origin.y + t0*ray.direction.y
if self.minimum < y0 < self.maximum:
xs.append(Intersection(t0, self))
y1 = ray.origin.y + t1*ray.direction.y
if self.minimum < y1 < self.maximum:
xs.append(Intersection(t1, self))
return intersectCap(xs)
# -----------------
"""
Make sure you are on ~/src
---------------------------------------------------
nosetests -v ../test/ConeTest.py:test_intersect
--- OR ----
python3 -m nose -v ../test/ConeTest.py:test_intersect
--- OR ----
python -m nose -v ../test/ConeTest.py:test_intersect
---------------------------------------------------
"""
# ---------------------
"""
    Find the normal at a certain point of the Cone
    ---- Inputs: --------
        * point: a Tuple, indicating a point on the Cone
---- Outputs: --------
* vector: the normal vector
"""
# ---------------------
def localNormalAt(self, point: "Tuple"):
dist = point.x * point.x + point.z * point.z
if dist < 1 and point.y >= self.maximum-0.00001:
return Tuple.vector(0, 1, 0)
elif dist < 1 and point.y <= self.minimum + 0.00001:
return Tuple.vector(0, -1, 0)
y = dist ** 0.5
if point.y > 0:
y = -y
return Tuple.vector(point.x, y, point.z)
# -----------------
"""
Make sure you are on ~/src
---------------------------------------------------
nosetests -v ../test/ConeTest.py:test_normalAt
--- OR ----
python3 -m nose -v ../test/ConeTest.py:test_normalAt
--- OR ----
python -m nose -v ../test/ConeTest.py:test_normalAt
---------------------------------------------------
"""
|
{"hexsha": "9c7c6acdf32febfc5fc729500846cb63026a8063", "size": 4928, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/RenderPy/Cone.py", "max_stars_repo_name": "woes-lynne/3DRenderPy", "max_stars_repo_head_hexsha": "44d9106b51ae4ce8307c794b85d4ec649751beb3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/RenderPy/Cone.py", "max_issues_repo_name": "woes-lynne/3DRenderPy", "max_issues_repo_head_hexsha": "44d9106b51ae4ce8307c794b85d4ec649751beb3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/RenderPy/Cone.py", "max_forks_repo_name": "woes-lynne/3DRenderPy", "max_forks_repo_head_hexsha": "44d9106b51ae4ce8307c794b85d4ec649751beb3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2972972973, "max_line_length": 84, "alphanum_fraction": 0.4655032468, "include": true, "reason": "import numpy", "num_tokens": 1176}
|
/* Legendre polynomials
An in-class exercise using equations for the Polynomials and solvers as
shown in the lecture/manuscript for Numerical Methods for CSE
by Prof. R. Hiptmair, ETH Zürich
Include the Eigen3 library as shown in documentation for Eigen3.
use piping to store the .m file. Example call:
legendre >legendre.m
This program calculates
- Legendre Polynomials P0 to P8 and their derivatives in interval [-1,1]
-> plots them using MatlabPlotter
- Gauss points / zero points for P1 to P8
- using secant method as solver
- using secant falsi method as solver
-> plots them using MatlabPlotter
- uses these Gauss points to calculate the weights for GL quadrature
- applies this GL quadrature to a function f(x) = e^(x^2) over an interval [a,b] = [3,6]
- plots the relative error (comparison of P1 to P8 vs reference result by Wolfram|Alpha)
v1.0.3 2015-11-22 / 2015-11-29 Pirmin Schmid
*/
//#define _USE_MATH_DEFINES
#include <cmath>
#include <vector>
#include <Eigen/Dense>
#include "matlab_plotter.h"
using namespace std;
using namespace Eigen;
//------------------------------------------------------------------------------
// 3-term recursion for sequences (Pn)n and (Pn')n for n in 0 to (N-1)
// evaluation of multiple x_i values in parallel
// input: vector x in R^n with x_0 to x_{n-1}
// input/output: Lx and DLx in R^Nxn
// thus, the number of given rows N equals the number of elements desired
// of the sequences (Pn)n and (Pn')n
void legvals(const VectorXd& x, MatrixXd& Lx, MatrixXd& DLx) {
// check input
long n = x.size();
long N = Lx.rows();
if(Lx.cols() != n || DLx.rows() != N || DLx.cols() != n) {
cout << "Error in legvals(): Dimensions mismatch." << endl;
exit(1);
}
double denominator_inv = 0.0;
double numerator1 = 0.0;
double numerator2 = 0.0;
// we need a row vector for the calculations below (otherwise Eigen assertion fails)
// this seemed the easiest way for me to get one.
RowVectorXd xr = x;
DLx.row(0) = MatrixXd::Zero(1, n);
Lx.row(0) = MatrixXd::Ones(1, n);
DLx.row(1) = MatrixXd::Ones(1, n);
Lx.row(1) = x;
for(long i = 2; i < N; i++) {
// these values are calculated fresh for each iteration to avoid any accumulating rounding error
// when calculated iteratively from prior values
    denominator_inv = 1.0 / (double)i; // one division here -> the multiplications below are much faster than repeated divisions
numerator1 = (double)(2 * i - 1);
numerator2 = (double)(i - 1);
DLx.row(i) = denominator_inv * (numerator1 * (Lx.row(i-1) + DLx.row(i-1).cwiseProduct(xr)) - numerator2 * DLx.row(i-2));
Lx.row(i) = denominator_inv * (numerator1 * Lx.row(i-1).cwiseProduct(xr) - numerator2 * Lx.row(i-2));
}
}
//------------------------------------------------------------------------------
using EvalFunction = function<double (const double, const int)>;
// computes Pk(x) for scalar x
double Pkx(const double x, const int k) {
if(k < 2) {
if(k == 0) {
return 1.0;
}
if(k == 1) {
return x;
}
cout << "Error in Pkx(): Negative k values are not valid." << endl;
exit(1);
}
double denominator = 0.0;
double numerator1 = 0.0;
double numerator2 = 0.0;
double result_minus2 = 1.0;
double result_minus1 = x;
double result = 0.0;
for(int i = 2; i <= k; i++) {
denominator = (double)i;
numerator1 = (double)(2 * i - 1);
numerator2 = (double)(i - 1);
result = (numerator1 * result_minus1 * x - numerator2 * result_minus2) / denominator;
result_minus2 = result_minus1;
result_minus1 = result;
}
return result;
}
//------------------------------------------------------------------------------
using Solver = function<double (double, double, EvalFunction, int, const double, const double, const int)>;
// translation of the Matlab function secant 2.3.25 in the manuscript
// modified to include the additional parameter k
double secant(double x0, double x1, EvalFunction f, int k, const double rtol, const double atol, const int maxIterations) {
double f0 = f(x0, k);
double fn = 0.0;
double s = 0.0;
for(int i=0; i < maxIterations; i++) {
fn = f(x1, k);
s = fn * (x1-x0) / (fn-f0); // correction
x0 = x1;
x1 = x1 - s;
if( abs(s) < max(atol, rtol * min(abs(x0), abs(x1))) ) {
return x1;
}
f0 = fn;
}
// default, best guess after maxIterations
return x1;
}
// translation of the Matlab function secant_falsi on the exercise sheet
// modified to include the additional parameter k
double secant_falsi(double x0, double x1, EvalFunction f, int k, const double rtol, const double atol, const int maxIterations) {
double f0 = f(x0, k);
double fn = 0.0;
double s = 0.0;
for(int i=0; i < maxIterations; i++) {
fn = f(x1, k);
s = fn * (x1-x0) / (fn-f0); // correction
if(f(x1 - s, k) * fn < 0.0) {
x0 = x1;
f0 = fn;
}
x1 = x1 - s;
if( abs(s) < max(atol, rtol * min(abs(x0), abs(x1))) ) {
return x1;
}
}
// default, best guess after maxIterations
return x1;
}
//------------------------------------------------------------------------------
#define MAX_ITERATIONS 100
// calculate zeros of Pk, k in 1 to n, using the secant method with the end points {-1, 1} of the interval [-1, 1] and the zeros
// of the previous Legendre polynomial as initial guesses; correction-based termination criterion
// input:  n size
//         rtol and atol relative and absolute tolerance
// return: nxn upper triangular matrix; to actually get such an upper triangular matrix,
//         I assume that row j indicates the j-th zero for j in 1 to k, and that
//         column k indicates the solutions for Pk (thus, we will have column vectors of solutions)
// note: for C++, index 0 will refer to j=1 and k=1 respectively, and so on.
MatrixXd gaussPts(const int n, Solver z, const double rtol = 1e-10, const double atol = 1e-12) {
MatrixXd zeros = MatrixXd::Zero(n, n);
// find the first for P1 -> will be in [0,0]
zeros(0,0) = z(-1.0, 1.0, Pkx, 1, rtol, atol, MAX_ITERATIONS);
// get the zeros for P2 to Pn (will be in columns 1 to (n-1))
for(int i = 1; i < n; i++) {
// get first zero
zeros(0, i) = z(-1.0, zeros(0, i-1), Pkx, i+1, rtol, atol, MAX_ITERATIONS);
// get last zero
zeros(i, i) = z(zeros(i-1, i-1), 1.0, Pkx, i+1, rtol, atol, MAX_ITERATIONS);
// get the zeros in-between
for(int j = 1; j < i; j++) {
zeros(j, i) = z(zeros(j-1, i-1), zeros(j, i-1), Pkx, i+1, rtol, atol, MAX_ITERATIONS);
}
}
return zeros;
}
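// sanity anchor (known closed-form values): the zeros of P2 are +-1/sqrt(3)
// ~= +-0.577350, and the zeros of P3 are 0 and +-sqrt(3/5) ~= +-0.774597,
// which the corresponding columns of the returned matrix should reproduce
// within the requested rtol/atol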
//------------------------------------------------------------------------------
#define A 3
#define B 6
using Function = function<double (const double)>;
// just a simple function whose primitive (Stammfunktion) cannot be expressed
// in elementary functions; thus, suitable for numerical integration / quadrature
double test_function_for_quadrature(const double x) {
return exp(x * x);
}
// Wolfram|Alpha calculated the quadrature of this function in [3,6] to be
// 3.644831077835569048422984645481051411815484722480248338949090926023254915628803401963716304967305392 10^14
// change this reference value if you change the function or A, B
#define REFERENCE_RESULT 3.6448310778355690e14
// applies the Gauss-Legendre quadrature for the given function over a defined interval [a,b]
// input: f a function that fits the type definition of Function
// a, b define interval [a,b]
// w, x weights and Gauss points for the given Legendre Polynomial in standard interval [-1,1]
// size of both arrays must match, of course
// return: quadrature approximation for this function in interval [a,b]
double GLquadrature(const Function f, const double a, const double b, const ArrayXd& w, const ArrayXd& x) {
int n = w.size();
if(n != x.size()) {
cout << "vectors of weights and Gauss points must have the same size" << endl;
exit(1);
}
double half_delta = 0.5 * (b-a);
double avg = 0.5 * (a+b);
ArrayXd weights = half_delta * w;
ArrayXd xs = half_delta * x;
xs += avg;
ArrayXd ys = xs.unaryExpr(f);
ys *= weights;
return ys.sum();
}
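//------------------------------------------------------------------------------
// Hypothetical sanity check (not part of the original program): the 2-point
// Gauss-Legendre data on [-1,1] are known in closed form (w = {1, 1},
// x = {-1/sqrt(3), +1/sqrt(3)}), so GLquadrature() integrates any cubic
// exactly; e.g. the integral of x^3 + 1 over [0,2] equals 6.
static double gl2_sanity_check() {
	ArrayXd w(2), x(2);
	w << 1.0, 1.0;
	x << -1.0 / sqrt(3.0), 1.0 / sqrt(3.0);
	return GLquadrature([](const double t) { return t * t * t + 1.0; }, 0.0, 2.0, w, x);
}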
//------------------------------------------------------------------------------
#define MIN -1
#define MAX 1
#define MAX_K 8
#define N_X 600
int main() {
// initialization
MatlabPlotter p;
p.comment("Legendre polynomials");
p.comment("Code generated by legendre.cpp");
vector<string> colors = {"k-", "b-", "g-", "r-", "c-", "m-", "y-"};
vector<string> colors2 = {"ko", "bo", "go", "ro", "co", "mo", "yo"};
int n_colors = colors.size();
vector<string> description = {"P_{0}", "P_{1}", "P_{2}", "P_{3}", "P_{4}", "P_{5}", "P_{6}", "P_{7}", "P_{8}"};
// (1) get a visual impression -> plot the Legendre polynomials up to k = MAX_K
VectorXd xx = VectorXd::LinSpaced(N_X, MIN, MAX);
MatrixXd Lx(MAX_K+1, N_X);
MatrixXd DLx(MAX_K+1, N_X);
legvals(xx, Lx, DLx);
p.figure("Legendre polynomials 0 to 8");
VectorXd yy = Lx.row(0);
p.plot(xx, yy, colors[0]);
p.hold();
p.title("Legendre polynomials 0 to 8");
for(int i=1; i <= MAX_K; i++) {
yy = Lx.row(i);
p.plot(xx, yy, colors[i % n_colors]);
}
p.xylabels("x", "P_{i}(x) for i={0, 1, ..., 8}");
p.legend("P_{0}", "P_{1}", "P_{2}", "P_{3}", "P_{4}", "P_{5}", "P_{6}", "P_{7}", "P_{8}");
p.hold(false);
// (2) get a visual impression -> plot the derivatives of the Legendre polynomials up to k = MAX_K
p.figure("Derivatives of Legendre polynomials 0 to 8");
yy = DLx.row(0);
p.plot(xx, yy, colors[0]);
p.hold();
p.title("Derivatives of Legendre polynomials 0 to 8");
for(int i=1; i <= MAX_K; i++) {
yy = DLx.row(i);
p.plot(xx, yy, colors[i % n_colors]);
}
p.xylabels("x", "dP_{i}(x)/dx for i={0, 1, ..., 8}");
p.legend("dP_{0}/dx", "dP_{1}/dx", "dP_{2}/dx", "dP_{3}/dx", "dP_{4}/dx", "dP_{5}/dx", "dP_{6}/dx", "dP_{7}/dx", "dP_{8}/dx");
p.hold(false);
// (3) Find zeros with secant and secant falsi methods
// and since we are iterating thru the Legendre polynomials and their Gauss points
// -> use the gained insight to apply GL quadrature to a given function and interval
// and measure the error of approximation
MatrixXd zeros_x = gaussPts(MAX_K, secant);
MatrixXd zeros_x_falsi = gaussPts(MAX_K, secant_falsi);
VectorXd zeros_y = VectorXd::Zero(MAX_K);
vector<double> x = {MIN, MAX};
vector<double> y = {0.0, 0.0};
ArrayXd i_values(MAX_K+1);
i_values[0] = 0.0;
ArrayXd areas(MAX_K+1);
areas[0] = 0.0; // P0 is not tested
for(int i=1; i <= MAX_K; i++) {
p.figure("Legendre polynomial " + description[i] + " Zeros / Gauss points by secant and secant falsi methods.");
yy = Lx.row(i);
p.plot(xx, yy, colors[i % n_colors]);
p.hold();
p.title("Legendre polynomial " + description[i] + " Zeros / Gauss points by secant and secant falsi methods.");
// secant
VectorXd gyy = zeros_y.head(i);
VectorXd gxx = zeros_x.col(i-1);
gxx = gxx.head(i);
p.plot(gxx, gyy, "ko");
// true position of these "zeros"
MatrixXd Lgx(i+1, i);
MatrixXd DLgx(i+1, i);
legvals(gxx, Lgx, DLgx);
VectorXd true_gyy = Lgx.row(i);
p.plot(gxx, true_gyy, "b*"); // note: This may not be visible if secant falsi gets the same result
// but P8 shows a clear difference
p.comment("zeros for " + description[i] + " by secant method");
for(int j=0; j < i; j++) {
p.comment("x = " + to_string(gxx[j]) + " y = " + to_string(true_gyy[j]) + " expected 0.0");
}
// falsi
gxx = zeros_x_falsi.col(i-1);
gxx = gxx.head(i);
p.plot(gxx, gyy, "r*");
// true position of these "zeros"
legvals(gxx, Lgx, DLgx);
true_gyy = Lgx.row(i);
p.comment("zeros for " + description[i] + " by secant falsi method");
for(int j=0; j < i; j++) {
p.comment("x = " + to_string(gxx[j]) + " y = " + to_string(true_gyy[j]) + " expected 0.0");
}
// calculation of weights for GL-quadrature on standard interval [-1,1]
p.comment("");
p.comment(description[i] + ": Weights w_i and gauss points x_i needed for Gauss-Legendre quadrature");
p.comment("(integration approximation) on interval [-1, 1]. Use appropriate scaling for other intervals.");
ArrayXd DLgx_squared = DLgx.row(i).array();
DLgx_squared *= DLgx_squared;
ArrayXd gww = gxx.cwiseProduct(gxx).array();
gww = -gww; // intermediary step since - seems not to be defined in combination with scalars
gww = gww + 1.0;
gww = gww.cwiseProduct(DLgx_squared);
gww = 2.0 / gww;
for(int j=0; j < i; j++) {
string index = to_string(j);
p.comment("w_" + index + " = " + to_string(gww[j]) +" x_" + index + " = " + to_string(gxx[j]) );
}
// let's test this on an effective quadrature
double area = GLquadrature(test_function_for_quadrature, A, B, gww, gxx.array());
p.comment("-> quadrature of f(x)=e^(x^2) in [" + to_string(A) + "," + to_string(B) + "] approx. = " + to_string(area));
areas[i] = area;
i_values[i] = (double)i;
// null line
p.plot(x, y, "k:");
// info
p.xylabels("x", description[i] + "(x)");
p.legend(description[i], "zeros by secant", "true y value of these zeros", "zeros by secant falsi");
p.hold(false);
}
// show relative errors (P1 to P8 vs reference result)
double negReference = -REFERENCE_RESULT;
ArrayXd error = areas + negReference; // workaround since - is not accepted with scalars (while + is)
error = error.cwiseAbs() / REFERENCE_RESULT;
// do not show P0
i_values = i_values.tail(MAX_K);
error = error.tail(MAX_K);
p.figure("Relative errors of P1 to P8 vs reference result from Wolfram|Alpha", MatlabPlotter::LINEAR);
p.plot(i_values, error, "r*-");
p.xylabels("P_{i}", "Relative error vs reference result (lin scale)");
p.title("Relative errors of P1 to P8 vs reference result from Wolfram|Alpha");
p.figure("Relative errors of P1 to P8 vs reference result from Wolfram|Alpha", MatlabPlotter::SEMILOGY);
p.plot(i_values, error, "r*-");
p.xylabels("P_{i}", "Relative error vs reference result (log scale)");
p.title("Relative errors of P1 to P8 vs reference result from Wolfram|Alpha");
return 0;
}
|
{"hexsha": "58bb6829d85013d3f24fa231f2aa1caebebfc428", "size": 13948, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "example2/legendre.cpp", "max_stars_repo_name": "pirminschmid/MatlabPlotter", "max_stars_repo_head_hexsha": "6cdc3954ee4a065d978c0248b00406366eafe237", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-11-09T13:21:08.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-09T15:54:38.000Z", "max_issues_repo_path": "example2/legendre.cpp", "max_issues_repo_name": "pirminschmid/MatlabPlotter", "max_issues_repo_head_hexsha": "6cdc3954ee4a065d978c0248b00406366eafe237", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example2/legendre.cpp", "max_forks_repo_name": "pirminschmid/MatlabPlotter", "max_forks_repo_head_hexsha": "6cdc3954ee4a065d978c0248b00406366eafe237", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2222222222, "max_line_length": 129, "alphanum_fraction": 0.628477201, "num_tokens": 4326}
|
Lose weight permanently
Have you lost weight, regained it, then lost and regained it again and again? If you are like many people, weight management has been a lifelong struggle. Now is the time to stop the struggle and learn the skills you need in order to lose the weight and keep it off permanently.
Diets and exercise work only if you are consistent. It is not enough to diet until you lose the weight. You need to create a lifestyle that lets you get healthy and stay healthy. You need a lifestyle that is not based on deprivation but is one that you enjoy and can stay with over the long haul.
Please visit our website at http://www.PlumshirePsychServ.com for an audio interview with Judith Beck, Ph.D., the innovator of this program and download the form that helps you decide if you are ready to begin.
What the research says
Participants find this program highly acceptable. In a 10-week study, 92% of the subjects completed the program and lost an average of 17 pounds. They continued to lose weight over the next 18 months.
|
{"hexsha": "963108133cd517075ce8c71135a947a2a52f56b3", "size": 1060, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Weight_No_More.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Weight_No_More.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Weight_No_More.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 75.7142857143, "max_line_length": 304, "alphanum_fraction": 0.7801886792, "num_tokens": 235}
|
"""
Retrieves either NZTA or NZS1170.5 code values
for the given locations
"""
from pathlib import Path
import argparse
import multiprocessing as mp
from typing import Sequence
import numpy as np
import pandas as pd
import sha_calc as sha
import gmhazard_calc as sc
DEFAULT_RETURN_PERIODS = np.array([20, 25, 50, 100, 250, 500, 1000, 2000, 2500])
DEFAULT_EXCEEDANCE_VALUES = 1 / DEFAULT_RETURN_PERIODS
DEFAULT_IMS = [
"PGA",
"pSA_0.01",
"pSA_0.02",
"pSA_0.03",
"pSA_0.04",
"pSA_0.05",
"pSA_0.075",
"pSA_0.1",
"pSA_0.12",
"pSA_0.15",
"pSA_0.17",
"pSA_0.2",
"pSA_0.25",
"pSA_0.3",
"pSA_0.4",
"pSA_0.5",
"pSA_0.6",
"pSA_0.7",
"pSA_0.75",
"pSA_0.8",
"pSA_0.9",
"pSA_1.0",
"pSA_1.25",
"pSA_1.5",
"pSA_2.0",
"pSA_2.5",
"pSA_3.0",
"pSA_4.0",
"pSA_5.0",
"pSA_6.0",
"pSA_7.5",
"pSA_10.0",
]
def main(
input_data_ffp: str,
output_dir: Path,
nz_code_type: str,
nzta_csv_ffp: Path = None,
ims: Sequence[str] = DEFAULT_IMS,
n_procs: int = 4,
):
# Load the required data
data_df = pd.read_csv(input_data_ffp)
# Need an ensemble
ens = sc.gm_data.Ensemble("v20p5emp")
if nz_code_type == "NZS1170.5":
with mp.Pool(n_procs) as pool:
results = pool.starmap(
_process_nzs1170p5_station,
[
(
ens,
cur_row.lat,
cur_row.lon,
cur_row.vs30,
ims,
ix,
data_df.shape[0],
)
for ix, (cur_id, cur_row) in enumerate(data_df.iterrows())
],
)
# Extract and save
grouped_sublists = list(zip(*results))
np.save(
str(output_dir / "NZS1170p5_im_values.npy"),
np.stack(grouped_sublists[0], axis=0),
)
np.save(
str(output_dir / "NZS1170p5_Z_values.npy"),
np.stack(grouped_sublists[1], axis=0),
)
np.save(
str(output_dir / "NZS1170p5_N_values.npy"),
np.stack(grouped_sublists[2], axis=0),
)
np.save(
str(output_dir / "NZS1170p5_R_values.npy"),
np.stack(grouped_sublists[3], axis=0),
)
np.save(
str(output_dir / "NZS1170p5_Ch_values.npy"),
np.stack(grouped_sublists[4], axis=0),
)
elif nz_code_type == "NZTA":
        assert nzta_csv_ffp is not None, (
            "Path to the NZTA csv is required when computing NZTA code PGA values"
        )
nzta_df = pd.read_csv(nzta_csv_ffp, header=0, index_col=0)
with mp.Pool(n_procs) as pool:
results = pool.starmap(
_process_nzta_station,
[
(
ens,
cur_row.lat,
cur_row.lon,
cur_row.vs30,
nzta_df,
ix,
data_df.shape[0],
)
for ix, (cur_id, cur_row) in enumerate(data_df.iterrows())
],
)
grouped_sublists = list(zip(*results))
np.save(
str(output_dir / "NZTA_PGA_values.npy"),
np.stack(grouped_sublists[0], axis=0),
)
np.save(
str(output_dir / "NZTA_town_index.npy"),
np.stack(grouped_sublists[1], axis=0),
)
def _process_nzta_station(
ens: sc.gm_data.Ensemble,
lat: float,
lon: float,
vs30: float,
nzta_df: pd.DataFrame,
ix: int,
n_locs: int,
):
print(f"Processing location {ix + 1}/{n_locs}")
# Set result to nan if no vs30 values are available
if np.isnan(vs30):
return np.full(len(DEFAULT_EXCEEDANCE_VALUES), np.nan), np.nan
site_info = sc.site.SiteInfo(f"site_{ix}", lat, lon, vs30)
result = sc.nz_code.nzta_2018.run_ensemble_nzta(
ens, site_info, exceedance_values=DEFAULT_EXCEEDANCE_VALUES
)
return (
result.pga_values.loc[DEFAULT_EXCEEDANCE_VALUES].values,
np.flatnonzero(nzta_df.index.values == result.nearest_town)[0],
)
def _process_nzs1170p5_station(
ens: sc.gm_data.Ensemble,
lat: float,
lon: float,
vs30: float,
ims: Sequence[str],
ix: int,
n_locs: int,
):
print(f"Processing location {ix + 1}/{n_locs}")
# Get the periods
sa_periods = [0 if im == "PGA" else sc.utils.get_period_from_pSA(im) for im in ims]
# Set result to nan if no vs30 values are available
if np.isnan(vs30):
return (
np.full((len(sa_periods), len(DEFAULT_EXCEEDANCE_VALUES)), np.nan),
np.nan,
np.full(len(sa_periods), np.nan),
np.full(len(DEFAULT_EXCEEDANCE_VALUES), np.nan),
np.full(len(sa_periods), np.nan),
)
site_info = sc.site.SiteInfo(f"site_{ix}", lat, lon, vs30)
distance = sc.nz_code.nzs1170p5.get_distance_from_site_info(ens, site_info)
z_factor = float(sc.nz_code.nzs1170p5.ll2z((site_info.lon, site_info.lat)))
soil_class = sc.nz_code.nzs1170p5.get_soil_class(site_info.vs30)
results, R_values, N_values, Ch_values = [], [], [], []
for cur_exceedance in DEFAULT_EXCEEDANCE_VALUES:
cur_rp = 1 / cur_exceedance
if cur_rp < 20 or cur_rp > 2500:
            raise NotImplementedError(f"Return period {cur_rp} is outside the supported range [20, 2500]")
else:
C, Ch, R, N = sha.nzs1170p5_spectra(
sa_periods, z_factor, cur_rp, distance, soil_class.value
)
results.append(C)
R_values.append(R)
N_values.append(N)
Ch_values.append(Ch)
return (
np.stack(results, axis=1),
z_factor,
N_values[0],
np.asarray(R_values),
Ch_values[0],
)
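# Example invocation (hypothetical file names, shown for illustration only):
#   python nz_code_retrieval.py sites.csv ./out NZS1170.5 --n_procs 8
#   python nz_code_retrieval.py sites.csv ./out NZTA --nzta_town_csv nzta_towns.csv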
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"input_data",
type=str,
help="Path to input csv, must contain columns lon, lat and vs30",
)
parser.add_argument("output_dir", type=Path, help="Output directory path")
parser.add_argument(
"nz_code_type",
type=str,
help="The NZCode for which to generate data",
choices=["NZS1170.5", "NZTA"],
)
parser.add_argument(
"--nzta_town_csv",
type=Path,
help="Path to the NZTA town csv, required when computing NZTA",
)
parser.add_argument(
"--n_procs", type=int, help="Number of processes to use", default=4
)
args = parser.parse_args()
main(
args.input_data,
args.output_dir,
args.nz_code_type,
nzta_csv_ffp=args.nzta_town_csv,
n_procs=args.n_procs,
)
|
{"hexsha": "f1b2b9beee3aefdc21321a7e4f4b69cbb2281c3d", "size": 6933, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/gmhazard_scripts/one_off/nz_code_retrieval.py", "max_stars_repo_name": "ucgmsim/gmhazard", "max_stars_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/gmhazard_scripts/one_off/nz_code_retrieval.py", "max_issues_repo_name": "ucgmsim/gmhazard", "max_issues_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-13T02:33:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:01:08.000Z", "max_forks_repo_path": "tools/gmhazard_scripts/one_off/nz_code_retrieval.py", "max_forks_repo_name": "ucgmsim/gmhazard", "max_forks_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.08203125, "max_line_length": 87, "alphanum_fraction": 0.5469493726, "include": true, "reason": "import numpy", "num_tokens": 1896}
|
"""
Class Features
Name: drv_data_hs_geo
Author(s): Francesco Avanzi (francesco.avanzi@cimafoundation.org), Fabio Delogu (fabio.delogu@cimafoundation.org)
Date: '20210525'
Version: '1.0.0'
"""
#######################################################################################
# Library
import logging
import os
import json
import xarray as xr
import numpy as np
from lib_hs_geo import read_file_raster
from lib_hs_io_generic import write_obj, read_obj, convert_values2da
from lib_hs_generic import make_folder
from lib_hs_ancillary_snow import compute_predictor, command_line_predictor
# Debug
import matplotlib.pylab as plt
#######################################################################################
# -------------------------------------------------------------------------------------
# Class to manage geographical datasets
class DriverGeo:
def __init__(self, src_dict, ancillary_dict, dst_dict, info_dict=None,
tag_folder_name='folder_name', tag_file_name='file_name',
tag_src_data='land_data',
tag_src_homogeneous_region='homogeneous_region_data',
tag_ancillary_land_data_grid='grid_land_reference',
tag_ancillary_homogeneous_region_grid='grid_homogeneous_region_reference',
tag_ancillary_data_geo='geo_reference',
tag_ancillary_data_predictor='predictor_reference',
tag_dst_data_aspect='aspect_data',
tag_dst_data_slope='slope_data',
tag_dst_data_hillshade='hillshade_data',
tag_dst_data_roughness='roughness',
tag_predictor_to_use='predictor_to_use',
flag_updating_ancillary=True):
self.src_dict = src_dict
self.ancillary_dict = ancillary_dict
self.dst_dict = dst_dict
self.info_dict = info_dict
self.tag_folder_name = tag_folder_name
self.tag_file_name = tag_file_name
self.tag_src_data = tag_src_data
self.tag_src_homogeneous_region = tag_src_homogeneous_region
self.tag_ancillary_land_data_grid = tag_ancillary_land_data_grid
self.tag_ancillary_homogeneous_region_grid = tag_ancillary_homogeneous_region_grid
self.tag_ancillary_data_geo = tag_ancillary_data_geo
self.tag_ancillary_data_predictor = tag_ancillary_data_predictor
self.tag_dst_data_aspect = tag_dst_data_aspect
self.tag_dst_data_slope = tag_dst_data_slope
self.tag_dst_data_hillshade = tag_dst_data_hillshade
self.tag_dst_data_roughness = tag_dst_data_roughness
self.tag_predictor_to_use = tag_predictor_to_use
self.flag_updating_ancillary = flag_updating_ancillary
self.folder_name_land = src_dict[self.tag_src_data][self.tag_folder_name]
self.file_name_land = src_dict[self.tag_src_data][self.tag_file_name]
self.file_path_land = os.path.join(self.folder_name_land, self.file_name_land)
self.folder_name_homogeneous_region = src_dict[self.tag_src_homogeneous_region][self.tag_folder_name]
self.file_name_homogeneous_region = src_dict[self.tag_src_homogeneous_region][self.tag_file_name]
self.file_path_homogeneous_region = \
os.path.join(self.folder_name_homogeneous_region, self.file_name_homogeneous_region)
self.folder_name_ancillary_land = \
ancillary_dict[self.tag_ancillary_land_data_grid][self.tag_folder_name]
self.file_name_ancillary_land = ancillary_dict[self.tag_ancillary_land_data_grid][self.tag_file_name]
self.file_path_ancillary_land = os.path.join(self.folder_name_ancillary_land, self.file_name_ancillary_land)
self.folder_name_ancillary_homogeneous_region = \
ancillary_dict[self.tag_ancillary_homogeneous_region_grid][self.tag_folder_name]
self.file_name_ancillary_homogeneous_region = \
ancillary_dict[self.tag_ancillary_homogeneous_region_grid][self.tag_file_name]
self.file_path_ancillary_homogeneous_region = \
os.path.join(self.folder_name_ancillary_homogeneous_region, self.file_name_ancillary_homogeneous_region)
self.folder_name_ancillary_predictor = ancillary_dict[self.tag_ancillary_data_predictor][self.tag_folder_name]
self.file_name_ancillary_predictor = ancillary_dict[self.tag_ancillary_data_predictor][self.tag_file_name]
self.file_path_ancillary_predictor = os.path.join(self.folder_name_ancillary_predictor, self.file_name_ancillary_predictor)
self.predictor_to_use = self.info_dict[self.tag_predictor_to_use]
self.tag_dim_geo_x = 'longitude'
self.tag_dim_geo_y = 'latitude'
self.tag_coord_geo_x = 'west_east'
self.tag_coord_geo_y = 'south_north'
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to write dataset in dictionary format
@staticmethod
def write_dset_obj(file_name, file_dset):
file_dict = file_dset.to_dict()
write_obj(file_name, file_dict)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to load dataset saved in dictionary format
@staticmethod
def read_dset_obj(file_name):
file_dict = read_obj(file_name)
file_dset = xr.Dataset.from_dict(file_dict)
return file_dset
# -------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# Method to define geo predictor(s)
def define_geo_predictor(self, dset_land):
logging.info(' ----> Get predictors information ... ')
file_path_land = self.file_path_land
file_path_ancillary = self.file_path_ancillary_predictor
values_geo_x = dset_land[self.tag_coord_geo_x].values
values_geo_y = dset_land[self.tag_coord_geo_y].values
predictor_to_use = self.predictor_to_use
if self.flag_updating_ancillary:
if os.path.exists(file_path_ancillary):
os.remove(file_path_ancillary)
dset_predictor = None
if os.path.exists(file_path_land):
if not os.path.exists(file_path_ancillary):
for var_name, var_fields in self.dst_dict.items():
logging.info(' -----> Variable ' + var_name + ' ... ')
if var_name in self.predictor_to_use:
if var_name in list(command_line_predictor.keys()):
cmd_step = command_line_predictor[var_name]['command_line']
folder_name_step = var_fields[self.tag_folder_name]
file_name_step = var_fields[self.tag_file_name]
file_path_step = os.path.join(folder_name_step, file_name_step)
make_folder(folder_name_step)
values_predictor = compute_predictor(file_path_land, file_path_step, cmd_step)
#set to nan where land is nan
values_predictor[np.isnan(dset_land.land_data)] = np.nan
da_predictor = convert_values2da(values_predictor, values_geo_x, values_geo_y, var_name=var_name,
coord_name_x=self.tag_coord_geo_x, coord_name_y=self.tag_coord_geo_y,
dim_name_x=self.tag_dim_geo_x, dim_name_y=self.tag_dim_geo_y)
if dset_predictor is None:
dset_predictor = da_predictor.to_dataset()
else:
dset_predictor[var_name] = da_predictor
logging.info(' -----> Variable ' + var_name + ' ... DONE')
else:
logging.info(' -----> Variable ' + var_name + ' ... SKIPPED')
logging.warning(' ===> Variable not included in allowed predictors')
else:
logging.info(' -----> Variable ' + var_name + ' ... SKIPPED')
logging.warning(' ===> Variable not selected in predictors list')
if dset_predictor is not None:
self.write_dset_obj(file_path_ancillary, dset_predictor)
logging.info(' ----> Get predictors information ... DONE')
else:
logging.info(' ----> Get predictors information ... SKIPPED')
logging.warning(' ===> Predictors are not available ')
else:
dset_predictor = self.read_dset_obj(file_path_ancillary)
logging.info(' ----> Get predictors information ... DONE. Loaded using saved ancillary file')
else:
logging.info(' ----> Get predictors information ... FAILED')
logging.error(' ===> Error in finding land information')
raise IOError('File not found')
return dset_predictor
# -----------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define geo reference
def define_geo_ref(self, file_name, ancillary_name, var_name):
logging.info(' ----> Get ' + var_name + ' information ... ')
if self.flag_updating_ancillary:
if os.path.exists(ancillary_name):
os.remove(ancillary_name)
if os.path.exists(file_name):
if not os.path.exists(ancillary_name):
da, wide, high, proj, transform, bounding_box, no_data = \
read_file_raster(file_name,
coord_name_x=self.tag_coord_geo_x, coord_name_y=self.tag_coord_geo_y,
dim_name_x=self.tag_dim_geo_x, dim_name_y=self.tag_dim_geo_y, var_name=var_name)
dset = da.to_dataset()
attrs = {'wide': wide, 'high': high,
'proj': proj, 'transform': transform,
'bbox': bounding_box,
'no_data': no_data}
dset.attrs = attrs
self.write_dset_obj(ancillary_name, dset)
logging.info(' ----> Get ' + var_name + ' information ... DONE')
else:
dset = self.read_dset_obj(ancillary_name)
logging.info(' ----> Get ' + var_name + ' information ... DONE. Loaded using saved ancillary file')
else:
logging.info(' ----> Get ' + var_name + ' information ... FAILED')
logging.error(' ===> Error in finding ' + var_name + ' information')
raise IOError('File not found')
return dset
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define geo attributes
@staticmethod
def define_geo_attributes(dset_land):
# Marche reference: iCols 643 iY jdim iRows 534 iX idim
dset_attrs_raw = dset_land.attrs
transform = dset_attrs_raw['transform']
cellsize = transform[0]
xllcorner = transform[2]
yllcorner = transform[5]
bbox = dset_attrs_raw['bbox']
nrows = int(dset_attrs_raw['high']) # high = dims[0] --> nrows
ncols = int(dset_attrs_raw['wide']) # wide = dims[1] --> cols
nodata_value = dset_attrs_raw['no_data']
proj = dset_attrs_raw['proj']
dset_attrs_tmp = {
'ncols': ncols, 'nrows': nrows,
'xllcorner': xllcorner, 'yllcorner': yllcorner,
'cellsize': cellsize, 'nodata_value': nodata_value, 'bounding_box': bbox,
'proj': proj, 'transform': transform}
dset_attrs_valid = {}
for dset_key, dset_value in dset_attrs_tmp.items():
if isinstance(dset_value, list):
dset_value_tmp = [str(value) for value in dset_value]
dset_value = ','.join(dset_value_tmp)
if isinstance(dset_value, dict):
dset_value_tmp = json.dumps(dset_value)
dset_value = dset_value_tmp
dset_attrs_valid[dset_key] = dset_value
return dset_attrs_valid
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compose geographical information
def composer_geo(self):
# Info start
logging.info(' ---> Compose geographical information ... ')
# Define land dataset
dset_land = self.define_geo_ref(self.file_path_land, self.file_path_ancillary_land, self.tag_src_data)
# Define homogeneous-region dataset
dset_homogeneous_regions = \
self.define_geo_ref(self.file_path_homogeneous_region,
self.file_path_ancillary_homogeneous_region, self.tag_src_homogeneous_region)
# Define predictors datasets
dset_predictor = self.define_geo_predictor(dset_land)
# Define attributes
dset_attrs = self.define_geo_attributes(dset_land)
# Merge land and predictors datasets
dset_collections = dset_land.merge(dset_homogeneous_regions)
dset_collections = dset_collections.merge(dset_predictor)
dset_collections.attrs = dset_attrs
# info end
logging.info(' ---> Compose geographical information ... DONE')
return dset_collections
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
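# -------------------------------------------------------------------------------------
# Minimal usage sketch (hypothetical paths and settings, not shipped with the package);
# each dictionary mirrors the tags expected by DriverGeo.__init__
if __name__ == '__main__':
    src = {'land_data': {'folder_name': '/data/geo', 'file_name': 'land.txt'},
           'homogeneous_region_data': {'folder_name': '/data/geo', 'file_name': 'regions.txt'}}
    ancillary = {'grid_land_reference': {'folder_name': '/data/ancillary', 'file_name': 'land.workspace'},
                 'grid_homogeneous_region_reference': {'folder_name': '/data/ancillary', 'file_name': 'regions.workspace'},
                 'geo_reference': {'folder_name': '/data/ancillary', 'file_name': 'geo.workspace'},
                 'predictor_reference': {'folder_name': '/data/ancillary', 'file_name': 'predictor.workspace'}}
    dst = {'aspect_data': {'folder_name': '/data/destination', 'file_name': 'aspect.txt'},
           'slope_data': {'folder_name': '/data/destination', 'file_name': 'slope.txt'}}
    info = {'predictor_to_use': ['aspect_data', 'slope_data']}
    driver_geo = DriverGeo(src, ancillary, dst, info_dict=info)
    dset_geo = driver_geo.composer_geo()
# -------------------------------------------------------------------------------------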
|
{"hexsha": "4ed51360827cd95217c5f1af218cd870fb934ffb", "size": 14168, "ext": "py", "lang": "Python", "max_stars_repo_path": "apps/ground_network/hs/drv_data_hs_geo.py", "max_stars_repo_name": "c-hydro/hyde", "max_stars_repo_head_hexsha": "3a3ff92d442077ce353b071d5afe726fc5465201", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apps/ground_network/hs/drv_data_hs_geo.py", "max_issues_repo_name": "c-hydro/hyde", "max_issues_repo_head_hexsha": "3a3ff92d442077ce353b071d5afe726fc5465201", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-04-07T16:34:59.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T07:32:39.000Z", "max_forks_repo_path": "apps/ground_network/hs/drv_data_hs_geo.py", "max_forks_repo_name": "c-hydro/fp-hyde", "max_forks_repo_head_hexsha": "b0728397522aceebec3e7ff115aff160a10efede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4102564103, "max_line_length": 131, "alphanum_fraction": 0.5707227555, "include": true, "reason": "import numpy", "num_tokens": 2738}
|
SUBROUTINE struct_sizes(nat,nsym,ndif,lattic,AA,BB,CC,alpha,structf)
IMPLICIT NONE
CHARACTER*80, intent(in):: structf
INTEGER, intent(out) :: nat, nsym, ndif
CHARACTER*4, intent(out):: lattic
REAL*8, intent(out) :: AA,BB,CC,alpha(3)
!----------- local variables ---------------
CHARACTER*4 :: irel,cform
CHARACTER*80 :: title
CHARACTER*10 :: aname
REAL*8 :: r0,rmt,zz,rotloc(3,3),pos(3)
INTEGER :: iord,ios,mult,jrj,iatnr,isplit,index,i,j,m,jatom
open(20,FILE=structf,STATUS='OLD')
read (20,1000) title
read (20,1010) lattic,nat,cform,irel
read (20,1020) aa,bb,cc,alpha(1),alpha(2),alpha(3)
IF(ABS(ALPHA(1)).LT.1.D-5) ALPHA(1)=90.
IF(ABS(ALPHA(2)).LT.1.D-5) ALPHA(2)=90.
IF(ABS(ALPHA(3)).LT.1.D-5) ALPHA(3)=90.
INDEX=0
DO jatom=1,NAT
INDEX=INDEX+1
READ(20,1030,iostat=ios) iatnr,( pos(j),j=1,3 ), mult,isplit
IF(ios /= 0 ) THEN
WRITE(6,*) iatnr,( pos(j),j=1,3 ), mult,isplit
WRITE(6,*) 'ERROR IN STRUCT FILE READ'
STOP
ENDIF
IF (mult .EQ. 0) THEN
WRITE (6,6000) jatom, index, mult
STOP
ENDIF
DO m=1,mult-1
index=index+1
READ(20,1031) iatnr,( pos(j),j=1,3) ! pos -- position inside the unit cell read from case.struct
ENDDO
READ(20,1050) aname,jrj,r0,rmt,zz ! zz-nuclear charge, jrj--number of radial data points
READ(20,1051) ((rotloc(i,j),i=1,3),j=1,3)
ENDDO
ndif=index
READ(20,1151) iord
nsym=iord
CLOSE(20)
1000 FORMAT(A80)
1010 FORMAT(A4,23X,I3,1x,a4,/,13X,A4,18X,A4)
1020 FORMAT(6F10.7,10X,F10.7)
1030 FORMAT(4X,I4,4X,F10.7,3X,F10.7,3X,F10.7,/,15X,I2,17X,I2)
1031 FORMAT(4X,I4,4X,F10.7,3X,F10.7,3X,F10.7)
1050 FORMAT(A10,5X,I5,5X,F10.9,5X,F10.5,5X,F10.5)
1051 FORMAT(20X,3F10.8)
1151 FORMAT(I4)
6000 FORMAT(///,3X,'ERROR IN struct-read : MULT(JATOM)=0 ...',/, 20X,'JATOM=',I3,3X,'INDEX=',I3,3X,'MULT=',I3)
END SUBROUTINE struct_sizes
SUBROUTINE init_struct(lattic,aname,AA,BB,CC,alpha,tau,pos,rel,r0,dx,RMT,zz,rotloc,mult,jrj,&
&iatnr,isplit,iz,inum, structf,nat,nsym,ndif)
IMPLICIT NONE
! input
CHARACTER*80, intent(in) :: structf
INTEGER, intent(in) :: nat, nsym, ndif
! output
CHARACTER*4, intent(out) :: lattic
CHARACTER*10,intent(out) :: aname(nat)
REAL*8, intent(out) :: AA,BB,CC,alpha(3)
REAL*8, intent(out) :: tau(3,nsym), pos(3,ndif)
LOGICAL, intent(out) :: rel
REAL*8, intent(out) :: r0(nat),dx(nat),RMT(nat),zz(nat),rotloc(3,3,nat)
INTEGER, intent(out) :: mult(nat),jrj(nat),iatnr(nat),isplit(nat)
INTEGER, intent(out) :: iz(3,3,nsym),inum(nsym)
!----------- local variables ---------------
CHARACTER*80 :: title
CHARACTER*4 :: irel,cform
INTEGER :: ios,iord
!loop indexs
INTEGER :: index,i,j,j1,j2,m,jatom,nato
open(20,FILE=structf,STATUS='OLD')
read (20,1000) title
read (20,1010) lattic,nato,cform,irel
if (nato.NE.nat) WRITE(6,*) 'ERROR init_struct: nat(1)!=nat(2)'
REL=.TRUE.
IF(IREL.EQ.'NREL') REL=.FALSE.
read (20,1020) aa,bb,cc,alpha(1),alpha(2),alpha(3)
IF(ABS(ALPHA(1)).LT.1.D-5) ALPHA(1)=90.0D0
IF(ABS(ALPHA(2)).LT.1.D-5) ALPHA(2)=90.0D0
IF(ABS(ALPHA(3)).LT.1.D-5) ALPHA(3)=90.0D0
INDEX=0
DO jatom=1,nat
INDEX=INDEX+1
READ(20,1030,iostat=ios) iatnr(jatom),( pos(j,index),j=1,3 ), mult(jatom),isplit(jatom)
IF(ios /= 0 ) THEN
WRITE(6,*) iatnr(jatom),( pos(j,index),j=1,3 ), mult(jatom),isplit(jatom)
WRITE(6,*) 'ERROR IN STRUCT FILE READ'
STOP
ENDIF
IF (mult(jatom) .EQ. 0) THEN
WRITE (6,6000) jatom, index, mult(jatom)
STOP
ENDIF
DO m=1,mult(jatom)-1
index=index+1
READ(20,1031) iatnr(jatom),( pos(j,index),j=1,3) ! pos -- position inside the unit cell read from case.struct
ENDDO
READ(20,1050) aname(jatom),jrj(jatom),r0(jatom),rmt(jatom),zz(jatom) ! zz-nuclear charge, jrj--number of radial data points
dx(jatom)=LOG(rmt(jatom)/r0(jatom)) / (jrj(jatom)-1)
rmt(jatom)=r0(jatom)*EXP( dx(jatom)*(jrj(jatom)-1) )
READ(20,1051) ((rotloc(i,j,jatom),i=1,3),j=1,3)
ENDDO
if (ndif.NE.index) WRITE(6,*) 'ERROR init_struct: ndif(1)!=ndif(2)'
READ(20,1151) iord
if (nsym.NE.iord) WRITE(6,*) 'ERROR init_struct: nsym(1)!=nsym(2)'
DO j=1,iord ! iz(:,:,iord) - all symmetry transformations
! tau(:,iord) - translations
READ(20,1101) ( (iz(j1,j2,j),j1=1,3),tau(j2,j),j2=1,3 ),inum(j)
ENDDO
1000 FORMAT(A80)
1010 FORMAT(A4,23X,I3,1x,a4,/,13X,A4,18X,A4)
1020 FORMAT(6F10.7,10X,F10.7)
1030 FORMAT(4X,I4,4X,F10.7,3X,F10.7,3X,F10.7,/,15X,I2,17X,I2)
1031 FORMAT(4X,I4,4X,F10.7,3X,F10.7,3X,F10.7)
1050 FORMAT(A10,5X,I5,5X,F10.9,5X,F10.5,5X,F10.5)
1051 FORMAT(20X,3F10.8)
1101 FORMAT(3(3I2,F10.8/),I8)
1151 FORMAT(I4)
6000 FORMAT(///,3X,'ERROR IN LAPW0 : MULT(JATOM)=0 ...',/, 20X,'JATOM=',I3,3X,'INDEX=',I3,3X,'MULT=',I3)
END SUBROUTINE init_struct
SUBROUTINE LATGEN(BR1, BR2, vol, ORTHO, lattic, AA, BB, CC, alpha)
!*******************************************************************
!* LATGEN GENERATES TWO BRAVAIS MATRICES, DEFINES THE VOLUME OF *
!* THE UNIT CELL AND CALLS ROTDEF *
!* BR1(3,3) : TRANSFORMS INTEGER RECIPROCAL LATTICE VECTORS AS *
!* GIVEN IN THE VECTORLIST OF LAPW1 ( GENERATED IN *
!* COORS, TRANSFORMED IN BASISO, AND WRITTEN OUT IN *
!* WFTAPE) INTO CARTESIAN SYSTEM *
!* BR2(3,3) : TRANSFORMS A RECIPROCAL LATTICE VECTOR OF A SPE- *
!* CIAL COORDINATE SYSTEM ( IN UNITS OF 2 PI / A ) *
!* TO CARTESIAN SYSTEM *
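!*  EXAMPLE : FOR A PRIMITIVE LATTICE WITH ALL ANGLES EQUAL TO 90  *
!*            DEGREES, THE GENERAL FORMULAS BELOW REDUCE TO THE    *
!*            DIAGONAL FORM BR1 = BR2 = DIAG(2*PI/AA, 2*PI/BB,     *
!*            2*PI/CC) WITH RVFAC = 1, I.E. VOL = AA*BB*CC         *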
!*******************************************************************
IMPLICIT NONE
REAL*8, intent(out) :: BR1(3,3),BR2(3,3), vol
LOGICAL, intent(out) :: ORTHO
CHARACTER*4, intent(in):: lattic
REAL*8, intent(in) :: AA,BB,CC,alpha(3)
!-----------------------------------------
! local variables
REAL*8 :: PI, SQRT3, RVFAC, SINAB, SINBC, COSAB, COSBC, WURZEL, COSAC
REAL*8 :: PIA(3)
REAL*8 :: talpha(3)
!---------------------------------------------------------------------
PI=ACOS(-1.0D0)
SQRT3=SQRT(3.D0)
tALPHA(:)=ALPHA(:)*PI/180.0D0
PIA(1)=2.D0*PI/AA
PIA(2)=2.D0*PI/BB
PIA(3)=2.D0*PI/CC
IF ( LATTIC(1:1).EQ.'H') THEN !.....HEXAGONAL LATTICE
BR1(1,1)=2.D0/SQRT3*PIA(1)
BR1(1,2)=1.D0/SQRT3*PIA(1)
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,1)=0.0D0
BR1(3,2)=0.0D0
BR1(3,3)=PIA(3)
!
BR2(1,1)=2.D0/SQRT3*PIA(1)
BR2(1,2)=1.D0/SQRT3*PIA(1)
BR2(1,3)=0.0D0
BR2(2,1)=0.0D0
BR2(2,2)=PIA(2)
BR2(2,3)=0.0D0
BR2(3,1)=0.0D0
BR2(3,2)=0.0D0
BR2(3,3)=PIA(3)
!
RVFAC=2.D0/SQRT(3.D0)
ORTHO=.FALSE.
ELSE IF(LATTIC(1:1).EQ.'R') THEN !.....RHOMBOHEDRAL CASE
BR1(1,1)=1.D0/SQRT(3.D0)*PIA(1)
BR1(1,2)=1.D0/SQRT(3.D0)*PIA(1)
BR1(1,3)=-2.d0/sqrt(3.d0)*PIA(1)
BR1(2,1)=-1.0d0*PIA(2)
BR1(2,2)=1.0d0*PIA(2)
BR1(2,3)=0.0d0*PIA(2)
BR1(3,1)=1.0d0*PIA(3)
BR1(3,2)=1.0d0*PIA(3)
BR1(3,3)=1.0d0*PIA(3)
!
BR2(1,1)=1.D0/SQRT(3.D0)*PIA(1)
BR2(1,2)=1.D0/SQRT(3.D0)*PIA(1)
BR2(1,3)=-2.d0/sqrt(3.d0)*PIA(1)
BR2(2,1)=-1.0d0*PIA(2)
BR2(2,2)=1.0d0*PIA(2)
BR2(2,3)=0.0d0*PIA(2)
BR2(3,1)=1.0d0*PIA(3)
BR2(3,2)=1.0d0*PIA(3)
BR2(3,3)=1.0d0*PIA(3)
RVFAC=6.D0/SQRT(3.D0)
ORTHO=.FALSE.
ELSE IF( LATTIC(1:1).EQ.'S' .OR. LATTIC(1:1).EQ.'P' ) THEN !.....PRIMITIVE LATTICE
SINBC=SIN(tALPHA(1))
COSAB=COS(tALPHA(3))
COSAC=COS(tALPHA(2))
COSBC=COS(tALPHA(1))
WURZEL=SQRT(SINBC**2-COSAC**2-COSAB**2+2*COSBC*COSAC*COSAB)
BR2(1,1)= SINBC/WURZEL*PIA(1)
BR2(1,2)= (-COSAB+COSBC*COSAC)/(SINBC*WURZEL)*PIA(2)
BR2(1,3)= (COSBC*COSAB-COSAC)/(SINBC*WURZEL)*PIA(3)
BR2(2,1)= 0.0
BR2(2,2)= PIA(2)/SINBC
BR2(2,3)= -PIA(3)*COSBC/SINBC
BR2(3,1)= 0.0
BR2(3,2)= 0.0
BR2(3,3)= PIA(3)
!
BR1(1,1)= SINBC/WURZEL*PIA(1)
BR1(1,2)= (-COSAB+COSBC*COSAC)/(SINBC*WURZEL)*PIA(2)
BR1(1,3)= (COSBC*COSAB-COSAC)/(SINBC*WURZEL)*PIA(3)
BR1(2,1)= 0.0
BR1(2,2)= PIA(2)/SINBC
BR1(2,3)= -PIA(3)*COSBC/SINBC
BR1(3,1)= 0.0
BR1(3,2)= 0.0
BR1(3,3)= PIA(3)
!
RVFAC= 1.d0/WURZEL
ORTHO=.TRUE.
if(abs(talpha(1)-pi/2.d0).gt.0.0001) ortho=.false.
if(abs(talpha(2)-pi/2.d0).gt.0.0001) ortho=.false.
if(abs(talpha(3)-pi/2.d0).gt.0.0001) ortho=.false.
!
ELSE IF(LATTIC(1:1).EQ.'F') THEN !.....FC LATTICE
BR1(1,1)=PIA(1)
BR1(1,2)=0.0D0
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,2)=0.0D0
BR1(3,1)=0.0D0
BR1(3,3)=PIA(3)
! definitions according to column, rows convention for BR2
BR2(1,1)=-PIA(1)
BR2(1,2)= PIA(1)
BR2(1,3)= PIA(1)
BR2(2,1)= PIA(2)
BR2(2,2)=-PIA(2)
BR2(2,3)= PIA(2)
BR2(3,1)= PIA(3)
BR2(3,2)= PIA(3)
BR2(3,3)=-PIA(3)
!
RVFAC=4.D0
ORTHO=.TRUE.
ELSE IF(LATTIC(1:1).EQ.'B') THEN !.....BC LATTICE
BR1(1,1)=PIA(1)
BR1(1,2)=0.0D0
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,1)=0.0D0
BR1(3,2)=0.0D0
BR1(3,3)=PIA(3)
!
BR2(1,1)= 0.0D0
BR2(1,2)= PIA(1)
BR2(1,3)= PIA(1)
BR2(2,1)= PIA(2)
BR2(2,2)= 0.0D0
BR2(2,3)= PIA(2)
BR2(3,1)= PIA(3)
BR2(3,2)= PIA(3)
BR2(3,3)= 0.0D0
!
RVFAC=2.D0
ORTHO=.TRUE.
!GOTO 100
ELSE IF(LATTIC(1:1).EQ.'C') THEN
!
IF(LATTIC(2:3).EQ.'XZ') THEN
!.....CXZ CASE (CXZ LATTICE BUILD UP)
!.....CXZ ORTHOROMBIC CASE
IF(ABS(tALPHA(3)-PI/2.0D0).LT.0.0001) then
BR1(1,1)=PIA(1)
BR1(1,2)=0.0D0
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,1)=0.0D0
BR1(3,2)=0.0D0
BR1(3,3)=PIA(3)
!
BR2(1,1)= PIA(1)
BR2(1,2)= 0.0
BR2(1,3)= PIA(1)
BR2(2,1)= 0.0
BR2(2,2)= PIA(2)
BR2(2,3)= 0.0
BR2(3,1)=-PIA(3)
BR2(3,2)= 0.0
BR2(3,3)= PIA(3)
!
RVFAC=2.0
ORTHO=.TRUE.
ELSE
!.....CXZ MONOCLINIC CASE
SINAB=SIN(tALPHA(3))
COSAB=COS(tALPHA(3))
!
BR1(1,1)= PIA(1)/SINAB
BR1(1,2)= -PIA(2)*COSAB/SINAB
BR1(1,3)= 0.0
BR1(2,1)= 0.0
BR1(2,2)= PIA(2)
BR1(2,3)= 0.0
BR1(3,1)= 0.0
BR1(3,2)= 0.0
BR1(3,3)= PIA(3)
!
BR2(1,1)= PIA(1)/SINAB
BR2(1,2)= -PIA(2)*COSAB/SINAB
BR2(1,3)= PIA(1)/SINAB
BR2(2,1)= 0.0
BR2(2,2)= PIA(2)
BR2(2,3)= 0.0
BR2(3,1)=-PIA(3)
BR2(3,2)= 0.0
BR2(3,3)= PIA(3)
!
RVFAC=2.0/SINAB
ORTHO=.FALSE.
ENDIF
ELSE IF(LATTIC(2:3).EQ.'YZ') THEN
BR1(1,1)=PIA(1)
BR1(1,2)=0.0D0
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,1)=0.0D0
BR1(3,2)=0.0D0
BR1(3,3)=PIA(3)
!
BR2(1,1)= PIA(1)
BR2(1,2)= 0.0
BR2(1,3)= 0.0
BR2(2,1)= 0.0
BR2(2,2)= PIA(2)
BR2(2,3)= PIA(2)
BR2(3,1)= 0.0
BR2(3,2)=-PIA(3)
BR2(3,3)= PIA(3)
!
RVFAC=2.0
ORTHO=.TRUE.
ELSE
!.....CXY LATTICE
BR1(1,1)=PIA(1)
BR1(1,2)=0.0D0
BR1(1,3)=0.0D0
BR1(2,1)=0.0D0
BR1(2,2)=PIA(2)
BR1(2,3)=0.0D0
BR1(3,1)=0.0D0
BR1(3,2)=0.0D0
BR1(3,3)=PIA(3)
!
BR2(1,1)= PIA(1)
BR2(1,2)= PIA(1)
BR2(1,3)= 0.0D0
BR2(2,1)=-PIA(2)
BR2(2,2)= PIA(2)
BR2(2,3)= 0.0D0
BR2(3,1)= 0.0D0
BR2(3,2)= 0.0D0
BR2(3,3)= PIA(3)
!
RVFAC=2.D0
ORTHO=.TRUE.
ENDIF
ELSE
! Error: wrong lattice, stop execution
!
WRITE(6,*) 'LATGEN: wrong lattice.'
STOP 'LATGEN - Error'
ENDIF
!.....DEFINE VOLUME OF UNIT CELL
VOL=AA*BB*CC/RVFAC
RETURN
!
654 format(3f10.5,3x,3f10.5)
END SUBROUTINE LATGEN
|
{"hexsha": "4123d0ce547ba761f7021db0938ace1c110750d4", "size": 21305, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/downfold/latgen.f90", "max_stars_repo_name": "chanul13/EDMFTF", "max_stars_repo_head_hexsha": "967d85d898924991b31861b4e1f45129e3eff180", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-04-03T06:37:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T11:44:06.000Z", "max_issues_repo_path": "src/downfold/latgen.f90", "max_issues_repo_name": "chanul13/EDMFTF", "max_issues_repo_head_hexsha": "967d85d898924991b31861b4e1f45129e3eff180", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/downfold/latgen.f90", "max_forks_repo_name": "chanul13/EDMFTF", "max_forks_repo_head_hexsha": "967d85d898924991b31861b4e1f45129e3eff180", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-10-27T20:23:34.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-13T13:54:11.000Z", "avg_line_length": 52.2181372549, "max_line_length": 128, "alphanum_fraction": 0.3005397794, "num_tokens": 6072}
|
"""
Optimization algorithms
"""
import numpy as np
import numpy.ma as ma
import numpy.linalg as LA
import copy
from tqdm import tqdm
from scipy.spatial import distance
from sklearn.neighbors import NearestNeighbors
import torch
import torch.nn as nn
"""
def grad_free_optimizer(initial_sequence, oracle, N):
...
optimization step
...
return opm
"""
######################
# Gradient Free Methods
######################
def eval_oracle(data_point, oracle):
data_point = torch.Tensor(data_point).reshape(1,-1)
# print("data point shape: {}".format(data_point.shape))
with torch.no_grad():
fit_value = oracle(data_point).squeeze().item()
# print("fitness value: {}".format(fit_value))
return fit_value
######################
# Directed Evolution
######################
def directed_evolution_sequence(initial_sequence, model, positions):
"""
from https://www.pnas.org/content/pnas/116/18/8852.full.pdf
traditional directed evolution by greedy walk
1. saturation mutagenesis at single position
2. fix optimal mutation
3. repeat for all positions
x = input_seq
for pos i in [0,N-1]:
        best_aa = optimal AA for pos i
        x[i] = best_aa
return x
initial_sequence = sequence of inds (1 x N)
model = model where f(sequence) = y
positions = iterable of positions
"""
cand_seq = torch.from_numpy(initial_sequence).unsqueeze(0)
with torch.no_grad():
initial_fit = model(cand_seq)[0][-1]
cand_traj = np.zeros((len(positions)+1, len(initial_sequence)))
fit_traj = np.zeros((len(positions) + 1))
cand_traj[0] = cand_seq.reshape(-1).numpy()
fit_traj[0] = initial_fit
for indx, pos in enumerate(tqdm(positions)):
# create full expansion at position
cand_seqs = cand_seq.repeat(22, 1)
cand_seqs[:, pos] = torch.arange(22)
# screen expansion
with torch.no_grad():
cand_seqs_fit = model(cand_seqs)[0][-1]
max_fit_aa_indx = cand_seqs_fit.argmax(0)
max_fit = cand_seqs_fit.max()
cand_seq = cand_seqs[max_fit_aa_indx]
cand_traj[indx+1] = cand_seq.reshape(-1).numpy()
fit_traj[indx+1] = max_fit.numpy()
return cand_traj, fit_traj
############################
# MCMC
############################
# Sequence Level
# -------------------------
def model_predict(sequence, model):
model = model.eval()
    if isinstance(sequence, np.ndarray):
sequence = torch.from_numpy(sequence)
sequence = sequence.reshape(1,-1)
with torch.no_grad():
fit = model(sequence)[0][-1].numpy().squeeze()
return fit
def mutate_sequence(sequence, num_mutations):
sequence = sequence.copy()
AA_inds = np.arange(22)
positions = np.random.choice(np.arange(len(sequence)), num_mutations)
for pos in positions:
pos_val = sequence[pos]
if pos_val == 21:
print('mutation in padding region - ignoring')
else:
# change current AA to a new AA
mut_choices = np.ma.masked_where(AA_inds == pos_val, AA_inds)
chosen_mut = np.random.choice(AA_inds[~mut_choices.mask],1)
sequence[pos] = chosen_mut
return sequence
def acceptance_step(curr_fit, prop_fit, T):
"""
returns bool
"""
out_dict = {0: False, 1: True}
    # acceptance probability
    prob = np.exp((prop_fit - curr_fit) / T)
    if np.isnan(prob):  # note: `prob == np.nan` would always be False
        outcome = 0
    else:
        prob = min(1, prob)
        outcome = np.random.binomial(1, prob)
return out_dict[outcome]
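# Worked example (illustrative numbers only): with T = 0.01, a proposal that is
# worse by 0.1 is accepted with probability exp(-0.1 / 0.01) = exp(-10) ~ 4.5e-5,
# while any proposal with prop_fit >= curr_fit is accepted with probability 1.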
def get_l1_norm(seq1, seq2):
# convert to one-hot
seq1, seq2 = seq1.squeeze(), seq2.squeeze()
seq1 = np.eye(22)[seq1]
seq2 = np.eye(22)[seq2]
l1_norm = LA.norm(seq1 - seq2, 1)
return l1_norm
def metropolisMCMC_sequence(initial_sequence, model, T=0.01, mu=1, trust_radius=15,
N_steps=20):
"""
from pg 24 of low-N
https://www.biorxiv.org/content/10.1101/2020.01.23.917682v2.full.pdf
"""
# start at initial sequence
curr_seq = initial_sequence.numpy()
curr_fit = model_predict(curr_seq, model)
cand_traj = np.zeros( (N_steps, len(initial_sequence) ))
fit_traj = np.zeros((N_steps))
cand_traj[0] = curr_seq.reshape(-1)
fit_traj[0] = curr_fit
# optimization loop
for step_indx in tqdm(range(1,N_steps)):
num_mut = np.random.poisson(mu)
# produce candidate
prop_seq = mutate_sequence(curr_seq, num_mut)
# selection step
if get_l1_norm(prop_seq, curr_seq) < trust_radius: # mut radius
prop_fit = model_predict(prop_seq, model)
if acceptance_step(curr_fit, prop_fit, T):
# print('change accepted')
curr_seq = prop_seq.copy()
curr_fit = prop_fit.copy()
# logging
cand_traj[step_indx] = curr_seq.reshape(-1)
fit_traj[step_indx] = curr_fit
return cand_traj, fit_traj
# Latent space
# -------------------------
def metropolisMCMC_embedding(initial_embedding, oracle,
T=0.01, delta=0.1, N_steps=1000):
"""
    MCMC on a continuous vector space
proposed candidates come from random direction
"""
embed_dim = initial_embedding.shape[-1]
# start at initial sequence
curr_embedding = initial_embedding.reshape(1,embed_dim)
curr_fit = eval_oracle(curr_embedding, oracle)
print("starting fitness: {}".format(curr_fit))
fitness_list = [curr_fit]
out_embedding_array = np.zeros((N_steps, embed_dim))
out_embedding_array[0] = curr_embedding
# optimization loop
for indx in tqdm(range(1,N_steps)):
prop_embedding = curr_embedding + delta * np.random.randn(embed_dim)
prop_fit = eval_oracle(prop_embedding, oracle)
if acceptance_step(curr_fit,prop_fit, T):
curr_embedding = prop_embedding
curr_fit = prop_fit
# logging
fitness_list.append(curr_fit)
out_embedding_array[indx] = curr_embedding
return out_embedding_array, np.array(fitness_list)
def model_cycle(embedding, model):
"""passes embedding through decoder and encoder
Args:
embedding ([type]): [description]
model ([type]): [description]
"""
with torch.no_grad():
embedding = torch.from_numpy(embedding).float().reshape(1,-1)
decoded_seq = model.decode(embedding).argmax(1)
re_embed = model.encode(decoded_seq).numpy()
return re_embed
def metropolisMCMC_embedding_cycle(initial_embedding, oracle, model,
T=0.01, delta=0.05, N_steps=1000, perturbation=True):
"""
    MCMC on a continuous vector space
proposed candidates come from random direction
"""
embed_dim = initial_embedding.shape[-1]
# start at initial sequence
curr_embedding = initial_embedding.reshape(1,embed_dim)
curr_fit = eval_oracle(curr_embedding, oracle)
print("starting fitness: {}".format(curr_fit))
fitness_list = [curr_fit]
out_embedding_array = np.zeros((N_steps, embed_dim))
out_embedding_array[0] = curr_embedding
# optimization loop
for indx in tqdm(range(1,N_steps)):
# perturbation step
if perturbation:
prop_embedding = curr_embedding + delta * np.random.randn(embed_dim)
prop_embedding = model_cycle(prop_embedding, model)
else:
prop_embedding = model_cycle(curr_embedding, model)
prop_fit = eval_oracle(prop_embedding, oracle)
if acceptance_step(curr_fit,prop_fit, T):
curr_embedding = prop_embedding
curr_fit = prop_fit
# logging
fitness_list.append(curr_fit)
out_embedding_array[indx] = curr_embedding
return out_embedding_array, np.array(fitness_list)
############################
# Hill Climbing
############################
def get_knn_directions(dataset, current_point, k):
dists_to_initial_point = distance.cdist(current_point.reshape(1,-1), dataset).flatten()
knn = [dataset[x] for x in np.argsort(dists_to_initial_point)[:k]]
return knn
def get_steepest_neighbor(neighbors, oracle):
fitness_values = np.array([eval_oracle(x, oracle) for x in neighbors])
steepest_neighbor = neighbors[fitness_values.argmax()]
return steepest_neighbor, max(fitness_values)
def get_stochastic_steepest_neighbor(neighbors, oracle, curr_fit):
fitness_values = np.array([eval_oracle(x, oracle) for x in neighbors])
incline_inds = np.arange(len(fitness_values))[fitness_values > curr_fit]
if len(incline_inds) == 0:
all_inds = np.arange(len(fitness_values))
choice_ind = np.random.choice(all_inds, 1)[0]
else:
choice_ind = np.random.choice(incline_inds, 1)[0]
choice_neighbor = neighbors[choice_ind]
choice_fit = fitness_values[choice_ind]
return choice_neighbor, choice_fit
def nn_hill_climbing_embedding(initial_embedding, oracle, dataset_embeddings,
step_interp=0.5, k_neighbors=30, N_steps=1000, stochastic=False):
"""[summary]
Args:
initial_embedding ([type]): [description]
oracle ([type]): [description]
step_interp ([type]): [description]
N_steps ([type]): [description]
"""
embed_dim = initial_embedding.shape[-1]
curr_embedding = initial_embedding.reshape(1,embed_dim)
curr_fit = eval_oracle(curr_embedding, oracle)
print("starting fitness: {}".format(curr_fit))
fitness_list = [curr_fit]
out_embedding_array = np.zeros((N_steps, embed_dim))
out_embedding_array[0] = curr_embedding
for indx in tqdm(range(1,N_steps)):
# search step
k_directions = get_knn_directions(dataset_embeddings, curr_embedding, k_neighbors )
if stochastic:
next_neighbor, next_fitness = get_stochastic_steepest_neighbor(k_directions, oracle, curr_fit)
else:
next_neighbor, next_fitness = get_steepest_neighbor(k_directions, oracle)
next_direction = next_neighbor - curr_embedding
# update step
curr_embedding += step_interp * next_direction
curr_fit = next_fitness
# logging
out_embedding_array[indx] = curr_embedding
fitness_list.append(curr_fit)
return out_embedding_array, np.array(fitness_list)
############################
# Gradient Methods
############################
# TODO: convert to py class
def grad_ascent(initial_embedding, train_embeddings,
train_data, model, N_steps, lr, cycle=False):
    # need to pass the embedding through the network layers for a gradient
    # to be taken with respect to it, so evaluate the regressor on it once up front
model.requires_grad_(True)
grad_list = []
# data logging
embed_dim = initial_embedding.shape[-1]
out_embedding_array = np.zeros((N_steps, embed_dim))
out_fit_array = np.zeros((N_steps))
# initial step
curr_embedding = torch.tensor(initial_embedding, requires_grad=True).reshape(-1, embed_dim)
curr_fit = model.regressor_module(curr_embedding)
print("starting fitness: {}".format(curr_fit))
# save step 0 info
out_embedding_array[0] = curr_embedding.reshape(1,embed_dim).detach().numpy()
out_fit_array[0] = curr_fit.detach().numpy()
assert curr_embedding.requires_grad
for step in tqdm(range(1,N_steps)):
model.train()
grad = torch.autograd.grad(curr_fit, curr_embedding)[0] # get gradient
grad_list.append(grad.detach())
        # update step: re-create a leaf tensor, since an in-place update of a
        # (view of a) leaf tensor that requires grad raises a RuntimeError in PyTorch
        update_step = grad * lr
        curr_embedding = (curr_embedding + update_step).detach().requires_grad_(True)
# cycle bool
model = model.eval()
if cycle:
nseq = model.decode(curr_embedding).argmax(1)
            curr_embedding = model.encode(nseq).detach().requires_grad_(True)
curr_fit = model.regressor_module(curr_embedding)
# save step i info
        out_embedding_array[step] = curr_embedding.detach().numpy()
out_fit_array[step] = curr_fit.detach().numpy()
return out_embedding_array, out_fit_array, grad_list
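############################
# Smoke Test
############################
# Minimal smoke test (illustrative only): a randomly initialized linear layer
# stands in for a trained fitness oracle; this toy setup is an assumption made
# purely for this sketch and is not part of the original experiments.
if __name__ == "__main__":
    torch.manual_seed(0)
    np.random.seed(0)
    toy_oracle = nn.Linear(8, 1)
    start_embedding = np.zeros(8)
    traj, fits = metropolisMCMC_embedding(
        start_embedding, toy_oracle, T=0.1, delta=0.1, N_steps=50
    )
    print("best fitness found: {:.4f}".format(fits.max()))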
|
{"hexsha": "7e931af73a686790cb4cc3d33eccddff4b8b466f", "size": 12490, "ext": "py", "lang": "Python", "max_stars_repo_path": "relso/optim/optim_algs.py", "max_stars_repo_name": "ec1340/ReLSO-Guided-Generative-Protein-Design-using-Regularized-Transformers", "max_stars_repo_head_hexsha": "2320b3ebd97df908474b23e7a4395b8fa13133f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2022-01-16T18:19:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T20:48:24.000Z", "max_issues_repo_path": "relso/optim/optim_algs.py", "max_issues_repo_name": "ec1340/ReLSO-Guided-Generative-Protein-Design-using-Regularized-Transformers", "max_issues_repo_head_hexsha": "2320b3ebd97df908474b23e7a4395b8fa13133f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "relso/optim/optim_algs.py", "max_forks_repo_name": "ec1340/ReLSO-Guided-Generative-Protein-Design-using-Regularized-Transformers", "max_forks_repo_head_hexsha": "2320b3ebd97df908474b23e7a4395b8fa13133f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-02-18T03:36:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-19T11:56:13.000Z", "avg_line_length": 25.8592132505, "max_line_length": 111, "alphanum_fraction": 0.6264211369, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2859}
|
%% SECTION HEADER /////////////////////////////////////////////////////////////////////////////////////
\section{The Time Integration}
\label{sec:time}
%% SECTION CONTENT ////////////////////////////////////////////////////////////////////////////////////
Similar to the \ac{fem}, the time solution of the governing equation can be realized implicitly, e.g., by the Crank-Nicolson
scheme, or explicitly, e.g., by the central difference method \cite{bathe2006finite}.
For suitable parameters, the first method is unconditionally stable, i.e., its stability is independent of the time step.
In contrast, the explicit method requires a time step much smaller than that implied by the Nyquist-Shannon sampling theorem.
The critical value of the time increment (\(\Delta t_{cr}\)) depends on the mesh size and the wave mode velocity; it is commonly estimated as the smallest element length divided by the fastest wave velocity.
The most significant advantage of the explicit method is that only a combination of the mass and damping matrices needs to be inverted, which is trivial in the presented scheme because both matrices are diagonal.
Considering the piezoelectric coupling in Equation~(\ref{eq:elecmechcoupling}) and the displacement interface condition in Equation~(\ref{eq:cond_disp}), the global equation of motion is expressed as:
\begin{eqnarray}
\label{eq:motion_coupling}
\textbf{M}_{dd} \widehat{\ddot{\textbf{d}}} +
\textbf{D}_{dd} \widehat{\dot{\textbf{d}}} +
\left [\begin{array}{ccc}
\textbf{K}_{dd}&\textbf{K}_{d\phi}&\textbf{G}^T\\
\textbf{K}_{d\phi}^T&\textbf{K}_{\phi \phi}&\textbf{0}\\
\textbf{G}&\textbf{0}&\textbf{0}
\end{array}\right]
\left \{\begin{array}{c}
\widehat{\textbf{d}}\\
\widehat{\boldsymbol{\phi}}\\
\widehat{\boldsymbol{\lambda}}
\end{array}\right\} =
\left \{\begin{array}{c}
\widehat{\textbf{f}}_{ext} \\
\widehat{\textbf{Q}}\\
\textbf{0}
\end{array}\right \},
\end{eqnarray}
where \(\widehat{\boldsymbol{\lambda}}\) is the vector of nodal Lagrange multipliers.
Substituting Equations~(\ref{eq:pztboundary}) and (\ref{eq:freePotetial}) into Equation~(\ref{eq:motion_coupling}), the equation of motion can be rearranged into the form:
\begin{eqnarray}
\textbf{M}_{dd} \widehat{\ddot{\textbf{d}}} + \textbf{D}_{dd} \widehat{\dot{\textbf{d}}} + (\textbf{K}_{dd}-\textbf{K}_{s}) \widehat{\textbf{d}} = \widehat{\textbf{f}}_{ext} + \widehat{\textbf{f}}_{a} - \textbf{G}^T \widehat{\boldsymbol{\lambda}}.
\label{eq:motionD}
\end{eqnarray}
In the central difference scheme, the velocity and acceleration at time \(t\) are given by:
\begin{eqnarray}
\label{eq:v}
\dot{\textbf{d}}_{t} = \frac{\textbf{d}_{t+\Delta t} - \textbf{d}_{t-\Delta t}}{2\Delta t},\\
\label{eq:a}
\ddot{\textbf{d}}_{t} = \frac{\textbf{d}_{t+\Delta t} - 2\textbf{d}_{t} + \textbf{d}_{t-\Delta t}}{\Delta t^2},
\end{eqnarray}
where \(\Delta t\) is the time increment, and \(\textbf{d}_{t}\), \(\textbf{d}_{t-\Delta t}\) and \(\textbf{d}_{t+\Delta t}\) are the nodal displacement vectors at time \(t\), one step back, and one step forward, respectively.
Thus, substituting Equations~(\ref{eq:v}) and (\ref{eq:a}) into Equation~(\ref{eq:motionD}) and rearranging, the global equation of motion can be expressed as:
\begin{equation}
\begin{array}{c}
\left(\frac{1}{\Delta t^2}\textbf{M}_{dd}+\frac{1}{2\Delta t}\textbf{D}_{dd} \right)\widehat{\textbf{d}}_{t+\Delta t}=
\widehat{\textbf{f}}_{ext} + \widehat{\textbf{f}}_{a} - \left( \textbf{K}_{dd}-\textbf{K}_s\right)\widehat{\textbf{d}}_t+\\
+\frac{2}{\Delta t^2}\textbf{M}_{dd}\widehat{\textbf{d}}_t-\left(\frac{1}{\Delta t^2}\textbf{M}_{dd}-\frac{1}{2\Delta t}\textbf{D}_{dd}\right)\widehat{\textbf{d}}_{t-\Delta t}-\textbf{G}^T\widehat{\boldsymbol{\lambda}}_t.
\end{array}
\label{eq:cdm}
\end{equation}
The vector of Lagrange multipliers \(\widehat{\boldsymbol{\lambda}}_t\) can be extracted from Equation~(\ref{eq:cdm}) by imposing the constraint (\ref{eq:cond_disp}):
\begin{eqnarray}
\widehat{\boldsymbol{\lambda}}_t = {\left(\textbf{G}\textbf{L}_+^{-1}\textbf{G}^T \right)}^{-1}\textbf{G}\textbf{L}_+^{-1} \left[ \widehat{\textbf{f}}_{ext} + \widehat{\textbf{f}}_{a} + \left(\frac{2}{\Delta t^2}\textbf{M}_{dd}-\textbf{K}_{dd}+\textbf{K}_s\right)\widehat{\textbf{d}}_t -\textbf{L}_-\widehat{\textbf{d}}_{t-\Delta t} \right],
\label{eq:lambda}
\end{eqnarray}
where \(\textbf{L}_{\pm}=\frac{1}{\Delta t^2}\textbf{M}_{dd}\pm\frac{1}{2\Delta t}\textbf{D}_{dd}\).
The implementation of the central difference method is presented in Algorithm~\ref{alg:cdm}.
The implementation concerns the excitation and reception of the wave by a pair of \acp{pzt}.
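The starting value \(\widehat{\textbf{d}}_{-\Delta t}\) required in Algorithm~\ref{alg:cdm} follows from Equations~(\ref{eq:v}) and (\ref{eq:a}) evaluated at \(t=0\):
\begin{eqnarray}
\widehat{\textbf{d}}_{-\Delta t} = \widehat{\textbf{d}}_{0} - \Delta t\, \widehat{\dot{\textbf{d}}}_{0} + \frac{\Delta t^{2}}{2}\, \widehat{\ddot{\textbf{d}}}_{0}.
\end{eqnarray}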
\begin{algorithm}[H]
\SetAlgoLined
\KwResult{nodal displacement vector \(\widehat{\textbf{d}}_{t+\Delta t}\) and sensor response \(\boldsymbol{\phi}_{t+\Delta t}\)}
initialise \(\widehat{\textbf{d}}_0\), \(\widehat{\dot{\textbf{d}}}_0\), \(\widehat{\boldsymbol{\lambda}}_0\) and \(\boldsymbol{\phi}_{0}\)\\
calculate \(\widehat{\ddot{\textbf{d}}}_0\) from Equation~\ref{eq:motionD},\\
select time step \(\Delta t \leq \Delta t_{cr}\),\\
extract \(\widehat{\textbf{d}}_{-\Delta t}\) from Equations~(\ref{eq:v}) and (\ref{eq:a}),\\
\For{each time step}{
calculate actuator forces \(\widehat{\textbf{f}}_a\) by Equation~\ref{eq:f_act},\\
calculate internal forces \(\widehat{\textbf{f}}_{int}=\left(\textbf{K}_{dd}-\textbf{K}_{s}\right)\,\widehat{\textbf{d}}_t\),\\
calculate Lagrange multipliers \(\widehat{\boldsymbol{\lambda}}\) by Equation~\ref{eq:lambda},\\
calculate following step displacement \(\widehat{\textbf{d}}_{t+\Delta t}\) solving equation of motion \ref{eq:cdm},\\
calculate sensor response \(\boldsymbol{\phi}_{t+\Delta t}\) by Equation~\ref{eq:sensorResponse}.
}
\caption{Central difference method implementation}
\label{alg:cdm}
\end{algorithm}
|
{"hexsha": "3de91fbfb7be95d4ee531956dc775fccf2651798", "size": 5671, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/proposal/Dissertation/Chapters/Chapter4/sec:time.tex", "max_stars_repo_name": "pfiborek/model-hc", "max_stars_repo_head_hexsha": "9e49fe23117fd320be14214e5ff6bafd2b1fc1a3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "docs/proposal/Dissertation/Chapters/Chapter4/sec:time.tex", "max_issues_repo_name": "pfiborek/model-hc", "max_issues_repo_head_hexsha": "9e49fe23117fd320be14214e5ff6bafd2b1fc1a3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/proposal/Dissertation/Chapters/Chapter4/sec:time.tex", "max_forks_repo_name": "pfiborek/model-hc", "max_forks_repo_head_hexsha": "9e49fe23117fd320be14214e5ff6bafd2b1fc1a3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 65.9418604651, "max_line_length": 348, "alphanum_fraction": 0.6732498677, "num_tokens": 1969}
|
#!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
import math
import logging
from numpy.linalg import norm
import astropy.units as u
import astropy.constants as const
from naima.models import (
ExponentialCutoffPowerLaw,
ExponentialCutoffBrokenPowerLaw,
Synchrotron,
InverseCompton
)
import tgblib.pulsar as psr
import tgblib.radiative as rad
import tgblib.parameters as pars
from tgblib import data
from tgblib import absorption
from tgblib import util
class FitResult(object):
def __init__(self, n_periods, label='', color='k', SigmaMax=None, EdotMin=None):
self.color = color
self.SigmaMax = SigmaMax if SigmaMax is not None else 1e10
self.EdotMin = EdotMin if EdotMin is not None else 1e10
self.nPeriods = n_periods
self.loadData(label)
chisq_sel = list()
for (c, s) in zip(self.chiSq, self.lgSigma):
chisq_sel.append(c if s < math.log10(self.SigmaMax) else 1e20)
idxMin = np.argmin(chisq_sel)
self.chiSqMin = self.chiSq[idxMin]
self.lgEdotMin = self.lgEdot[idxMin]
self.lgSigmaMin = self.lgSigma[idxMin]
self.bMin = list()
self.distPulsarMin = list()
self.normMin = list()
for ip in range(self.nPeriods):
self.bMin.append(self.bField[ip][idxMin])
self.distPulsarMin.append(self.distPulsar[ip][idxMin])
self.normMin.append(10**self.lgNorm[ip][idxMin])
msg = (
'ChiSqMin/ndf=' + str(round(self.chiSqMin, 2)) + '/' + str(self.ndf)
+ '=' + str(round(self.chiSqMin/self.ndf, 3))
)
logging.info(msg)
self.sigma_1s, self.chiSq_1s = list(), list()
self.lgEdot_1s, self.lgSigma_1s = list(), list()
self.distPulsar_1s, self.b_1s, self.lgNorm_1s = list(), list(), list()
for ip in range(self.nPeriods):
self.distPulsar_1s.append(list())
self.b_1s.append(list())
self.lgNorm_1s.append(list())
self.sigma_2s, self.chiSq_2s = list(), list()
self.lgEdot_2s, self.lgSigma_2s = list(), list()
for ii in range(len(self.chiSq)):
sig = (
math.sqrt(self.chiSq[ii] - self.chiSqMin)
if self.chiSq[ii] - self.chiSqMin > 0 else 1e20
)
no_disk = (
self.dist[0] - self.distPulsar[0][ii] > 1.12
and self.dist[1] - self.distPulsar[1][ii] > 1.12
)
# 2 sigma band
if (
sig < math.sqrt(6.18)
and self.lgEdot[ii] > math.log10(self.EdotMin)
and self.lgSigma[ii] < math.log10(self.SigmaMax)
):
self.sigma_2s.append(sig)
self.chiSq_2s.append(self.chiSq[ii])
self.lgEdot_2s.append(self.lgEdot[ii])
self.lgSigma_2s.append(self.lgSigma[ii])
# 1 sigma band
if sig < math.sqrt(2.3):
self.sigma_1s.append(sig)
self.chiSq_1s.append(self.chiSq[ii])
self.lgEdot_1s.append(self.lgEdot[ii])
self.lgSigma_1s.append(self.lgSigma[ii])
for ip in range(self.nPeriods):
self.distPulsar_1s[ip].append(self.distPulsar[ip][ii])
self.b_1s[ip].append(self.bField[ip][ii])
self.lgNorm_1s[ip].append(self.lgNorm[ip][ii])
# Determining the line along Edot
self.lgEdotLine, self.lgSigmaLine = list(), list()
self.lgSigmaInf, self.lgSigmaSup = list(), list()
self.distPulsarLine, self.bLine, self.lgNormLine = list(), list(), list()
for ip in range(self.nPeriods):
self.distPulsarLine.append(list())
self.bLine.append(list())
self.lgNormLine.append(list())
lgEdotSet = list(set(self.lgEdot_1s))
for ii in range(len(lgEdotSet)):
colSigma = [s for (s, e) in zip(self.lgSigma_1s, self.lgEdot_1s) if e == lgEdotSet[ii]]
colChiSq = [c for (c, e) in zip(self.chiSq_1s, self.lgEdot_1s) if e == lgEdotSet[ii]]
idxMin = np.argmin(colChiSq)
self.lgEdotLine.append(lgEdotSet[ii])
self.lgSigmaLine.append(colSigma[idxMin])
self.lgSigmaInf.append(min(colSigma))
self.lgSigmaSup.append(max(colSigma))
for ip in range(self.nPeriods):
colDist, colLgNorm, colB = list(), list(), list()
for ie in range(len(self.lgEdot_1s)):
if self.lgEdot_1s[ie] != lgEdotSet[ii]:
continue
colDist.append(self.distPulsar_1s[ip][ie])
colLgNorm.append(self.lgNorm_1s[ip][ie])
colB.append(self.b_1s[ip][ie])
self.distPulsarLine[ip].append(colDist[idxMin] / self.dist[0])
self.bLine[ip].append(colB[idxMin])
self.lgNormLine[ip].append(colLgNorm[idxMin])
self.lgEdotLine_2s, self.lgSigmaLine_2s = list(), list()
self.lgSigmaInf_2s, self.lgSigmaSup_2s = list(), list()
lgEdotSet = list(set(self.lgEdot_2s))
for ii in range(len(lgEdotSet)):
colSigma = [s for (s, e) in zip(self.lgSigma_2s, self.lgEdot_2s) if e == lgEdotSet[ii]]
colChiSq = [c for (c, e) in zip(self.chiSq_2s, self.lgEdot_2s) if e == lgEdotSet[ii]]
idxMin = np.argmin(colChiSq)
self.lgEdotLine_2s.append(lgEdotSet[ii])
self.lgSigmaLine_2s.append(colSigma[idxMin])
self.lgSigmaInf_2s.append(min(colSigma))
self.lgSigmaSup_2s.append(max(colSigma))
def loadData(self, label):
fileName = 'fit_results/fit_results_' + label + '.txt'
logging.info('Loading data {}'.format(fileName))
data = np.loadtxt(fileName, unpack=True)
self.chiSq = data[0]
self.ndf = data[1][0]
self.lgNorm = list()
for ip in range(self.nPeriods):
self.lgNorm.append(data[2 + ip])
self.lgEdot = data[2 + self.nPeriods]
self.lgSigma = data[3 + self.nPeriods]
self.dist = list()
self.distPulsar = list()
self.bField = list()
for ip in range(self.nPeriods):
self.dist.append(data[4 + self.nPeriods + 3*ip][0])
self.distPulsar.append(data[5 + self.nPeriods + 3*ip])
self.bField.append(data[6 + self.nPeriods + 3*ip])
def plot_solution(
self,
band=True,
line=True,
star=True,
ms=35,
with_lines=False,
no_2s=True,
ls='-',
line_ls='-',
label=None
):
ax = plt.gca()
if band:
lgEdotBand_2s, lgSigmaSupBand_2s, lgSigmaInfBand_2s = zip(*sorted(zip(
self.lgEdotLine_2s,
self.lgSigmaSup_2s,
self.lgSigmaInf_2s
)))
lgEdotBand_1s, lgSigmaSupBand_1s, lgSigmaInfBand_1s = zip(*sorted(zip(
self.lgEdotLine,
self.lgSigmaSup,
self.lgSigmaInf
)))
if with_lines:
plt.plot(
[10**l for l in lgEdotBand_1s],
[10**l for l in lgSigmaInfBand_1s],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.7,
label=label
)
plt.plot(
[10**l for l in lgEdotBand_1s],
[10**l for l in lgSigmaSupBand_1s],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.7
)
# Connecting the borders
plt.plot(
[10**lgEdotBand_1s[-1], 10**lgEdotBand_1s[-1]],
[10**lgSigmaInfBand_1s[-1], 10**lgSigmaSupBand_1s[-1]],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.7
)
plt.plot(
[10**lgEdotBand_1s[0], 10**lgEdotBand_1s[0]],
[10**lgSigmaInfBand_1s[0], 10**lgSigmaSupBand_1s[0]],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.7
)
if not no_2s:
plt.plot(
[10**l for l in lgEdotBand_2s],
[10**l for l in lgSigmaInfBand_2s],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.3
)
plt.plot(
[10**l for l in lgEdotBand_2s],
[10**l for l in lgSigmaSupBand_2s],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.3
)
# Connecting the borders
plt.plot(
[10**lgEdotBand_2s[-1], 10**lgEdotBand_2s[-1]],
[10**lgSigmaInfBand_2s[-1], 10**lgSigmaSupBand_2s[-1]],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.3
)
plt.plot(
[10**lgEdotBand_2s[0], 10**lgEdotBand_2s[0]],
[10**lgSigmaInfBand_2s[0], 10**lgSigmaSupBand_2s[0]],
marker='None',
ls=ls,
linewidth=3,
c=self.color,
alpha=0.3
)
else:
plt.fill_between(
[10**l for l in lgEdotBand_1s],
[10**l for l in lgSigmaInfBand_1s],
[10**l for l in lgSigmaSupBand_1s],
color=self.color,
alpha=0.4
)
if not no_2s:
plt.fill_between(
[10**l for l in lgEdotBand_2s],
[10**l for l in lgSigmaInfBand_2s],
[10**l for l in lgSigmaSupBand_2s],
color=self.color,
alpha=0.2
)
if line:
lgEdotSorted, lgSigmaSorted = zip(*sorted(zip(self.lgEdotLine, self.lgSigmaLine)))
plt.plot(
[10**l for l in lgEdotSorted],
[10**l for l in lgSigmaSorted],
marker='None',
ls=line_ls,
c=self.color
)
else: # not band
ax.scatter(
[10**l for l in self.lgEdot_1s],
[10**l for l in self.lgSigma_1s],
c=self.sigma_1s, label=label
)
if star:
plt.plot(
[10**self.lgEdotMin],
[10**self.lgSigmaMin],
marker='*',
c=self.color,
markersize=15
)
def plot_star(self, Edot=1e36, marker='*', ms=15):
idx = np.argmin(np.array([math.fabs(l - math.log10(Edot)) for l in self.lgEdotLine]))
Edot_star = 10**self.lgEdotLine[idx]
sig_star = 10**self.lgSigmaLine[idx]
plt.plot(
Edot_star,
sig_star,
marker=marker,
c=self.color,
markersize=ms
)
def plot_sigma(self, line=True, star=True):
if line:
lgEdotSorted, lgSigmaSorted = zip(*sorted(zip(
self.lgEdotLine,
self.lgSigmaLine
)))
plt.plot(
[10**l for l in lgEdotSorted],
[10*l for l in lgSigmaSorted],
marker='None',
ls='--',
c=self.color
)
if star:
plt.plot(
[10**self.lgEdotMin],
[10**self.lgSigmaMin],
marker='*',
ls='None',
c=self.color,
markersize=10
)
def plot_sigma_dist(self, line=True, star=True, ls='-', label='None', in_cm=True, lw=1):
if line:
distSorted, lgSigmaSorted = zip(*sorted(zip(
self.distPulsar0Line,
self.lgSigmaLine
)))
fac = u.au.to(u.cm) if in_cm else 1
plt.plot(
[fac * d * self.dist0 for d in distSorted],
[10**l for l in lgSigmaSorted],
marker='None',
ls=ls,
c=self.color,
label=label,
linewidth=lw
)
def plot_crab_sigma(self, alpha=1, ls='-'):
def comp_sig_crab(rs, alpha):
return 3e-3 * pow(3e17 * u.cm.to(u.au) / rs, alpha)
sig_crab = [comp_sig_crab(rs * self.dist[0], alpha) for rs in self.distPulsarLine[0]]
lgEdotSorted, sigmaSorted = zip(*sorted(zip(
self.lgEdotLine,
sig_crab
)))
plt.plot(
[10**l for l in lgEdotSorted],
sigmaSorted,
marker='None',
ls=ls,
c=self.color
)
def plot_B(self, line=True, star=True, iperiod=0, ls='-', label='None'):
if line:
lgEdotSorted, bSorted = zip(*sorted(zip(
self.lgEdotLine,
self.bLine[iperiod]
)))
plt.plot(
[10**l for l in lgEdotSorted],
bSorted,
marker='None',
ls=ls,
c=self.color,
label=label
)
if star:
plt.plot(
[10**self.lgEdotMin],
[self.bMin[iperiod]],
marker='*',
ls='None',
c=self.color,
markersize=10
)
def plot_esyn(self, only_0=True, ls='-', label='None'):
Tstar = 30e3
density0 = [psr.PhotonDensity(Tstar=Tstar, Rstar=7.8, d=self.dist0 * (1 - r))
for r in self.distPulsar0Line]
density1 = [psr.PhotonDensity(Tstar=Tstar, Rstar=7.8, d=self.dist1 * (1 - r))
for r in self.distPulsar1Line]
Esyn0 = [1e9*rad.Esyn(E=rad.Ebreak(B=b, T=Tstar, U=u), B=b) for (b, u) in
zip(self.b0Line, density0)]
Esyn1 = [1e9*rad.Esyn(E=rad.Ebreak(B=b, T=Tstar, U=u), B=b) for (b, u) in
zip(self.b1Line, density1)]
lgEdotSorted, Esyn0Sorted, Esyn1Sorted = zip(*sorted(zip(self.lgEdotLine,
Esyn0,
Esyn1)))
plt.plot([10**l for l in lgEdotSorted], Esyn0Sorted,
marker='None', ls=ls, c=self.color, label=label)
if not only_0:
plt.plot([10**l for l in lgEdotSorted], Esyn1Sorted,
marker='None', ls=ls, c=self.color)
def plot_ebreak(self, only_0=True, ls='-', label='None'):
Tstar = 30e3
density0 = [psr.PhotonDensity(Tstar=Tstar, Rstar=7.8, d=self.dist0 * (1 - r))
for r in self.distPulsar0Line]
density1 = [psr.PhotonDensity(Tstar=Tstar, Rstar=7.8, d=self.dist1 * (1 - r))
for r in self.distPulsar1Line]
Ebr0 = [rad.Ebreak(B=b, T=Tstar, U=u) for (b, u) in zip(self.b0Line, density0)]
Ebr1 = [rad.Ebreak(B=b, T=Tstar, U=u) for (b, u) in zip(self.b1Line, density1)]
lgEdotSorted, Ebr0Sorted, Ebr1Sorted = zip(*sorted(zip(self.lgEdotLine, Ebr0, Ebr1)))
plt.plot([10**l for l in lgEdotSorted], Ebr0Sorted,
marker='None', ls=ls, c=self.color, label=label)
if not only_0:
plt.plot([10**l for l in lgEdotSorted], Ebr1Sorted,
marker='None', ls=ls, c=self.color)
def plot_density(self, line=True, iperiod=0, ls='-', label='None'):
if line:
density = [
psr.PhotonDensity(Tstar=pars.TSTAR, Rstar=pars.RSTAR, d=self.dist[iperiod]*(1 - r))
for r in self.distPulsarLine[iperiod]
]
lgEdotSorted, densitySorted = zip(*sorted(zip(
self.lgEdotLine,
density
)))
plt.plot(
[10**l for l in lgEdotSorted],
densitySorted,
marker='None',
ls=ls,
c=self.color,
label=label
)
def plot_dist(self, line=True, star=True, iperiod=0, label='None', ls='-', ratio=True):
fac = 1 if ratio else self.dist[iperiod]
if line:
lgEdotSorted, distSorted = zip(*sorted(zip(
self.lgEdotLine,
self.distPulsarLine[iperiod]
)))
plt.plot(
[10**l for l in lgEdotSorted],
[fac * d for d in distSorted],
marker='None',
ls=ls,
c=self.color,
label=label
)
if star:
plt.plot(
[10**self.lgEdotMin],
[fac * self.distPulsarMin[iperiod] / self.dist[iperiod]],
marker='*',
ls='None',
c=self.color,
markersize=12
)
def plot_optical_depth(
self,
pos,
line=True,
star=True,
iperiod=0,
label='None',
ls='-',
Tstar=pars.TSTAR,
Rstar=pars.RSTAR
):
if line:
lgEdotSorted, distSorted = zip(*sorted(zip(
self.lgEdotLine,
self.distPulsarLine[iperiod]
)))
lgEdotPlot, distPlot = list(), list()
for i in range(len(lgEdotSorted)):
if i % 5 == 0:
lgEdotPlot.append(lgEdotSorted[i])
distPlot.append(distSorted[i])
Obs = np.array([0, 0, -1])
Abs = absorption.Absorption(Tstar=Tstar, Rstar=Rstar)
tau = [
Abs.TauGG(en=0.2, obs=Obs, pos=pos * self.dist[iperiod] * (1 - r) / norm(pos))
for r in distPlot
]
tauMin = Abs.TauGG(
en=0.2,
obs=Obs,
pos=pos * (self.dist[iperiod] - self.distPulsarMin[iperiod]) / norm(pos)
)
plt.plot(
[10**l for l in lgEdotPlot],
tau,
marker='None',
ls=ls,
c=self.color,
label=label
)
if star:
plt.plot(
[10**self.lgEdotMin],
[tauMin],
marker='*',
ls='None',
c=self.color,
markersize=12
)
def plot_norm(self):
ax = plt.gca()
ax.scatter([10**l for l in self.lgEdot_1s],
[10**l0/10**l1 for (l0, l1) in zip(self.lgNorm0_1s, self.lgNorm1_1s)],
color=self.color, marker='o', s=3)
def plot_sed(
self,
iperiod=0,
period=0,
best_solution=True,
Edot=1e36,
theta_ic=90,
dist=2,
pos=np.array([1, 1, 1]),
ls='-',
lw=1,
label='None',
Tstar=pars.TSTAR,
Rstar=pars.RSTAR,
emin=0.1,
ecut=50,
fast=False
):
Alpha = pars.ELEC_SPEC_INDEX[period]
Eref = 1 * u.TeV
Ecut = ecut * u.TeV
Emax = 20 * u.PeV
Emin = emin * u.TeV
SourceDist = pars.SRC_DIST * u.kpc
n_en = 1 if fast else 2
Obs = np.array([0, 0, -1])
Abs = absorption.Absorption(Tstar=Tstar, Rstar=Rstar)
if best_solution:
b_sed = self.bMin[iperiod]
norm_sed = self.normMin[iperiod]
dist_sed = self.distPulsarMin[iperiod]
dist_star = dist - dist_sed
density_sed = psr.PhotonDensity(Tstar=Tstar, Rstar=Rstar, d=dist_star)
else:
idx = np.argmin(np.array([math.fabs(l - math.log10(Edot)) for l in self.lgEdotLine]))
b_sed = self.bLine[iperiod][idx]
norm_sed = 10**self.lgNormLine[iperiod][idx]
dist_sed = self.distPulsarLine[iperiod][idx]
dist_star = dist * (1 - dist_sed)
density_sed = psr.PhotonDensity(Tstar=Tstar, Rstar=Rstar, d=dist_star)
EnergyToPlot = np.logspace(-0.5, 9.6, n_en * 300) * u.keV
ECPL = ExponentialCutoffPowerLaw(
amplitude=norm_sed / u.eV,
e_0=Eref,
alpha=Alpha,
e_cutoff=Ecut
)
SYN = Synchrotron(
particle_distribution=ECPL,
B=b_sed * u.G,
Eemax=Emax,
Eemin=Emin
)
IC = InverseCompton(
particle_distribution=ECPL,
seed_photon_fields=[[
'STAR',
Tstar * u.K,
density_sed * u.erg / u.cm**3,
theta_ic * u.deg
]],
Eemax=Emax,
Eemin=Emin
)
tau = list()
for e in EnergyToPlot:
if e.value * u.keV.to(u.TeV) < 1e-4:
tau.append(0)
else:
tau.append(Abs.TauGG(
en=e.value * u.keV.to(u.TeV),
obs=Obs,
pos=pos * dist_star / norm(pos)
))
model = (
SYN.sed(photon_energy=EnergyToPlot, distance=SourceDist)
+ IC.sed(photon_energy=EnergyToPlot, distance=SourceDist)
)
model_abs = [math.exp(-t) * m.value for (m, t) in zip(model, tau)]
EnergyToPlot, model_abs = util.fix_naima_bug(EnergyToPlot, model_abs)
model_abs = util.smooth_break(EnergyToPlot, model_abs)
ax = plt.gca()
ax.plot(EnergyToPlot, model_abs, ls=ls, lw=lw, c=self.color, label=label)
# Integrating spectrum
# spec = [m.value / e.value / u.keV.to(u.erg) for (m, e) in zip(model, EnergyToPlot)]
# en = [e.value * u.keV.to(u.erg) for e in EnergyToPlot]
# L = np.trapz(x=en, y=spec) * 4 * math.pi * (SourceDist * u.kpc.to(u.cm))**2
# print('SED Luminosity = ', L, 'ergs/s')
|
{"hexsha": "b8f897557f45d60778171a62fb662b04a5a6b3fc", "size": 22939, "ext": "py", "lang": "Python", "max_stars_repo_path": "tgblib/fit_results.py", "max_stars_repo_name": "RaulRPrado/tev-binaries-model", "max_stars_repo_head_hexsha": "c60959caaffbcdf3398914b03531647f95e97da0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-03T15:39:38.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-03T15:39:38.000Z", "max_issues_repo_path": "tgblib/fit_results.py", "max_issues_repo_name": "RaulRPrado/tev-binaries-model", "max_issues_repo_head_hexsha": "c60959caaffbcdf3398914b03531647f95e97da0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tgblib/fit_results.py", "max_forks_repo_name": "RaulRPrado/tev-binaries-model", "max_forks_repo_head_hexsha": "c60959caaffbcdf3398914b03531647f95e97da0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2365591398, "max_line_length": 99, "alphanum_fraction": 0.4747373469, "include": true, "reason": "import numpy,from numpy,import astropy", "num_tokens": 6066}
|
# -*- coding: utf-8 -*-
"""Creating sets, variables, constraints and parts of the objective function
for Flow objects.
SPDX-FileCopyrightText: Uwe Krien <krien@uni-bremen.de>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Patrik Schönfeldt
SPDX-FileCopyrightText: Birgit Schachler
SPDX-FileCopyrightText: jnnr
SPDX-FileCopyrightText: jmloenneberga
SPDX-License-Identifier: MIT
"""
from pyomo.core import BuildAction
from pyomo.core import Constraint
from pyomo.core import NonNegativeIntegers
from pyomo.core import Set
from pyomo.core import Var
from pyomo.core.base.block import SimpleBlock
class Flow(SimpleBlock):
r""" Flow block with definitions for standard flows.
**The following variables are created**:
negative_gradient :
Difference of a flow in consecutive timesteps if flow is reduced
indexed by NEGATIVE_GRADIENT_FLOWS, TIMESTEPS.
positive_gradient :
Difference of a flow in consecutive timesteps if flow is increased
indexed by NEGATIVE_GRADIENT_FLOWS, TIMESTEPS.
**The following sets are created:** (-> see basic sets at :class:`.Model` )
SUMMED_MAX_FLOWS
A set of flows with the attribute :attr:`summed_max` being not None.
SUMMED_MIN_FLOWS
A set of flows with the attribute :attr:`summed_min` being not None.
NEGATIVE_GRADIENT_FLOWS
A set of flows with the attribute :attr:`negative_gradient` being not
None.
POSITIVE_GRADIENT_FLOWS
A set of flows with the attribute :attr:`positive_gradient` being not
None
INTEGER_FLOWS
A set of flows where the attribute :attr:`integer` is True (forces flow
to only take integer values)
**The following constraints are build:**
Flow max sum :attr:`om.Flow.summed_max[i, o]`
.. math::
\sum_t flow(i, o, t) \cdot \tau
\leq summed\_max(i, o) \cdot nominal\_value(i, o), \\
\forall (i, o) \in \textrm{SUMMED\_MAX\_FLOWS}.
Flow min sum :attr:`om.Flow.summed_min[i, o]`
.. math::
\sum_t flow(i, o, t) \cdot \tau
\geq summed\_min(i, o) \cdot nominal\_value(i, o), \\
\forall (i, o) \in \textrm{SUMMED\_MIN\_FLOWS}.
Negative gradient constraint
:attr:`om.Flow.negative_gradient_constr[i, o]`:
.. math::
          flow(i, o, t-1) - flow(i, o, t) \leq \
negative\_gradient(i, o, t), \\
\forall (i, o) \in \textrm{NEGATIVE\_GRADIENT\_FLOWS}, \\
\forall t \in \textrm{TIMESTEPS}.
Positive gradient constraint
:attr:`om.Flow.positive_gradient_constr[i, o]`:
        .. math:: flow(i, o, t) - flow(i, o, t-1) \leq \
          positive\_gradient(i, o, t), \\
\forall (i, o) \in \textrm{POSITIVE\_GRADIENT\_FLOWS}, \\
\forall t \in \textrm{TIMESTEPS}.
**The following parts of the objective function are created:**
If :attr:`variable_costs` are set by the user:
.. math::
\sum_{(i,o)} \sum_t flow(i, o, t) \cdot variable\_costs(i, o, t)
The expression can be accessed by :attr:`om.Flow.variable_costs` and
their value after optimization by :meth:`om.Flow.variable_costs()` .
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
r"""Creates sets, variables and constraints for all standard flows.
Parameters
----------
group : list
List containing tuples containing flow (f) objects and the
associated source (s) and target (t)
of flow e.g. groups=[(s1, t1, f1), (s2, t2, f2),..]
"""
if group is None:
return None
m = self.parent_block()
# ########################## SETS #################################
        # set for all flows with a global limit on the flow over time
self.SUMMED_MAX_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].summed_max is not None
and g[2].nominal_value is not None
]
)
self.SUMMED_MIN_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].summed_min is not None
and g[2].nominal_value is not None
]
)
self.NEGATIVE_GRADIENT_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].negative_gradient["ub"][0] is not None
]
)
self.POSITIVE_GRADIENT_FLOWS = Set(
initialize=[
(g[0], g[1])
for g in group
if g[2].positive_gradient["ub"][0] is not None
]
)
self.INTEGER_FLOWS = Set(
initialize=[(g[0], g[1]) for g in group if g[2].integer]
)
# ######################### Variables ################################
self.positive_gradient = Var(self.POSITIVE_GRADIENT_FLOWS, m.TIMESTEPS)
self.negative_gradient = Var(self.NEGATIVE_GRADIENT_FLOWS, m.TIMESTEPS)
self.integer_flow = Var(
self.INTEGER_FLOWS, m.TIMESTEPS, within=NonNegativeIntegers
)
# set upper bound of gradient variable
for i, o, f in group:
if m.flows[i, o].positive_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
self.positive_gradient[i, o, t].setub(
f.positive_gradient["ub"][t] * f.nominal_value
)
if m.flows[i, o].negative_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
self.negative_gradient[i, o, t].setub(
f.negative_gradient["ub"][t] * f.nominal_value
)
# ######################### CONSTRAINTS ###############################
def _flow_summed_max_rule(model):
"""Rule definition for build action of max. sum flow constraint."""
for inp, out in self.SUMMED_MAX_FLOWS:
lhs = sum(
m.flow[inp, out, ts] * m.timeincrement[ts]
for ts in m.TIMESTEPS
)
rhs = (
m.flows[inp, out].summed_max
* m.flows[inp, out].nominal_value
)
self.summed_max.add((inp, out), lhs <= rhs)
self.summed_max = Constraint(self.SUMMED_MAX_FLOWS, noruleinit=True)
self.summed_max_build = BuildAction(rule=_flow_summed_max_rule)
def _flow_summed_min_rule(model):
"""Rule definition for build action of min. sum flow constraint."""
for inp, out in self.SUMMED_MIN_FLOWS:
lhs = sum(
m.flow[inp, out, ts] * m.timeincrement[ts]
for ts in m.TIMESTEPS
)
rhs = (
m.flows[inp, out].summed_min
* m.flows[inp, out].nominal_value
)
self.summed_min.add((inp, out), lhs >= rhs)
self.summed_min = Constraint(self.SUMMED_MIN_FLOWS, noruleinit=True)
self.summed_min_build = BuildAction(rule=_flow_summed_min_rule)
def _positive_gradient_flow_rule(model):
"""Rule definition for positive gradient constraint."""
for inp, out in self.POSITIVE_GRADIENT_FLOWS:
for ts in m.TIMESTEPS:
if ts > 0:
lhs = m.flow[inp, out, ts] - m.flow[inp, out, ts - 1]
rhs = self.positive_gradient[inp, out, ts]
self.positive_gradient_constr.add(
(inp, out, ts), lhs <= rhs
)
else:
                        pass  # no constraint at the first timestep: flow at ts - 1 is undefined
self.positive_gradient_constr = Constraint(
self.POSITIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True
)
self.positive_gradient_build = BuildAction(
rule=_positive_gradient_flow_rule
)
def _negative_gradient_flow_rule(model):
"""Rule definition for negative gradient constraint."""
for inp, out in self.NEGATIVE_GRADIENT_FLOWS:
for ts in m.TIMESTEPS:
if ts > 0:
lhs = m.flow[inp, out, ts - 1] - m.flow[inp, out, ts]
rhs = self.negative_gradient[inp, out, ts]
self.negative_gradient_constr.add(
(inp, out, ts), lhs <= rhs
)
else:
                        pass  # no constraint at the first timestep: flow at ts - 1 is undefined
self.negative_gradient_constr = Constraint(
self.NEGATIVE_GRADIENT_FLOWS, m.TIMESTEPS, noruleinit=True
)
self.negative_gradient_build = BuildAction(
rule=_negative_gradient_flow_rule
)
def _integer_flow_rule(block, ii, oi, ti):
"""Force flow variable to NonNegativeInteger values."""
return self.integer_flow[ii, oi, ti] == m.flow[ii, oi, ti]
self.integer_flow_constr = Constraint(
self.INTEGER_FLOWS, m.TIMESTEPS, rule=_integer_flow_rule
)
def _objective_expression(self):
r"""Objective expression for all standard flows with fixed costs
and variable costs.
"""
m = self.parent_block()
variable_costs = 0
gradient_costs = 0
for i, o in m.FLOWS:
if m.flows[i, o].variable_costs[0] is not None:
for t in m.TIMESTEPS:
variable_costs += (
m.flow[i, o, t]
* m.objective_weighting[t]
* m.flows[i, o].variable_costs[t]
)
if m.flows[i, o].positive_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
gradient_costs += (
self.positive_gradient[i, o, t]
* m.flows[i, o].positive_gradient["costs"]
)
if m.flows[i, o].negative_gradient["ub"][0] is not None:
for t in m.TIMESTEPS:
gradient_costs += (
self.negative_gradient[i, o, t]
* m.flows[i, o].negative_gradient["costs"]
)
return variable_costs + gradient_costs
|
{"hexsha": "4a18d63bf253d6255e1ed516047eae50cffccf8e", "size": 10574, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/oemof/solph/blocks/flow.py", "max_stars_repo_name": "lensum/oemof-solph", "max_stars_repo_head_hexsha": "75789b1578035d0b658c4b97fcc41fc3ca61638e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 59, "max_stars_repo_stars_event_min_datetime": "2020-04-01T12:02:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T06:31:06.000Z", "max_issues_repo_path": "src/oemof/solph/blocks/flow.py", "max_issues_repo_name": "lensum/oemof-solph", "max_issues_repo_head_hexsha": "75789b1578035d0b658c4b97fcc41fc3ca61638e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 170, "max_issues_repo_issues_event_min_datetime": "2020-03-31T12:04:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T15:41:04.000Z", "max_forks_repo_path": "src/oemof/solph/blocks/flow.py", "max_forks_repo_name": "lensum/oemof-solph", "max_forks_repo_head_hexsha": "75789b1578035d0b658c4b97fcc41fc3ca61638e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2020-04-28T11:17:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T21:25:08.000Z", "avg_line_length": 36.5882352941, "max_line_length": 79, "alphanum_fraction": 0.5415169283, "include": true, "reason": "from pyomo", "num_tokens": 2562}
|
from collections.abc import Iterable
import re
import warnings
import numpy as np
import h5py
import openmc
import openmc.checkvalue as cv
from openmc.region import Region
_VERSION_SUMMARY = 6
class Summary(object):
"""Summary of geometry, materials, and tallies used in a simulation.
Attributes
----------
date_and_time : str
Date and time when simulation began
geometry : openmc.Geometry
The geometry reconstructed from the summary file
materials : openmc.Materials
The materials reconstructed from the summary file
nuclides : dict
Dictionary whose keys are nuclide names and values are atomic weight
ratios.
macroscopics : list
Names of macroscopic data sets
version: tuple of int
Version of OpenMC
"""
def __init__(self, filename):
if not filename.endswith(('.h5', '.hdf5')):
msg = 'Unable to open "{0}" which is not an HDF5 summary file'
            raise ValueError(msg.format(filename))
self._f = h5py.File(filename, 'r')
cv.check_filetype_version(self._f, 'summary', _VERSION_SUMMARY)
self._geometry = openmc.Geometry()
self._fast_materials = {}
self._fast_surfaces = {}
self._fast_cells = {}
self._fast_universes = {}
self._fast_lattices = {}
self._materials = openmc.Materials()
self._nuclides = {}
self._macroscopics = []
self._read_nuclides()
self._read_macroscopics()
with warnings.catch_warnings():
warnings.simplefilter("ignore", openmc.IDWarning)
self._read_geometry()
@property
def date_and_time(self):
return self._f.attrs['date_and_time'].decode()
@property
def geometry(self):
return self._geometry
@property
def materials(self):
return self._materials
@property
def nuclides(self):
return self._nuclides
@property
def macroscopics(self):
return self._macroscopics
@property
def version(self):
return tuple(self._f.attrs['openmc_version'])
def _read_nuclides(self):
if 'nuclides/names' in self._f:
names = self._f['nuclides/names'].value
awrs = self._f['nuclides/awrs'].value
for name, awr in zip(names, awrs):
self._nuclides[name.decode()] = awr
def _read_macroscopics(self):
if 'macroscopics/names' in self._f:
names = self._f['macroscopics/names'].value
for name in names:
                self._macroscopics.append(name.decode())
def _read_geometry(self):
if "dagmc" in self._f['geometry'].attrs.keys():
return
# Read in and initialize the Materials and Geometry
self._read_materials()
self._read_surfaces()
cell_fills = self._read_cells()
self._read_universes()
self._read_lattices()
self._finalize_geometry(cell_fills)
def _read_materials(self):
for group in self._f['materials'].values():
material = openmc.Material.from_hdf5(group)
# Add the material to the Materials collection
self.materials.append(material)
# Store in the dictionary of materials for fast queries
self._fast_materials[material.id] = material
def _read_surfaces(self):
for group in self._f['geometry/surfaces'].values():
surface = openmc.Surface.from_hdf5(group)
self._fast_surfaces[surface.id] = surface
def _read_cells(self):
# Initialize dictionary for each Cell's fill
cell_fills = {}
for key, group in self._f['geometry/cells'].items():
cell_id = int(key.lstrip('cell '))
name = group['name'].value.decode() if 'name' in group else ''
fill_type = group['fill_type'].value.decode()
if fill_type == 'material':
fill = group['material'].value
elif fill_type == 'universe':
fill = group['fill'].value
else:
fill = group['lattice'].value
region = group['region'].value.decode() if 'region' in group else ''
# Create this Cell
cell = openmc.Cell(cell_id=cell_id, name=name)
if fill_type == 'universe':
if 'translation' in group:
translation = group['translation'][...]
translation = np.asarray(translation, dtype=np.float64)
cell.translation = translation
if 'rotation' in group:
rotation = group['rotation'][...]
                    rotation = np.asarray(rotation, dtype=int)
cell._rotation = rotation
elif fill_type == 'material':
cell.temperature = group['temperature'][...]
# Store Cell fill information for after Universe/Lattice creation
cell_fills[cell.id] = (fill_type, fill)
# Generate Region object given infix expression
if region:
cell.region = Region.from_expression(region, self._fast_surfaces)
# Add the Cell to the global dictionary of all Cells
self._fast_cells[cell.id] = cell
return cell_fills
def _read_universes(self):
for group in self._f['geometry/universes'].values():
universe = openmc.Universe.from_hdf5(group, self._fast_cells)
self._fast_universes[universe.id] = universe
def _read_lattices(self):
for group in self._f['geometry/lattices'].values():
lattice = openmc.Lattice.from_hdf5(group, self._fast_universes)
self._fast_lattices[lattice.id] = lattice
def _finalize_geometry(self, cell_fills):
# Keep track of universes that are used as fills. That way, we can
# determine which universe is NOT used as a fill (and hence is the root
# universe)
fill_univ_ids = set()
# Iterate over all Cells and add fill Materials, Universes and Lattices
for cell_id, (fill_type, fill_id) in cell_fills.items():
# Retrieve the object corresponding to the fill type and ID
if fill_type == 'material':
if isinstance(fill_id, Iterable):
fill = [self._fast_materials[mat] if mat > 0 else None
for mat in fill_id]
else:
fill = self._fast_materials[fill_id] if fill_id > 0 else None
elif fill_type == 'universe':
fill = self._fast_universes[fill_id]
fill_univ_ids.add(fill_id)
else:
fill = self._fast_lattices[fill_id]
for idx in fill._natural_indices:
univ = fill.get_universe(idx)
fill_univ_ids.add(univ.id)
if fill.outer is not None:
fill_univ_ids.add(fill.outer.id)
# Set the fill for the Cell
self._fast_cells[cell_id].fill = fill
# Determine root universe for geometry
non_fill = set(self._fast_universes.keys()) - fill_univ_ids
self.geometry.root_universe = self._fast_universes[non_fill.pop()]
def add_volume_information(self, volume_calc):
"""Add volume information to the geometry within the summary file
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
self.geometry.add_volume_information(volume_calc)
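# Usage sketch (hypothetical file name):
#   summary = Summary('summary.h5')
#   print(summary.version, summary.date_and_time)
#   root_universe = summary.geometry.root_universe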
|
{"hexsha": "866dd8288a563efc647b483a6c684116727dc610", "size": 7627, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmc/summary.py", "max_stars_repo_name": "johnnyliu27/openmc", "max_stars_repo_head_hexsha": "d7359f151cc9eece99fb155e80f73a1b3393f7f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openmc/summary.py", "max_issues_repo_name": "johnnyliu27/openmc", "max_issues_repo_head_hexsha": "d7359f151cc9eece99fb155e80f73a1b3393f7f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openmc/summary.py", "max_forks_repo_name": "johnnyliu27/openmc", "max_forks_repo_head_hexsha": "d7359f151cc9eece99fb155e80f73a1b3393f7f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3056768559, "max_line_length": 81, "alphanum_fraction": 0.5997115511, "include": true, "reason": "import numpy", "num_tokens": 1696}
|
struct VanDerWaals{S,T,A} <: CubicModel
type::S
tc::T
pc::T
ω::T
mw::T
vc::Union{T,Nothing}
_a::T
_b::T
aij::A
function VanDerWaals(tc,pc,ω,mw,vc=nothing,aij = nothing)
if length(tc) == 1
type = SINGLE()
else
type = MULTI()
end
S = typeof(type)
tc,pc,ω,mw = promote(tc,pc,ω,mw)
T = typeof(tc)
A = typeof(aij)
_a = 0.421875 .*((RGAS .*tc).^2) ./pc
_b = 0.125 .*((RGAS .*tc)) ./pc
return new{S,T,A}(type,tc,pc,ω,mw,vc,_a,_b,aij)
end
end
volume_solver_type(model::VanDerWaals) = CubicRoots()
single_sat_Approx(model::VanDerWaals{SINGLE}) =VdWSatApprox(model)
mol_density(model::VanDerWaals{SINGLE},::CriticalPoint,unit=u"mol/(m^3)") = convert_unit(u"mol/L",unit,inv(only(model.vc)))
pressure(model::VanDerWaals{SINGLE},::CriticalPoint,unit=u"Pa") = convert_unit(u"Pa",unit,only(model.pc))
temperature(model::VanDerWaals{SINGLE},::CriticalPoint,unit=u"K") = convert_unit(u"K",unit,only(model.tc))
mol_volume(model::VanDerWaals{SINGLE},::CriticalPoint,unit=u"m^3/mol") = convert_unit(u"m^3/mol",unit,only(model.vc))
acentric_factor(model::VanDerWaals{SINGLE}) = only(model.ω)
molecular_weight(model::VanDerWaals{SINGLE}) = only(model.mw)
molecular_weight(model::VanDerWaals{MULTI}) = model.mw
function mol_density(model::VanDerWaals{MULTI},::CriticalPoint,unit=u"mol/(m^3)")
return convert_unit.(u"mol/L",unit,1 ./ model.vc)
end
function pressure(model::VanDerWaals{MULTI},::CriticalPoint,unit=u"Pa")
return convert_unit.(u"Pa",unit,model.pc)
end
function temperature(model::VanDerWaals{MULTI},::CriticalPoint,unit=u"K")
return convert_unit.(u"K",unit,model.tc)
end
function mol_volume(model::VanDerWaals{MULTI},::CriticalPoint,unit=u"m^3/mol")
return convert_unit.(u"m^3/mol",unit,model.vc)
end
function acentric_factor(model::VanDerWaals{MULTI})
return model.ω
end
function VanDerWaals(;tc,pc,ω,mw,vc=nothing,aij=nothing)
return VanDerWaals(tc,pc,ω,mw,vc,aij)
end
function VanDerWaals(model::ThermoModel)
tc = temperature(model,CriticalPoint())
pc = pressure(model,CriticalPoint())
ω = acentric_factor(model)
mw = molecular_weight(model)
return VanDerWaals(tc,pc,ω,mw)
end
function cubic_ab(mt::SinglePT,model::VanDerWaals{SINGLE},v,t)
a = only(model._a)
b = only(model._b)
return a,b
end
function cubic_ab(mt::MultiPT,model::VanDerWaals{MULTI},p,t,x)
#two options to introduce alpha:
#here: it will allocate, but less ops
ai = model._a
bi = model._b
b = dot(bi,x)
#here: it will not allocate, but more ops
mixrule= (ai,aj)->sqrt(ai*aj)
a = mixing_rule(mixrule, x, ai,model.aij)
return a,b
end
function cubic_abp(mt::SingleVT,model::VanDerWaals{SINGLE},v,t)
a,b = cubic_ab(QuickStates.pt(),model,v,t) #v is ignored
p = RGAS*t/(v-b) - a/(v*v)
return a,b,p
end
function cubic_abp(mt::MultiVT,model::VanDerWaals{MULTI},v,t,x)
a,b = cubic_ab(QuickStates.ptx(),model,v,t,x) #v is ignored
p = RGAS*t/(v-b) - a/(v*v)
return a,b,p
end
function fugacity_coeff_impl(mt::SingleVT,model::VanDerWaals{SINGLE},v,t)
a,b,p = cubic_abp(mt,model,v,t)
RTinv = 1/(RGAS*t)
A = a*p*RTinv*RTinv
B = b*p*RTinv
z = p*v*RTinv
_1 = one(z)
    logϕ = z - _1 - log(z-B) - A/z
    return logϕ
end
function αR_impl(mt::MultiVT,model::VanDerWaals{MULTI},rho,t,x)
R = RGAS
RTinv = 1/(RGAS*t)
v = inv(rho)
a,b,p = cubic_abp(mt,model,v,t,x)
-log(1-b*rho) - a*rho*RTinv
end
function cubic_poly(mt::SinglePT,model::VanDerWaals{SINGLE},p,t)
a,b = cubic_ab(QuickStates.pt(),model,p,t)
RTinv = 1/(RGAS*t)
A = a*p*RTinv*RTinv
B = b*p*RTinv
_1 = one(a)
return (-A*B, A, -B-_1, _1)
end
function cubic_poly(mt::MultiPT,model::VanDerWaals{MULTI},p,t,x)
a,b = cubic_ab(QuickStates.ptx(),model,p,t,x)
RTinv = 1/(RGAS*t)
A = a*p*RTinv*RTinv
B = b*p*RTinv
_1 = one(a)
return (-A*B, A, -B-_1, _1)
end
struct VdWSatApprox{M} <: SaturationModel
model::M
function VdWSatApprox(model::CubicModel)
T = typeof(model)
return new{T}(model)
end
end
function VdWSatApprox(tc,pc,ω,mw=copy(pc),vc=nothing,aij = nothing)
    # argument order matches the VanDerWaals constructor (tc,pc,ω,mw,vc,aij)
    model = VanDerWaals(tc,pc,ω,mw,vc,aij)
    return VdWSatApprox(model)
end
function VdWSatApprox(;tc,pc,ω,mw=copy(pc),vc=nothing,aij=nothing)
    model = VanDerWaals(tc,pc,ω,mw,vc,aij)
    return VdWSatApprox(model)
end
function pressure_impl(mt::SingleSatT,model::VdWSatApprox,t)
a = 4.406664258927600
b = 2.205610041969020
c = 0.243757663628277
d = -0.206185849671953
e = -4.650419726136550
f = 0.019582516700758
tc = only(model.model.tc)
pc = only(model.model.pc)
tr = t/tc
atr = tc/t
num = evalpoly(atr,(a,c,e))
denom = evalpoly(atr,(1.0,b,d,f))
pr = exp(num/denom)*tr
p = pr*pc
end
initial_temperature(model::VdWSatApprox,p) = critical_sat_interpolation(model,p)
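# Minimal usage sketch (hypothetical CO2-like single-component inputs, SI units):
#   model = VanDerWaals(tc = 304.2, pc = 7.38e6, ω = 0.224, mw = 44.01)
#   sat = VdWSatApprox(model)
#   a, b = cubic_ab(QuickStates.pt(), model, 1.0e5, 300.0)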
|
{"hexsha": "b04fa3e681a09102461ea07f4f2692e657be422e", "size": 5013, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/models/cubic/vdw.jl", "max_stars_repo_name": "longemen3000/ThermoModels.jl", "max_stars_repo_head_hexsha": "a817ba1677c0b1bed39fe5a695d3ada63b673f40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-10-02T10:12:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-19T11:16:37.000Z", "max_issues_repo_path": "src/models/cubic/vdw.jl", "max_issues_repo_name": "longemen3000/ThermoModels.jl", "max_issues_repo_head_hexsha": "a817ba1677c0b1bed39fe5a695d3ada63b673f40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-28T17:40:50.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-28T17:40:50.000Z", "max_forks_repo_path": "src/models/cubic/vdw.jl", "max_forks_repo_name": "longemen3000/ThermoModels.jl", "max_forks_repo_head_hexsha": "a817ba1677c0b1bed39fe5a695d3ada63b673f40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.0055865922, "max_line_length": 123, "alphanum_fraction": 0.6529024536, "num_tokens": 1775}
|
from keras.preprocessing.text import Tokenizer #this is used to assign some numeric value to every word that appear in the training set
from keras.preprocessing.sequence import pad_sequences
import pandas as pd
import nltk
import numpy as np
import re
from sklearn.model_selection import train_test_split
import string
#Read in and clean the text
#Load the stopwords
stopwords = nltk.corpus.stopwords.words("english")
data = pd.read_csv("agr_balanced.csv", encoding = "utf-8")
data.columns = ["label", "text"]
#we replace the classes by 1 and 0 for y and n respectively
data["label"] = np.where(data["label"]=="y",1,0)
#define function to clean text
def clean_text(text):
    # strip punctuation character-wise, lowercase, tokenize and drop stopwords
    text = "".join([char.lower() for char in text if char not in string.punctuation])
    tokens = re.split(r"\W+", text)
    text = [word for word in tokens if word not in stopwords]
    return text
#actually clean the text and create a new column for it
data["clean_text"] = data["text"].apply(lambda x: clean_text(x))
#Now split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(data["clean_text"], data["label"], test_size = 0.2)
#Train the tokenizer and use that tokenizer to convert the sentences to sequences of numbers
tokenizer = Tokenizer() #Load the tokenizer
tokenizer.fit_on_texts(X_train)#Train the tokenizer
X_train_seq = tokenizer.texts_to_sequences(X_train)#create token sequences for the sentences in the X_train file
X_test_seq = tokenizer.texts_to_sequences(X_test)#create token sequences for the sentences in the X_test file
#Now since the number of tokens in each sentence is not the same, we pad or truncate each sentence so that every sequence has the same number of tokens
#Pad the sequences so that each sequence is the same length, in our case 64
X_train_seq_padded = pad_sequences(X_train_seq, 64)  # the sequence length (64) can be tuned
X_test_seq_padded = pad_sequences(X_test_seq, 64)
import keras.backend as K
from keras.layers import Dense, Embedding, LSTM, Bidirectional
from keras.layers import Dropout, GRU
from keras.models import Sequential
#Since it's not an sklearn model but rather a neural network, we define our own recall and precision functions
#recall
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
#precision
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives/ (predicted_positives + K.epsilon())
return precision
###CONSTRUCT the basic RNN model framework
#we are creating a sequential model
model = Sequential()
#check with tanh and two lstm layers
model.add(Embedding(len(tokenizer.index_word)+1, 128)) #maps each token id to a trainable 128-dimensional vector
#model.add(LSTM(32, dropout = 0.5, recurrent_dropout = 0.5, activation = "sigmoid", recurrent_activation = "sigmoid"))#dropouts can usually take values between 0.0 to 1.0 [kind of like percent]# return_sequences = True
#LSTM is an RNN layer
model.add(Bidirectional(GRU(128, return_sequences = True)))
model.add(Bidirectional(GRU(128)))
#dropout params are added in case the RNN overfits the data
#that happens when the training accuracy is significantly greater than the testing accuracy
model.add(Dense(32,activation = "relu"))#relu, sigmoid, softmax, softplus, softsign, tanh, selu, elu, exponential
#model.add(Dropout(0.2))
model.add(Dense(2, activation = "softmax"))#two output units, one per class; softmax produces the class probabilities expected by sparse_categorical_crossentropy
model.summary()
#Compile the model
##model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy", precision_m, recall_m])
model.compile(optimizer = "adam", loss = "sparse_categorical_crossentropy", metrics = ["accuracy",precision_m, recall_m])
##from keras.callbacks import EarlyStopping
##es_callback = EarlyStopping(monitor='val_loss', patience=3)
###Fit the RNN - train the neural network
##history = model.fit(X_train_seq_padded, y_train, batch_size = 50, epochs = 10, validation_data = (X_test_seq_padded, y_test), verbose = 1,callbacks=[es_callback])
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
es = EarlyStopping(monitor = "val_loss", min_delta = 0.0005, patience = 3, verbose = 1, mode = "min", baseline = 1.5)
mc = ModelCheckpoint("best_weights.h5", monitor = "val_loss", verbose = 1, mode = "auto")
rd = ReduceLROnPlateau(monitor = "val_loss", factor = 0.1, patience = 3, verbose = 1, mode = "auto", min_delta = 0.0001, cooldown = 0, min_lr = 0)
#Fit the RNN - train the neural network
history = model.fit(X_train_seq_padded, y_train, epochs = 10, validation_data = (X_test_seq_padded, y_test), verbose = 1,callbacks=[es, mc, rd])
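##Possible evaluation step (metric order follows model.compile above):
##loss, acc, prec, rec = model.evaluate(X_test_seq_padded, y_test, verbose = 0)
##print("test acc: {:.3f} precision: {:.3f} recall: {:.3f}".format(acc, prec, rec))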
|
{"hexsha": "bc782cb6d120284a38090ecc4a7d567ec4d2453c", "size": 4982, "ext": "py", "lang": "Python", "max_stars_repo_path": "RNN.py", "max_stars_repo_name": "SimralPimenta20/personality-detection-using-social-media-messages", "max_stars_repo_head_hexsha": "f841266fcd2d9e1fd9de7d8e33abe0e9fcd2661c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-29T08:24:01.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-29T08:24:01.000Z", "max_issues_repo_path": "RNN.py", "max_issues_repo_name": "SimralPimenta20/personality-detection-using-social-media-messages", "max_issues_repo_head_hexsha": "f841266fcd2d9e1fd9de7d8e33abe0e9fcd2661c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "RNN.py", "max_forks_repo_name": "SimralPimenta20/personality-detection-using-social-media-messages", "max_forks_repo_head_hexsha": "f841266fcd2d9e1fd9de7d8e33abe0e9fcd2661c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.8367346939, "max_line_length": 219, "alphanum_fraction": 0.7422721798, "include": true, "reason": "import numpy", "num_tokens": 1259}
|
##### OVERRIDES FOR EFFICIENCY / CORRECTNESS
function add_vertices!(g::AbstractSimpleWeightedGraph, n::Integer)
T = eltype(g)
U = weighttype(g)
(nv(g) + one(T) <= nv(g)) && return false # test for overflow
emptycols = spzeros(U, nv(g) + n, n)
g.weights = hcat(g.weights, emptycols[1:nv(g), :])
g.weights = vcat(g.weights, emptycols')
return true
end
function degree_matrix(g::AbstractSimpleWeightedGraph, T::DataType=weighttype(g); dir::Symbol=:out)
if is_directed(g)
if dir == :out
d = vec(sum(g.weights, dims=1))
elseif dir == :in
d = vec(sum(g.weights, dims=2))
elseif dir == :both
d = vec(sum(g.weights, dims=1)) + vec(sum(g.weights, dims=2))
else
throw(DomainError(dir, "invalid argument, only accept :in, :out and :both"))
end
else
d = vec(sum(g.weights, dims=1))
end
return SparseMatrixCSC(T.(diagm(0=>d)))
end
function adjacency_matrix(g::AbstractSimpleWeightedGraph, T::DataType=weighttype(g); dir::Symbol=:out)
if dir == :out
return SparseMatrixCSC(T.(copy(g.weights))')
else
return T.(copy(g.weights))
end
end
function laplacian_matrix(g::AbstractSimpleWeightedGraph, T::DataType=weighttype(g); dir::Symbol=:out)
degree_matrix(g, T; dir=dir) - adjacency_matrix(g, T; dir=dir)
end
function pagerank(g::SimpleWeightedDiGraph, α=0.85, n::Integer=100, ϵ=1.0e-6)
A = weights(g)
S = vec(sum(A, dims=1))
S = 1 ./ S
S[findall(S .== Inf)] .= 0.0
M = A' # need a separate line due to bug #17456 in julia
# scaling the adjmat to stochastic adjacency matrix
M = (Diagonal(S) * M)'
N = Int(nv(g))
# solution vector
x = fill(1.0 / N, N)
# personalization vector
p = fill(1.0 / N, N)
# temporary to hold the results of SpMV
y = zeros(Float64, N)
# adjustment for leaf nodes in digraph
dangling_weights = p
is_dangling = findall(S .== 0)
# save some flops by precomputing this
pscaled = (1 .- α) .* p
for _ in 1:n
xlast = x
# in place SpMV to conserve memory
mul!(y, M, x)
# using broadcast to avoid temporaries
x = α .* (y .+ sum(x[is_dangling]) .* dangling_weights) .+ pscaled
# l1 change in solution convergence criterion
err = sum(abs, (x .- xlast))
if (err < N * ϵ)
return x
end
end
error("Pagerank did not converge after $n iterations.")
end
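# Usage sketch (hypothetical weighted digraph):
#   g = SimpleWeightedDiGraph(3)
#   add_edge!(g, 1, 2, 0.5); add_edge!(g, 2, 3, 2.0); add_edge!(g, 3, 1, 1.0)
#   pagerank(g)  # defaults: α = 0.85, n = 100, ϵ = 1.0e-6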
savegraph(fn::AbstractString, g::AbstractSimpleWeightedGraph, gname::AbstractString="graph"; compress=true) =
savegraph(fn, g, gname, SWGFormat(), compress=compress)
savegraph(fn::AbstractString, d::Dict{T, U}; compress=true) where T <: AbstractString where U <: AbstractSimpleWeightedGraph =
savegraph(fn, d, SWGFormat(), compress=compress)
# It is possible that this is suboptimal, but it is the most trivial extension of the implementation used in Graphs.jl
function cartesian_product(g::G, h::G) where G <: AbstractSimpleWeightedGraph
z = G(nv(g) * nv(h))
id(i, j) = (i - 1) * nv(h) + j
for e in edges(g)
i1, i2 = Tuple(e)
for j = 1:nv(h)
add_edge!(z, id(i1, j), id(i2, j), weight(e))
end
end
for e in edges(h)
j1, j2 = Tuple(e)
for i in vertices(g)
add_edge!(z, id(i, j1), id(i, j2), weight(e))
end
end
return z
end
# Connected Components on a Sparse Matrix
function _cc(g::SimpleWeightedGraph{T,U}) where T where U
a = weights(g)
comp = 0
n = size(a, 1)
marks = zeros(T, n)
queue = Vector{T}()
for i = 1:n
if marks[i] == 0
comp += 1
push!(queue, i)
while !isempty(queue)
v = pop!(queue)
marks[v] = comp
for index in nzrange(a,v)
                    w = a.rowval[index]  # neighbor vertex (avoid shadowing the outer n)
                    if marks[w] == 0
                        push!(queue, w)
end
end
end
end
end
marks, comp
end
function connected_components(g::SimpleWeightedGraph{T,U}) where T where U
marks, num_cc = _cc(g)
cc = [Vector{T}() for i = 1:num_cc]
for (i,v) in enumerate(marks)
push!(cc[v], i)
end
cc
end
function induced_subgraph(g::T, vlist::AbstractVector{U}) where T <: AbstractSimpleWeightedGraph where U <: Integer
E = eltype(g)
allunique(vlist) || throw(ArgumentError("Vertices in subgraph list must be unique"))
new_weights = g.weights[E.(vlist), E.(vlist)]
newg = zero(g)
newg.weights = new_weights
return newg, Vector{E}(vlist)
end
|
{"hexsha": "eb6a19d43fdc50b99df9f4ec51718272b942c868", "size": 4663, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/overrides.jl", "max_stars_repo_name": "scheidan/SimpleWeightedGraphs.jl-1", "max_stars_repo_head_hexsha": "e500596d906193d2de8d052f72ceeb750c1de1bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 57, "max_stars_repo_stars_event_min_datetime": "2017-07-09T11:38:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T10:39:16.000Z", "max_issues_repo_path": "src/overrides.jl", "max_issues_repo_name": "scheidan/SimpleWeightedGraphs.jl-1", "max_issues_repo_head_hexsha": "e500596d906193d2de8d052f72ceeb750c1de1bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 79, "max_issues_repo_issues_event_min_datetime": "2017-07-15T19:10:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-22T11:55:02.000Z", "max_forks_repo_path": "src/overrides.jl", "max_forks_repo_name": "scheidan/SimpleWeightedGraphs.jl-1", "max_forks_repo_head_hexsha": "e500596d906193d2de8d052f72ceeb750c1de1bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2017-07-10T16:18:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-28T09:12:07.000Z", "avg_line_length": 31.7210884354, "max_line_length": 126, "alphanum_fraction": 0.5867467296, "num_tokens": 1372}
|
import numpy as np
import scipy.stats.distributions as sc_dist
from itertools import compress
def aggarwal_limits(mu, alpha=0.68268949):
"""Get Poissonian limits for specified contour levels
Parameters
----------
mu : array_like
The expected number of events (Poisson mean) in each observable bin.
Shape: [n_bins]
alpha : float or list of float, optional
The list of alpha values, which define the contour levels that will
be computed.
Returns
-------
array_like
The lower limits (minus 0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus 0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
mu_large = np.zeros((len(mu), len(alpha)))
alpha_large = np.zeros_like(mu_large)
for i, a_i in enumerate(alpha):
alpha_large[:, i] = a_i
mu_large[:, i] = mu
mu_large_flat = mu_large.reshape(np.prod(mu_large.shape))
alpha_large_flat = alpha_large.reshape(mu_large_flat.shape)
lower, upper = sc_dist.poisson.interval(alpha_large_flat, mu_large_flat)
lower[lower != 0] -= 0.5
upper += 0.5
return lower.reshape(mu_large.shape), upper.reshape(mu_large.shape)
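# Example (illustrative, not part of the original module): 1-sigma Poisson
# limits for three expectation values.
#
#   lo, hi = aggarwal_limits(np.array([1.0, 5.0, 10.0]))
#   lo.shape, hi.shape  # -> (3, 1), (3, 1)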
def aggarwal_limits_pdf(pdfs, ks, alpha=0.68268949):
"""Get limits for specified contour levels
In contrast to `aggarwal_limits` this function computes the limits based
on the evaluated and normalized likelihood as opposed to the theoretical
limits from the Poisson distribution.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
alpha : float or list of float, optional
The list of alpha values, which define the contour levels that will
be computed.
Returns
-------
array_like
The lower limits (minus 0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
array_like
The upper limits (plus 0.5) for each of the observable bins and
chosen contour value alpha.
Shape: [n_bins, n_alpha]
"""
if isinstance(alpha, float):
alpha = [alpha]
lower = np.zeros((len(pdfs), len(alpha)))
upper = np.zeros((len(pdfs), len(alpha)))
for i, pdf in enumerate(pdfs):
if len(ks[i]) == 0:
continue
cdf = np.cumsum(pdf)
if cdf[-1] < 0.999:
print('Cdf only goes up to {}'.format(cdf[-1]))
lower[i, :] = np.nan
upper[i, :] = np.nan
continue
for j, alpha_j in enumerate(alpha):
q1 = (1.-alpha_j) / 2.
q2 = (1.+alpha_j) / 2.
lower_idx = np.searchsorted(cdf, q1)
upper_idx = np.searchsorted(cdf, q2)
lower[i, j] = ks[i][lower_idx]
upper[i, j] = ks[i][upper_idx]
lower[lower != 0] -= 0.5
upper += 0.5
return lower, upper
def evaluate_normalized_likelihood(llh_func, coverage,
first_guess, **llh_kwargs):
"""Compute normalized likelihood
This function evaluates the likelihood function `llh_func` iteratively over
possible values of k (observed number of events in Poissonian) until
the specified coverage is reached.
This can then be used to normalize the likelihood and to define the PDF
in observed values k and to compute the limits in k.
Parameters
----------
llh_func : callable
The likelihood function
coverage : float
The minimum coverage value to obtain. Max value is 1. The closer to
1, the more accurate, but also more time consuming.
first_guess : float
A first guess of the valid range of k values. Typically, this can
be set to the expected number of values in the observable bin.
**llh_kwargs
Keyword arguments that are passed on to the likelihood function.
Returns
-------
array_like
The (sorted) k values at which the likelihood was evaluated.
array_like
The corresponding likelihood values to each of the (sorted) k values.
These are normalized, i.e. their sum should approach 1, but be at
least as high as the specified `coverage`.
"""
mu = int(first_guess)
prob = np.exp(llh_func(mu, **llh_kwargs))
unsorted_pdf = [prob]
ks = [mu]
max_k = mu
min_k = mu
reached_bottom = False
while prob < coverage:
if not reached_bottom:
if min_k == 0:
reached_bottom = True
else:
min_k -= 1
ks.append(min_k)
new_val = np.exp(llh_func(min_k, **llh_kwargs))
unsorted_pdf.append(
new_val)
prob += new_val
max_k += 1
ks.append(max_k)
new_val = np.exp(llh_func(max_k, **llh_kwargs))
unsorted_pdf.append(new_val)
prob += new_val
ks = np.array(ks)
unsorted_pdf = np.array(unsorted_pdf)
sort_idx = np.argsort(ks)
sorted_ks = ks[sort_idx]
sorted_pdf = unsorted_pdf[sort_idx]
return sorted_ks, sorted_pdf
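# Example (illustrative, not part of the original module): normalize a Poisson
# likelihood around mu=10 until 99.9% of the probability mass is covered.
#
#   ks, pdf = evaluate_normalized_likelihood(
#       sc_dist.poisson.logpmf, coverage=0.999, first_guess=10, mu=10)
#   pdf.sum()  # -> at least 0.999; ks is sorted ascending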
def map_aggarwal_ratio(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the ratios are above the expectation values, i.e. the
transformed values will be in the range of [0, 1].
If False, the ratios are below the expectation values in each bin
and the transformed values will be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
transformed_values[finite] = finite_y
is_nan = np.isnan(flattened_y)
is_pos_inf = np.isposinf(flattened_y)
is_neg_inf = np.isneginf(flattened_y)
got_divided_by_zero = flattened_y == 1.
if upper:
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
else:
transformed_values[finite] *= -1.
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = -np.inf
transformed_values[got_divided_by_zero] = 0
transformed_values = transformed_values.reshape(y_values.shape)
return transformed_values, y_min
def map_aggarwal_limits(y_values, y_0=1., upper=True):
"""Map p-values to relative y-values wrt minimium p-value.
The provided p-values `y_values` are mapped to relative y-values.
These transformed y-values are relative to the minimum p-value (in log10).
Depending on whether or not `upper` is True, these relative values will
either be positive or negative. In other words, the p-values are mapped
to y-values in the range of [0, 1] for upper == True and [-1, 0] for
upper == False.
This function is similar to `map_aggarwal_ratio`, but the handling
of positive and negative infinities is different. These are set to finite
values, such that appropriate limit contours may be drawn.
Parameters
----------
y_values : array_like
The p-values for each observable bin.
Shape: [n_bins]
y_0 : float, optional
The highest possible p-value. Anything above this is set to NaN, i.e.
it will not be plotted later.
upper : bool, optional
If True, the limits are upper limits, i.e. the
transformed values will be in the range of [0, 1].
If False, the limits are lower limits and the transformed values will
be in the range of [-1, 0].
Returns
-------
array_like
The transformed y-values for each of the p-values `y_values`.
Shape: [n_bins]
"""
flattened_y = np.copy(y_values.reshape(np.prod(y_values.shape)))
finite = np.isfinite(flattened_y)
finite_y = flattened_y[finite]
if len(finite_y) == 0:
return y_values, 0.
finite_y[finite_y > y_0] = np.NaN
finite_y = np.log10(finite_y)
y_min = np.min(finite_y)
y_min *= 1.1
finite_y /= y_min
transformed_values = np.copy(flattened_y)
transformed_values[finite] = finite_y
is_nan = np.isnan(flattened_y)
is_pos_inf = np.isposinf(flattened_y)
is_neg_inf = np.isneginf(flattened_y)
if upper:
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = np.inf
transformed_values[is_neg_inf] = 0.
else:
transformed_values[finite] *= -1.
transformed_values[is_nan] = np.nan
transformed_values[is_pos_inf] = -8000
transformed_values[is_neg_inf] = -8000
transformed_values = transformed_values.reshape(y_values.shape)
return transformed_values, y_min
def rescale_ratio(values, y_min, y_min_wanted):
"""Rescale relative y-values
Rescales relative y-values `values` to `y_min_wanted`. It is assumed
that the provided values are relative to the minimum p-value as specified
in the provided `y_min`.
Similar to `rescale_limit`, but does additional handling of points that
are outside of the plot region (these are set to inf, such that they will
not be plotted).
Parameters
----------
values : array_like
The relative y-values that should be rescaled.
Shape: [n_bins]
y_min : float
The minimum p-value. This is the anchor point to which the original
p-values were scaled, i.e. `values` are relative to this minimum
p-value.
Shape: []
y_min_wanted : float
The desired new minimum p-value. This is the new anchor point to which
the original p-values will be re-scaled.
Shape: []
Returns
-------
array_like
The rescaled y-values now relative to `y_min_wanted`.
Shape: [n_bins]
"""
values = np.copy(values)
finite = np.isfinite(values)
factor = y_min / y_min_wanted
values[finite] *= factor
finite_values = values[finite]
finite_values[np.absolute(finite_values) > 1] = np.inf
values[finite] = finite_values
return values
def rescale_limit(values, y_min, y_min_wanted):
"""Rescale relative y-values
Rescales relative y-values `values` to `y_min_wanted`. It is assumed
that the provided values are relative to the minimum p-value as specified
in the provided `y_min`.
Parameters
----------
values : array_like
The relative y-values that should be rescaled.
Shape: [n_bins]
y_min : float
The minimum p-value. This is the anchor point to which the original
p-values were scaled, i.e. `values` are relative to this minimum
p-value.
Shape: []
y_min_wanted : float
The desired new minimum p-value. This is the new anchor point to which
the original p-values will be re-scaled.
Shape: []
Returns
-------
array_like
The rescaled y-values now relative to `y_min_wanted`.
Shape: [n_bins]
"""
values = np.copy(values)
finite = np.isfinite(values)
factor = y_min / y_min_wanted
values[finite] *= factor
return values
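# Example (illustrative, not part of the original module): re-anchoring from
# y_min=-4 to y_min_wanted=-8 halves every finite entry.
#
#   rescale_limit(np.array([0.5, 1.0]), y_min=-4.0, y_min_wanted=-8.0)
#   # -> array([0.25, 0.5])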
def calc_p_alpha_limits(mu, rel_std):
"""Get the CDF ratio at the limits `rel_std` in each observable bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
rel_std : array_like
The relative limits wrt the expected number (Poisson mean) of events
in each bin, i.e. limits / mu. The last dimension corresponds to lower
and upper relative limits, respectively.
Shape: [n_bins, n_alpha, 2]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= limit_i) / P(x <= mu_i) if limit_i <= mu_i
P(x > limit_i) / P(x > mu_i) if limit_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins, n_alpha, 2]
"""
abs_std = np.zeros_like(rel_std)
limits = np.zeros_like(rel_std)
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 0]
returned_vals = __calc_p_alpha__(mu, abs_std, upper=False)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 0] = returned_vals
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 1]
returned_vals = __calc_p_alpha__(mu, abs_std, upper=True)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 1] = returned_vals
return limits
def calc_p_alpha_limits_pdf(pdfs, ks, mu, rel_std):
"""Get the CDF ratio at the limits `rel_std` in each observable bin.
Similar to `calc_p_alpha_limits`, but the CDF calculation is based on the
normalized likelihood values `pdfs` and corresponding k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
rel_std : array_like
The relative limits wrt the expected number (Poisson mean) of events
in each bin, i.e. limits / mu. The last dimension corresponds to lower
and upper relative limits, respectively.
Shape: [n_bins, n_alpha, 2]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= limit_i) / P(x <= mu_i) if limit_i <= mu_i
P(x > limit_i) / P(x > mu_i) if limit_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins, n_alpha, 2]
"""
abs_std = np.zeros_like(rel_std)
limits = np.zeros_like(rel_std)
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 0]
returned_vals = __calc_p_alpha_pdf__(pdfs, ks,
mu, abs_std,
upper=False)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 0] = returned_vals
for i in range(rel_std.shape[1]):
abs_std = mu * rel_std[:, i, 1]
returned_vals = __calc_p_alpha_pdf__(pdfs, ks,
mu, abs_std,
upper=True)
is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
is_zero_mu = mu == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
limits[:, i, 1] = returned_vals
return limits
def __calc_p_alpha__(mu, k, upper=True):
"""Get the CDF ratio at a given number of observed events k in each bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
The CDF ratio is evaluated at these k values.
Shape: [n_bins]
upper : bool, optional
If true, the upper PDF tail will be considered, i.e. the ratio
P(x > k_i) / P(x > mu_i) will be computed.
If false, P(x <= k_i) / P(x <= mu_i) is computed.
Returns
-------
array_like
The ratio P(x <= k_i) / P(x <= mu_i) for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution. If upper is True, then '<=' switches to '>'.
Shape: [n_bins]
"""
assert mu.shape == k.shape, 'Shapes of \'mu\' and \'k\' have to be the same'
limit = np.copy(k)
is_nan = np.logical_or(np.isnan(k), np.isnan(mu))
is_finite = mu != 0.
a_ref = sc_dist.poisson.cdf(mu[is_finite], mu[is_finite])
a_k = sc_dist.poisson.cdf(k[is_finite], mu[is_finite])
if upper:
ratio = (1 - a_k) / (1 - a_ref)
ratio[1 - a_k == 0.] = np.inf
else:
ratio = a_k / a_ref
ratio[a_k == 0.] = np.inf
limit[is_finite] = ratio
limit[is_nan] = np.nan
return limit
def __calc_p_alpha_pdf__(pdfs, ks, mu, k, upper=True):
"""Get the CDF ratio at a given number of observed events k in each bin.
Similar to `__calc_p_alpha__`, but CDF is calculated based on the
computed normalized likelihood values `pdfs` and the corresponding
k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
The CDF ratio is evaluated at these k values.
Shape: [n_bins]
upper : bool, optional
If true, the upper PDF tail will be considered, i.e. the ratio
P(x > k_i) / P(x > mu_i) will be computed.
If false, P(x <= k_i) / P(x <= mu_i) is computed.
Returns
-------
array_like
The ratio P(x <= k_i) / P(x <= mu_i) for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
If upper is True, then '<=' switches to '>'.
Shape: [n_bins]
"""
assert mu.shape == k.shape, 'Shapes of \'mu\' and \'k\' have to be the same'
limit = np.copy(k)
is_nan = np.logical_or(np.isnan(k), np.isnan(mu))
is_finite = mu != 0.
for i, (pdf, ksi) in enumerate(zip(pdfs, ks)):
cdf = np.cumsum(pdf)
if is_finite[i]:
mu_idx = np.where(ksi == int(mu[i]))[0]
if len(mu_idx) == 0:
a_ref = np.nan
else:
a_ref = cdf[mu_idx]
k_idx = np.where(ksi == int(k[i]))[0]
if len(k_idx) == 0:
if upper:
a_k = 1
else:
a_k = 0
else:
a_k = cdf[k_idx]
if upper:
if 1 - a_k == 0.:
limit[i] = np.inf
else:
ratio = (1 - a_k) / (1 - a_ref)
limit[i] = ratio
else:
if a_k == 0:
limit[i] = np.inf
else:
ratio = a_k / a_ref
limit[i] = ratio
limit[is_nan] = np.nan
return limit
def calc_p_alpha_ratio(mu, k):
"""Get the CDF ratio at the measured `k` values in each observable bin.
Parameters
----------
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
Shape: [n_bins]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= k_i) / P(x <= mu_i) if k_i <= mu_i
P(x > k_i) / P(x > mu_i) if k_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the expected number of events
in each observable bin and under the assumption of a Poisson
distribution.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins]
"""
is_upper = k > mu
ratio = np.zeros_like(mu)
for upper in [False, True]:
if upper:
mask = is_upper
else:
mask = ~is_upper
returned_vals = __calc_p_alpha__(mu[mask],
k[mask],
upper=upper)
is_nan = np.logical_or(np.isnan(k[mask]), np.isnan(mu[mask]))
is_zero_mu = mu[mask] == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
ratio[mask] = returned_vals
return ratio
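# Example (illustrative, not part of the original module):
#
#   mu = np.array([10.0, 10.0])
#   k = np.array([10.0, 20.0])
#   calc_p_alpha_ratio(mu, k)
#   # -> ratio of 1.0 where k == mu, and a value much smaller than 1
#   #    for the discrepant bin with k=20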
def calc_p_alpha_ratio_pdf(pdfs, ks, mu, k):
"""Get the CDF ratio at the measured `k` values in each observable bin.
Similar to `calc_p_alpha_ratio`, but the CDF calculation is based on the
normalized likelihood values `pdfs` and corresponding k values `ks`.
Parameters
----------
pdfs : list of list of float
The pdf values for each feature bin and for each value k.
The value k is the observed number of events in the Poisson Likelihood.
The number of evaluated k values is different for each observable bin,
and it is chosen such that a certain coverage is obtained.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
ks : list of list of int
The corresponding k value for each of the evaluated pdf values `pdfs`.
Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
mu : array_like
The expected number (Poisson mean) of events in each observable bin.
Shape: [n_bins]
k : array_like
The measured number (Poisson k) of events in each observable bin.
Shape: [n_bins]
Returns
-------
array_like
The ratio of the PDF tails:
P(x <= k_i) / P(x <= mu_i) if k_i <= mu_i
P(x > k_i) / P(x > mu_i) if k_i > mu_i
for each observable bin i.
The CDF P(x <= y) is calculated based on the normalized likelihood
values `pdfs` and corresponding k values `ks`.
This ratio reaches 1., if the measured values `k` agree well with the
expected values `mu`. The smaller this ratio is, the higher the
discrepancy.
Shape: [n_bins]
"""
is_upper = k > mu
ratio = np.zeros_like(mu)
for upper in [False, True]:
if upper:
mask = is_upper
else:
mask = ~is_upper
returned_vals = __calc_p_alpha_pdf__(compress(pdfs, mask),
compress(ks, mask),
mu[mask],
k[mask],
upper=upper)
is_nan = np.logical_or(np.isnan(k[mask]), np.isnan(mu[mask]))
is_zero_mu = mu[mask] == 0.
only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)
returned_vals[only_zero_mu] = -np.inf
ratio[mask] = returned_vals
return ratio
|
{"hexsha": "e098f3ba8bc8d407daa2610883cca99591581e8a", "size": 26216, "ext": "py", "lang": "Python", "max_stars_repo_path": "disteval/visualization/comparison_plotter/functions/calc_funcs.py", "max_stars_repo_name": "jebuss/pydisteval", "max_stars_repo_head_hexsha": "52c1c21cd5568b640732deb29f4216d881d6dd53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-22T10:51:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T10:51:12.000Z", "max_issues_repo_path": "disteval/visualization/comparison_plotter/functions/calc_funcs.py", "max_issues_repo_name": "jebuss/pydisteval", "max_issues_repo_head_hexsha": "52c1c21cd5568b640732deb29f4216d881d6dd53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-02-25T09:17:48.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-11T10:41:29.000Z", "max_forks_repo_path": "disteval/visualization/comparison_plotter/functions/calc_funcs.py", "max_forks_repo_name": "jebuss/pydisteval", "max_forks_repo_head_hexsha": "52c1c21cd5568b640732deb29f4216d881d6dd53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-07-05T11:40:44.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-05T11:40:44.000Z", "avg_line_length": 36.6657342657, "max_line_length": 79, "alphanum_fraction": 0.6134803174, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6566}
|
[STATEMENT]
lemma ex_gt_count_imp_le_multiset:
"(\<forall>y :: 'a :: order. y \<in># M + N \<longrightarrow> y \<le> x) \<Longrightarrow> count M x < count N x \<Longrightarrow> M < N"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>y. y \<in># M + N \<longrightarrow> y \<le> x; count M x < count N x\<rbrakk> \<Longrightarrow> M < N
[PROOF STEP]
unfolding less_multiset\<^sub>H\<^sub>O
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>\<forall>y. y \<in># M + N \<longrightarrow> y \<le> x; count M x < count N x\<rbrakk> \<Longrightarrow> M \<noteq> N \<and> (\<forall>y. count N y < count M y \<longrightarrow> (\<exists>x>y. count M x < count N x))
[PROOF STEP]
by (metis count_greater_zero_iff le_imp_less_or_eq less_imp_not_less not_gr_zero union_iff)
|
{"llama_tokens": 311, "file": null, "length": 2}
|
(* infotheo: information theory and error-correcting codes in Coq *)
(* Copyright (C) 2020 infotheo authors, license: LGPL-2.1-or-later *)
From mathcomp Require Import all_ssreflect ssralg fingroup finalg matrix.
Require Import Reals Lra.
From mathcomp Require Import Rstruct.
Require Import ssrZ ssrR Reals_ext ssr_ext logb ssralg_ext bigop_ext Rbigop.
Require Import fdist proba entropy aep typ_seq channel.
(******************************************************************************)
(* Jointly typical sequences *)
(* *)
(* Definitions: *)
(* JTS P W n epsilon == epsilon-jointly typical sequences of size n for an *)
(* input distribution P and a channel W *)
(* JTS(n,e) is a subset of TS_{P,W}(n,e) such that *)
(* (x,y) \in JTS(n,e) <-> *)
(* x \in TS_P(n,e) /\ y \in TS_{PW}(n,e) *)
(* *)
(* Lemmas: *)
(* JTS_sup == Upper-bound for the set of jointly typical *)
(* sequences *)
(* JTS_1 == when they are very long, the jointly typical *)
(* sequences coincide with the typical sequences of *)
(* the joint distribution *)
(* non_typical_sequences == the probability of the same event (joint *)
(* typicality) taken over the product distribution *)
(*                            of the inputs and the outputs considered       *)
(*                            independently tends to 0 as n gets large       *)
(* *)
(* For details, see Reynald Affeldt, Manabu Hagiwara, and Jonas Sénizergues. *)
(* Formalization of Shannon's theorems. Journal of Automated Reasoning, *)
(* 53(1):63--103, 2014 *)
(******************************************************************************)
Declare Scope jtyp_seq_scope.
Reserved Notation "'`JTS'".
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Local Open Scope typ_seq_scope.
Local Open Scope fdist_scope.
Local Open Scope channel_scope.
Local Open Scope entropy_scope.
Local Open Scope R_scope.
Section joint_typ_seq_definition.
Variables A B : finType.
Variable P : fdist A.
Variable W : `Ch(A, B).
Variable n : nat.
Variable epsilon : R.
Definition jtyp_seq (t : 'rV[A * B]_n) :=
[&& typ_seq P epsilon (rV_prod t).1,
typ_seq (`O(P , W)) epsilon (rV_prod t).2 &
typ_seq ((P `X W)) epsilon t].
Definition set_jtyp_seq : {set 'rV[A * B]_n} := [set tab | jtyp_seq tab].
Local Notation "'`JTS'" := (set_jtyp_seq).
Lemma typical_sequence1_JTS x : prod_rV x \in `JTS ->
exp2 (- INR n * (`H P + epsilon)) <= P `^ n x.1 <= exp2 (- INR n * (`H P - epsilon)).
Proof.
rewrite inE => /and3P[/andP[/leRP JTS11 /leRP JTS12] _ _].
by rewrite prod_rVK in JTS11, JTS12.
Qed.
Lemma typical_sequence1_JTS' x : prod_rV x \in `JTS ->
exp2 (- INR n * (`H (`O( P , W)) + epsilon)) <= (`O( P , W)) `^ n x.2 <=
exp2 (- INR n * (`H (`O( P , W)) - epsilon)).
Proof.
rewrite inE => /and3P[_ /andP[/leRP JTS11 /leRP JTS12] _].
by rewrite prod_rVK in JTS11, JTS12.
Qed.
End joint_typ_seq_definition.
Notation "'`JTS'" := (set_jtyp_seq) : jtyp_seq_scope.
Local Open Scope jtyp_seq_scope.
Section jtyp_seq_upper.
Variables (A B : finType) (P : fdist A) (W : `Ch(A, B)).
Variable n : nat.
Variable epsilon : R.
Lemma JTS_sup : INR #| `JTS P W n epsilon| <= exp2 (INR n * (`H(P , W) + epsilon)).
Proof.
have : INR #|`JTS P W n epsilon| <= INR #|`TS ((P `X W)) n epsilon|.
suff : `JTS P W n epsilon \subset `TS ((P `X W)) n epsilon.
by move/subset_leq_card/leP/le_INR.
apply/subsetP => tab.
by rewrite /set_jtyp_seq inE /jtyp_seq inE => /and3P[].
move/leR_trans; apply; exact: (@TS_sup _ ((P `X W)) epsilon n).
Qed.
End jtyp_seq_upper.
Section jtyp_seq_transmitted.
Variables (A B : finType) (P : fdist A) (W : `Ch(A, B)).
Variable epsilon : R.
Local Open Scope zarith_ext_scope.
Definition JTS_1_bound :=
maxn '| up (aep_bound P (epsilon / 3)) |
(maxn '| up (aep_bound (`O(P , W)) (epsilon / 3)) |
'| up (aep_bound ((P `X W)) (epsilon / 3)) |).
Variable n : nat.
Hypothesis He : 0 < epsilon.
Lemma JTS_1 : (JTS_1_bound <= n)%nat ->
1 - epsilon <= Pr ((P `X W) `^ n) (`JTS P W n epsilon).
Proof.
have : (JTS_1_bound <= n)%nat ->
Pr ( (P `^ n `X (W ``^ n)) )
[set x | x.1 \notin `TS P n epsilon] +
Pr ( (P `^ n `X (W ``^ n)) )
[set x | x.2 \notin `TS (`O(P , W)) n epsilon] +
Pr ( (P `^ n `X (W ``^ n)))
[set x | prod_rV x \notin `TS ( (P `X W) ) n epsilon] <= epsilon.
have H1 : forall n, Pr ((P `X W) `^ n) [set x | (rV_prod x).1 \notin `TS P n epsilon ] <=
Pr (P `^ n) [set x | x \notin `TS P n (epsilon / 3)].
move=> m.
have : 1 <= 3 by lra.
move/(set_typ_seq_incl P m (ltRW He)) => Hincl.
rewrite (Pr_DMC_fst P W (fun x => x \notin `TS P m epsilon)).
apply/Pr_incl/subsetP => i /=; rewrite !inE.
apply contra.
by move/subsetP : Hincl => /(_ i); rewrite !inE.
have {H1}HnP : forall n, ('| up (aep_bound P (epsilon / 3)) | <= n)%nat ->
Pr ((P `X W) `^ n) [set x | (rV_prod x).1 \notin `TS P n epsilon ] <= epsilon /3.
move=> m Hm.
apply: leR_trans; first exact: (H1 m).
have m_prednK : m.-1.+1 = m.
rewrite prednK // (leq_trans _ Hm) // (_ : O = '| 0 |) //.
by apply/ltP/Zabs_nat_lt; split; [by [] | apply/up_pos/aep_bound_ge0; lra].
have : 1 - (epsilon / 3) <= Pr (P `^ m) (`TS P m (epsilon/3)).
rewrite -m_prednK.
apply Pr_TS_1.
- by apply divR_gt0 => //; lra.
- rewrite m_prednK.
move/leP/le_INR : Hm; apply leR_trans.
rewrite INR_Zabs_nat; last first.
apply/ltZW/up_pos/aep_bound_ge0 => //.
apply divR_gt0 => //; lra.
exact/ltRW/(proj1 (archimed _ )).
rewrite leR_subl_addr addRC -leR_subl_addr; apply: leR_trans.
by rewrite Pr_to_cplt setCK; exact/leRR.
have H1 m :
Pr ((P `X W) `^ m) [set x | (rV_prod x).2 \notin `TS ( `O(P , W) ) m epsilon ] <=
Pr ( (`O( P , W) ) `^ m) (~: `TS ( `O( P , W) ) m (epsilon / 3)).
have : 1 <= 3 by lra.
move/(set_typ_seq_incl (`O(P , W)) m (ltRW He)) => Hincl.
rewrite Pr_DMC_out.
apply/Pr_incl/subsetP => i /=; rewrite !inE.
apply contra.
move/subsetP : Hincl => /(_ i).
by rewrite !inE.
have {H1}HnPW m : ('| up (aep_bound (`O(P , W)) (epsilon / 3)) | <= m)%nat ->
Pr ((P `X W) `^ m) [set x | (rV_prod x).2 \notin `TS (`O(P , W)) m epsilon] <= epsilon /3.
move=> Hm.
apply: leR_trans; first exact: (H1 m).
have m_prednK : m.-1.+1 = m.
rewrite prednK // (leq_trans _ Hm) // (_ : O = '| 0 |) //.
apply/ltP/Zabs_nat_lt (* TODO: ssrZ? *); split; [by []|apply/up_pos/aep_bound_ge0; lra].
have : 1 - epsilon / 3 <= Pr ((`O(P , W)) `^ m) (`TS (`O(P , W)) m (epsilon / 3)).
rewrite -m_prednK.
apply Pr_TS_1.
- apply divR_gt0 => //; lra.
- move/leP/le_INR : Hm.
rewrite m_prednK.
apply leR_trans.
rewrite INR_Zabs_nat; last first.
apply/ltZW/up_pos/aep_bound_ge0; lra.
exact/ltRW/(proj1 (archimed _ )).
rewrite leR_subl_addr addRC -leR_subl_addr; apply: leR_trans.
by rewrite Pr_to_cplt setCK; exact/leRR.
have H1 m : Pr ((P `X W) `^ m) (~: `TS ((P `X W)) m epsilon) <=
Pr (((P `X W) ) `^ m) (~: `TS ((P `X W)) m (epsilon / 3)).
have : 1 <= 3 by lra.
move/(set_typ_seq_incl ((P `X W)) m (ltRW He)) => Hincl.
apply/Pr_incl/subsetP => /= v; rewrite !inE.
apply contra.
by move/subsetP : Hincl => /(_ v); by rewrite !inE.
have {H1}HnP_W m : ('| up (aep_bound ((P `X W)) (epsilon / 3)) | <= m)%nat ->
Pr ((P `X W) `^ m) (~: `TS ((P `X W)) m epsilon) <= epsilon /3.
move=> Hm.
apply: leR_trans; first exact: (H1 m).
have m_prednK : m.-1.+1 = m.
rewrite prednK // (leq_trans _ Hm) // (_ : O = '| 0 |) //.
apply/ltP/Zabs_nat_lt; split; [by []|apply/up_pos/aep_bound_ge0; lra].
have : 1 - epsilon / 3 <= Pr (((P `X W)) `^ m) (`TS ((P `X W)) m (epsilon / 3)).
rewrite -m_prednK; apply Pr_TS_1.
- apply divR_gt0 => //; lra.
- rewrite m_prednK.
move/leP/le_INR : Hm; apply leR_trans.
rewrite INR_Zabs_nat; last first.
apply/ltZW/up_pos/aep_bound_ge0; lra.
exact/Rlt_le/(proj1 (archimed _ )).
rewrite leR_subl_addr addRC -leR_subl_addr; apply: leR_trans.
by rewrite Pr_to_cplt setCK; exact/leRR.
move=> Hn.
rewrite [in X in _ <= X](_ : epsilon = epsilon / 3 + epsilon / 3 + epsilon / 3)%R; last by field.
move: Hn; rewrite 2!geq_max => /andP[Hn1 /andP[Hn2 Hn3]].
rewrite !Pr_DMC_rV_prod.
apply leR_add; first by apply leR_add; [exact: HnP | exact: HnPW].
apply: leR_trans; last exact/HnP_W/Hn3.
by apply/Req_le; congr Pr; apply/setP => /= tab; by rewrite !inE rV_prodK.
move=> Hn_Pr Hn.
suff H : Pr ((P `X W) `^ n ) (~: `JTS P W n epsilon) <= epsilon.
rewrite -(Pr_cplt ((P `X W) `^ n) (`JTS P W n epsilon)).
by rewrite leR_subl_addr leR_add2l.
apply (@leR_trans (Pr ((P `X W) `^ n)
([set x | ((rV_prod x).1 \notin `TS P n epsilon)] :|:
([set x | ((rV_prod x).2 \notin `TS (`O( P , W)) n epsilon)] :|:
(~: `TS ((P `X W)) n epsilon))))).
by apply Req_le; congr Pr; apply/setP => xy; rewrite !inE 2!negb_and orbA.
apply: leR_trans; last exact: Hn_Pr.
apply (@leR_trans (
Pr ((P `X W) `^ n) [set x | (rV_prod x).1 \notin `TS P n epsilon] +
Pr ((P `X W) `^ n) ([set x | ((rV_prod x).2 \notin `TS (`O( P , W)) n epsilon)] :|:
(~: `TS ((P `X W)) n epsilon)))).
exact: Pr_union.
rewrite -addRA !Pr_DMC_rV_prod; apply/leR_add2l; apply: leR_trans (Pr_union _ _ _).
by apply/Req_le; congr Pr; apply/setP => t; rewrite !inE rV_prodK.
Qed.
End jtyp_seq_transmitted.
Section non_typicality.
Variables (A B : finType) (P : fdist A) (W : `Ch(A, B)) (n : nat) (epsilon : R).
Lemma non_typical_sequences : Pr ((P `^ n) `x ((`O(P , W)) `^ n))
[set x | prod_rV x \in `JTS P W n epsilon] <= exp2 (- n%:R * (`I(P, W) - 3 * epsilon)).
Proof.
rewrite /Pr /=.
apply (@leR_trans (\sum_(i | i \in `JTS P W n epsilon)
(exp2 (- INR n * (`H P - epsilon)) * exp2 (- n%:R * (`H( P `o W ) - epsilon))))) => /=.
rewrite (reindex_onto (fun y => prod_rV y) (fun x => rV_prod x)) /=; last first.
by move=> ? ?; rewrite rV_prodK.
apply: leR_sumRl => i; rewrite inE => iJTS.
- rewrite fdist_prodE; apply leR_pmul => //.
exact: proj2 (typical_sequence1_JTS iJTS).
exact: proj2 (typical_sequence1_JTS' iJTS).
- exact/mulR_ge0.
- by rewrite prod_rVK eqxx andbC.
rewrite (_ : \sum_(_ | _) _ =
INR #| `JTS P W n epsilon| *
exp2 (- n%:R * (`H P - epsilon)) * exp2 (- INR n * (`H( P `o W) - epsilon))); last first.
by rewrite big_const iter_addR mulRA.
apply (@leR_trans (exp2 (INR n * (`H( P , W ) + epsilon)) *
exp2 (- n%:R * (`H P - epsilon)) * exp2 (- INR n * (`H( P `o W ) - epsilon)))).
do 2 apply leR_wpmul2r => //.
exact/JTS_sup.
apply Req_le; rewrite -2!ExpD; congr (exp2 _).
rewrite /mutual_info_chan !mulRDr 2!Rmult_opp_opp.
by rewrite (_ : 3 * epsilon = epsilon + epsilon + epsilon); field.
Qed.
End non_typicality.
|
{"author": "affeldt-aist", "repo": "infotheo", "sha": "5f9efb859dbadcbcae2330e2e21e76f9b632d879", "save_path": "github-repos/coq/affeldt-aist-infotheo", "path": "github-repos/coq/affeldt-aist-infotheo/infotheo-5f9efb859dbadcbcae2330e2e21e76f9b632d879/information_theory/joint_typ_seq.v"}
|
%--- help for dsge_solver_h ---
%
% H1 line
%
% ::
%
%
% Args:
%
% Returns:
% :
%
% Note:
%
% Example:
%
% See also:
%
% Other functions named dsge_solver_h
%
% dsge/dsge_solver_h
%
|
{"author": "jmaih", "repo": "RISE_toolbox", "sha": "1b2edfa27830c6d522f9d7d2335d33c3e4d84285", "save_path": "github-repos/MATLAB/jmaih-RISE_toolbox", "path": "github-repos/MATLAB/jmaih-RISE_toolbox/RISE_toolbox-1b2edfa27830c6d522f9d7d2335d33c3e4d84285/classes/models/@dsge/private/+deprecated/dsge_solver_h.m"}
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
import random
import numpy as np
import os
import shutil
import logging
import paddle
import paddle.nn as nn
import paddle.utils as utils
import paddle.static as static
import paddle.nn.functional as F
import paddle.distributed.auto_parallel as auto
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.distributed.passes import new_pass, PassManager, PassContext
import paddle.distributed.fleet as fleet
from dist_pass_test_base import DistPassTestBase
from paddle.distributed.auto_parallel.dist_context import DistributedContext
logging.getLogger().setLevel(logging.INFO)
paddle.enable_static()
_global_parallel_strategy = None
_global_process_mesh = None
#np.set_printoptions(suppress=True)
class MLPLayer(nn.Layer):
def __init__(self,
hidden_size=128,
intermediate_size=4 * 128,
initializer_range=0.02):
super(MLPLayer, self).__init__()
d_model = hidden_size
dim_feedforward = intermediate_size
np.random.seed(2021)
arr0 = np.random.normal(0, 0.02, size=(d_model, dim_feedforward))
arr1 = np.random.normal(0, 0.02, size=(dim_feedforward, d_model))
weight_attr0 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr0))
weight_attr1 = paddle.ParamAttr(initializer=NumpyArrayInitializer(arr1))
bias_attr = None
self.linear0 = nn.Linear(
d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr)
self.linear1 = nn.Linear(
dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr)
self.linear2 = nn.Linear(
d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr)
self.linear3 = nn.Linear(
dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr)
self.linear4 = nn.Linear(
d_model, dim_feedforward, weight_attr0, bias_attr=bias_attr)
self.linear5 = nn.Linear(
dim_feedforward, d_model, weight_attr1, bias_attr=bias_attr)
self.norm0 = nn.LayerNorm(d_model, epsilon=1e-5)
self.norm1 = nn.LayerNorm(d_model, epsilon=1e-5)
self.norm2 = nn.LayerNorm(d_model, epsilon=1e-5)
def forward(self, input):
out = self.norm0(input)
out = self.linear0(out)
out = F.gelu(out, approximate=True)
out = self.linear1(out)
out = self.norm1(out)
out = self.linear2(out)
out = F.gelu(out, approximate=True)
out = self.linear3(out)
out = self.norm2(out)
out = self.linear4(out)
out = F.gelu(out, approximate=True)
out = self.linear5(out)
return out
def mlp_forward(input, label, hidden_size):
if _global_parallel_strategy == "dp":
auto.shard_tensor(
input,
dist_attr={
"process_mesh": _global_process_mesh,
"dims_mapping": [0, -1]
})
mlp = MLPLayer(
hidden_size=hidden_size,
intermediate_size=4 * hidden_size,
initializer_range=0.02)
predict = mlp(input)
error_cost = paddle.nn.functional.square_error_cost(predict, label)
loss = paddle.mean(error_cost)
return loss
class TestGradientMergePass(DistPassTestBase):
def init(self):
self._params_grads = None
self._config = {"k_steps": 4, "avg": True}
#self._config["dist_context"] = DistributedContext()
def apply_passes(self, main_prog, startup_prog):
#self._config["params_grads"] = self._params_grads
#pass_context = PassContext()
#auto_parallel_gradient_merge_pass = new_pass(
# "auto_parallel_gradient_merge_pass", self._config)
#auto_parallel_gradient_merge_pass.apply([main_prog], [startup_prog],
# pass_context)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.gradient_merge = True
dist_strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
dist_strategy.semi_auto = True
fleet.init(is_collective=True, strategy=dist_strategy)
def test_result(self):
no_pass_rets = self._distributed_launch(
model=None,
apply_pass=False,
gpus=[0],
gradient_merge=False,
batch_size=32,
max_step=2)
pass_rets = self._distributed_launch(
model=None,
apply_pass=True,
gpus=[0],
gradient_merge=True,
batch_size=8,
max_step=8)
"""
# avg loss for gradient_merge pass
avg_loss = 0
pass_avg_ret_list = []
for i, pass_ret in enumerate(pass_rets[0]):
if (i + 1) % 4 == 0:
avg_loss += pass_ret[0]
pass_avg_ret_list.append([avg_loss / 4])
avg_loss = 0
else:
avg_loss += pass_ret[0]
for no_pass_ret, pass_ret in zip(no_pass_rets[0], pass_avg_ret_list):
print(f"no_pass_ret={no_pass_ret}, pass_ret={pass_ret}")
self.assertTrue(
np.isclose(
no_pass_ret,
pass_ret,
rtol=self.rtol,
atol=self.atol,
equal_nan=self.equal_nan))
"""
def get_model(self, place, gradient_merge, batch_size, max_step):
paddle.seed(2021)
random.seed(2021)
np.random.seed(2021)
hidden_size = 128
global _global_parallel_strategy
global _global_process_mesh
world_size = paddle.distributed.get_world_size()
if world_size == 1:
_global_parallel_strategy = "dp"
_global_process_mesh = auto.ProcessMesh([0])
elif world_size == 2:
_global_parallel_strategy = "dp"
_global_process_mesh = auto.ProcessMesh([0, 1])
train_program = static.Program()
startup_program = static.Program()
dist_strategy = fleet.DistributedStrategy()
dist_strategy.semi_auto = True
#if gradient_merge:
# dist_strategy.gradient_merge = True
# dist_strategy.gradient_merge_configs = {"k_steps": 4, "avg": True}
fleet.init(is_collective=True, strategy=dist_strategy)
with static.program_guard(train_program, startup_program), \
utils.unique_name.guard():
input = static.data(
name="input", shape=[batch_size, hidden_size], dtype='float32')
label = static.data(
name="label", shape=[batch_size, 1], dtype='float32')
input.stop_gradient = False
loss = mlp_forward(input, label, hidden_size)
optimizer = paddle.fluid.optimizer.SGDOptimizer(learning_rate=0.01)
#optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer)
_, self._params_grads, dist_startup_prog, dist_main_prog = optimizer.minimize(
loss, startup_program)
input_data = np.random.random(size=(128, hidden_size)).astype('float32')
label_data = np.random.random(size=(128, 1)).astype('float32')
def reader():
for i in range(max_step):
x_data = input_data[i * batch_size:(i + 1) * batch_size, :]
y_data = label_data[i * batch_size:(i + 1) * batch_size, :]
yield x_data, y_data
return dist_main_prog, dist_startup_prog, [input, label], [loss], reader
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "0c324ba8ee9aa2d16fadfd68e8b19e9e9a3a9abf", "size": 8208, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/distributed_passes/test_dist_gradient_merge_pass.py", "max_stars_repo_name": "DevilCarp/Paddle", "max_stars_repo_head_hexsha": "04325d2cbefb029a4478bdc069d3279cd566ac6a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2016-08-15T07:02:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-24T09:34:00.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/distributed_passes/test_dist_gradient_merge_pass.py", "max_issues_repo_name": "DevilCarp/Paddle", "max_issues_repo_head_hexsha": "04325d2cbefb029a4478bdc069d3279cd566ac6a", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-28T07:23:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T07:23:22.000Z", "max_forks_repo_path": "python/paddle/fluid/tests/unittests/distributed_passes/test_dist_gradient_merge_pass.py", "max_forks_repo_name": "DevilCarp/Paddle", "max_forks_repo_head_hexsha": "04325d2cbefb029a4478bdc069d3279cd566ac6a", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8071748879, "max_line_length": 86, "alphanum_fraction": 0.6365740741, "include": true, "reason": "import numpy", "num_tokens": 1871}
|
function part1(input)
risk = reduce(hcat, (parse.(Int, collect(line))
for line in eachline(input)))
return shortest_path(risk)
end
function part2(input)
risk = reduce(hcat, (parse.(Int, collect(line))
for line in eachline(input)))
h, w = size(risk)
risk = repeat(risk, 5, 5)
risk .+= div.(0:(size(risk, 1) - 1), h)
risk .+= permutedims(div.(0:(size(risk, 2) - 1), w))
risk .= mod1.(risk, 9)
return shortest_path(risk)
end
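# Dijkstra-style search (comment added for clarity, not in the original):
# `queue` is a plain vector used as a priority queue, and findmin(last, queue)
# scans it for the frontier cell with the lowest accumulated risk.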
function shortest_path(risk)
queue = [(CartesianIndex((1, 1)), 0)]
neighbors = CartesianIndex.(((1, 0), (-1, 0), (0, 1), (0, -1)))
total_risk = fill(typemax(Int), size(risk))
C = CartesianIndices(risk)
finish = last(C)
while true
v, i = findmin(last, queue)
pos = first(queue[i])
pos == finish && return v
deleteat!(queue, i)
for neighbor in Ref(pos) .+ neighbors
neighbor in C || continue
neighbor_risk = v + risk[neighbor]
neighbor_risk < total_risk[neighbor] || continue
total_risk[neighbor] = neighbor_risk
pushfirst!(queue, (neighbor, neighbor_risk))
end
end
end
|
{"hexsha": "69921da7f1e26638dc3c87a4bf1f6962629399a1", "size": 1214, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "2021/day15.jl", "max_stars_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_stars_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-01T16:33:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T21:46:33.000Z", "max_issues_repo_path": "2021/day15.jl", "max_issues_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_issues_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2021/day15.jl", "max_forks_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_forks_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9473684211, "max_line_length": 67, "alphanum_fraction": 0.5650741351, "num_tokens": 346}
|
"""
Jacob Kaplan
kmeans.py
"""
import sys
import cv2 as cv
import numpy as np
def scale(img):
"""
Take in image
Reshape it to have width of 600 pixels
Use OpenCV mean shift to recolor each pixel by shifting it towards
the mode of a given radius of pixels
Return recolored image
"""
m, n = img.shape[:2]
img = cv.resize(img, (600, int(600*(m/n))))
shiftImg = cv.pyrMeanShiftFiltering(img, 50, 50, 2)
return shiftImg
def cluster(img, K):
"""
Take in image and integer K
Use kmeans clustering to segment and image by color
Return clustered image
"""
pixels = np.float32(img.reshape((-1,3)))  # renamed from `input` to avoid shadowing the builtin
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
ret, label, center = \
cv.kmeans(pixels, K, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
cluster = center[label.flatten()]
cluster = cluster.reshape((img.shape))
return cluster
if __name__ == "__main__":
"""
Handle command line arguments
"""
if len(sys.argv) != 3:
print("Usage: {} K".format(sys.argv[0]))
sys.exit()
else:
inImgName = sys.argv[1]
K = sys.argv[2]
inImg = cv.imread(inImgName)
# cv.imread returns None instead of raising when the file cannot be read
if inImg is None:
    print("{} is not a valid image!".format(inImgName))
    sys.exit()
try:
K = int(K)
except ValueError:
print("K must be integer!")
sys.exit()
scaledImg = scale(inImg)
clusterImg = cluster(scaledImg, K)
name, ext = inImgName.rsplit(".", 1)  # split on the last dot so dotted paths still work
cv.imwrite("{}_kmeans.{}".format(name, ext), clusterImg)
|
{"hexsha": "15f38747372dd6255c68200bfe4f5def584572b2", "size": 1640, "ext": "py", "lang": "Python", "max_stars_repo_path": "kmeans/kmeans.py", "max_stars_repo_name": "jcolekaplan/computer_vision", "max_stars_repo_head_hexsha": "48d39b081a7b6b699019052eeae36ab703bb34eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kmeans/kmeans.py", "max_issues_repo_name": "jcolekaplan/computer_vision", "max_issues_repo_head_hexsha": "48d39b081a7b6b699019052eeae36ab703bb34eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kmeans/kmeans.py", "max_forks_repo_name": "jcolekaplan/computer_vision", "max_forks_repo_head_hexsha": "48d39b081a7b6b699019052eeae36ab703bb34eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.2307692308, "max_line_length": 74, "alphanum_fraction": 0.6073170732, "include": true, "reason": "import numpy", "num_tokens": 448}
|
The Medical Sciences Building (really four buildings known as Med Sci 1A, 1B, 1C, or 1D), is address(located, 38.533564, 121.763794) near the Genome and Biomedical Sciences Facility building in the Health Sciences Complex which is near the western edge of the core campus. Med Sci 1A is more commonly known as Tupper Hall.
,
|
{"hexsha": "c22df86b572de95fb6ef17618c76682fe44775a7", "size": 326, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Medical_Sciences.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Medical_Sciences.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Medical_Sciences.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.5, "max_line_length": 322, "alphanum_fraction": 0.7852760736, "num_tokens": 86}
|
# -*- coding: utf-8 -*-
from ctypes import *
import numpy as np
import time
import math
class Point(object):
x = 0
y = 0
def __init__(self, x=0, y=0):
self.x = x
self.y = y
# Check whether the speed changes abruptly; each call takes roughly 2 us
def speed_jump(c1, c2, c3, inter_time):
'''
1. Drop the first 10 frames of each vehicle to avoid detection jitter right after it enters the frame
2. Compute the speed s1 between the 10th- and 20th-from-last frames of the trajectory
3. Compute the speed s2 between the last frame and the 10th-from-last frame
4. Check the rate of change between these two speeds
c1, c2, c3: counting back from the current frame, the trajectory points at the 10th-from-last frame, the 5th-from-last frame, and the current frame
inter_time: the time between two consecutive frames, derived from the frame rate
'''
# math version: measured about 6x faster than the numpy equivalent, which would need the coordinates converted to arrays
length1x = c1.x - c2.x
length1y = c1.y - c2.y
length2x = c2.x - c3.x
length2y = c2.y - c3.y
dis1 = math.sqrt((length1x ** 2) + (length1y ** 2))
dis2 = math.sqrt((length2x ** 2) + (length2y ** 2))
s1 = dis1 / (inter_time*9)
s2 = dis2 / (inter_time*9)
jump_ratio = abs(s2 - s1) / (s1 + 0.000001)  # renamed to avoid shadowing the function name
return s2, jump_ratio
if __name__ == '__main__':
p1 = Point(30, 100)
p2 = Point(50, 110)
p3 = Point(55, 115)
fre = 0.04
speed_jump(p1, p2, p3, fre)
|
{"hexsha": "e9bd113adef90afbacb4a4e62a3a7a4cd4ba87ad", "size": 1109, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/speed_jump.py", "max_stars_repo_name": "chiyukunpeng/traffic-accident-detection", "max_stars_repo_head_hexsha": "f0e33744d6bd2d634c22ef2d561558c4b10105d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-03-16T11:48:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T12:17:36.000Z", "max_issues_repo_path": "script/speed_jump.py", "max_issues_repo_name": "chiyukunpeng/traffic-accident-detection", "max_issues_repo_head_hexsha": "f0e33744d6bd2d634c22ef2d561558c4b10105d6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-22T10:01:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-15T01:56:56.000Z", "max_forks_repo_path": "script/speed_jump.py", "max_forks_repo_name": "chiyukunpeng/traffic-accident-detection", "max_forks_repo_head_hexsha": "f0e33744d6bd2d634c22ef2d561558c4b10105d6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-18T02:03:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T02:03:39.000Z", "avg_line_length": 21.7450980392, "max_line_length": 67, "alphanum_fraction": 0.565374211, "include": true, "reason": "import numpy", "num_tokens": 537}
|
"""
Inferring a binomial proportion via exact mathematical analysis.
"""
import csv
import numpy as np
import pandas as pd
from parcoordapp.apps import ParCoordDataFiles
class IrisDataKmeans(object):
def __init__(self):
csvFile = ParCoordDataFiles().getIrisCSV()
#"/Users/halil/Yandex.Disk.localized/root/academic/myphd/phd/0070-coding/parallel-coord/frontend/public/data/iris.data.csv"
csvDictReader = csv.DictReader(open(csvFile))
self.df = pd.read_csv(csvFile)
def kMeans(self, X, K, maxIters = 7):
#np.seterr(divide='ignore', invalid='ignore')
#print (np.random.choice(np.arange(len(X)), K))
cents = np.random.choice(np.arange(len(X)), K)
centroids = X[cents, :]
clusterMembers=[]
for i in range(maxIters):
# Cluster Assignment step
C = np.array([np.argmin([np.dot(x_i-y_k, x_i-y_k) for y_k in centroids]) for x_i in X])
# Move centroids step
centroids = [X[C == k].mean(axis = 0) for k in range(K)]
if i==(maxIters-1):
clusterMembers = [X[C == k].tolist() for k in range(K)]
return np.array(centroids) , C, clusterMembers
def kmeansOf(self, x1, x2, K=3):
#X = np.vstack((data1,np.vstack((data2,data3))))
X = np.vstack((x1,x2)).astype(np.float64)
print ("X.type", X.dtype)
centroids, C, CMembers = self.kMeans(X.T, K)
extents = [
[np.min(x1), np.max(x1)],
[np.min(x2), np.max(x2)]
]
return {"extents":extents, "centroids":centroids.tolist(), "C":C.tolist(), "members":CMembers}
def kmeansOfVars(self, var1n, var2n, K):
x1 = np.array(self.df[var1n].values).astype(np.float64)
x2 = np.array(self.df[var2n].values).astype(np.float64)
return self.kmeansOf(x1,x2, K)
def test(self):
result= self.kmeansOfVars("petal_len", "petal_w", 3)
print (result)
#print (CMembers[0])
#IrisDataKmeans().test()
class BreastCancerKmeans(object):
def __init__(self):
csvFile = ParCoordDataFiles().getBreastCancerCSV()
#"/Users/halil/Yandex.Disk.localized/root/academic/myphd/phd/0070-coding/parallel-coord/frontend/public/data/iris.data.csv"
csvDictReader = csv.DictReader(open(csvFile))
self.df = pd.read_csv(csvFile)
def kMeans(self, X, K, maxIters = 7):
#np.seterr(divide='ignore', invalid='ignore')
#print (np.random.choice(np.arange(len(X)), K))
cents = np.random.choice(np.arange(len(X)), K)
centroids = X[cents, :]
clusterMembers=[]
for i in range(maxIters):
# Cluster Assignment step
C = np.array([np.argmin([np.dot(x_i-y_k, x_i-y_k) for y_k in centroids]) for x_i in X])
# Move centroids step
centroids = [X[C == k].mean(axis = 0) for k in range(K)]
if i==(maxIters-1):
clusterMembers = [X[C == k].tolist() for k in range(K)]
return np.array(centroids) , C, clusterMembers
def kmeansOf(self, x1, x2, K=3):
#X = np.vstack((data1,np.vstack((data2,data3))))
X = np.vstack((x1,x2)).astype(np.float64)
print ("X.type", X.dtype)
centroids, C, CMembers = self.kMeans(X.T, K)
extents = [
[np.min(x1), np.max(x1)],
[np.min(x2), np.max(x2)]
]
return {"extents":extents, "centroids":centroids.tolist(), "C":C.tolist(), "members":CMembers}
def kmeansOfVars(self, var1n, var2n, K):
x1 = np.array(self.df[var1n].values).astype(np.float64)
x2 = np.array(self.df[var2n].values).astype(np.float64)
return self.kmeansOf(x1,x2, K)
def test(self):
result= self.kmeansOfVars("f1", "f2", 3)
print (result)
|
{"hexsha": "49017e15b4bb53e3727fa01344c68fc1443af428", "size": 4896, "ext": "py", "lang": "Python", "max_stars_repo_path": "second-round-intreview/parcoord-brushing/backend/src/code/parallelcoord/ParCoordKmeans.py", "max_stars_repo_name": "halilagin/parcoord-brushing", "max_stars_repo_head_hexsha": "71dde2d9b24038afb51f80fa43ea45c21f459238", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "second-round-intreview/parcoord-brushing/backend/src/code/parallelcoord/ParCoordKmeans.py", "max_issues_repo_name": "halilagin/parcoord-brushing", "max_issues_repo_head_hexsha": "71dde2d9b24038afb51f80fa43ea45c21f459238", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "second-round-intreview/parcoord-brushing/backend/src/code/parallelcoord/ParCoordKmeans.py", "max_forks_repo_name": "halilagin/parcoord-brushing", "max_forks_repo_head_hexsha": "71dde2d9b24038afb51f80fa43ea45c21f459238", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9873417722, "max_line_length": 131, "alphanum_fraction": 0.6178513072, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 1290}
|
# How does a pendulum move?
> Grades: https://docs.google.com/spreadsheets/d/1X8sAHmrIErYgoAjTocclAFS0Mx_EA8BDWlp6DgYyBVo/edit?usp=sharing
> Any system, whether mechanical, electrical, pneumatic, etc., is said to be a harmonic oscillator if, when released away from its equilibrium position, it returns toward it describing sinusoidal oscillations, or damped sinusoidal oscillations, around that stable position.
- https://es.wikipedia.org/wiki/Oscilador_armónico
References:
- http://matplotlib.org
- https://seaborn.pydata.org
- http://www.numpy.org
- http://ipywidgets.readthedocs.io/en/latest/index.html
**This is really the study of oscillations.**
___
```python
from IPython.display import YouTubeVideo
YouTubeVideo('k5yTVHr6V14')
```
The simplest systems to study in oscillations are the `mass-spring` system and the `simple pendulum`.
\begin{align}
\frac{d^2 x}{dt^2} + \omega_{0}^2 x &= 0, \quad \omega_{0} = \sqrt{\frac{k}{m}}\notag\\
\frac{d^2 \theta}{dt^2} + \omega_{0}^{2}\, \theta &= 0, \quad\mbox{donde}\quad \omega_{0}^2 = \frac{g}{l}
\end{align}
___
## The `mass-spring` system
The solution of this `mass-spring` system is explained in terms of Newton's second law. For this case, the mass remains constant and we only consider the $x$ direction. Then,
\begin{equation}
F = m \frac{d^2x}{dt^2}.
\end{equation}
What is the force? **Hooke's law!**
\begin{equation}
F = -k x, \quad k > 0.
\end{equation}
We see that the force opposes the displacement and its magnitude is proportional to it, and $k$ is the elastic (restoring) constant of the spring.
Then, a model of the `mass-spring` system is described by the following **differential equation**:
\begin{equation}
\frac{d^2x}{dt^2} + \frac{k}{m}x = 0,
\end{equation}
whose solution is written as
\begin{equation}
x(t) = A \cos(\omega_{0} t) + B \sin(\omega_{0} t)
\end{equation}
And its first derivative (the velocity) would be
\begin{equation}
\frac{dx(t)}{dt} = \omega_{0}[- A \sin(\omega_{0} t) + B\cos(\omega_{0}t)]
\end{equation}
<font color=red> See on the board what "solution of the differential equation" means.</font>
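As a quick numerical sanity check of this solution (a sketch; it assumes `scipy` is available, which is not used elsewhere in this notebook), we can integrate the differential equation and compare against the analytic expression:

```python
# Sketch: integrate x'' + w0^2 x = 0 numerically and compare with
# x(t) = A cos(w0 t) + B sin(w0 t), which has x(0) = A and x'(0) = w0 B.
import numpy as np
from scipy.integrate import solve_ivp

A, B, w0 = .5, .1, .5
def rhs(t, y):                       # y = [x, dx/dt]
    return [y[1], -w0**2 * y[0]]

t_eval = np.linspace(0, 50, 200)
sol = solve_ivp(rhs, (0, 50), [A, w0 * B], t_eval=t_eval, rtol=1e-9, atol=1e-9)
x_analytic = A * np.cos(w0 * t_eval) + B * np.sin(w0 * t_eval)
print(np.abs(sol.y[0] - x_analytic).max())   # should be tiny (~1e-7 or less)
```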
### **What do the plots of $x$ vs $t$ and $\frac{dx}{dt}$ vs $t$ look like?**
_This instruction makes the plots appear inside this notebook environment._
```python
import matplotlib.pyplot as plt
%matplotlib inline
```
```python
import matplotlib as mpl
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
```
```python
import numpy as np
```
```python
# Define the functions to plot
A, B, w0 = .5, .1, .5                                 # Parameters
t = np.linspace(0, 50, 100)                           # Time vector from 0 to 50 with 100 points
x = A * np.cos(w0 * t) + B * np.sin(w0 * t)           # Position
dx = w0 * (-A * np.sin(w0 * t) + B * np.cos(w0 * t))  # Velocity
# Plot
plt.figure(figsize=(7, 4))                            # Figure window with a given size
plt.plot(t, x, '-', lw=1, ms=1,
         label='$x(t)$')                              # Legend label
plt.plot(t, dx, 'ro-', lw=1, ms=4,
         label=r'$\dot{x}(t)$')
plt.legend(loc='best')
plt.xlabel('$t$', fontsize=14)                        # x-axis label
plt.show()
```
```python
# Colores, etiquetas y otros formatos
plt.figure(figsize = (7, 4))
plt.scatter(t, x, lw=0, c = 'red',
label = '$x(t)$') # Gráfica con puntos
plt.plot(t, x, 'r-', lw = 1) # Grafica normal
plt.scatter(t, dx, lw = 0, c = 'b',
label = r'$\frac{dx}{dt}$') # Con la r, los backslash se tratan como un literal, no como un escape
plt.plot(t, dx, 'b-', lw = 1)
plt.xlabel('$t$', fontsize = 20)
plt.legend(loc = 'best') # Leyenda con las etiquetas de las gráficas
plt.show()
```
And if we consider a set of oscillation frequencies, then
```python
frecuencias = np.array([.1, .2, .5, .6])  # Vector of different frequencies
plt.figure(figsize = (7, 4))              # Figure window with a given size
# Plot for each frequency
for w0 in frecuencias:
    x = A * np.cos(w0 * t) + B * np.sin(w0 * t)
    plt.plot(t, x, 'D-', label=f'Frequency $w_0$={w0}')
plt.xlabel('$t$', fontsize = 16)          # x-axis label
plt.ylabel('$x(t)$', fontsize = 16)       # y-axis label
plt.title('Oscillations', fontsize = 16)  # Plot title
plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
plt.show()
```
If we want to manipulate things a bit more, we make use of the following:
```python
from ipywidgets import *
```
```python
def masa_resorte(t = 0):
    A, B, w0 = .5, .1, .5              # Parameters
    x = A*np.cos(w0*t)+B*np.sin(w0*t)  # Position
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(x, [0], 'ko', ms = 10)
ax.set_xlim(xmin = -0.6, xmax = .6)
ax.axvline(x=0, color = 'r')
ax.axhline(y=0, color = 'grey', lw = 1)
fig.canvas.draw()
```
```python
interact(masa_resorte, t = (0, 50,.01));
```
The option above will generally be slow, so it is advisable to use `interact_manual`.
```python
def masa_resorte(t = 0):
    A, B, w0 = .5, .1, .5              # Parameters
    x = A*np.cos(w0*t)+B*np.sin(w0*t)  # Position
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(x, [0], 'ko', ms = 10)
ax.set_xlim(xmin = -0.6, xmax = .6)
ax.axvline(x=0, color = 'r')
ax.axhline(y=0, color = 'grey', lw = 1)
fig.canvas.draw()
```
```python
interact_manual(masa_resorte, t = (0, 50,.01));
```
___
## Simple pendulum
Now, if we turn our attention to the motion of a simple pendulum _(small oscillations)_, the differential equation to solve has the same form:
\begin{equation}
\frac{d^2 \theta}{dt^2} + \omega_{0}^{2}\, \theta = 0, \quad\mbox{donde}\quad \omega_{0}^2 = \frac{g}{l}.
\end{equation}
The most evident difference is how we have defined $\omega_{0}$. This means that
\begin{equation}
\theta(t) = A\cos(\omega_{0} t) + B\sin(\omega_{0}t)
\end{equation}
If we plot the equation above, we will find behavior very similar to the one already discussed. That is why we will now look at the motion in the $xy$ plane. That is,
\begin{align}
x &= l \sin(\theta), \quad
y = -l \cos(\theta)
\end{align}
```python
# We can define a function that returns theta given the parameters and the time
def theta_t(a, b, g, l, t):
omega_0 = np.sqrt(g / l)
return a * np.cos(omega_0 * t) + b * np.sin(omega_0 * t)
```
```python
# Interactive plot of the pendulum
def pendulo_simple(t = 0):
fig = plt.figure(figsize = (5,5))
ax = fig.add_subplot(1, 1, 1)
l = 2
g = 9.8
x = l * np.sin(theta_t(.4, .6, g, l, t))
y = - l * np.cos(theta_t(.4, .6, g, l, t))
ax.plot(x, y, 'ko', ms = 10)
ax.plot([0], [0], 'rD')
ax.plot([0, x ], [0, y], 'k-', lw = 1)
ax.set_xlim(xmin = -2.2, xmax = 2.2)
ax.set_ylim(ymin = -2.2, ymax = .2)
fig.canvas.draw()
```
```python
interact_manual(pendulo_simple, t = (0, 10,.01));
```
### Initial conditions
What actually has to be solved is
\begin{equation}
\theta(t) = \theta(0) \cos(\omega_{0} t) + \frac{\dot{\theta}(0)}{\omega_{0}} \sin(\omega_{0} t)
\end{equation}
> **Activity.** Modify the previous program to incorporate the initial conditions.
```python
# Solution:
def theta_t(theta0, dtheta0, g, l, t):
omega_0 = np.sqrt(g / l)
a = theta0
b = dtheta0 / omega_0
return a * np.cos(omega_0 * t) + b * np.sin(omega_0 * t)
```
```python
def pendulo_simple(t = 0):
fig = plt.figure(figsize = (5,5))
ax = fig.add_subplot(1, 1, 1)
l = 2
g = 9.8
theta0 = np.pi / 4
dtheta0 = 0
    x = l * np.sin(theta_t(theta0, dtheta0, g, l, t))
    y = - l * np.cos(theta_t(theta0, dtheta0, g, l, t))
ax.plot(x, y, 'ko', ms = 10)
ax.plot([0], [0], 'rD')
ax.plot([0, x ], [0, y], 'k-', lw = 1)
ax.set_xlim(xmin = -2.2, xmax = 2.2)
ax.set_ylim(ymin = -2.2, ymax = .2)
fig.canvas.draw()
interact_manual(pendulo_simple, t = (0, 10,.01));
```
### Phase plane $(x, \frac{dx}{dt})$
The position and velocity for the `mass-spring` system are written as:
\begin{align}
x(t) &= x(0) \cos(\omega_{0} t) + \frac{\dot{x}(0)}{\omega_{0}} \sin(\omega_{0} t)\\
\dot{x}(t) &= -\omega_{0}x(0) \sin(\omega_{0} t) + \dot{x}(0)\cos(\omega_{0}t)
\end{align}
```python
k = 3     # elastic constant [N/m]
m = 1 # [kg]
omega_0 = np.sqrt(k/m)
x_0 = .5
dx_0 = .1
```
```python
t = np.linspace(0, 15, 300)
```
```python
x_t = x_0 * np.cos(omega_0 * t) + (dx_0 / omega_0) * np.sin(omega_0 * t)
dx_t = -omega_0 * x_0 * np.sin(omega_0 * t) + dx_0 * np.cos(omega_0 * t)
```
```python
plt.figure(figsize = (7, 4))
plt.plot(t, x_t, label = '$x(t)$', lw = 4)
#plt.plot(t, dx_t, label = '$\dot{x}(t)$', lw = 1)
plt.plot(t, dx_t/omega_0, label = r'$\dot{x}(t)/\omega_0$', lw = 4) # Show that after scaling, the amplitude matches x(t)
plt.legend(loc='center left', bbox_to_anchor=(1.01, 0.5), prop={'size': 14})
plt.xlabel('$t$', fontsize = 18)
plt.show()
```
```python
plt.figure(figsize = (5, 5))
plt.plot(x_t, dx_t/omega_0, 'ro', ms = 2)
plt.xlabel('$x(t)$', fontsize = 18)
plt.ylabel(r'$\dot{x}(t)/\omega_0$', fontsize = 18)
plt.show()
```
```python
plt.figure(figsize = (5, 5))
plt.scatter(x_t, dx_t/omega_0, cmap = 'viridis', c = dx_t, s = 8, lw = 0)
plt.xlabel('$x(t)$', fontsize = 18)
plt.ylabel(r'$\dot{x}(t)/\omega_0$', fontsize = 18)
plt.show()
```
#### Multiple initial conditions
```python
k = 3     # elastic constant [N/m]
m = 1 # [kg]
omega_0 = np.sqrt(k/m)
```
```python
t = np.linspace(0, 50, 50)
```
```python
x_0s = np.array([.7, .5, .25, .1])
dx_0s = np.array([.2, .1, .05, .01])
cmaps = np.array(['viridis', 'inferno', 'magma', 'plasma'])
```
```python
plt.figure(figsize = (6, 6))
for indx, x_0 in enumerate(x_0s):
x_t = x_0 *np.cos(omega_0 *t) + (dx_0s[indx]/omega_0) * np.sin(omega_0 *t)
dx_t = -omega_0 * x_0 * np.sin(omega_0 * t) + dx_0s[indx] * np.cos(omega_0 * t)
plt.scatter(x_t, dx_t/omega_0, cmap = cmaps[indx],
c = dx_t, s = 10,
lw = 0)
plt.xlabel('$x(t)$', fontsize = 18)
plt.ylabel(r'$\dot{x}(t)/\omega_0$', fontsize = 18)
#plt.legend(loc='center left', bbox_to_anchor=(1.05, 0.5))
```
Trajectories of the simple harmonic oscillator in the phase space $(x,\, \dot{x}\,/\omega_0)$ for different values of the energy.
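The closed curves reflect conservation of energy. As a quick check (a sketch reusing the variables `k`, `m`, `omega_0`, `t`, `x_0s` and `dx_0s` defined above), the energy $E = \frac{1}{2}m\dot{x}^2 + \frac{1}{2}kx^2$ is constant along each trajectory:

```python
# Sketch: E = (1/2) m dx^2 + (1/2) k x^2 should be constant on each orbit.
for indx, x_0 in enumerate(x_0s):
    x_t = x_0 * np.cos(omega_0 * t) + (dx_0s[indx] / omega_0) * np.sin(omega_0 * t)
    dx_t = -omega_0 * x_0 * np.sin(omega_0 * t) + dx_0s[indx] * np.cos(omega_0 * t)
    E = 0.5 * m * dx_t**2 + 0.5 * k * x_t**2
    print(f'x_0={x_0}: E_min={E.min():.6f}, E_max={E.max():.6f}')  # min == max up to rounding
```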
# Announcements
## 1. Final exam: posted on Wednesday, May 5; due on Friday, May 7.
## 2. Third-module project due Friday, May 14.
## 3. Quiz next class
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Lázaro Alonso. Modified by Esteban Jiménez Rodríguez
</footer>
|
{"hexsha": "df61ce55d3084f4b4cf4798e2d45b8f1d441b003", "size": 279998, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Modulo3/Clase15_OsciladorArmonico.ipynb", "max_stars_repo_name": "DiegoBAL23/simmatp2021", "max_stars_repo_head_hexsha": "238e88b58cf0481de444ffd14a8b46dbdfae6066", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Modulo3/Clase15_OsciladorArmonico.ipynb", "max_issues_repo_name": "DiegoBAL23/simmatp2021", "max_issues_repo_head_hexsha": "238e88b58cf0481de444ffd14a8b46dbdfae6066", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Modulo3/Clase15_OsciladorArmonico.ipynb", "max_forks_repo_name": "DiegoBAL23/simmatp2021", "max_forks_repo_head_hexsha": "238e88b58cf0481de444ffd14a8b46dbdfae6066", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 338.5707376058, "max_line_length": 72896, "alphanum_fraction": 0.9296923549, "converted": true, "num_tokens": 4070}
|
from numpy.testing import assert_array_equal
import numpy as np
from tadataka.depth import compute_depth_mask
def test_compute_depth_mask():
depths = np.array([
[-1, 4, 2, 3, -4],
[-8, 5, 1, 0, 2]
])
assert_array_equal(
compute_depth_mask(depths, min_depth=0.0),
[False, True, True, False, False]
)
assert_array_equal(
compute_depth_mask(depths, min_depth=1.0),
[False, True, False, False, False]
)
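
# A minimal reference implementation consistent with the test above (a sketch,
# not tadataka's actual code): a pixel is kept only when its depth is strictly
# greater than min_depth in every view, i.e. a logical AND over the view axis.
def _reference_compute_depth_mask(depths, min_depth=0.0):
    return np.all(depths > min_depth, axis=0)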
|
{"hexsha": "ac2bda56c04edc0618fe3ff1558ac96c7719e099", "size": 475, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_depth.py", "max_stars_repo_name": "IshitaTakeshi/Tadataka", "max_stars_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2019-11-15T16:30:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T15:18:54.000Z", "max_issues_repo_path": "tests/test_depth.py", "max_issues_repo_name": "IshitaTakeshi/Tadataka", "max_issues_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-02-28T08:28:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-04-07T04:47:12.000Z", "max_forks_repo_path": "tests/test_depth.py", "max_forks_repo_name": "IshitaTakeshi/Tadataka", "max_forks_repo_head_hexsha": "852c7afb904503005e51884408e1492ef0be836f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-26T13:59:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-26T13:59:40.000Z", "avg_line_length": 22.619047619, "max_line_length": 50, "alphanum_fraction": 0.6273684211, "include": true, "reason": "import numpy,from numpy", "num_tokens": 135}
|
import pathlib
import shutil
import numpy as np
from text_recognizer.datasets.emnist_lines import EmnistLinesDataset
import text_recognizer.util as util
SUPPORT_DIRNAME = pathlib.Path(__file__).parents[0].resolve() / 'emnist_lines'
def create_emnist_lines_support_files():
shutil.rmtree(SUPPORT_DIRNAME, ignore_errors=True)
SUPPORT_DIRNAME.mkdir()
dataset = EmnistLinesDataset()
dataset.load_or_generate_data()
for ind in [0, 1, 3]:
image = dataset.x_test[ind]
print(image.sum(), image.dtype)
label = ''.join(dataset.mapping[label] for label in np.argmax(dataset.y_test[ind], axis=-1).flatten()).strip(' _')
print(label)
util.write_image(image, str(SUPPORT_DIRNAME / f'{label}.png'))
if __name__ == '__main__':
create_emnist_lines_support_files()
|
{"hexsha": "be342a8bbc96def54ceff5970ed2eaabc92e0627", "size": 822, "ext": "py", "lang": "Python", "max_stars_repo_path": "lab6_sln/text_recognizer/tests/support/create_emnist_lines_support_files.py", "max_stars_repo_name": "sergeyk/fsdl-text-recognizer-project", "max_stars_repo_head_hexsha": "8083e181f830f9f493f15b9e8c85eac8784b7d5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab6_sln/text_recognizer/tests/support/create_emnist_lines_support_files.py", "max_issues_repo_name": "sergeyk/fsdl-text-recognizer-project", "max_issues_repo_head_hexsha": "8083e181f830f9f493f15b9e8c85eac8784b7d5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab6_sln/text_recognizer/tests/support/create_emnist_lines_support_files.py", "max_forks_repo_name": "sergeyk/fsdl-text-recognizer-project", "max_forks_repo_head_hexsha": "8083e181f830f9f493f15b9e8c85eac8784b7d5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5161290323, "max_line_length": 122, "alphanum_fraction": 0.7189781022, "include": true, "reason": "import numpy", "num_tokens": 194}
|
[STATEMENT]
lemma convol_apply: "BNF_Def.convol f g x = (f x, g x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. BNF_Def.convol f g x = (f x, g x)
[PROOF STEP]
unfolding convol_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f x, g x) = (f x, g x)
[PROOF STEP]
..
|
{"llama_tokens": 137, "file": null, "length": 2}
|
[STATEMENT]
lemma (in ring) indexed_const_index_free: "index_free (indexed_const k) i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. index_free (indexed_const k) i
[PROOF STEP]
unfolding index_free_def indexed_const_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>m. i \<in># m \<longrightarrow> (if m = {#} then k else \<zero>) = \<zero>
[PROOF STEP]
by auto
|
{"llama_tokens": 154, "file": null, "length": 2}
|
import argparse
import csv
import random
import sys
from pathlib import Path
import functools
import ipdb
import numpy as np
import torch
import torch.nn as nn
from box import Box
from tqdm import tqdm
from BERT.dataset import create_data_loader
from BERT.train import Model
from BERT.common.losses import CrossEntropyLoss
from BERT.common.metrics import Accuracy
from BERT.common.utils import load_pkl
from pytorch_pretrained_bert import BertTokenizer
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_paths', nargs='+', type=Path, help='Model checkpoint paths')
parser.add_argument('prediction_dir', type=Path, help='Saved predictions dir')
parser.add_argument('--batch_size', type=int, help='Inference batch size')
args = parser.parse_args()
return vars(args)
def input_pad_to_len(words, padded_word_len, word_padding=0):
"""Pad words to 'padded_word_len' with padding if 'len(words) < padded_word_len'.
Example:
        input_pad_to_len([1, 2, 3], 5, -1) == [1, 2, 3, -1, -1]
Args:
words (list): List of the word index.
padded_word_len (int): The length for padding a batch of sequences to the same length.
word_padding (int): The index used to pad.
"""
    if len(words) < padded_word_len:
        words += [word_padding] * (padded_word_len - len(words))
    elif len(words) > padded_word_len:
        words = words[:padded_word_len]
    return words
def main(ckpt_paths, prediction_dir, batch_size):
models_logits = []
for i, ckpt_path in enumerate(ckpt_paths):
model_dir = ckpt_path.parent.parent
try:
cfg = Box.from_yaml(filename=model_dir / 'config.yaml')
except FileNotFoundError:
print('[!] Model directory({}) must contain config.yaml'.format(model_dir))
exit(1)
device = torch.device('{}:{}'.format(cfg.device.type, cfg.device.ordinal))
random.seed(cfg.random_seed)
np.random.seed(cfg.random_seed)
torch.manual_seed(cfg.random_seed)
torch.cuda.manual_seed_all(cfg.random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if i == 0:
if not prediction_dir.exists():
prediction_dir.mkdir()
print('[-] Directory {} created\n'.format(prediction_dir))
dataset_dir = Path(cfg.dataset_dir)
print('[*] Loading test dataset from {}'.format(dataset_dir))
test_dataset_path = dataset_dir / 'test.pkl'
test_dataset = load_pkl(test_dataset_path)
print('[*] Creating test data loader')
if batch_size:
cfg.data_loader.batch_size = batch_size
test_data_loader = create_data_loader(test_dataset, **cfg.data_loader, shuffle=False)
print('\n[-] Model checkpoint: {}'.format(ckpt_path))
print('[*] Creating model')
model = Model(device, cfg.net, cfg.optim, t_total=0)
model.load_state(ckpt_path)
print('[*] Creating tokenizer')
tokenizer = BertTokenizer.from_pretrained(cfg.net.bert_pretrained_model_name)
logits = get_model_logits(device, test_data_loader, model, tokenizer, mode='test')
models_logits.append(logits)
Ids, predictions = ensemble_predict(device, test_data_loader, models_logits, mode='test')
save_predictions(Ids, predictions, prediction_dir / 'predict.csv')
def get_model_logits(device, data_loader, model, tokenizer, mode='test'):
assert mode in ['dev', 'test']
model.set_eval()
if mode == 'dev':
loss = CrossEntropyLoss(device, 'logits', 'label')
metric = Accuracy(device, 'label')
with torch.no_grad():
bar = tqdm(data_loader, desc='[Get Model Logits]', leave=False, dynamic_ncols=True)
logits_list = []
for batch in bar:
text_word = []
for text in batch['text_orig']:
text = ' '.join(['[CLS]'] + text)
tokens = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
text_word.append(indexed_tokens)
# padding
padded_word_len = max(map(len, text_word))
text_word = torch.tensor(list(map(functools.partial(input_pad_to_len,
padded_word_len=padded_word_len),
text_word)),
dtype=torch.long)
# attention mask
zeros = torch.zeros_like(text_word)
ones = torch.ones_like(text_word)
attention_mask = torch.where(text_word == 0, zeros, ones)
logits = model(text_word.to(device=device), attention_mask=attention_mask.to(device=device))
logits_list.append(logits)
label = logits.max(dim=1)[1]
if mode == 'dev':
output = {'logits': logits, 'label': label}
loss.update(output, batch)
metric.update(output, batch)
bar.set_postfix(**{loss.name: loss.value, metric.name: metric.value})
bar.close()
if mode == 'dev':
print('[-] Dev {}: {}; {}: {}\n'.format(loss.name, loss.value, metric.name, metric.value))
return torch.cat(logits_list, dim=0)
def ensemble_predict(device, data_loader, models_logits, mode='test'):
if mode == 'dev':
loss = CrossEntropyLoss(device, 'logits', 'label')
metric = Accuracy(device, 'label')
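    # Ensemble by averaging the per-model logits, then taking the argmax class.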
ensemble_logits = torch.stack(models_logits, dim=0).mean(dim=0)
ensemble_label = ensemble_logits.max(dim=1)[1]
Ids = []
predictions = []
bar = tqdm(data_loader, desc='[Ensemble Predict]', leave=False, dynamic_ncols=True)
batch_size = data_loader.batch_size
for i, batch in enumerate(bar):
Ids += batch['Id']
logits = ensemble_logits[i * batch_size : (i + 1) * batch_size]
label = ensemble_label[i * batch_size : (i + 1) * batch_size]
predictions += label.tolist()
if mode == 'dev':
output = {'logits': logits, 'label': label}
loss.update(output, batch)
metric.update(output, batch)
bar.set_postfix(**{loss.name: loss.value, metric.name: metric.value})
bar.close()
if mode == 'dev':
print('[-] Dev {}: {}; {}: {}\n'.format(loss.name, loss.value, metric.name, metric.value))
return Ids, predictions
def save_predictions(Ids, predictions, output_path):
with output_path.open(mode='w') as f:
writer = csv.DictWriter(f, fieldnames=['Id', 'label'])
writer.writeheader()
writer.writerows(
[{'Id': Id, 'label': p + 1} for Id, p in zip(Ids, predictions)])
print('\n[-] Output saved to {}'.format(output_path))
if __name__ == "__main__":
with ipdb.launch_ipdb_on_exception():
sys.breakpointhook = ipdb.set_trace
kwargs = parse_args()
main(**kwargs)
|
{"hexsha": "4aee44294099ecdf03678447434cc7149b3d1216", "size": 7210, "ext": "py", "lang": "Python", "max_stars_repo_path": "part2/BERT/predict.py", "max_stars_repo_name": "peter850706/Contextual-embeddings-for-sequence-classification", "max_stars_repo_head_hexsha": "e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "part2/BERT/predict.py", "max_issues_repo_name": "peter850706/Contextual-embeddings-for-sequence-classification", "max_issues_repo_head_hexsha": "e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "part2/BERT/predict.py", "max_forks_repo_name": "peter850706/Contextual-embeddings-for-sequence-classification", "max_forks_repo_head_hexsha": "e26ba68f6aa30ec07319dcd37a04a8f56e07d7b0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3989071038, "max_line_length": 104, "alphanum_fraction": 0.6083217753, "include": true, "reason": "import numpy", "num_tokens": 1593}
|
************************************************************************
*
* Subroutine MLELOAD2 Called by: MLELOAD
*
*
* Estimate loads using MLE. Bias correction is done by the
* method of Bradu and Mundlak(1970). An estimate of the variance
* of the load is obtained by the method given in Likes (1980).
*
* The function PHI given by Likes (1980) is used rather than the
* function GM given by Finney (1941) as coded by Cohn et al.
* (1989). PHI takes longer to compute but can handle larger
* arguments, so it is less likely that the computation of the
* variance will fail. Likes (1980) gives the relation:
*
* (DF+1)*A
* PHI[A*W,DF]=GM[----------*W]
* 2*DF**2
*
* To use GM rather than PHI see C. Crawford's original LOADEST.
*
* PHI is denoted MVUEPHI below, to distinguish it from the
* function PHI used to calculate the probability density
* function of the standard normal distribution.
*
* local vars
* ----------
* DF2 degrees of freedom of the regression, divided by 2
* LTE log of transport (untransformed value from rating curve)
* TE uncorrected estimated population loads (transport est)
* V XO * XTXINV * XT for each population value
*
************************************************************************
SUBROUTINE MLELOAD2(NPAR,XLEST,NUMOBSE,NOBSC,PARMLE,RVARMLE,
& XTXINV,SEOPT,LOADMLE,VARMLE,PLOADMLE,NOBSE)
*
* dimensional parameters
*
INCLUDE 'fmodules.inc'
*
* subroutine arguments
*
INTEGER*4 NPAR,NUMOBSE,SEOPT,NOBSC,NOBSE
DOUBLE PRECISION RVARMLE,LOADMLE,VARMLE
DOUBLE PRECISION PARMLE(*),PLOADMLE(*)
DOUBLE PRECISION XLEST(NOBSE,*),XTXINV(MAXPARMS,*)
*
* local vars
*
INTEGER*4 I
DOUBLE PRECISION DF2,LTE(NUMOBSE),TE(NUMOBSE),V(NUMOBSE)
*
* function declarations
*
DOUBLE PRECISION MVUEPHI,PRED
*
* calculate the estimate of the mean load
*
DF2 = DBLE(NOBSC-NPAR)/2.D0
LOADMLE = 0.D0
DO 10 I=1,NUMOBSE
CALL MATMLT(V(I),XLEST,XTXINV,NPAR,I,I,NOBSE)
LTE(I) = PRED(NPAR,NOBSE,I,XLEST,PARMLE)
TE(I) = DEXP(LTE(I))
PLOADMLE(I) = TE(I)*MVUEPHI(((1.D0-V(I))*DF2*RVARMLE)/2.D0,DF2)
LOADMLE = LOADMLE + PLOADMLE(I)
10 CONTINUE
LOADMLE = LOADMLE/DBLE(NUMOBSE)
*
* estimate the variance of the mean load
*
IF ((SEOPT .EQ. 2) .OR. (SEOPT .EQ. 3))
& CALL MLEVAR(NPAR,XLEST,NUMOBSE,RVARMLE,XTXINV,VARMLE,DF2,LTE,
& TE,V,NOBSE)
RETURN
END
|
{"hexsha": "aaba927c3707dd27dbfe4cdfcb45a4a1f61f2143", "size": 2781, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "codeNexampls/[loadest]/loadsrc/loadest/source_USGS/mleload2.f", "max_stars_repo_name": "lthiamodelers/baseflow-coefficients", "max_stars_repo_head_hexsha": "183fef49548fa2e1bf0bb8cff57e96e75d629760", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codeNexampls/[loadest]/loadsrc/loadest/source_USGS/mleload2.f", "max_issues_repo_name": "lthiamodelers/baseflow-coefficients", "max_issues_repo_head_hexsha": "183fef49548fa2e1bf0bb8cff57e96e75d629760", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codeNexampls/[loadest]/loadsrc/loadest/source_USGS/mleload2.f", "max_forks_repo_name": "lthiamodelers/baseflow-coefficients", "max_forks_repo_head_hexsha": "183fef49548fa2e1bf0bb8cff57e96e75d629760", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6538461538, "max_line_length": 73, "alphanum_fraction": 0.5551959727, "num_tokens": 779}
|
# Import Flask
from flask import Flask, jsonify
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
import datetime as dt
import numpy as np
import pandas as pd
# set up database
engine = create_engine('sqlite:///Resources/hawaii.sqlite')
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create session from Python to the DB
session = Session(engine)
first_date = '2016-08-23'
last_date = '2017-08-23'
most_active_station = 'USC00519281'
# Create an app
app = Flask(__name__)
# Define static routes
@app.route("/")
def home():
return (
f"Welcome to Hawaii's Climate Data<br/>"
f"<br/>"
f"Available Routes:<br/>"
f"/api/v1.0/precipitation = Precipitation data between {first_date} and {last_date}.<br/>"
f"/api/v1.0/stations = Stations in dataset.<br/>"
f"/api/v1.0/tobs = Temperature observations from the most active station ({most_active_station}) \n between {first_date} and {last_date}.<br/>"
f"/api/v1.0/startdate* = Temperature observations from startdate* to most recent collection date.<br/>"
f"/api/v1.0/startdate*/enddate* = Temperature observations from startdate* to enddate*. <br/>"
f"<br/>"
f"*please format YYYY-MM-DD"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Convert the query results to a dictionary using date as the key and prcp as the value.
# Return the JSON representation of your dictionary.
prcp_result = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date >= first_date).all()
session.close()
prcp_dict = {}
for p in prcp_result:
prcp_dict[p[0]] = p[1]
return jsonify(prcp_dict)
@app.route("/api/v1.0/stations")
def stations():
# Return a JSON list of stations from the dataset
station_result = session.query(func.distinct(Station.station)).all()
session.close()
return jsonify(list(np.ravel(station_result)))
@app.route("/api/v1.0/tobs")
def tobs():
# Query the dates and temperature observations of the most active station for the last year of data.
# Return a JSON list of temperature observations (TOBS) for the previous year.
tobs_result = session.query(Measurement.date,Measurement.tobs).filter(Measurement.station == most_active_station)\
.filter(Measurement.date >= first_date).all()
session.close()
return jsonify(tobs_result)
@app.route("/api/v1.0/<start>")
def start(start):
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature
# for a given start or start-end range.
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
start_tobs_results = session.query(func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs))\
.filter(Measurement.date >= start).all()
session.close()
start_result_dict = {}
start_result_dict['minimum temperature (F)'] = start_tobs_results[0][0]
start_result_dict['average temperature (F)'] = round(start_tobs_results[0][1],2)
start_result_dict['maximum temperature (F)'] = start_tobs_results[0][2]
return jsonify(start_result_dict)
@app.route("/api/v1.0/<start>/<end>")
def startend(start,end):
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates
# between the start and end date inclusive
start_end_tobs_results = session.query(func.min(Measurement.tobs),func.avg(Measurement.tobs),func.max(Measurement.tobs))\
.filter(Measurement.date >= start).filter(Measurement.date <= end).all()
session.close()
start_end_result_dict = {}
start_end_result_dict['minimum temperature (F)'] = start_end_tobs_results[0][0]
start_end_result_dict['average temperature (F)'] = round(start_end_tobs_results[0][1],2)
start_end_result_dict['maximum temperature (F)'] = start_end_tobs_results[0][2]
return jsonify(start_end_result_dict)
# Define main behavior
if __name__ == "__main__":
app.run(debug=True)
|
{"hexsha": "c1246cae51a213a086ec7fc42e5c8f3dd6d8de6e", "size": 4363, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "etarakci/sqlalchemy-challenge", "max_stars_repo_head_hexsha": "aba9b7b9215755f9f751f99dd6979e89e41a3851", "max_stars_repo_licenses": ["ADSL"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "etarakci/sqlalchemy-challenge", "max_issues_repo_head_hexsha": "aba9b7b9215755f9f751f99dd6979e89e41a3851", "max_issues_repo_licenses": ["ADSL"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "etarakci/sqlalchemy-challenge", "max_forks_repo_head_hexsha": "aba9b7b9215755f9f751f99dd6979e89e41a3851", "max_forks_repo_licenses": ["ADSL"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0275229358, "max_line_length": 151, "alphanum_fraction": 0.7178546871, "include": true, "reason": "import numpy", "num_tokens": 1070}
|
#!/usr/bin/env python
import rogata_library as rgt
import cv2
import cv2.aruco as aruco
import numpy as np
import sys
def calibrate_colors(image):
"""Utility to calibrate the colors for contour detection
Allows the visual calibration of contours which can be saved by pressing the s key
Colors are defined in HSV color space. For each Value H, S and V the median value as well as the acceptable range can be defined.
Additionally the ID of a aruco marker used to specify the wanted contour can be specified.
Since this can sometimes lead to the contour being the outline of the marker, the minimum hole size in pixels can be specified.
"""
def nothing(x):
pass
cv2.namedWindow("Test image")
cv2.createTrackbar("H","Test image",0,179,nothing)
cv2.createTrackbar("H range","Test image",0,50,nothing)
cv2.createTrackbar("S","Test image",0,255,nothing)
cv2.createTrackbar("S range","Test image",0,120,nothing)
cv2.createTrackbar("V","Test image",0,255,nothing)
cv2.createTrackbar("V range","Test image",0,120,nothing)
cv2.createTrackbar("Marker Id","Test image",0,120,nothing)
    height, width, channels = image.shape
    cv2.createTrackbar("Minimum contour size","Test image",0,height*width,nothing)
while(1):
mid_color = np.array([cv2.getTrackbarPos("H","Test image"),
cv2.getTrackbarPos("S","Test image"),
cv2.getTrackbarPos("V","Test image")])
step = np.array([cv2.getTrackbarPos("H range","Test image"),
cv2.getTrackbarPos("S range","Test image"),
cv2.getTrackbarPos("V range","Test image")])
marker_id = cv2.getTrackbarPos("Marker Id", "Test image")
min_size = cv2.getTrackbarPos('Minimum contour size','Test image')
min_value = np.zeros(3)
min_value[0] = -179
max_value = np.array([179,255,255])
lower_bound = np.clip(mid_color-step,min_value,max_value)
upper_bound = np.clip(mid_color+step,min_value,max_value)
used_img = image.copy()
hsv_img = cv2.cvtColor(used_img, cv2.COLOR_BGR2HSV)
find_contour = rgt.detect_area(hsv_img,lower_bound,upper_bound,marker_id,min_size,True)
natural_img = cv2.cvtColor(hsv_img,cv2.COLOR_HSV2BGR)
cv2.imshow("Test image",natural_img)
k = cv2.waitKey(1)
if k == 27:
break
if k == ord('s'):
print("Please Enter a File name:")
file_name = raw_input()
np.save(file_name,find_contour)
print("File ",file_name," saved.")
if __name__ == "__main__":
image = cv2.imread(sys.argv[1])
calibrate_colors(image)
|
{"hexsha": "21fcaeff19924a8fd805965dc42556cced7f3953", "size": 2797, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/calibrate_scene.py", "max_stars_repo_name": "liquidcronos/RoGaTa-Engine", "max_stars_repo_head_hexsha": "3704bbd85c9d07f180f7e7516e282468ac76b557", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-04T14:04:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-04T14:04:32.000Z", "max_issues_repo_path": "scripts/calibrate_scene.py", "max_issues_repo_name": "liquidcronos/RoGaTa-Engine", "max_issues_repo_head_hexsha": "3704bbd85c9d07f180f7e7516e282468ac76b557", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-03-01T16:49:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-13T15:41:01.000Z", "max_forks_repo_path": "scripts/calibrate_scene.py", "max_forks_repo_name": "liquidcronos/RoGaTa-Engine", "max_forks_repo_head_hexsha": "3704bbd85c9d07f180f7e7516e282468ac76b557", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3943661972, "max_line_length": 133, "alphanum_fraction": 0.6338934573, "include": true, "reason": "import numpy", "num_tokens": 705}
|
"""This is the actual code we use to score people's solutions
server-side. The interfaces here are not yet stable, but we include
them so that people can reproduce our scoring calculations
independently.
We correspondingly do not currently import this module.
"""
import numpy as np
import requests
import gym
def score_from_remote(url):
result = requests.get(url)
parsed = result.json()
episode_lengths = parsed['episode_lengths']
episode_rewards = parsed['episode_rewards']
episode_types = parsed.get('episode_types')
timestamps = parsed['timestamps']
# Handle legacy entries where initial_reset_timestamp wasn't set
initial_reset_timestamp = parsed.get('initial_reset_timestamp', timestamps[0])
env_id = parsed['env_id']
spec = gym.spec(env_id)
return score_from_merged(episode_lengths, episode_rewards, episode_types, timestamps, initial_reset_timestamp, spec.trials, spec.reward_threshold)
def score_from_local(directory):
"""Calculate score from a local results directory"""
results = gym.monitoring.monitor.load_results(directory)
# No scores yet saved
if results is None:
return None
episode_lengths = results['episode_lengths']
episode_rewards = results['episode_rewards']
episode_types = results['episode_types']
timestamps = results['timestamps']
initial_reset_timestamp = results['initial_reset_timestamp']
spec = gym.spec(results['env_info']['env_id'])
return score_from_merged(episode_lengths, episode_rewards, episode_types, timestamps, initial_reset_timestamp, spec.trials, spec.reward_threshold)
def score_from_merged(episode_lengths, episode_rewards, episode_types, timestamps, initial_reset_timestamp, trials, reward_threshold):
"""Method to calculate the score from merged monitor files. Scores
only a single environment; mostly legacy.
"""
if episode_types is not None:
# Select only the training episodes
        t_idx = np.where([e == 't' for e in episode_types])
episode_lengths = np.array(episode_lengths)[t_idx]
episode_rewards = np.array(episode_rewards)[t_idx]
timestamps = np.array(timestamps)[t_idx]
# Make sure everything is a float -- no pesky ints.
episode_rewards = np.array(episode_rewards, dtype='float64')
episode_t_value = timestep_t_value = mean = error = None
seconds_to_solve = seconds_in_total = None
if len(timestamps) > 0:
# This is: time from the first reset to the end of the last episode
seconds_in_total = timestamps[-1] - initial_reset_timestamp
if len(episode_rewards) >= trials:
means = running_mean(episode_rewards, trials)
if reward_threshold is not None:
# Compute t-value by finding the first index at or above
# the threshold. It comes out as a singleton tuple.
(indexes_above_threshold, ) = np.where(means >= reward_threshold)
if len(indexes_above_threshold) > 0:
# Grab the first episode index that is above the threshold value
episode_t_value = indexes_above_threshold[0]
# Find timestep corresponding to this episode
cumulative_timesteps = np.cumsum(np.insert(episode_lengths, 0, 0))
# Convert that into timesteps
timestep_t_value = cumulative_timesteps[episode_t_value]
# This is: time from the first reset to the end of the first solving episode
seconds_to_solve = timestamps[episode_t_value] - initial_reset_timestamp
# Find the window with the best mean
best_idx = np.argmax(means)
best_rewards = episode_rewards[best_idx:best_idx+trials]
mean = np.mean(best_rewards)
if trials == 1: # avoid NaN
error = 0.
else:
error = np.std(best_rewards) / (np.sqrt(trials) - 1)
return {
'episode_t_value': episode_t_value,
'timestep_t_value': timestep_t_value,
'mean': mean,
'error': error,
'number_episodes': len(episode_rewards),
'number_timesteps': sum(episode_lengths),
'seconds_to_solve': seconds_to_solve,
'seconds_in_total': seconds_in_total,
}
def benchmark_score_from_merged(benchmark, env_id, episode_lengths, episode_rewards, episode_types):
"""Method to calculate an environment's benchmark score from merged
monitor files.
"""
return benchmark.score(benchmark, env_id, episode_lengths, episode_rewards, episode_types)
def running_mean(x, N):
x = np.array(x, dtype='float64')
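    # Sliding-window mean via cumulative sums: with a 0 prepended,
    # cumsum[i + N] - cumsum[i] equals sum(x[i:i + N]), so the shifted
    # difference divided by N is the mean of each length-N window.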
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / N
def compute_graph_stats(episode_lengths, episode_rewards, timestamps, initial_reset_timestamp, buckets):
"""Method to compute the aggregates for the graphs."""
# Not a dependency of OpenAI Gym generally.
import scipy.stats
num_episodes = len(episode_lengths)
# Catch for if no files written which causes error with scipy.stats.binned_statistic
if num_episodes == 0:
return None
episode_rewards = np.array(episode_rewards)
episode_lengths = np.array(episode_lengths)
# The index of the start of each episode
x_timestep = np.cumsum(np.insert(episode_lengths, 0, 0))[:-1]
assert len(x_timestep) == num_episodes
# Delta since the beginning of time
x_seconds = [timestamp - initial_reset_timestamp for timestamp in timestamps]
# The index of each episode
x_episode = range(num_episodes)
# Calculate the appropriate x/y statistics
x_timestep_y_reward = scipy.stats.binned_statistic(x_timestep, episode_rewards, 'median', buckets)
x_timestep_y_length = scipy.stats.binned_statistic(x_timestep, episode_lengths, 'median', buckets)
x_episode_y_reward = scipy.stats.binned_statistic(x_episode, episode_rewards, 'median', buckets)
x_episode_y_length = scipy.stats.binned_statistic(x_episode, episode_lengths, 'median', buckets)
x_seconds_y_reward = scipy.stats.binned_statistic(x_seconds, episode_rewards, 'median', buckets)
x_seconds_y_length = scipy.stats.binned_statistic(x_seconds, episode_lengths, 'median', buckets)
return {
'initial_reset_timestamp': initial_reset_timestamp,
'x_timestep_y_reward': graphable_binned_statistic(x_timestep_y_reward),
'x_timestep_y_length': graphable_binned_statistic(x_timestep_y_length),
'x_episode_y_reward': graphable_binned_statistic(x_episode_y_reward),
'x_episode_y_length': graphable_binned_statistic(x_episode_y_length),
'x_seconds_y_length': graphable_binned_statistic(x_seconds_y_length),
'x_seconds_y_reward': graphable_binned_statistic(x_seconds_y_reward),
}
def graphable_binned_statistic(binned):
x = running_mean(binned.bin_edges, 2)
y = binned.statistic
assert len(x) == len(y)
# Get rid of nasty NaNs
valid = np.logical_not(np.isnan(x)) & np.logical_not(np.isnan(y))
x = x[valid]
y = y[valid]
return {
'x': x,
'y': y,
}
|
{"hexsha": "bc9edf5e1a40e3666cf199b44dd7218e26b7821d", "size": 7066, "ext": "py", "lang": "Python", "max_stars_repo_path": "gym/scoreboard/scoring.py", "max_stars_repo_name": "soochyboy/openaiattempt", "max_stars_repo_head_hexsha": "2933a95ec5cc7efd282ed793d5efd6af6f2ce00f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gym/scoreboard/scoring.py", "max_issues_repo_name": "soochyboy/openaiattempt", "max_issues_repo_head_hexsha": "2933a95ec5cc7efd282ed793d5efd6af6f2ce00f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gym/scoreboard/scoring.py", "max_forks_repo_name": "soochyboy/openaiattempt", "max_forks_repo_head_hexsha": "2933a95ec5cc7efd282ed793d5efd6af6f2ce00f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.8106508876, "max_line_length": 150, "alphanum_fraction": 0.7118596094, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1574}
|
import os
import tensorflow as tf
import datetime
import numpy as np
import pandas as pd
from tensorflow.keras.callbacks import ModelCheckpoint
import utils as ut
class ConvBlock:
def __init__(self, n_filters=64, filter_size=(3, 3), strides=(1, 1), padding='same', activation='elu', use_bn=True):
self.n_filters = n_filters
self.filter_size = filter_size
self.strides = strides
self.padding = padding
self.activation = activation
self.use_bn = use_bn
def __call__(self, x):
x = tf.keras.layers.Conv2D(self.n_filters, self.filter_size, strides=self.strides, padding=self.padding)(x)
if self.use_bn:
x = tf.keras.layers.BatchNormalization()(x)
return tf.keras.layers.Activation(self.activation)(x)
class ConvBlockTranspose:
def __init__(self, n_filters=64, filter_size=(3, 3), strides=(2, 2), padding='same', activation='elu', use_bn=True):
self.n_filters = n_filters
self.filter_size = filter_size
self.strides = strides
self.padding = padding
self.activation = activation
self.use_bn = use_bn
def __call__(self, x):
x = tf.keras.layers.Conv2DTranspose(self.n_filters, self.filter_size, strides=self.strides,
padding=self.padding)(x)
if self.use_bn:
x = tf.keras.layers.BatchNormalization()(x)
return tf.keras.layers.Activation(self.activation)(x)
class SSIMLoss(tf.keras.losses.Loss):
def __init__(self, add_mae=False):
super(SSIMLoss, self).__init__(name='ssim')
self.add_mae = add_mae
self.tf_mae = tf.keras.losses.MeanAbsoluteError(reduction=tf.keras.losses.Reduction.NONE)
def __call__(self, y_e, y_pred, sample_weight=None):
loss_ssim = 2 - tf.image.ssim(y_e, y_pred, 1., filter_size=9)
loss_ssim = tf.math.reduce_mean(loss_ssim)
if self.add_mae:
loss_ssim = loss_ssim + 5 * self.tf_mae(y_e, y_pred)
return tf.math.reduce_mean(loss_ssim)
class BCELoss(tf.keras.losses.Loss):
    def __init__(self):
        super(BCELoss, self).__init__(name='bce')
        self.tf_bce = tf.keras.losses.BinaryCrossentropy()
def dice_coef(self, y_e, y_pred, smooth=1):
intersection = tf.math.abs(y_e * y_pred)
intersection = tf.math.reduce_sum(intersection)
total_area = tf.math.square(y_e) + tf.math.square(y_pred)
total_area = tf.math.reduce_sum(total_area)
return 1 - (2. * intersection + smooth) / (total_area + smooth + 1e-8)
def __call__(self, y_e, y_pred):
return self.tf_bce(y_e, y_pred) + self.dice_coef(y_e, y_pred)
def decay_schedule(epoch, learning_rate):
if epoch > 1 and epoch % 9 == 0 and learning_rate <= 1e-5:
learning_rate /= 3
return learning_rate
def sampling(args):
    # Reparameterization trick: z = mean + exp(logvar) * epsilon.
    # z_log_var is interpreted as log(sigma) so that sampling is consistent
    # with the closed-form KL term in KLLoss below (the original multiplied
    # epsilon by z_log_var directly, which does not match that KL term).
    z_mean, z_log_var = args
    epsilon = tf.random.normal(shape=tf.shape(z_mean), name='get_epsilon')
    offset = tf.math.multiply(epsilon, tf.math.exp(z_log_var), name='offset')
    return z_mean + offset
class KLLoss:
def __init__(self, factor):
self.factor = factor
def __call__(self, z, mean, logvar):
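        # Closed-form KL(N(mean, exp(logvar)^2) || N(0, 1)), averaged over the batch and scaled by factor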
loss = 1 + 2 * logvar - tf.math.square(mean) - tf.math.exp(2 * logvar)
return - self.factor * .5 * tf.math.reduce_mean(loss)
class Encoder:
def __init__(self):
pass
def __call__(self, x_input):
x = ConvBlock(n_filters=32)(x_input)
x = ConvBlock(n_filters=32)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=64)(x)
x = ConvBlock(n_filters=64)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=128)(x)
x = ConvBlock(n_filters=128)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=256)(x)
x = ConvBlock(n_filters=256)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=256)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=256)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
x = ConvBlock(n_filters=512)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), padding='valid')(x)
return tf.keras.layers.Flatten(name='out_encoder')(x)
class DecoderMask:
def __init__(self):
pass
def __call__(self, z_latent, out_name, out_nc):
x = tf.keras.layers.Reshape((1, 1, 512))(z_latent)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='valid')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
return tf.keras.layers.Conv2D(out_nc, (3, 3), strides=(1, 1), activation='sigmoid',
padding='same', name=out_name)(x)
class Decoder:
def __init__(self):
pass
def __call__(self, z_latent, out_name, out_nc):
x = tf.keras.layers.Reshape((1, 1, 512))(z_latent)
x = ConvBlockTranspose(256, padding='same')(x)
x = ConvBlock(256)(x)
x = ConvBlockTranspose(256, padding='same')(x)
x = ConvBlock(256)(x)
x = ConvBlockTranspose(256, padding='valid')(x)
x = ConvBlock(256)(x)
x = ConvBlockTranspose(128, padding='same')(x)
x = ConvBlock(128)(x)
x = ConvBlockTranspose(64, padding='same')(x)
x = ConvBlock(64)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
x = ConvBlockTranspose(32, padding='same')(x)
x = ConvBlock(32)(x)
return tf.keras.layers.Conv2D(out_nc, (3, 3), strides=(1, 1), activation='sigmoid',
padding='same', name=out_name)(x)
class VAE2train:
def __init__(self, factor_kl=1e-3):
self.factor_kl = factor_kl
self._build_model()
def _build_model(self):
tf.keras.backend.clear_session()
self.in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
self.in_mask = tf.keras.layers.Input(shape=(144, 144, 1), name='in_mask')
out_encoder = Encoder()(self.in_image)
x = tf.keras.layers.Dense(512)(out_encoder)
# x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
self.z_latent = tf.keras.layers.Lambda(sampling, output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
out_image_pre = Decoder()(self.z_latent, 'out_image_pre', 3)
self.out_mask = DecoderMask()(self.z_latent, 'out_mask', 1)
# Tidy the image to use only the face regions of the estimated output and join with the original background
x = tf.keras.layers.Multiply()([out_image_pre, self.in_mask])
x_bg = tf.keras.layers.Multiply()([self.in_image, 1. - self.in_mask])
self.out_image = tf.keras.layers.Add(name='out_image')([x, x_bg])
self.model = tf.keras.models.Model([self.in_image, self.in_mask],
[self.out_image, self.out_mask, out_image_pre])
def add_losses(self, add_KL=True, add_SSIM=True, add_MAE=True, add_BCE=True, add_MSE=False):
self.checkpoint_fname = 'weights.{epoch:03d}-{loss:.2f}-{val_loss:.2f}'
if add_KL:
loss_KL = KLLoss(factor=self.factor_kl)(self.z_latent, self.z_mean, self.z_log_var)
self.model.add_loss(loss_KL)
self.model.add_metric(loss_KL, name='kl', aggregation='mean')
self.checkpoint_fname += '-{val_kl:.2f}'
if add_SSIM:
loss_face_ssim = SSIMLoss()(self.in_image, self.out_image)
self.model.add_loss(loss_face_ssim)
self.model.add_metric(loss_face_ssim, name='ssim', aggregation='mean')
self.checkpoint_fname += '-{val_ssim:.2f}'
if add_MAE:
loss_face_mae = 4 * tf.keras.losses.MeanAbsoluteError()(self.in_image, self.out_image)
self.model.add_loss(loss_face_mae)
self.model.add_metric(loss_face_mae, name='mae', aggregation='mean')
self.checkpoint_fname += '-{val_mae:.2f}'
if add_MSE:
loss_face_mse = 100 * tf.keras.losses.MeanSquaredError()(self.in_image, self.out_image)
self.model.add_loss(loss_face_mse)
self.model.add_metric(loss_face_mse, name='mse', aggregation='mean')
self.checkpoint_fname += '-{val_mse:.2f}'
if add_BCE:
loss_mask_bce = BCELoss()(self.in_mask, self.out_mask)
self.model.add_loss(loss_mask_bce)
self.model.add_metric(loss_mask_bce, name='bce', aggregation='mean')
self.checkpoint_fname += '-{val_bce:.2f}'
self.checkpoint_fname += '.h5'
def setup_checkpoint(self, ckptdir=None, save_best_only=True):
if ckptdir is None:
self.ckptdir = os.path.join(self.traindir, 'ckpt')
else:
self.ckptdir = ckptdir
if not os.path.exists(self.ckptdir):
os.makedirs(self.ckptdir)
self.checkpoint = ModelCheckpoint(os.path.join(self.ckptdir, self.checkpoint_fname),
save_weights_only=True, save_best_only=save_best_only,
save_format="tf", monitor='val_loss', verbose=1,
mode='min')
def setup_indir(self, traindir=None):
if traindir is None:
            self.traindir = os.path.join('traindir', datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
else:
self.traindir = traindir
def setup_logdir(self):
self.logdir = os.path.join(self.traindir, 'logs')
if not os.path.exists(self.logdir):
os.makedirs(self.logdir, exist_ok=True)
def train(self, gen_in, gen_val, traindir=None, steps_per_epoch=4000, validation_steps=700, epochs=100,
initial_epoch=0, save_best_only=True):
self.setup_indir(traindir)
self.setup_logdir()
self.setup_checkpoint(save_best_only=save_best_only)
tensorboard_callback = tf.keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4, clipnorm=1e-3))
self.model.fit(gen_in,
validation_data=gen_val,
verbose=1,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
epochs=epochs,
initial_epoch=initial_epoch,
callbacks=[self.checkpoint, tensorboard_callback])
def train_checkup(self, gen_in, ckptdir='.', steps_per_epoch=10, epochs=2):
if not os.path.exists(ckptdir):
os.makedirs(ckptdir, exist_ok=True)
self.setup_checkpoint(ckptdir=ckptdir)
self.model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4, clipnorm=1e-3))
self.model.fit(gen_in,
validation_data=gen_in,
verbose=1,
callbacks=[self.checkpoint],
steps_per_epoch=steps_per_epoch,
validation_steps=2,
epochs=epochs)
class VAENoMask(VAE2train):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _build_model(self):
tf.keras.backend.clear_session()
self.in_image = tf.keras.layers.Input(shape=(144, 144, 3), name='in_image')
self.out_encoder = Encoder()(self.in_image)
x = tf.keras.layers.Dense(512)(self.out_encoder)
# x = tf.keras.layers.BatchNormalization(name='bn1')(x)
# x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation('elu', name='out_latent_1')(x)
self.z_mean = tf.keras.layers.Dense(512, name='z_mean')(x)
self.z_log_var = tf.keras.layers.Dense(512, name='z_logvar')(x)
self.z_latent = tf.keras.layers.Lambda(sampling, output_shape=(512,),
name='z_sampling')([self.z_mean, self.z_log_var])
self.out_image = Decoder()(self.z_latent, 'out_image', 3)
self.model = tf.keras.models.Model(self.in_image, self.out_image)
if __name__ == '__main__':
celeba_dir = os.environ['celeba']
    list_eval_partition = pd.read_csv(os.path.join(celeba_dir, 'list_eval_partition.txt'), sep=r'\s+', header=None)
list_eval_partition.columns = ['bname', 'set_id']
list_eval_partition['path'] = list_eval_partition['bname'].apply(lambda x: os.path.join(celeba_dir, 'imgs', x))
fpaths_in = list_eval_partition.query('set_id == 0')['path'].values.tolist()
fpaths_val = list_eval_partition.query('set_id == 1')['path'].values.tolist()
fpaths_test = list_eval_partition.query('set_id == 2')['path'].values.tolist()
np.random.seed(5)
for fset in [fpaths_in, fpaths_val, fpaths_test]:
np.random.shuffle(fset)
gen_in = ut.InputGen(impaths=fpaths_in, loadsize_factor=2)
gen_val = ut.InputGen(impaths=fpaths_val, loadsize_factor=2)
vae = VAE2train()
vae.train(gen_in, gen_val)
|
{"hexsha": "16f76a1abf15b199e113967f4156ad16b4b93127", "size": 13803, "ext": "py", "lang": "Python", "max_stars_repo_path": "training.py", "max_stars_repo_name": "tldrafael/FaceReconstructionWithVAEAndFaceMasks", "max_stars_repo_head_hexsha": "a7ec6a424142167e5e68cb2f09552f7a84706362", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-12-07T05:56:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T12:11:50.000Z", "max_issues_repo_path": "training.py", "max_issues_repo_name": "tldrafael/FaceReconstructionWithVAEAndFaceMasks", "max_issues_repo_head_hexsha": "a7ec6a424142167e5e68cb2f09552f7a84706362", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "training.py", "max_forks_repo_name": "tldrafael/FaceReconstructionWithVAEAndFaceMasks", "max_forks_repo_head_hexsha": "a7ec6a424142167e5e68cb2f09552f7a84706362", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9584569733, "max_line_length": 120, "alphanum_fraction": 0.6181989423, "include": true, "reason": "import numpy", "num_tokens": 3538}
|
//
// Copyright (C) 2009-2012 Artyom Beilis (Tonkikh)
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
#define BOOSTER_SOURCE
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <process.h>
#include <booster/thread.h>
#include <booster/system_error.h>
#include <booster/refcounted.h>
#include <booster/intrusive_ptr.h>
#include <set>
#include <errno.h>
#include <string.h>
#include <iostream>
//
// This file is for Windows 2000 and above; it is not super
// efficient, but that's what we can get from a crappy API
//
namespace booster {
namespace winthread {
typedef std::set<DWORD> keeper;
DWORD index = TLS_OUT_OF_INDEXES;
void on_process_start()
{
index = TlsAlloc();
if(index == TLS_OUT_OF_INDEXES) {
throw booster::runtime_error("Could not allocate thread local index");
}
}
void on_process_end()
{
TlsFree(index);
}
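// get_keeper() lazily creates, per thread, the set of all TLS indexes that
// this thread has stored values into via tls_set(); on_thread_end() walks
// that set to destroy the tls_object instances owned by the exiting thread.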
keeper *get_keeper()
{
keeper *r = 0;
void *ptr=TlsGetValue(index);
if(!ptr) {
r = new keeper();
TlsSetValue(index,static_cast<void*>(r));
}
else {
r = static_cast<keeper *>(ptr);
}
return r;
}
void on_thread_end()
{
void *ptr=TlsGetValue(index);
if(!ptr)
return;
keeper *data = static_cast<keeper *>(ptr);
for(keeper::iterator p=data->begin();p!=data->end();++p) {
void *ptr = TlsGetValue(*p);
if(ptr) {
delete static_cast<details::tls_object*>(ptr);
TlsSetValue(*p,0);
}
}
delete data;
TlsSetValue(index,0);
}
DWORD tls_alloc()
{
DWORD id = TlsAlloc();
if(id == TLS_OUT_OF_INDEXES) {
throw booster::runtime_error("Could not allocate thread local key");
}
return id;
}
void tls_set(DWORD id,void *p)
{
get_keeper()->insert(id);
TlsSetValue(id,p);
}
void *tls_get(DWORD id)
{
return TlsGetValue(id);
}
void tls_free(DWORD id)
{
TlsFree(id);
}
#ifdef DLL_EXPORT
extern "C" BOOL WINAPI DllMain(HINSTANCE, DWORD reason,LPVOID)
{
switch(reason) {
case DLL_PROCESS_ATTACH:
on_process_start();
break;
case DLL_PROCESS_DETACH:
on_thread_end();
on_process_end();
break;
case DLL_THREAD_DETACH:
on_thread_end();
break;
}
return TRUE;
}
#else
struct win_thread_initializer {
win_thread_initializer() {
on_process_start();
}
~win_thread_initializer()
{
on_thread_end();
on_process_end();
}
} win_thread_initializer_instance;
#endif
} // winthread
class win_thread_data : public refcounted {
public:
win_thread_data(function<void()> const &cb) :
handle_(0),
is_complete_(false),
callback_(cb)
{
}
void transfer_handle_ownership(HANDLE h)
{
{
unique_lock<mutex> guard(lock_);
if(!is_complete_) {
handle_ = h;
h = 0;
}
}
if(h) {
CloseHandle(h);
}
}
void run()
{
try {
callback_();
callback_ = function<void()>();
}
catch(...) {}
winthread::on_thread_end();
unique_lock<mutex> guard(lock_);
is_complete_ = true;
}
~win_thread_data()
{
try {
unique_lock<mutex> guard(lock_);
if(handle_) {
CloseHandle(handle_);
handle_ = 0;
}
}
catch(...) {}
}
private:
mutex lock_;
HANDLE handle_;
bool is_complete_;
function<void()> callback_;
};
struct thread::data {
intrusive_ptr<win_thread_data> shared;
HANDLE h;
};
extern "C" void *booster_thread_func(void *p)
{
{
// Do not add a reference count here, as one was already added upon successful thread creation
intrusive_ptr<win_thread_data> d(reinterpret_cast<win_thread_data *>(p),false);
try {
d->run();
}
catch(...) {
}
}
_endthreadex(0);
return 0;
}
unsigned WINAPI booster_real_thread_func(void *p) { booster_thread_func(p); return 0; }
thread::thread(function<void()> const &cb) :
d(new thread::data)
{
d->shared=new win_thread_data(cb);
uintptr_t p=_beginthreadex(0,0,booster_real_thread_func,d->shared.get(),0,0);
if(p!=0) {
// we want to transfer ownership to the thread explicitly
intrusive_ptr_add_ref(d->shared.get());
}
else {
throw system::system_error(system::error_code(errno,system::system_category));
}
d->h=(HANDLE)(p);
}
void thread::detach()
{
if(d->h && d->shared) {
d->shared->transfer_handle_ownership(d->h);
d->h = 0;
d->shared = 0;
}
}
thread::~thread()
{
try {
detach();
}
catch(...){}
}
void thread::join()
{
if(d->h) {
WaitForSingleObject(d->h,INFINITE);
d->h = 0;
d->shared = 0;
}
}
unsigned thread::hardware_concurrency()
{
SYSTEM_INFO info=SYSTEM_INFO();
GetSystemInfo(&info);
return info.dwNumberOfProcessors;
}
struct recursive_mutex::data { CRITICAL_SECTION m; };
recursive_mutex::recursive_mutex() : d(new data)
{
InitializeCriticalSection(&d->m);
}
recursive_mutex::~recursive_mutex()
{
DeleteCriticalSection(&d->m);
}
void recursive_mutex::lock() { EnterCriticalSection(&d->m);}
void recursive_mutex::unlock() { LeaveCriticalSection(&d->m); }
struct recursive_shared_mutex::data {
mutex lock;
condition_variable can_lock;
int read_lock;
int write_lock;
int pending_lock;
static const unsigned hash_size = 2048;
unsigned short recursive_locks[hash_size];
unsigned static id()
{
return GetCurrentThreadId() % hash_size;
}
};
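// Design note: recursive read locks are tracked per thread by hashing the
// thread id into a fixed table of 2048 counters. Two threads whose ids
// collide modulo 2048 share a counter, so the implementation may err on the
// side of granting a shared lock even while a unique lock is pending.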
recursive_shared_mutex::recursive_shared_mutex() : d(new data)
{
d->read_lock = 0;
d->write_lock = 0;
d->pending_lock = 0;
memset(&d->recursive_locks,0,sizeof(d->recursive_locks));
}
recursive_shared_mutex::~recursive_shared_mutex()
{
}
void recursive_shared_mutex::shared_lock()
{
unsigned id = data::id();
booster::unique_lock<mutex> g(d->lock);
for(;;) {
if(d->write_lock == 0 && (d->pending_lock == 0 || d->recursive_locks[id]>0)) {
d->read_lock++;
d->recursive_locks[id]++;
break;
}
d->can_lock.wait(g);
}
}
void recursive_shared_mutex::unique_lock()
{
booster::unique_lock<mutex> g(d->lock);
for(;;) {
if(d->write_lock == 0 && d->read_lock==0) {
d->write_lock = 1;
d->pending_lock = 0;
break;
}
else {
if(d->read_lock)
d->pending_lock = 1;
d->can_lock.wait(g);
}
}
}
void recursive_shared_mutex::unlock()
{
unsigned id = data::id();
booster::unique_lock<mutex> g(d->lock);
if(d->write_lock) {
d->write_lock = 0;
d->pending_lock = 0;
d->can_lock.notify_all();
}
else if(d->read_lock) {
d->read_lock--;
d->recursive_locks[id]--;
if(d->read_lock == 0)
d->can_lock.notify_all();
}
}
namespace details {
class wintls_key : public key {
public:
wintls_key(void (*d)(void *)) : key(d)
{
key_ = winthread::tls_alloc();
}
virtual ~wintls_key()
{
winthread::tls_free(key_);
}
tls_object *get_object()
{
void *p=winthread::tls_get(key_);
if(p)
return static_cast<tls_object*>(p);
tls_object *res = new tls_object(intrusive_ptr<key>(this));
winthread::tls_set(key_,static_cast<void*>(res));
return res;
}
private:
DWORD key_;
};
intrusive_ptr<key> make_key(void (*dtor)(void *))
{
return new wintls_key(dtor);
}
} // details
} // booster
|
{"hexsha": "b652aa3ead410c9ace00087e2d6f50d9e6685ed2", "size": 7379, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "booster/lib/thread/src/thread_winapi.cpp", "max_stars_repo_name": "gatehouse/cppcms", "max_stars_repo_head_hexsha": "61da055ffeb349b4eda14bc9ac393af9ce842364", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 388.0, "max_stars_repo_stars_event_min_datetime": "2017-03-01T07:39:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T19:38:41.000Z", "max_issues_repo_path": "booster/lib/thread/src/thread_winapi.cpp", "max_issues_repo_name": "gatehouse/cppcms", "max_issues_repo_head_hexsha": "61da055ffeb349b4eda14bc9ac393af9ce842364", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 81.0, "max_issues_repo_issues_event_min_datetime": "2017-03-08T20:28:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T08:19:31.000Z", "max_forks_repo_path": "booster/lib/thread/src/thread_winapi.cpp", "max_forks_repo_name": "gatehouse/cppcms", "max_forks_repo_head_hexsha": "61da055ffeb349b4eda14bc9ac393af9ce842364", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 127.0, "max_forks_repo_forks_event_min_datetime": "2017-03-05T21:53:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-25T02:31:01.000Z", "avg_line_length": 19.2161458333, "max_line_length": 91, "alphanum_fraction": 0.6261010977, "num_tokens": 2180}
|
#!/usr/bin/env python3
"""
@summary: for the jupyter notebooks: tools, column creators, diagramming routines, etc.
@version: v40 (29/November/2018)
@since: 26/June/2018
@organization:
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
@TODO: this needs usage comments; not every function has a docstring yet
"""
#global DBFILE, NAME_PREFIX
#DBFILE = "temp.db"
#NAME_PREFIX = "TEMP"
################
## Dependencies:
# standard library
import sys, os, json, time
import sqlite3
from pprint import pprint
# pypi:
import pandas
import numpy
import matplotlib
import matplotlib.pyplot as plt
# chainhammer
# extend sys.path for imports:
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from hammer.config import RPCaddress, EMPTY_BLOCKS_AT_END
################
def DB_query(SQL, conn):
"""
any SQL query, with many answers
"""
cur = conn.cursor()
cur.execute(SQL)
result = cur.fetchall()
return result
def DB_tableSize(tablename, conn):
"""
prints number of rows
"""
count = DB_query("SELECT COUNT(*) FROM %s" % tablename, conn)
print ("TABLE %s has %d rows" % (tablename, count[0][0]))
return count[0][0]
def maxBlockNumber(conn):
"""
what is the first & last block we have?
"""
result = DB_query("SELECT MIN(blocknumber), MAX(blocknumber) FROM blocks", conn)
print ("MIN(blocknumber), MAX(blocknumber) = %s " % (result) )
return result
def check_whether_complete(blocknumbers):
"""
do we have consecutive blocks, none missing?
"""
start = min(blocknumbers)[0]
last = max(blocknumbers)[0]
old = start-1
total=0
for bn in blocknumbers:
bn = bn[0]
missing=bn-old-1
if missing>0:
print ("from ", old+1, "to", bn - 1, "there are ", missing, " missing")
total+=missing
old = bn
print()
complete = (not total)
print ("complete" if complete else "some %d blocks missing" % total, end=" ")
print ("between blocks %d and %d." %(min(blocknumbers)[0], max(blocknumbers)[0]))
return complete
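# Example: for blocknumbers [(1,), (2,), (4,)] this reports block 3 as missing
# ("some 1 blocks missing between blocks 1 and 4.") and returns False.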
##################
## add columns
def add_blocktime(df):
"""
blocktime = timestamp[n] - timestamp[n-1]
"""
df['blocktime'] = df['timestamp'] - df['timestamp'].shift()
df.loc[1, "blocktime"] = numpy.nan
def add_TPS(df, numBlocks):
"""
transactions per second
with differently sized (rectangular) windows
"""
name = 'TPS_%dblks'%numBlocks if numBlocks>1 else 'TPS_%dblk'%numBlocks
df[name]=df['txcount'].rolling(numBlocks).sum() / df['blocktime'].rolling(numBlocks).sum()
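# Example: add_TPS(df, numBlocks=3) creates a column 'TPS_3blks' in which row n
# holds txcount[n-2..n].sum() / blocktime[n-2..n].sum(), i.e. a 3-block
# rectangular moving average of the transactions-per-second rate.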
def add_GUPS(df, numBlocks):
"""
gasUsed per second
"""
name = 'GUPS_%dblks'%numBlocks if numBlocks>1 else 'GUPS_%dblk'%numBlocks
df[name]=df['gasUsed'].rolling(numBlocks).sum() / df['blocktime'].rolling(numBlocks).sum()
def add_GLPS(df, numBlocks):
"""
gasLimit per second
"""
name = 'GLPS_%dblks'%numBlocks if numBlocks>1 else 'GLPS_%dblk'%numBlocks
df[name]=df['gasLimit'].rolling(numBlocks).sum() / df['blocktime'].rolling(numBlocks).sum()
##################################################
## diagramming stand-alone
## does the same as the jupyter notebook
## but more convenient for cloud server
## ... on the command line
##
##################################################
## TODOs:
## * also get the simpler single diagrams ?
## from the original blocksDB_analyze.ipynb
## * doc strings for the following routines:
##################################################
def load_dependencies():
import sqlite3; print("sqlite3 version", sqlite3.version)
import pandas; print("pandas version", pandas.__version__)
import numpy; print("numpy version", numpy.__version__)
import matplotlib; print("matplotlib version", matplotlib.__version__)
from matplotlib import pyplot as plt
backend=matplotlib.get_backend()
print("matplotlib backend", backend)
# get_ipython().run_line_magic('matplotlib', 'inline')
# https://github.com/matplotlib/matplotlib/issues/5907#issuecomment-179001811
matplotlib.rcParams['agg.path.chunksize'] = 10000
# my own routines are now all in separate .py file:
# from blocksDB_diagramming import DB_query, DB_tableSize, maxBlockNumber, check_whether_complete
# from blocksDB_diagramming import add_blocktime, add_TPS, add_GUPS, add_GLPS
def load_db_and_check_complete(DBFILE):
print ("\nReading blocks table from", DBFILE)
# open database connection
conn = sqlite3.connect(DBFILE)
print ("DB table names: ",
DB_query("SELECT name FROM sqlite_master WHERE type='table';", conn)[0])
# number of rows?
_=DB_tableSize("blocks", conn)
# what is the first & last block we have?
minblock, maxblock = maxBlockNumber(conn)[0]
blocknumbers = DB_query("SELECT blocknumber FROM blocks ORDER BY blocknumber", conn)
print ("len(blocknumbers)=", len(blocknumbers))
# do we have consecutive blocks, none missing?
check_whether_complete(blocknumbers)
print ()
return conn, blocknumbers
def simple_stats(conn):
# simple statistics
txcount_sum = DB_query("SELECT SUM(txcount) FROM blocks", conn); print ("txcount_sum", txcount_sum[0][0])
size_max = DB_query("SELECT MAX(size) FROM blocks", conn); print ("blocksize_max", size_max[0][0])
txcount_max = DB_query("SELECT MAX(txcount) FROM blocks", conn); print ("txcount_max", txcount_max[0][0])
txcount_av = DB_query("SELECT AVG(txcount) FROM blocks", conn); print ("txcount average per block", txcount_av[0][0])
blocks_nonempty_count = DB_query("SELECT COUNT(blocknumber) FROM blocks WHERE txcount != 0", conn); print ("blocks_nonempty_count", blocks_nonempty_count[0][0])
print ("txcount average per NONEMPTY blocks = ", txcount_sum[0][0] / blocks_nonempty_count[0][0] )
print ()
def read_whole_table_into_dataframe(conn):
# SQL="SELECT * FROM blocks WHERE 48500<blocknumber and blocknumber<49000 ORDER BY blocknumber"
SQL="SELECT * FROM blocks ORDER BY blocknumber"
df = pandas.read_sql(SQL, conn)
return df
def check_timestamp_format(df):
"""
some clients report absolute blocktime as epochtime in seconds,
some in nanoseconds
that should have been handled already, in the timestampToSeconds() function
but if it hasn't, the problem would show up here.
"""
# print ("example- first 4 rows:")
# print (df[0:4])
# better come up with an automated test, not just visual inspection:
# print (" is timestamp in seconds?")
# ### `geth` based clients have a nanosecond timestamp
# not anymore?
# transform nanoseconds to seconds
# df["timestamp"]=df["timestamp"]/1000000000
problematic = []
for ts in df["timestamp"]:
# year 2001 year 2255 testrpc-py issue https://github.com/pipermerriam/eth-testrpc/issues/117
if not ((1000000000 < ts < 9000000000) or (6000000 < ts < 8000000)):
problematic.append(ts)
if problematic:
print ("%d problematic timestamps = probably not in unit of seconds" % len(problematic))
try:  # guard for the case that the list is short
problematic = problematic[:3] + problematic[-3:]
problematic = sorted(list(set(problematic))) # remove duplicates
except:
pass
print ("examples:", problematic)
# hello year 2255, you might have a Y2286 problem
# when epochtime goes from 9999999999 to 10000000000
# someone warned you 30 years earlier. Hahaha :-)
return not problematic
def add_columns(df):
# blocktime = timestamp[n] - timestamp[n-1]
add_blocktime(df)
#df["TPS_1"]=df['txcount']/df['blocktime']
#df
# transactions per second
# with differently sized (rectangular) windows
add_TPS(df, numBlocks=1)
add_TPS(df, numBlocks=3)
add_TPS(df, numBlocks=5)
add_TPS(df, numBlocks=10)
# gasUsed and gasLimit per second
add_GUPS(df, numBlocks=1)
add_GUPS(df, numBlocks=3)
add_GUPS(df, numBlocks=5)
add_GLPS(df, numBlocks=1)
add_GLPS(df, numBlocks=3)
add_GLPS(df, numBlocks=5)
print ("\nColumns added. Now: ", df.columns.tolist() )
print ()
def show_peak_TPS(df):
columns = ['blocknumber',
'TPS_1blk', 'TPS_3blks', 'TPS_5blks', 'TPS_10blks',
'txcount', 'size', 'gasUsed', 'gasLimit', 'timestamp', 'blocktime']
print ("peak TPS single block:")
df1 = df.sort_values(by=['TPS_1blk'], ascending=False)[0:10]
max1 = max(df1['TPS_1blk'])
pprint (df1[columns])
print ("\npeak TPS over ten blocks:")
df10 = df.sort_values(by=['TPS_10blks'], ascending=False)[0:10]
max10 = max(df10['TPS_10blks'])
pprint (df10[columns])
print ("\nSingle block, vs averaged over 10 blocks:")
print ("peak( TPS_1blk) = %.2f \npeak(TPS_10blk) = %.2f" % (max1,max10))
return max1, max10
def diagrams_oldversion(df, blockFrom, blockTo, prefix="", gas_logy=True, bt_logy=True, imgpath="img"):
"""
OBSOLETE NOW!
"""
from matplotlib import pyplot as plt
# https://github.com/matplotlib/matplotlib/issues/5907#issuecomment-179001811
matplotlib.rcParams['agg.path.chunksize'] = 10000
###################################################
# prepare 2x2 subplots
# plt = matplotlib.pyplot
fig, axes = plt.subplots(nrows=2, ncols=2,figsize=(15,10))
plt.tight_layout(pad=6.0, w_pad=6.0, h_pad=7.5)
title = prefix + " blocks %d to %d" % (blockFrom, blockTo)
plt.suptitle(title, fontsize=16)
####################################
# TPS
# TPS averages --> legend
cols=['TPS_1blk', 'TPS_3blks', 'TPS_5blks', 'TPS_10blks']
averages=df[cols][blockFrom:blockTo].mean()
legend = [col + " (av %.1f)" % averages[col] for col in cols]
# print (legend)
# TPS diagram
cols = ['blocknumber'] + cols
ax=df[cols][blockFrom:blockTo].plot(x='blocknumber', rot=90, ax=axes[0,0])
ax.set_title("transactions per second")
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.legend(legend);
###########################################
# bar charts or line charts
# bar charts are too expensive when too many blocks
numBlocks = blockTo - blockFrom
kind = 'bar' if numBlocks<2000 else 'line'
#############################################
# BT
ax=df[['blocknumber', 'blocktime']][blockFrom:blockTo].plot(x='blocknumber', kind=kind, ax=axes[0,1],
logy=bt_logy)
ax.set_title("blocktime since last block")
ax.locator_params(nbins=1, axis='x') # TODO: matplotlib's ticks - how to autoselect few? Any idea welcome
#############################################
# blocksize
ax=df[['blocknumber', 'size']][blockFrom:blockTo].plot(x='blocknumber', rot=90, kind=kind, ax=axes[1,0])
# ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_scientific(False)
ax.set_title("blocksize in bytes")
ax.locator_params(nbins=1, axis='x') # TODO: matplotlib's ticks - how to autoselect few? Any idea welcome
####################################
# gas
ax=df[['blocknumber', 'GLPS_1blk', 'GUPS_1blk']][blockFrom:blockTo].plot(x='blocknumber',
rot=90, ax=axes[1,1],
logy=gas_logy)
ax.get_xaxis().get_major_formatter().set_useOffset(False)
if not gas_logy:
ax.get_yaxis().get_major_formatter().set_scientific(False)
ax.set_title("gasUsed and gasLimit per second")
##############################################
# save diagram to PNG file
filename = "%s_tps-bt-bs-gas_blks%d-%d.png" % (prefix,blockFrom,blockTo)
filepath = os.path.join(imgpath, filename)
fig.savefig(filepath)
return filepath
################################################################################
# new diagrams
# completely overhauled, mostly written new actually
################################################################################
def experiment_slice(df, FROM_BLOCK, TO_BLOCK, emptyBlocks):
"""
cut out the dataframe from FROM_BLOCK to TO_BLOCK+emptyBlocks (incl that last one)
can handle that df starts not at block 0
can handle that limits are smaller or larger than available blocknumbers
"""
assert FROM_BLOCK <= TO_BLOCK
index_from = min( df[df['blocknumber'] >= FROM_BLOCK].index.tolist() )
# print (slice_from)
index_to = max( df[df['blocknumber'] <= TO_BLOCK+emptyBlocks].index.tolist() )
# print(slice_to)
dfs = df[index_from:index_to + 1]
return dfs, index_from, index_to
def averageTps_wholeExperiment(dfs, FROM_BLOCK, TO_BLOCK):
"""
works on already sliced dataframe,
where first experiment block is index 0
and last experiment(!) block is index [TO_BLOCK - FROM_BLOCK],
(so the 10 empty blocks at the end are NOT part of this!)
N.B.:
we cannot rely on the blocktime of very first block
so we simply leave the transactions out of the summation, and
the duration is from when that first block WAS MINED = its timestamp.
"""
blocks = TO_BLOCK - FROM_BLOCK + 1
ts1 = dfs.iloc[0]['timestamp'] # stop clock starts WHEN block 0 is in already!
bn1 = dfs.iloc[0]['blocknumber']
ts2 = dfs.iloc[blocks-1]['timestamp'] # and clock ends at last filled block
bn2 = dfs.iloc[blocks-1]['blocknumber']
duration = ts2-ts1
txs=sum(dfs['txcount'][1:blocks]) # N.B.: start summing at block 1 not 0 !
tps=(txs/duration)
print ("second to last experiment block, averaging:")
txt="blocks %d-%d, timestamps %d-%d, duration %d seconds, txcount %d, tps %.1f"
print (txt % (bn1, bn2, ts1, ts2, duration, txs, tps))
print()
return tps, "%.1f" % tps
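# Worked example (illustrative numbers): for FROM_BLOCK=100, TO_BLOCK=102 with
# timestamps 1000, 1010, 1020 and txcounts 50, 60, 70, the duration is
# 1020-1000 = 20 seconds and txs = 60+70 = 130 (the first block's transactions
# are excluded), giving tps = 130/20 = 6.5.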
def averager(dfs, col, emptyBlocks, fmt="%.1f"):
"""
We want the real average of that 'col', taken only over the non-empty blocks.
N.B.: this assumes that there are actually enough emptyBlocks at the end!
"""
filledSlice = dfs[col][:len(dfs)-emptyBlocks-1]
av = avCopy = filledSlice.mean()
if fmt=="%d":
avCopy = int(round(av))
avTxt = fmt % avCopy
return av, avTxt
def avgLine(ax, dfs, emptyBlocks, avg, avgTxt):
"""
horizontal line plus text on white background
"""
lastFilledBlock_index = len(dfs)-emptyBlocks-1
blMin, blMax = min(dfs["blocknumber"])+1, max(dfs["blocknumber"][:lastFilledBlock_index])
ax.plot([blMin, blMax], [avg, avg], "k-")
ax.text(blMin + (blMax-blMin + emptyBlocks)*0.95, avg, avgTxt,
bbox=dict(facecolor='white', edgecolor='white'))
def axes_simplifier(ax, logYscale=False):
"""
otherwise matplotlib automatically switches on notations on the ticks
that might be confusing to non-technical people
"""
ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.get_xaxis().get_major_formatter().set_scientific(False)
if not logYscale:
ax.get_yaxis().get_major_formatter().set_useOffset(False)
ax.get_yaxis().get_major_formatter().set_scientific(False)
def tps_plotter(ax, dfs, FROM_BLOCK, TO_BLOCK, emptyBlocks):
"""
TPS average calculated only over non-empty blocks!
average calculated for TPS (not for smoothed 3, 5, 10 blocks averages)
N.B.: this assumes that in dfs there are actually enough emptyBlocks at the end!
"""
cols=['TPS_1blk', 'TPS_3blks', 'TPS_5blks', 'TPS_10blks']
for col in cols:
ax.plot(dfs['blocknumber'], dfs[col])
axes_simplifier(ax)
#avg1, avg1Txt = averager(dfs, cols[0], emptyBlocks, "%.1f")
#legend = [cols[0] + " (avg1 %s)"%avg1Txt ] + cols[1:]
ax.legend(cols);
avg, avgTxt = averageTps_wholeExperiment(dfs, FROM_BLOCK, TO_BLOCK)
avgLine(ax, dfs, emptyBlocks, avg, avgTxt)
print ("averaged over whole experiment: %s TPS" %avgTxt)
ax.set_title("avg TPS %s = #TX whole experiment / blocktimes diff" % avgTxt)
return avg
def blocktimes_plotter(ax, dfs):
"plot the blocktimes"
ax.set_title("blocktime seconds since last block")
ax.scatter(x=dfs['blocknumber'], y=dfs['blocktime'], c="b", marker="x")
axes_simplifier(ax)
def blocksizes_plotter(ax, dfs, emptyBlocks):
"""
blocksizes
plus average line
"""
ax.scatter(dfs['blocknumber'], dfs['size'], c="g", marker="o")
ax.plot( dfs['blocknumber'], dfs['size'], "g-")
avg, avgTxt = averager(dfs, 'size', emptyBlocks, "%d")
avgLine(ax, dfs, emptyBlocks, avg, avgTxt)
print ('averaged ( " ) blocksize: %s bytes' % avgTxt)
ax.set_title("blocksizes in bytes")
axes_simplifier(ax)
def gas_plotter(ax, dfs):
"""
plot gasUsed and gasLimit per second
"""
ax.set_title("gasUsed and gasLimit per second")
ax.plot( dfs['blocknumber'], dfs['GLPS_1blk']) # , "g-")
ax.plot( dfs['blocknumber'], dfs['GUPS_1blk']) #
ax.set_yscale('log')
axes_simplifier(ax, logYscale=True)
ax.legend (["gasLimit/sec", "gasUsed/sec"] )
def diagrams(prefix, df, blockFrom, blockTo, emptyBlocks):
"""
new version
more precise & consistent
* slice of whole experiment (from/to), plus some emptyBlocks at the end
* averages are calc'ed over the experiment blocks only!
* average lines & number for tps & block size
* title shows more infos about experiment
* x-axis ticks issues solved
"""
# offset=min(df["blocknumber"])
# just the slice of the experiment + 10 extra blocks:
# dfs = df[FROM_BLOCK-offset:TO_BLOCK-offset+emptyBlocks+1]
dfs, index_from, index_to = experiment_slice(df, blockFrom, blockTo, emptyBlocks)
# https://github.com/matplotlib/matplotlib/issues/5907#issuecomment-179001811
import matplotlib
matplotlib.rcParams['agg.path.chunksize'] = 10000
fig, axes = plt.subplots(2, 2, figsize=(16,9)) #, sharex=True)
fig.subplots_adjust(hspace=0.25, wspace=0.20)
tpsAv = tps_plotter(axes[0,0], dfs, blockFrom, blockTo, emptyBlocks)
blocktimes_plotter(axes[0,1], dfs)
blocksizes_plotter(axes[1,0], dfs, emptyBlocks)
gas_plotter(axes[1,1], dfs)
txs=sum(dfs['txcount'][0:-emptyBlocks+1])
title = prefix + " blocks %d-%d with %d txs ~ %d txs/block"
title = title % (blockFrom, blockTo, txs, round(txs/(blockTo-blockFrom+1)))
fig.suptitle(title, fontsize=16)
return fig, axes, dfs, txs, tpsAv
def read_experiment_infofile(fn):
"""
now the experiments are all writing out basic information.
read this in here, to know the range of blocks.
"""
with open(fn, "r") as f:
info = json.load(f)
return info
def timestamp_humanreadable(epoch):
return time.strftime("%Y%m%d-%H%M", time.localtime(epoch))
def savePlot(fig, prefix, blockFrom, blockTo, imgpath, INFOFILE=None):
if INFOFILE:
info = read_experiment_infofile(INFOFILE)
ts = timestamp_humanreadable(info['tps']['start_epochtime'])
prefix = prefix + "-" +ts
filename = "%s_blks%d-%d.png" % (prefix,blockFrom,blockTo)
filepath = os.path.join(imgpath, filename)
fig.savefig(filepath)
return filepath
def add_to_infofile(INFOFILE, img_fn, tpsAv, prefix):
info = read_experiment_infofile(fn=INFOFILE)
info['diagrams']={}
info['diagrams']['filename'] = img_fn
info['diagrams']['blocktimestampsTpsAv'] = tpsAv
info['diagrams']['prefix'] = prefix
with open(INFOFILE, "w") as f:
json.dump(info, f)
################################################################################
def load_prepare_plot_save(DBFILE, NAME_PREFIX,
FROM_BLOCK, TO_BLOCK, EMPTY_BLOCKS,
INFOFILE, imgpath="img"):
load_dependencies()
conn, blocknumbers = load_db_and_check_complete(DBFILE)
simple_stats(conn)
df = read_whole_table_into_dataframe(conn)
conn.close()
assert check_timestamp_format(df)
add_columns(df)
show_peak_TPS(df)
if FROM_BLOCK==-1: FROM_BLOCK = min(blocknumbers)[0]
if TO_BLOCK==-1: TO_BLOCK = max(blocknumbers)[0]
# print (FROM_BLOCK, TO_BLOCK); exit()
print()
# fn = diagrams_oldversion(df, FROM_BLOCK, TO_BLOCK, NAME_PREFIX, gas_logy=True, bt_logy=True, imgpath=imgpath)
fig, axes, dfs, txs, tpsAv = diagrams(NAME_PREFIX, df, FROM_BLOCK, TO_BLOCK,
emptyBlocks=EMPTY_BLOCKS)
fn = savePlot(fig, NAME_PREFIX, FROM_BLOCK, TO_BLOCK, imgpath, INFOFILE)
print ("\ndiagrams saved to: ", fn)
if INFOFILE:
add_to_infofile(INFOFILE, fn, tpsAv, NAME_PREFIX)
return fn
###############################################################################
def sanify(mystring):
"""
from given string, make something that can be used as filename
"""
keepcharacters = ('-','.','_')
sane = "".join(c for c in mystring if c.isalnum() or c in keepcharacters)
sane = sane.rstrip()
return sane
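# Example: sanify("TEMP run#1!") returns "TEMPrun1".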
def CLI_params():
if len(sys.argv) not in (3, 4, 5):
print ("Please give\n"
"THREE arguments DBFILE PREFIX INFOFILE\n\n"
"Or give FOUR arguments, \n"
"the filename DBFILE ___.db, \n"
"a PREFIX for characterising the diagram output files; \n"
"and FROM_BLOCK and TO_BLOCK for where to zoom,\n"
"or\n"
"give only the first TWO arguments, for the whole chain\n\n"
"examples:\n"
"%s temp.db TEMP ../hammer/last-experiment.json\n"
"%s temp.db TEMP 115 230\n"
"%s temp.db TEMP\n" % (sys.argv[0], sys.argv[0], sys.argv[0]))
exit(1)
DBFILE=sys.argv[1]
NAME_PREFIX = sanify( sys.argv[2] )
print ("using DBFILE=%s NAME_PREFIX=%s" % (DBFILE, NAME_PREFIX))
if len(sys.argv)==3:
FROM_BLOCK=-1
TO_BLOCK=-1
print ("for the whole chain, first to last block")
INFOFILE=None
EMPTY_BLOCKS = EMPTY_BLOCKS_AT_END
if len(sys.argv) == 4:
INFOFILE=sys.argv[3]
print ("reading blocks range from", INFOFILE)
info = read_experiment_infofile(fn=INFOFILE)
# pprint(info); exit()
FROM_BLOCK = info['send']['block_first']
TO_BLOCK = info['send']['block_last']
EMPTY_BLOCKS = info['send']['empty_blocks']
txt = "from block %d to block %d, with %d empty blocks afterwards"
print (txt % (FROM_BLOCK, TO_BLOCK, EMPTY_BLOCKS) )
if len(sys.argv)==5:
FROM_BLOCK=int(sys.argv[3])
TO_BLOCK =int(sys.argv[4])
print ("from block %d to block %d" % (FROM_BLOCK, TO_BLOCK) )
print ()
return DBFILE, NAME_PREFIX, FROM_BLOCK, TO_BLOCK, EMPTY_BLOCKS, INFOFILE
if __name__ == '__main__':
# ./blocksDB_diagramming.py temp1.db TEMP 54 124
params = CLI_params()
# params = ("temp1.db", "TEMP", 54, 124)
# params = ("temp1.db", "TEMP", 0, 233)
# params = ("temp2.db", "TEMP", 0, 5000)
load_prepare_plot_save(*params)
print ("Done.")
|
{"hexsha": "1c41dffec20d92c8af4de2b7464ed7d62eef7243", "size": 23885, "ext": "py", "lang": "Python", "max_stars_repo_path": "reader/blocksDB_diagramming.py", "max_stars_repo_name": "nairobi222/chainhammer", "max_stars_repo_head_hexsha": "94ab5269a9a9c751d355b41f90ac244026ccf46b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reader/blocksDB_diagramming.py", "max_issues_repo_name": "nairobi222/chainhammer", "max_issues_repo_head_hexsha": "94ab5269a9a9c751d355b41f90ac244026ccf46b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reader/blocksDB_diagramming.py", "max_forks_repo_name": "nairobi222/chainhammer", "max_forks_repo_head_hexsha": "94ab5269a9a9c751d355b41f90ac244026ccf46b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3124128312, "max_line_length": 164, "alphanum_fraction": 0.6144023446, "include": true, "reason": "import numpy", "num_tokens": 6278}
|
"""
cubeset - Defines a CubeSet class that contains code to handle operations
on several IFU data cubes, e.g., coaddition
"""
import os
import numpy as np
from matplotlib import pyplot as plt
from astropy.io import ascii
from astropy.io import fits as pf
from astropy import wcs
from .oscube import OsCube
class CubeSet(list):
"""
The CubeSet class is effectively just a list of OsCube instances that
includes operations that act on the whole list (e.g., coadds)
"""
# ------------------------------------------------------------------------
def __init__(self, inlist, informat=None, indir=None, maskfile='default',
maskdir=None, verbose=True):
"""
There are three ways to create a Cubeset instance
Option 1: List of filenames
Option 2: List of already existing OsCube instances
Option 3: A filename, where the file contains within it a list
of input files
- Within this option is a specialized format for the coadd, which
can be chosen by setting informat='coadd'
For this option, the file must have the following columns:
File CRVAL1 CRVAL2 CRPIX1 CRPIX2
where the column names are fairly self-explanatory
"""
""" Set default values """
self.info = None
"""
First check to see if inlist is a list, of either filenames (strings)
or OsCube instances (options 1 and 2).
If it's not a list, check to see if it is a single string, in which
case it may be a file that contains a list of filenames (option 3)
"""
if isinstance(inlist, list):
""" Option 1 or 2: list of files or of OsCube instances """
if isinstance(inlist[0], str):
for f in inlist:
tmp = ((os.path.basename(f)).split('.')[0]).split('_')
frame, filt, lenslet = tmp[1:4]
if maskfile == 'default':
maskfile = 'mask_%s_%s.fits' % (filt, lenslet)
if indir is not None:
infile = os.path.join(indir, f)
else:
infile = f
if maskdir is not None:
maskpath = os.path.join(maskdir, maskfile)
else:
maskpath = maskfile
try:
cube = OsCube(infile, maskfile=maskpath, verbose=False)
except IOError:
print('')
print('Could not open requested file: %s' % f)
return
self.append(cube)
elif isinstance(inlist[0], OsCube):
for cube in inlist:
self.append(cube)
else:
raise TypeError('input list must be a list of filenames or'
' OsCube instances')
elif isinstance(inlist, str):
""" Option 3: a file containing a list of filenames """
try:
if informat == 'coadd' or informat is None:
intab = ascii.read(inlist)
else:
intab = ascii.read(inlist, guess=False, format=informat)
except IOError:
print('')
print('Could not open requested file: %s' % inlist)
print('')
return
"""
Read in the files.
If informat is 'coadd', then use the G column to only select
the good files
"""
if informat == 'coadd':
goodmask = (intab['CRPIX1'] > 0.) & (intab['CRPIX2'] > 0.)
goodtab = intab[goodmask]
flist = goodtab['File']
self.info = goodtab
else:
flist = intab.columns[0]
self.info = intab
for f, info in zip(flist, self.info):
tmp = ((os.path.basename(f)).split('.')[0]).split('_')
obsdir = tmp[0].replace('s', '20')
frame, filt, lenslet = tmp[1:4]
if indir == 'fromfile':
osirisdir = os.getenv('osiris')
infile = os.path.join(osirisdir, obsdir, 'Clean', f)
elif indir is not None:
infile = os.path.join(indir, f)
else:
infile = f
try:
cube = OsCube(infile, maskfile=maskfile, verbose=False)
crpix = [info['CRPIX1'], info['CRPIX2']]
crval = [info['CRVAL1'], info['CRVAL2']]
cube.update_wcs_from_2d(crpix, crval)
except IOError:
print('')
print('Could not open requested file: %s' % infile)
return
self.append(cube)
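# Illustrative usage (filenames hypothetical):
#   cs = CubeSet(['s170101_a001002_Kbb_035.fits'], indir='Clean')  # option 1
#   cs = CubeSet([oscube1, oscube2])                 # option 2: OsCube instances
#   cs = CubeSet('coadd_list.txt', informat='coadd') # option 3: list file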
# ------------------------------------------------------------------------
def clean(self, maskfile='default', maskdir='../Clean', dsdir='../DarkSub',
outdir='../Clean', debug=False, **kwargs):
"""
Runs the clean algorithm for each cube in the CubeSet
"""
"""
Read the mask file into the first cube, and then copy it to the
subsequent cubes
"""
self[0].read_maskfile(maskfile, maskdir)
for i in range(1, len(self)):
self[i].mask = self[0].mask.copy()
"""
Loop through the files, cleaning each one and saving to the
designated directory
"""
print('')
for f in self:
basename = os.path.basename(f.infile)
outfile = os.path.join(outdir, basename)
varfile = outfile.replace('.fits', '_varspec.fits')
if dsdir is not None:
dsfile = os.path.join(dsdir, basename)
varcubefile = varfile.replace('varspec', 'varcube')
else:
dsfile = None
if debug:
print('Input file: %s' % f.infile)
print('Output file: %s' % outfile)
print('Varspec file: %s' % varfile)
if dsfile is not None:
print('Darksub file: %s' % dsfile)
print('Varcube file: %s' % varcubefile)
f.make_varspec(outfile=varfile, outformat='fitstab')
f.clean(**kwargs)
f.save_drp(outfile, 'clean')
if dsfile is not None:
f.make_varcube('darksub', dsfile=dsfile, outfile=varcubefile)
# ------------------------------------------------------------------------
def coadd(self, lensroot, configfile, centpos=None, whttype='mask',
varsuff='varcube', wlim=None,
testslice='default', swarp='swarp', testonly=False,
slroot='slice', verbose=True,
**kwargs):
"""
Coadd the cubes through calls to swarp
(NOT YET IMPLEMENTED)
"""
"""
Get the range of wavelength slices to extract from the cube.
The default is to use the full wavelength range.
"""
if wlim is not None:
wmin = wlim[0]
wmax = wlim[1]
else:
wmin = 0
wmax = self[0].wsize
""" Get variance information if given """
if whttype == 'varcube':
varlist = []
for i, cube in enumerate(self):
if cube.infile is not None:
inname = cube.infile.replace('.fits', '_%s.fits' % varsuff)
varlist.append(inname)
else:
raise IOError('No filename associated with cube %d' % i)
varcube = CubeSet(varlist)
else:
varcube = None
"""
Make a test file, with the swarped coadd of a single slice.
This is used to set the size of the output array
"""
if testslice == 'default':
testslice = int((wmin + wmax) / 2.)
for i, c in enumerate(self):
""" Get the data in the test slice """
c.set_imslice(testslice, display=False)
outname = '%s_%03d_%02d.fits' % (slroot, testslice, i)
""" Get information from the fits header """
hdr = c['slice'].header
if 'ELAPTIME' in hdr.keys():
hdr['exptime'] = hdr['elaptime']
elif 'ITIME' in hdr.keys():
hdr['exptime'] = hdr['itime'] / 1000.
else:
hdr['exptime'] = 1.
""" Save the science data """
c['slice'].writeto(outname)
""" Set the input weight type for swarp """
mask = c.mask.astype(int)
if whttype == 'mask':
wsuff = '_wht.fits'
wstr = 'MAP_WEIGHT'
whtdat = mask
elif whttype == 'varspec':
wsuff = '_wht.fits'
wstr = 'MAP_WEIGHT'
if c.varspec is None:
c.make_varspec()
whtdat = mask * c.varspec[testslice]
elif whttype == 'varcube':
wsuff = '_rms.fits'
wstr = 'MAP_RMS'
varcube[i].set_imslice(testslice, display=False)
whtdat = varcube[i]['slice']
else:
raise ValueError('whttype must be one of "varcube", "varspec"'
' or "mask"')
""" Define and save the weight image"""
outwht = outname.replace('.fits', wsuff)
pf.PrimaryHDU(whtdat, hdr).writeto(outwht, overwrite=True)
""" Save the mask and exposure time images """
outmask = outname.replace('.fits', '_mask.fits')
texp = mask * hdr['exptime']
outtexp = outmask.replace('mask', 'texp')
pf.PrimaryHDU(mask, hdr).writeto(outmask, overwrite=True)
pf.PrimaryHDU(texp, hdr).writeto(outtexp, overwrite=True)
""" Run swarp on the science and ancillary files """
if whttype == 'mask':
keyvals = '-WEIGHT_TYPE %s -WEIGHT_SUFFIX %s' % (wstr, wsuff)
os.system('%s %s*fits -c %s %s' % (swarp, slroot, configfile, keyvals))
addkeys = '-WEIGHT_TYPE NONE -COMBINE_TYPE SUM'
addkeys = '%s -RESAMPLING_TYPE BILINEAR' % addkeys
mkeys = '-IMAGEOUT_NAME %s_mask.fits' % lensroot
os.system('%s %s*mask.fits -c %s %s %s' %
(swarp, slroot, configfile, addkeys, mkeys))
# os.system('%s %s*fits -c %s %s' % (swarp, slroot, configfile, keyvals))
""" If the testonly mode has been requested, quit here """
if testonly:
# os.system('rm %s*fits' % slroot)
return
"""
Get the relevant information out of the test file, and then delete
the temporary files.
The reason for using the WCS call is to get the WCS information in
the header of the swarped file into a standard format
"""
tmphdu = pf.open('coadd.fits')
hdr2d = (wcs.WCS(tmphdu[0].header)).to_header()
dim2d = tmphdu[0].data.shape
""" The testonly case already returned above, so just clean up """
os.system('rm %s*fits' % slroot)
os.system('rm coadd*fits')
""" Split all the cubes into their slices """
if(verbose):
print('Splitting the cubes into slices')
for i, c in enumerate(self):
outroot = '%s_%i' % (slroot, i)
c.slice_cube(wlim=wlim, outroot=outroot)
|
{"hexsha": "d283ad58515b8f34b747e30d5a0d4d0b7f651b04", "size": 11676, "ext": "py", "lang": "Python", "max_stars_repo_path": "keckcode/osiris/cubeset.py", "max_stars_repo_name": "cdfassnacht/keck_code", "max_stars_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "keckcode/osiris/cubeset.py", "max_issues_repo_name": "cdfassnacht/keck_code", "max_issues_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "keckcode/osiris/cubeset.py", "max_forks_repo_name": "cdfassnacht/keck_code", "max_forks_repo_head_hexsha": "a952b3806b3e64eef70deec2b2d1352e6ef6dfa0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-15T23:16:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-15T23:16:36.000Z", "avg_line_length": 37.5434083601, "max_line_length": 81, "alphanum_fraction": 0.4935765673, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2661}
|
import os
import time
import torch
import numpy as np
from hparam import hparam as hp
from speech_embedder_net import get_cossim, R2Plus1DNet
import sys
def extract(model_path,dataset):
device = torch.device("cpu")
embedder_net = R2Plus1DNet([2,2,2,2]).to(device)
embedder_net.load_state_dict(torch.load(model_path))
embedder_net.eval()
save_path = os.path.join(hp.data.embedding_path, dataset)
if dataset == "train":
path = hp.data.train_path
else:
path = hp.data.test_path
np_file_dir = [b for b in os.listdir(path) if b[0] != "."]
os.makedirs(save_path, exist_ok=True)
for d in np_file_dir:
np_file_list = os.listdir(os.path.join(path,d))
buffer = []
for f in np_file_list:
utter = np.load(os.path.join(path,d,f))
utter = np.resize(utter,(1,)+utter.shape)
utter = torch.tensor(utter).to(device)
buffer.append(embedder_net(utter)[0].detach())
buffer = np.stack(buffer)
np.save(os.path.join(save_path,d) + ".npy",buffer)
if __name__ == "__main__":
dataset = sys.argv[1]
extract(hp.model.model_path, dataset)
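# Assumed invocation: python extract.py train (or: python extract.py test);
# the positional argument selects which split's embeddings to extract.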
|
{"hexsha": "0e297a37e5a0140a1c34e09296d61fd8ff1be08c", "size": 1158, "ext": "py", "lang": "Python", "max_stars_repo_path": "extract.py", "max_stars_repo_name": "amadeusuzx/PyTorch_Speaker_Verification", "max_stars_repo_head_hexsha": "0ad5b01822cbd88da82258cd1930d024c04109f6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extract.py", "max_issues_repo_name": "amadeusuzx/PyTorch_Speaker_Verification", "max_issues_repo_head_hexsha": "0ad5b01822cbd88da82258cd1930d024c04109f6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "extract.py", "max_forks_repo_name": "amadeusuzx/PyTorch_Speaker_Verification", "max_forks_repo_head_hexsha": "0ad5b01822cbd88da82258cd1930d024c04109f6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0857142857, "max_line_length": 62, "alphanum_fraction": 0.6511226252, "include": true, "reason": "import numpy", "num_tokens": 305}
|
using Oceananigans.Utils: work_layout
using Oceananigans.Architectures: device
using Oceananigans.TimeSteppers: store_tracer_tendency!
import Oceananigans.TimeSteppers: store_tendencies!
""" Store source terms for `uh`, `vh`, and `h`. """
@kernel function store_solution_tendencies!(G⁻, grid, G⁰)
i, j, k = @index(Global, NTuple)
@unroll for t in 1:3
@inbounds G⁻[t][i, j, k] = G⁰[t][i, j, k]
end
end
""" Store previous source terms before updating them. """
function store_tendencies!(model::ShallowWaterModel)
barrier = Event(device(model.architecture))
workgroup, worksize = work_layout(model.grid, :xyz)
store_solution_tendencies_kernel! = store_solution_tendencies!(device(model.architecture), workgroup, worksize)
store_tracer_tendency_kernel! = store_tracer_tendency!(device(model.architecture), workgroup, worksize)
solution_event = store_solution_tendencies_kernel!(model.timestepper.G⁻,
model.grid,
model.timestepper.Gⁿ,
dependencies=barrier)
events = [solution_event]
# Tracer fields
for i in 4:length(model.timestepper.G⁻)
@inbounds Gc⁻ = model.timestepper.G⁻[i]
@inbounds Gc⁰ = model.timestepper.Gⁿ[i]
tracer_event = store_tracer_tendency_kernel!(Gc⁻, model.grid, Gc⁰, dependencies=barrier)
push!(events, tracer_event)
end
wait(device(model.architecture), MultiEvent(Tuple(events)))
return nothing
end
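# Note: copying Gⁿ into G⁻ here is what enables the quasi-second-order
# Adams-Bashforth update, roughly uⁿ⁺¹ = uⁿ + Δt*((3/2 + χ)*Gⁿ - (1/2 + χ)*G⁻),
# used by Oceananigans' default time stepper.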
|
{"hexsha": "cd9dd427a0f441ce35d50eb1d6fef86834857b6d", "size": 1588, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Models/ShallowWaterModels/store_shallow_water_tendencies.jl", "max_stars_repo_name": "ali-ramadhan/OceanDispatch.jl", "max_stars_repo_head_hexsha": "65b8851d37052e90ca4a3e0c4a1c20398b0ee09a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 239, "max_stars_repo_stars_event_min_datetime": "2019-03-05T03:46:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-04T21:53:14.000Z", "max_issues_repo_path": "src/Models/ShallowWaterModels/store_shallow_water_tendencies.jl", "max_issues_repo_name": "climate-machine/Oceananigans.jl", "max_issues_repo_head_hexsha": "066c45aee9cd1400ba3e6f2c775eb198cf062e92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 654, "max_issues_repo_issues_event_min_datetime": "2019-03-02T02:20:29.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-02T00:13:53.000Z", "max_forks_repo_path": "src/Models/ShallowWaterModels/store_shallow_water_tendencies.jl", "max_forks_repo_name": "ali-ramadhan/OceanDispatch.jl", "max_forks_repo_head_hexsha": "65b8851d37052e90ca4a3e0c4a1c20398b0ee09a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 45, "max_forks_repo_forks_event_min_datetime": "2019-03-05T18:25:16.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-04T08:04:25.000Z", "avg_line_length": 35.2888888889, "max_line_length": 115, "alphanum_fraction": 0.65302267, "num_tokens": 401}
|
function [compartmentReactions] = findRxnFromCompartment(model, compartment)
% Finds all the reactions and their identifiers in a compartment of interest.
%
% USAGE:
%
% [compartmentReactions] = findRxnFromCompartment(model,Compartment)
%
% INPUTS:
% model: COBRA model structure
% compartment: compartment of interest (e.g.: '[m]', '[n]', '[e]', etc.)
%
% OUTPUT:
% compartmentReactions: List of reactions in the compartment of interest
%
% .. Authors:
% - written by Diana El Assal 01/06/16
% - rewritten by Uri David Akavia 6-Jul-2018
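%
% EXAMPLE (illustrative):
%
%    % all mitochondrial reactions, with their formulas:
%    mitoRxns = findRxnFromCompartment(model, '[m]');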
if (length(compartment) == 1)
compartment = ['[' compartment ']'];
end
% Find mets in this compartment
compartmentMets = ~cellfun(@isempty, strfind(model.mets, compartment));
% Find reactions that involve the above mets
compartmentRxns = model.rxns(any(model.S(compartmentMets, :)));
compartmentReactions = [compartmentRxns, printRxnFormula(model, 'rxnAbbrList', compartmentRxns, 'printFlag', false)];
|
{"author": "opencobra", "repo": "cobratoolbox", "sha": "e60274d127f65d518535fd0814d20c53dc530f73", "save_path": "github-repos/MATLAB/opencobra-cobratoolbox", "path": "github-repos/MATLAB/opencobra-cobratoolbox/cobratoolbox-e60274d127f65d518535fd0814d20c53dc530f73/src/analysis/exploration/findRxnFromCompartment.m"}
|
% Conclusions
\chapter{Conclusions}
\label{ch:conclusion}
\gls{spirit} is a novel robotics teleoperation system which overlays the current position and orientation of a vehicle onto previously acquired images.
This research focuses on developing a \gls{spirit}-based user interface for aerial robots.
The proposed method combines \gls{fov} information with state estimation, and selects a suitable image to use as a background.
It works even in a low-throughput environment where video data might suffer, and only requires one camera.
Experiments were conducted to study the system's efficacy in increasing accuracy while decreasing completion time and wasted movements.
Participants were tasked with flying a drone to a location above a known target while using the proposed system, and the results were compared with those obtained by flying with a slowed-down onboard camera.
\gls{spirit} significantly improved accuracy from 0.666\,m to 0.401\,m, an improvement of almost 40\% ($\Delta$\sym{mean}=0.266\,m, \sym{effect}=1.053, \acrshort{ci}=98.1\%), and reduced the overall workload by 37.5\% ($p$=0.02408, \sym{effect}=$-0.978$), especially in the physical, temporal, and performance metrics.
It also significantly improved the users' control and awareness of absolute and relative positioning.
Their self-assessed ability to stay above the target increased by 86.9\% from an average rating of $2.556/7$ to $4.778/7$ ($p$=0.00126, \sym{effect}=2.511).
There were non-significant increases in time and path lengths, but improvements over subsequent runs indicate that these are simply an issue of familiarity with the system.
Further testing is needed to verify the effect of \gls{spirit} on completion time and path length, and its efficacy in various environments.
Overall, the tests were successful, and all participants gave praise for the system, as well as valuable feedback.
\chapter{Future work}
Several suggestions for improving the functionality of \gls{spirit} are presented here.
They have been drawn from personal experience as well as user suggestions.
The user experience may be improved if zoom functionality is added.
That way, the size of the drone model would remain relatively constant, while the background changes, even with the same image as its base.
This was present in the version of \gls{spir} shown in \cite{ito2008}.
Similarly, it might be useful to always keep the horizon horizontal and tilt the image by the amount the drone was tilted when the frame was captured, as was implemented by Hing et al.~\cite{hing2009}.
Extending it to all three Eulerian axes would allow, for instance, the pitch to be used more effectively.
This would be useful with modern drones, which often use gimballed cameras.
It was discovered that depth perception was difficult when using \gls{spirit}, and is problematic when moving around obstacles.
It might be possible to improve this by showing a shadow where the ground should be.
With distance estimation, this approach could be applied in a wide variety of environments.
Another potential solution is to use binocular cameras and have the user wear a head-mounted display.
Many users mentioned that the motion capture pole near the target was used as a marker to help orient the drone when flying using the onboard view.
They felt that it provided an unfair advantage, and that other methods of placing the motion capture camera might make the task much more difficult.
Removing the pole might give a fairer comparison with \gls{spirit}.
Alternatively, other localization methods could be used to allow the drone to operate in a wider variety of locations.
Performance can be improved by replacing \gls{pygame} with \gls{pyqt5}.
This might also solve the problem with the sound system not being able to initialize, which had previously necessitated restarts.
Finally, using an $n$-tree (an octree or, by extension, a hextree) to prune the search space might enable a bigger buffer.
A hextree would be implemented using \sym{posx}, \sym{posy}, and \sym{posz} positions, as well as yaw.
Higher dimensions for gimballed platforms would also include pitch and roll.
Instead of evaluating the entire buffer, only the frames in which the drone would have been visible in the first place can be checked, thereby significantly reducing processing time.
|
{"hexsha": "531a0ef80a4225ff40f046a1b68ec1ddfce08b07", "size": 4277, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "reports/thesis/conclusions.tex", "max_stars_repo_name": "masasin/spirit", "max_stars_repo_head_hexsha": "c8366e649eb105a8a579fb7a47dcc5aaeae6a0d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reports/thesis/conclusions.tex", "max_issues_repo_name": "masasin/spirit", "max_issues_repo_head_hexsha": "c8366e649eb105a8a579fb7a47dcc5aaeae6a0d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-03-28T12:11:45.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-31T05:44:00.000Z", "max_forks_repo_path": "reports/thesis/conclusions.tex", "max_forks_repo_name": "masasin/spirit", "max_forks_repo_head_hexsha": "c8366e649eb105a8a579fb7a47dcc5aaeae6a0d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-06-29T08:19:20.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-29T08:19:20.000Z", "avg_line_length": 82.25, "max_line_length": 318, "alphanum_fraction": 0.7970540098, "num_tokens": 941}
|
% Part: first-order-logic
% Chapter: model-theory
% Section: nonstandard-arithmetic
\documentclass[../../../include/open-logic-section]{subfiles}
\begin{document}
\olfileid{mod}{bas}{nsa}
\section{Non-standard Models of Arithmetic}
\begin{defn}
Let $\Lang{L_N}$ be the !!{language} of arithmetic, comprising a
!!{constant} $\Obj{0}$, a 2-place !!{predicate} $<$, a 1-place
!!{function} $\prime$, and 2-place !!{function}s $+$ and $\times$.
\begin{enumerate}
\item The \emph{standard model} of arithmetic is the !!{structure}
$\Struct{N}$ for $\Lang{L_N}$ having $\Nat = \{ 0, 1, 2, \dots\}$
and interpreting $\Obj{0}$ as $0$, $<$ as the less-than relation
over~$\Nat$, and $\prime$, $+$ and $\times$ as successor, addition,
and multiplication over $\Nat$, respectively.
\item \emph{True arithmetic} is the theory $\Theory{N}$.
\end{enumerate}
\end{defn}
When working in $\Lang{L_N}$ we abbreviate each term of
the form $\Obj{0}^{\prime\dots\prime}$, with $n$ applications of the
successor function to $\Obj{0}$, as~$\num{n}$.
\begin{defn}
A !!{structure}~$\Struct{M}$ for $\Lang{L_N}$ is \emph{standard} if
and only if $\Struct{N} \iso \Struct{M}$.
\end{defn}
\begin{thm}\ollabel{thm:non-std}
There are non-standard !!{enumerable} models of true arithmetic.
\end{thm}
\begin{proof}
Expand $\Lang{L_N}$ by introducing a new !!{constant}~$c$, and
consider the theory
\[
\Theory{N} \cup \Setabs{\num{n} < c}{n \in \Nat}.
\]
Any finite subset of this theory contains $\num{n} < c$ for only
finitely many~$n$, and so is satisfied in an expansion of $\Struct{N}$
interpreting $c$ as a sufficiently large number. The theory is thus
finitely satisfiable, and by compactness it has a model $\Struct{M}$,
which can be taken to be !!{enumerable} by the Downward
L\"owenheim-Skolem theorem. Where $\Domain{M}$ is the domain of
$\Struct{M}$, let $\Struct{M}$ interpret the non-logical constants of
$\Lang{L_N}$ as $\mathbf{z} = \Assign{\Obj{0}}{M} \in \Domain{M}$, ${\prec} =
\Assign{<}{M} \subseteq \Domain{M}^2$, $* = \Assign{\prime}{M} \colon
\Domain{M} \to \Domain{M}$, and $\oplus = \Assign{+}{M}, \otimes =
\Assign{\times}{M} \colon \Domain{M}^2 \to \Domain{M}$. For each $x
\in \Domain{M}$, we write $x^*$ for the element of $\Domain{M}$
obtained from $x$ by application of~$*$.
Now, if $h$ were an isomorphism of $\Struct{N}$ and $\Struct{M}$,
there would be $n \in \Nat$ such that $h(n) = \Assign{c}{M}$. So let
$s$ be any assignment in $\Struct{N}$ such that $s(x) = n$. Then
$\Sat{N}{\eq[\num{n}][x]}[s]$; by the proof of \olref[iso]{thm:isom},
also $\Sat{M}{\eq[\num{n}][x]}[h \circ s]$, so that $\Assign{c}{M} =
\mathbf{z}^{*\cdots *}$ (with $*$ iterated $n$ times). But this is impossible
since by assumption $\Sat{M}{\num{n} < c}$ and $\prec$ is
irreflexive. So $\Struct{M}$ is non-standard.
\end{proof}
\begin{prob}
A relation $R$ over a set $X$ is \emph{well-founded} if and only if
there are no infinite descending chains in~$R$, i.e., if there are no
$x_0$, $x_1$, $x_2$,~\dots in $X$ such that $\dots x_2Rx_1Rx_0$. Assuming
Zermelo-Fraenkel set theory~$ZF$ is consistent, show that there are
non-well-founded models of $ZF$, i.e., models $\Struct{M}$ such that
$\dots x_2 \in x_1 \in x_0$.
\end{prob}
Since the non-standard model $\Struct{M}$ from \olref{thm:non-std} is
elementarily equivalent to the standard one, a number of properties of
$\Struct{M}$ can be derived. The rest of this section is devoted to
such a task, which will allow us to obtain a precise characterization
of !!{enumerable} non-standard models of $\Theory{N}$.
\begin{enumerate}
\item No member of $\Domain{M}$ is $\prec$-less than itself: the sentence
$\lforall[x][\lnot x < x]$ is true in $\Struct{N}$ and therefore in
$\Struct{M}$.
\item By similar reasoning we obtain that $\prec$ is a \emph{linear
ordering} of $\Domain{M}$, i.e., a total, irreflexive, transitive relation
on $\Domain{M}$.
\item The element $\mathbf{z}$ is the $\prec$-least element of $\Domain{M}$.
\item Any member of $\Domain{M}$ is $\prec$-less than its $*$-successor and
$x^*$ is the $\prec$-least member of $\Domain{M}$ greater than $x$.
\item $\Struct{M}$ contains an initial segment (of $\prec$) isomorphic
to $\Nat$: $\mathbf{z}, \mathbf{z}^*, \mathbf{z}^{**}, \dots$, which
we call the \emph{standard part} of $\Domain{M}$. Any other member
of $\Domain{M}$ is \emph{non-standard}. There must be non-standard
members of $\Domain{M}$, or else the function $h$ from the proof of
\olref{thm:non-std} is an isomorphism. We use $n, m, \dots$ as
!!{variable}s ranging on this standard part of $\Struct{M}$.
\item Every non-standard element is greater than any standard one;
this is because for every $n \in \Nat$,
\[
\Sat{N}{\lforall[z][(\lnot(\eq[z][\Obj{0}] \lor
\dots \lor \eq[z][\num{n}]) \lif \num{n} < z)]},
\]
so if $z \in \Domain{M}$ is different from all the standard
elements, it must be \emph{greater} than all of them.
\item Any member of $\Domain{M}$ other than $\mathbf{z}$ is the
$*$-successor of some unique element of $\Domain{M}$, denoted by
$^*x$. If $x = y^*$ then both $x$ and $y$ are standard if one of
them is (and both non-standard if one of them is).
\item Define an equivalence relation $\approx$ over $\Domain{M}$ by
saying that $x \approx y$ if and only if for some \emph{standard}
$n$, either $x \oplus n = y$ or $y \oplus n =x$. In other words, $x
\approx y$ if and only if $x$ and $y$ are a finite distance
apart. If $n$ and $m$ are standard then $n \approx m$. Define the
\emph{block} of $x$ to be the equivalence class $[x] = \Setabs{y}{x
\approx y}$.
\item Suppose that $x \prec y$ where $x \not\approx y$. Since
$\Sat{N}{\lforall[x][\lforall[y][(x < y \lif (x' < y \lor x' =
y))]]}$, either $x^* \prec y$ or $x^* = y$. The latter is
impossible because it implies $x \approx y$, so $x^* \prec
y$. Similarly, if $x \prec y$ and $x \not\approx y$, then $x \prec
{^*y}$. Therefore if $x \prec y$ and $x \not\approx y$, then every
$w \approx x$ is $\prec$-less than every $v \approx y$. Accordingly,
each block $[x]$ forms a doubly infinite chain
\[
\cdots \prec {^{**}x} \prec {^*}x \prec x \prec x^* \prec x^{**}
\prec \cdots
\]
which is referred to as a $Z$-chain because it has the order type of
the integers.
\item The $\prec$ ordering can be lifted up to the blocks: if $x \prec y$
then the block of $x$ is less than the block of $y$. A block is
\emph{non-standard} if it contains a non-standard element. The
standard block is the least block.
\item There is no least non-standard block: if $y$ is non-standard
then there is an $x \prec y$ where $x$ is also non-standard and $x
\not\approx y$. Proof: in the standard model $\Struct{N}$, every
number is divisible by two, possibly with remainder one, i.e.,
$\Sat{N}{\lforall[y][\lforall[x][(y = x + x \lor y = x + x +
\Obj{0}')]]}$. By elementary equivalence, for every $y \in
\Domain{M}$ there is $x \in \Domain{M}$ such that either $x \oplus x
= y$ or $x \oplus x \oplus \mathbf{z}^*= y$. If $x$ were standard,
then so would be $y$; so $x$ is non-standard. Furthermore, $x$ and
$y$ belong to different blocks, i.e., $x \not\approx y$. To see
this, assume they did belong to the same block, i.e., $x \oplus n =
y$ for some standard $n$. If $y = x \oplus x$, then $x \oplus n = x
\oplus x$, whence $x = n$ by the cancellation law for addition
(which holds in $\Struct{N}$ and therefore in $\Struct{M}$ as well),
and $x$ would be standard after all. Similarly if $y = x \oplus x
\oplus \mathbf{z}^*$.
\item By a similar argument, there is no greatest block.
\item The ordering of the blocks is dense: if $[x]$ is less than $[y]$
(where $x \not\approx y$), then there is a block $[u]$ distinct from
both that lies between them. Suppose $x \prec y$. As before, $x \oplus
y$ is divisible by two (possibly with remainder) so there is a $u
\in \Domain{M}$ such that either $x \oplus y = u \oplus u$ or $x
\oplus y = u \oplus u \oplus \mathbf{z}^*$. The element $u$ is the
average of $x$ and $y$, and so is between them. Assume $x \oplus y =
u \oplus u$ (the other case being similar): if $u \approx x$ then
for some standard $n$:
\[
x \oplus y = x \oplus n \oplus x \oplus n,
\]
so $y = x \oplus n \oplus n$ and we would have $x \approx y$,
against assumption. We conclude that $u \not\approx x$. A similar
argument gives $u \not\approx y$.
\end{enumerate}
The non-standard blocks are therefore ordered like the rationals: they
form !!a{enumerable} dense linear ordering without endpoints. It follows
that for any two !!{enumerable} non-standard models $\Struct{M_1}$ and
$\Struct{M_2}$ of true arithmetic, their reducts to the language
containing $<$ and $=$ only are isomorphic. Indeed, an isomorphism $h$
can be defined as follows: the standard parts of $\Struct{M_1}$ and
$\Struct{M_2}$ are isomorphic to the standard model $\Struct{N}$ and
hence to each other. The blocks making up the non-standard part are
themselves ordered like the rationals and therefore by
\olref[dlo]{thm:cantorQ} are isomorphic; an isomorphism of the blocks
can be extended to an isomorphism \emph{within} the blocks by matching
up arbitrary elements in each, and then taking the image of the
successor of $x$ in $\Struct{M_1}$ to be the successor of the image of
$x$ in $\Struct{M_2}$. Note that it does \emph{not} follow that
$\Struct{M_1}$ and $\Struct{M_2}$ are isomorphic in the full
language of arithmetic (indeed, isomorphism is always relative to a
signature), as there are non-isomorphic ways to define addition and
multiplication over $\Domain{M_1}$ and $\Domain{M_2}$. (This also
follows from a famous theorem due to Vaught that the number of
countable models of a complete theory cannot be~2.)
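In order-theoretic notation, then, the underlying order of any
!!{enumerable} non-standard model of true arithmetic is
\[
\Nat + \Int \cdot \Rat,
\]
i.e., a standard initial segment followed by densely many $Z$-chains,
with neither a first nor a last chain.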
\begin{prob}
Show that there can be no greatest block in a non-standard model of
arithmetic.
\end{prob}
\begin{prob}
Let $\Lang{L}$ be the first-order !!{language} containing $<$ as its
only !!{predicate} (besides $\eq$), and let $\Struct{N} = (\Nat,
<)$. All the finite or cofinite subsets of $\Struct{N}$ are
definable. Show that these are the \emph{only} definable subsets of
$\Struct{N}$.
(Hint: First, let $\Obj{prc}(x,y)$ be the
$\Lang{L}$-formula abbreviating ``$x$ is the immediate predecessor
of $y$:''
\[
x<y \land \lnot \lexists[z][(x<z \land z < y)].
\]
Now, to any definable subset of $\Struct{N}$ there corresponds a
formula $!A(x)$ in $\Lang{L}$. For any such $!A$,
consider the sentence $!D$:
\[
\lexists[x][\lforall[y][\lforall[z][((x<y \land x<z \land \Obj{prc}(y,z)
\land !A(y)) \lif !A(z))]]].
\]
Show that $\Sat{N}{!D}$ if and only if the subset of
$\Struct{N}$ defined by $!A$ is either finite or cofinite.
Now, let $\Struct{M}$ be a non-standard model elementarily
equivalent to $\Struct{N}$. If $a \in \Domain{M}$ is
non-standard, let $b, c \in \Domain{M}$ be greater than $a$, and
let $b$ be the immediate predecessor of~$c$. Then there is an
automorphism $h$ of $\Domain{M}$ such that $h(b)=c$
(why?). Therefore, if $b$ satisfies $!A$, so does $c$ (why?). It
follows that $!D$ is true in $\Struct{M}$, and hence also in
$\Struct{N}$. But this implies that the subset of $\Struct{N}$
defined by $!A$ is either finite or cofinite.
\end{prob}
\end{document}
|
{"hexsha": "451377a942b1eccb2c60383f7648dd35a41e7099", "size": 11089, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/model-theory/basics/nonstandard-arithmetic.tex", "max_stars_repo_name": "GKerfImf/OpenLogic", "max_stars_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "content/model-theory/basics/nonstandard-arithmetic.tex", "max_issues_repo_name": "GKerfImf/OpenLogic", "max_issues_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/model-theory/basics/nonstandard-arithmetic.tex", "max_forks_repo_name": "GKerfImf/OpenLogic", "max_forks_repo_head_hexsha": "5791905d3149f68e05885290f448054b98a0e51b", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.423580786, "max_line_length": 78, "alphanum_fraction": 0.6654342141, "num_tokens": 3737}
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as spi
# total no. of agents
n = 50
# initial fraction of cooperators
fc0 = 0.7
# initial amount of resource available
R0 = 100
# maximum amount of resource
Rmax = 200
# Social parameters
ec = 0.483/n #level of effort (cooperators)
ed = 1.826/n #level of effort (defectors)
w = 15 #cost of harvesting
# Resource parameters
# c inflow, d discharge, q catchability
c, d, q = 50, 50, 1
# harvest-cooperator equations
def deriv(t, y):
    fc, R = y
    E = n*(fc*ec+(1-fc)*ed) # total extraction effort
    # Cobb-Douglas production: harvest from effort E and resource stock R
    gamma = 10
    a = 0.6
    b = 0.2
    cobbdoug = gamma*(E**a)*(R**b)
    # Gompertz sanctioning term (grows with the cooperator fraction fc)
    h = 0.34
    tg = -150  # renamed from t so the time argument is not shadowed
    g = -10
    gomp = h * np.exp(tg * np.exp(g*fc))
    uc = ec*(cobbdoug/E)-w # cooperator utility
    ud = ed*(cobbdoug/E)-w-gomp*(ed-ec)/ed # defector utility (sanction applied)
    dRdt = c - d*(R/Rmax)**2 - q*E*R # resource: inflow - discharge - harvest
    dfcdt = fc*(1-fc)*(uc-ud) # replicator dynamics for the cooperator share
    # return derivatives in the same order as the state y = (fc, R)
    return [dfcdt, dRdt]
# initial condition
y0 = fc0, R0
# (time points are collected below from the integrator's accepted steps)
ode = spi.ode(deriv)
# BDF method suited to stiff systems of ODEs
t_end = 15000.
t_start = 1.
t_step = 1.
t_interval = np.arange(t_start, t_end, t_step)
ode.set_integrator('vode',nsteps=500,method='bdf')
ode.set_initial_value(y0,t_start)
ts = []
ys = []
while ode.successful() and ode.t < t_end:
ode.integrate(ode.t + t_step)
ts.append(ode.t)
ys.append(ode.y)
t = np.vstack(ts)
F,R = np.vstack(ys).T
# Plot
plt.subplot(211)
plt.plot(t, F, 'b', lw=2)
plt.xlabel('Time')
plt.ylabel('Fraction Cooperators')
plt.subplot(212)
plt.plot(t,R,'r', lw=2)
plt.xlabel('Time')
plt.ylabel('Resource Stock')
plt.show()
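# Optional sketch (not part of the original script): the same stiff BDF
# integration with SciPy's newer solve_ivp interface; uncomment to try.
# from scipy.integrate import solve_ivp
# sol = solve_ivp(deriv, (t_start, t_end), y0, method='BDF',
#                 t_eval=np.arange(t_start, t_end, t_step))
# F_alt, R_alt = sol.y  # rows follow the state order (fc, R)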
|
{"hexsha": "4e07feb302fca2de47098470c8c9dc032eb44638", "size": 1656, "ext": "py", "lang": "Python", "max_stars_repo_path": "TSLModel2.py", "max_stars_repo_name": "adrianhindes/network-tsl", "max_stars_repo_head_hexsha": "70de88f3dde801e99481b3e4365b1c0461e54db3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-30T01:51:46.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-30T01:51:46.000Z", "max_issues_repo_path": "TSLModel2.py", "max_issues_repo_name": "adrianhindes/network-tsl", "max_issues_repo_head_hexsha": "70de88f3dde801e99481b3e4365b1c0461e54db3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "TSLModel2.py", "max_forks_repo_name": "adrianhindes/network-tsl", "max_forks_repo_head_hexsha": "70de88f3dde801e99481b3e4365b1c0461e54db3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.7272727273, "max_line_length": 60, "alphanum_fraction": 0.6304347826, "include": true, "reason": "import numpy,import scipy", "num_tokens": 576}
|
import numpy as np
import pandas as pd
import os, copy
import joblib, logging
import skfuzzy as fuzz
import difflib, random, pickle
from deap import base, creator, tools, algorithms
from itertools import repeat
from collections.abc import Sequence  # Sequence lives in collections.abc on Python 3.3+
import re
from sklearn.linear_model import LinearRegression, ElasticNetCV
from joblib import Parallel, delayed
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
MAX_EVALUATIONS = 30000
JOBS = 5
POPULATION_SIZE = 50
class cluster_optimize():
def __init__(self, static_data):
self.istrained = False
self.add_individual_rules = static_data['clustering']['add_rules_indvidual']
self.import_external_rules = static_data['clustering']['import_external_rules']
self.njobs=static_data['clustering']['njobs']
self.resampling = static_data['resampling']
self.path_fuzzy = static_data['path_fuzzy_models']
self.file_fuzzy = static_data['clustering']['cluster_file']
self.type = static_data['type']
self.static_data = static_data
try:
self.load()
        except Exception:  # no saved model yet; start fresh
pass
logger = logging.getLogger('log_fuzzy.log')
logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.path_fuzzy, 'log_fuzzy.log'), 'w')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
self.logger = logger
def create_mfs(self, model_mfs, var_name, num_mf, old_num_mf):
mfs = []
var_range = [-0.005, 1.005]
if var_name in {'hdd_h', 'temp_max', 'flux', 'wind', 'temp', 'Temp', 'load', 'power'}:
mean = np.linspace(var_range[0], var_range[1], num=num_mf)
std = 1.25 * var_range[1] / num_mf
for i in range(num_mf):
mfs.append({'name': 'mf_' + var_name + str(old_num_mf + i),
'var_name': var_name,
'prange':std,
'type': 'gauss',
'param': [mean[i], std/2],
'universe': np.arange(var_range[0] - std - .01, var_range[1] + std + .01, .001),
'func': fuzz.gaussmf(np.arange(var_range[0] - std - .01, var_range[1] + std + .01, .001),
mean[i], std)})
elif var_name in {'sp_index', 'dayweek', 'cloud', 'hour', 'month', 'direction', 'sp_days'}:
mean = np.linspace(var_range[0], var_range[1], num=num_mf)
std = 1.25 * var_range[1] / num_mf
std1 = 1.125 * var_range[1] / (num_mf)
for i in range(num_mf):
param = [mean[i] - std, mean[i] - std1, mean[i] + std1, mean[i] + std]
mfs.append({'name': 'mf_' + var_name + str(old_num_mf + i),
'var_name': var_name,
'prange': std,
'type': 'trap',
'param': param,
'universe': np.arange(var_range[0] - .01 - std, var_range[1] + std + .01, .001),
'func': fuzz.trapmf(np.arange(var_range[0] - .01 - std, var_range[1] + std + .01, .001, ),
param)})
else:
            raise NameError('MF type not recognized')
model_mfs[var_name] = mfs
return model_mfs
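    # Each membership function above is a plain dict: 'type' is 'gauss'
    # (param = [mean, std]) or 'trap' (param = 4 trapezoid knots), 'prange' is
    # the allowed parameter perturbation range, 'universe' is the discretised
    # input range, and 'func' holds the sampled membership values.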
def run(self, X_train, y_train, X_test, y_test, rated, num_samples=500, n_ratio=0.4, ngen=300):
scale_y = joblib.load(os.path.join(self.static_data['path_data'], 'Y_scaler.pickle'))
y_test = scale_y.inverse_transform(y_test.values).ravel()
if rated is None:
rated = y_test
else:
rated = rated
self.n_ratio = n_ratio
if not os.path.exists(os.path.join(self.path_fuzzy, 'models.pickle')):
fuzzy_models = []
self.p_list = []
for case in self.static_data['clustering']['var_imp']:
self.base_name = [v for v in sorted(case.keys())][0]
self.var_names = [v for v in sorted(case[self.base_name][0].keys())]
self.num_base_mfs = len(case[self.base_name])
self.base_mfs = dict()
self.base_mfs = self.create_mfs(self.base_mfs, self.base_name, self.num_base_mfs, 0)
fuzzy_model = dict()
fuzzy_model['mfs'] = dict()
for n in range(len(self.base_mfs[self.base_name])):
fuzzy_model['mfs'][self.base_name + str(n)] = dict()
fuzzy_model['mfs'][self.base_name+str(n)][self.base_name] = [self.base_mfs[self.base_name][n]]
for var in self.var_names:
old_num_mf = 0
for n, base_case in enumerate(case[self.base_name]):
n_mf = base_case[var]['mfs']
fuzzy_model['mfs'][self.base_name + str(n)] = self.create_mfs(fuzzy_model['mfs'][self.base_name + str(n)]
, var, n_mf, old_num_mf)
old_num_mf += n_mf
self.var_lin = self.static_data['clustering']['var_lin']
self.p = len(self.var_names) + 1
self.p_list.append(self.p)
var_del=[]
for var in self.var_names + [self.base_name]:
if var not in X_train.columns:
var_names = [c for c in X_train.columns if var in c]
X_train[var] = X_train[var_names].mean(axis=1)
X_test[var] = X_test[var_names].mean(axis=1)
var_del.append(var)
if var not in self.var_lin:
self.var_lin.append(var)
lin_models = ElasticNetCV(cv=5).fit(X_train[self.var_lin].values, y_train.values.ravel())
preds = scale_y.inverse_transform(lin_models.predict(X_test[self.var_lin].values).reshape(-1, 1)).ravel()
err = (preds.ravel() - y_test) / rated
self.rms_before = np.sum(np.square(err))
self.mae_before = np.mean(np.abs(err))
print('rms = %s', self.rms_before)
print('mae = %s', self.mae_before)
self.logger.info("Objective before train: %s", self.mae_before)
problem = cluster_problem(fuzzy_model['mfs'], X_train[self.var_lin], self.p, rated, self.resampling, self.add_individual_rules, self.logger, self.njobs, num_samples=num_samples, n_ratio=self.n_ratio)
problem.run(X_train[self.var_lin], y_train, X_test[self.var_lin], y_test, scale_y, fuzzy_model['mfs'], 75, 100, ngen=ngen)
fuzzy_model = problem.fmodel
self.logger.info("Objective after train: %s", str(fuzzy_model['result']))
fuzzy_model['p'] = self.p
fuzzy_models.append(fuzzy_model)
self.fuzzy_models = fuzzy_models
joblib.dump(fuzzy_models, os.path.join(self.path_fuzzy, 'models.pickle'))
else:
self.fuzzy_models = joblib.load(os.path.join(self.path_fuzzy, 'models.pickle'))
fmodel = dict()
fmodel['mfs'] = dict()
fmodel['rules'] = dict()
fmodel['result'] = []
num = 0
for fuzzy_model in self.fuzzy_models:
i = 0
for var in fuzzy_model['mfs'].keys():
fmodel['mfs'][var] = fuzzy_model['mfs'][var]
for rule in fuzzy_model['rules']:
fmodel['rules']['rule.' + str(num + i)] = fuzzy_model['rules'][rule]
for mf in range(len(fmodel['rules']['rule.' + str(num + i)])):
fmodel['rules']['rule.' + str(num + i)][mf]['p'] = fuzzy_model['p']
i += 1
fmodel['result'].append(fuzzy_model['result'])
num += len(fuzzy_model['rules'])
self.best_fuzzy_model = copy.deepcopy(fmodel)
joblib.dump(self.best_fuzzy_model, os.path.join(self.path_fuzzy, self.file_fuzzy))
if 'horizon' in self.import_external_rules:
self.compact_external_mfs()
if len(var_del) > 0:
X_train = X_train.drop(columns=var_del)
X_test = X_test.drop(columns=var_del)
self.istrained = True
self.save()
def compact_external_mfs(self):
self.fuzzy_file = os.path.join(self.path_fuzzy, self.file_fuzzy)
fmodel = joblib.load(self.fuzzy_file)
type_mf = 'horizon'
var_name = 'horizon'
params = [
[0.5, 0.9, 1.1, 1.5],
[1.5, 1.9, 2.1, 2.5],
[2.5, 2.9, 3.1, 3.5],
[3.5, 3.9, 4.1, 4.5],
[4.5, 4.9, 5.1, 5.5],
[5.5, 5.9, 6.1, 6.5],
[6.5, 6.9, 7.1, 7.5],
[7.5, 7.9, 8.1, 8.5],
[8.5, 8.9, 12.1, 15.5],
[12, 13.2, 22.1, 27.5],
[22.1, 25.2, 36.1, 42.5],
[38.1, 42.2, 48.1, 52.5],
]
mfs = []
i = 0
for param in params:
mfs.append({'name': 'mf_' + type_mf + str(i),
'var_name': var_name,
'type': 'trap',
'param': param,
'universe': np.arange(0, 49, .01),
'func': fuzz.trapmf(np.arange(0, 49, .01), param)})
i += 1
fmodel['mfs']['horizon'] = mfs
i = 0
rules = dict()
for mf in mfs:
for rule in fmodel['rules']:
rules['rule.' + str(i)] = fmodel['rules'][rule] + [mf]
i += 1
fmodel['rules'] = rules
joblib.dump(fmodel, os.path.join(self.path_fuzzy, self.file_fuzzy))
def load(self):
if os.path.exists(os.path.join(self.path_fuzzy, 'fuzzy_model.pickle')):
try:
f = open(os.path.join(self.path_fuzzy, 'fuzzy_model.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
tdict={}
for k in tmp_dict.keys():
if k not in ['logger', 'static_data', 'data_dir', 'cluster_dir', 'n_jobs']:
tdict[k] = tmp_dict[k]
self.__dict__.update(tdict)
            except Exception:
raise ImportError('Cannot open fuzzy model')
else:
raise ImportError('Cannot find fuzzy model')
def save(self):
f = open(os.path.join(self.path_fuzzy, 'fuzzy_model.pickle'), 'wb')
dict = {}
for k in self.__dict__.keys():
if k not in ['logger', 'static_data', 'data_dir', 'cluster_dir', 'n_jobs']:
dict[k] = self.__dict__[k]
pickle.dump(dict, f)
f.close()
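# The helpers below implement the GA variation operators used by cluster_problem:
# cx_fun mixes two-point crossover with a blend (BLX-alpha) crossover, mut_fun
# mixes unbounded Gaussian mutation with DEAP-style polynomial bounded mutation,
# and checkBounds clips offspring back into [mn, mx] after mating/mutation.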
def cx_fun(ind1,ind2, alpha):
if random.random()>0.5:
size = min(len(ind1), len(ind2))
cxpoint1 = random.randint(1, size)
cxpoint2 = random.randint(1, size - 1)
if cxpoint2 >= cxpoint1:
cxpoint2 += 1
else: # Swap the two cx points
cxpoint1, cxpoint2 = cxpoint2, cxpoint1
ind1[cxpoint1:cxpoint2], ind2[cxpoint1:cxpoint2] \
= ind2[cxpoint1:cxpoint2], ind1[cxpoint1:cxpoint2]
else:
for i, (x1, x2) in enumerate(zip(ind1, ind2)):
gamma = (1. + 2. * alpha) * random.random() - alpha
ind1[i] = (1. - gamma) * x1 + gamma * x2
ind2[i] = gamma * x1 + (1. - gamma) * x2
return ind1, ind2
def mut_fun(individual, mu, sigma, eta, low, up, indpb):
if random.random() > 0.75:
size = len(individual)
if not isinstance(mu, Sequence):
mu = repeat(mu, size)
elif len(mu) < size:
raise IndexError("mu must be at least the size of individual: %d < %d" % (len(mu), size))
if not isinstance(sigma, Sequence):
sigma = repeat(sigma, size)
elif len(sigma) < size:
raise IndexError("sigma must be at least the size of individual: %d < %d" % (len(sigma), size))
for i, m, s in zip(range(size), mu, sigma):
if random.random() < indpb:
individual[i] += random.gauss(m, s)
else:
size = len(individual)
if not isinstance(low, Sequence):
low = repeat(low, size)
elif len(low) < size:
raise IndexError("low must be at least the size of individual: %d < %d" % (len(low), size))
if not isinstance(up, Sequence):
up = repeat(up, size)
elif len(up) < size:
raise IndexError("up must be at least the size of individual: %d < %d" % (len(up), size))
for i, xl, xu in zip(range(size), low, up):
if random.random() <= indpb:
x = individual[i]
delta_1 = (x - xl) / (xu - xl)
delta_2 = (xu - x) / (xu - xl)
rand = random.random()
mut_pow = 1.0 / (eta + 1.)
if rand < 0.5:
xy = 1.0 - delta_1
if xy<0:
xy=1e-6
val = 2.0 * rand + (1.0 - 2.0 * rand) * xy ** (eta + 1)
delta_q = val ** mut_pow - 1.0
else:
xy = 1.0 - delta_2
if xy<0:
xy=1e-6
val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * xy ** (eta + 1)
delta_q = 1.0 - val ** mut_pow
x = x + delta_q * (xu - xl)/2
x = min(max(x, xl), xu)
individual[i] = x
return individual,
def checkBounds(mn, mx):
def decorator(func):
        def wrapper(*args, **kwargs):
            offspring = func(*args, **kwargs)
for child in offspring:
for i in range(len(child)):
if child[i] > mx[i]:
child[i] = mx[i]
elif child[i] < mn[i]:
child[i] = mn[i]
return offspring
        return wrapper
return decorator
class cluster_problem():
def __init__(self, mfs, X, p, rated, resampling, add_individual_rules, logger, njobs, num_samples=500, n_ratio=0.4):
self.logger = logger
self.njobs=njobs
self.resampling = resampling
self.add_individual_rules = add_individual_rules
self.num_samples = num_samples
self.n_ratio = n_ratio
self.mfs = mfs
self.rated = rated
self.p = p
self.rules = dict()
for base_case in self.mfs.keys():
self.rules = self.create_rules(self.rules, self.mfs[base_case])
x = []
self.lower_bound = []
self.upper_bound = []
self.sigma = []
self.index_constrains = []
self.number_of_constraints = 0
for rule_name, rule in sorted(self.rules.items()):
for mf in rule:
param = mf['param']
xrange = [mf['universe'][0], mf['universe'][-1]]
prange = mf['prange']
x = x + param
if len(param) == 2:
self.index_constrains.append(np.arange(len(x) - 2, len(x)))
self.number_of_constraints = self.number_of_constraints + 3
lo = param[0] - prange if (param[0] - prange) > xrange[0] else xrange[0]
up = param[0] + prange if (param[0] + prange)< xrange[1] else xrange[1]
self.lower_bound.extend([lo, 0.0001])
self.upper_bound.extend([up, prange])
self.sigma.extend([prange, prange])
elif len(param) == 4:
self.index_constrains.append(np.arange(len(x) - 4, len(x)))
self.number_of_constraints = self.number_of_constraints + 7
for i in param:
lo = param[0] - prange if (param[0] - prange) > xrange[0] else xrange[0]
up = param[3] + prange if (param[3] + prange) < xrange[1] else xrange[1]
self.lower_bound.append(lo)
self.upper_bound.append(up)
self.sigma.append(prange)
self.number_of_variables = len(x)
self.number_of_objectives = 2
self.x = x
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", np.ndarray, fitness=creator.FitnessMin)
self.toolbox = base.Toolbox()
attributes=[]
for i in range(self.number_of_variables):
self.toolbox.register("attribute"+str(i), random.gauss, self.lower_bound[i], self.upper_bound[i])
attributes.append(self.toolbox.__getattribute__("attribute"+str(i)))
self.toolbox.register("individual1", tools.initCycle, creator.Individual, tuple(attributes), n=1)
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual1, n=100)
self.toolbox.register("mate", cx_fun, alpha=0.5)
self.toolbox.register("mutate", mut_fun, mu=0, sigma=self.sigma, eta=0.8, low=self.lower_bound, up=self.upper_bound, indpb=0.6)
self.toolbox.register("select", tools.selTournament, tournsize=4)
self.toolbox.register("evaluate", evaluate)
self.hof = tools.ParetoFront(lambda x, y: (x == y).all())
self.stats = tools.Statistics(lambda ind: ind.fitness.values)
self.stats.register("Avg", np.mean)
self.stats.register("Std", np.std)
self.stats.register("Min", np.min)
self.stats.register("Max", np.max)
self.toolbox.decorate("mate", checkBounds(self.lower_bound, self.upper_bound))
self.toolbox.decorate("mutate", checkBounds(self.lower_bound, self.upper_bound))
def create_rules(self, final_rules, model_mfs):
rules = []
for mf in sorted(model_mfs.keys()):
if len(rules) == 0:
for f in model_mfs[mf]:
rules.append([f])
else:
new_rules = []
for rule in rules:
for f in model_mfs[mf]:
new_rules.append(rule + [f])
rules = new_rules
if self.add_individual_rules:
for mf in sorted(model_mfs.keys()):
for f in model_mfs[mf]:
rules.append([f])
n_old_rules = len(final_rules)
for i in range(len(rules)):
final_rules['rule.' + str(n_old_rules + i)] = rules[i]
return final_rules
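    # Example of the combinatorics: with 2 variables carrying 3 MFs each,
    # create_rules emits 3 * 3 = 9 conjunctive rules, plus 3 + 3 = 6 extra
    # single-antecedent rules when add_individual_rules is enabled.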
def run(self, X, y, X_test, y_test, scale_y, mfs, mu, lambda_, cxpb=0.6, mutpb=0.4, ngen=300):
perf = np.inf
front_best = None
rules = copy.deepcopy(self.rules)
self.population=self.toolbox.population()
param_ind = creator.Individual(self.x)
self.population.pop()
self.population.insert(len(self.population), param_ind)
i=0
while i < 0.5 * len(self.population):
param_ind = mut_fun(self.x, 0, self.sigma, 0.8, self.lower_bound, self.upper_bound, 0.6)
param_ind = creator.Individual(param_ind[0])
self.population.pop(i)
self.population.insert(i, param_ind)
i+=1
        assert lambda_ >= mu, "lambda_ must be greater than or equal to mu."
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in self.population if not ind.fitness.valid]
rules = copy.deepcopy(self.rules)
fit1 = evaluate(np.array(invalid_ind[-1]).ravel(),
X, y, X_test, y_test,scale_y, self.rated,
mfs, rules, self.p, self.resampling, self.num_samples, self.n_ratio)
print('initial candidate error ', fit1[1])
rules = copy.deepcopy(self.rules)
fitnesses = Parallel(n_jobs=self.njobs)(delayed(evaluate)(np.array(individual).ravel(),
X, y, X_test, y_test, scale_y, self.rated,
mfs, rules, self.p, self.resampling, self.num_samples, self.n_ratio) for individual in invalid_ind)
# fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
if self.hof is not None:
self.hof.update(self.population)
self.logbook = tools.Logbook()
# Gather all the fitnesses in one list and compute the stats
fits = np.array([ind.fitness.values for ind in self.population])
maximums = np.nanmax(fits, axis=0)
minimums = np.nanmin(fits, axis=0)
self.logbook.header = ['gen', 'nevals'] + ['Max_sse:', 'Min_sse:', 'Max_mae:', 'Min_mae:']
        self.logger.info('Iter: %s, Min_sse: %s, Min_mae: %s', 0, *minimums)
record = {'Max_sse:': maximums[0], 'Min_sse:': minimums[0], 'Max_mae:': maximums[1], 'Min_mae:': minimums[1]}
print('GA rbf running generation 0')
print(record)
self.logbook.record(gen=0, nevals=len(invalid_ind), **record)
print(self.logbook.stream)
# Begin the generational process
for gen in range(1, ngen + 1):
# Vary the population
rules = copy.deepcopy(self.rules)
offspring = algorithms.varOr(self.population, self.toolbox, lambda_, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = Parallel(n_jobs=self.njobs)(delayed(evaluate)(np.array(individual).ravel(),
X, y, X_test, y_test, scale_y, self.rated,
mfs, rules, self.p, self.resampling, self.num_samples, self.n_ratio) for
individual in invalid_ind)
# fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
fits = np.array([ind.fitness.values for ind in self.population])
maximums = np.nanmax(fits, axis=0)
minimums = np.nanmin(fits, axis=0)
# Update the hall of fame with the generated individuals
if self.hof is not None:
self.hof.update(self.population)
# Select the next generation population
self.population[:] = self.toolbox.select(offspring, mu)
# Update the statistics with the new population
record = {'Max_sse:': maximums[0], 'Min_sse:': minimums[0], 'Max_mae:': maximums[1],
'Min_mae:': minimums[1]}
print('GA rbf running generation ', str(gen))
print(record)
self.logbook.record(gen=gen, nevals=len(invalid_ind), **record)
front = self.population
for i in range(len(front)):
if front[i].fitness.getValues()[0] < perf:
front_best = front[i]
perf = front[i].fitness.getValues()[0]
            self.logger.info('Iter: %s, Min_sse: %s, Min_mae: %s', str(gen), *minimums)
self.fmodel = self.evaluate(np.array(front_best).ravel(), X, y, X_test, y_test, scale_y, self.rated,
mfs, self.rules, self.p, self.resampling)
def evaluate(self, x, X, y, X_test, y_test, scale_y, rated, mfs, rules, p, resampling):
# print(solution.variables)
i = 0
for rule_name, rule in sorted(rules.items()):
for mf in rule:
if mf['type'] == 'gauss':
mf['param'] = x[i:i + 2]
mf['func'] = fuzz.gaussmf(mf['universe'],
mf['param'][0],
np.abs(mf['param'][1]))
i += 2
elif mf['type'] == 'trap':
mf['param'] = sorted(x[i:i + 4])
mf['func'] = fuzz.trapmf(mf['universe'], mf['param'])
i += 4
activations = pd.DataFrame(index=X.index, columns=[rule for rule in sorted(rules.keys())])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
lin_models = dict()
remove_null_rules = []
total=0
for rule in sorted(activations.columns):
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
if len(indices) > self.num_samples and len(indices) < self.n_ratio * X.shape[0] :
X1 = X.loc[indices].values
y1 = y.loc[indices].values
lin_models[rule] = LinearRegression().fit(X1, y1.ravel())
else:
act = activations.loc[indices].copy(deep=True)
act = act.drop(columns=[rule])
if not act.isnull().all(axis=1).any():
del rules[rule]
else:
                    raise ValueError('Cannot remove rule {0}'.format(rule))
print(len(indices))
self.logger.info("Number of samples of rule %s is %s", rule, len(indices))
total += len(indices)
print(total)
self.logger.info("Number of samples of dataset with %s is %s", X.shape[0], total)
activations_test = pd.DataFrame(index=X_test.index,
columns=[rule for rule in sorted(rules.keys())])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X_test[mf['var_name']]))
activations_test[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
preds = pd.DataFrame(index=X_test.index, columns=sorted(lin_models.keys()))
for rule in sorted(rules.keys()):
indices = activations_test[rule].index[activations_test[rule] >= 0.01].tolist()
if len(indices) != 0:
X1 = X_test.loc[indices].values
preds.loc[indices, rule] = scale_y.inverse_transform(
lin_models[rule].predict(X1).reshape(-1, 1)).ravel()
pred = preds.mean(axis=1)
# pred.name='target'
# pred=pred.to_frame()
err = (pred.values.ravel() - y_test.ravel()) / rated
self.objectives = [np.sum(np.square(err)),np.mean(np.abs(err))]
self.rules = rules
self.mfs = mfs
fmodel = dict()
fmodel['mfs'] = self.mfs
fmodel['rules'] = self.rules
fmodel['result'] = self.objectives[1]
print('Error = ', self.objectives[1])
return fmodel
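# Module-level counterpart of cluster_problem.evaluate: this is the objective
# handed to the joblib workers during the GA loop; it returns just the
# [sse, mae] pair instead of a full fuzzy model.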
def evaluate(x, X, y, X_test, y_test, scale_y, rated, mfs, rules, p, resampling, num_samples=500, n_ratio=0.33):
# print(solution.variables)
i = 0
for rule_name, rule in sorted(rules.items()):
for mf in rule:
if mf['type'] == 'gauss':
mf['param'] = x[i:i + 2]
mf['func'] = fuzz.gaussmf(mf['universe'],
mf['param'][0],
np.abs(mf['param'][1]))
i += 2
elif mf['type'] == 'trap':
mf['param'] = sorted(x[i:i + 4])
mf['func'] = fuzz.trapmf(mf['universe'], mf['param'])
i += 4
activations = pd.DataFrame(index=X.index, columns=[rule for rule in rules.keys()])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
lin_models = dict()
for rule in activations.columns:
indices = activations[rule].index[activations[rule] >= 0.01].tolist()
if len(indices) > num_samples and len(indices) < n_ratio * X.shape[0] :
X1 = X.loc[indices].values
y1 = y.loc[indices].values
# if resampling:
# if X1.shape[0] < 300:
# X1, y1 = resampling_fun(X1,y1)
lin_models[rule] = LinearRegression().fit(X1, y1.ravel())
else:
del rules[rule]
activations_test = pd.DataFrame(index=X_test.index,
columns=[rule for rule in rules.keys()])
for rule in sorted(rules.keys()):
act = []
for mf in rules[rule]:
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X_test[mf['var_name']]))
activations_test[rule] = np.power(np.prod(np.array(act), axis=0), 1 / p)
preds = pd.DataFrame(index=X_test.index, columns=sorted(rules.keys()))
for rule in sorted(rules.keys()):
indices = activations_test[rule].index[activations_test[rule] >= 0.01].tolist()
if len(indices) != 0:
X1 = X_test.loc[indices].values
preds.loc[indices, rule] = scale_y.inverse_transform(lin_models[rule].predict(X1).reshape(-1, 1)).ravel()
pred = preds.mean(axis=1)
pred[pred.isnull()] = 1e+15
# pred.name='target'
# pred=pred.to_frame()
err = (pred.values.ravel() - y_test.ravel()) / rated
objectives = [np.sum(np.square(err)),np.mean(np.abs(err))]
return objectives
class clusterer(object):
def __init__(self, static_data):
self.istrained = False
self.train_online = static_data['train_online']
self.add_individual_rules = static_data['clustering']['add_rules_indvidual']
self.import_external_rules = static_data['clustering']['import_external_rules']
self.njobs = static_data['clustering']['njobs']
self.resampling = static_data['resampling']
self.path_fuzzy = static_data['path_fuzzy_models']
self.file_fuzzy = static_data['clustering']['cluster_file']
self.type = static_data['type']
self.static_data = static_data
try:
self.load()
        except Exception:  # no saved model yet; start fresh
pass
def compute_activations(self, X):
if not hasattr(self, 'best_fuzzy_model'):
self.best_fuzzy_model = joblib.load(os.path.join(self.path_fuzzy, self.file_fuzzy))
self.rules = self.best_fuzzy_model['rules']
activations = pd.DataFrame(index=X.index, columns=[i for i in sorted(self.rules.keys())])
var_del=[]
for rule in sorted(self.rules.keys()):
act = []
for mf in self.rules[rule]:
if mf['var_name'] not in X.columns:
var_names = [c for c in X.columns if mf['var_name'] in c]
X[mf['var_name']] = X[var_names].mean(axis=1)
var_del.append(mf['var_name'])
act.append(fuzz.interp_membership(mf['universe'], mf['func'], X[mf['var_name']]))
if not 'p' in mf.keys():
mf['p'] = 2
activations[rule] = np.power(np.prod(np.array(act), axis=0), 1 / mf['p'])
if len(var_del)>0:
X = X.drop(columns=var_del)
return activations
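    # Rule activation is the p-th root of the product of the per-antecedent
    # memberships -- a soft AND whose 1/p exponent keeps activations from
    # shrinking as more antecedents are multiplied in.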
def load(self):
if os.path.exists(os.path.join(self.path_fuzzy, 'fuzzy_model.pickle')):
try:
f = open(os.path.join(self.path_fuzzy, 'fuzzy_model.pickle'), 'rb')
tmp_dict = pickle.load(f)
f.close()
tdict={}
for k in tmp_dict.keys():
if k not in ['logger', 'static_data', 'data_dir', 'cluster_dir', 'n_jobs']:
tdict[k] = tmp_dict[k]
self.__dict__.update(tdict)
            except Exception:
raise ImportError('Cannot open fuzzy model')
else:
raise ImportError('Cannot find fuzzy model')
#
# if __name__ == '__main__':
# import sys
# warnings.filterwarnings("ignore", category=FutureWarning)
# if sys.platform == 'linux':
# sys_folder = '/media/smartrue/HHD1/George/models/'
# else:
# sys_folder = 'D:/models/'
# project_name = 'APE_net_ver2'
# project_country = 'APE_net_ver2'
# project_owner = '4cast_models'
# path_project = sys_folder + project_owner + '/' + project_country + '/' + project_name
# cluster_dir = path_project +'/Regressor_layer/rule.12'
# data_dir = path_project + '/Regressor_layer/rule.12/data'
# # logger = logging.getLogger(__name__)
# # logger.setLevel(logging.INFO)
# # handler = logging.FileHandler(os.path.join(cluster_dir, 'log_rbf_cnn_test.log'), 'a')
# # handler.setLevel(logging.INFO)
# #
# # # create a logging format
# # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# # handler.setFormatter(formatter)
# #
# # # add the handlers to the logger
# # logger.addHandler(handler)
#
# rated = None
#
# static_data = write_database()
# X = pd.read_csv(os.path.join(static_data['path_data'], 'training_inputs.csv'), index_col=0,
# parse_dates=True, dayfirst=True)
# y = pd.read_csv(os.path.join(static_data['path_data'], 'training_target.csv'), index_col=0,
# header=None,
# names=['target'], parse_dates=True, dayfirst=True)
# X_train = X.loc[X.index <= pd.to_datetime('2019-01-01 00:00')]
# X_test = X.loc[X.index > pd.to_datetime('2019-01-01 00:00')]
# y_train = y.loc[y.index <= pd.to_datetime('2019-01-01 00:00')]
# y_test = y.loc[y.index > pd.to_datetime('2019-01-01 00:00')]
#
# optimizer = cluster_optimize(static_data)
# optimizer.run(X_train, y_train, X_test, y_test, rated)
|
{"hexsha": "2fb15c25f6dfb16e4fc6c0aae7cca0fe6a5c8526", "size": 34313, "ext": "py", "lang": "Python", "max_stars_repo_path": "Fuzzy_clustering/version3/FuzzyClusteringManager/Clusterer_optimize_deep.py", "max_stars_repo_name": "joesider9/forecasting_library", "max_stars_repo_head_hexsha": "db07ff8f0f2693983058d49004f2fc6f8849d197", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Fuzzy_clustering/version3/FuzzyClusteringManager/Clusterer_optimize_deep.py", "max_issues_repo_name": "joesider9/forecasting_library", "max_issues_repo_head_hexsha": "db07ff8f0f2693983058d49004f2fc6f8849d197", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fuzzy_clustering/version3/FuzzyClusteringManager/Clusterer_optimize_deep.py", "max_forks_repo_name": "joesider9/forecasting_library", "max_forks_repo_head_hexsha": "db07ff8f0f2693983058d49004f2fc6f8849d197", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0474967908, "max_line_length": 215, "alphanum_fraction": 0.5358610439, "include": true, "reason": "import numpy", "num_tokens": 8437}
|
import pyx
from pyxutil import *
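# Assumption: DIM_LEG (dimension-line overhang) and dim() (dimension-line
# drawing) used below come from the local pyxutil helper via the wildcard
# import above.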
def make_fig_microstructure(name):
L = 3
a = 0.75*L
dim_shift = 0.6
c = pyx.canvas.canvas()
attrs = [pyx.style.linewidth.normal, pyx.deco.earrow()]
c.stroke(pyx.path.line(-DIM_LEG, 0, L+2*DIM_LEG, 0), attrs)
c.stroke(pyx.path.line(0, -DIM_LEG, 0, L+2*DIM_LEG), attrs)
c.text(L+2*DIM_LEG, -0.1, r'$x_1$', [pyx.text.halign.boxcenter,
pyx.text.valign.top])
c.text(-0.1, L+2*DIM_LEG, r'$x_2$', [pyx.text.halign.boxright,
pyx.text.valign.middle])
attrs = [pyx.deco.filled([pyx.color.gray(0.75)])]
c.draw(pyx.path.rect(0, 0, L, L), attrs)
attrs = [pyx.deco.stroked([pyx.style.linewidth.normal]),
pyx.deco.filled([pyx.color.gray.white])]
c.draw(pyx.path.rect(0, 0, a, a), attrs)
attrs = [pyx.deco.stroked([pyx.style.linewidth.Thick])]
c.draw(pyx.path.rect(0, 0, L, L), attrs)
dim(0, -dim_shift, a, -dim_shift, c)
dim(-dim_shift, 0, -dim_shift, a, c)
dim(0, L+dim_shift, L, L+dim_shift, c)
dim(L+dim_shift, 0, L+dim_shift, L, c)
attrs = [pyx.text.halign.boxcenter, pyx.text.valign.middle]
text = pyx.text.text(0.5*a, -dim_shift, r'\color{black}$a$', attrs)
c.draw(text.bbox().path(), [pyx.deco.filled([pyx.color.gray.white])])
c.insert(text)
text = pyx.text.text(0.5*L, L+dim_shift, r'\color{black}$L$', attrs)
c.draw(text.bbox().path(), [pyx.deco.filled([pyx.color.gray.white])])
c.insert(text)
cc = pyx.canvas.canvas()
    text = pyx.text.text(0, 0, r'\color{black}$a$', attrs)
cc.draw(text.bbox().path(), [pyx.deco.filled([pyx.color.gray.white])])
cc.insert(text)
c.insert(cc, [pyx.trafo.rotate(90),
pyx.trafo.translate(-dim_shift, 0.5*a)])
cc = pyx.canvas.canvas()
text = pyx.text.text(0, 0, r'\color{black}$L$', attrs)
cc.draw(text.bbox().path(), [pyx.deco.filled([pyx.color.gray.white])])
cc.insert(text)
c.insert(cc, [pyx.trafo.rotate(-90),
pyx.trafo.translate(L+dim_shift, 0.5*L)])
c.text(0.5*a, 0.5*a,
r'\color{black}$\mu_\mathrm{i},\nu_\mathrm{i}$', attrs)
c.text(0.5*L, 0.5*(L-a)+a,
r'\color{black}$\mu_\mathrm{m},\nu_\mathrm{m}$', attrs)
c.writePDFfile(name)
c.writeSVGfile(name)
if __name__ == '__main__':
# Using package txfonts leads to LaTeX messages that pyx cannot parse.
pyx.text.set(pyx.text.LatexRunner,
errordetail=pyx.text.errordetail.full,
docopt='12pt',
texmessages_preamble=[pyx.text.texmessage.ignore],
texmessages_run=[pyx.text.texmessage.ignore])
pyx.text.preamble(r'\usepackage{amsmath, color, txfonts}')
make_fig_microstructure('microstructure')
|
{"hexsha": "8ac64846e779c1880dd9b7a550a08666334fd88f", "size": 2899, "ext": "py", "lang": "Python", "max_stars_repo_path": "sphinx/tutorials/square_basic/make_pyx_figs.py", "max_stars_repo_name": "sbrisard/janus", "max_stars_repo_head_hexsha": "a6196a025fee6bf0f3eb5e636a6b2f895ca6fbc9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-01-18T14:16:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-04T11:21:19.000Z", "max_issues_repo_path": "sphinx/tutorials/square_basic/make_pyx_figs.py", "max_issues_repo_name": "sbrisard/janus", "max_issues_repo_head_hexsha": "a6196a025fee6bf0f3eb5e636a6b2f895ca6fbc9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2015-07-15T08:47:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T11:56:32.000Z", "max_forks_repo_path": "sphinx/tutorials/square_basic/make_pyx_figs.py", "max_forks_repo_name": "sbrisard/janus", "max_forks_repo_head_hexsha": "a6196a025fee6bf0f3eb5e636a6b2f895ca6fbc9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-05-18T13:39:21.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-27T06:36:02.000Z", "avg_line_length": 35.3536585366, "max_line_length": 74, "alphanum_fraction": 0.6012418075, "include": true, "reason": "import numpy", "num_tokens": 922}
|
import shutil
import time
import numpy
import pickle
from pathlib import Path
from trsfile.common import Header, SampleCoding
from trsfile.engine.engine import Engine
from trsfile.parametermap import TraceParameterMap
from trsfile.trace import Trace
from trsfile.traceparameter import ByteArrayParameter
class FileEngine(Engine):
"""
    This engine tries to save traces to disk in the most versatile and simple
    manner available. No known tools support this file format; it serves only
    as an intermediate step before conversion to a supported format.
    This can be useful when the trace length (number of samples) varies, as
    varying lengths are often not supported in trace files.
After acquisition, the file can be converted to the proper format with the
correct padding mode.
This engine supports the following options:
+--------------+-----------------------------------------------------------+
| Option | Description |
+==============+===========================================================+
| headers | Dictionary containing zero or more headers, see |
| | :py:class:`trsfile.common.Header` |
+--------------+-----------------------------------------------------------+
"""
INFO_FILE = 'traceset.pickle'
def __get_trace_path(self, i, name):
return (self.path / '{0:d}.{1:s}'.format(i, name))
def __init__(self, path, mode = 'x', **options):
# Defaults
self.path = Path(path)
self.headers = {}
self.read_only = False
# Shadow list of traces in files
self.shadow_trace_index = -1
self.shadow_traces = []
# Parse the mode
if mode == 'r':
"""r = open for reading"""
if not self.path.is_dir() or not (self.path / self.INFO_FILE).is_file():
raise FileNotFoundError('Path \'{0:s}\' does not point to a tmp trace set'.format(path))
# Load the headers
with (self.path / self.INFO_FILE).open('rb') as f:
self.headers = pickle.load(f)
# Initialize the shadow_traces list
self.shadow_traces = sorted([int(trace_path.stem) for trace_path in self.path.glob('*.samples')])
self.shadow_trace_index = max(self.shadow_traces) + 1
self.read_only = True
elif mode == 'w':
"""open for writing, truncating the file first"""
# Remove the directory if it exists
if self.path.is_dir():
shutil.rmtree(str(self.path), True)
# Wait until it is removed
try:
while self.path.is_dir():
time.sleep(0.001)
except:
pass
# Create the temporary folder and initialize this class
self.path.mkdir()
self.__initialize_headers()
elif mode == 'x':
"""open for exclusive creation, failing if the file already exists (default)"""
if self.path.is_dir():
raise FileExistsError('Trace set already exists at path \'{0:s}\''.format(str(self.path)))
# Create the temporary folder and initialize this class
self.path.mkdir()
self.__initialize_headers()
elif mode == 'a':
"""a = open for writing, appending to the end of the file if it exists"""
if self.path.is_dir() and (self.path / self.INFO_FILE).is_file():
# Load the headers
with (self.path / self.INFO_FILE).open('rb') as f:
self.headers = pickle.load(f)
# Initialize the shadow_traces list
self.shadow_traces = sorted([int(trace_path.stem) for trace_path in self.path.glob('*.samples')])
self.shadow_trace_index = max(self.shadow_traces) + 1
else:
# Create the temporary folder and initialize this class
self.path.mkdir()
self.__initialize_headers()
else:
raise ValueError('invalid mode: \'{0:s}\''.format(mode))
# Update the headers
headers = options.get('headers', None)
if self.is_read_only() and headers is not None:
raise ValueError('Cannot add headers when opening in read-only mode')
elif headers is not None:
self.update_headers(headers)
def __initialize_headers(self):
headers = {}
# Let's support dynamic sample coding depending on the trace
headers[Header.SAMPLE_CODING] = None
# Add any mandatory headers that are missing
for header in Header.get_mandatory():
if not header in headers:
headers[header] = header.default
# Store these default headers
self.update_headers(headers)
def update_headers(self, headers):
changed_headers = super().update_headers(headers)
if len(changed_headers) > 0:
# Dump all headers to disk
with (self.path / self.INFO_FILE).open('wb') as f:
pickle.dump(self.headers, f)
def is_closed(self):
return not self.path.is_dir() or not (self.path / self.INFO_FILE).is_file()
def length(self):
return len(self.shadow_traces)
def del_traces(self, index):
# Remove the shadow traces and with that check if indexes are correct
exception = None
try:
indices = self.shadow_traces[index]
del self.shadow_traces[index]
if not isinstance(index, slice):
indices = [indices]
except IndexError as err:
exception = err
# Do we have an exception, re-raise
if exception is not None:
raise IndexError(exception)
# Delete all traces on the file system
for trace_index in indices:
for category in ['title', 'data', 'samples']:
path = self.__get_trace_path(trace_index, category)
if path.is_file():
path.unlink()
def get_traces(self, index):
# Try access, and re-raise if wrong for fancy indexing errors
try:
indices = self.shadow_traces[index]
if not isinstance(index, slice):
indices = [indices]
except IndexError as exception:
raise IndexError(exception)
# Now obtain all requested traces from file
traces = []
for i in indices:
# Read the samples
path = self.__get_trace_path(i, 'samples')
if path.is_file():
with path.open('rb') as tmp_file:
# First byte is always sample coding
sample_coding = SampleCoding(tmp_file.read(1)[0])
samples = numpy.fromfile(tmp_file, sample_coding.format, -1)
else:
raise IOError('Unable to read samples from trace {0:d}'.format(i))
# Title
path = self.__get_trace_path(i, 'title')
if path.is_file():
with path.open('rb') as tmp_file:
title = tmp_file.read().decode('utf-8')
else:
title = Header.TRACE_TITLE.default
# Read the data
path = self.__get_trace_path(i, 'data')
if path.is_file():
with path.open('rb') as tmp_file:
data = tmp_file.read()
else:
data = b''
parameters = TraceParameterMap()
if data:
parameters['LEGACY_DATA'] = ByteArrayParameter(data)
# Create trace and make sure headers point to our dict
traces.append(Trace(sample_coding, samples, parameters, title, self.headers))
return traces
def set_traces(self, index, traces):
# Make sure we have iterable traces
if isinstance(traces, Trace):
traces = [traces]
# Get all traces that we are going to remove
try:
indices = self.shadow_traces[index]
if not isinstance(index, slice):
indices = [indices]
except IndexError as exception:
raise IndexError(exception)
# Remove the traces from disk only to keep storage lean and mean
for trace_index in indices:
for category in ['title', 'data', 'samples']:
path = self.__get_trace_path(trace_index, category)
if path.is_file():
path.unlink()
# Store all traces with the next sequence numbers and keep these numbers as a list
new_traces = []
for trace in traces:
self.shadow_trace_index += 1
new_traces.append(self.shadow_trace_index)
# Save the trace data
# Write the title as ascii
with self.__get_trace_path(self.shadow_trace_index, 'title').open('wb') as tmp_file:
tmp_file.write(trace.title if not isinstance(trace.title, str) else trace.title.encode('utf-8'))
# Write the data file
if trace.parameters is not None and len(trace.parameters) > 0:
with open(self.__get_trace_path(self.shadow_trace_index, 'data'), 'wb') as tmp_file:
tmp_file.write(trace.parameters.serialize())
# Write the sample file
with open(self.__get_trace_path(self.shadow_trace_index, 'samples'), 'wb') as tmp_file:
tmp_file.write(bytes([trace.sample_coding.value]))
trace.samples.tofile(tmp_file)
# Now we just assign the new_traces however, the slicing works
if isinstance(index, slice):
self.shadow_traces[index] = new_traces
else:
if len(new_traces) != 1:
raise TypeError('assigning multiple new traces to single trace')
self.shadow_traces[index] = new_traces[0]
def close(self):
# We do not need to close anything :)
pass
|
{"hexsha": "7d76f3da6d73ba8e71be7d25e83a5b68bfd44163", "size": 8477, "ext": "py", "lang": "Python", "max_stars_repo_path": "trsfile/engine/file.py", "max_stars_repo_name": "StefanD986/python-trsfile", "max_stars_repo_head_hexsha": "228df9d1cf1f2e18912c68d5c11c45c1493a2ece", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-09-18T18:30:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T00:44:54.000Z", "max_issues_repo_path": "trsfile/engine/file.py", "max_issues_repo_name": "StefanD986/python-trsfile", "max_issues_repo_head_hexsha": "228df9d1cf1f2e18912c68d5c11c45c1493a2ece", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-02-27T09:05:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-21T13:16:50.000Z", "max_forks_repo_path": "trsfile/engine/file.py", "max_forks_repo_name": "StefanD986/python-trsfile", "max_forks_repo_head_hexsha": "228df9d1cf1f2e18912c68d5c11c45c1493a2ece", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-12-17T02:14:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T08:59:47.000Z", "avg_line_length": 31.7490636704, "max_line_length": 101, "alphanum_fraction": 0.6770083756, "include": true, "reason": "import numpy", "num_tokens": 2072}
|
# extract_features.r - part of TF2
# Purpose: Take appropriately formatted (what is that?) DNase-1 signal and 'extract features' (binary). 0 means 'missing data', 1 'no', 2 'yes'.
# Warning: parallelising this means the peaks might (requires testing) get all messed up, order-wise, so you need to sort-bed on this afterwards! Hopefully this doesn't negate the speed gain from parallelisation.
# Possible solution: mclapply returns the value for each peak, then write this all to the files...
options(warn=-1)
library(parallel) # provides mclapply, used at the bottom of this script
# missingness threshold: what frac of 0s in the signal counts as missing?
THRE<-0.2
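# e.g. if a window spans 50 positions, a location whose column has more than
# 10 zero values (frac > 0.2) is treated as missing for the shape features
# (f2, f3), which are then set to 0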
# above average signal?
get_feature1 <- function(signal_data,peak_mean){
col_means <- colMeans(signal_data)
f1<-ifelse(col_means>peak_mean,2,1)
f1[is.na(f1)]<-0
return(f1)
}
get_f2_and_f3 <- function(signal_data){
win_size <- nrow(signal_data)
f2<-vector("integer")
f3<-vector("integer")
# centred
x <- seq(win_size)-ceiling(win_size/2)
for (loc in 1:ncol(signal_data)){
if (mean(signal_data[,loc]==0)>THRE){
# missing data
increasing<-0
quadratic<-0
} else{
linear <- lm(signal_data[,loc] ~ x)
increasing <- ifelse(linear$coefficients["x"]>1/win_size,2,1)
      # I() is needed: inside a formula, x^2 means crossing, not squaring
      quad <- lm(signal_data[,loc] ~ I(x^2))
      # note presence of another arbitrary threshold
      quadratic <- ifelse(abs(quad$coefficients["I(x^2)"])>0.3,2,1)
}
f2<-c(f2,increasing)
f3<-c(f3,quadratic)
}
return(list("f2"=f2,"f3"=f3))
}
get_peak_features <- function(peak,peaklist,signal,feature1,feature2,feature3){
hor_str <- peaklist[peak,1]
if (peak%%1000==0){
cat("Peak",peak,"\n")
cat(hor_str,"\n")
}
# this is just string formatting
broken_hor_str <- unlist(strsplit(hor_str,":"))
peak_range <- unlist(strsplit(broken_hor_str[2],"-"))
peak_start <- as.integer(peak_range[1])
peak_end <- as.integer(peak_range[2])
peak_length <- diff(c(peak_start,peak_end))-1
buff <- matrix(scan(signal,sep="\t",what=character(),nlines=peak_length,quiet=TRUE),nrow=peak_length,byrow=T)
peak_mean <- mean(as.numeric(buff[,5]))
# signal data is now actually columns for locations and rows for the values...
signal_data<- apply(buff[,5:ncol(buff)],1,as.numeric)
f1<-get_feature1(signal_data,peak_mean)
f2_and_f3<-get_f2_and_f3(signal_data)
chro<-broken_hor_str[1]
f2<-f2_and_f3$"f2"
f3<-f2_and_f3$"f3"
cat(chro,"\t",peak_start,"\t",peak_end,"\t",f1,"\n",file=feature1)
cat(chro,"\t",peak_start,"\t",peak_end,"\t",f2,"\n",file=feature2)
cat(chro,"\t",peak_start,"\t",peak_end,"\t",f3,"\n",file=feature3)
}
# --- Main bit! --- #
#
signal<-file("dnase_signal_final.bed.gz",open="r")
feature1<-file("feature1.bed",open="w")
feature2<-file("feature2.bed",open="w")
feature3<-file("feature3.bed",open="w")
peaklist<-read.table("k562_peak_list",as.is=TRUE)
N_PEAKS<-nrow(peaklist)
# i can already feel the race conditions
mclapply(1:N_PEAKS,get_peak_features,peaklist,signal,feature1,feature2,feature3)
# tidy up: close the shared connections once all peaks are done
close(signal)
close(feature1)
close(feature2)
close(feature3)
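# As the header warns, parallel writes may leave peaks out of order; a
# follow-up pass with sort-bed (or sort -k1,1 -k2,2n) on the three feature
# files fixes this.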
|
{"hexsha": "f5c8ac6c955a27e436e12ffc9d7db904a621b79e", "size": 3103, "ext": "r", "lang": "R", "max_stars_repo_path": "preprocess/extract_features.r", "max_stars_repo_name": "corcra/tf2", "max_stars_repo_head_hexsha": "46013e22f627f14bfbfa735f1d4b6e8e0a201d8f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocess/extract_features.r", "max_issues_repo_name": "corcra/tf2", "max_issues_repo_head_hexsha": "46013e22f627f14bfbfa735f1d4b6e8e0a201d8f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocess/extract_features.r", "max_forks_repo_name": "corcra/tf2", "max_forks_repo_head_hexsha": "46013e22f627f14bfbfa735f1d4b6e8e0a201d8f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2784810127, "max_line_length": 212, "alphanum_fraction": 0.6635514019, "num_tokens": 929}
|
C @(#)chk_aredta.f 20.3 12/21/96
integer function chk_aredta(ptr, field, count, out_buffer)
integer ptr, field, count
character out_buffer(10)*120
C This subroutine checks AREA(*,PTR) extensively for data errors.
include 'ipfinc/parametr.inc'
include 'ipfinc/blank.inc'
include 'ipfinc/arcntl.inc'
include 'ipfinc/area.inc'
include 'ipfinc/prt.inc'
chk_aredta = 0
if ((field .eq. 0 .or. field .eq. 27) .and.
& (arcnet(ptr)*bmva .lt. -6000.0 .or.
& arcnet(ptr)*bmva .gt. 6000.0)) then
chk_aredta = 1
count = min0 (count + 1, 10)
write (out_buffer(count), 10010) arcnet(ptr) * bmva
10010 format (' Export (', f10.1, ') < -6000 or > 6000 ')
endif
return
end
|
{"hexsha": "a04811e6641abd973c8fd53f555d1fe95ca70631", "size": 790, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ipf/chk_aredta.f", "max_stars_repo_name": "mbheinen/bpa-ipf-tsp", "max_stars_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-04-02T15:34:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T08:57:45.000Z", "max_issues_repo_path": "ipf/chk_aredta.f", "max_issues_repo_name": "cuihantao/bpa-ipf-tsp", "max_issues_repo_head_hexsha": "cb2d0917ae42eff571017e9162f550f87900b83f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-02-08T14:21:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-13T01:27:56.000Z", "max_forks_repo_path": "ipf/chk_aredta.f", "max_forks_repo_name": "mbheinen/bpa-ipf-tsp", "max_forks_repo_head_hexsha": "bf07dd456bb7d40046c37f06bcd36b7207fa6d90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2020-02-03T04:26:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T15:04:31.000Z", "avg_line_length": 30.3846153846, "max_line_length": 69, "alphanum_fraction": 0.5860759494, "num_tokens": 270}
|
import torch
import torch.optim as optim
from network import resnet,HRnet,PB_resnet,PB_net
from tool.dataset import VOC_Dataset
import argparse
from torchvision import transforms
from torch.utils.data import DataLoader
import torch.nn as nn
from sklearn.metrics import average_precision_score
import torch.nn.functional as F
from tool.utils import save_cam,save_mask
import numpy as np
from evaluation import do_python_eval
from tqdm import tqdm
import yaml
voc_root = '/users4/mxtuo/zhanghan/data/VOC2012'
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size',default=64,type=int)
parser.add_argument('--lr',default=1e-4,type=float)
parser.add_argument('--gpu_index',default='2',type=str)
parser.add_argument('--crop_size',default=224,type=int)
parser.add_argument('--num_epochs',default=7,type=int)
parser.add_argument("--train_list", default="voc12/train_aug.txt", type=str)
parser.add_argument("--val_list", default="voc12/val.txt", type=str)
args = parser.parse_args()
print(args)
import os
# os.makedirs(args.session_name,exist_ok=True)
# os.makedirs(f'{args.session_name}/model_weights', exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_index
val_dataset = VOC_Dataset(args.val_list,voc_root,transform = transforms.Compose([
transforms.Resize(args.crop_size),
transforms.CenterCrop(args.crop_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]) )
# loglist = do_python_eval(f'baseline_3_5/pred_dir', args, 0)
val_dataloader = DataLoader(val_dataset,batch_size=32,shuffle=False,num_workers=4,pin_memory=False,drop_last=True)
test_dataset = VOC_Dataset(args.val_list,voc_root,transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]) )
test_dataloader = DataLoader(test_dataset,shuffle=False,num_workers=8,pin_memory=True)
model = PB_resnet.net(20)
# load pretrained classifier weights (20 Pascal VOC foreground classes)
model.load_state_dict(torch.load("/users4/mxtuo/zhanghan/DBnet/model_weights/res50-bs=96-cf=20.0-mIoU=47.811.pth"))
# inspect the fully connected weights for class 0 and print the strongest 10% of the 2048 channels
weight = model.fc.weight[0].view(-1)
print(torch.topk(weight,int(2048*0.1)))
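# A minimal sketch (not part of the original script): turning the top-k
# channel indices printed above into a boolean mask over the 2048 feature
# channels; `k_ratio` is a hypothetical name introduced here.
k_ratio = 0.1
values, indices = torch.topk(weight, int(2048 * k_ratio))
channel_mask = torch.zeros_like(weight, dtype=torch.bool)
channel_mask[indices] = True  # True marks the strongest 10% of channels
print('selected channels:', channel_mask.sum().item())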
|
{"hexsha": "d060f652b0f47ff1eae00d3f38cbfea7d3c21445", "size": 2369, "ext": "py", "lang": "Python", "max_stars_repo_path": "Adaptive-Spatial-Feature-Pooling/weight_mask.py", "max_stars_repo_name": "code6levels/Adaptive-Spatial-Feature-Pooling", "max_stars_repo_head_hexsha": "200e10ae5fd99a1a13d7525beedc10912fdb2397", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Adaptive-Spatial-Feature-Pooling/weight_mask.py", "max_issues_repo_name": "code6levels/Adaptive-Spatial-Feature-Pooling", "max_issues_repo_head_hexsha": "200e10ae5fd99a1a13d7525beedc10912fdb2397", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Adaptive-Spatial-Feature-Pooling/weight_mask.py", "max_forks_repo_name": "code6levels/Adaptive-Spatial-Feature-Pooling", "max_forks_repo_head_hexsha": "200e10ae5fd99a1a13d7525beedc10912fdb2397", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.8360655738, "max_line_length": 120, "alphanum_fraction": 0.7062051499, "include": true, "reason": "import numpy", "num_tokens": 590}
|
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score, learning_curve, validation_curve, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, make_scorer, roc_curve, auc
from numpy import interp  # scipy.interp was a deprecated alias of numpy.interp and has been removed in recent SciPy
from sklearn.utils import resample
author = "Oscar Ding"
# breast cancer dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases'
'/breast-cancer-wisconsin/wdbc.data', header=None)
X = df.loc[:, 2:].values
y = df.loc[:, 1].values
le = LabelEncoder()
y = le.fit_transform(y)
print("classes:", le.classes_)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
stratify=y, random_state=1)
"""
sklearn pipeline / stratified k fold cross validation
"""
# Combining transformers and estimators in a pipeline
pipe_lr = make_pipeline(StandardScaler(),
PCA(n_components=2),
LogisticRegression(solver='liblinear', multi_class='auto', random_state=1))
pipe_lr.fit(X_train, y_train)
y_pred = pipe_lr.predict(X_test)
print("test accuracy: ", pipe_lr.score(X_test, y_test))
# Using k-fold cross validation to assess model performance
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1).split(X_train, y_train)  # shuffle=True is required when passing random_state in recent scikit-learn
# manual way to check CV accuracy
scores = []
for k, (train, test) in enumerate(kfold):
pipe_lr.fit(X_train[train], y_train[train])
score = pipe_lr.score(X_train[test], y_train[test])
scores.append(score)
print("Fold: {}, Class dist: {}, Accuracy: {}".format(k+1, np.bincount(y_train[train]), score))
print("CV accuracy: {} +/- {}".format(np.mean(scores), np.std(scores)))
# check CV with cross_val_score
scores = cross_val_score(estimator=pipe_lr, X=X_train, y=y_train, cv=10, n_jobs=4)
print('CV accuracy scores: %s' % scores)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
"""
learning curve
"""
# Debugging algorithms with learning curves to check issues with under-fitting and over-fitting
pipe_lr = make_pipeline(StandardScaler(),
LogisticRegression(penalty='l2', solver='liblinear', multi_class='auto',
random_state=1))
train_sizes, train_scores, test_scores = learning_curve(estimator=pipe_lr,
X=X_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 10),
cv=10,
n_jobs=8)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(train_sizes, train_mean, color='blue', marker='o', markersize=5, label='training accuracy')
plt.fill_between(train_sizes, train_mean + train_std, train_mean - train_std,
alpha=0.15, color='blue')
plt.plot(train_sizes, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='validation accuracy')
plt.fill_between(train_sizes, test_mean + test_std, test_mean - test_std,
alpha=0.15, color='green')
plt.grid()
plt.xlabel('Number of training samples')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.ylim([0.8, 1.03])
plt.tight_layout()
plt.show()
"""
validation curve
"""
# use validation curves to tune a hyper-parameter
param_range = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]
train_scores, test_scores = validation_curve(estimator=pipe_lr,
X=X_train,
y=y_train,
param_name='logisticregression__C',
param_range=param_range,
cv=10,
n_jobs=8)
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
plt.plot(param_range, train_mean, color='blue', marker='o', markersize=5, label='training accuracy')
plt.fill_between(param_range, train_mean + train_std, train_mean - train_std, alpha=0.15, color='blue')
plt.plot(param_range, test_mean, color='green', linestyle='--', marker='s', markersize=5, label='validation accuracy')
plt.fill_between(param_range, test_mean + test_std, test_mean - test_std, alpha=0.15, color='green')
plt.grid()
plt.xscale('log')
plt.legend(loc='lower right')
plt.xlabel('Parameter C')
plt.ylabel('Accuracy')
plt.ylim([0.8, 1.0])
plt.tight_layout()
plt.show()
"""
grid search
"""
# Tuning hyper-parameters via grid search
# find the optimal combination of hyper-parameter values
pipe_svc = make_pipeline(StandardScaler(),
SVC(random_state=1))
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [{'svc__C': param_range, 'svc__kernel': ['linear']},
{'svc__C': param_range, 'svc__gamma': param_range, 'svc__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring='accuracy',
cv=10,
n_jobs=8)
gs = gs.fit(X_train, y_train)
print("best score: ", gs.best_score_)
print("best parameter combos: \n", gs.best_params_)
clf = gs.best_estimator_
clf.fit(X_train, y_train)
print("test accuracy: ", clf.score(X_test, y_test))
"""
algorithm selection with nested cross validation
"""
# compare SVM and decision tree
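# Nested CV: the inner 2-fold GridSearchCV tunes hyper-parameters, while the
# outer 5-fold cross_val_score estimates generalization performance, giving a
# nearly unbiased estimate for comparing the two algorithms (5x2 scheme).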
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring='accuracy',
cv=2,
n_jobs=4)
scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5, n_jobs=4)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
gs = GridSearchCV(estimator=DecisionTreeClassifier(random_state=0),
param_grid=[{'max_depth': [1, 2, 3, 4, 5, 6, 7, None]}],
scoring='accuracy',
cv=2)
scores = cross_val_score(gs, X_train, y_train, scoring='accuracy', cv=5)
print('CV accuracy: %.3f +/- %.3f' % (np.mean(scores), np.std(scores)))
"""
performance evaluation metrics
"""
# confusion matrix
pipe_svc.fit(X_train, y_train)
y_pred = pipe_svc.predict(X_test)
conf_mat = confusion_matrix(y_true=y_test, y_pred=y_pred)
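# scikit-learn's confusion_matrix layout (rows = true labels, columns =
# predicted labels):
#   [[TN, FP],
#    [FN, TP]]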
print("confusion matrix: \n", conf_mat)
fig, ax = plt.subplots(figsize=(2.5, 2.5))
ax.matshow(conf_mat, cmap=plt.cm.Blues, alpha=0.3)
for i in range(conf_mat.shape[0]):
for j in range(conf_mat.shape[1]):
ax.text(x=j, y=i, s=conf_mat[i, j], va='center', ha='center')
plt.xlabel('Predicted label')
plt.ylabel('True label')
# plt.tight_layout()
plt.show()
# Optimizing the precision and recall of a classification model
print('Precision: %.3f' % precision_score(y_true=y_test, y_pred=y_pred))
print('Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_pred))
print('F1: %.3f' % f1_score(y_true=y_test, y_pred=y_pred))
# use f1 score in grid search
scorer = make_scorer(f1_score, pos_label=0)
c_gamma_range = [0.01, 0.1, 1.0, 10.0]
param_grid = [{'svc__C': c_gamma_range,
'svc__kernel': ['linear']},
{'svc__C': c_gamma_range,
'svc__gamma': c_gamma_range,
'svc__kernel': ['rbf']}]
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring=scorer,
cv=10,
n_jobs=8)
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
# Plotting a receiver operating characteristic
pipe_lr = make_pipeline(StandardScaler(),
PCA(n_components=2),
LogisticRegression(penalty='l2', multi_class='auto', solver='liblinear',
random_state=1, C=100.0))
X_train2 = X_train[:, [4, 14]]
cv = list(StratifiedKFold(n_splits=3, shuffle=True, random_state=1).split(X_train, y_train))
fig = plt.figure(figsize=(7, 5))
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas = pipe_lr.fit(X_train2[train],
y_train[train]).predict_proba(X_train2[test])
fpr, tpr, thresholds = roc_curve(y_train[test],
probas[:, 1],
pos_label=1)
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='ROC fold %d (area = %0.2f)' % (i+1, roc_auc))
plt.plot([0, 1], [0, 1], linestyle='--', color=(0.6, 0.6, 0.6), label='random guessing')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--', label='mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.plot([0, 0, 1], [0, 1, 1], linestyle=':', color='black', label='perfect performance')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('false positive rate')
plt.ylabel('true positive rate')
plt.legend(loc="lower right")
plt.tight_layout()
plt.show()
# The scoring metrics for multi-class classification
pre_scorer = make_scorer(score_func=precision_score,
pos_label=1,
greater_is_better=True,
average='micro')
# Dealing with class imbalance
X_imb = np.vstack((X[y == 0], X[y == 1][:40]))
y_imb = np.hstack((y[y == 0], y[y == 1][:40]))
y_pred = np.zeros(y_imb.shape[0])
print('Null accuracy (always predicting class 0): %.1f%%' % (np.mean(y_pred == y_imb) * 100))
print('Number of class 1 samples before:', X_imb[y_imb == 1].shape[0])
X_upsampled, y_upsampled = resample(X_imb[y_imb == 1],
y_imb[y_imb == 1],
replace=True,
n_samples=X_imb[y_imb == 0].shape[0],
random_state=123)
print('Number of class 1 samples after:', X_upsampled.shape[0])
X_bal = np.vstack((X[y == 0], X_upsampled))
y_bal = np.hstack((y[y == 0], y_upsampled))
y_pred = np.zeros(y_bal.shape[0])
print('Null accuracy after balancing: %.1f%%' % (np.mean(y_pred == y_bal) * 100))
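# A complementary sketch (not in the original): downsampling the majority
# class with the same `resample` utility instead of upsampling the minority.
X_downsampled, y_downsampled = resample(X_imb[y_imb == 0],
                                        y_imb[y_imb == 0],
                                        replace=False,
                                        n_samples=X_imb[y_imb == 1].shape[0],
                                        random_state=123)
print('Number of class 0 samples after downsampling:', X_downsampled.shape[0])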
|
{"hexsha": "c7e0857cc681dd1bdeea70dbe1323674c8ad0030", "size": 10683, "ext": "py", "lang": "Python", "max_stars_repo_path": "06_Hyperparameter_Metrics.py", "max_stars_repo_name": "OscarDing/Oscar-s-Machine-Learning-in-Python", "max_stars_repo_head_hexsha": "d9dd359a178f5435b405235821147e5ea8a73c80", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "06_Hyperparameter_Metrics.py", "max_issues_repo_name": "OscarDing/Oscar-s-Machine-Learning-in-Python", "max_issues_repo_head_hexsha": "d9dd359a178f5435b405235821147e5ea8a73c80", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "06_Hyperparameter_Metrics.py", "max_forks_repo_name": "OscarDing/Oscar-s-Machine-Learning-in-Python", "max_forks_repo_head_hexsha": "d9dd359a178f5435b405235821147e5ea8a73c80", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2574257426, "max_line_length": 134, "alphanum_fraction": 0.6214546476, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2735}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, argparse, itertools
import numpy as np
from mCNN.processing import read_csv, save_data_array
from scipy.spatial.distance import pdist, squareform
def main():
parser = argparse.ArgumentParser()
parser.description = 'A script to calculate mCSM features'
parser.add_argument('dataset_name')
parser.add_argument('csv_feature_dir', type=str, help='eg:~/mCNN/dataset/S1925/feature/mCNN/wild/csv')
parser.add_argument('-o', '--outdir', type=str, required=True, help='output dir')
parser.add_argument('--min', type=str, required=True, help='The minimum distance from mutant center')
parser.add_argument('--max', type=str, required=True, help='The maximum distance from mutant center')
parser.add_argument('--step', type=str, required=True, help='The cutoff step')
parser.add_argument('--center', type=str, choices=['CA', 'geometric'], default='geometric', help='The MT center type, default is "geometric"')
parser.add_argument('--class_num', type=int, choices=[2,8], default=2, help='atom classification number, default is 2')
args = parser.parse_args()
csv_feature_dir = args.csv_feature_dir
outdir = args.outdir
minimum = float(args.min)
maximum = float(args.max)
step = float(args.step)
center = args.center
print(center)
class_num = args.class_num
filename = 'min_%.1f_max_%.1f_step_%.1f_center_%s_class_%s' % (minimum, maximum, step, center, class_num)
feature_dirlst = [csv_feature_dir + '/' + x + '/center_%s.csv' % center for x in os.listdir(csv_feature_dir)]
for feature_dir in feature_dirlst:
assert os.path.exists(feature_dir)
feature_all = []
ylst = []
ddglst = []
for feature_dir in feature_dirlst:
df = read_csv(feature_dir)
ddg = df.loc[:, 'ddg'].values[0]
# print(type(ddg),ddg)# @@++
ddglst.append(ddg)
if ddg >= 0:
ylst.append(1)
else:
ylst.append(0)
feature_arr = cal_mCSM(df, maximum=maximum, minimum=minimum, step=step, class_num=class_num)
feature_all.append(feature_arr)
x = np.array(feature_all)
ddg = np.array(ddglst).reshape(-1, 1)
y = np.array(ylst).reshape(-1, 1)
save_data_array(x, y, ddg, filename, outdir)
def cal_mCSM(df, maximum, minimum, step, class_num=2):
'''
:param df: feature df calculated by coord.py
column name:
['chain', 'res', 'het', 'posid', 'inode', 'full_name', 'atom_name', 'secondary', 'dist', 'x', 'y', 'z',
'occupancy', 'b_factor', 's_H', 's_G', 's_I', 's_E', 's_B', 's_T', 's_C', 's_Helix', 's_Strand', 's_Coil',
'sa', 'rsa', 'asa', 'phi', 'psi', 'ph', 'temperature', 'C', 'O', 'N', 'Other', 'hydrophobic', 'positive',
'negative', 'neutral', 'acceptor', 'donor', 'aromatic', 'sulphur', 'hydrophobic_bak', 'polar', 'C_mass',
'O_mass', 'N_mass', 'S_mass', 'dC', 'dH', 'dO', 'dN', 'dOther', 'dhydrophobic', 'dpositive', 'dnegative',
'dneutral', 'dacceptor', 'ddonor', 'daromatic', 'dsulphur', 'dhydrophobic_bak', 'dpolar', 'dEntropy',
'entWT', 'entMT', 'WT_A', 'WT_R', 'WT_N', 'WT_D', 'WT_C', 'WT_Q', 'WT_E', 'WT_G', 'WT_H', 'WT_I','WT_L',
'WT_K', 'WT_M', 'WT_F', 'WT_P', 'WT_S', 'WT_T', 'WT_W', 'WT_Y', 'WT_V', 'WT_-', 'MT_A', 'MT_R', 'MT_N',
'MT_D', 'MT_C', 'MT_Q', 'MT_E', 'MT_G', 'MT_H', 'MT_I', 'MT_L', 'MT_K', 'MT_M', 'MT_F', 'MT_P', 'MT_S',
'MT_T', 'MT_W', 'MT_Y', 'MT_V', 'MT_-', 'fa_atr', 'fa_rep', 'fa_sol', 'fa_intra_rep', 'fa_intra_sol_xover4',
'lk_ball_wtd', 'fa_elec', 'pro_close', 'hbond_sr_bb', 'hbond_lr_bb', 'hbond_bb_sc', 'hbond_sc', 'dslf_fa13',
'atom_pair_constraint', 'angle_constraint', 'dihedral_constraint', 'omega', 'fa_dun', 'p_aa_pp', 'yhh_planarity',
'ref', 'rama_prepro', 'total', 'ddg']
:param maximum: maximum length considered by mCSM environment.
:param minimum: minimum length considered by mCSM environment.
    :param step: cutoff step.
:param class_num: atom classification class number, the default is 2 (HP).
'''
if class_num == 8:
atom_class = ['hydrophobic', 'positive', 'negative', 'neutral', 'acceptor', 'donor', 'aromatic', 'sulphur']
delta_r = ['dhydrophobic', 'dpositive', 'dnegative', 'dneutral', 'dacceptor', 'ddonor', 'daromatic', 'dsulphur']
if class_num == 2:
atom_class = ['hydrophobic_bak', 'polar']
delta_r = ['dhydrophobic_bak', 'dpolar']
################################################################################################################
# pay attention here!!! #
    # drop atoms that were not assigned an HP class.
################################################################################################################
df = df.loc[(df.hydrophobic_bak != 0) | (df.polar != 0), :]
class_num = len(atom_class)
combilst = list(itertools.combinations(list(range(class_num)), 2)) + [(x, x) for x in range(class_num)] #[(0, 1),(),(),...]
featurelst = []
class_arr = df.loc[df.dist<=maximum,atom_class].values
coords = df.loc[df.dist<=maximum,['x','y','z']].values
delta_r_arr = df.loc[:, delta_r].values[0]
dist_matrix = squareform(pdist(coords, metric='euclidean'))
cutofflist = list(np.arange(minimum, maximum, step))
cutofflist.append(maximum)
## Cutoff scanning here.
for cutoff in cutofflist:
initlst = [0 for _ in range(len(combilst))]
indices = [list(x) for x in np.argwhere(dist_matrix <= cutoff)] #[[],[],[],...]
indices = list(filter(lambda x: x[0] > x[1], indices))
if indices == []:
featurelst.append(initlst)
continue
subfeature_arr = np.array(initlst)
for indice in indices:
arr_1, arr_2 = class_arr[indice[0], :], class_arr[indice[1], :] # one-hot coding feature vector(0D array)
tmplst = []
for combi in combilst:
if arr_1[combi[0]] + arr_2[combi[1]] == 2 or arr_1[combi[1]] + arr_2[combi[0]] == 2:
tmplst.append(1)
else:
tmplst.append(0)
subfeature_arr = subfeature_arr+tmplst
featurelst.append(list(subfeature_arr))
# print('delta_r_arr shape:', delta_r_arr.shape)
# print('len featurelst:', len(featurelst))
# print(np.array(featurelst).shape)
feature_arr = np.hstack((np.array(featurelst).reshape(-1),delta_r_arr))
# print(feature_arr.shape)
return feature_arr
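# Hypothetical smoke test for cal_mCSM (never called by the pipeline): shows
# the minimal input columns expected and the shape of the returned vector.
def _toy_cal_mCSM_example():
    import pandas as pd
    toy = pd.DataFrame({
        'x': [0.0, 1.0, 2.0], 'y': [0.0, 0.0, 0.0], 'z': [0.0, 0.0, 0.0],
        'dist': [0.5, 1.0, 1.5],                             # distance to the mutant center
        'hydrophobic_bak': [1, 0, 1], 'polar': [0, 1, 0],    # HP one-hot atom classes
        'dhydrophobic_bak': [1, 1, 1], 'dpolar': [0, 0, 0],  # toy delta counts
    })
    vec = cal_mCSM(toy, maximum=2.0, minimum=1.0, step=0.5, class_num=2)
    # 3 cutoffs x 3 class-pair combinations + 2 delta features = 11 values
    print(vec.shape)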
if __name__ == '__main__':
main()
|
{"hexsha": "b3acef18a475ef0086e67361648369cb86a97607", "size": 6755, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/Spatial/mCSM.py", "max_stars_repo_name": "ruiyangsong/mCNN", "max_stars_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Spatial/mCSM.py", "max_issues_repo_name": "ruiyangsong/mCNN", "max_issues_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Spatial/mCSM.py", "max_forks_repo_name": "ruiyangsong/mCNN", "max_forks_repo_head_hexsha": "889f182245f919fb9c7a8d97965b11576b01a96c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.5971223022, "max_line_length": 153, "alphanum_fraction": 0.5814951887, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1914}
|
\chapter{``N'' Standard Extension for User-Level Interrupts, Version 1.1}
\label{chap:n}
\begin{commentary}
This is a placeholder for a more complete writeup of the N
extension, and to form a basis for discussion.
\end{commentary}
This chapter presents a proposal for adding RISC-V user-level
interrupt and exception handling. When the N extension is present,
and the outer execution environment has delegated designated
interrupts and exceptions to user-level, then hardware can transfer
control directly to a user-level trap handler without invoking the
outer execution environment.
\begin{commentary}
User-level interrupts are primarily intended to support secure
embedded systems with only M-mode and U-mode present, but can also be
supported in systems running Unix-like operating systems to support
user-level trap handling.
When used in a Unix environment, user-level interrupts would
likely not replace conventional signal handling, but could be used as
a building block for further extensions that generate user-level
events such as garbage-collection barriers, integer overflow, and
floating-point traps.
\end{commentary}
\section{Additional CSRs}
The user-visible CSRs added to support the N extension are listed in
Table~\ref{tab:ncsrs}.
\begin{table}[hbt]
\centering
\begin{tabular}{|l|l|l|}
\hline
Number & Name & Description \\
\hline
\tt 0x000 & \tt ustatus & User status register. \\
\tt 0x004 & \tt uie & User interrupt-enable register. \\
\tt 0x005 & \tt utvec & User trap handler base address. \\
\tt 0x040 & \tt uscratch & Scratch register for user trap handlers. \\
\tt 0x041 & \tt uepc & User exception program counter. \\
\tt 0x042 & \tt ucause & User trap cause. \\
\tt 0x043 & \tt utval & User bad address or instruction. \\
\tt 0x044 & \tt uip & User interrupt pending. \\
\hline
\end{tabular}
\caption{CSRs for N extension.}
\label{tab:ncsrs}
\end{table}
\section{User Status Register ({\tt ustatus})}
The {\tt ustatus} register is an XLEN-bit read/write register
formatted as shown in Figure~\ref{ustatusreg}. The {\tt ustatus}
register keeps track of and controls the hart's current operating
state.
\begin{figure*}[h!]
\begin{center}
\setlength{\tabcolsep}{4pt}
\begin{tabular}{KccFc}
\\
\instbitrange{XLEN-1}{5} &
\instbit{4} &
\instbitrange{3}{1} &
\instbit{0} \\
\hline
\multicolumn{1}{|c|}{\wpri} &
\multicolumn{1}{c|}{UPIE} &
\multicolumn{1}{c|}{\wpri} &
\multicolumn{1}{c|}{UIE} \\
\hline
XLEN-5 & 1 & 3 & 1 \\
\end{tabular}
\end{center}
\vspace{-0.1in}
\caption{User-mode status register ({\tt ustatus}).}
\label{ustatusreg}
\end{figure*}
The user interrupt-enable bit UIE disables user-level interrupts when
clear. The value of UIE is copied into UPIE when a user-level trap is
taken, and the value of UIE is set to zero to provide atomicity for
the user-level trap handler.
\begin{commentary}
There is no UPP bit to hold the previous privilege mode as it can
only be user mode.
\end{commentary}
The URET instruction is used to return from traps in U-mode. URET
copies UPIE into UIE, then sets UPIE.
\begin{commentary}
UPIE is set after the UPIE/UIE stack is popped to enable interrupts
and help catch coding errors.
\end{commentary}
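Informally, the state updates described above can be summarized as
follows (a non-normative sketch of this placeholder proposal):
\begin{displaymath}
\textrm{trap taken:}\quad \textrm{UPIE} \leftarrow \textrm{UIE},\quad \textrm{UIE} \leftarrow 0
\qquad\qquad
\textrm{URET:}\quad \textrm{UIE} \leftarrow \textrm{UPIE},\quad \textrm{UPIE} \leftarrow 1
\end{displaymath}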
\section{Other CSRs}
The remaining CSRs function in an analogous way to the trap handling
registers defined for M-mode and S-mode.
\begin{commentary}
A more complete writeup to follow.
\end{commentary}
\section{N Extension Instructions}
The URET instruction is added to perform the analogous function to
MRET and SRET.
\section{Reducing Context-Swap Overhead}
The user-level interrupt-handling registers add considerable state to
the user-level context, yet will rarely be active in normal
use. In particular, {\tt uepc}, {\tt ucause}, and {\tt utval} are
only valid during execution of a trap handler.
An NS field can be added to {\tt mstatus} and {\tt sstatus} following
the format of the FS and XS fields to reduce context-switch overhead
when the values are not live. Execution of URET will place the {\tt
uepc}, {\tt ucause}, and {\tt utval} registers back into their initial state.
|
{"hexsha": "00812a7bba9854d8ca4bc2a2f1d8b6ed4917c798", "size": 4206, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/n.tex", "max_stars_repo_name": "T-J-Teru/riscv-isa-manual", "max_stars_repo_head_hexsha": "ebeb14b4259e078097a2fb07aa25bdecc2e9e4d6", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-05-20T12:54:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-23T02:08:04.000Z", "max_issues_repo_path": "src/n.tex", "max_issues_repo_name": "T-J-Teru/riscv-isa-manual", "max_issues_repo_head_hexsha": "ebeb14b4259e078097a2fb07aa25bdecc2e9e4d6", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-07-09T19:28:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-11T22:25:12.000Z", "max_forks_repo_path": "src/n.tex", "max_forks_repo_name": "T-J-Teru/riscv-isa-manual", "max_forks_repo_head_hexsha": "ebeb14b4259e078097a2fb07aa25bdecc2e9e4d6", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-01-30T14:52:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-01T10:28:57.000Z", "avg_line_length": 33.648, "max_line_length": 76, "alphanum_fraction": 0.7349025202, "num_tokens": 1156}
|
Christopher Civil is a fourth-year student at UC Davis, majoring in Political Science. He is currently the PR chair in Phi Alpha Delta, the international pre-law professional fraternity. Chris is also an intern at the UC Davis Campus News Service, where he compiles a daily collection of news articles that reference the campus, which are sent out as part of the UC Davis in the News email mailings.
Welcome to the wiki! I would have loved to have had your job when I was an undergrad. Users/ElleWeber
2009-06-12 20:14:48: YAY :) Users/StevenLee
|
{"hexsha": "4a7531bf4e5e650aa6cb4b12df866f4b0bc6ed63", "size": 573, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/ChristopherCivil.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/ChristopherCivil.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/ChristopherCivil.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.8571428571, "max_line_length": 420, "alphanum_fraction": 0.7958115183, "num_tokens": 131}
|
import datetime
import math
import time
import cv2
import numpy as np
ESC_KEY = 27
width = 0
height = 0
ContadorVerde = 0
ContadorAmarelo = 0
AreaContornoLimiteMin = 3000
OffsetLinhasRef = 260
cap = cv2.VideoCapture(0)
def TestaInterseccao(y, CoordenadaYLinha):
DiferencaAbsoluta = abs(y - CoordenadaYLinha)
if (DiferencaAbsoluta <= 7):
return True
else:
return False
def Videotracking(frame, hue, sat, val, verde = True):
global ticAmarelo, ticVerde, tocAmarelo, tocVerde
    # convert the frame from BGR to HSV
hsvImage = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define the HSV ranges that will pass through to the final image
lowerColor = np.array([hue['min'], sat["min"], val["min"]])
upperColor = np.array([hue['max'], sat["max"], val["max"]])
    # mask marking whether each pixel falls inside the range
mask = cv2.inRange(hsvImage, lowerColor, upperColor)
    # apply the mask as a filter, letting through only pixels inside the range
result = cv2.bitwise_and(frame, frame, mask = mask)
    # apply binary thresholding with Otsu's method
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
_,gray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # find points outlining connected regions (contours)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # if any contours exist
if contours:
        # start with the area of the first group of white pixels
maxArea = cv2.contourArea(contours[0])
contourMaxAreaId = 0
i = 0
        # for each group of white pixels
for cnt in contours:
            # look for the group with the largest area
if maxArea < cv2.contourArea(cnt):
maxArea = cv2.contourArea(cnt)
contourMaxAreaId = i
i += 1
        # found the contour with the largest area in pixels
cntMaxArea = contours[contourMaxAreaId]
        # bounding rectangle that encloses the contour in question
xRect, yRect, wRect, hRect = cv2.boundingRect(cntMaxArea)
CoordenadaXCentroContorno = round((xRect+xRect+wRect)/2)
CoordenadaYCentroContorno = round((yRect+yRect+hRect)/2)
PontoCentralContorno = (CoordenadaXCentroContorno,CoordenadaYCentroContorno)
        # draw the reference line
CoordenadaYLinha = round(height / 2)
cv2.line(frame, (0,CoordenadaYLinha), (width,CoordenadaYLinha), (0, 0, 0), 3)
        # draw the bounding box and center point
if verde:
cv2.rectangle(frame, (xRect, yRect), (xRect + wRect, yRect + hRect), (140, 230, 140), 2)
cv2.circle(frame, PontoCentralContorno, 1, (140, 230, 140), 5)
if (TestaInterseccao(CoordenadaYCentroContorno,CoordenadaYLinha)):
global ContadorVerde
if (ticVerde - tocVerde) > 4:
ContadorVerde += 1
print(tocVerde, ticVerde)
tocVerde = time.time()
else:
cv2.rectangle(frame, (xRect, yRect), (xRect + wRect, yRect + hRect), (0, 255, 255), 2)
cv2.circle(frame, PontoCentralContorno, 1, (0, 255, 255), 5)
if (TestaInterseccao(CoordenadaYCentroContorno,CoordenadaYLinha)):
global ContadorAmarelo
                if (ticAmarelo - tocAmarelo) > 4:  # debounce, mirroring the green counter
ContadorAmarelo += 1
tocAmarelo = time.time()
return frame, gray
hue_verde = {'min':30, 'max':100}
sat_verde = {'min':70, 'max':190}
val_verde = {'min':80, 'max':165}
hue_amarelo = {'min':15, 'max':50}
sat_amarelo = {'min':150, 'max':210}
val_amarelo = {'min':145, 'max':230}
ticVerde = time.time()
ticAmarelo = time.time()
tocVerde = time.time()
tocAmarelo = time.time()
while True:
success, frame = cap.read()
height = np.size(frame,0)
width = np.size(frame,1)
ticVerde = time.time()
frame, gray_verde = Videotracking(frame, hue_verde, sat_verde, val_verde)
ticAmarelo = time.time()
frame, gray_amarelo = Videotracking(frame, hue_amarelo, sat_amarelo, val_amarelo, verde=False)
    # write on the frame how many objects entered or left the monitored area
cv2.putText(frame, f"Entradas Verde: {ContadorVerde}", (10, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (140, 230, 140), 2)
cv2.putText(frame, f"Entradas Amarelo: {ContadorAmarelo}", (10, 70),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 2)
cv2.imshow("mascara", gray_verde)
cv2.imshow("webcam", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q') or key == ESC_KEY:  # quit on 'q' or ESC
break
cap.release()
cv2.destroyAllWindows()
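# Sketch (not part of the original script): a hypothetical helper to tune the
# HSV ranges above interactively with OpenCV trackbars; call it manually when
# calibrating a new color instead of running the main loop.
def hsv_tuner(window="tuner"):
    cv2.namedWindow(window)
    for name, maxv in [("h_min", 179), ("h_max", 179),
                       ("s_min", 255), ("s_max", 255),
                       ("v_min", 255), ("v_max", 255)]:
        cv2.createTrackbar(name, window, 0, maxv, lambda v: None)
    cam = cv2.VideoCapture(0)
    while True:
        ok, img = cam.read()
        if not ok:
            break
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lower = np.array([cv2.getTrackbarPos(n, window) for n in ("h_min", "s_min", "v_min")])
        upper = np.array([cv2.getTrackbarPos(n, window) for n in ("h_max", "s_max", "v_max")])
        cv2.imshow(window, cv2.inRange(hsv, lower, upper))
        if cv2.waitKey(1) & 0xFF == ESC_KEY:
            break
    cam.release()
    cv2.destroyWindow(window)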
|
{"hexsha": "4dfb71d8c6807d67c8a99cc94228131d22ce36d9", "size": 4754, "ext": "py", "lang": "Python", "max_stars_repo_path": "Identificador banana/Identificador+Contador.py", "max_stars_repo_name": "Lucas-Marcelino/CV_Pi-VII", "max_stars_repo_head_hexsha": "a7fdc0955e9710f351a7d16278de2093e9e84c69", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Identificador banana/Identificador+Contador.py", "max_issues_repo_name": "Lucas-Marcelino/CV_Pi-VII", "max_issues_repo_head_hexsha": "a7fdc0955e9710f351a7d16278de2093e9e84c69", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Identificador banana/Identificador+Contador.py", "max_forks_repo_name": "Lucas-Marcelino/CV_Pi-VII", "max_forks_repo_head_hexsha": "a7fdc0955e9710f351a7d16278de2093e9e84c69", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4788732394, "max_line_length": 100, "alphanum_fraction": 0.629995793, "include": true, "reason": "import numpy", "num_tokens": 1464}
|
import wandb
import pytest
import numpy as np
import datetime
def test_basic_ndx():
# Base Case
table_a = wandb.Table(columns=["b"], data=[["a"], ["b"]])
table = wandb.Table(columns=["fi", "c"])
for _ndx, row in table_a.iterrows():
table.add_data(_ndx, "x")
assert all([row[0]._table == table_a for row in table.data])
# Adding is supported
table.add_data(3, "c")
# Adding duplicates is supported
table.add_data(3, "c")
# Adding None isn't supported
with pytest.raises(TypeError):
table.add_data(None, "d")
# Assert that the data in this column is valid, but also properly typed
assert [row[0] for row in table.data] == [0, 1, 3, 3]
assert all([row[0] is None or row[0]._table == table_a for row in table.data])
def test_pk_cast(use_helper=False):
# Base Case
table = wandb.Table(columns=["id", "b"], data=[["1", "a"], ["2", "b"]])
# Validate that iterrows works as intended for no pks
assert [id_ for id_, row in list(table.iterrows())] == [0, 1]
# Cast as a PK
if use_helper:
table.set_pk("id")
else:
table.cast("id", wandb.data_types._PrimaryKeyType())
assert all(
[row[0]._table == table and row[0]._col_name == "id" for row in table.data]
)
# Adding is supported
table.add_data("3", "c")
    # Adding duplicates fails
# TODO: Enforce duplicate (not supported today)
# with pytest.raises(TypeError):
# table.add_data("3", "d")
# Adding None should fail
with pytest.raises(TypeError):
table.add_data(None, "d")
# Assert that the data in this column is valid, but also properly typed
assert [row[0] for row in table.data] == ["1", "2", "3"]
assert all(row[0]._table == table for row in table.data)
assert isinstance(
table._column_types.params["type_map"]["id"], wandb.data_types._PrimaryKeyType,
)
# Assert that multiple PKs are not supported
with pytest.raises(AssertionError):
if use_helper:
table.set_pk("b")
else:
table.cast("b", wandb.data_types._PrimaryKeyType())
# Fails on Numerics for now
table = wandb.Table(columns=["id", "b"], data=[[1, "a"], [2, "b"]])
with pytest.raises(TypeError):
if use_helper:
table.set_pk("id")
else:
table.cast("id", wandb.data_types._PrimaryKeyType())
# Assert that the table was not modified
assert all([row[0].__class__ == int for row in table.data])
assert not isinstance(
table._column_types.params["type_map"]["id"], wandb.data_types._PrimaryKeyType,
)
# TODO: Test duplicates (not supported today)
# Fails on initial duplicates
# table = wandb.Table(columns=["id", "b"], data=[["1", "a"], ["1", "b"]])
# with pytest.raises(TypeError):
# if use_helper:
# table.set_pk("id")
# else:
# table.cast("id", wandb.data_types._PrimaryKeyType())
# # Assert that the table was not modified
# assert all([row[0].__class__ == str for row in table.data])
# assert not isinstance(
# table._column_types.params["type_map"]["id"],wandb.data_types._ForeignKeyType
# )
def test_pk_helper():
test_pk_cast(use_helper=True)
def test_fk_cast(use_helper=False):
# Base Case
table_a = wandb.Table(columns=["id", "col_1"], data=[["1", "a"], ["2", "b"]])
table_a.set_pk("id")
table = wandb.Table(columns=["fk", "col_2"], data=[["1", "c"], ["2", "d"]])
# Cast as a FK
if use_helper:
table.set_fk("fk", table_a, "id")
else:
table.cast("fk", wandb.data_types._ForeignKeyType(table_a, "id"))
# Adding is supported
table.add_data("3", "c")
# Adding Duplicates is supported
table.add_data("3", "d")
# TODO: Implement constraint to only allow valid keys
# Assert that the data in this column is valid, but also properly typed
assert [row[0] for row in table.data] == ["1", "2", "3", "3"]
assert all(
[row[0]._table == table_a and row[0]._col_name == "id" for row in table.data]
)
assert isinstance(
table._column_types.params["type_map"]["fk"], wandb.data_types._ForeignKeyType,
)
# Fails on Numerics for now
table = wandb.Table(columns=["fk", "col_2"], data=[[1, "c"], [2, "d"]])
with pytest.raises(TypeError):
if use_helper:
table.set_fk("fk", table_a, "id")
else:
table.cast("fk", wandb.data_types._ForeignKeyType(table_a, "id"))
# Assert that the table was not modified
assert all([row[0].__class__ == int for row in table.data])
assert not isinstance(
table._column_types.params["type_map"]["fk"], wandb.data_types._ForeignKeyType,
)
def test_fk_helper():
test_fk_cast(use_helper=True)
def test_fk_from_pk_local_draft():
table_a = wandb.Table(columns=["id", "col_1"], data=[["1", "a"], ["2", "b"]])
table_a.set_pk("id")
table = wandb.Table(
columns=["fk", "col_2"], data=[[table_a.data[0][0], "c"], ["2", "d"]]
)
table.add_data("3", "c")
# None should not be supported
with pytest.raises(TypeError):
table.add_data(None, "c")
# Assert that the data in this column is valid, but also properly typed
assert [row[0] for row in table.data] == ["1", "2", "3"]
assert all(
[
row[0] is None or (row[0]._table == table_a and row[0]._col_name == "id")
for row in table.data
]
)
table = wandb.Table(columns=["fk", "col_2"], data=[["1", "c"], ["2", "d"]])
table.add_data(table_a.data[0][0], "c")
with pytest.raises(TypeError):
table.add_data(None, "c")
# Assert that the data in this column is valid, but also properly typed
assert [row[0] for row in table.data] == ["1", "2", "1"]
assert all(
[
row[0] is None or (row[0]._table == table_a and row[0]._col_name == "id")
for row in table.data
]
)
def test_loading_from_json_with_mixed_types():
"""
When a Table was saved with `allow_mixed_types=True`, the correct datatype
was saved to the serialized json object. However, loading that Table
caused an error; that datatype was never used in Table instantiation.
This unit test makes sure this path runs correctly.
"""
json_obj = {
"_type": "table",
"column_types": {
"params": {
"type_map": {
"Column_1": {
"params": {
"allowed_types": [{"wb_type": "any"}, {"wb_type": "none"},]
},
"wb_type": "union",
},
"Column_2": {
"params": {
"allowed_types": [{"wb_type": "any"}, {"wb_type": "none"},]
},
"wb_type": "union",
},
}
},
"wb_type": "typedDict",
},
"columns": ["Column_1", "Column_2"],
"data": [[0.0, None], [0.0, 5], [None, "cpu"]],
"ncols": 2,
"nrows": 3,
}
artifact = wandb.Artifact("my_artifact", type="dataset")
_ = wandb.Table.from_json(json_obj, artifact)
assert True
def test_datetime_conversion():
art = wandb.Artifact("A", "B")
t = wandb.Table(
columns=["dt", "t", "np", "d"],
data=[
[
datetime.datetime(2000, 12, d),
datetime.date(2000, 12, d),
np.datetime64("2000-12-" + ("0" if d < 10 else "") + str(d)),
d,
]
for d in range(1, 3)
],
)
json = t.to_json(art)
assert json["data"] == [
[975628800000, 975628800000, 975628800000, 1],
[975715200000, 975715200000, 975715200000, 2],
]
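# A small reference check (hypothetical, not part of the original suite):
# the expected values above are UTC epoch milliseconds.
def test_epoch_millis_reference():
    dt = datetime.datetime(2000, 12, 1, tzinfo=datetime.timezone.utc)
    assert int(dt.timestamp() * 1000) == 975628800000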
|
{"hexsha": "13baa3400aa8b18627c3423f66560e19fa5a05c8", "size": 7922, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_tables.py", "max_stars_repo_name": "borisgrafx/client", "max_stars_repo_head_hexsha": "c079f7816947a3092b500751eb920fda3866985f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_tables.py", "max_issues_repo_name": "borisgrafx/client", "max_issues_repo_head_hexsha": "c079f7816947a3092b500751eb920fda3866985f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_tables.py", "max_forks_repo_name": "borisgrafx/client", "max_forks_repo_head_hexsha": "c079f7816947a3092b500751eb920fda3866985f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.688, "max_line_length": 87, "alphanum_fraction": 0.5614743752, "include": true, "reason": "import numpy", "num_tokens": 2121}
|
import os,sys,inspect
sys.path.insert(1, os.path.join(sys.path[0], '../../../'))
import datetime
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import pdb
import torch
import core.datasets.utils as utils
import core.datasets.fastmri.transforms as transforms
import core.datasets.fastmri.subsample as subsample
from pathlib import Path
import random
import h5py
import xml.etree.ElementTree as etree
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
def et_query(
root: etree.Element,
qlist: Sequence[str],
namespace: str = "http://www.ismrm.org/ISMRMRD",
) -> str:
"""
ElementTree query function.
This can be used to query an xml document via ElementTree. It uses qlist
for nested queries.
Args:
root: Root of the xml to search through.
qlist: A list of strings for nested searches, e.g. ["Encoding",
"matrixSize"]
namespace: Optional; xml namespace to prepend query.
Returns:
The retrieved data as a string.
"""
s = "."
prefix = "ismrmrd_namespace"
ns = {prefix: namespace}
for el in qlist:
s = s + f"//{prefix}:{el}"
value = root.find(s, ns)
if value is None:
raise RuntimeError("Element not found")
return str(value.text)
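# Hypothetical usage sketch for et_query (not part of the original module):
def _et_query_example() -> None:
    """Query a toy ISMRMRD-style header for the encoded x matrix size."""
    xml_str = (
        '<ismrmrdHeader xmlns="http://www.ismrm.org/ISMRMRD">'
        "<encoding><encodedSpace><matrixSize>"
        "<x>640</x><y>368</y><z>1</z>"
        "</matrixSize></encodedSpace></encoding></ismrmrdHeader>"
    )
    root = etree.fromstring(xml_str)
    assert et_query(root, ["encoding", "encodedSpace", "matrixSize", "x"]) == "640"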
# Much of this code was taken from https://github.com/facebookresearch/fastMRI
class FastMRIDataset(Dataset):
def __init__(self, path, normalize_input, normalize_output, mask_info, num_volumes=None, slice_sample_period=1):
# Normalization parameters will be None at first
self.norm_params = None
print('loading dataset from ' + path + '...')
self.challenge = path.split('/')[-2].split('_')[0]
self.recons_key = (
"reconstruction_esc" if self.challenge == "singlecoil" else "reconstruction_rss"
)
self.cache_path = os.path.join(path, '.cache/')
os.makedirs(self.cache_path, exist_ok=True)
# load the dataset as a list of filenames
self.examples = []
# gather up volumes
files = list(Path(path).iterdir())
random.shuffle(files)
files = files[0:num_volumes] if (num_volumes and num_volumes < len(files)) else files
print('Loading ' + str(len(files)) + ' volumes...')
# gather up slices
for fname in files:
if 'cache' in str(fname):
continue
metadata, num_slices = self._retrieve_metadata(fname)
assert(num_slices > slice_sample_period)
self.examples += [
(fname, slice_ind, metadata) for slice_ind in range(0,num_slices, slice_sample_period)
]
print('Using ' + str(len(self.examples)) + ' total slices')
random.shuffle(self.examples)
# create sampling mask
mask_func = subsample.create_mask_for_mask_type( mask_info['type'], mask_info['center_fraction'], mask_info['acceleration'])
# create a data transform including k-space subsampling mask and normalization
self.transform = transforms.UnetDataTransform(self.challenge, mask_func=mask_func, use_seed=False)
self.normalize_input = normalize_input
self.normalize_output = normalize_output
def _retrieve_metadata(self, fname):
with h5py.File(fname, "r") as hf:
et_root = etree.fromstring(hf["ismrmrd_header"][()])
enc = ["encoding", "encodedSpace", "matrixSize"]
enc_size = (
int(et_query(et_root, enc + ["x"])),
int(et_query(et_root, enc + ["y"])),
int(et_query(et_root, enc + ["z"])),
)
rec = ["encoding", "reconSpace", "matrixSize"]
recon_size = (
int(et_query(et_root, rec + ["x"])),
int(et_query(et_root, rec + ["y"])),
int(et_query(et_root, rec + ["z"])),
)
lims = ["encoding", "encodingLimits", "kspace_encoding_step_1"]
enc_limits_center = int(et_query(et_root, lims + ["center"]))
enc_limits_max = int(et_query(et_root, lims + ["maximum"])) + 1
padding_left = enc_size[1] // 2 - enc_limits_center
padding_right = padding_left + enc_limits_max
num_slices = hf["kspace"].shape[0]
metadata = {
"padding_left": padding_left,
"padding_right": padding_right,
"encoding_size": enc_size,
"recon_size": recon_size,
}
return metadata, num_slices
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
fname, dataslice, metadata = self.examples[idx]
with h5py.File(fname, "r") as hf:
kspace = hf["kspace"][dataslice]
mask = np.asarray(hf["mask"]) if "mask" in hf else None
target = hf[self.recons_key][dataslice] if self.recons_key in hf else None
attrs = dict(hf.attrs)
attrs.update(metadata)
if self.transform is None:
sample = (kspace, mask, target, attrs, fname.name, dataslice)
else:
sample = self.transform(kspace, mask, target, attrs, fname.name, dataslice)
        if self.normalize_input == 'standard' and self.norm_params is not None:
            input_img = (sample[0] - self.norm_params['input_mean'])/self.norm_params['input_std']
        elif self.normalize_input == 'min-max' and self.norm_params is not None:
            input_img = (sample[0] - self.norm_params['input_min'])/self.norm_params['input_max']
        else:
            input_img = sample[0]
        if self.normalize_output == 'standard' and self.norm_params is not None:
            output_img = (sample[1] - self.norm_params['output_mean'])/self.norm_params['output_std']
        elif self.normalize_output == 'min-max' and self.norm_params is not None:
            output_img = (sample[1] - self.norm_params['output_min'])/self.norm_params['output_max']
        else:
            print("No normalization parameters yet.")
            output_img = sample[1]
return (input_img.unsqueeze(0), output_img.unsqueeze(0))
if __name__ == "__main__":
random.seed(1)
path = '/clusterfs/abc/amit/fastmri/knee/singlecoil_train/'
mask_info = {'type': 'equispaced', 'center_fraction' : [0.08], 'acceleration' : [4]}
dataset = FastMRIDataset(path, normalize_input='standard', normalize_output = 'min-max', mask_info=mask_info, num_volumes=5)
utils.normalize_dataset(dataset)
#loader = DataLoader(dataset, batch_size=5, shuffle=True)
pdb.set_trace()
|
{"hexsha": "592b7167030a33f117f2a36cc2411914cf33082e", "size": 6637, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/datasets/fastmri/FastMRIDataset.py", "max_stars_repo_name": "aangelopoulos/im2im-uq", "max_stars_repo_head_hexsha": "b95c3620b4741c09e7104a24fc5e87d77249971c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2022-02-11T03:26:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T15:09:42.000Z", "max_issues_repo_path": "core/datasets/fastmri/FastMRIDataset.py", "max_issues_repo_name": "aangelopoulos/im2im-uq", "max_issues_repo_head_hexsha": "b95c3620b4741c09e7104a24fc5e87d77249971c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/datasets/fastmri/FastMRIDataset.py", "max_forks_repo_name": "aangelopoulos/im2im-uq", "max_forks_repo_head_hexsha": "b95c3620b4741c09e7104a24fc5e87d77249971c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9257142857, "max_line_length": 132, "alphanum_fraction": 0.6239264728, "include": true, "reason": "import numpy", "num_tokens": 1550}
|
import json
import os
import re
import pandas as pd
import numpy as np
from python_lib.errors import ExtensionError
path_exp = "./results_server/results/210612_004737/"
class CompilePheWAS_Results():
def __init__(self,
path_exp):
self.path_logs_stats = os.path.join(path_exp, "logs_association_statistics")
self.path_logs_hpds = os.path.join(path_exp, "logs_association_statistics")
self.path_association_statistics_results = os.path.join(path_exp, "association_statistics")
    @staticmethod
    def download_dropbox(function):
        def _download_dropbox(*args, **kwargs):
            # read_file is a staticmethod, so args[0] is typically a plain
            # file name rather than an instance; guard the attribute access
            if getattr(args[0], "dropbox", False) is True:
                # TODO: to be implemented eventually
                pass
            return function(*args, **kwargs)
        return _download_dropbox
@staticmethod
@download_dropbox
def read_file(file_name, dir_path, *args, **kwargs):
file_path = os.path.join(dir_path, file_name)
extension_regex = re.compile(r"\.[a-z]+$")
extension = re.search(extension_regex, file_path).group()
if extension == ".csv":
output = pd.read_csv(file_path, *args, **kwargs)
elif extension == ".json":
with open(file_path, "w+") as json_file:
output = json.load(json_file, *args, **kwargs)
elif extension == ".txt":
with open(file_path, "w+") as text_file:
output = text_file.read()
else:
raise ExtensionError
return output
    def get_logs_hpds(self):
        pass  # TODO: not yet implemented
    def get_logs_quality_checking(self):
        pass  # TODO: not yet implemented
    def get_logs_statistics(self):
        pass  # TODO: not yet implemented
    def get_descriptive_statistics(self):
        pass  # TODO: not yet implemented
def compile_association_statistics(self):
# pd.read_csv(self.path_association_statistics_results)
return
if __name__ == '__main__':
path_exp = "./results_server/results/210612_004737/"
path_logs_stats = os.path.join(path_exp, "logs_association_statistics")
path_logs_hpds = os.path.join(path_exp, "logs_association_statistics")
path_association_statistics_results = os.path.join(path_exp, "association_statistics")
path_test_df = os.path.join(path_association_statistics_results, "phs000007/0.csv")
path_test_df = os.path.join(path_association_statistics_results, "phs000007/23.csv")
test_df = pd.read_csv(path_test_df)
test_df.to_pickle(os.path.join(path_association_statistics_results, "phs000007/0.zip"),
compression="infer")
test_df.to_pickle(os.path.join(path_association_statistics_results, "phs000007/0.gzip"),
compression="infer")
test_df.to_pickle(os.path.join(path_association_statistics_results, "phs000007/23.zip"),
compression="infer")
test_df.to_pickle(os.path.join(path_association_statistics_results, "phs000007/23.pickle"),
compression="infer")
test2 = pd.read_pickle(os.path.join(path_association_statistics_results, "phs000007/23.zip"))
test2 = pd.read_pickle(os.path.join(path_association_statistics_results, "phs000007/0.zip"))
path_test_df = os.path.join(path_association_statistics_results, "phs000007/0.csv")
## Get the number of phenotypic variables
study_info = pd.read_csv("env_variables/studies_info_manual_dont_erase.csv", index_col=0)
df_eligible_variables = pd.read_csv("env_variables/list_eligible_variables.csv")\
.join(study_info["BDC_study_name"], on="phs")
df_eligible_variables.BDC_study_name.unique().shape[0]
df_eligible_variables_value_counts = df_eligible_variables["BDC_study_name"]\
.value_counts()
sum_df_eligible_variables_counts = df_eligible_variables_value_counts.sum()
df_eligible_variables_value_counts\
.append(pd.Series([sum_df_eligible_variables_counts], index=["Total"]), ignore_index=False)\
.rename_axis("Name Study")\
.rename("Phenotypic Variables Count")\
.to_frame()\
.to_csv("exports_presentation/tables/ind_variable_counts_per_studies.csv")
def read_json_associations(path_dir):
import glob, os
from pathlib import Path
list_logs = []
for phs in os.listdir(path_dir):
print(phs)
path_subdir = os.path.join(path_dir, phs)
print(path_subdir)
for batch_group in os.listdir(path_subdir):
print(batch_group)
path_file = os.path.join(path_subdir, batch_group)
print(path_file)
            with open(path_file, "r") as json_file:
                logs = json.load(json_file)
            list_logs.append(logs)
df_logs = pd.concat([pd.DataFrame.from_dict(df) for df in list_logs])
return df_logs
def read_association_results(path_dir):
list_results = []
for phs in os.listdir(path_dir):
print(phs)
path_subdir = os.path.join(path_dir, phs)
print(path_subdir)
for batch_group in os.listdir(path_subdir):
print(batch_group)
path_file = os.path.join(path_subdir, batch_group)
print(path_file)
results = pd.read_csv(path_file)
pvalues = results.loc[lambda df: df["indicator"].str.contains("LRT") == True, ["value", "independent_var_id", "dependent_var_id"]]
list_results.append(pvalues)
df_results_pvalues = pd.concat([pd.DataFrame.from_dict(df) for df in list_results])
return df_results_pvalues
# pvalues2 = read_association_results(os.path.join(path_exp, "association_statistics"))
# Number of eligible variables after quality checking eligible variables
quality_checking = pd.read_csv("quality_checking")
# Information to gather
## Number of batch runs
monitor_process = pd.read_table(os.path.join(path_exp, "monitor_process.tsv"),
sep="\t",
header=None)\
.rename({0: "phs", 1:"batch_group"}, axis=1)\
.set_index(["phs", "batch_group"])
df_eligible_variables.set_index(["phs", "batch_group"])\
.join(monitor_process, how="inner")
## Number of studies
## Number of pvalues
## Number of associations ran
df_results_pvalues = read_association_results(os.path.join(path_exp, "association_statistics"))
df_results_pvalues.shape[0]
# Bonferroni threshold for significance
bonferonni = 0.05/df_results_pvalues.shape[0]
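# assumed alias (not in the original script): the plotting fragment at the
# bottom of this file refers to `adjusted_alpha`; tie it to the Bonferroni
# threshold computed above
adjusted_alpha = bonferonni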
## Number of failures
df_results_pvalues.value.isna().value_counts()
## Number of results below threshold
df_results_pvalues.loc[lambda df: df["value"]< bonferonni, :]
df_results_pvalues.loc[lambda df: df["value"]< 0.05, :]
## Number of counts
## Parameters to tweak
## Size of results
## Expected size of results
## Some
## Technical challenges
# --> adapt the instance
## Methodological challenges
# --> how to produce meaningful results?
# --> Cross with PubMed results
# --> Produce a result explorer
# - What I suspect: some categorical variables create many subcategories, leading to many supplemental models being run
# Leftover exploration fragment (not runnable at module level):
# results.info()
# df_results = pd.read_csv(path_dir + "")
# for sub_dir in sub_directories:
#     os.chdir(sub_dir)
#     for batch_group in glob.glob("*.json"):
#         all_json_files.append(sub_dir + "/" + batch_group)
# # Get back to original working directory
# os.chdir(working_directory)
# list_of_dfs = [pd.read_json(x) for x in all_json_files]
# return jsons
path_association_results = "results/archives/association_statistics/phs000007/0.csv"
association_statistics = pd.read_csv(path_association_results)
association_statistics.info()
len(association_statistics.independent_var_name.unique())
len(association_statistics.dependent_var_name.unique())
test = pd.pivot(association_statistics,
columns="indicator",
values="value")
test.columns
test.coeff_LogR.isna().value_counts()
# from datetime import datetime, timezone
# dt = datetime(2020, 6, 1)
# timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
# print(timestamp)
root_dir = "results/associations"
list_files = [f for f in os.listdir(root_dir) if re.match(".*_pvalues.json$", f)]
studies_info = pd.read_csv("env_variables/studies_info.csv").loc[:, ["phs", "BDC_study_name"]]\
.set_index("phs").iloc[:, 0]
#studies_info["file_name"] = studies_info["phs"] + "_pvalues.json"
#dic_file_study_name = {file_name: study_name for file_name, study_name in studies_info.set_index("file_name")["BDC_study_name"].iteritems()}
dic_pvalues = {}
#for file_name in list_files:
with open("env_variables/phs_list.txt", "r") as f:
phs_list = f.read().splitlines()
for file_path in list_files:
phs = re.search("phs[0-9]+(?=_)", file_path).group()
with open(os.path.join("results/associations", file_path), "r") as f:
dic_pvalues[phs] = json.load(f)
#df_pvalues = pd.DataFrame.from_dict({(file_name, variable): pvalue for
# file_name, dic_var_pvalues in dic_pvalues.items() for
# variable, pvalue in dic_var_pvalues.items()
#}, orient="index")
### Changes because some scenarios didn't run
# dic_pvalues.pop("NHLBI TOPMed: Genetics of Cardiometabolic Health in the Amish")
# dic_pvalues.pop("Heart and Vascular Health Study (HVH)")
# df_pvalues = pd.DataFrame.from_dict({(study_name, dependent_var_name, variable): pvalue["llr_pvalue"] for
# study_name, dependent_var_dic in dic_pvalues.items() for
# dependent_var_name, dic_var_pvalues in dependent_var_dic.items() for
# variable, pvalue in dic_var_pvalues.items()
# }, orient="index")
flat_dic = {}
flat_dic_subvar = {}
for study_name, dependent_var_dic in dic_pvalues.items():
for dependent_var_name, dic_var_pvalues in dependent_var_dic.items():
for variable, pvalue in dic_var_pvalues.items():
if isinstance(pvalue, float):
flat_dic[(study_name, dependent_var_name, variable)] = pvalue
elif isinstance(pvalue, dict):
flat_dic[(study_name, dependent_var_name, variable)] = pvalue.pop("llr_pvalue")
for subvar, params in pvalue.items():
for param, param_value in params.items():
flat_dic_subvar[(study_name, dependent_var_name, variable, subvar, param)] = param_value
else:
raise TypeError("error for {}".format((study_name, dependent_var_name, variable)))
df_pvalues = pd.Series(flat_dic, name="pvalues").reset_index()
df_params = pd.Series(flat_dic_subvar, name="param").reset_index()
df_pvalues.to_csv("results/df_results/df_pvalues.csv")
df_params.to_csv("results/df_results/df_params.csv")
import matplotlib.pyplot as plt
# df_pvalues.set_index("level_0")["pvalues"].plot(kind="hist", bins=30)
# df_pvalues.index = pd.MultiIndex.from_tuples(df_pvalues.index)
# df_pvalues = df_pvalues.reset_index(-1, drop=False)
# df_pvalues.columns = ["variable", "pvalue"]
# df_pvalues = df_pvalues.dropna(subset=["pvalue"])
# mask_0 = df_pvalues["pvalue"] == 0
# df_pvalues = df_pvalues.loc[~mask_0,:]
# df_pvalues.index.value_counts().to_frame()
#
# multiIndex_variablesDict = pd.read_csv("multiIndex_variablesDict.csv", index_col=list(range(0, 13)), low_memory=False)
# simplified_varnames = multiIndex_variablesDict.loc[:, ["varName", "simplified_varName"]].reset_index(drop=True)
#
# df_pvalues = df_pvalues.join(simplified_varnames.rename({"varName": "variable"}, axis=1).set_index("variable"), on= "variable", how="left")
# df_pvalues.to_csv("df_pvalues_bis.csv")
####################
# group_counts = df_pvalues["group"].value_counts()
# group_to_merge = group_counts[group_counts < threshold_group_cat].index
# mask_group_to_merge = df_pvalues["group"].isin(group_to_merge)
# df_pvalues.loc[mask_group_to_merge, "group"] = "Other"
# df_pvalues = df_pvalues.sort_values(by="group", axis=0)
# dic_renaming = {
# 'Genetic Epidemiology of COPD (COPDGene)': 'COPDGene',
# 'Genetic Epidemiology Network of Arteriopathy (GENOA)': 'GENOA',
# 'NHLBI TOPMed: Genetics of Cardiometabolic Health in the Amish': 'Genetics',
# 'Genome-wide Association Study of Adiposity in Samoans': 'GEWAS Samoans',
# 'Genetics of Lipid Lowering Drugs and Diet Network (GOLDN) Lipidomics Study': 'GOLDN',
# 'Heart and Vascular Health Study (HVH)': 'HVH'
# }
# df_pvalues["group"] = df_pvalues["group"].replace(dic_renaming)
# df_pvalues["variable"] = df_pvalues["variable"].str.replace("[0-9]+[A-z]*", "").to_frame()
# order_studies = df_pvalues.index.get_level_values(0).unique().tolist()[::-1]
# df_pvalues = df_pvalues.reindex(order_studies, level=0)
pair_ind = 0  # to shift labels that might overlap because they are too close
for n, row in group.iterrows():
# if pair_ind %2 == 0:
# shift = 1.1
# else:
# shift = -1.1
if row["log_p"] > threshold_top_values:
ax.text(row['ind'] + 3, row["log_p"] + 0.05, row["simplified_varName"], rotation=0, alpha=1, size=8,
color="black")
# pair_ind += 1
ax.set_xticks(x_labels_pos)
ax.set_xticklabels(x_labels)
ax.set_xlim([0, len(df_pvalues) + 1])
ax.set_ylim(y_lims)
ax.set_ylabel('-log(p-values)', style="italic")
ax.set_xlabel('Phenotypes', fontsize=15)
ax.axhline(y=-np.log10(adjusted_alpha), linestyle=":", color="black", label="Bonferroni Adjusted Threshold")
plt.xticks(fontsize=9, rotation=30)
plt.yticks(fontsize=8)
plt.title(title_plot,
loc="left",
style="oblique",
fontsize=20,
y=1)
xticks = ax.xaxis.get_major_ticks()
handles, labels = ax.get_legend_handles_labels()
plt.legend(handles=handles, labels=labels, loc="upper left")
plt.show()
|
{"hexsha": "24b27442d9f43d0229db0f3981f3c199a56fe6d6", "size": 13890, "ext": "py", "lang": "Python", "max_stars_repo_path": "compile_run_PheWAS.py", "max_stars_repo_name": "hms-dbmi/BDC_HarmonizedVars_PheWAS", "max_stars_repo_head_hexsha": "e5bf15cfbd7a9e329e5760d1427d3debc8290bc5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "compile_run_PheWAS.py", "max_issues_repo_name": "hms-dbmi/BDC_HarmonizedVars_PheWAS", "max_issues_repo_head_hexsha": "e5bf15cfbd7a9e329e5760d1427d3debc8290bc5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compile_run_PheWAS.py", "max_forks_repo_name": "hms-dbmi/BDC_HarmonizedVars_PheWAS", "max_forks_repo_head_hexsha": "e5bf15cfbd7a9e329e5760d1427d3debc8290bc5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-15T22:09:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-15T22:09:15.000Z", "avg_line_length": 39.3484419263, "max_line_length": 144, "alphanum_fraction": 0.6804895608, "include": true, "reason": "import numpy", "num_tokens": 3379}
|
import numpy as np
import math
import tensorflow as tf
def tf_1d_to_ndarray(data, datatype=tf.float64):
    # evaluate the tensor in a TF1 session; `datatype` is accepted for API
    # symmetry with the helpers below but is not applied to 1-D inputs
    with tf.Session() as sess:
        data = sess.run(data)
    return data
def tf_to_ndarray(data, datatype=tf.float32):
data = tf.image.convert_image_dtype(data[0, ..., 0], dtype=datatype)
with tf.Session() as sess:
data = sess.run(data)
return data
def tf_rgb_to_ndarray(data, datatype=tf.float32):
data = tf.image.convert_image_dtype(data[0, ..., :], dtype=datatype)
with tf.Session() as sess:
data = sess.run(data)
return data
def tf2_rgb_to_ndarray(data, datatype=tf.float32):
data = tf.image.convert_image_dtype(data[0, ..., :], dtype=datatype)
return data
def tf_rank4_to_ndarray(data, datatype=tf.float32):
data = tf.image.convert_image_dtype(data[0, ..., 0], dtype=datatype)
with tf.Session() as sess:
data = sess.run(data)
return data
def tf_rank2_to_ndarray(data, datatype=tf.float32):
data = tf.image.convert_image_dtype(data, dtype=datatype)
with tf.Session() as sess:
data = sess.run(data)
return data
def cast_like_matlab_uint8_2d_rgb(data):
data = np.clip(data, 0, 255)
h, w, c = data.shape
for ch in range(c):
for row in range(h):
for col in range(w):
frac, integ = math.modf(data[row, col, ch])
if frac > 0.5:
data[row, col, ch] = np.ceil(data[row, col, ch])
elif frac <= 0.5:
data[row, col, ch] = np.floor(data[row, col, ch])
return data.astype('uint8')
def cast_like_matlab_uint8_2d(data):
data = np.clip(data, 0, 255)
h, w = data.shape
for row in range(h):
for col in range(w):
frac, integ = math.modf(data[row,col])
if frac > 0.5:
data[row, col] = np.ceil(data[row, col])
elif frac <= 0.5:
data[row, col] = np.floor(data[row, col])
return data.astype('uint8')
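def cast_like_matlab_uint8_2d_fast(data):
    """Editor's sketch: vectorized version of the rounding helpers above.

    Applies the same half-up rule (fractions > 0.5 round up, otherwise down)
    with NumPy ops instead of Python loops; works for 2-D and 3-D arrays.
    Not part of the original module.
    """
    data = np.clip(data, 0, 255)
    frac = data - np.floor(data)
    rounded = np.where(frac > 0.5, np.ceil(data), np.floor(data))
    return rounded.astype('uint8')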
|
{"hexsha": "5d29420169d1f35c21e9cc9558e9efafce6f66b7", "size": 2012, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tensorflow_wavelets/utils/cast.py", "max_stars_repo_name": "simonsimon006/tensorflow-wavelets", "max_stars_repo_head_hexsha": "21a095bf0048ae2488ca5ae4961d2cbfe94263a9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tensorflow_wavelets/utils/cast.py", "max_issues_repo_name": "simonsimon006/tensorflow-wavelets", "max_issues_repo_head_hexsha": "21a095bf0048ae2488ca5ae4961d2cbfe94263a9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-11T14:47:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T14:52:51.000Z", "max_forks_repo_path": "src/tensorflow_wavelets/utils/cast.py", "max_forks_repo_name": "simonsimon006/tensorflow-wavelets", "max_forks_repo_head_hexsha": "21a095bf0048ae2488ca5ae4961d2cbfe94263a9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-11T12:18:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-11T12:18:21.000Z", "avg_line_length": 27.1891891892, "max_line_length": 72, "alphanum_fraction": 0.5994035785, "include": true, "reason": "import numpy", "num_tokens": 532}
|
\documentclass[11pt, a4paper, oneside]{article}
\pagenumbering{arabic}
\usepackage{amssymb,amsmath}
\usepackage[utf8]{inputenc}
\usepackage[unicode=true]{hyperref}
\usepackage{titling} % configure maketitle
\usepackage{longtable,booktabs,lscape}
\usepackage[margin=2.5cm]{geometry}
\PassOptionsToPackage{usenames,dvipsnames}{color} % color is loaded by hyperref
\hypersetup{
$if(title-meta)$
pdftitle={$title-meta$},
$endif$
$if(author-meta)$
pdfauthor={$author-meta$},
$endif$
$if(keywords)$
pdfkeywords={$for(keywords)$$keywords$$sep$, $endfor$},
$endif$
$if(colorlinks)$
colorlinks=true,
linkcolor=$if(linkcolor)$$linkcolor$$else$Maroon$endif$,
citecolor=$if(citecolor)$$citecolor$$else$Blue$endif$,
urlcolor=$if(urlcolor)$$urlcolor$$else$Blue$endif$,
$else$
pdfborder={0 0 0},
$endif$
breaklinks=true}
\urlstyle{same} % don't use monospace font for urls
$if(natbib)$
\usepackage{natbib}
\bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$}
$endif$
\usepackage{graphicx,grffile}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{$if(secnumdepth)$$secnumdepth$$else$5$endif$}
$if(csl-refs)$
\newlength{\cslhangindent}
\setlength{\cslhangindent}{1.5em}
\newlength{\csllabelwidth}
\setlength{\csllabelwidth}{3em}
\newlength{\cslentryspacingunit} % times entry-spacing
\setlength{\cslentryspacingunit}{\parskip}
\newenvironment{CSLReferences}[2] % #1 hanging-ident, #2 entry spacing
{% don't indent paragraphs
\setlength{\parindent}{0pt}
% turn on hanging indent if param 1 is 1
\ifodd #1
\let\oldpar\par
\def\par{\hangindent=\cslhangindent\oldpar}
\fi
% set entry spacing
\setlength{\parskip}{#2\cslentryspacingunit}
}%
{}
\usepackage{calc}
\newcommand{\CSLBlock}[1]{#1\hfill\break}
\newcommand{\CSLLeftMargin}[1]{\parbox[t]{\csllabelwidth}{#1}}
\newcommand{\CSLRightInline}[1]{\parbox[t]{\linewidth - \csllabelwidth}{#1}\break}
\newcommand{\CSLIndent}[1]{\hspace{\cslhangindent}#1}
$endif$
$for(header-includes)$
$header-includes$
$endfor$
$if(author-header)$
\usepackage{fancyhdr}
\setlength{\headheight}{15pt}
\pagestyle{fancy}
\fancyhead{} % clear all header fields
\fancyhead[LE,RO]{\thepage}
\fancyhead[LE]{\emph{$author-header$}}
\fancyhead[RO]{\emph{$title$}}
\cfoot{\thepage}
$endif$
$if(title)$
\title{$title$$if(thanks)$\thanks{$thanks$} $endif$ }
$endif$
$if(subtitle)$
\providecommand{\subtitle}[1]{}
\subtitle{$subtitle$}
$endif$
\renewcommand\and{\\[\baselineskip]}
$if(author)$
\author{$for(author)$$author.name$\thanks{$author.affiliation$, \texttt{$author.email$}, $if(author.orcid)$ ORCID: \href{https://orcid.org/$author.orcid$}{$author.orcid$}$endif$}$sep$\par $endfor$}
$endif$
% $if(author)$
% \author{$for(author)$$author.name$\par
% $author.affiliation$,\par
% \texttt{$author.email$}, $if(author.orcid)$\par
% ORCID: \href{https://orcid.org/$author.orcid$}{$author.orcid$}$endif$$sep$\and $endfor$}
% $endif$
$if(author)$
\providecommand{\institute}[1]{}
\institute{$for(author)$$author.affiliation$$sep$ \and $endfor$}
$endif$
\date{$date$}
\renewcommand{\abstractname}{}
\pretitle{\begin{flushright}\LARGE\bfseries}
\posttitle{\end{flushright}}
\preauthor{\begin{flushleft}\Large}
\postauthor{\end{flushleft}}
\predate{\begin{flushleft}}
\postdate{\end{flushleft}}
\thanksmarkseries{fnsymbol}
\begin{document}
\maketitle
\thispagestyle{empty}
% manual title block:
\begin{flushleft}
\par\vspace*{3em}\LARGE\bfseries{$title$}
\mdseries\par\vskip 1em
$for(author)$
\Large $author.name$\vspace{1.2mm} \newline
\normalsize\emph{$author.affiliation$} \newline
\footnotesize \url{$author.email$}\newline
$if(author.orcid)$ ORCID: \href{https://orcid.org/$author.orcid$}{$author.orcid$}$endif$\vspace*{5mm}\newline
$sep$
$endfor$
\normalsize $date$
\vspace*{5mm}
\end{flushleft}
% end custom title
\normalsize
\pagestyle{fancy}
$if(abstract)$
\begin{abstract}
\vspace{-1cm}\noindent \emph{Abstract:} $abstract$
\end{abstract}
$endif$
$for(include-before)$
$include-before$
$endfor$
$body$
$if(natbib)$
$if(bibliography)$
$if(biblio-title)$
$if(book-class)$
\renewcommand\bibname{$biblio-title$}
$else$
\renewcommand\refname{$biblio-title$}
$endif$
$endif$
\bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$}
$endif$
$else$
% $if(bibliography)$
% % print references if there is a bibliography but NOT natbib.
% \section*{References}
% $endif$
$endif$
$if(biblatex)$
\printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$
$endif$
$for(include-after)$
$include-after$
$endfor$
\end{document}
|
{"hexsha": "87c59f1717289b4f4ea2d5c9a9eb8967464e7246", "size": 5339, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "templates/chroma-titling.tex", "max_stars_repo_name": "cpmpercussion/chroma-template", "max_stars_repo_head_hexsha": "b8f00e934d15b0e1de0c6a52f12500b6df4f1ba1", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "templates/chroma-titling.tex", "max_issues_repo_name": "cpmpercussion/chroma-template", "max_issues_repo_head_hexsha": "b8f00e934d15b0e1de0c6a52f12500b6df4f1ba1", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "templates/chroma-titling.tex", "max_forks_repo_name": "cpmpercussion/chroma-template", "max_forks_repo_head_hexsha": "b8f00e934d15b0e1de0c6a52f12500b6df4f1ba1", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.695, "max_line_length": 198, "alphanum_fraction": 0.7085596554, "num_tokens": 1789}
|
\section*{Work Experience}
\begin{entrylist}
\entry
{March 2021\\ Ongoing}
{Software Developer}
{Multimedia Srl}
{Developed software for very large healthcare institutions, following all the security measures needed to keep sensitive data protected.
I worked under tight deadlines to deliver the software before the Covid-19 vaccination campaign launched.}
\entry
{May 2014\\ June 2014}
{Full-Stack Developer}
{Applied Genomics Institute}
{During my internship at the company, I developed a management system that lets researchers interact with the cluster without needing technical knowledge of how its infrastructure works.}
\end{entrylist}
|
{"hexsha": "237a87513e9e7baf92cf1ec9a18d188d00187cbb", "size": 697, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "sections/experience.tex", "max_stars_repo_name": "LLoyderino/Curriculum-Vitae", "max_stars_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sections/experience.tex", "max_issues_repo_name": "LLoyderino/Curriculum-Vitae", "max_issues_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sections/experience.tex", "max_forks_repo_name": "LLoyderino/Curriculum-Vitae", "max_forks_repo_head_hexsha": "2f6c5159f1afa42f99265ab8c47fc048600fecca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.4666666667, "max_line_length": 194, "alphanum_fraction": 0.7661406026, "num_tokens": 152}
|
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
from collections import OrderedDict
import os
from copy import copy
# embedding vector
Z_dim = 128
# L1 reconstruction loss balance
reconstruction_loss_lambda = 1.
# to avoid log(0) of loss
epsilon = 1e-12
img_height, img_width = 32, 32
channel = 3
GPU = True
device = torch.device("cuda" if GPU else "cpu")
torch.manual_seed(0)
save_dir = 'output_gan'
os.makedirs(save_dir, exist_ok=True)
def weights_init(m):
    classname = m.__class__.__name__
    # match PyTorch layer class names ('Conv2d', 'BatchNorm2d'); the original
    # lowercase patterns ('conv', 'bn') never matched, so no layer was initialized
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0)
class Flatten(torch.nn.Module):
def forward(self, x):
x = x.view(x.size()[0], -1)
return x
class Reshape(torch.nn.Module):
def __init__(self, c, h, w):
super(Reshape, self).__init__()
self.c = c
self.h = h
self.w = w
def forward(self, x):
x = x.view(x.size()[0], self.c, self.h, self.w)
return x
# ResNet block
class ResBlock(torch.nn.Module):
def __init__(self, dim_first=None, dim=128, activation_fn=torch.nn.ReLU(), batch_norm=False):
super(ResBlock, self).__init__()
if dim_first is None:
dim_first = dim
else:
if batch_norm:
self.skip_conv = torch.nn.Sequential(
torch.nn.BatchNorm2d(dim_first),
activation_fn,
torch.nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1)
)
else:
self.skip_conv = torch.nn.Sequential(
activation_fn,
torch.nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1)
)
if batch_norm:
self.block = torch.nn.Sequential(
torch.nn.BatchNorm2d(dim_first),
activation_fn,
torch.nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1),
torch.nn.BatchNorm2d(dim),
activation_fn,
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1)
)
else:
self.block = torch.nn.Sequential(
activation_fn,
torch.nn.Conv2d(dim_first, dim, kernel_size=3, padding=1, stride=1),
activation_fn,
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=1, stride=1)
)
def forward(self, x):
res_x = self.block(x)
if hasattr(self, 'skip_conv'):
x = self.skip_conv(x)
x = torch.add(res_x, x)
x = F.relu(x)
return x
class Encoder(torch.nn.Module):
def __init__(self):
super(Encoder, self).__init__()
in_h = img_height // 8
in_w = img_width // 8
dim = 128
self.module = torch.nn.Sequential(
torch.nn.Conv2d(channel, dim, kernel_size=3, stride=1, padding=1),
torch.nn.BatchNorm2d(dim),
torch.nn.ReLU(),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.MaxPool2d(2, stride=2),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.MaxPool2d(2, stride=2),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.MaxPool2d(2, stride=2),
Flatten(),
torch.nn.Linear(dim * in_h * in_w, Z_dim)
)
def forward(self, x):
x = self.module(x)
return x
class Generator(torch.nn.Module):
def __init__(self):
in_h = img_height // 8
in_w = img_width // 8
dim = 128
super(Generator, self).__init__()
self.module = torch.nn.Sequential(
torch.nn.Linear(Z_dim, dim * in_h * in_w),
Reshape(dim, in_h, in_w),
torch.nn.BatchNorm2d(dim),
torch.nn.ReLU(),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.UpsamplingBilinear2d(scale_factor=2),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.UpsamplingBilinear2d(scale_factor=2),
ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.UpsamplingBilinear2d(scale_factor=2),
#ResBlock(dim=dim, activation_fn=torch.nn.ReLU(), batch_norm=True),
torch.nn.Conv2d(dim, channel, kernel_size=3, stride=1, padding=1),
torch.nn.Tanh(),
)
def forward(self, x):
x = self.module(x)
return x
class Discriminator(torch.nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
dim = 128
self.module = torch.nn.Sequential(
ResBlock(dim_first=channel, dim=dim, activation_fn=torch.nn.LeakyReLU(0.2), batch_norm=False),
#torch.nn.Conv2d(channel, dim, kernel_size=3, padding=1, stride=1),
#torch.nn.LeakyReLU(0.2),
torch.nn.AvgPool2d(2, stride=2),
ResBlock(dim=dim, activation_fn=torch.nn.LeakyReLU(0.2), batch_norm=False),
torch.nn.AvgPool2d(2, stride=2),
ResBlock(dim=dim, activation_fn=torch.nn.LeakyReLU(0.2), batch_norm=False),
ResBlock(dim=dim, activation_fn=torch.nn.LeakyReLU(0.2), batch_norm=False),
torch.nn.ReLU(),
torch.nn.AdaptiveAvgPool2d((1, 1)),
Flatten(),
#torch.nn.Linear(dim * (img_height // 8) * (img_width // 8), 1),
torch.nn.Linear(dim, 1),
torch.nn.Sigmoid()
)
def forward(self, x):
x = self.module(x)
return x
class Code_Discriminator(torch.nn.Module):
def __init__(self):
super(Code_Discriminator, self).__init__()
hidden_dim = 750
self.module = torch.nn.Sequential(
torch.nn.Linear(Z_dim, hidden_dim),
torch.nn.LeakyReLU(0.2),
torch.nn.Linear(hidden_dim, hidden_dim),
torch.nn.LeakyReLU(0.2),
#torch.nn.Linear(hidden_dim, hidden_dim),
#torch.nn.LeakyReLU(0.2),
torch.nn.Linear(hidden_dim, 1),
torch.nn.Sigmoid()
)
def forward(self, x):
x = self.module(x)
return x
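def _shape_smoke_test():
    # Editor's sketch (not in the original script): one forward pass per
    # network confirms the expected tensor shapes for the 32x32 CIFAR
    # defaults; call it manually if the architecture is modified.
    _x = torch.zeros(2, channel, img_height, img_width)
    _z = torch.zeros(2, Z_dim)
    assert Encoder()(_x).shape == (2, Z_dim)
    assert Generator()(_z).shape == (2, channel, img_height, img_width)
    assert Discriminator()(_x).shape == (2, 1)
    assert Code_Discriminator()(_z).shape == (2, 1)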
import pickle
import os
def load_cifar10():
    path = 'cifar-10-batches-py'
    if not os.path.exists(path):
        # download and unpack the CIFAR-10 python batches; the original
        # wget/tar calls pointed at the extracted directory name
        url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
        os.system("wget {}".format(url))
        os.system("tar xvf cifar-10-python.tar.gz")
# train data
train_x = np.ndarray([0, 32, 32, 3], dtype=np.float32)
    train_y = np.ndarray([0, ], dtype=np.int64)  # np.int is removed in NumPy >= 1.24
for i in range(1, 6):
data_path = path + '/data_batch_{}'.format(i)
with open(data_path, 'rb') as f:
datas = pickle.load(f, encoding='bytes')
print(data_path)
x = datas[b'data']
x = x.reshape(x.shape[0], 3, 32, 32)
x = x.transpose(0, 2, 3, 1)
train_x = np.vstack((train_x, x))
        y = np.array(datas[b'labels'], dtype=np.int64)
train_y = np.hstack((train_y, y))
print(train_x.shape)
print(train_y.shape)
# test data
data_path = path + '/test_batch'
with open(data_path, 'rb') as f:
datas = pickle.load(f, encoding='bytes')
print(data_path)
x = datas[b'data']
x = x.reshape(x.shape[0], 3, 32, 32)
test_x = x.transpose(0, 2, 3, 1)
    test_y = np.array(datas[b'labels'], dtype=np.int64)
print(test_x.shape)
print(test_y.shape)
return train_x, train_y, test_x, test_y
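# Editor's note: load_cifar10() returns HWC arrays -- train_x (50000, 32, 32, 3)
# float32 and train_y (50000,) int labels -- which train() below rescales to
# [-1, 1] and transposes to NCHW before feeding the networks.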
# train
def train():
# model
G = Generator().to(device)
D = Discriminator().to(device)
E = Encoder().to(device)
CD = Code_Discriminator().to(device)
opt_G = torch.optim.Adam(G.parameters(), lr=0.0005, betas=(0.5, 0.9))
opt_D = torch.optim.Adam(D.parameters(), lr=0.0005, betas=(0.5, 0.9))
opt_E = torch.optim.Adam(E.parameters(), lr=0.0001, betas=(0.5, 0.9))
opt_CD = torch.optim.Adam(CD.parameters(), lr=0.0005, betas=(0.5, 0.9))
train_x, train_y, test_x, test_y = load_cifar10()
xs = train_x / 127.5 - 1
xs = xs.transpose(0, 3, 1, 2)
# training
mb = 64
mbi = 0
train_N = len(xs)
train_ind = np.arange(train_N)
np.random.seed(0)
np.random.shuffle(train_ind)
BCE_loss = torch.nn.BCELoss()
L1_loss = torch.nn.L1Loss()
# get next minibatch index
def get_next_minibatch(train_ind, mbi, mb=mb):
train_N = len(train_ind)
if mbi + mb > train_N:
mb_ind = copy(train_ind[mbi:])
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb - (train_N - mbi))]))
mbi = mb - (train_N - mbi)
else:
mb_ind = train_ind[mbi: mbi + mb]
mbi += mb
return mb_ind, train_ind
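    # Editor's note: get_next_minibatch cycles through train_ind; when fewer
    # than mb indices remain it reshuffles and tops the batch up from the new
    # permutation, so every batch has exactly mb samples.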
for i in range(100000):
#if mbi + mb > train_N:
# mb_ind = copy(train_ind[mbi:])
# np.random.shuffle(train_ind)
# mb_ind = np.hstack((mb_ind, train_ind[:(mb-(train_N-mbi))]))
# mbi = mb - (train_N - mbi)
#else:
# mb_ind = train_ind[mbi: mbi+mb]
# mbi += mb
mb_ind, train_ind = get_next_minibatch(train_ind, mbi, mb=mb)
z = np.random.randn(mb, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
#----
# update
# Encoder update
opt_E.zero_grad()
#loss_L1.backward(retain_graph=True)
#loss_CD_Gen.backward(retain_graph=True)
#loss_E = loss_L1 + loss_CD_Gen
#loss_L1.backward()
#R_Cw_z_hat.backward()
#loss_E = loss_L1 + R_Cw_z_hat
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
z_hat = E(x)
x_hat = G(E(x))
Cw_z_hat = CD(z_hat)
loss_Reconstruction = reconstruction_loss_lambda * L1_loss(x, x_hat)
loss_E = loss_Reconstruction
#loss_E += (- torch.log(Cw_z_hat + epsilon) + torch.log(1 - Cw_z_hat + epsilon)).mean()
#loss_E += (- torch.log(Cw_z_hat + epsilon)).mean()
        # targets shaped like the discriminator output (mb, 1) to avoid
        # BCELoss shape-mismatch errors
        loss_E += BCE_loss(Cw_z_hat, torch.ones_like(Cw_z_hat))
loss_E.backward(retain_graph=True)
opt_E.step()
# Generator update
opt_G.zero_grad()
# get x from p(x)
#mb_ind, train_ind = get_next_minibatch(train_ind, mbi, mb=mb)
#x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
x_hat = G(E(x))
z_hat = E(x)
Dphi_x_hat = D(x_hat)
# get z from p(z)
z = np.random.randn(mb, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
Dphi_Gz = D(G(z))
loss_G = reconstruction_loss_lambda * L1_loss(x, x_hat) # Reconstruction loss
#loss_G += (- torch.log(Dphi_x_hat + epsilon) + torch.log(1 - Dphi_x_hat + epsilon)).mean() # R_Dphi_x_hat loss
#loss_G += (- torch.log(Dphi_Gz + epsilon) + torch.log(1 - Dphi_Gz + epsilon)).mean() # R_Dphi_Gz loss
#loss_G += (- torch.log(Dphi_Gz + epsilon)).mean()
        loss_G += BCE_loss(Dphi_Gz, torch.ones_like(Dphi_Gz))
loss_G.backward(retain_graph=True)
        for _ in range(2):  # apply the generator update twice per iteration (same gradients)
            opt_G.step()
# Discriminator update
opt_D.zero_grad()
# get x from p(x)
#mb_ind, train_ind = get_next_minibatch(train_ind, mbi, mb=mb)
#x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
Dphi_x = D(x)
Dphi_x_hat = D(G(E(x)))
z = np.random.randn(mb, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
Dphi_Gz = D(G(z))
#loss_D = - torch.log(Dphi_x + epsilon).mean() - torch.log(1 - Dphi_x_hat + epsilon).mean() - torch.log(1 - Dphi_Gz + epsilon).mean()
#loss_D.backward(retain_graph=True)
        _loss_D = BCE_loss(Dphi_x, torch.ones_like(Dphi_x)) + BCE_loss(Dphi_x_hat, torch.zeros_like(Dphi_x_hat))
        _loss_D.backward(retain_graph=True)
        loss_D = _loss_D
        _loss_D_gz = BCE_loss(Dphi_Gz, torch.zeros_like(Dphi_Gz))
        _loss_D_gz.backward(retain_graph=True)  # the original backpropagated _loss_D a second time here
        loss_D += _loss_D_gz
opt_D.step()
# Code Discriminator update
opt_CD.zero_grad()
# get x from p(x)
#mb_ind, train_ind = get_next_minibatch(train_ind, mbi, mb=mb)
#x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
z_hat = E(x)
z = np.random.randn(mb, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
Cw_z = CD(z)
Cw_z_hat = CD(z_hat)
#loss_CD = - torch.log(1 - Cw_z_hat + epsilon).mean() - torch.log(Cw_z + epsilon).mean()
        loss_CD = BCE_loss(Cw_z, torch.ones_like(Cw_z)) + BCE_loss(Cw_z_hat, torch.zeros_like(Cw_z_hat))
loss_CD.backward(retain_graph=True)
opt_CD.step()
if (i + 1) % 50 == 0:
print('iter : {} , Loss E : {:.5f} , G : {:.5f} , D : {:.5f} , CD : {:.5f}'.format(
i + 1, loss_E.item(), loss_G.item(), loss_D.item(), loss_CD.item()))
if (i + 1) % 1000 == 0:
img_N = 16
z = np.random.randn(img_N, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
Gz = G(z)
if GPU:
Gz = Gz.cpu()
Gz = Gz.detach().numpy()
Gz = (Gz + 1) / 2
Gz = Gz.transpose(0, 2, 3, 1)
for j in range(img_N):
generated = Gz[j]
plt.subplot(1, img_N, j + 1)
plt.imshow(generated)
plt.axis('off')
plt.savefig('{}/alphaGAN_iter_{:05d}.jpg'.format(save_dir, i + 1), bbox_inches='tight')
plt.close()
torch.save(G.state_dict(), 'alphaGAN_G.pt')
torch.save(D.state_dict(), 'alphaGAN_D.pt')
torch.save(E.state_dict(), 'alphaGAN_E.pt')
torch.save(CD.state_dict(), 'alphaGAN_CD.pt')
# test
def test():
G = Generator().to(device)
G.eval()
G.load_state_dict(torch.load('alphaGAN_G.pt'))
D = Discriminator().to(device)
D.eval()
D.load_state_dict(torch.load('alphaGAN_D.pt'))
E = Encoder().to(device)
E.eval()
E.load_state_dict(torch.load('alphaGAN_E.pt'))
CD = Code_Discriminator().to(device)
CD.eval()
CD.load_state_dict(torch.load('alphaGAN_CD.pt'))
np.random.seed(100)
with torch.no_grad():
for i in range(3):
mb = 10
#z = np.random.uniform(-1, 1, size=(mb, Z_dim))
z = np.random.randn(mb, Z_dim)
z = torch.tensor(z, dtype=torch.float).to(device)
Gz = G(z)
if GPU:
Gz = Gz.cpu()
Gz = Gz.detach().numpy()
Gz = (Gz + 1) / 2
Gz = Gz.transpose(0,2,3,1)
for j in range(mb):
generated = Gz[j]
plt.subplot(1, mb, j + 1)
plt.imshow(generated)
plt.axis('off')
plt.savefig(save_dir + '/alphaGAN_test_{}.jpg'.format(i))
plt.show()
def arg_parse():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
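# Editor's note: typical invocations with the parser above --
#   python alphaGAN_cifar10_pytorch.py --train
#   python alphaGAN_cifar10_pytorch.py --test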
if __name__ == '__main__':
    # dispatch on the flags defined in arg_parse(); the original called
    # train() and test() unconditionally, ignoring the parser
    args = arg_parse()
    if args.train:
        train()
    if args.test:
        test()
|
{"hexsha": "3fd81c41dc642dcb79554216d38a94fee743a460", "size": 16000, "ext": "py", "lang": "Python", "max_stars_repo_path": "Question_imageGenerate/answers/alphaGAN_cifar10_pytorch.py", "max_stars_repo_name": "OverHall27/DLMugenKnock", "max_stars_repo_head_hexsha": "f08553213028e90baff7b4de3c640b51485f4a15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Question_imageGenerate/answers/alphaGAN_cifar10_pytorch.py", "max_issues_repo_name": "OverHall27/DLMugenKnock", "max_issues_repo_head_hexsha": "f08553213028e90baff7b4de3c640b51485f4a15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Question_imageGenerate/answers/alphaGAN_cifar10_pytorch.py", "max_forks_repo_name": "OverHall27/DLMugenKnock", "max_forks_repo_head_hexsha": "f08553213028e90baff7b4de3c640b51485f4a15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3605313093, "max_line_length": 142, "alphanum_fraction": 0.5470625, "include": true, "reason": "import numpy", "num_tokens": 4245}
|
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def cross_entropy_2d(predict, target):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
assert predict.size(2) == target.size(1), f"{predict.size(2)} vs {target.size(1)}"
    assert predict.size(3) == target.size(2), f"{predict.size(3)} vs {target.size(2)}"
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target < 200)
target = target[target_mask]
    if not target.data.numel():  # no valid target pixels remain after masking
        return Variable(torch.zeros(1))
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
    loss = F.cross_entropy(predict, target, reduction='mean')  # size_average is deprecated
return loss
def entropy_loss(v):
"""
Entropy loss for probabilistic prediction vectors
input: batch_size x channels x h x w
output: batch_size x 1 x h x w
"""
assert v.dim() == 4
n, c, h, w = v.size()
return -torch.sum(torch.mul(v, torch.log2(v + 1e-30))) / (n * h * w * np.log2(c))
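if __name__ == '__main__':
    # Editor's smoke test (not part of the original module): a uniform
    # two-class prediction has maximal entropy, so the normalized entropy
    # loss should evaluate to ~1.0.
    v = torch.full((2, 2, 4, 4), 0.5)
    print(entropy_loss(v))  # tensor(1.) up to the 1e-30 epsilon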
|
{"hexsha": "fc4db4d8a9c7ab30f62a7c96bdf8313459193349", "size": 1307, "ext": "py", "lang": "Python", "max_stars_repo_path": "advent/utils/loss.py", "max_stars_repo_name": "MLIA/ESL", "max_stars_repo_head_hexsha": "86679fd25d03667880379d59bc73194e7d8d03e3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-07-22T14:00:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T01:02:37.000Z", "max_issues_repo_path": "advent/utils/loss.py", "max_issues_repo_name": "MLIA/ESL", "max_issues_repo_head_hexsha": "86679fd25d03667880379d59bc73194e7d8d03e3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-10T04:34:45.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-23T12:18:26.000Z", "max_forks_repo_path": "advent/utils/loss.py", "max_forks_repo_name": "MLIA/ESL", "max_forks_repo_head_hexsha": "86679fd25d03667880379d59bc73194e7d8d03e3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-03T07:59:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T09:07:12.000Z", "avg_line_length": 33.5128205128, "max_line_length": 86, "alphanum_fraction": 0.6136189748, "include": true, "reason": "import numpy", "num_tokens": 387}
|
import json
#import lmdb
import pickle
import os
from numpy import random
from PIL import Image
import sys
# import torchwordemb
def calc_f(tp,fp,fn):
    precision = tp/(tp+fp)
recall = tp/(tp+fn)
f = 2 * precision * recall / (precision + recall)
print('presision = ', precision, '\nrecall = ', recall, '\nf-measure = ', f)
return
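# Editor's example (hypothetical counts): calc_f(8, 2, 2) now prints
# precision = 0.8, recall = 0.8, f-measure = 0.8 with the corrected formula.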
def look_json(path):
with open(path, 'r') as f:
file = json.load(f)
print(file[0])
"""
def look_lmdb(path):
lmdb_env = lmdb.open(path, max_readers=1, readonly=True, lock=False, readahead=False, meminit=False)
lmdb_txn = lmdb_env.begin(write=False)
lmdb_cursor = lmdb_txn.cursor()
count = 0
for key, value in lmdb_cursor:
print("key = ", key, ", \nvalue = ", pickle.loads(value), "\n")
count += 1
if count > 10:
break
"""
def look_pickle(path):
with open(path, 'rb') as f:
file = pickle.load(f)
if type(file) is list:
for key, value in enumerate(file):
print(value)
if key > 10:
break
elif type(file) is dict:
print("key?")
s = input()
print(type(s))
if s in file:
print(file[s])
else:
print(file)
else:
print(file)
def look_txt(path):
with open(path, 'r') as f:
file = f
inglist = []
for line in file:
ing = line.split(" ")
inglist.append(ing[0])
n = 2
while n > 1:
print("ing id?")
n = int(input()) - 1
try:
print(inglist[n])
except:
break
def img_sep(path):
directory = os.listdir(path)
train_list =[]
test_list =[]
val_list =[]
for pic in directory:
seed = random.rand()
if seed < 0.7 and len(train_list) < 42000:
train_list.append(pic)
elif seed < 0.85 and len(test_list) < 9000:
test_list.append(pic)
elif len(val_list) < 9000:
val_list.append(pic)
elif len(train_list) < 42000:
train_list.append(pic)
else:
test_list.append(pic)
print("len(train) = ", len(train_list))
print("len(test) = ", len(test_list))
print("len(val) = ", len(val_list))
with open("train_images.p", 'wb') as f:
pickle.dump(train_list,f)
with open("test_images.p", 'wb') as f:
pickle.dump(test_list,f)
with open("val_images.p", 'wb') as f:
pickle.dump(val_list,f)
def look_image(path):
pic = Image.open(path)
pic.show()
def ingr_max():
count = 0
max_ingr = 0
recipeid = 0
for line in open('data/Rakuten/recipe02_material_20160112.txt', 'r', encoding="utf-8"):
count += 1.0
proceeding = count / 5274990.0 * 100.0
sys.stdout.write("\r%f%%" % proceeding)
linelist = line.split()
if recipeid == 0:
recipeid = linelist[0]
ingrlist = 0
elif not linelist[0] == recipeid:
if ingrlist > max_ingr:
max_ingr = ingrlist
recipeid = linelist[0]
ingrlist = 0
ingrlist += 1
print("max ingr = ", max_ingr)
"""
def look_bin():
name, vec = torchwordemb.load_word2vec_bin("data/vocab.bin")
print(name['*'])
"""
print("MODE? (1 = json, 2 = image, 3 = pickle, 4 = text, 5 = img separation, \n\t6 = recipe_ingr, 7 = bin)")
m = input()
print("PATH?")
path = input()
if m == "1":
look_json(path)
elif m == '2':
    look_image(path)  # use the path entered above rather than a hardcoded image
elif m == "3":
if path == 'R':
path = 'data/ingredients_dict.p'
look_pickle(path)
elif m == '4':
    look_txt(path)  # use the path entered above rather than a hardcoded file
elif m == '5':
img_sep("data/images/")
elif m == '6':
ingr_max()
elif m == '7':
look_bin()
else:
print("Bad input mode")
|
{"hexsha": "a8a446c65ad190080de7c9aeb6f171c6f8e9818e", "size": 4040, "ext": "py", "lang": "Python", "max_stars_repo_path": "small_tools.py", "max_stars_repo_name": "paisuygoda/im2ingr", "max_stars_repo_head_hexsha": "b69de9a5a0a7fd42f64c091d681803bc501817eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "small_tools.py", "max_issues_repo_name": "paisuygoda/im2ingr", "max_issues_repo_head_hexsha": "b69de9a5a0a7fd42f64c091d681803bc501817eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "small_tools.py", "max_forks_repo_name": "paisuygoda/im2ingr", "max_forks_repo_head_hexsha": "b69de9a5a0a7fd42f64c091d681803bc501817eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0931677019, "max_line_length": 109, "alphanum_fraction": 0.5175742574, "include": true, "reason": "from numpy", "num_tokens": 1081}
|
/*
* The MIT License (MIT)
*
* Copyright (c) <2015> <Stephan Gatzka>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define BOOST_TEST_DYN_LINK
#define BOOST_TEST_MAIN
#define BOOST_TEST_MODULE router
#include <boost/test/unit_test.hpp>
#include "compiler.h"
#include "json/cJSON.h"
#include "peer.h"
#include "router.h"
extern "C" {
cjet_ssize_t socket_read(socket_type sock, void *buf, size_t count)
{
(void)sock;
(void)count;
uint64_t number_of_timeouts = 1;
::memcpy(buf, &number_of_timeouts, sizeof(number_of_timeouts));
return 8;
}
int socket_close(socket_type sock)
{
(void)sock;
return 0;
}
}
static cJSON *create_response_no_id()
{
cJSON *root = cJSON_CreateObject();
BOOST_REQUIRE(root != NULL);
cJSON_AddStringToObject(root, "result", "o.k.");
cJSON *params = cJSON_CreateObject();
BOOST_REQUIRE(params != NULL);
cJSON_AddItemToObject(root, "params", params);
return root;
}
static cJSON *create_response_wrong_id()
{
cJSON *root = cJSON_CreateObject();
BOOST_REQUIRE(root != NULL);
cJSON_AddTrueToObject(root, "id");
cJSON_AddStringToObject(root, "result", "o.k.");
cJSON *params = cJSON_CreateObject();
BOOST_REQUIRE(params != NULL);
cJSON_AddItemToObject(root, "params", params);
return root;
}
struct F {
F()
{
init_peer(&p, false, NULL);
}
~F()
{
free_peer_resources(&p);
}
struct peer p;
};
BOOST_FIXTURE_TEST_CASE(handle_response, F)
{
cJSON *response = create_response_no_id();
cJSON *result = cJSON_GetObjectItem(response, "result");
int ret = handle_routing_response(response, result, "result", &p);
BOOST_CHECK(ret == -1);
cJSON_Delete(response);
response = create_response_wrong_id();
result = cJSON_GetObjectItem(response, "result");
ret = handle_routing_response(response, result, "result", &p);
BOOST_CHECK(ret == -1);
cJSON_Delete(response);
}
|
{"hexsha": "b93138b80fed61a60ece219d637954dbceb8c10e", "size": 2882, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/tests/router_test.cpp", "max_stars_repo_name": "mloy/cjet", "max_stars_repo_head_hexsha": "6645cefebb21bad577ea3792cd3b5c77c61f408a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2015-06-03T22:15:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-16T07:26:12.000Z", "max_issues_repo_path": "src/tests/router_test.cpp", "max_issues_repo_name": "mloy/cjet", "max_issues_repo_head_hexsha": "6645cefebb21bad577ea3792cd3b5c77c61f408a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 108.0, "max_issues_repo_issues_event_min_datetime": "2015-06-03T09:50:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T16:48:26.000Z", "max_forks_repo_path": "src/tests/router_test.cpp", "max_forks_repo_name": "mloy/cjet", "max_forks_repo_head_hexsha": "6645cefebb21bad577ea3792cd3b5c77c61f408a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2015-06-17T06:47:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-28T06:52:46.000Z", "avg_line_length": 26.6851851852, "max_line_length": 72, "alphanum_fraction": 0.7331714087, "num_tokens": 692}
|
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import torch
from tabulate import tabulate
import torchvision.utils as tu
# toy payloads: a small multiplication table for add_text and a random
# HWC image for add_image
c = [[i * j for i in range(20)] for j in range(10)]
b = torch.rand(120, 40, 3)  # float image in [0, 1], as add_image expects
a = SummaryWriter(log_dir="tb_test")
for i in range(10):
    a.add_text("tester", tabulate(c), i)
    a.add_image("lalalalala", b ** i, i, dataformats="HWC")
    print(str(tabulate(c)))
a.close()
|
{"hexsha": "235d563c161bdb16df3ac3350fd7593036791625", "size": 411, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/tensorb.py", "max_stars_repo_name": "neelabh17/SegmenTron", "max_stars_repo_head_hexsha": "69a4d1da858aba9222994847000f9945be3f4cd5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/tensorb.py", "max_issues_repo_name": "neelabh17/SegmenTron", "max_issues_repo_head_hexsha": "69a4d1da858aba9222994847000f9945be3f4cd5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/tensorb.py", "max_forks_repo_name": "neelabh17/SegmenTron", "max_forks_repo_head_hexsha": "69a4d1da858aba9222994847000f9945be3f4cd5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4, "max_line_length": 59, "alphanum_fraction": 0.7201946472, "include": true, "reason": "import numpy", "num_tokens": 124}
|
"""
Solvers for over-identified systems.
@author : davidrpugh
"""
from scipy import optimize
from . import solvers
class LeastSquaresSolver(solvers.Solver):
def solve(self, basis_kwargs, boundary_points, coefs_array, nodes, problem,
**solver_options):
"""
Solve a boundary value problem using the collocation method.
        Parameters
        ----------
        basis_kwargs : dict
            Dictionary of keyword arguments used to build basis functions.
        boundary_points : tuple
            Boundary points for the two-point boundary value problem (BVP).
        coefs_array : numpy.ndarray
            Array of coefficients for basis functions defining the initial
            condition.
        nodes : numpy.ndarray
            Array of nodes at which the collocation residuals are evaluated.
        problem : bvp.TwoPointBVPLike
            A two-point boundary value problem (BVP) to solve.
        solver_options : dict
            Dictionary of options to pass to the non-linear equation solver.

        Returns
        -------
        solution : solutions.SolutionLike
            An instance of the SolutionLike class representing the solution to
            the two-point boundary value problem (BVP).

        Notes
        -----

        """
result = optimize.leastsq(self._compute_residuals,
x0=coefs_array,
args=(basis_kwargs, boundary_points, nodes, problem),
**solver_options)
solution = self._solution_factory(basis_kwargs, result[0], nodes,
problem, result)
return solution
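# Editor's sketch of the underlying primitive (hypothetical toy residuals):
# scipy.optimize.leastsq minimizes the sum of squared residuals returned by a
# callable and returns the best-fit coefficients first, which is how `solve`
# recovers result[0] above.
#
#     from scipy import optimize
#     residuals = lambda c: [c[0] - 1.0, 2.0 * (c[1] - 3.0)]
#     best, ier = optimize.leastsq(residuals, x0=[0.0, 0.0])
#     # best -> array([1., 3.])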
|
{"hexsha": "7a4a92f9a41e74925756fa6daa7d7b80c45b38c7", "size": 1495, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycollocation/solvers/over_identified.py", "max_stars_repo_name": "davidrpugh/bvp-solver", "max_stars_repo_head_hexsha": "9376f3488a992dc416cfd2a4dbb396d094927569", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2015-10-23T15:54:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-23T15:31:17.000Z", "max_issues_repo_path": "pycollocation/solvers/over_identified.py", "max_issues_repo_name": "davidrpugh/bvp-solver", "max_issues_repo_head_hexsha": "9376f3488a992dc416cfd2a4dbb396d094927569", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2015-03-26T15:35:39.000Z", "max_issues_repo_issues_event_max_datetime": "2016-09-13T06:36:55.000Z", "max_forks_repo_path": "pycollocation/solvers/over_identified.py", "max_forks_repo_name": "davidrpugh/bvp-solver", "max_forks_repo_head_hexsha": "9376f3488a992dc416cfd2a4dbb396d094927569", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2015-04-08T12:45:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T08:56:50.000Z", "avg_line_length": 31.1458333333, "max_line_length": 87, "alphanum_fraction": 0.5846153846, "include": true, "reason": "from scipy", "num_tokens": 275}
|
import numpy as np
import os
class _ADE_proto(object):
def __init__(self):
curr_path = os.path.dirname(os.path.abspath(__file__))
colors = np.load(os.path.join(curr_path, 'color150.npy'))
self.palette = np.full((256, 3), 255, np.uint8)
for i, c in enumerate(colors):
self.palette[i] = c[::-1].astype(np.uint8)
ADE = _ADE_proto()
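# Editor's note: colors are stored channel-reversed (RGB -> BGR), so the
# palette indexes directly into OpenCV images, e.g. (sketch, assuming an
# integer (H, W) label map and `import cv2`):
#     colored = ADE.palette[label_map]   # -> (H, W, 3) uint8 BGR
#     cv2.imwrite('vis.png', colored)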
|
{"hexsha": "2130e4df1610faf4c5a5878b71a338199abd3088", "size": 379, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/utils/dataset_tools/ade.py", "max_stars_repo_name": "js-fan/MCIC", "max_stars_repo_head_hexsha": "a98927e2d88452d96f1fba99a5dc25a5f518caa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-19T21:52:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-19T21:52:46.000Z", "max_issues_repo_path": "core/utils/dataset_tools/ade.py", "max_issues_repo_name": "js-fan/MCIC", "max_issues_repo_head_hexsha": "a98927e2d88452d96f1fba99a5dc25a5f518caa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "core/utils/dataset_tools/ade.py", "max_forks_repo_name": "js-fan/MCIC", "max_forks_repo_head_hexsha": "a98927e2d88452d96f1fba99a5dc25a5f518caa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1538461538, "max_line_length": 65, "alphanum_fraction": 0.6253298153, "include": true, "reason": "import numpy", "num_tokens": 99}
|
import Base: IteratorSize, HasLength, IsInfinite, length
struct FinitePeriodTrigger <: AbstractFiniteTrigger
td::Dates.Period
n::Int
end
struct InfinitePeriodTrigger <: AbstractInfiniteTrigger
td::Dates.Period
end
"""
PeriodTrigger(td::Dates.Period[, n=number_of_times])
A trigger which should trigger a job after a given period (`DatePeriod` or `TimePeriod`)
# Optional parameter
- `n=1`: trigger once
- `n=-1` (default): trigger repeatedly, indefinitely
- `n=value`: trigger just a number of times
"""
function PeriodTrigger(td; n=-1)
if n < 0
InfinitePeriodTrigger(td)
else
FinitePeriodTrigger(td, n)
end
end
"""
Trigger(td::Dates.Period[, n=number_of_times])
Return an `PeriodTrigger` which should trigger a job after a given period (`DatePeriod` or `TimePeriod`).
"""
Trigger(td::Dates.Period; kwargs...) = PeriodTrigger(td; kwargs...)
function get_next_dt_fire(trigger::Union{FinitePeriodTrigger,InfinitePeriodTrigger}, dt_previous_fire, dt_now)
if dt_previous_fire == DateTime(0)
dt_now + trigger.td
else
dt_previous_fire + trigger.td
end
end
|
{"hexsha": "88ac040e9fc417d929fcf9a5ac8130deafeffff1", "size": 1130, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/triggers/period.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/ExtensibleScheduler.jl-6837a093-145e-5c9b-b5ad-3b557e31aa31", "max_stars_repo_head_hexsha": "6cf6ab918d8924154e110426c14bea774dc78960", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-01-31T16:45:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T04:42:26.000Z", "max_issues_repo_path": "src/triggers/period.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/ExtensibleScheduler.jl-6837a093-145e-5c9b-b5ad-3b557e31aa31", "max_issues_repo_head_hexsha": "6cf6ab918d8924154e110426c14bea774dc78960", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2017-12-25T09:52:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-14T10:46:33.000Z", "max_forks_repo_path": "src/triggers/period.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/ExtensibleScheduler.jl-6837a093-145e-5c9b-b5ad-3b557e31aa31", "max_forks_repo_head_hexsha": "6cf6ab918d8924154e110426c14bea774dc78960", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-02-26T18:19:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-18T04:42:46.000Z", "avg_line_length": 25.1111111111, "max_line_length": 110, "alphanum_fraction": 0.7150442478, "num_tokens": 288}
|